diff --git "a/2506.jsonl" "b/2506.jsonl" new file mode 100644--- /dev/null +++ "b/2506.jsonl" @@ -0,0 +1,744 @@ +{"seq_id":"324571303","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Note\nfrom .forms import NoteForm\n\n# Create your views here.\ndef index(request):\n notes = Note.objects.all()\n count = len(notes)\n context = {\n 'notas':notes,\n 'largo':count \n }\n return render(request,'notes/index.html',context)\ndef otraPagina(request):\n valor = {'nombre':'Soy un parametro de java'}\n return render(request,'notes/index.html',context=valor)\n\ndef add(request):\n form = NoteForm()\n if request.method == 'POST':\n print('Me estas enviado por POST')\n print(request.POST)\n newNota = NoteForm(request.POST)\n if newNota.is_valid():\n newNota.save() \n print(request.method)\n context = {'form':form}\n return render(request,'notes/add.html',context)\n\ndef find(request,id_nota):\n try:\n nota = Note.objects.get(pk=id_nota)\n except Note.DoesNotExist:\n return HttpResponse('Nota no existe')\n context = {'nota':nota}\n return render(request,'notes/find.html',context)","sub_path":"notes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"607287464","text":"# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration\n\n## @PowhegControl SingleTopDecorator\n# Powheg runcard decorator for single top production\n#\n# Authors: James Robinson \n\n#! /usr/bin/env python\nfrom .. import ATLASCommonParameters\n\nclass SingleTopDecorator(object) :\n\n ## Define decorator name string\n name = 'single top'\n\n def __init__( self, decorated ) :\n ## Attach decorations to Powheg configurable\n decorated.run_card_decorators.append( self )\n self.decorated = decorated\n\n self.decorated.add_parameter( 'alphaem_inv', 1.0/float(ATLASCommonParameters.alphaem), default='{0}', desc='EM coupling' )\n self.decorated.add_parameter( 'ttype', 1, default='{0}', desc='(1:t; -1:tbar)' )\n self.decorated.add_parameter( 'wmass', ATLASCommonParameters.mass_W, default='{0}', desc='mass of W boson in GeV' )\n self.decorated.add_parameter( 'wwidth', ATLASCommonParameters.width_W, default='{0}', desc='W width' )\n","sub_path":"Generators/PowhegControl/python/decorators/SingleTopDecorator.py","file_name":"SingleTopDecorator.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"7668114","text":"import BankLib\nmySavings = BankLib.BankAccount(100,1)\nUserInput = \"yes\"\nwhile UserInput == \"yes\":\n print(\"Please select from the following options: \\n\")\n print(\"1. Deposit\")\n print(\"2. Withdraw\")\n print(\"3. Display Balance\")\n print(\"4. Get Interest\")\n print(\"\\n\")\n \n options = int(input(\"Option: \"))\n if options > 4 or options < 1:\n print(\"ERROR! 
Incorrect option\")\n if options == 1: \n depositamount = int(input(\"Please enter the amount: \"))\n mySavings.deposit(depositamount)\n elif options == 2: \n withdrawamount = int(input(\"Please enter the amount: \"))\n balance = mySavings.getBalance()\n if withdrawamount > balance:\n print(\"Cannot withdraw that amount of money!\")\n else:\n mySavings.withdraw(withdrawamount)\n elif options == 3: \n balance = mySavings.getBalance()\n print(\"Remaining amount: \", balance) \n elif options == 4:\n NewInterest = mySavings.calculateInterest()\n print(\"Interest: \",NewInterest)\n mySavings.AddInterest(NewInterest)\n \n UserInput = input(\"Would you like to continue? \") \n \n \n ","sub_path":"Week 10 OOP/Using Bank Account Lib.py","file_name":"Using Bank Account Lib.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"179263377","text":"#!/usr/bin/env python\n########################################################################\n# created by: Pushtakio\n# purpose: network server client practice\n# date: unknow\n# version: 2.7.91\n########################################################################\n\nimport socket\n'''\ndef getInput():\n ans = input(\"enter your message: \")\n return ans\n\n# This is the method that creates the challenge. In this case it's the sum of ascii values of all characters in the message\ndef createChallenge(message):\n return sum(map(ord, message))\n\n# The method to add a challenge to a message. The return message will be as follows: __\ndef challengeEncode(message):\n # create the challenge\n challenge = createChallenge(message)\n # create the encoded message in the format of __\n return '%d_%s_%d' % (len(message), message, challenge)\n\n# The method to decode a message encoded with a challenges\ndef challengeDecode(message):\n try:\n # first get the number of characters in original message\n num_of_chars = int(message.split('_')[0])\n # calcualte the number of digits in number of characters\n num_of_digits = len(str(num_of_chars))\n # extract the original message from the encoded message\n orig_message = message[num_of_digits+1:num_of_digits+1+num_of_chars]\n # extract the challenge from the encoded message\n challenge = int(message[num_of_digits+1+num_of_chars+1::])\n # calculate the challenge for the original message and compare is to the challenge we got in the message\n return (challenge == createChallenge(orig_message))\n # if an excpetion is thrown it means the encoded message structure was invalid\n except:\n print (\"Could not decode encoded message: message structure is invalid\")\n return False\n\n\ndef echoClient():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = ('localhost', 10000)\n print('connecting to %s port %s' % server_address)\n sock.connect(server_address)\n\n # a varaible to remember if we already sent the username\n username_sent = False\n # a varaible to remember if we already sent the password\n password_sent = False\n\n try:\n while True:\n message = getInput()\n if message.lower() == \"bye\":\n print(\"[!] 
Disconnecting: Bye\")\n sock.sendall(str.encode(\"bye\"))\n break\n\n # make sure username if sent first\n if not username_sent:\n username_sent = True\n print('[+]sending username \"%s\"' % str.encode(message))\n sock.sendall(str.encode(message))\n # Look for the response\n data = sock.recv(50)\n if data:\n print('[-]received \"%s\"' % data.decode(\"utf-8\"))\n continue\n\n # now send the password\n if not password_sent:\n password_sent = True\n print('[+]sending password')\n # make sure to encode the password message before sending it\n message = challengeEncode(message)\n sock.sendall(str.encode(message))\n # Look for the response\n data = sock.recv(50)\n if data:\n print('[-]received \"%s\"' % data.decode(\"utf-8\"))\n continue\n\n # for any other message - just send it\n else:\n print('[+]sending message \"%s\"' % str.encode(message))\n sock.sendall(str.encode(message))\n # Look for the response\n data = sock.recv(50)\n if data:\n print('[-]received \"%s\"' % data.decode(\"utf-8\"))\n\n finally:\n print('[!]closing socket')\n sock.close()\n\nif __name__ == \"__main__\":\n\techoClient()\n'''\n\n\nimport sys\n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind the socket to the address given on the command line\nserver_address = ('', 10000)\nsock.bind(server_address)\nprint >>sys.stderr, 'starting up on %s port %s' % sock.getsockname()\nsock.listen(1)\n\nwhile True:\n print >>sys.stderr, 'waiting for a connection'\n connection, client_address = sock.accept()\n try:\n print >>sys.stderr, 'client connected:', client_address\n while True:\n data = connection.recv(16)\n print >>sys.stderr, 'received \"%s\"' % data\n if data:\n connection.sendall(data)\n else:\n break\n finally:\n connection.close()\n","sub_path":"network_exmaples/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"629966800","text":"import datetime\n\nfrom airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator\nfrom airflow.operators.bash_operator import BashOperator\n\n\ndef training_tasks(model, dag, PROJECT_ID, BUCKET, DATA_DIR, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION):\n # Constants\n # The code package name comes from the model code in the module directory\n REGION = \"us-east1\"\n PACKAGE_URI = BUCKET + \"/taxifare/code/taxifare-0.1.tar.gz\"\n JOB_DIR = BUCKET + \"/jobs\"\n\n # ML Engine training job\n job_id = \"taxifare_{}_{}\".format(model.replace(\".\",\"_\"), datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n train_files = DATA_DIR + \"{}/train-*.csv\".format(model.replace(\".\",\"_\"))\n eval_files = DATA_DIR + \"{}/eval-*.csv\".format(model.replace(\".\",\"_\"))\n output_dir = BUCKET + \"/taxifare/trained_model/{}\".format(model.replace(\".\",\"_\"))\n job_dir = JOB_DIR + \"/\" + job_id\n training_args = [\n \"--job-dir\", job_dir,\n \"--train_data_paths\", train_files,\n \"--eval_data_paths\", eval_files,\n \"--output_dir\", output_dir,\n \"--train_steps\", str(500),\n \"--train_batch_size\", str(32),\n \"--eval_steps\", str(500),\n \"--eval_batch_size\", str(32),\n \"--nbuckets\", str(8),\n \"--hidden_units\", \"128,32,4\"\n ]\n\n # Reference: https://airflow.apache.org/integration.html#cloud-ml-engine\n ml_engine_training_op = MLEngineTrainingOperator(\n task_id=\"ml_engine_training_{}_task\".format(model.replace(\".\",\"_\")),\n project_id=PROJECT_ID,\n job_id=job_id,\n package_uris=[PACKAGE_URI],\n 
training_python_module=\"trainer.task\",\n training_args=training_args,\n region=REGION,\n scale_tier=\"BASIC\",\n runtime_version=\"1.13\", \n python_version=\"3.5\",\n dag=dag\n )\n\n bash_remove_old_saved_model_op = BashOperator(\n task_id=\"bash_remove_old_saved_model_{}_task\".format(model.replace(\".\",\"_\")),\n bash_command=\"if gsutil ls {0} 2> /dev/null; then gsutil -m rm -rf {0}/*; else true; fi\".format(MODEL_LOCATION + model.replace(\".\",\"_\")),\n dag=dag\n )\n\n bash_copy_new_saved_model_op = BashOperator(\n task_id=\"bash_copy_new_saved_model_{}_task\".format(model.replace(\".\",\"_\")),\n bash_command=\"gsutil -m rsync -d -r `gsutil ls {0}/export/exporter/ | tail -1` {1}\".format(output_dir, MODEL_LOCATION + model.replace(\".\",\"_\")),\n dag=dag\n )\n \n # Build dependency graph, set_upstream dependencies for all tasks\n bash_remove_old_saved_model_op.set_upstream(ml_engine_training_op)\n bash_copy_new_saved_model_op.set_upstream(bash_remove_old_saved_model_op)\n \n return (ml_engine_training_op,\n bash_copy_new_saved_model_op)\n","sub_path":"courses/machine_learning/asl/open_project/cloud_composer_automated_ml_pipeline_taxifare/airflow/dags/module/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"485756558","text":"from typing import List\n\nfrom .signature import Signature\nfrom .package import Package\nfrom pathlib import Path\nfrom conans.client.conan_api import Conan\n\n\nclass Runner:\n def __init__(self, root_path, signature=Signature()):\n self.conanfactory, _, _ = Conan.factory()\n self.packages = self._get_all_packages(root_path, signature)\n\n def create_all(self, configurations):\n for config in configurations:\n print(\n \"#######################################\\n\"\n \"########### create packages ###########\\n\"\n f\"# host profile: {config.host_profile}\\n\"\n f\"# build profile: {config.build_profile}\\n\"\n f\"# host settings: {config.host_settings}\\n\"\n f\"# build settings: {config.build_settings}\\n\"\n f\"# build : {config.build}\\n\"\n f\"# includes: {config.includes}\\n\"\n f\"# excludes: {config.excludes}\\n\"\n \"#######################################\\n\"\n )\n for package in self.packages:\n if package.is_withing_scope(config):\n package.create(config)\n\n # relative_path = path.absolute()\n # eprint(package.pattern)\n\n # package_signature = get_package_signature()\n # package_pattern=f'{package_signature.name}/{package_signature.version}@{package_signature.user}/{package_signature.channel}'\n # conan_command_line.create(package.path,test_build_folder=f'/tmp/{package.pattern}/tbf')\n # TODO:profiles_names =HOST, profiles_build=build\n # conan_command_line.authenticate()\n # conan_command_line.remote_add()\n # conan_command_line.upload(package_pattern)\n # print(f'SUCCESS: {package_pattern}')\n def add_all_remotes(self, remotes, username=None, password=None):\n print(\n \"#######################################\\n\"\n \"########### add remote ################\\n\"\n \"#######################################\\n\"\n )\n if remotes:\n for remote in remotes:\n self.conanfactory.remote_add(\n remote_name=remote.name,\n url=remote.url,\n verify_ssl=remote.verify_ssl,\n insert=remote.priority,\n force=remote.force,\n )\n if remote.login:\n if not username or not password:\n raise Warning(f\"Can't login to {remote.name} no username or password provided!\")\n else:\n self.conanfactory.authenticate(name=username, 
password=password, remote_name=remote.name)\n else:\n raise Warning(\"No Remotes defined. Nothing to add!\")\n\n def _get_all_packages(self, root_path, signature=Signature()) -> List[Package]:\n conan_packages = []\n for path in Path(root_path).rglob(\"conanfile.py\"):\n path_string = str(path.absolute())\n if \"test_package\" not in path_string:\n conan_packages.append(Package(self.conanfactory, signature, path_string))\n return conan_packages\n\n def export_all(self):\n for package in self.packages:\n package.export()\n\n def get_all_sources(self):\n print(\n \"#######################################\\n\"\n \"########### download sources ##########\\n\"\n \"#######################################\\n\"\n )\n for package in self.packages:\n package.source()\n\n def remove_all_sources(self):\n print(\n \"#######################################\\n\"\n \"########### remove sources ############\\n\"\n \"#######################################\\n\"\n )\n for package in self.packages:\n package.source_remove()\n\n def upload_all_packages(self, remote):\n print(\n \"#######################################\\n\"\n \"########### upload packages ###########\\n\"\n \"#######################################\\n\"\n )\n for package in self.packages:\n package.upload_package(remote)\n","sub_path":"src/conanbuilder/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"281581686","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nimport argparse\nimport yaml\n\nfrom graphviz import Digraph\n\nimport gi\ngi.require_version('Gtk','3.0')\nfrom gi.repository import Gtk\n\nimport xdot\n\n\nclass MyDotwindow(xdot.DotWindow):\n\n def __init__(self):\n xdot.DotWindow.__init__(self)\n self.dotwidget.connect('clicked', self.on_url_clicked)\n\n def on_url_clicked(self, widget, url, event):\n dialog = Gtk.MessageDialog(\n parent=self,\n buttons=Gtk.ButtonsType.OK,\n message_format='{} clicked'.format(url))\n dialog.connect('response', lambda dialog, response: dialog.destroy())\n dialog.run()\n return True\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('filename')\n parser.add_argument('--output')\n args = parser.parse_args()\n\n filename = args.filename\n\n with open(filename, 'r') as f:\n map_data = yaml.load(f, Loader=yaml.FullLoader)\n\n edges = map_data['edges']\n\n list_node = []\n list_behavior_type = []\n list_color_for_bt = []\n\n for edge in edges:\n\n if edge['from'] not in list_node:\n list_node.append(edge['from'])\n\n if edge['to'] not in list_node:\n list_node.append(edge['to'])\n\n if edge['behavior_type'] not in list_behavior_type:\n list_behavior_type.append(edge['behavior_type'])\n\n for index, behavior_type in enumerate(list_behavior_type):\n list_color_for_bt.append('{} 1.0 1.0'.format(1.0 * index / len(list_behavior_type)))\n\n dg = Digraph(format='svg')\n\n for node in list_node:\n dg.node(node)\n\n for edge in edges:\n dg.attr('edge', color=list_color_for_bt[list_behavior_type.index(edge['behavior_type'])])\n dg.edge(edge['from'], edge['to'], label=edge['behavior_type'].split('.')[2])\n\n\n if args.output:\n dg.render(args.output)\n else:\n window = MyDotwindow()\n window.set_dotcode(dg.source.encode('utf-8'))\n window.connect('delete-event', Gtk.main_quit)\n Gtk.main()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"jsk_spot_robot/jsk_spot_behaviors/spot_behavior_graph/scripts/visualize_map.py","file_name":"visualize_map.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"122037040","text":"# --------------\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#code starts here\n\ndf = pd.read_csv(path)\ntotal = df.shape[0]\npa_num = df[df['fico'] > 700].shape[0]\np_a = pa_num / total\nprint(p_a)\n\npb_num = df[df['purpose'] == 'debt_consolidation'].shape[0]\ndf1= df[df['purpose'] == 'debt_consolidation']\np_b = pb_num / total\nprint(p_b)\n\npab=(df[(df['purpose']=='debt_consolidation') & (df['fico']>700)]).shape[0]\np_a_b=pab/total\nprint(p_a_b)\n\nresult=p_a_b==p_a\nprint(result)\n\n# code ends here\n\n\n# --------------\n# code starts here\n\ntotal = df.shape[0]\n\nproblp_num = df[df['paid.back.loan'] == 'Yes'].shape[0]\nprob_lp = problp_num / total\nprint(prob_lp)\n\nprobcs_num = df[df['credit.policy'] == 'Yes'].shape[0]\nprob_cs = probcs_num / total\nprint(prob_cs)\n\nnew_df = df[df['paid.back.loan'] == 'Yes']\n\nprobpdcs_num = (df[(df['paid.back.loan'] == 'Yes') & (df['credit.policy'] == 'Yes')]).shape[0]\n#prob_pd_cs = probpdcs_num / total\n#print(prob_pd_cs)\nprob_pd_cs = 0.8323182100683655\nbayes = (prob_pd_cs * prob_lp) / prob_cs\nprint(bayes)\n# code ends here\n\n\n# --------------\n# code starts here\nplt.bar(df['purpose'], 0.5)\ndf1 = df[df['paid.back.loan'] == 'No']\nplt.bar(df1['purpose'], 0.5)\n# code ends here\n\n\n# --------------\n# code starts here\ninst_median = df['installment'].median()\ninst_mean = df['installment'].mean()\nplt.hist(df['installment'])\nplt.show()\nplt.hist(df['log.annual.inc'])\nplt.show()\n# code ends here\n\n\n","sub_path":"Probability-of-Loan-Defaulters/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"298373086","text":"#!/usr/bin/python\n#\n# use webkit & GtkOffscreenWindows to create screenshots of a page at\n# different resolutions\n#\n# (c) 2013 Michael Vogt \n#\n\nimport sys\n\nfrom gi.repository import (\n Gtk,\n WebKit,\n)\nfrom optparse import OptionParser\n\n# add security by default (see bugzilla #666280 and #666276)\n# enable certificates validation in webkit views unless specified otherwise\nsession = WebKit.get_default_session()\nsession.set_property(\"ssl-use-system-ca-file\", True)\n\n\n\nclass ScreenshotBrowser(Gtk.OffscreenWindow):\n\n def __init__(self, width, height, user_agent, outputname, outputformat):\n super(ScreenshotBrowser, self).__init__()\n self.init_ui(width, height)\n self.init_output_settings(outputname, outputformat)\n self.init_widgets()\n self.init_settings(width, height, user_agent)\n self.init_signals()\n\n def init_output_settings(self,outputname,outputformat):\n self.outputname = outputname + \".\" + outputformat\n self.outputformat = outputformat\n\n def init_ui(self, width, height):\n self.set_default_size(width, height)\n \n def init_widgets(self):\n box = Gtk.VBox()\n box.show()\n self.add(box)\n # webkit\n self.webview = WebKit.WebView()\n self.webview.show()\n #scroll = Gtk.ScrolledWindow()\n #scroll.add(self.webview)\n scroll.show()\n scroll.get_vscrollbar().set_child_visible(False) \t\t\n scroll.get_hscrollbar().set_child_visible(False) \t\t\n box.pack_start(scroll, True, True, 0)\n self.show_all()\n\n def make_screenshot(self, url):\n 
self.webview.load_uri(uri)\n\n def init_signals(self):\n self.connect(\"destroy\", Gtk.main_quit)\n self.webview.connect(\"load-progress-changed\", self._on_load_changed)\n\n def init_settings(self, width, height, user_agent):\n settings = self.webview.get_settings()\n settings.set_property(\"enable-plugins\", False)\n settings.set_property(\"enable-java-applet\", False)\n settings.set_property(\"user-agent\", user_agent)\n #attributes = self.webview.get_viewport_attributes()\n #attributes.set_property(\"device-height\", width)\n #attributes.set_property(\"device-width\", height)\n #attributes.set_property(\"available-height\", width)\n #attributes.set_property(\"available-width\", height)\n\n def _on_load_changed(self, view, percent):\n if percent == 100:\n pixbuf = self.get_pixbuf()\n pixbuf.savev(self.outputname, self.outputformat, [], [])\n Gtk.main_quit()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n uri = sys.argv[1]\n else:\n uri = \"http://www.uni-trier.de/\"\n\n usage = \"usage: %prog [options] url\"\n parser = OptionParser(usage=usage)\n parser = OptionParser()\n parser.add_option('-W', '--width', action='store', type='int', help='Resolution width(default %default)', default=\"1024\")\n parser.add_option('-H', '--height', action='store', type='int', help='Resolution height(default %default)', default=\"768\")\n parser.add_option('-d', '--device', action='store', type='string', help='String of the user-agent (default %default)', default=\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.76 Safari/537.36\")\n parser.add_option('-o', '--output', action='store', type='string', help='Output-Name without Extension(default %default)', default=\"output\")\n parser.add_option('-f', '--format', action='store', type='string', help='Output-Format(default %default)', default=\"png\")\n (options, args) = parser.parse_args()\n\t\n browser = ScreenshotBrowser(options.width, options.height, options.device,options.output, options.format)\n browser.make_screenshot(uri)\n browser.show()\n Gtk.main()\n","sub_path":"screenshot-browser.py","file_name":"screenshot-browser.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"352484978","text":"from src.MainWindow import *\nfrom src.Client import *\nfrom tkinter import *\n\n\nclass Menu:\n def __init__(self):\n self.__ip = \"\"\n self.__port = \"\"\n self.__root = Tk()\n self.__button = Button(self.__root, text='New Game', command=self.newGame)\n self.__button2 = Button(self.__root, text='Set IP and channel', command=self.setIP)\n self.__button3 = Button(self.__root, text='Exit', command=self.exit)\n self.__button.pack(pady=20, padx=20)\n self.__button2.pack(pady=30, padx=20)\n self.__button3.pack(pady=40, padx=20)\n self.__root.mainloop()\n\n def newGame(self): # function\n connectData = []\n connectData.append(self.__ip)\n connectData.append(self.__port)\n self.__root.destroy()\n c = Client()\n c.connect(connectData[0],int(connectData[1]))\n #c.connect('127.0.0.1', 56000) #debug\n c.send(0, None)\n uid = c.receive()['id']\n mainWindow = MainWindow(c, uid)\n mainWindow.mainLoop()\n c.disconnect()\n\n def showEntryFields(self, root, e2, e3):\n self.__ip = e2.get()\n self.__port = e3.get()\n root.destroy()\n\n def setIP(self):\n root = Tk()\n e = Entry(root)\n e.grid(row=0, column=1)\n # e.pack()\n\n e2 = Entry(root)\n e2.grid(row=1, column=1)\n # e2.pack()\n ipAddrLabel = Label(root, text=\"IP 
address\")\n ipAddrLabel.grid(row=0)\n\n portLabel = Label(root, text=\"Port\")\n portLabel.grid(row=1)\n\n button2 = Button(root, text='Quit', command=root.quit)\n button3 = Button(root, text='Accept', command=lambda: self.showEntryFields(root, e, e2))\n\n button2.grid(row=3, column=0)\n button3.grid(row=3, column=1)\n root.mainloop()\n\n def exit(self):\n sys.exit();\n\n\nif __name__ == '__main__':\n menu = Menu()\n","sub_path":"src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"389124415","text":"##! python3\n\nfrom credential import reddit\nimport praw\nfrom praw.models import MoreComments\nimport pandas as pd\nimport datetime as dt\n\nprint(reddit.user.me()) # confirmation\n\nsubreddit = reddit.subreddit('leagueoflegends')\n# print(subreddit.display_name)\nprint(\"scrapped Reddit\")\n\nhot_subreddit = subreddit.top(limit=1)\n\ntopics_dict = {\n \"likes\": [], \\\n \"popular\": []}\n\nfor submission in hot_subreddit:\n top_level_comments = list(submission.comments)\n for top_level_comment in top_level_comments:\n if isinstance(top_level_comment, MoreComments):\n continue\n topics_dict[\"popular\"].append(top_level_comment.body)\n topics_dict[\"likes\"].append(top_level_comment.score)\n # all_comments = submission.comments.list()\n # for comment in all_comments:\n # topics_dict[\"all\"].append(comment.body)\n\ntopics_data = pd.DataFrame(data=topics_dict)\n\n# def get_date(created):\n# return dt.datetime.fromtimestamp(created)\n#\n# timestamp = topics_data[\"created\"].apply(get_date)\n# topics_data = topics_data.assign(timestamp = timestamp)\n\ntopics_data.to_csv('data.csv', index=False)\n","sub_path":"scrapReddit.py","file_name":"scrapReddit.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"252390794","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n \"\"\"\n dynamic programming\n state transform function:\n rob(n) = max(rob(n-1), rob(n-2) + money(n))\n \"\"\"\n if len(nums) == 0:\n return 0\n elif len(nums) == 1:\n return nums[0]\n elif len(nums) <= 3:\n return max(nums)\n \n rob0 = nums[0]; rob1 = max(nums[0:2])\n i = 2 \n while i < len(nums) - 1:\n rob_max0 = max(rob0+nums[i], rob1)\n rob0 = rob1; rob1 = rob_max0\n i += 1\n \n nums = nums[-1::-1] \n rob0 = nums[0]; rob1 = max(nums[0:2])\n i = 2 \n while i < len(nums) - 1:\n rob_max1 = max(rob0+nums[i], rob1)\n rob0 = rob1; rob1 = rob_max1\n i += 1\n\n return rob_max0 if rob_max0 > rob_max1 else rob_max1\n","sub_path":"Python/213. House_Robber_II.py","file_name":"213. 
House_Robber_II.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"46821156","text":"import re\n\nprint (\"Calculadora\")\nprint (\"Digite 'sair' para encerrar\\n\")\n\nprevious = 0\nrun = True\n\ndef performMath():\n global run\n global previous\n equation = input(\"Enter equation:\")\n if equation == \"sair\":\n run = False\n\n else:\n equation = re.sub('[a-zA-Z,.:()]','', equation)\n previous = eval(equation)\n\n print(\"Você digitou\", previous)\n\n \nwhile run :\n performMath()\n","sub_path":"Python/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"399479346","text":"#-*- encoding: UTF-8 -*-\n#---------------------------------import------------------------------------\nimport scrapy\nimport json\nfrom tutorial.items import TeleplayItem\nfrom scrapy import Request\nfrom scrapy.selector import Selector\nimport sys\n#---------------------------------------------------------------------------\nclass DpSpider(scrapy.Spider):\n reload(sys)\n #sys.setdefaultencoding('utf-8')\n #handle_httpstatus_list = [403]\n name = \"letv\"\n allowed_domains = [\"letv.com\"]\n\n start_urls = [\n \"http://list.letv.com/listn/c2_t-1_a-1_y-1_s1_md_o20_d1_p6.html\"\n ]\n\n\n def parse(self, response):\n\n '获取所有分类页'\n req = []\n for i in range(34):\n url='http://list.letv.com/apin/chandata.json?c=2&d=1&md=&o=20&p='+str(i+1)+'&s=1'\n r = Request(url, callback=self.parse_list)\n req.append(r)\n return req\n\n\n def parse_list(self, response):\n req = []\n sel = Selector(response)\n '电影列表'\n s=json.loads(response.body)\n movie_list = s['album_list']\n for movie in movie_list:\n item =TeleplayItem()\n item['name'] = movie['name']\n item['episode'] = movie['episodes']\n item['area'] = movie['areaName']\n item['language'] = movie['language']\n item['directer']=movie['language']\n item['type']=movie['subCategoryName']\n item['playCnts']=movie['playCount']\n item['id'] = movie['aid']\n url='http://www.letv.com/tv/'+str(movie['aid'])+'.html'\n r = Request(url, callback=self.parse_detail)\n r.meta['item'] = item\n req.append(r)\n return req\n\n\n def parse_detail(self,response):\n\n '电影详情'\n sel = Selector(response)\n #items = []\n item = TeleplayItem()\n itemtmp=response.meta['item']\n directer_list=[]\n if sel.xpath('//*[@data-statectn=\"n_textInfo\"]/p[1]/a/text()').extract():\n directer_list=sel.xpath('//*[@data-statectn=\"n_textInfo\"]/p[1]/a/text()').extract()\n directer=''\n for directertmp in directer_list:\n directer=directer+'/'+directertmp.strip()\n\n actor_list=[]\n if sel.xpath('//*[@data-statectn=\"n_textInfo\"]/p[2]/a/text()').extract():\n actor_list=sel.xpath('//*[@data-statectn=\"n_textInfo\"]/p[2]/a/text()').extract()\n actor=''\n for actortmp in actor_list:\n actor=actor+'/'+actortmp.strip()\n\n year=sel.xpath('//*[@data-statectn=\"n_textInfo\"]/p[4]/a[1]/text()').extract()[0]\n\n\n item['name'] = itemtmp['name']\n item['episode'] = itemtmp['episode']\n item['area'] = itemtmp['area']\n item['language'] = itemtmp['language']\n item['directer']=directer[1:].strip()\n item['type']=itemtmp['type']\n item['actor']=actor[1:].strip()\n item['playCnts']=itemtmp['playCnts']\n item['id'] = itemtmp['id']\n item['year']=year\n return 
item\n\n\n\n\n","sub_path":"tutorial/spiders/letv_spider.py","file_name":"letv_spider.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"45618530","text":"import pandas\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport random\nimport pickle\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\n\n# assume y_real is already not one hot\ndef acc_tophit(yprob, list_class, y_real, topk=5):\n indexes=np.argsort(yprob,axis=1)[:,-topk:]\n acc=0\n for s in range(yprob.shape[0]):\n for i in list(indexes[s]):\n if list_class[i] == y_real[s]:\n acc+=1\n break\n return acc/y_real.shape[0]\n\n# single label, y real non one hot\ndef mrr(yprob, list_class, y_real):\n if len(y_real.shape)>1:\n return mrr2(yprob, list_class, y_real)\n indexes = np.argsort(yprob*-1, axis=1)\n r=0\n for s in range(yprob.shape[0]):\n ci=y_real[s]\n for ii,i in enumerate(list(indexes[s])):\n if list_class[i]==ci:\n r+=1/(ii+1)\n break\n return r/y_real.shape[0]\n\n# multi label, y real one hot multi\ndef mrr2(yprob, list_class, y_real):\n indexes = np.argsort(yprob*-1, axis=1)\n r=0\n for s in range(yprob.shape[0]):\n labels = np.where(np.asarray(y_real[s], dtype=np.int32) > 0)[0].tolist()\n r_t = 0\n # print(labels)\n for ci in labels:\n for ii,i in enumerate(list(indexes[s])):\n if list_class[i]==ci:\n r_t+=1/(ii+1)\n break\n r+=r_t/len(labels)\n return r/y_real.shape[0]\n\n\n# assume y_real is already one hot multilabel\ndef precision_at_k(yprob, list_class, y_real, topk=1):\n indexes = np.argsort(yprob*-1, axis=1)[:, :topk]\n all_acc = 0\n\n for s in range(yprob.shape[0]):\n acc = 0\n c = 0\n labels = np.where(np.asarray(y_real[s], dtype=np.int32) > 0)[0].tolist()\n # print(labels)\n for i in list(indexes[s]):\n if c>=len(labels):\n break\n c += 1\n if list_class[i] in labels:\n # print('{} vs {}'.format(list_class[i], labels))\n acc += 1\n\n all_acc+=acc/c\n return all_acc / yprob.shape[0]\n\ndef erm_bow2(dig_proc_list, max_proc =5, store_path='', task_encode=False):\n all_dig = []\n all_proc = []\n patient_adms = {}\n pid = 0\n aid = 0\n for dp in dig_proc_list:\n patient_adms[pid] = []\n for ad in dp:\n patient_adms[pid].append(aid)\n aid += 1\n all_dig.append(' '.join(ad[0]).replace(',', ''))\n all_proc.append(' '.join(ad[1]).replace(',', ''))\n pid += 1\n\n print('examples of raw diags: {}'.format(all_dig[:5]))\n print('examples of raw procs: {}'.format(all_proc[:5]))\n tf_vectorizer = CountVectorizer(max_df=1.0, min_df=1,token_pattern=u'(?u)\\\\b\\\\w+\\\\b',\n max_features=None)\n\n dig_tf = tf_vectorizer.fit_transform(all_dig).toarray()\n digmap = tf_vectorizer.get_feature_names()\n print('diags mapping token to id examples: {}'.format(tf_vectorizer.get_feature_names()[:5]))\n print('example of vector diags:\\n {}'.format(dig_tf[:5]))\n print('full shape diags: {}'.format(dig_tf.shape))\n tf_vectorizer = CountVectorizer(max_df=1.0, min_df=1, token_pattern=u'(?u)\\\\b\\\\w+\\\\b',\n max_features=None)\n proc_tf = tf_vectorizer.fit_transform(all_proc).toarray()\n print('procs mapping token to id examples: {}'.format(tf_vectorizer.get_feature_names()[:5]))\n procmap = tf_vectorizer.get_feature_names()\n print('example of vector procs:\\n {}'.format(proc_tf[:5]))\n print('full shape procs: {}'.format(proc_tf.shape))\n dig_records = []\n pro_records = []\n for pid, v in patient_adms.items():\n dig_records_tmp = []\n d = 
np.zeros(dig_tf.shape[1], dtype=np.float32)\n for i, aid in enumerate(v):\n temp=dig_tf[aid].astype(np.float32)\n for j, vv in enumerate(temp):\n d[j]+=vv\n dig_records_tmp.append(d.copy())\n for i, aid in enumerate(v):\n temp = proc_tf[aid].astype(np.float32)\n if task_encode:\n for j, vv in enumerate(temp):\n t=0\n if vv>0:\n task = np.zeros(max_proc, dtype=np.float32)\n task[t]=1\n digfull=np.concatenate([dig_records_tmp[i], task])\n dig_records.append(digfull)\n t+=1\n p = np.zeros(proc_tf.shape[1], dtype=np.float32)\n p[j]=1\n pro_records.append(p)\n else:\n dig_records.append(dig_records_tmp[i])\n p = np.zeros(proc_tf.shape[1], dtype=np.float32)\n t = 0\n for j, vv in enumerate(temp):\n if vv>0:\n p[j]=1\n t+=1\n if t==0:\n print(temp)\n print(np.min(temp))\n print(np.max(temp))\n print(all_proc[aid])\n raise\n\n pro_records.append(p)\n\n label_count={}\n\n for y in pro_records:\n k = tuple(y)\n if k not in label_count:\n label_count[k]=0\n label_count[k]+=1\n\n dig_records2=[]\n pro_records2=[]\n num_discard = 0\n for x,y in zip(dig_records, pro_records):\n k = tuple(y)\n if 1 < label_count[k] < 100:\n dig_records2.append(x)\n pro_records2.append(y)\n else:\n num_discard+=1\n print('num discard {}'.format(num_discard))\n\n dig_records = np.asarray(dig_records)\n pro_records = np.asarray(pro_records)\n print('example of vector digs:\\n {}'.format(dig_records[:5]))\n print('example of vector procs:\\n {}'.format(pro_records[:5]))\n\n print('final input dig shape: {}'.format(dig_records.shape))\n print('final output drug_dnc_decode shape: {}'.format(pro_records.shape))\n\n\n\n # svd = TruncatedSVD(n_components=200, n_iter=35, random_state=12)\n # dig_records = svd.fit_transform(dig_records[:,:-max_proc])\n # # dig_records = np.concatenate([dig_records2,dig_records[:,-max_proc:]],axis=1)\n # print('final input dig shape: {}'.format(dig_records.shape))\n # print('write final input output...')\n pickle.dump(digmap, open(store_path + '/dig_map.pkl', 'wb'))\n pickle.dump(procmap, open(store_path + '/proc_map.pkl', 'wb'))\n pickle.dump(dig_records, open(store_path + '/dig_input.pkl', 'wb'))\n pickle.dump(pro_records, open(store_path + '/proc_output.pkl', 'wb'))\n\ndef erm_bow(dig_proc_list, max_adm=10, store_path=''):\n all_dig=[]\n all_proc=[]\n patient_adms={}\n pid=0\n aid=0\n for dp in dig_proc_list:\n patient_adms[pid]=[]\n for ad in dp:\n patient_adms[pid].append(aid)\n aid+=1\n all_dig.append(' '.join(ad[0]).replace(',',''))\n all_proc.append(' '.join(ad[1]).replace(',',''))\n pid+=1\n\n print('examples of raw diags: {}'.format(all_dig[:5]))\n print('examples of raw procs: {}'.format(all_proc[:5]))\n tf_vectorizer = CountVectorizer(max_df=1.0, min_df=1,\n max_features=None)\n\n dig_tf=tf_vectorizer.fit_transform(all_dig).toarray()\n digmap=tf_vectorizer.get_feature_names()\n print('diags mapping token to id examples: {}'.format(tf_vectorizer.get_feature_names()[:5]))\n print('example of vector diags:\\n {}'.format(dig_tf[:5]))\n print('full shape diags: {}'.format(dig_tf.shape))\n tf_vectorizer = CountVectorizer(max_df=1.0, min_df=1,\n max_features=None)\n proc_tf=tf_vectorizer.fit_transform(all_proc).toarray()\n print('procs mapping token to id examples: {}'.format(tf_vectorizer.get_feature_names()[:5]))\n procmap=tf_vectorizer.get_feature_names()\n print('example of vector procs:\\n {}'.format(proc_tf[:5]))\n print('full shape procs: {}'.format(proc_tf.shape))\n dig_records=[]\n pro_records=[]\n for pid, v in patient_adms.items():\n d = np.zeros((max_adm, 
dig_tf.shape[1]),dtype=np.float32)\n for i,aid in enumerate(v):\n d[i]=dig_tf[aid].astype(np.float32)\n dig_records.append(d)\n p = np.zeros((max_adm, proc_tf.shape[1]), dtype=np.float32)\n for i, aid in enumerate(v):\n p[i] = proc_tf[aid].astype(np.float32)\n pro_records.append(p)\n\n dig_records=np.asarray(dig_records)\n pro_records=np.asarray(pro_records)\n\n print('final input dig shape: {}'.format(dig_records.shape))\n print('final output drug_dnc_decode shape: {}'.format(pro_records.shape))\n\n print('write final input output...')\n pickle.dump(digmap, open(store_path + '/dig_map.pkl', 'wb'))\n pickle.dump(procmap, open(store_path + '/proc_map.pkl', 'wb'))\n pickle.dump(dig_records, open(store_path+'/dig_input.pkl', 'wb'))\n pickle.dump(pro_records, open(store_path+'/proc_output.pkl', 'wb'))\n\n\ndef load_all_raw_data(fpath='./data/mimic/mimic-iii.tsv', out_folder='big',\n adm_range=[2,10], dig_range=[1,40], proc_range=[1,40]):\n data_path = os.path.dirname(os.path.abspath(fpath))\n out_path = data_path+'/'+out_folder\n if not os.path.isdir(out_path):\n os.mkdir(out_path)\n df = pandas.DataFrame.from_csv(fpath, sep='\\t')\n patients = {}\n count = 0\n for index, row in df.iterrows():\n # print('{} vs {}'.format(row['DIAG'], row['PROC']))\n diag=row['DIAG'][1:-2].split()\n proc=row['PROC'][1:-2].split()\n sub_id=row.name\n if sub_id not in patients:\n patients[sub_id]={\"num_adm\":0,\"ldiags\":[],\"lprocs\":[],\"data\":[]}\n patients[sub_id]['data'].append((diag, proc))\n patients[sub_id]['ldiags'].append(len(diag))\n patients[sub_id]['lprocs'].append(len(proc))\n patients[sub_id]['num_adm']+=1\n count+=1\n\n print('num patients: {} vs total records: {}'.format(len(patients), count))\n all_ld=[]\n all_lp=[]\n all_adm=[]\n chosen_patients=[]\n for p, v in patients.items():\n if adm_range[0] <= v['num_adm'] <= adm_range[1] and \\\n min(v['ldiags']) >= dig_range[0] and max(v['ldiags']) <= dig_range[1] and \\\n min(v['lprocs']) >=proc_range[0] and max(v['lprocs']) <= proc_range[1]:\n all_ld.extend(v['ldiags'])\n all_lp.extend(v['lprocs'])\n all_adm.append(v['num_adm'])\n chosen_patients.append(v['data'])\n\n print('num filtered patients: {} vs total: {}'.format(len(chosen_patients), len(patients)))\n\n print('avg all_ld: {} vs max all_ld: {}'.format(sum(all_ld)/len(all_ld), max(all_ld)))\n # plt.hist(all_ld, normed=False, bins=100)\n # plt.show()\n print('avg all_lp: {} vs max all_lp: {}'.format(sum(all_lp) / len(all_lp), max(all_lp)))\n # plt.hist(all_lp, normed=False, bins=100)\n # plt.show()\n print('avg all_adm: {} vs max all_adm: {}'.format(sum(all_adm) / len(all_adm), max(all_adm)))\n # plt.hist(all_adm, normed=False, bins=100)\n # plt.show()\n print('write raw data...')\n # random.shuffle(chosen_patients)\n pickle.dump(chosen_patients,open(out_path+'/dig_proc_raw.pkl','wb'))\n # erm_bow(chosen_patients, adm_range[1], store_path=out_path)\n # random.shuffle(chosen_patients)\n erm_bow2(chosen_patients, max_proc=proc_range[1], store_path=out_path)\n\n\nif __name__ == '__main__':\n load_all_raw_data(out_folder='big',adm_range=[1, 2], dig_range=[1, 2], proc_range=[1, 1])","sub_path":"rare-mann/mimic_prepare.py","file_name":"mimic_prepare.py","file_ext":"py","file_size_in_byte":11521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"328170463","text":"# ===========================================================\n# IMPORT LIBRARIES\n# ===========================================================\n\nfrom vosk import Model, 
KaldiRecognizer, SetLogLevel\nimport datetime\nimport os\nimport pyaudio\nimport pyttsx3\nimport json\nfrom time import sleep\nimport random as rdm\n\n# ===========================================================\n# IMPORT THE CORE LIBRARY\n# ===========================================================\n\nfrom core import SystemInfo, NameInfo\n\n# ===========================================================\n# IMPORT COLORS LIBRARY\n# ===========================================================\n\nimport colors as cl\n\n# ===========================================================\n# IMPORT THE QUESTIONS AND ANSWERS LISTS LIBRARY\n# ===========================================================\n\nimport lists.list as aList\n\n# ===========================================================\n# DEFINE TENSORFLOW LOG LEVEL\n# ===========================================================\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# ===========================================================\n# OUTPUT FUNCTION CONFIGURATION\n# ===========================================================\n\neng = pyttsx3.init()\n\n\ndef speak(text):\n eng.say(text)\n eng.runAndWait()\n\n# ===========================================================\n# INPUT FUNCTION CONFIGURATION\n# ===========================================================\n\n\nSetLogLevel(-1)\n\nmodel = Model('model')\nrec = KaldiRecognizer(model, 16000)\n\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=16000,\n input=True, frames_per_buffer=8000)\nstream.start_stream()\n\n\ndef listen():\n data = stream.read(16000)\n if len(data) == 0:\n return ''\n if rec.AcceptWaveform(data):\n # Result is a str\n res = rec.Result()\n # Convert result to dict/json\n res = json.loads(res)\n # Taking only what user said\n text = res['text']\n\n return text\n\n# ===========================================================\n# SEARCH FUNCTION CONFIGURATION\n# ===========================================================\n\n\ndef search(string, list):\n if string == None:\n string = 'aqwrterhs'\n ans = False\n # if any(string in s for s in list):\n for s in list:\n if s in string:\n ans = True\n return s\n\n # return ''\n\n# ===========================================================\n# WELCOME SCREEN #Part 1\n# ===========================================================\n\nprint(cl.fg_Green, '[LOG]', cl.reset, 'Getting all engines ready...', cl.reset)\nspeak('Getting all engines ready...')\n\n# ===========================================================\n# IMPORT NLU CLASSIFIER\n# ===========================================================\n\nfrom nlu.classifier import classify\nfrom nlu.model import inputs\n\n# ===========================================================\n# WELCOME SCREEN #Part 2\n# ===========================================================\n\nprint(cl.fg_Green, '[LOG]', cl.reset, cl.bold, 'Done!', cl.reset)\nsleep(0.3)\nprint(cl.fg_Green, '[LOG]', cl.reset, 'Starting up system...', cl.reset)\nspeak('Starting up system...')\nprint(cl.fg_Green, '[LOG]', cl.reset, cl.bold, 'Done!', cl.reset)\nsleep(0.3)\nprint(cl.fg_Green, '[LOG]', cl.reset, 'Setting up your preferences...', cl.reset)\nspeak('Setting up your preferences...')\nprint(cl.fg_Green, '[LOG]', cl.reset, cl.bold, 'Done!', cl.reset)\nsleep(0.3)\n\nprint(cl.bold, cl.fg_Yellow, \"\"\"\n ____ _ _ \n| _ \\ __ _ __ __(_) __| |\n| | | | / _` |\\ \\ / /| | / _` |\n| |_| || (_| | \\ V / | || (_| |\n|____/ \\__,_| \\_/ |_| \\__,_|\n\"\"\", cl.reset)\nsleep(0.3)\nspeak('I am 
ready now!')\nprint('Hello, I\\'m ' + cl.bold + cl.fg_Red + 'David' + cl.reset + ', your AI Virtual Assistant!')\nspeak('Hello! Ready to work?')\n\n# ===========================================================\n# CODE\n# ===========================================================\n\nwhile True:\n\n text = listen()\n\n if text == '':\n choice = rdm.choice(aList.errorL)\n print(choice)\n speak(choice)\n\n # break\n\n # ========================================================\n # AI FUNCTIONALITIES\n # ========================================================\n\n text = search(text, inputs)\n\n if text == None:\n entity = None\n else:\n entity = classify(text)\n\n # 1. Get Time\n\n if entity == 'time\\\\getTime':\n\n spkList = SystemInfo.get_time()\n\n spkHr = str(spkList[0])\n spkMin = str(spkList[1])\n\n if len(spkHr) == 2:\n strHr = spkHr\n else:\n strHr = '0' + spkHr\n\n if len(spkMin) == 2:\n strMin = spkMin\n else:\n strMin = '0' + spkMin\n\n if strHr == '00' and strMin == '00':\n spkHr = 'midnight'\n spkMin = ''\n elif strHr == '12' and strMin == '00':\n strHrL = ['midday', 'noon', '12']\n spkHr = rdm.choice(strHrL)\n if strHr == '12':\n spkMin = 'o\\'clock'\n else:\n spkMin = ''\n elif (strMin == '00' and strHr != '00') or (strMin == '00' and strHr != '12'):\n spkMin = 'o\\'clock'\n\n timeChoiceP = rdm.choice(aList.timePL)\n timeChoiceS = rdm.choice(aList.timeSL)\n\n prt = timeChoiceP + f'{strHr}:{strMin}.'\n if strMin == 'o\\'clock':\n spkStr = timeChoiceS + spkHr + ' ' + spkMin + '.'\n elif strMin == '':\n spkStr = timeChoiceS + spkHr + '.'\n else:\n spkStr = timeChoiceS + spkHr + ' ' + spkMin + '.'\n\n print(prt)\n speak(spkStr)\n\n elif entity == 'time\\\\getDate':\n dateL = SystemInfo.get_date()\n\n day = str(dateL[0])\n month = str(dateL[1])\n year = str(dateL[2])\n\n if len(day) == 2:\n strDay = day\n else:\n strDay = '0' + day\n \n if len(month) == 2:\n strMonth = month\n else:\n strMonth = '0' + month\n\n # Day to speak\n if len(day) == 1:\n if day == '1':\n day = '1st'\n elif day == '2':\n day = '2nd'\n elif day == '3':\n day = '3rd'\n else:\n day = day + 'th'\n elif len(day) == 2:\n if day[1] == '1' and day != '11':\n day = day + 'st'\n elif day[1] == '2' and day != '12':\n day = day +'nd'\n elif day[1] == '3' and day != '13':\n day = day + 'rd'\n else:\n day = day + 'th'\n\n # Month to speak\n month = aList.monthL[int(month)]\n\n dateChoiceS = rdm.choice(aList.dateSL)\n dateChoiceP = rdm.choice(aList.datePL)\n\n strSpk = dateChoiceS + 'the ' + day + ' of ' + month + ' of the year of ' + year + '.'\n strPrt = dateChoiceP + strDay + '/' + strMonth + '/' + year + '.'\n\n print(strPrt)\n speak(strSpk)\n\n elif entity == 'name\\\\readName':\n name = NameInfo.get_name()\n\n # print(name)\n\n if name == '':\n print('Your name is not yet in my system.')\n speak('Your name is not yet in my system. 
Use ' + cl.italic + 'set name' + cl.reset + ' or ' + cl.italic + 'set my name' + cl.reset + 'to save it.')\n\n else:\n choice = rdm.choice(aList.getNameL)\n\n print(choice + str(name).capitalize() + '.')\n\n speak(choice + str(name))\n\n elif entity == 'name\\\\setName':\n print('What is your name?')\n speak('What is your name?')\n name = listen()\n\n while name != '' and name != None:\n res = NameInfo.set_name(name)\n\n # print(name)\n\n print(res)\n speak(res)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"221039841","text":"from bottle import route, run, template, static_file, get, post, delete, request\nfrom sys import argv\nimport json\nimport pymysql\n\n#connect to database\nconnection=pymysql.connect(\n host='localhost',\n user='root',\n password='root',\n db='mystore',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor\n)\n\n@get(\"/admin\")\ndef admin_portal():\n return template(\"pages/admin.html\")\n\n@get(\"/\")\ndef index():\n return template(\"index.html\")\n\n@get('/js/')\ndef javascripts(filename):\n return static_file(filename, root='js')\n\n@get('/css/')\ndef stylesheets(filename):\n return static_file(filename, root='css')\n\n@get('/images/')\ndef images(filename):\n return static_file(filename, root='images')\n\n#admin functions\n@post('/category')\ndef add_category():\n cat_name = request.POST.get('name')\n if cat_name:\n cat_list = fetchCategories()\n for category in cat_list:\n if category['Name'] == cat_name:\n STATUS = \"ERROR\"\n MSG = \"200 - Category already exists\"\n insert_new_catogory(cat_name)\n else:\n STATUS = \"ERROR\"\n MSG = \"400 - Bad Request. Please enter catoegory name\"\n result = {\"STATUS\":STATUS, \"MSG\":MSG}\n return json.dumps(result)\ndef insert_new_category(category):\n try:\n with connection.cursor() as cursor:\n sql = \"INSERT INTO categories(Name) VALUES ('{}')\".format(category)\n cursor.execute(sql)\n connection.commit()\n STATUS = \"SUCCESS\"\n MSG = \"\"\n except Exception as e:\n STATUS = \"ERROR\"\n MSG = \"500 - Internal Error\"\n result = {\"STATUS\":STATUS, \"MSG\":MSG}\n return result\n\n@delete('/category/')\ndef delete_category(id):\n cat_list = fetchCategories()\n for category in cat_list:\n if category['Id'] == int(id):\n remove_category(id)\n break\n else:\n STATUS = \"ERROR\"\n MSG = \"404 - Category not found\"\n result = {\"STATUS\":STATUS, \"MSG\":MSG}\n return json.dumps(result)\ndef remove_category(id):\n try:\n with connection.cursor() as cursor:\n sql = \"DELETE FROM categories WHERE Id={}\".format(int(id))\n cursor.execute(sql)\n connection.commit()\n STATUS = \"SUCCESS\"\n MSG = \"\"\n except Exception as e:\n STATUS = \"ERROR\"\n MSG = \"500 - internal error\"\n result = {\"STATUS\":STATUS,\"MSG\":MSG}\n return result\n\n@get('/categories')\ndef fetchCategories():\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM categories\"\n cursor.execute(sql)\n CATEGORIES = cursor.fetchall()\n STATUS = \"SUCCESS\"\n MSG = \"\"\n except Exception as e:\n STATUS = \"ERROR\"\n MSG = \"500 - Internal Error\"\n result = {\"STATUS\":STATUS, \"CATEGORIES\":CATEGORIES,\"MSG\":MSG}\n return json.dumps(result)\n\n# @post('/product')\n# def edit_product():\n# title = request.POST.get('title')\n# desc = request.POST.get('desc')\n# price = request.POST.get('price')\n# img_url = request.POST.get('img_url')\n# category = request.POST.get('category')\n# favorite = 
request.POST.get('favorite')\n\n\n@get('/product/<id>')\ndef get_product(id):\n prod_list = fetchProducts()\n for product in prod_list:\n if product['Id'] == int(id):\n PRODUCT = product\n STATUS = \"SUCCESS\"\n MSG = \"\"\n else:\n STATUS = \"ERROR\"\n MSG = \"404 - Product not found\"\n result = {\"STATUS\":STATUS, \"PRODUCT\":PRODUCT,\"MSG\":MSG}\n return json.dumps(result)\n\n@delete('/product/<id>')\ndef delete_product(id):\n prod_list = fetchProducts()\n for product in prod_list:\n if product['Id'] == int(id):\n remove_product(id)\n break\n else:\n STATUS = \"ERROR\"\n MSG = \"404 - Product not found\"\n result = {\"STATUS\":STATUS, \"MSG\":MSG}\n return json.dumps(result)\ndef remove_product(id):\n try:\n with connection.cursor() as cursor:\n sql = \"DELETE FROM products WHERE Id={}\".format(int(id))\n cursor.execute(sql)\n connection.commit()\n STATUS = \"SUCCESS\"\n MSG = \"\"\n except Exception as e:\n STATUS = \"ERROR\"\n MSG = \"500 - internal error\"\n result = {\"STATUS\":STATUS,\"MSG\":MSG}\n return result\n\n@get('/products')\ndef fetchProducts():\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM products\"\n cursor.execute(sql)\n PRODUCTS = cursor.fetchall()\n STATUS = \"SUCCESS\"\n MSG = \"\"\n except Exception as e:\n STATUS = \"ERROR\"\n MSG = \"500 - Internal Error\"\n result = {\"STATUS\":STATUS, \"PRODUCTS\":PRODUCTS,\"MSG\":MSG}\n return json.dumps(result)\n\n@get('/category/<id>/products')\ndef fetch_products_by_category(id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM products WHERE Category_Id = {}\".format(int(id))\n cursor.execute(sql)\n PRODUCTS = cursor.fetchall()\n STATUS = \"SUCCESS\"\n MSG = \"\"\n except Exception as e:\n STATUS = \"ERROR\"\n MSG = \"500 - Internal Error\"\n result = {\"STATUS\":STATUS, \"PRODUCTS\":PRODUCTS,\"MSG\":MSG}\n return json.dumps(result)\n\ndef main():\n run(host='localhost', port=7000)\n\nif __name__==\"__main__\":\n main()","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"42482084","text":"import setuptools\n\nwith open('README.md', 'r') as file:\n long_description = file.read()\n\nsetuptools.setup(\n name = 'preprocess_mikelakoju', #Remember this name must be unique globally\n version = '0.0.3',\n author_email = 'lakojum@yahoo.com',\n description = 'This is a preprocessing package for NLP',\n long_description = long_description,\n long_description_content_type = 'text/markdown',\n packages = setuptools.find_packages(),\n classifiers = [\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'\n ],\n python_requires = '>=3.5'\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"251064492","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo of the TCP server with multi-threading for concurrent connections.\n\"\"\"\n\n__author__ = 'Ziang Lu'\n\nimport concurrent.futures as cf\nimport socket\nimport socketserver\nimport time\nfrom typing import Tuple\n\n\ndef tcp_worker(sock_conn, addr: Tuple[str, int]) -> None:\n \"\"\"\n Thread function to handle TCP connection.\n :param sock_conn: socket\n :param addr: tuple(str, int)\n :return: None\n \"\"\"\n host, port = addr\n print(f'[SERVER] Connection accepted from {host}:{port}')\n sock_conn.sendall(b'Welcome!')\n # This while-loop is like an \"event loop\".\n while True:\n # By default, \"socket.recv()\" is blocking, so the event loop will block\n # here, waiting for some data to come in.\n data = sock_conn.recv(1024)\n time.sleep(1)\n if not data or data.decode('utf-8') == 'exit':\n break\n sock_conn.sendall(f\"Hello, {data.decode('utf-8')}\".encode('utf-8'))\n sock_conn.close()\n print(f'[SERVER] Connection from {host}:{port} CLOSED')\n\n\n##### METHOD 1: With \"socket\" module #####\n\n# Create an IPv4, TCP socket\nserver_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)\n# Bind the socket to server address \"127.0.0.1:9999\"\nserver_sock.bind(('127.0.0.1', 9999))\nprint('[SERVER] Server bound to 127.0.0.1:9999')\n\n# Let the server socket start listening for connection requests\nserver_sock.listen() # Becomes a server socket\nprint('[SERVER] Server listening for connection...')\n\n# => Use a thread pool to reuse the thread, and thus improve performance\nwith cf.ThreadPoolExecutor(max_workers=50) as pool:\n try:\n # This while-loop is like an \"event loop\".\n while True:\n # By default, \"socket.accept()\" is blocking, so the event loop will\n # block here, waiting for a connection request.\n sock_conn, addr = server_sock.accept() # Accepted a connection\n # We want to fire up a thread to handle the connection, so that the\n # server is not blocked away from other connections.\n # => Use a thread pool to reuse the thread, and thus improve\n # performance\n pool.submit(tcp_worker, sock_conn, addr)\n except KeyboardInterrupt:\n # Close the server socket\n server_sock.close()\n\n\n##### METHOD 2: With \"socketserver\" module #####\n\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n \"\"\"\n TCP handler class for our TCP server.\n When a connection is accepted, a thread is fired up to handle it, an object\n of this class is instantiated, and the corresponding handle() method is\n called.\n \"\"\"\n\n def handle(self):\n sock_conn = self.request\n # Reuse the \"thread\" function defined above\n tcp_worker(sock_conn, self.client_address)\n\n\n# Create a multi-threaded version of TCP server, and bind the server socket to\n# server address \"127.0.0.1:9999\"\nserver = socketserver.ThreadingTCPServer(\n server_address=('127.0.0.1', 9999), RequestHandlerClass=MyTCPHandler\n)\n# With ThreadingTCPServer, each connection will have a thread fired up to handle\n# it, a RequestHandlerClass object is instantiated, and the corresponding\n# handle() method is called.\nprint('[SERVER] Server bound to 127.0.0.1:9999')\n\ntry:\n # Activate the server\n # Let the server start listening for connection requests\n print('[SERVER] Listening for connections...')\n server.serve_forever()\nexcept KeyboardInterrupt:\n # Close the server\n server.server_close()\n\n# Output:\n# [SERVER] Server bound to 127.0.0.1:9999\n# [SERVER] Server listening for connection...\n# [SERVER] Connection accepted from 127.0.0.1:56389\n# [SERVER] Connection from 127.0.0.1:56389 CLOSED\n","sub_path":"网络编程/Python/tcp_server_threaded.py","file_name":"tcp_server_threaded.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"205313405","text":"import cv2\nimport os\nimport aitool\nimport mmcv\n\n\n\nif __name__ == '__main__':\n #gt annotation file\n # ann_file = '/mapai/haowenguo/data/GF/airplane/ann_new/instances_test2022.json'\n ann_file = '/mapai/haowenguo/data/fair1m/fair1m_v2/FAIR1M_test2022.json'\n #detection results generated by detectors, in the format of .json\n identical_str = 'fair_faster_60'\n res_files = {\n # 'iou': '/home/xc/mmdetection-aitod/mmdet-aitod/work_dirs/v001.01.01_aitod_faster_rcnn_r50_baseline/v001.01.01.bbox.json',\n 'sota': '/mapai/haowenguo/code/SPL/mmdetection/visualization/' + identical_str + '.bbox.json'} # IS_based_contrastive_learning_serving_thr00_p04_con0.2 faster_r50_on_newset_tsne\n img_dir = '/mapai/haowenguo/data/fair1m/fair1m_v2/trainval-test/'\n # img_dir = '/mapai/haowenguo/data/GF/airplane/trainval-test/'\n\n sample_basenames = aitool.get_basename_list(\n '/mapai/haowenguo/data/fair1m/fair1m_v2/trainval-test/')\n # samples =['155', '578', '835', '870', '878', '386', '389']\n # samples = ['841', '852']\n # samples =['142__1__0___0', '156__1__0___0', '245__1__0___0', '268__1__0___0', '691__1__0___0', '728__1__0___0']\n samples = ['531__1__0___0', '1713__1__0___0']\n\n score = 0.7\n final = dict()\n\n save_dir = '/mapai/haowenguo/data/GF/show_result/' + identical_str\n\n try:\n os.mkdir(save_dir)\n except:\n pass\n \n class_dict = {1:'Boeing737', 2:'Boeing747', 3:'Boeing777', 4:'Boeing787', 5:'A220', 6:'A321',\n 7:'A330', 8:'A350', 9:'ARJ21', 10:'other-airplane', 11:'C919'}\n # class_dict = {1:'Boeing737', 2:'Boeing747', 3:'Boeing777', 4:'Boeing787', 5:'A220', 6:'A321',\n # 7:'A330', 8:'A350', 9:'ARJ21', 10:'others'}\n\n for method in ['sota']:\n # save_dir = f'/data/small/v1/results/CascadeRCNN/{method}/ship'\n # save_dir = f'/mapai/haowenguo/data/GF/show_result/faster_r50_on_newset/'\n res_file = res_files[method]\n coco_parser = aitool.COCOParser(ann_file)\n objects = coco_parser.objects\n img_name_with_id = coco_parser.img_name_with_id\n coco_result_parser = aitool.COCOJsonResultParser(res_file)\n for img_name in list(objects.keys())[::-1]:\n count = 0\n # if img_name not in ['0000182_01220_d_0000039__0_0', '0000225_05003_d_0000016__600_0', '1127__1200_1200', 'P2245__1.0__469___0']:\n # continue\n if img_name not in samples: # samples sample_basenames\n continue\n image_id = img_name_with_id[img_name]\n prediction = coco_result_parser(image_id)\n # print(prediction)\n # assert False\n if len(prediction) == 0:\n continue\n ground_truth = coco_parser(img_name)\n # print(ground_truth)\n # assert False\n\n img = cv2.imread(os.path.join(img_dir, img_name + '.png'))\n\n gt_bboxes, pred_bboxes = [], []\n gt_cat_id_list = []\n for _ in ground_truth:\n gt_bboxes.append(_['bbox'])\n gt_cat_id_list.append(_['category_id'])\n \n gt_cat_name_list = [class_dict[k] for k in gt_cat_id_list]\n\n\n for _ in prediction:\n if _['score'] < score:\n continue\n if _['category_id'] > 0:\n count += 1\n ###\n pred_bboxes_w_cat_id = []\n pred_bboxes_w_cat_id.append(_['bbox'])\n pred_bboxes_w_cat_id.append(_['category_id'])\n pred_bboxes.append(pred_bboxes_w_cat_id)\n ###\n # pred_bboxes.append(_['bbox'])\n\n # print(pred_bboxes)\n # assert False\n\n\n gt_bboxes = aitool.drop_invalid_bboxes([aitool.xywh2xyxy(_) for _ in gt_bboxes])\n ###\n tmp_pred_bboxes = [aitool.xywh2xyxy(_[0]) for _ in pred_bboxes]\n bboxes = []\n # print(tmp_pred_bboxes)\n for i in range(len(tmp_pred_bboxes)):\n tmp_bboxes = []\n tmp_bboxes.append(tmp_pred_bboxes[i])\n tmp_bboxes.append(pred_bboxes[i][1])\n bboxes.append(tmp_bboxes)\n # print(bboxes)\n # assert False\n pred_bboxes = aitool.drop_invalid_bboxes_w_cat_id(bboxes)\n pred_cat_id_list = [_[1] for _ in pred_bboxes]\n\n pred_cat_name_list = [class_dict[k] for k in pred_cat_id_list]\n\n pred_bboxes = [_[0] for _ in pred_bboxes]\n # print(category_id_list, pred_bboxes)\n ###\n # pred_bboxes = aitool.drop_invalid_bboxes([aitool.xywh2xyxy(_) for _ in pred_bboxes])\n\n if len(gt_bboxes) == 0:\n continue\n # print(gt_bboxes)\n # print(gt_cat_name_list[3])\n # print(pred_bboxes)\n img = aitool.draw_confusion_matrix(img, gt_bboxes, pred_bboxes, gt_cat_name_list, pred_cat_name_list, # do not pass pred_cat_name_list if no text is needed\n with_gt_TP=False, line_width=2, font_scale=3, thickness=2)\n\n if isinstance(img, list):\n continue\n\n output_file = os.path.join(save_dir, img_name + '_' + identical_str + '.png')\n cv2.imwrite(output_file,img)\n #aitool.show_image(img, output_file=output_file, wait_time=10)","sub_path":"aitool/demo/visualization/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"508044111","text":"from django.urls import path,include\nfrom .views import(\n ProductListView,\n ProductDetailView,\n add_to_cart,\n remove_from_cart,\n OrderSummaryView,\n remove_single_item_from_cart,\n CheckoutView,\n PaymentView,\n AddCoupon,\n RefundRequestView\n)\nurlpatterns = [\n path('', ProductListView.as_view(),name = \"item-list\"),\n path('product//',ProductDetailView.as_view(),name = \"product-detail\"),\n path('order-summary/',OrderSummaryView.as_view(),name = \"order-summary\"),\n path('product//add-to-cart',add_to_cart,name = \"add-to-cart\"),\n path('product//remove-single-item',remove_single_item_from_cart,name = \"remove-single-item\"),\n path('product//remove-from-cart',remove_from_cart,name = \"remove-from-cart\"),\n path('checkout/', CheckoutView.as_view(),name = \"checkout\"),\n path('add-coupon/', AddCoupon.as_view(),name = \"add-coupon\"),\n path('payment//', PaymentView.as_view(),name = \"payment\"),\n path('request-refund/', RefundRequestView.as_view(),name = \"request-refund\"),\n\n]\n","sub_path":"ecommerce/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"163504611","text":"from EMA_Crossing_Check import final_dates\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndate1,_ = final_dates()\r\n\r\ndata = pd.read_csv('Data/TCS.csv')\r\n\r\n\r\n#---------------- Day when 5 Day EMA Cross 13 Day EMA -------------\r\nrecord13 = []\r\nfor x in date1:\r\n for i in data.index:\r\n if data['Date'][i] == x:\r\n record13.append([data['Date'][i+1],data['Open'][i+1],data['High'][i+1],data['Close'][i+1],data['Low'][i+1]])\r\n\r\n\r\n#--------------- Buy on High of Day when 5 Day EMA Cross 13 Day EMA -------------\r\nbuyonhigh = []\r\nrate = float(input('Enter Profit Rate :'))\r\nrate = rate*0.01\r\nfor i in record13:\r\n buyonhigh.append([i[0],np.round(i[2], decimals=2),np.round((i[2]*rate)+i[2],decimals=2)])\r\n\r\nfor i in data.index:\r\n for j in buyonhigh:\r\n if data['Date'][i] == j[0]:\r\n print(j[0],\"---\",j[1],\"---\",j[2])\r\n for count in range(1,5):\r\n print(data['High'][i+count])\r\n if j[2] < data['High'][i+count]:\r\n print(\"...Profit Booked...\")","sub_path":"13 Day EMA Profit.py","file_name":"13 Day EMA Profit.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"423440396","text":"import discord\nimport asyncio\nimport requests\nimport re\nfrom discord.utils import get \nfrom discord.ext import commands\nimport sys\nimport traceback\n\ndef admin_or_owner():\n async def predicate(ctx):\n \n role = get(ctx.message.guild.roles, name = \"A d m i n\")\n output = (role in ctx.message.author.roles) or ctx.message.author.id in [220742049631174656, 203948352973438995]\n return output\n return commands.check(predicate)\n\nclass Admin(commands.Cog, name=\"Admin\"):\n def __init__ (self, bot):\n self.bot = bot\n \n @commands.group()\n async def react(self, ctx):\t\n pass\n \n \n #this function allows you to specify a channel and message and have the bot react with a given emote\n #Not tested with emotes the bot might not have access to\n @react.command()\n @admin_or_owner()\n async def add(self, ctx, channel: int, msg: int, emote: str):\n ch = ctx.guild.get_channel(channel)\n message = await ch.fetch_message(msg)\n await message.add_reaction(emote)\n await ctx.message.delete()\n \n #Allows the sending of messages\n @commands.command()\n @admin_or_owner()\n async def send(self, ctx, channel: int, *, msg: str):\n ch = ctx.guild.get_channel(channel)\n await ch.send(content=msg)\n\n #this function allows you to specify a channel and message and have the bot remove its reaction with a given emote\n #Not tested with emotes the bot might not have access to\n @react.command()\n @admin_or_owner()\n async def remove(self, ctx, channel: int, msg: int, emote: str):\n ch = ctx.guild.get_channel(channel)\n message = await ch.fetch_message(msg)\n await message.remove_reaction(emote, self.bot.user)\n await ctx.message.delete()\n \n @commands.command()\n @admin_or_owner()\n async def reload(self, ctx, cog: str):\n \n try:\n self.bot.reload_extension('cogs.'+cog)\n print(f\"{cog} has been reloaded\")\n except commands.ExtensionNotLoaded as e:\n try:\n self.bot.load_extension(\"cogs.\" + cog)\n print(f\"{cog} has been added\")\n except (discord.ClientException, ModuleNotFoundError):\n print(f'Failed to load extension {cog}.')\n traceback.print_exc()\n except Exception as e:\n print(f'Failed to load extension {cog}.')\n traceback.print_exc()\n\n\ndef setup(bot):\n bot.add_cog(Admin(bot))\n","sub_path":"cogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"176232916","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/23 \n# @Author : sunyihuan\n\n'''\nA holds all files; copy the files that are not in B to C\n'''\nimport os\nimport shutil\nfrom tqdm import tqdm\n\n\ndef file_sub(all_data, using_dir, sub_save_dir):\n '''\n Move the files that are in the all_data folder but not in the using_dir folder into sub_save_dir\n :param all_data: path of all images\n :param using_dir: path of the images already used\n :param sub_save_dir: path where the unused images are saved\n :return:\n '''\n file_dirs = os.listdir(all_data)\n using_files = os.listdir(using_dir)\n all_use_f = [] # list of all used files\n for c in using_files:\n for f in os.listdir(os.path.join(using_dir, c)):\n all_use_f.append(f.strip())\n print(len(all_use_f))\n for fil in tqdm(file_dirs):\n if fil != \".DS_Store\":\n if fil not in all_use_f:\n shutil.move(os.path.join(all_data, fil), os.path.join(sub_save_dir, fil))\n\n\nif __name__ == \"__main__\":\n all_data = \"F:/serve_data/202101-04/covert_jpg\"\n using_dir = \"F:/serve_data/202101-04/classes\"\n sub_save_dir = \"F:/serve_data/202101-04/classes_others\"\n if not os.path.exists(sub_save_dir): os.mkdir(sub_save_dir)\n file_sub(all_data, using_dir, sub_save_dir)\n","sub_path":"data_script/file_sub.py","file_name":"file_sub.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"311137080","text":"# coding:utf8\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.serializers import AuthTokenSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, decorators\nimport urllib2, urllib, json\nfrom django.contrib.auth.models import User\nfrom serializers import WeChatOpenIdSerializer, WeChatUserSerializer\nfrom models import WeChatUser\n\n\nclass WeChatUserViewSet(viewsets.ViewSet):\n serializer_class = WeChatUserSerializer\n\n def list(self, request):\n return Response('post /member/update')\n\n @decorators.list_route(methods=['post'])\n def info(self, request, *args, **kwargs):\n wechat_user = WeChatUser.objects.get(user=request.user)\n serializer = self.serializer_class(wechat_user,data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n\n\nclass WeChatAuthTokenViewSet(viewsets.ViewSet):\n serializer_class = AuthTokenSerializer\n \"\"\"\n WeChat authentication\n \"\"\"\n\n def list(self, request):\n return Response('post /wechat/login - post /wechat/logout')\n\n @decorators.list_route(methods=['post'])\n def login(self, request, *args, **kwargs):\n req_data = request.data\n wx_req_data = urllib.urlencode({\n \"appid\": \"*\",\n \"secret\": \"*\",\n \"js_code\": req_data['wx_code'],\n \"grant_type\": \"authorization_code\"\n })\n wx_request = urllib2.Request(url=\"https://api.weixin.qq.com/sns/jscode2session\", data=wx_req_data)\n wx_session = urllib2.urlopen(wx_request)\n wx_session_reslut = WeChatOpenIdSerializer(data=json.loads(wx_session.read()))\n if wx_session_reslut.is_valid():\n openid = wx_session_reslut.data['openid']\n user, status = User.objects.get_or_create(username=openid)\n wechat_user, status = WeChatUser.objects.get_or_create(user=user,\n openid=openid)\n token, created = Token.objects.get_or_create(user=user)\n return Response({'token': token.key})\n return Response({'error': \"Login failed!\"})\n\n @decorators.list_route(methods=['post'])\n def logout(self, request, *args, **kwargs):\n pass\n","sub_path":"apps/member/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"95030855","text":"import os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('ggplot')\r\n\r\n\r\ndef read_data(name, filepath=os.path.realpath(os.path.dirname(__file__))):\r\n x = []\r\n y = []\r\n dataset_path = os.path.join(filepath, \"data/\" + name)\r\n with open(dataset_path, 'r') as f:\r\n for line in f:\r\n numbers = [float(i) for i in line.split()]\r\n x.append([numbers[0], numbers[1]])\r\n y.append(numbers[2])\r\n\r\n return np.array(x), np.array(y).reshape(len(y), 1) - 1\r\n\r\n\r\ndef visualize_data(x, y):\r\n plt.figure(figsize=(7, 7))\r\n plt.scatter(x[:, 0], x[:, 1], marker='o', c=y[:,0], s=25, edgecolor='k')\r\n plt.title(\"Samples\")\r\n plt.xlabel('Feature 1')\r\n plt.ylabel('Feature 2')\r\n plt.show()\r\n\r\n\r\ndef hypotesis(x, theta, k):\r\n s = 0\r\n for j in range(theta.shape[1]):\r\n s += np.exp(np.dot(x, theta[:, j]))\r\n p = np.exp(np.dot(x, theta[:, k]))/s\r\n return p\r\n\r\n\r\ndef cost_function(x, y, theta):\r\n J = 0\r\n for i in range(len(x)):\r\n for k in range(theta.shape[1]):\r\n J += int(y[i] == k)*np.log(hypotesis(x[i, :], theta, k))\r\n return J\r\n\r\n\r\ndef gradient(x, y, theta):\r\n grad = np.zeros(theta.shape)\r\n for k in range(theta.shape[1]):\r\n for i in range(len(x)):\r\n grad[:, k] += x[i, :]*((y[i]==k) - hypotesis(x[i, :], theta, k))\r\n return grad\r\n\r\n\r\ndef soft_max(x, y, k=3, alpha=0.001, iterations=1000):\r\n z = np.ones((len(x), 1))\r\n X = np.concatenate((z, x), axis=1)\r\n theta = np.ones((X.shape[1], k))\r\n cost = np.zeros((iterations, 1))\r\n for i in range(iterations):\r\n theta += alpha*gradient(X, y, theta)\r\n cost[i] = cost_function(X, y, theta)\r\n return theta, cost\r\n\r\n\r\ndef predict(x, theta):\r\n X = np.concatenate((np.ones((len(x), 1)), x), axis=1)\r\n P = np.zeros((len(x), theta.shape[1]))\r\n for i in range(theta.shape[1]):\r\n P[:, i] = hypotesis(X, theta, i)\r\n prediction = np.zeros((len(x), 1))\r\n for i in range(len(x)):\r\n k = np.argmax(P[i, :])\r\n prediction[i] = k\r\n\r\n return prediction\r\n\r\n\r\ndef plot_cost_function(cost):\r\n plt.figure(figsize=(8, 8))\r\n plt.plot(cost)\r\n plt.title(\"Negative cost as fucntion \\n of number of iterations\")\r\n plt.xlabel(\"Number of iterations\")\r\n plt.ylabel(\"Negative cost J\")\r\n plt.show()\r\n\r\n\r\ndef plot_line(x, y, theta):\r\n Z = predict(x, theta)\r\n missed = []\r\n label = []\r\n for i in range(len(x)):\r\n if Z[i] != y[i]:\r\n missed.append(x[i,: ])\r\n label.append(int(Z[i]))\r\n missed = np.array(missed)\r\n\r\n plt.figure(figsize=(8, 8))\r\n plt.scatter(x[np.where(y == 0)[0], 0], x[np.where(y == 0)[0], 1], marker='o', c='r', s=25, label=\"First class\")\r\n plt.scatter(x[np.where(y == 1)[0], 0], x[np.where(y == 1)[0], 1], marker='o', c='y', s=25, label=\"Second class\")\r\n plt.scatter(x[np.where(y == 2)[0], 0], x[np.where(y == 2)[0], 1], marker='o', c='b', s=25, label=\"Third class\")\r\n\r\n plt.scatter(missed[:, 0], missed[:, 1], marker='*', linewidth=3.0, c='g', s=25, label='Misclassified samples')\r\n plt.xlabel('Feature 1')\r\n plt.ylabel('Feature 2')\r\n plt.legend()\r\n plt.show()\r\n return 1 - len(missed)/len(y)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n x, y = read_data(\"softmaxData.txt\")\r\n theta, c = soft_max(x, y)\r\n visualize_data(x, y)\r\n plot_cost_function(c)\r\n acc = plot_line(x, y, theta)\r\n print(\"**********Parameters*********\")\r\n print(theta)\r\n print(\"**********Accuracy*********\")\r\n print(acc)\r\n","sub_path":"soft_max.py","file_name":"soft_max.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"439543224","text":"import ClientForm as _CF\r\nimport ClientCookie as _CC\r\nimport mechanize as _mz\r\nimport urllib2 as _u2\r\n\r\nclass PBPScriptError(Exception):\r\n    \"\"\"Type for all pbpscript errors for easier catching\"\"\"\r\n\r\n# group together the ones that could be caused by errors\r\n# in your web application (as distinct from errors in pbp's code \r\n# itself)\r\nwebinteraction_errors = (PBPScriptError,\r\n                         _u2.HTTPError, \r\n                         _u2.URLError,\r\n                         _CF.ControlNotFoundError, \r\n                         _CF.ParseError,\r\n                         _CF.ItemNotFoundError, \r\n                         _CF.ItemCountError,\r\n                         _CC.LoadError, \r\n                         _CC.RobotExclusionError,\r\n                         _mz.BrowserStateError, \r\n                         _mz.FormNotFoundError,\r\n                         _mz.LinkNotFoundError,\r\n                         )\r\n\r\nclass FailedPyloadError(PBPScriptError):\r\n    def __init__(self, filename):\r\n        self.filename = filename\r\n    def __str__(self):\r\n        t = \"%s was not found or did not contain a variable named __pbp__\"\r\n        return t % (self.filename,)\r\n\r\nclass NoResponseError(PBPScriptError):\r\n    \"\"\"An attempt to access the last_res failed because no response\r\n    has been seen from the server\r\n    \"\"\"\r\n    def __str__(self):\r\n        t = \"failed because there was no response from the server\"\r\n        return t\r\n    __repr__ = __str__\r\n\r\n\r\nclass TimedOutError(PBPScriptError):\r\n    \"\"\"Too much time elapsed (do_endtimer)\"\"\"\r\n    def __init__(self, expected, elapsed):\r\n        self.expected = expected\r\n        self.elapsed = elapsed\r\n    def __str__(self):\r\n        t = \"Operation took %s seconds but %s was the maximum\"\r\n        return t % (self.elapsed, self.expected)\r\n\r\nclass MissingFormError(PBPScriptError):\r\n    def __init__(self, formspec):\r\n        self.formspec = formspec\r\n    def __str__(self):\r\n        return \"No form %s found on the page.\" % (self.formspec,)\r\n    __repr__ = __str__\r\n\r\nclass NoCodeMatchError(PBPScriptError):\r\n    def __init__(self, expected, response, code):\r\n        self.expected = expected\r\n        self.response = response\r\n        self.code = code\r\n    def __str__(self):\r\n        if self.response:\r\n            t = \"\\\r\nPage %(url)s came back with code %(code)s but you expected %(expected)s\"\r\n            return t % dict(url=self.response.wrapped.url, code=self.code,\r\n                            expected=self.expected) \r\n        else:\r\n            t = \"Server error code was %(code)s but you expected %(expected)s\"\r\n            return t % dict(code=self.code,\r\n                            expected=self.expected)\r\n    __repr__ = __str__\r\n\r\nclass DataNotFoundError(PBPScriptError):\r\n    \"\"\"text expected by the find command (or the stopat\r\n    argument of go/submit/follow) was not found\r\n    \"\"\"\r\n    def __init__(self, expected, response):\r\n        self.response = response\r\n        self.expected = expected # the string used to search\r\n    def __str__(self):\r\n        t = \"Page %s didn't match the search string: %s\"\r\n        return t % (self.response.wrapped.url, self.expected)\r\n    __repr__ = __str__\r\n\r\nclass DataFoundInappropriatelyError(PBPScriptError):\r\n    \"\"\"text expected by the nofind command (or the stopat\r\n    argument of go/submit/follow) was found and shouldn't have been\r\n    \"\"\"\r\n    def __init__(self, expected, response):\r\n        self.response = response \r\n        self.expected = expected # the string used to search\r\n    def __str__(self):\r\n        t = \"Page %s matched the search string, even though it shouldn't have: %s\"\r\n        return t % (self.response.wrapped.url, self.expected)\r\n    __repr__ = __str__\r\n\r\nclass PBPUsageError(PBPScriptError):\r\n    def __init__(self, command):\r\n        self.command = command\r\n    def __str__(self):\r\n        fullcmd = self.command\r\n        shortcmd = fullcmd.split(' ', 1)[0]\r\n        return \"This command failed: %s (try help %s)\" % (fullcmd, shortcmd)\r\n\r\n    __repr__ = __str__\r\n\r\nclass FieldValueError(PBPScriptError):\r\n    def __init__(self, val):\r\n        self.val = val\r\n    def __str__(self):\r\n        return \"The value %s specified for the field was impossible. (Did you forget a + or -?)\" % (self.val,)\r\n    __repr__ = __str__\r\n\r\n\r\n","sub_path":"tags/0.2.1/code/pbp/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"323865182","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Image, Location, Category\n\n\n# Create your views here.\n\ndef welcome(request):\n    images = Image.get_all_images()\n    locations = Location.get_all_locations()\n    \n    return render(request, 'welcome.html', {'images': images, 'locations': locations})\n\n\ndef search_results(request):\n    if 'category' in request.GET and request.GET[\"category\"]:\n        search_term = request.GET.get(\"category\")\n        searched_images = Image.search_by_category(search_term)\n        message = f\"{search_term}\"\n        \n        return render(request,'search.html',{\"message\":message, \"images\":searched_images, \"category\":search_term})\n    \n    else:\n        message = \"You haven't searched for any category\"\n        \n        return render(request, 'search.html', {\"message\":message})\n    \n    \n# def location(request,location):\n#     selected_location = Location.objects.get(id = location)\n#     images = Image.objects.filter(location = selected_location.id)\n    \n#     return render(request, 'location.html', {\"location\":selected_location,\"images\":images})\n\n\ndef filter_by_location(request,location_id):\n    \"\"\"\n    Function that filters images by location\n    \"\"\"\n    images = Image.filter_location(id=location_id)\n    print(id,'it works')\n    return render (request, 'location.html', {\"images\":images})\n","sub_path":"photos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"22614275","text":"from liteeth.common import *\n\nfrom litex.gen.genlib.misc import chooser\n\n\nclass LiteEthMACPreambleInserter(Module):\n    def __init__(self, dw):\n        self.sink = stream.Endpoint(eth_phy_description(dw))\n        self.source = stream.Endpoint(eth_phy_description(dw))\n\n        # # #\n\n        preamble = Signal(64, reset=eth_preamble)\n        cnt_max = (64//dw)-1\n        cnt = Signal(max=cnt_max+1)\n        clr_cnt = Signal()\n        inc_cnt = Signal()\n\n        self.sync += \\\n            If(clr_cnt,\n                cnt.eq(0)\n            ).Elif(inc_cnt,\n                cnt.eq(cnt+1)\n            )\n\n        fsm = FSM(reset_state=\"IDLE\")\n        self.submodules += fsm\n        fsm.act(\"IDLE\",\n            self.sink.ready.eq(1),\n            clr_cnt.eq(1),\n            If(self.sink.valid,\n                self.sink.ready.eq(0),\n                NextState(\"INSERT\"),\n            )\n        )\n        fsm.act(\"INSERT\",\n            self.source.valid.eq(1),\n            chooser(preamble, cnt, self.source.data),\n            If(cnt == cnt_max,\n                If(self.source.ready, NextState(\"COPY\"))\n            ).Else(\n                inc_cnt.eq(self.source.ready)\n            )\n        )\n\n        self.comb += [\n            self.source.data.eq(self.sink.data),\n            self.source.last_be.eq(self.sink.last_be)\n        ]\n        fsm.act(\"COPY\",\n            self.sink.connect(self.source, leave_out=set([\"data\", \"last_be\"])),\n\n            If(self.sink.valid & self.sink.last & self.source.ready,\n                NextState(\"IDLE\"),\n            )\n        )\n\n\nclass LiteEthMACPreambleChecker(Module):\n    def __init__(self, dw):\n        self.sink = stream.Endpoint(eth_phy_description(dw))\n        self.source = stream.Endpoint(eth_phy_description(dw))\n\n        # # #\n\n        preamble = Signal(64, reset=eth_preamble)\n        cnt_max = (64//dw) - 1\n        cnt = Signal(max=cnt_max+1)\n        clr_cnt = Signal()\n        inc_cnt = Signal()\n\n        self.sync += \\\n            If(clr_cnt,\n                cnt.eq(0)\n            ).Elif(inc_cnt,\n                cnt.eq(cnt+1)\n            )\n\n        discard = Signal()\n        clr_discard = Signal()\n        set_discard = Signal()\n\n        self.sync += \\\n            If(clr_discard,\n                discard.eq(0)\n            ).Elif(set_discard,\n                discard.eq(1)\n            )\n\n        ref = Signal(dw)\n        match = Signal()\n        self.comb += [\n            chooser(preamble, cnt, ref),\n            match.eq(self.sink.data == ref)\n        ]\n\n        fsm = FSM(reset_state=\"IDLE\")\n        self.submodules += fsm\n\n        fsm.act(\"IDLE\",\n            self.sink.ready.eq(1),\n            clr_cnt.eq(1),\n            clr_discard.eq(1),\n            If(self.sink.valid,\n                clr_cnt.eq(0),\n                inc_cnt.eq(1),\n                clr_discard.eq(0),\n                set_discard.eq(~match),\n                NextState(\"CHECK\"),\n            )\n        )\n        fsm.act(\"CHECK\",\n            self.sink.ready.eq(1),\n            If(self.sink.valid,\n                set_discard.eq(~match),\n                If(cnt == cnt_max,\n                    If(discard | (~match),\n                        NextState(\"IDLE\")\n                    ).Else(\n                        NextState(\"COPY\")\n                    )\n                ).Else(\n                    inc_cnt.eq(1)\n                )\n            )\n        )\n        self.comb += [\n            self.source.data.eq(self.sink.data),\n            self.source.last_be.eq(self.sink.last_be)\n        ]\n        fsm.act(\"COPY\",\n            self.sink.connect(self.source, leave_out=set([\"data\", \"last_be\"])),\n            If(self.source.valid & self.source.last & self.source.ready,\n                NextState(\"IDLE\"),\n            )\n        )\n","sub_path":"liteeth/core/mac/preamble.py","file_name":"preamble.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"6039435","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom hello_world.models import Rating, StuffToRate\n\n\ndef index(request):\n    display = ''\n    objs = StuffToRate.objects.all()\n    obj = objs[0]\n    display += '{} - {} - {}'.format(obj.title, obj.color, obj.get_rating())\n    # return HttpResponse(display)\n    context = {\n        'objs': objs,\n        # 'obj': obj,\n    }\n    return render(request, 'base.html', context)\n\n\ndef hello_name(request):\n    display = 'Hello Alvin this is hello_name function inside hello_world.views'\n    return HttpResponse(display)\n","sub_path":"hello_world/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"179258487","text":"from django.db import models\nfrom django.forms import ModelForm\nfrom django import forms\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Post(models.Model):\n    user = models.ForeignKey(User)\n    title = models.CharField(max_length=200)\n    text = models.TextField()\n    image = models.ImageField(upload_to='upload/%Y/%m/%d', blank=True, default='')\n    thumbnail = models.ImageField(upload_to='upload/%Y/%m/%d',max_length=100,blank=True,null=True)\n    pub_date = models.DateTimeField('publication date',auto_now=True)\n\n    def __unicode__(self):\n        return self.title\n    \n    class Meta:\n        ordering = [\"-pub_date\"] \n    \n    def create_thumbnail(self):\n        # original code for this method came from\n        # http://snipt.net/danfreak/generate-thumbnails-in-django-with-pil/\n\n        # If there is no image associated with this.\n        # do not create thumbnail\n        if not self.image:\n            return\n\n        from PIL import Image\n        from cStringIO import StringIO\n        from django.core.files.uploadedfile import SimpleUploadedFile\n        import os\n\n        # Set our max thumbnail size in a tuple (max width, max height)\n        THUMBNAIL_SIZE = (100,100)\n\n        # Open original photo which we want to thumbnail using PIL's Image\n        image = Image.open(StringIO(self.image.read()))\n        image_type = image.format.lower()\n\n        # Convert to RGB if necessary\n        # Thanks to Limodou on DjangoSnippets.org\n        # http://www.djangosnippets.org/snippets/20/\n        #\n        # I commented this part since it messes up my png files\n        #\n        #if image.mode not in ('L', 'RGB'):\n        #    image = image.convert('RGB')\n\n        # We use our PIL Image object to create the thumbnail, which already\n        # has a thumbnail() convenience method that contrains proportions.\n        # Additionally, we use Image.ANTIALIAS to make the image look better.\n        # Without antialiasing the image pattern artifacts may result.\n        image.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)\n\n        # Save the thumbnail\n        temp_handle = StringIO()\n        image.save(temp_handle, image_type)\n        temp_handle.seek(0)\n\n        # Save image to a SimpleUploadedFile which can be saved into\n        # ImageField\n        suf = SimpleUploadedFile(os.path.split(self.image.name)[-1],\n                temp_handle.read(), content_type='image/%s' % (image_type))\n        # Save SimpleUploadedFile into thumbnail field\n        self.thumbnail.save('%s_thumbnail.%s'%(os.path.splitext(suf.name)[0],image_type), suf, save=False)\n\nclass AddForm(ModelForm):\n    class Meta:\n        model = Post\n        fields = ('title', 'text', 'image')\n\n    title = forms.CharField(max_length=200)\n    text = forms.CharField(widget=forms.Textarea)\n","sub_path":"blog/~models.py","file_name":"~models.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"596892418","text":"import tensorflow as tf\nimport numpy as np\n\n\ndef create_padding_mask(seq):\n    '''\n    Used to generate the padding mask\n\n    :param seq:\n    :return:\n    '''\n    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\n    # Add an extra dimension to apply the padding to the attention logits.\n    return seq  # (batch_size, 1, 1, seq_len)\n\nclass Attention(tf.keras.layers.Layer):\n    def __init__(self, d_model):\n        super(Attention, self).__init__()\n        self.P = tf.keras.layers.Dense(d_model)\n    def call(self,x,y,mask):\n        a = tf.matmul(x,self.P(y),transpose_b=True)\n        if mask is not None:\n\n            a += mask * -1e9\n        print(a)\n        a = tf.nn.softmax(a,axis=-2)\n\n        out = tf.matmul(a,x,transpose_a=True)\n        print(out)\n        return out\n\n\n\nclass LSTMdecoder(tf.keras.layers.Layer):\n    def __init__(self,d_model, input_vocab_size,size,layer_num):\n        super(LSTMdecoder, self).__init__()\n        self.cell = []\n        self.size = size\n        for i in range(layer_num):\n            self.cell.append(tf.keras.layers.LSTMCell(units=size,dropout=0.1))\n\n        # self.cell = tf.keras.layers.LSTMCell(units=size,dropout=0.1)\n\n        self.rnn_layer = tf.keras.layers.RNN(self.cell,return_sequences = True,return_state=True)\n        self.Embedding = tf.keras.layers.Embedding(input_vocab_size,d_model)\n        self.d_model = d_model\n        self.DecoderInputDense = tf.keras.layers.Dense(d_model)\n        self.attention = Attention(d_model)\n        pass\n    def call(self,enc_vec,init_state,last_word,mode,mask):\n\n        last_word_embedding = self.Embedding(last_word)\n        # last_word_embedding = tf.reshape(last_word_embedding,[-1,1,self.d_model])\n        # enc_vec = tf.reshape(enc_vec,[-1,1,self.size])\n        # enc_vec = tf.math.l2_normalize(enc_vec,-1)\n        print(enc_vec)\n        decoder_input = self.attention(enc_vec,last_word_embedding,mask)\n        decoder_input = self.DecoderInputDense(decoder_input)\n\n        if mode == 'train':\n            res = self.rnn_layer(decoder_input)\n            decoder_output = res[0]\n            return decoder_output,tf.constant(0)\n        #\n        else:\n            res = self.rnn_layer(decoder_input,init_state)\n            decoder_output,decoder_states = res[0],res[1:]\n            states = []\n            for s in decoder_states:\n                states.extend([tf.expand_dims(cstate,1) for cstate in s])\n\n            states = tf.concat(states,1)\n            # return decoder_output,[decoder_states_c,decoder_states_m]\n            return decoder_output,states\n\n\n\n\n\nclass RNNS2Smodel(tf.keras.Model):\n\n    def __init__(self,d_model, input_vocab_size,encoder_size,encoder_layer_num,decoder_size,decoder_layer_num):\n        super(RNNS2Smodel, self).__init__()\n        self.input_embedding = tf.keras.layers.Embedding(input_vocab_size,d_model)\n\n        self.decoder = LSTMdecoder(d_model=d_model,input_vocab_size=input_vocab_size,size=decoder_size,\n                                   layer_num=decoder_layer_num)\n        self.final_layer = tf.keras.layers.Dense(input_vocab_size)\n        pass\n\n    def call(self,source,source_len,last_word,mode,init_state,mask=None):\n        # enc_vec,enc_rnn_out = self.encoder(source,source_len)\n        enc_vec = self.input_embedding(source)\n        # enc_vec = None\n        mask = create_padding_mask(source)\n        mask = tf.expand_dims(mask,-1)\n        seq_len = tf.shape(last_word)[1]\n        mask = tf.tile(mask,[1,1,seq_len])\n        decoder_output, decoder_state = self.decoder(enc_vec,init_state,last_word,mode,mask)\n        decoder_output = self.final_layer(decoder_output)\n\n        return decoder_output,decoder_state,enc_vec\n\n\n\n\n","sub_path":"model/RNNw2sdp.py","file_name":"RNNw2sdp.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"147345258","text":"#Linear metadata model for testing purposes\nfrom comet_ml import Experiment\nimport tensorflow as tf\nfrom DeepTreeAttention.trees import AttentionModel\nfrom DeepTreeAttention.models import metadata\nfrom DeepTreeAttention.callbacks import callbacks\nimport pandas as pd\n\nmodel = AttentionModel(config=\"/home/b.weinstein/DeepTreeAttention/conf/tree_config.yml\")\nmodel.create()\n\n#Log config\nexperiment = Experiment(project_name=\"neontrees\", workspace=\"bw4sz\")\nexperiment.log_parameters(model.config[\"train\"])\nexperiment.log_parameters(model.config[\"evaluation\"]) \nexperiment.log_parameters(model.config[\"predict\"])\nexperiment.add_tag(\"RGB\")\n\n##Train\n\n#Train see config.yml for tfrecords path with weighted classes in cross entropy\nmodel.read_data()\nclass_weight = model.calc_class_weight()\n\n##Train subnetwork\nexperiment.log_parameter(\"Train subnetworks\", True)\nwith experiment.context_manager(\"RGB_spatial_subnetwork\"):\n    print(\"Train RGB spatial subnetwork\")\n    model.read_data(mode=\"RGB_submodel\")\n    model.train(submodel=\"spatial\", sensor=\"RGB\",class_weight=[class_weight, class_weight, class_weight], experiment=experiment)\n\nwith experiment.context_manager(\"RGB_spectral_subnetwork\"):\n    print(\"Train RGB spectral subnetwork\") \n    model.read_data(mode=\"RGB_submodel\")    \n    model.train(submodel=\"spectral\", sensor=\"RGB\", class_weight=[class_weight, class_weight, class_weight], experiment=experiment)\n    \n#Train full model\nwith experiment.context_manager(\"RGB_model\"):\n    experiment.log_parameter(\"Class Weighted\", True)\n    model.read_data(mode=\"RGB_train\")\n    model.train(class_weight=class_weight, sensor=\"RGB\", experiment=experiment)\n    \n    #Get Alpha score for the weighted spectral/spatial average. Higher alpha favors spatial network.\n    if model.config[\"train\"][\"RGB\"][\"weighted_sum\"]:\n        estimate_a = model.RGB_model.get_layer(\"weighted_sum\").get_weights()\n        experiment.log_metric(name=\"spatial-spectral weight\", value=estimate_a[0][0])","sub_path":"experiments/Trees/run_RGB.py","file_name":"run_RGB.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"220005926","text":"import matplotlib as mpl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plotter(Epsi,psi,xvec_in,V,n, label='x'):\n\n    # Extracting W\n    xvec = xvec_in.magnitude\n    xmin = xvec[0]\n    xmax = xvec[-1]\n    \n    # Offsetting and scaling for the wave functions\n    ytop = Epsi[n].magnitude\n    ybot = np.min(np.diag(V.magnitude))\n    vertical_scale = ytop-ybot\n    extra_space = vertical_scale*.1\n    ybot -= extra_space\n\n    # Create the figure\n    f=plt.figure()\n    ax=f.add_subplot(111)\n    \n    # Set limits\n    plt.xlim(xvec[0],xvec[-1])\n    plt.ylim(ybot,ytop)\n    \n    # Plot the wave functions\n    wavefunctionscale = vertical_scale/5\n    for i in np.arange(n-1,-1,-1):\n        color=mpl.cm.jet_r((i)/(float)(n-1),1)\n        plt.plot(xvec,psi[:,i]*wavefunctionscale+Epsi[i].magnitude,c=color)\n        thislabel = 'n='+str(i+1)+', E={}'.format(np.round(Epsi[i]*1000)/1000.0)\n        ax.axhline(y=Epsi[i].magnitude,xmin=xmin,xmax=xmax,c=color,ls='--',label=thislabel)\n    \n    # Labels and legend\n    plt.xlabel(label+' ('+str(xvec_in.units)+')')\n    plt.ylabel('Energy (hartree)')\n    L=plt.legend(bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)\n    box=ax.get_position()\n    ax.set_position([box.x0,box.y0,0.7*box.width,box.height])\n    \n    # Potential too\n    Varray = np.diag(V.magnitude)\n    plt.plot(xvec,Varray,color='gray',linewidth=4)\n    plt.grid(True)\n    \n\ndef flat_potential(xvec, V0=0, graphit=False):\n    \"\"\"Creates a flat surface with value V0\n    The surface is returned in the form of a diagonal matrix whose dimensions match xvec\"\"\" \n    nsteps = len(xvec)\n    Varray = np.ones(nsteps)*V0\n    if graphit:\n        f = plt.figure()\n        ax=f.add_subplot(111)\n        plt.plot(xvec,Varray,label='Potential',color='gray',linewidth=4)\n        plt.xlabel('x')\n        plt.ylabel('potential energy')\n        L=plt.legend(bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)\n        box=ax.get_position()\n        ax.set_position([box.x0,box.y0,0.7*box.width,box.height])\n        plt.grid(True)\n    V = np.diag(Varray)\n    return V\n\ndef sloped_potential(xvec, V1=0, V2=1, graphit=False):\n    \"\"\"Creates a sloped surface with values ranging from V1 to V2\n    The surface is returned in the form of a diagonal matrix whose dimensions match xvec\"\"\" \n    nsteps = len(xvec)\n    xarray = (xvec-xvec[0])/(xvec[-1]-xvec[0])\n    xarray = xarray.magnitude\n    Varray = V1 + (V2-V1)*xarray\n    if graphit:\n        f = plt.figure()\n        ax=f.add_subplot(111)\n        plt.plot(xvec,Varray,label='Potential',color='gray',linewidth=4)\n        plt.xlabel('x')\n        plt.ylabel('potential energy')\n        L=plt.legend(bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)\n        box=ax.get_position()\n        ax.set_position([box.x0,box.y0,0.7*box.width,box.height])\n        plt.grid(True)\n    V = np.diag(Varray)\n    return V\n\ndef step_potential(xvec, xbump, Vleft, Vright, graphit=False):\n    \"\"\"Creates a surface that jumps from Vleft to Vright, at position xbump\n    The surface is returned in the form of a diagonal matrix whose dimensions match xvec\"\"\" \n    nsteps = len(xvec)\n    Varray = np.ones(nsteps)\n    for i in range(nsteps):\n        if xvec[i].magnitude < xbump:\n            Varray[i] = Varray[i]*Vleft\n        else:\n            Varray[i] = Varray[i]*Vright\n    if graphit:\n        f = plt.figure()\n        ax=f.add_subplot(111)\n        plt.plot(xvec,Varray,label='Potential',color='gray',linewidth=4)\n        plt.xlabel('x')\n        plt.ylabel('potential energy')\n        L=plt.legend(bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)\n        box=ax.get_position()\n        ax.set_position([box.x0,box.y0,0.7*box.width,box.height])\n        plt.grid(True)\n    V = np.diag(Varray)\n    return V\n\n\n# Draw the box (clumsily)\nfrom itertools import product, combinations\ndef drawbox_xx(xinit,xfinal,y,z,fig=[]):\n    if np.size(fig) == 0:\n        fig = plt.figure()\n    ax = fig.gca(projection='3d')\n    ax.set_box_aspect(aspect = (xinit,y,z))\n    rext = xfinal/xinit\n    r = [0, 1]\n    for s, e in combinations(np.array(list(product(r, r, r))), 2):\n        if np.sum(np.abs(s-e)) == r[1]-r[0]:\n            ax.plot3D(*zip(s, e), color=\"b\")\n    ax.plot3D([1.0, rext],[0, 0],[0, 0],color='g')\n    ax.plot3D([1.0, rext],[1, 1],[1, 1],color='g')\n    ax.plot3D([1.0, rext],[0, 0],[1, 1],color='g')\n    ax.plot3D([1.0, rext],[1, 1],[0, 0],color='g')\n    ax.plot3D([rext, rext],[0, 1],[0, 0],color='g')\n    ax.plot3D([rext, rext],[0, 0],[0, 1],color='g')\n    ax.plot3D([rext, rext],[0, 1],[1, 1],color='g')\n    ax.plot3D([rext, rext],[1, 1],[0, 1],color='g')\n    ax.set_xticks([])\n    ax.set_yticks([])\n    ax.set_zticks([])\n    ax.set_xlabel('x')\n    ax.set_ylabel('y')\n    ax.set_zlabel('z')\n    return fig","sub_path":"Quantum 2023 (Neshyba)/Notebooks/Week_14a.IntroToKMTheory/PchemLibrary.py","file_name":"PchemLibrary.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"262921086","text":"import pandas as pd\nimport numpy as np\nimport multiprocessing\nfrom multiprocessing import Process, Manager, Queue\nimport math\n\nfrom PyProM.src.data.importing import Import\n\nimport sys\nimport os\nfrom PyProM.src.utility.util_profile import Util_Profile\nfrom PyProM.src.utility.util_multiprocessing import Util_Multiprocessing\nimport time\nfrom functools import wraps\n\ndef timefn(fn):\n\t@wraps(fn)\n\tdef measure_time(*args, **kwargs):\n\t\tt1 = time.time()\n\t\tresult = fn(*args, **kwargs)\n\t\tt2 = time.time()\n\t\tprint(\"@timefn: {} took {} seconds\".format(fn.__name__, t2-t1))\n\t\treturn result\n\treturn measure_time\n\n\ntimefn = Util_Profile.timefn\nclass Eventlog(pd.DataFrame):\n\t\"\"\"docstring for Eventlog\"\"\"\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Eventlog, self).__init__(*args, **kwargs)\n\t\tself._columns = []\n\n\n\t@property\n\tdef _constructor(self):\n\t\treturn Eventlog\n\n\t@classmethod\n\tdef from_xes(cls, path):\n\t\t_import = Import(path, format='xes')\n\t\tdict_eventlog = _import.eventlog\n\t\tif isinstance(dict_eventlog, dict):\n\t\t\tprint(\"import dict and produce eventlog\")\n\t\t\tdf = Eventlog.from_dict(dict_eventlog)\n\t\t\treturn df\n\n\t@classmethod\n\tdef from_txt(cls, path, sep='\\t', encoding=None, **kwargs):\n\t\tif 'dtype' in kwargs:\n\t\t\tdtype = kwargs['dtype']\n\t\telse:\n\t\t\tdtype = None\n\t\tif 'index_col' in kwargs:\n\t\t\tindex_col = kwargs['index_col']\n\t\telse:\n\t\t\tindex_col=False\n\t\tdf = pd.read_csv(path, sep = sep, index_col = index_col, dtype=dtype, encoding=encoding)\n\t\treturn Eventlog(df)\n\n\t\"\"\"\n\tdef __call__(self, path, format='xes'):\n\t\tif format == 'xes':\n\t\t\t_import = Import(path, format='xes')\n\t\t\tdict_eventlog = _import.eventlog\n\t\t\treturn self.dict_to_dataframe(dict_eventlog)\n\n\t\tif format == 'txt':\n\t\t\treturn self.csv_to_dataframe(path)\n\t\"\"\"\n\t@timefn\n\tdef assign_caseid(self, *args):\n\t\tcount = 0\n\t\tfor arg in args:\n\t\t\tif count == 0:\n\t\t\t\tself['CASE_ID'] = self[arg].apply(str)\n\t\t\telse:\n\t\t\t\tself['CASE_ID'] += '_' + self[arg].apply(str)\n\t\t\t#del self[arg]\n\t\t\tcount +=1\n\t\tself._columns.append('CASE_ID')\n\t\treturn self\n\n\t@timefn\n\tdef assign_activity(self, *args):\n\t\tcount = 0\n\t\tfor arg in args:\n\t\t\tif count == 0:\n\t\t\t\tself['Activity'] = self[arg].apply(str)\n\t\t\telse:\n\t\t\t\tself['Activity'] += '_' + self[arg].apply(str)\n\t\t\t#del self[arg]\n\t\t\tcount +=1\n\t\tself._columns.append('Activity')\n\t\treturn self\n\n\t@timefn\n\tdef assign_resource(self, *args):\n\t\tcount = 0\n\t\tfor arg in args:\n\t\t\tif count == 0:\n\t\t\t\tself['Resource'] = self[arg].astype(str)\n\t\t\telse:\n\t\t\t\tself['Resource'] += '_' + self[arg].astype(str)\n\t\t\t#del self[arg]\n\t\t\tcount +=1\n\t\tself._columns.append('Resource')\n\t\treturn self\n\n\t@timefn\n\tdef assign_timestamp(self, name, new_name = 'TIMESTAMP', _format = '%Y/%m/%d %H:%M:%S', errors='ignore'):\n\t\tprint(_format)\n\t\tself[name] = pd.to_datetime(self[name], format = _format, errors=errors)\n\t\tself.rename(columns={name: new_name}, inplace=True)\n\t\t#self.loc[pd.isna(self[name]),name] = '-'\n\t\tself._columns.append(new_name)\n\t\treturn self\n\n\tdef assign_attr(self, **kwargs):\n\t\t\"\"\"\n\t\tThis function, ~~~~.\n\t\t#TODO: only rename the column!\n\t\t:param kwargs: old_col=column name in the data, new_col=column name to use in the generated eventlog\n\t\t:return: eventlog\n\t\t\"\"\"\n\t\tif 'old_col' in kwargs:\n\t\t\told_col = kwargs['old_col']\n\t\tif 'new_col' in kwargs:\n\t\t\tnew_col = kwargs['new_col']\n\t\telse:\n\t\t\tnew_col = kwargs['old_col']\n\t\tself[new_col] = self[old_col]\n\t\tself._columns.append(new_col)\n\t\tdel self[old_col]\n\t\tself._columns.append(new_col)\n\t\treturn self\n\n\tdef assign_cluster(self, *args):\n\t\tcount = 0\n\t\tfor arg in args:\n\t\t\tif count == 0:\n\t\t\t\tself['Cluster'] = self[arg].astype(str)\n\t\t\telse:\n\t\t\t\tself['Cluster'] += '_' + self[arg].astype(str)\n\t\t\t#del self[arg]\n\t\t\tcount +=1\n\t\tself._columns.append('Cluster')\n\t\treturn self\n\n\tdef sort(self, by=['CASE_ID']):\n\t\tself = self.sort_values(by)\n\t\treturn self\n\n\tdef clear_columns(self, *args, **kwargs):\n\t\tif 'extra' in kwargs:\n\t\t\textra = kwargs['extra']\n\t\telse:\n\t\t\textra = []\n\t\tself = self[self._columns]\n\t\treturn self\n\n\n\n\tdef join_columns(self, col_name, *args):\n\t\tif len(args) < 2:\n\t\t\tprint(\"join_columns requires at least 2 columns\")\n\t\tcount = 0\n\t\ttmp = self.copy(deep=True)\n\t\tfor arg in args:\n\t\t\tif count == 0:\n\t\t\t\tself[col_name] = tmp[arg].astype(str)\n\t\t\telse:\n\t\t\t\tself[col_name] += '/' + tmp[arg].astype(str)\n\t\t\t#del self[arg]\n\t\t\tcount +=1\n\t\treturn self\n\n\t\"\"\"\n\tutility functions\n\t\"\"\"\n\tdef get_event_trace(self, workers, value = 'Activity'):\n\t\toutput = self.parallelize(self._get_event_trace, workers, value)\n\t\tevent_trace = Util_Multiprocessing.join_dict(output)\n\t\treturn event_trace\n\n\tdef _get_event_trace(self, eventlog, x, value='Activity'):\n\t\tevent_trace = dict()\n\t\tcount = 0\n\t\tfor instance in eventlog.itertuples():\n\t\t\tindex = instance.Index\n\t\t\tif value == 'Activity':\n\t\t\t\tai = eventlog.get_activity_by_index(index)\n\t\t\telif value == 'Resource':\n\t\t\t\tai = eventlog.get_resource_by_index(index)\n\t\t\telif value == 'TIMESTAMP':\n\t\t\t\tai = eventlog.get_timestamp_by_index(index)\n\t\t\telse:\n\t\t\t\tai = eventlog.get_col_value_by_index(value, index)\n\t\t\tif index == 0:\n\t\t\t\tevent_trace[instance.CASE_ID] = [ai]\n\t\t\t\tcontinue\n\n\t\t\tcaseid = eventlog.get_caseid_by_index(index-1)\n\n\t\t\tif instance.CASE_ID == caseid:\n\t\t\t\tevent_trace[instance.CASE_ID].append(ai)\n\n\t\t\telse:\n\t\t\t\tevent_trace[instance.CASE_ID] = [ai]\n\n\n\t\tprint(\"Finish\")\n\n\t\tx.append(event_trace)\n\n\tdef _get_trace_count(self, event_trace):\n\t\ttrace_count = dict()\n\t\ttraces = event_trace.values()\n\t\tfor trace in traces:\n\t\t\ttrace = tuple(trace)\n\t\t\tif trace not in trace_count:\n\t\t\t\ttrace_count[trace] = 0\n\t\t\ttrace_count[trace] += 1\n\t\treturn trace_count\n\n\n\tdef get_caseids(self):\n\t\tunique_caseids = self['CASE_ID'].unique()\n\t\treturn unique_caseids\n\n\tdef get_activities(self):\n\t\tunique_activities = self['Activity'].unique()\n\t\treturn unique_activities\n\n\tdef get_resources(self):\n\t\tunique_resources = self['Resource'].unique()\n\t\treturn unique_resources\n\n\tdef get_timestamps(self):\n\t\tunique_timestamps = self['TIMESTAMP'].unique()\n\t\treturn unique_timestamps\n\n\t# return the unique values of a given col as a list\n\tdef get_col_values(self,col):\n\t\treturn list(set(self[col]))\n\n\tdef get_first_caseid(self):\n\t\treturn self['CASE_ID'][0]\n\n\tdef get_caseid_by_index(self,index):\n\t\treturn self['CASE_ID'][index]\n\n\tdef get_resource_by_index(self, index):\n\t\treturn self['Resource'][index]\n\n\tdef get_activity_by_index(self, index):\n\t\treturn self['Activity'][index]\n\n\tdef get_timestamp_by_index(self, index):\n\t\treturn self['TIMESTAMP'][index]\n\n\tdef get_col_value_by_index(self, col, index):\n\t\treturn self[col][index]\n\n\t# return the rows whose col contains the given value\n\tdef get_col_value(self, col, value):\n\t\tvalue_df = self.loc[self[col]==value]\n\t\tvalue_df.name = value\n\t\treturn value_df\n\n\tdef change_col_value(self, col, old_val, new_val):\n\t\tself.loc[self[col]==old_val, col] = new_val\n\t\treturn self\n\n\tdef col_val_to_numeric(self, col):\n\t\t\"\"\"\n\t\tTo make a chart using bokeh, x values and y values must be numeric.\n\t\tAccordingly, change column values to numeric so that it can be properly drawn by bokeh\n\n\t\tKey arguements\n\t\tcol -- column to be converted to numeric\n\t\t\"\"\"\n\t\tself.sort_values(by=col, inplace=True)\n\t\tself.reset_index(drop=True, inplace=True)\n\t\tindexs = []\n\t\ti=1\n\t\tfor index, instance in self.iterrows():\n\t\t\tif index==0:\n\t\t\t\tindexs.append(i)\n\t\t\t\tcontinue\n\t\t\tvalue = self[col][index-1]\n\t\t\tif instance[col] != value:\n\t\t\t\ti+=1\n\t\t\tindexs.append(i)\n\t\tself.loc[:, 'new_col'] = indexs\n\t\treturn self\n\n\n\tdef filter(self, criterion, value):\n\t\treturn self.loc[self[criterion] == value, :]\n\n\t# remove the rows whose col contains the given value\n\tdef remove_col_value(self, col, value):\n\t\treturn self.loc[self[col] != value]\n\n\t# return the total number of events in the eventlog\n\tdef count_event(self):\n\t\treturn len(self.index)\n\n\t# return the number of cases in the eventlog\n\tdef count_case(self):\n\t\treturn len(set(self['CASE_ID']))\n\n\t# return the number of unique values of a given col\n\tdef count_col_values(self, col):\n\t\treturn len(set(self[col]))\n\n\t# print the number of unique values of every col\n\tdef show_col_counts(self):\n\t\tcolumns = self.columns\n\t\tfor col in columns:\n\t\t\tprint(\"unique counts of {}: {}\".format(col,len(set(self[col]))))\n\n\tdef count_col_case(self, col):\n\t\tcol_case = self.groupby(col).CASE_ID.apply(list).apply(set)\n\t\tcol_case_count = col_case.apply(len)\n\t\tcol_case_count_mean = np.mean(col_case_count)\n\t\tcol_case_count_std = np.std(col_case_count)\n\t\tprint(\"CLUSTER count: {}\".format(col_case_count))\n\t\tprint(\"CLUSTER count mean: {}\".format(col_case_count_mean))\n\t\tprint(\"CLUSTER count std: {}\".format(col_case_count_std))\n\t\treturn col_case_count\n\n\tdef count_duplicate_values(self, eventlog, **kwargs):\n\t\t\"\"\"Returns the frequency of each repetition count when a value is duplicated,\n\t\te.g. repeated once: 100, repeated twice: 300\n\n\t\tKeyword arguments:\n\t\tcol -- column whose duplicates should be checked (default: Activity)\n\n\t\t\"\"\"\n\t\tif 'col' in kwargs:\n\t\t\tcol = kwargs['col']\n\t\t\ttraces = eventlog.get_event_trace(workers=4, value=col)\n\t\telse:\n\t\t\ttraces = eventlog.get_event_trace(workers=4, value='Activity')\n\t\tcount=0\n\t\tinv_act_counts = []\n\t\tfor t in traces:\n\t\t\tact_count = dict(Counter(traces[t]))\n\n\t\t\tinv_act_count = dict()\n\t\t\tfor k,v in act_count.items():\n\t\t\t\tif v < 2:\n\t\t\t\t\tcontinue\n\t\t\t\tif v in inv_act_count:\n\t\t\t\t\tinv_act_count[v].append(k)\n\t\t\t\telse:\n\t\t\t\t\tinv_act_count[v] = [k]\n\t\t\tinv_act_counts.append(inv_act_count)\n\n\t\tcount_result_step = dict()\n\t\tfor inv_act_count in inv_act_counts:\n\t\t\tfor k in inv_act_count:\n\t\t\t\tif k not in count_result_step:\n\t\t\t\t\tcount_result_step[k] = 1\n\t\t\t\telse:\n\t\t\t\t\tcount_result_step[k] += 1\n\n\t\tresult = pd.DataFrame(list(count_result_step.items()), columns=['repetition', 'count'])\n\t\treturn result\n\n\tdef count_loops(self, eventlog, **kwargs):\n\t\t\"\"\"Counts consecutive repetitions of a step: Step1-->Step1 counts 1, Step1-->Step1-->Step1 counts 2; also checks whether they were performed on the same device at the same time\n\n\t\tKeyword arguments:\n\t\tcol -- column whose repetitions should be checked (default: Activity)\n\t\tvalue -- specific value whose consecutive occurrences should be checked, e.g. 'Null'\n\t\t\"\"\"\n\t\tif 'col' in kwargs:\n\t\t\tcol = kwargs['col']\n\t\t\ttraces = eventlog.get_event_trace(workers=4, value=col)\n\t\telse:\n\t\t\ttraces = eventlog.get_event_trace(workers=4, value='Activity')\n\t\tcount=0\n\t\tif 'value' in kwargs:\n\t\t\tvalue = kwargs['value']\n\t\telse:\n\t\t\tvalue = 'default'\n\t\tfor t, r in zip(traces, resource_traces):\n\t\t\tfor index, act in enumerate(traces[t]):\n\t\t\t\tif index == len(traces[t]) -1:\n\t\t\t\t\tcontinue\n\t\t\t\tif value == 'default':\n\t\t\t\t\t\tcount+=1\n\t\t\t\telse:\n\t\t\t\t\tif act == value and traces[t][index+1] == value:\n\t\t\t\t\t\tcount+=1\n\t\tprint(\"count_consecutives: {}\".format(count))\n\t\treturn count\n\n\n\tdef describe(self):\n\t\tprint(\"# events: {}\".format(len(self)))\n\t\tprint(\"# cases: {}\".format(len(set(self['CASE_ID']))))\n\t\tprint(\"# activities: {}\".format(len(set(self['Activity']))))\n\t\tprint(\"# resources: {}\".format(len(set(self['Resource']))))\n\t\ttry:\n\t\t\tprint(\"average yield: {}\".format(np.mean(self['VALUE'])))\n\t\texcept AttributeError:\n\t\t\tprint(\"yield not exists\")\n\n\tdef split_on_case(self, split):\n\t\tcaseid = self.get_caseids()\n\t\tsub_cases = []\n\t\tfor d in np.array_split(caseid, split):\n\t\t\tsub_cases.append(d)\n\t\tsub_logs = []\n\t\tfor i in range(len(sub_cases)):\n\t\t\tsub_log = self.loc[self['CASE_ID'].isin(sub_cases[i]), :]\n\t\t\tsub_log.reset_index(drop=True, inplace=True)\n\t\t\tsub_logs.append(sub_log)\n\t\treturn sub_logs\n\n\tdef parallelize(self, func, workers=multiprocessing.cpu_count(), *args):\n\t\tsublogs = self.split_on_case(workers)\n\t\toutput = Queue()\n\t\tmanager = Manager()\n\t\toutput = manager.list()\n\t\t# Setup a list of processes that we want to run\n\t\tprocesses = [Process(target=func, args=(sublogs[i], output)+args) for i in range(len(sublogs))]\n\t\t# Run processes\n\t\tfor p in processes:\n\t\t    p.start()\n\n\t\t# Exit the completed processes\n\t\tfor p in processes:\n\t\t\tp.join()\n\n\t\treturn output\n\n\t#Relation Dictionary (key : AfterActivity, value : PreActivity list)\n\t##You need to specify the objective of this function\n\t##Additionally, please try to make the code below more efficient. (Both in terms of performance and visibility)\n\tdef relation_dictionary(self, pre_col, aft_col):\n\t\trelation_set = {}\n\t\taft_activity_list = self.get_col_values(pre_col)\n\t\tfor i in aft_activity_list:\n\t\t\trelation_set[i] = []\n\t\tfor i in range(len(self)):\n\t\t\trelation_set[self[aft_col][i]].append(self[pre_col][i])\n\n\t\treturn relation_set\n\n\n\nif __name__ == '__main__':\n\t\"\"\"\n\teventlog = Eventlog.from_xes('./example/running_example.xes')\n\tprint(type(eventlog))\n\t\"\"\"\n\teventlog = Eventlog.from_txt('/Users/GYUNAM/Desktop/LAB/SAMSUNG_PROJECT/IMPLE/input/Sample_data.txt')\n\n\teventlog = eventlog.assign_caseid('ROOT_LOT_ID', 'WAFER_ID')\n\teventlog = eventlog.assign_timestamp('TKIN_TIME', 'TKOUT_TIME')\n\tprint(eventlog)\n\n\n\n\n\n\n\n","sub_path":"src/data/Eventlog.py","file_name":"Eventlog.py","file_ext":"py","file_size_in_byte":12724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"473173006","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\nif not cap.isOpened():\n    print('Initialization failed!')\n    cap.open()\n\n# cap.get(ID) #get the properties of video.\n# cap.set(ID) #set the properties of video.\n\nwhile(True):\n    ret, frame = cap.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    cv2.imshow('frame', gray)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"cv2/chapter1/videoCapture.py","file_name":"videoCapture.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"461403052","text":"import numpy as np \nimport chainer\nfrom chainer import cuda\nimport time \nxp= cuda.cupy\nfrom matplotlib import pyplot as plt \nfrom numba import jit\n\n\n@jit\ndef do_something(n,mat):\n\tfor i in range(n):\n\t\tmat=mat.dot(mat)\n\t\tmat=mat-mat.dot(mat)\n\n@jit\ndef cpucalc(n):\n\tstart=time.time()\n\tmat=np.arange(n*n).reshape(n,n)\n\tdo_something(n,mat)\n\tend=time.time()\n\telapsed=end-start\n\tprint(\"CPU end-start=%s\"%elapsed)\n\treturn elapsed\n\ndef gpucalc(n):\n\tstart=time.time()\n\tmat=xp.arange(n*n).reshape(n,n)\n\tdo_something(n,mat)\n\tend=time.time()\n\telapsed=end-start\n\tprint(\"GPU end-start=%s\"%elapsed)\n\treturn elapsed\n\ndef main():\n\tcs=[]\n\tgs=[]\n\titeration=500\n\tstep=50\n\tfor size in range(1,iteration,step):\n\t\tprint(size)\n\t\tgs.append(gpucalc(size))\n\t\tcs.append(cpucalc(size))\n\n\tfig,ax=plt.subplots()\n\tax.plot([i for i in range(1,iteration,step)],gs,color=\"green\")\n\tax.plot([i for i in range(1,iteration,step)],cs,color=\"blue\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"cupy/matrixdot.py","file_name":"matrixdot.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"447100640","text":"#!/usr/bin/env python3\n\n\"\"\"Unit Tests for Netflix.py\"\"\"\n\n# -------\n# imports\n# -------\n\nimport os\n\nfrom io import StringIO\nfrom unittest import main, TestCase\n\nfrom contextlib import contextmanager\nfrom pickle import dump\n\nfrom Netflix import (netflix_read, netflix_eval, netflix_load,\n                     netflix_print, netflix_rmse, netflix_solve, pickle_load)\n\n\n@contextmanager\ndef temp_pickle(data, name, path=''):\n    \"\"\"\n    open/close temp filled pickle file\n    data a blob of python data\n    name a filename string\n    path a path/dir string\n    \"\"\"\n    filename = path + name\n    with open(filename, \"wb\") as pickle_file:\n        dump(data, pickle_file)\n    yield\n    os.remove(filename)\n\n# -----------\n# TestNetflix\n# -----------\n# pylint: disable=R0904\nclass TestNetflix(TestCase):\n    \"\"\"Unit Tests for Netflix.py\"\"\"\n    # ----\n    # read\n    # ----\n\n    def test_read_1(self):\n        \"\"\"Test netflix_read for a customer ID\"\"\"\n        string = '123\\n'\n        data_id, data_type = netflix_read(string)\n        self.assertEqual(data_id, 123)\n        self.assertEqual(data_type, 'customer')\n\n    def test_read_2(self):\n        \"\"\"Test netflix_read for a movie ID\"\"\"\n        string = '123:\\n'\n        data_id, data_type = netflix_read(string)\n        self.assertEqual(data_id, 123)\n        self.assertEqual(data_type, 'movie')\n\n    def test_read_3(self):\n        \"\"\"Test netflix_read for a string\"\"\"\n        string = 'abc\\n'\n        error = False\n        try:\n            netflix_read(string)\n        except ValueError:\n            error = True\n        self.assertTrue(error)\n\n    # ------\n    # pickle\n    # ------\n\n    def test_pickle_1(self):\n        \"\"\"Test pickle_load with array\"\"\"\n        name = 'test_pickle_1.p'\n        data_in = ['a', 'b', 'c']\n        with temp_pickle(data_in, name):\n            data_out = pickle_load(name, *['']*3)\n        self.assertEqual(data_in, data_out)\n\n    def test_pickle_2(self):\n        \"\"\"Test pickle_load with dict\"\"\"\n        name = 'test_pickle_2.p'\n        data_in = {'a': 1, 'b': 2, 'c': 3}\n        with temp_pickle(data_in, name):\n            data_out = pickle_load(name, *['']*3)\n        self.assertEqual(data_in, data_out)\n\n    def test_pickle_3(self):\n        \"\"\"Test pickle_load with int\"\"\"\n        name = 'test_pickle_3.p'\n        data_in = 123\n        with temp_pickle(data_in, name):\n            data_out = pickle_load(name, *['']*3)\n        self.assertEqual(data_in, data_out)\n\n    # ----\n    # load\n    # ----\n\n    def test_load_1(self):\n        \"\"\"Test netflix_cache with pickle load\"\"\"\n        files = ['total']\n        data_in = [3.2281371945000967]\n        data_out = netflix_load(files, [])\n        self.assertEqual(data_in, data_out)\n\n    def test_load_2(self):\n        \"\"\"Test netflix_cache with no pickle load\"\"\"\n        data_in = [123]\n        data_out = netflix_load('', data_in)\n        self.assertEqual(data_in, data_out)\n\n    # ----\n    # rmse\n    # ----\n\n    def test_rmse_1(self):\n        \"\"\"Test netflix_rmse for correct prediction\"\"\"\n        rmse = netflix_rmse([(1, 1), (5, 5)])\n        self.assertEqual(rmse, 0.0)\n\n    def test_rmse_2(self):\n        \"\"\"Test netflix_rmse for avg rating\"\"\"\n        rmse = round(netflix_rmse([(3.2, 1), (3.2, 5)]), 2)\n        self.assertEqual(rmse, 2.01)\n\n    def test_rmse_3(self):\n        \"\"\"Test netflix_rmse for invalid input\"\"\"\n        error = False\n        try:\n            netflix_rmse([(1,)])\n        except ValueError:\n            error = True\n        self.assertTrue(error)\n\n\n    # ----\n    # eval\n    # ----\n\n    def test_eval_1(self):\n        \"\"\"Test netflix_eval\"\"\"\n        rating, actual = netflix_eval(30878, 1)\n        self.assertEqual(rating, 3.7255000228804325)\n        self.assertEqual(actual, 4)\n\n    def test_eval_2(self):\n        \"\"\"Test netflix_eval\"\"\"\n        rating, actual = netflix_eval(1952305, 10)\n        self.assertEqual(rating, 2.9539170532434964)\n        self.assertEqual(actual, 3)\n\n    def test_eval_3(self):\n        \"\"\"Test netflix_eval\"\"\"\n        rating, actual = netflix_eval(1485175, 10012)\n        self.assertEqual(rating, 3.782384142757868)\n        self.assertEqual(actual, 3)\n\n    def test_eval_4(self):\n        \"\"\"Test netflix_eval\"\"\"\n        rating, actual = netflix_eval(430376, 10014)\n        self.assertEqual(rating, 3.2289684823453744)\n        self.assertEqual(actual, 3)\n\n    # -----\n    # print\n    # -----\n\n    def test_print_1(self):\n        \"\"\"Test netflix_print for an int\"\"\"\n        writer = StringIO()\n        netflix_print(writer, 10)\n        self.assertEqual(writer.getvalue(), '10\\n')\n\n    def test_print_2(self):\n        \"\"\"Test netflix_print for a string\"\"\"\n        writer = StringIO()\n        netflix_print(writer, 'abc')\n        self.assertEqual(writer.getvalue(), 'abc\\n')\n\n    def test_print_3(self):\n        \"\"\"Test netflix_print for 2 lines\"\"\"\n        writer = StringIO()\n        netflix_print(writer, 'abc\\n123')\n        self.assertEqual(writer.getvalue(), 'abc\\n123\\n')\n\n    # -----\n    # solve\n    # -----\n\n    def test_solve_1(self):\n        \"\"\"Test netflix_solve\"\"\"\n        reader = StringIO('1123:\\n448549\\n2444222\\n1522889\\n')\n        writer = StringIO()\n        netflix_solve(reader, writer)\n        self.assertEqual(writer.getvalue(), '1123:\\n3.3\\n2.9\\n4.1\\nRMSE: 0.4148335185443161\\n')\n\n    def test_solve_2(self):\n        \"\"\"Test netflix_solve\"\"\"\n        reader = StringIO('1:\\n1989766\\n\\n1989766\\n')\n        writer = StringIO()\n        error = False\n        try:\n            netflix_solve(reader, writer)\n        except ValueError:\n            error = True\n        self.assertTrue(error)\n\n    def test_solve_3(self):\n        \"\"\"Test netflix_solve\"\"\"\n        reader = StringIO('1:\\n1989766\\n')\n        writer = StringIO()\n        netflix_solve(reader, writer)\n        self.assertEqual(writer.getvalue(), '1:\\n4.0\\nRMSE: 0.0\\n')\n\n# ----\n# main\n# ----\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"cat3238-TestNetflix.py","file_name":"cat3238-TestNetflix.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"217649764","text":"import imutils\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.filters import threshold_local\r\nimport cv2\r\nimport pytesseract\r\nfrom pdf2image import convert_from_path\r\n\r\n\r\ndef order_points(pts):\r\n rect = np.zeros((4, 2), dtype=\"float32\")\r\n s = pts.sum(axis=1)\r\n rect[0] = pts[np.argmin(s)]\r\n rect[2] = pts[np.argmax(s)]\r\n diff = np.diff(pts, axis=1)\r\n rect[1] = pts[np.argmin(diff)]\r\n rect[3] = pts[np.argmax(diff)]\r\n return rect\r\n\r\n\r\ndef four_point_transform(image, pts):\r\n rect = order_points(pts)\r\n (tl, tr, br, bl) = rect\r\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n maxWidth = max(int(widthA), int(widthB))\r\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n maxHeight = max(int(heightA), int(heightB))\r\n dst = np.array([\r\n [0, 0],\r\n [maxWidth - 1, 0],\r\n [maxWidth - 1, maxHeight - 1],\r\n [0, maxHeight - 1]], dtype=\"float32\")\r\n M = cv2.getPerspectiveTransform(rect, dst)\r\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\r\n return warped\r\n\r\n\r\ndef Image_filter(img_address):\r\n image = cv2.imread(img_address)\r\n try:\r\n if image == None:\r\n image = plt.imread(img_address)\r\n except:\r\n pass\r\n image = cv2.copyMakeBorder(image.copy(), 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=[1, 1, 1])\r\n ratio = image.shape[0] / 500.0\r\n orig = image.copy()\r\n image = imutils.resize(image, height=500)\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\r\n edged = cv2.Canny(gray, 120, 200)\r\n cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = imutils.grab_contours(cnts)\r\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]\r\n\r\n for c in cnts:\r\n peri = cv2.arcLength(c, True)\r\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\r\n\r\n if len(approx) == 4:\r\n screenCnt = approx\r\n break\r\n\r\n cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\r\n warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\r\n warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\r\n T = threshold_local(warped, 9, offset=7, method=\"gaussian\")\r\n warped = (warped > T).astype(\"uint8\") * 255\r\n img = warped.copy()\r\n kernel = np.ones((1, 1), np.uint8)\r\n img = cv2.dilate(img, kernel, iterations=1)\r\n img = cv2.erode(img, kernel, iterations=1)\r\n img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\r\n plt.imshow(img, cmap='gray')\r\n plt.show()\r\n\r\n\r\n\r\nImage_filter(\"C:\\\\Users\\\\Shiv\\\\Downloads\\\\CamScanner\\\\images\\\\Sample Pages\\\\eng_5.jpg\")\r\n","sub_path":"First_try/image_filter.py","file_name":"image_filter.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"523908668","text":"from rest_framework import serializers\n\nfrom .models import User, Interest\n\nINFO = [\n 'email',\n 'username',\n 'is_active',\n 'is_admin',\n 'date_of_birth',\n 'role',\n 'first_name',\n 'last_name',\n 'joined_on',\n 'last_login',\n]\nDISPLAY_SETTINGS = [\n 'display_occupation_to',\n 'display_occupation',\n 'display_location_to',\n 'display_location',\n 'display_date_of_birth_to',\n 'display_date_of_birth',\n 'display_full_name_to',\n 
'display_full_name',\n]\nPROFILE = [\n 'location',\n 'tag',\n 'bio',\n 'friends',\n 'occupation',\n 'interests',\n 'board_count',\n 'avatar',\n 'avatar_thumbnail',\n 'primary_color',\n 'secondary_color',\n]\n\n\nclass InterestSerializer(serializers.ModelSerializer):\n class Meta:\n model = Interest\n fields = '__all__'\n\n\nclass QuickUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\n 'id',\n 'username',\n 'avatar_thumbnail',\n 'primary_color',\n 'secondary_color'\n ]\n\n\nclass LoginSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\n 'username',\n 'isAuthenticated',\n 'online',\n 'avatar',\n 'avatar_thumbnail',\n 'dailyChance',\n 'is_staff',\n 'auth_token',\n 'primary_color',\n 'secondary_color',\n 'shinies',\n 'muns'\n ]\n\n\nclass ColorSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\n 'primary_color',\n 'secondary_color',\n ]\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\n 'email',\n 'username',\n 'date_of_birth',\n 'password'\n ]\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n\n first_name = serializers.SerializerMethodField()\n last_name = serializers.SerializerMethodField()\n date_of_birth = serializers.SerializerMethodField()\n location = serializers.SerializerMethodField()\n occupation = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = INFO + PROFILE\n\n def get_first_name(self, user):\n display = user.display_full_name\n return None if not display else user.first_name\n\n def get_last_name(self, user):\n display = user.display_full_name\n return None if not display else user.last_name\n\n def get_date_of_birth(self, user):\n display = user.display_date_of_birth\n return None if not display else user.date_of_birth\n\n def get_location(self, user):\n display = user.display_location\n return None if not display else user.location\n\n def get_occupation(self, user):\n display = user.display_occupation\n return None if not display else user.occupation\n\n\nclass OwnProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n # fields = '__all__'\n exclude = ['password', ]\n\n interests = InterestSerializer(read_only=True, many=True)\n board_count = serializers.SerializerMethodField()\n friends = QuickUserSerializer(many=True)\n\n def get_board_count(self, user):\n return user.board_count\n \n\nclass DailyChanceSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['dailyChance', 'dailyChanceDate']\n","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"97490293","text":"import os\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom sphinx.util.osutil import copyfile\nfrom distutils.dir_util import copy_tree\n\n\nclass ExampleCodeDirective(Directive):\n has_content = True\n\n def run(self):\n self.assert_has_content()\n text = '\\n'.join(self.content)\n node = nodes.container(text)\n node['classes'].append('example-code')\n self.add_name(node)\n self.state.nested_parse(self.content, self.content_offset, node)\n return [node]\n\n\ndef add_assets(app):\n app.add_css_file('main.css')\n app.add_js_file('codeexample.js')\n\n\ndef copy_assets(app, exception=None):\n if app.builder.name != 'html' or exception:\n return\n dest = os.path.join(app.builder.outdir, 
'_static/style')\n source = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"_static/style\")\n copy_tree(source, dest)\n dest = os.path.join(app.builder.outdir, '_static/main.css')\n source = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"_static/main.css\")\n copyfile(source, dest)\n dest = os.path.join(app.builder.outdir, '_static/codeexample.js')\n source = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"_static/codeexample.js\")\n copyfile(source, dest)\n\n\ndef setup(app):\n app.add_directive('example-code', ExampleCodeDirective)\n app.connect('builder-inited', add_assets)\n app.connect('build-finished', copy_assets)\n return {'version': '0.1'}\n","sub_path":"source/codeexample.py","file_name":"codeexample.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"536957389","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreate Time: 2019/8/30 下午6:13\nAuthor: ybx\n\"\"\"\nimport os\nimport sys\nimport pydicom\nimport pandas as pd\n\n\npath = \"/media/tx-eva-data/Data4/CE_DATA/基础数据库\"\nid_list = []\nKVP_list = []\nXRayTubeCurrent_list = []\nbodypart_list = []\nconvolutionKernel_list = []\nslice_list = []\ndose_list = []\nage_list = []\nManufacturer_list = []\nManufacturerModelName_list = []\nsex_list = []\nExposure_list = []\nwc_list = []\nww_list = []\nLargestImage = []\nstudydate_list = []\nstudytime_list = []\nSmallestImage_list = []\nhospital_name_list = []\nPixelSpacing_list = []\nPatientDate_list = []\naccession_list = []\nWCWW_list = []\nstudyuid_list = []\nseruid_list = []\nSmallest_list = []\nLargest_list = []\ncount = 0\nfolder_List = []\nsop_list = []\n\nfor folder in os.listdir(path):\n h_folder_path = os.path.join(path,folder)\n for sfile in os.listdir(h_folder_path):\n t_folder_path = os.path.join(h_folder_path, sfile)\n if sfile == 'anno' or sfile.endswith('.txt'):\n continue\n for tfile in os.listdir(t_folder_path):\n dcm_folder_path = os.path.join(t_folder_path, tfile)\n\n for dcm_file in os.listdir(dcm_folder_path):\n count += 1\n dcm_file_path = os.path.join(dcm_folder_path,dcm_file)\n ds = pydicom.read_file(dcm_file_path,force=True,stop_before_pixels=True)\n try:\n PatientAge = ds.PatientAge\n except:\n PatientAge = \"N/A\"\n try:\n kvp = ds.KVP\n except:\n kvp = \"N/A\"\n try:\n XRayTubeCurrent = ds.XRayTubeCurrent\n except:\n XRayTubeCurrent = \"N/A\"\n try:\n convolutionKernel = ds.ConvolutionKernel\n except:\n convolutionKernel = \"N/A\"\n try:\n sliceThickness = ds.SliceThickness\n except:\n sliceThickness = \"N/A\"\n try:\n PixelSpacing = ds.PixelSpacing\n except:\n PixelSpacing = \"N/A\"\n try:\n Manufacturer = ds.Manufacturer\n except:\n Manufacturer = \"N/A\"\n try:\n ManufacturerModelName = ds.ManufacturerModelName\n except:\n ManufacturerModelName = \"N/A\"\n try:\n PatientSex = ds.PatientSex\n except:\n PatientSex = \"N/A\"\n try:\n ID = ds.PatientID\n except:\n ID = \"N/A\"\n try:\n AccessionNumber = ds.AccessionNumber\n except:\n AccessionNumber = \"N/A\"\n try:\n StudyDate = ds.StudyDate\n except:\n StudyDate = \"N/A\"\n StudyTime = ds.StudyTime\n WindowCenter = str(ds.WindowCenter)\n WindowWidth = str(ds.WindowWidth)\n StudyInstanceUID = ds.StudyInstanceUID\n SeriesInstanceUID = ds.SeriesInstanceUID\n SOPInstanceUID = ds.SOPInstanceUID\n try:\n PatientDate = ds.PatientBirthDate\n except:\n PatientDate = \"N/A\"\n try:\n BodyPartExamined = ds.BodyPartExamined\n except:\n BodyPartExamined = \"N/A\"\n\n try:\n LargestImagePixelValue = 
ds.LargestImagePixelValue\n except:\n LargestImagePixelValue = \"N/A\"\n try:\n SmallestImagePixelValue = ds.SmallestImagePixelValue\n except:\n SmallestImagePixelValue = \"N/A\"\n try:\n WW = ds.WindowCenterWidthExplanation\n except:\n WW = \"N/A\"\n id_list.append(str(ID))\n folder_List.append(folder)\n sop_list.append(SOPInstanceUID)\n accession_list.append(AccessionNumber)\n sex_list.append(PatientSex)\n age_list.append(PatientAge)\n PatientDate_list.append(PatientDate)\n studydate_list.append(StudyDate)\n studytime_list.append(StudyTime)\n studyuid_list.append(StudyInstanceUID)\n seruid_list.append(SeriesInstanceUID)\n Manufacturer_list.append(Manufacturer)\n ManufacturerModelName_list.append(ManufacturerModelName)\n bodypart_list.append(BodyPartExamined)\n slice_list.append(sliceThickness)\n KVP_list.append(kvp)\n XRayTubeCurrent_list.append(XRayTubeCurrent)\n convolutionKernel_list.append(convolutionKernel)\n PixelSpacing_list.append(PixelSpacing)\n Smallest_list.append(SmallestImagePixelValue)\n Largest_list.append(LargestImagePixelValue)\n wc_list.append(WindowCenter)\n ww_list.append(WindowWidth)\n WCWW_list.append(WW)\n break\nwriter = pd.ExcelWriter(path+'_info.xls',encoding = 'unicode_escape')\ndf = pd.DataFrame(data={'ID':id_list,'Age':age_list,'AccessionNumber':accession_list,\n 'ConvolutionKernel':convolutionKernel_list,'originalFolderName':folder_List,\n 'KVP':KVP_list,'XRayTubeCurrent':XRayTubeCurrent_list,'SOPInstanceUID':sop_list,\n 'Manufacturer':Manufacturer_list,'ManufacturerModelName':ManufacturerModelName_list,\n 'PatientSex':sex_list,'PixelSpacing':PixelSpacing_list,\n 'PatientBirthDate':PatientDate_list,'WindowCenter':wc_list,'WindowWidth':ww_list,\n 'WindowCenter&WidthExplanation':WCWW_list,\"StudyDate\":studydate_list,'StudyTime':studytime_list,\n 'LargestImagePixelValue':Largest_list,'SmallestImagePixelValue':Smallest_list,\n 'BodyPartExamined':bodypart_list,\"SliceThickness\":slice_list})\n\ndf.to_excel(writer,index=False)\nwriter.save()\nprint(count)","sub_path":"com/infervision/deal_excel/get_all_information.py","file_name":"get_all_information.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"425782181","text":"import argparse\nimport sys\nimport os\nimport yaml\nfrom pathlib import Path\nfrom csv import DictReader\nfrom operator import itemgetter, attrgetter\nfrom itertools import groupby\n\nfrom genetracks import Figure, Track, Multitrack, Coverage\nimport micall.core.plot_contigs as plot_contigs\n\n\ndef main(args):\n with open(args.blast_csv) as f:\n plot_blast(f, args.fasta, args.hivseqinr_results)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'blast_csv',\n type=Path\n )\n parser.add_argument('fasta', type=Path)\n parser.add_argument('hivseqinr_results', type=Path)\n args = parser.parse_args()\n return args\n\n\ndef get_contig_names(fasta):\n contig_nums = {} # {contig_num: contig_name}\n with open(fasta) as f:\n for line in f:\n if line.startswith('>'):\n contig_name = line[1:-1]\n contig_nums[len(contig_nums) + 1] = contig_name\n return contig_nums\n\n\ndef get_landmark_tracks():\n tracks = []\n refname = 'HIV1-B-FR-K03455-seed'\n landmarks_path = (Path(os.path.realpath(__file__)).parent.parent / 'data' / 'landmark_references.yaml')\n landmark_groups = yaml.safe_load(landmarks_path.read_text())\n for reference_set in landmark_groups:\n if reference_set['coordinates'] != refname:\n continue\n prev_landmark = 
None\n for i, landmark in enumerate(sorted(reference_set['landmarks'],\n key=itemgetter('start'))):\n landmark.setdefault('frame', 0)\n if prev_landmark and 'end' not in prev_landmark:\n prev_landmark['end'] = landmark['start'] - 1\n prev_landmark = landmark\n for frame, frame_landmarks in groupby(reference_set['landmarks'],\n itemgetter('frame')):\n subtracks = []\n for landmark in frame_landmarks:\n subtracks.append(Track(landmark['start'],\n landmark['end'],\n label=landmark['name'],\n color=landmark['colour']))\n tracks.append(Multitrack(subtracks))\n break\n return tracks\n\n\ndef get_colors():\n colors = {\n 'Intact': 'green',\n\t'5DEFECT': 'purple',\n\t'5DEFECT_GagNoATGGagFailed': 'purple',\n\t'5DEFECT_GagNoATGGagPassed': 'purple',\n\t'5DFECT_IntoGag': '',\n\t'Hypermut': 'red',\n\t'InternalInversion': 'purple',\n\t'LargeDeletion': 'orange',\n\t'NonHIV': 'purple',\n\t'PrematureStop_OR_AAtooLong_OR_AAtooShort': 'purple',\n }\n\n return colors\n\n\ndef get_states(hivseqinr_results):\n results = {}\n with open(hivseqinr_results, newline='') as csvfile:\n reader = DictReader(csvfile)\n for row in reader:\n results[row['SEQID']] = row['MyVerdict']\n return results\n\n\ndef plot_blast(blast_csv, fasta, hivseqinr_results):\n states = get_states(hivseqinr_results)\n padding = 2\n colors = get_colors()\n reader = DictReader(blast_csv)\n visited = set()\n figure = Figure(track_height=1)\n #figure.add(Track(1, 100, label='No contigs found', color='none'))\n contig_names = get_contig_names(fasta)\n with open('./contig_names', 'w') as f:\n yaml.dump(contig_names, f)\n\n min_start = 638\n max_end = 9604\n\n landmark_tracks = get_landmark_tracks()\n for track in landmark_tracks:\n figure.add(track)\n #for row in reader:\n # print(contig_names[int(row['contig_num'])])\n multitracks = {\n 'intact': {\n 'data': [],\n 'color': 'green'\n },\n 'hypermut': {\n 'data': [],\n 'color': 'red'\n },\n 'largedel': {\n 'data': [],\n 'color': 'orange'\n },\n 'other_defect': {\n 'data': [],\n 'color': 'brown'\n }\n }\n for contig_num, group in groupby(reader, itemgetter('contig_num')):\n subtracks = []\n group = merge_group(group)\n for block in group:\n state = states[contig_names[int(contig_num)]]\n block_track = Track(\n block[0],\n block[1],\n\t\tcolor=colors[state],\n h=0.001\n )\n subtracks.append(block_track)\n# label_track = Track(\n# 1,\n# 9000,\n# label=contig_names[int(contig_num)],\n# color='none'\n# )\n# subtracks.append(label_track)\n multitrack = Multitrack(subtracks)\n if state == 'Intact':\n multitracks['intact']['data'].append(multitrack)\n elif state == 'Hypermut':\n multitracks['hypermut']['data'].append(multitrack)\n elif state == 'LargeDeletion':\n multitracks['largedel']['data'].append(multitrack)\n else:\n multitracks['other_defect']['data'].append(multitrack)\n #figure.add(multitrack)\n for _type in ('intact', 'hypermut', 'other_defect', 'largedel'):\n for track in multitracks[_type]['data']:\n figure.add(track, padding=padding)\n figure.show(w=970).saveSvg('./saved_figure.svg')\n\n\ndef merge_group(group):\n min_start = 638\n max_end = 9604\n target = 6623\n window = 50\n skip_next = False\n merged = []\n sorted_group = sorted(group, key=lambda x: int(x['ref_start']))\n for i, block in enumerate(sorted_group):\n if skip_next:\n skip_next = False\n continue\n if (\n int(block['ref_start']) < min_start\n ):\n continue\n try:\n next_block = sorted_group[i+1]\n except IndexError:\n pass\n if target-window <= int(block['ref_end']) <= target+window:\n new_block = (\n int(block['ref_start']),\n 
int(next_block['ref_end'])\n )\n skip_next = True\n merged.append(new_block)\n continue\n new_block = (\n int(block['ref_start']),\n min(int(block['ref_end']), max_end)\n )\n merged.append(new_block)\n return merged\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n","sub_path":"simpleplot.py","file_name":"simpleplot.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"314468608","text":"#! Deprecated\n\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import Int8MultiArray\nfrom turtlebot import *\nfrom constants import *\nfrom lib_lidar import *\n\n\n# * Variables\nparking_LEFT = 1\nparking_RIGHT = 2\nparking_enable = False # True : arrive at parking spot / False : not yet\ntmp = 1\n\n\n# * parking function!\ndef make_delay():\n TURTLE.set_angular(0)\n TURTLE.set_speed_by_percentage(0)\n TURTLE.move()\n for i in range(0,10,1):\n rospy.sleep(rospy.Duration(0.1))\n\n\ndef parking_move(parking_direction): \n parking_angles = [-0.4, 0, 0, 0, -1, 0]\n parking_speeds = [0.8, 1, 0, -1, 0, 1]\n parking_times = [4, 0.4, 1.1, 2.2, 1.6, 3]\n\n make_delay()\n if parking_direction == parking_RIGHT:\n TURTLE.set_angular(-0.4)\n TURTLE.set_speed_by_percentage(0.8)\n else:\n TURTLE.set_angular(0.4)\n TURTLE.set_speed_by_percentage(0.8)\n TURTLE.move()\n rospy.sleep(rospy.Duration(4))\n for i in range(0, 5, 1):\n make_delay()\n TURTLE.set_angular(parking_angles[i])\n TURTLE.set_speed_by_percentage(parking_speeds[i])\n TURTLE.move()\n rospy.sleep(rospy.Duration(parking_times[i]))\n\n make_delay()\n\n # go right parking spot, stop, go back, turn right, stop\n\n\n\n# * Thread function!\ndef parking_control(lidar):\n global parking_enable; global parking_space; global LEFT; global RIGHT\n set_lidar_values(lidar)\n\n #print(\"right : \",right_distance)\n #print(\"left : \",left_distance)\n\n # select parking space : left or right \n print(\"============================\")\n for i in range(30,40,1):\n distance = lidar.ranges[i]\n if distance > 0.7:\n distance = 0\n elif distance < 0.5:\n parking_enable = True\n parking_move(parking_RIGHT)\n \n if IS_DEBUG_MODE == True:\n print(\"right parking start!\")\n print(str(i) + \" \" + str(distance))\n\n\n for i in range(320,330,1):\n distance = lidar.ranges[i]\n if distance > 0.7:\n distance = 0\n elif distance < 0.5:\n parking_enable = True\n parking_move(parking_LEFT)\n\n if IS_DEBUG_MODE == True:\n print(\"left parking start!\")\n print(str(i) + \" \" + str(distance))\n\n print(\"============================\")\n ","sub_path":"packages/galapagos/scripts/_lib_parking.py","file_name":"_lib_parking.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"1615595","text":"# -*- coding: utf-8 -*-\n# Copyright 2015 Alex Woroschilow (alex.woroschilow@gmail.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport inject\nfrom logging import getLogger\n\n# from .gui.preview.widget import PreviewScrollArea\nfrom .gui.preview.list import 
PreviewScrollArea\n\n\nclass ModuleActions(object):\n\n @inject.params(search='search', storage='storage', window='window', status='status')\n def onActionSearchRequest(self, widget=None, search=None, storage=None, window=None, status=None):\n\n text = widget.text()\n if len(text) == 0:\n return None\n\n preview = PreviewScrollArea(window)\n preview.editAction.connect(self.onActionEditRequest)\n\n index = 0\n for index, path in enumerate(search.search(widget.text()), start=1):\n preview.addPreview(storage.index(path))\n\n status.info(\"Search request: '{}', {} records found\".format(text, index))\n\n title = text if len(text) <= 25 else \\\n \"{}...\".format(text[0:22])\n\n window.tab.emit((preview, title))\n\n @inject.params(storage='storage', window='window', dashboard='notepad.dashboard')\n def onActionEditRequest(self, index, storage, window, dashboard):\n try:\n\n if index is None: return None\n\n if storage.isDir(index):\n dashboard.group(index)\n window.tabSwitch.emit(0)\n return None\n\n if storage.isFile(index):\n dashboard.note(index)\n window.tabSwitch.emit(0)\n return None\n\n return None\n\n except Exception as ex:\n logger = getLogger('search')\n logger.exception(ex)\n\n @inject.params(storage='storage', search='search')\n def onNoteCreated(self, index, storage, search):\n\n try:\n # update search index only after\n # the update was successful\n name = storage.fileName(index)\n path = storage.filePath(index)\n\n content = storage.fileContent(index)\n if content is None or not len(content):\n return None\n\n search.append(name, path, content)\n\n except Exception as ex:\n logger = getLogger('search')\n logger.exception(ex)\n\n @inject.params(storage='storage', search='search')\n def onNoteUpdated(self, index, storage, search):\n\n try:\n # update search index only after\n # the update was successful\n name = storage.fileName(index)\n path = storage.filePath(index)\n\n content = storage.fileContent(index)\n if content is None or not len(content):\n return None\n\n search.update(name, path, content)\n\n except Exception as ex:\n logger = getLogger('search')\n logger.exception(ex)\n\n @inject.params(storage='storage', search='search')\n def onNoteRemoved(self, index, storage, search):\n\n try:\n # update search index only after\n # the update was successful\n path = storage.filePath(index)\n if path is None or not len(path):\n return None\n\n search.remove(path)\n\n except Exception as ex:\n logger = getLogger('search')\n logger.exception(ex)\n","sub_path":"src/main/python/modules/window_notepad_search/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"45920363","text":"import random\r\nfrom tkinter import Frame, Label, CENTER\r\nfrom collections import namedtuple\r\nimport logic\r\nimport constants as c\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom itertools import count\r\nimport numpy as np\r\nimport time\r\nimport math\r\nkey_map = {\r\n 0: \"'w'\",\r\n 1: \"'s'\",\r\n 2: \"'a'\",\r\n 3: \"'d'\"\r\n\r\n}\r\n\r\nclass DQN(nn.Module):\r\n def __init__(self,device):\r\n super().__init__()\r\n self.fc1=nn.Linear(in_features=16,out_features=128)\r\n self.fc2=nn.Linear(in_features=128,out_features=128)\r\n self.fc3=nn.Linear(in_features=128,out_features=64)\r\n self.out=nn.Linear(in_features=64,out_features=4)\r\n self.device=device\r\n def forward(self,t):\r\n 
t=torch.tensor(t).to(self.device)\r\n t = F.relu(self.fc1(t))\r\n t = F.relu(self.fc2(t))\r\n t = F.relu(self.fc3(t))\r\n t = self.out(t)\r\n return t\r\n\r\n\r\nExperience = namedtuple(\r\n 'Experience',\r\n ('state', 'action', 'next_state', 'reward')\r\n)\r\n\r\n\r\nclass ReplayMemory():\r\n def __init__(self, capacity):\r\n self.capacity = capacity\r\n self.memory = []\r\n self.push_count = 0\r\n\r\n def push(self, experience):\r\n if len(self.memory) < self.capacity:\r\n self.memory.append(experience)\r\n else:\r\n self.memory[self.push_count % self.capacity] = experience\r\n self.push_count += 1\r\n\r\n def sample(self, batch_size):\r\n return random.sample(self.memory, batch_size)\r\n\r\n def can_provide_sample(self, batch_size):\r\n return len(self.memory) >= batch_size\r\n\r\n\r\nclass EpsilonGreedyStrategy():\r\n def __init__(self, start, end, decay):\r\n self.start = start\r\n self.end = end\r\n self.decay = decay\r\n\r\n def get_exploration_rate(self, current_step):\r\n return self.end + (self.start - self.end) * math.exp(-1. * current_step * self.decay)\r\n\r\n\r\nclass Agent():\r\n def __init__(self, strategy, num_actions, device):\r\n self.current_step = 0\r\n self.strategy = strategy\r\n self.num_actions = num_actions\r\n self.device = device\r\n\r\n def select_action(self, state, policy_net):\r\n rate = strategy.get_exploration_rate(self.current_step)\r\n self.current_step += 1\r\n if rate > random.random():\r\n action = random.randrange(self.num_actions)\r\n return torch.tensor([action]).to(self.device) # explore\r\n else:\r\n with torch.no_grad():\r\n temp = (policy_net(state).argmax(dim=0).to(self.device))\r\n temp=temp.cpu()\r\n temp = temp.numpy()\r\n temp = int(temp)\r\n return torch.tensor([temp]).to(self.device) # exploit\r\n\r\nclass GameGrid(Frame):\r\n\r\n def __init__(self):\r\n # Frame.__init__(self)\r\n self.score = 0\r\n self.game = Frame()\r\n self.game.grid()\r\n self.game.master.title('2048')\r\n # self.master.bind(\"\", self.key_down)\r\n\r\n # self.gamelogic = gamelogic\r\n self.game.commands = {c.KEY_UP: logic.up, c.KEY_DOWN: logic.down,\r\n c.KEY_LEFT: logic.left, c.KEY_RIGHT: logic.right,\r\n c.KEY_UP_ALT: logic.up, c.KEY_DOWN_ALT: logic.down,\r\n c.KEY_LEFT_ALT: logic.left, c.KEY_RIGHT_ALT: logic.right,\r\n c.KEY_H: logic.left, c.KEY_L: logic.right,\r\n c.KEY_K: logic.up, c.KEY_J: logic.down}\r\n\r\n self.game.grid_cells = []\r\n self.init_grid(self.game)\r\n self.init_matrix()\r\n self.update_grid_cells(self.game)\r\n\r\n def render(self):\r\n\r\n self.game.update_idletasks()\r\n self.game.update()\r\n time.sleep(0.01)\r\n\r\n time.sleep(0.01)\r\n\r\n def init_grid(self, game):\r\n background = Frame(game, bg=c.BACKGROUND_COLOR_GAME,\r\n width=c.SIZE, height=c.SIZE)\r\n background.grid()\r\n\r\n for i in range(c.GRID_LEN):\r\n grid_row = []\r\n for j in range(c.GRID_LEN):\r\n cell = Frame(background, bg=c.BACKGROUND_COLOR_CELL_EMPTY,\r\n width=c.SIZE / c.GRID_LEN,\r\n height=c.SIZE / c.GRID_LEN)\r\n cell.grid(row=i, column=j, padx=c.GRID_PADDING,\r\n pady=c.GRID_PADDING)\r\n t = Label(master=cell, text=\"\",\r\n bg=c.BACKGROUND_COLOR_CELL_EMPTY,\r\n justify=CENTER, font=c.FONT, width=5, height=2)\r\n t.grid()\r\n grid_row.append(t)\r\n\r\n game.grid_cells.append(grid_row)\r\n\r\n def gen(self):\r\n return random.randint(0, c.GRID_LEN - 1)\r\n\r\n def init_matrix(self):\r\n self.matrix = logic.new_game(c.GRID_LEN)\r\n self.history_matrixs = list()\r\n self.matrix = logic.add_two(self.matrix)\r\n self.matrix = logic.add_two(self.matrix)\r\n\r\n def 
update_grid_cells(self, game):\r\n for i in range(c.GRID_LEN):\r\n for j in range(c.GRID_LEN):\r\n new_number = self.matrix[i][j]\r\n if new_number == 0:\r\n game.grid_cells[i][j].configure(\r\n text=\"\", bg=c.BACKGROUND_COLOR_CELL_EMPTY)\r\n else:\r\n game.grid_cells[i][j].configure(text=str(\r\n new_number), bg=c.BACKGROUND_COLOR_DICT[new_number],\r\n fg=c.CELL_COLOR_DICT[new_number])\r\n game.update_idletasks()\r\n\r\n def num_actions_available(self):\r\n return 4\r\n\r\n def get_state(self):\r\n flat = np.array(self.matrix).flatten().astype(np.float32())\r\n return torch.from_numpy(flat)\r\n\r\n def key_down(self, event):\r\n key = event\r\n game_done = False\r\n game_result = False\r\n temp = 0\r\n current_state = self.matrix\r\n\r\n if key == c.KEY_BACK and len(self.history_matrixs) > 1:\r\n self.matrix = self.history_matrixs.pop()\r\n self.update_grid_cells()\r\n print('back on step total step:', len(self.history_matrixs))\r\n\r\n elif key in self.game.commands:\r\n self.matrix, done = self.game.commands[key](self.matrix)\r\n\r\n if done:\r\n self.matrix = logic.add_two(self.matrix)\r\n # record last move\r\n self.history_matrixs.append(self.matrix)\r\n self.update_grid_cells(self.game)\r\n\r\n done = False\r\n if logic.game_state(self.matrix) == 'win':\r\n game_done = True\r\n game_result = True\r\n print(\"WON\")\r\n self.game.grid_cells[1][1].configure(\r\n text=\"You\", bg=c.BACKGROUND_COLOR_CELL_EMPTY)\r\n self.game.grid_cells[1][2].configure(\r\n text=\"Win!\", bg=c.BACKGROUND_COLOR_CELL_EMPTY)\r\n if logic.game_state(self.matrix) == 'lose':\r\n game_done = True\r\n game_result = False\r\n self.game.grid_cells[1][1].configure(\r\n text=\"You\", bg=c.BACKGROUND_COLOR_CELL_EMPTY)\r\n self.game.grid_cells[1][2].configure(\r\n text=\"Lose!\", bg=c.BACKGROUND_COLOR_CELL_EMPTY)\r\n if (game_done and game_result):\r\n self.score = sum(np.array(self.matrix).flatten())\r\n elif (game_done and not (game_result)):\r\n self.score = -1 * sum(np.array(self.matrix).flatten())\r\n else:\r\n if (self.score != sum(np.array(self.matrix).flatten())):\r\n\r\n for i in range(4):\r\n for j in range(4):\r\n if (self.matrix[i][j] == 2 * current_state[i][j]):\r\n temp += self.matrix[i][j]\r\n self.score = sum(np.array(self.matrix).flatten())\r\n return self.matrix, game_done, temp\r\n\r\n else:\r\n\r\n # print(0)\r\n return self.matrix, game_done, -1\r\n\r\n return self.matrix, game_done, self.score\r\n\r\n def generate_next(self):\r\n index = (self.gen(), self.gen())\r\n while self.matrix[index[0]][index[1]] != 0:\r\n index = (self.gen(), self.gen())\r\n self.matrix[index[0]][index[1]] = 2\r\n\r\n\r\ndef extract_tensors(experiences):\r\n # Convert batch of Experiences to Experience of batches\r\n batch = Experience(*zip(*experiences))\r\n\r\n t1 = torch.stack(batch.state)\r\n t2 = torch.cat(batch.action)\r\n t3 = torch.cat(batch.reward)\r\n t4 = torch.stack(batch.next_state)\r\n\r\n return (t1, t2, t3, t4)\r\nclass QValues():\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n @staticmethod\r\n def get_current(policy_net, states, actions):\r\n temp = policy_net(states)\r\n print(\"c111111111111111111111111111111\")\r\n print(actions.shape)\r\n print(temp)\r\n return temp.gather(dim=1, index=actions.unsqueeze(-1))\r\n\r\n @staticmethod\r\n def get_next(target_net, next_states):\r\n batch_size = next_states.shape[0]\r\n non_final_state_locations = torch.zeros(batch_size, dtype=torch.bool).to(QValues.device)\r\n for i in range(len(next_states)):\r\n if (1024 in 
next_states[i]):\r\n non_final_state_locations[i] = False\r\n else:\r\n non_final_state_locations[i] = True\r\n non_final_states = next_states[non_final_state_locations]\r\n values = torch.zeros(batch_size).to(QValues.device)\r\n values[non_final_state_locations] = target_net(non_final_states).max(dim=1)[0].detach()\r\n\r\n return values\r\n\r\n\r\n\r\nbatch_size = 1\r\ngamma = 0.999\r\neps_start = 1\r\neps_end = 0.01\r\neps_decay = 0.001\r\ntarget_update = 100\r\nmemory_size = 100000\r\nlr = 0.1\r\nnum_episodes = 1000\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\nstrategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay)\r\n\r\nagent = Agent(strategy, 4, device)\r\nmemory = ReplayMemory(memory_size)\r\n\r\npolicy_net = DQN(device).to(device)\r\ntarget_net = DQN(device).to(device)\r\n\r\ntarget_net.load_state_dict(policy_net.state_dict())\r\ntarget_net.eval()\r\n\r\noptimizer = optim.Adam(params=policy_net.parameters(), lr=lr)\r\n\r\nepisode_durations = []\r\n\r\nfor episode in range(num_episodes):\r\n em = GameGrid()\r\n state = em.get_state()\r\n for timestep in count():\r\n em.render()\r\n action = agent.select_action(state, policy_net)\r\n next_state, done, reward = em.key_down(key_map[action.item()])\r\n next_state=em.get_state()\r\n memory.push(Experience(state, action, next_state, torch.tensor([reward], device=device,dtype=torch.float32)))\r\n state = next_state\r\n if memory.can_provide_sample(batch_size):\r\n experiences = memory.sample(batch_size)\r\n states, actions, rewards, next_states = extract_tensors(experiences)\r\n\r\n current_q_values = QValues.get_current(policy_net, states, actions)\r\n next_q_values = QValues.get_next(target_net, next_states)\r\n target_q_values = (next_q_values * gamma) + rewards\r\n\r\n loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1))\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n if done: \r\n em.game.destroy()\r\n episode_durations.append(timestep)\r\n print(timestep)\r\n\r\n break\r\n if episode % target_update == 0:\r\n target_net.load_state_dict(policy_net.state_dict())\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":11645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"395894165","text":"# -*- encoding: utf-8 -*-\r\n\"\"\"\r\nMain test targets:\r\n Simulate calls to the API endpoints\r\nUsage:\r\n 1. Edit the TEST_OPTION parameter to select the test modules\r\n 2. Run the script /data/code/cy_devops/bin/python3 /data/www/cmdb/api_web/tests.py\r\n\"\"\"\r\nimport requests\r\nimport os\r\nimport django\r\nimport uuid\r\nimport sys\r\nimport json\r\nimport random\r\nfrom cmdb.settings import PRODUCTION_ENV\r\n\r\npathname = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.insert(0, pathname)\r\nsys.path.insert(0, os.path.abspath(os.path.join(pathname, '..')))\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cmdb.settings\")\r\ndjango.setup()\r\n\r\nCMDB_URL = 'http://127.0.0.1:8000/api_web/'\r\nTOKEN = '431b65c0a00dfa00399a8e36c47f54ad5d3686d5'\r\n\"\"\"\r\n1: game server offline schedule\r\n2: modify server opening time\r\n3: receive web maintenance info\r\n\"\"\"\r\nTEST_OPTION = (3,)\r\n\r\n\r\nclass TEST_CMDB_API(object):\r\n \"\"\"Test helper class for the cmdb API\"\"\"\r\n\r\n def __init__(self):\r\n self.token = TOKEN\r\n self.url = CMDB_URL\r\n\r\n def get_api(self, url, query_param):\r\n \"\"\"Send a GET request\"\"\"\r\n msg = 'ok'\r\n success = True\r\n try:\r\n res = requests.get(url + query_param, headers={'Authorization': 'token {}'.format(self.token)})\r\n if res.status_code == 200:\r\n r = res.json()\r\n print(r)\r\n if r.get('resp', 1) != 1 or not r.get('success', True):\r\n raise Exception(r.get('reason', '') + r.get('msg', ''))\r\n else:\r\n raise Exception(res.status_code)\r\n except Exception as e:\r\n msg = str(e)\r\n success = False\r\n finally:\r\n return success, msg\r\n\r\n def post_api(self, url, post_data, token=TOKEN):\r\n \"\"\"Send a POST request\"\"\"\r\n msg = 'ok'\r\n success = True\r\n try:\r\n headers = {'Accept': 'application/json', 'Authorization': 'Token ' + token}\r\n res = requests.post(url, data=post_data, headers=headers, timeout=60, verify=False)\r\n if res.status_code == 200:\r\n r = res.json()\r\n print(r)\r\n if r.get('resp', 1) != 1 or not r.get('success', True):\r\n raise Exception(r.get('reason', '') + r.get('msg', ''))\r\n else:\r\n raise Exception(res.status_code)\r\n except Exception as e:\r\n msg = str(e)\r\n success = False\r\n finally:\r\n return success, msg\r\n\r\n def test_gameserveroff_create(self):\r\n \"\"\"Test creating a game server offline schedule\"\"\"\r\n result = True\r\n api_name = 'Create game server offline schedule-'\r\n msg = api_name + 'API call succeeded'\r\n try:\r\n url = CMDB_URL + 'GameServerOff.Create/'\r\n post_data = {\r\n \"project\": \"ssss\",\r\n \"area\": \"cn\",\r\n \"srv_id\": '[\"5800001\", \"2100000023\"]', # web server id\r\n \"off_time\": \"1381419601\",\r\n \"web_callback_url\": \"https://xxxxxx/\",\r\n }\r\n success, reason = self.post_api(url, post_data)\r\n if not success:\r\n raise Exception(reason)\r\n except Exception as e:\r\n msg = api_name + 'API call failed: ' + str(e)\r\n result = False\r\n finally:\r\n return result, msg\r\n\r\n def test_gameserveroff_delete(self):\r\n \"\"\"Test deleting a game server offline schedule\"\"\"\r\n result = True\r\n api_name = 'Delete game server offline schedule-'\r\n msg = api_name + 'API call succeeded'\r\n try:\r\n url = CMDB_URL + 'GameServerOff.Delete/'\r\n post_data = {\r\n \"project\": \"ssss\",\r\n \"area\": \"cn\",\r\n \"srv_id\": '[\"5800001\", \"2100000023\"]', # web server id\r\n \"off_time\": \"1381419601\",\r\n \"web_callback_url\": \"https://xxxxxx/\",\r\n }\r\n success, reason = self.post_api(url, post_data)\r\n if not success:\r\n raise Exception(reason)\r\n except Exception as e:\r\n msg = api_name + 'API call failed: ' + str(e)\r\n result = False\r\n finally:\r\n return result, msg\r\n\r\n def test_modify_srv_opentime_schedule_create(self):\r\n \"\"\"Test creating a schedule that modifies the server opening time\"\"\"\r\n result = True\r\n api_name = 'Create modify-server-opening-time schedule-'\r\n msg = api_name + 'API call succeeded'\r\n try:\r\n url = CMDB_URL + 'ModifySrvOpenTimeSchedule.Create/'\r\n post_data = {\r\n \"project\": \"cyh5s7\",\r\n \"area\": \"cn\",\r\n \"srv_id\": \"1600001\", # web server id\r\n \"open_time\": \"1581419600\",\r\n }\r\n success, reason = self.post_api(url, post_data)\r\n if not success:\r\n raise Exception(reason)\r\n except Exception as e:\r\n msg = api_name + 'API call failed: ' + str(e)\r\n result = False\r\n finally:\r\n return result, msg\r\n\r\n def test_version_update_maintenance(self):\r\n \"\"\"Test receiving version-update maintenance info\"\"\"\r\n result = True\r\n api_name = 'Receive version-update maintenance info-'\r\n msg = api_name + 'API call succeeded'\r\n try:\r\n url = CMDB_URL + 'RecvWebMaintenanceInfo/'\r\n post_data = {\r\n \"project\": \"cyh5s7\",\r\n \"area\": \"cn\",\r\n \"maintenance_type\": \"3\",\r\n }\r\n success, reason = self.post_api(url, post_data)\r\n if not success:\r\n raise Exception(reason)\r\n except Exception as e:\r\n msg = api_name + 'API call failed: ' + str(e)\r\n result = False\r\n finally:\r\n return result, msg\r\n\r\n\r\nif __name__ == 
'__main__':\n if not PRODUCTION_ENV:\n test = TEST_CMDB_API()\n for option in TEST_OPTION:\n if option == 1:\n \"\"\"Game server offline schedule test\"\"\"\n result, msg = test.test_gameserveroff_create()\n print(result, msg)\n result, msg = test.test_gameserveroff_delete()\n print(result, msg)\n elif option == 2:\n \"\"\"Modify server opening time schedule test\"\"\"\n result, msg = test.test_modify_srv_opentime_schedule_create()\n print(result, msg)\n elif option == 3:\n \"\"\"Receive version-update maintenance info test\"\"\"\n result, msg = test.test_version_update_maintenance()\n print(result, msg)\n else:\n print('Unknown test type')\n","sub_path":"api_web/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"73263567","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom django.urls import include, re_path\nfrom . import views\nfrom django_filters.views import FilterView\nfrom catalog.filters import BookFilter\nfrom django.contrib.auth.decorators import login_required\n\nurlpatterns = [\n path('', views.index, name='index'),\n url(r'^search/$', FilterView.as_view(filterset_class=BookFilter,\n template_name='catalog/result.html'), name='search'), \n\n path('books/', views.BookListView.as_view(), name='books'),\n re_path(r'^book/(?P<pk>\\d+)$', views.BookDetailView.as_view(), name='book-detail'),\n path('authors/', views.AuthorListView.as_view(), name='authors'),\n re_path(r'^author/(?P<pk>\\d+)$', views.AuthorDetailView.as_view(), name='author-detail'),\n ]\n\nurlpatterns += [ \n path('mybooks/', views.ReadingListView.as_view(), name='reading_list'),\n]\n\nurlpatterns += [ \n path('add_book/', login_required(views.add_book), name='add_book'),\n path('add_author/', views.add_author, name='add_author'),\n path('add_genre/', views.add_genre, name='add_genre'),\n path('add_language/', views.add_language, name='add_language'),\n]\n\nurlpatterns += [\n path('add_to_wish_list/', views.add_to_wish_list, name='add_to_wish_list'),\n path('add_to_reading/', views.add_to_reading, name='add_to_reading'),\n path('add_to_completed/', views.add_to_completed, name='add_to_completed'),\n path('check_status/', views.check_status, name='check_status'),\n]\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\nurlpatterns += [\n path('xbooks/', views.BookList.as_view()),\n path('xbook/<int:pk>/', views.BookDetail.as_view()),\n\n path('xauthors/', views.AuthorList.as_view()),\n path('xauthor/<int:pk>/', views.AuthorDetail.as_view()),\n\n path('xgenres/', views.GenreList.as_view()),\n path('xgenre/<int:pk>/', views.GenreDetail.as_view()),\n\n path('xlanguages/', views.LanguageList.as_view()),\n path('xlanguage/<int:pk>/', views.LanguageDetail.as_view()),\n\n\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\n","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"413035751","text":"from modules.p2ptrust.testing.experiments.sampler import Attack\nfrom modules.p2ptrust.testing.experiments.strategies.basic_strategy import Strategy\nfrom modules.p2ptrust.testing.experiments.utils import NetworkUpdate\n\n\nclass StrategyMaliciousDeviceA(Strategy):\n\n def __init__(self, target_list):\n super().__init__()\n self.override_handle_update = True\n self.override_handle_data_request = True\n self.is_good = False\n self.do_p2p = False\n self.target_ip_list = target_list\n\n def on_round_start(self, round_no: int):\n if round_no == 0:\n return 
NetworkUpdate.JoinWithSameIp, None\n return None, None\n\n def choose_round_behavior(self, round_no: int, peer_ips: list):\n if round_no < 2:\n attack_plan = {}\n for peer_ip in peer_ips:\n if peer_ip in self.target_ip_list:\n attack_plan[peer_ip] = Attack.GeneralAttack\n else:\n attack_plan[peer_ip] = Attack.Benign\n else:\n attack_plan = dict.fromkeys(peer_ips, Attack.GeneralAttack)\n return attack_plan\n\n def on_round_end(self, round_no: int):\n pass\n\n def handle_update(self, ip_address: str):\n print(\"I am an attacker, I don't check score updates\")\n\n def handle_data_request(self, message_data: str):\n print(\"I am an attacker, I don't respond to queries\")","sub_path":"strategies/strategy_malicious_deviceA.py","file_name":"strategy_malicious_deviceA.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"49061792","text":"from math import sqrt\r\nimport numpy as np\r\nfrom numpy import array\r\n\r\n\r\nA = array([[24,-7,-4,4],[-3,-9,-2,-2],[3,7,24,9],[1,-6,-2,-15]])\r\n\r\nb = array([-190,-12,155,-17])\r\nm = len(A)\r\nx = [.0 for i in range(m)]\r\nIteration = 0\r\nconverge = False\r\npogr = 0.\r\nwhile not converge:\r\n x_new = np.copy(x)\r\n for i in range(m):\r\n s1 = sum(A[i][j] * x[j] for j in range(i))\r\n s2 = sum(A[i][j] * x[j] for j in range(i + 1, m))\r\n x_new[i] = (b[i] - s2 - s1) / A[i][i]\r\n pogr = sum(abs(x_new[i] - x[i]) for i in range(m))\r\n converge = pogr < 1e-6\r\n Iteration += 1\r\n x = x_new\r\nprint('Number of iterations :', Iteration)\r\nprint('Solution of the system of equations :', x)\r\nprint('Error :', pogr)\r\n","sub_path":"8_3(прост).py","file_name":"8_3(прост).py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"18767682","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 21 19:02:42 2019\r\n\r\n@author: EJ\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nfrom flask import Flask, request, Response\r\nfrom festival import festival_list_date\r\nfrom openpyxl import load_workbook\r\n\r\nEXCEL_FILE_NAME = 'Database.xlsx'\r\ndb = load_workbook(filename=EXCEL_FILE_NAME)\r\ntuto_db = db['fv']\r\n\r\nAPI_KEY = '975231772:AAEFSZRTH1hrpYV4c-9esIR4i5I1JIId874'\r\n\r\napp = Flask(__name__)\r\n\r\n\r\ndef write_with_index(fv_user_pick_list):\r\n\r\n tuto_db['A1'].value = fv_user_pick_list\r\n db.save(EXCEL_FILE_NAME)\r\n print('엑셀파일에 저장되었따 리스트가')\r\n \r\ndef write_user_choice_num(user_choice_num):\r\n\r\n tuto_db['A5'].value = user_choice_num\r\n db.save(EXCEL_FILE_NAME)\r\n print('엑셀파일에 저장되었따 유저가 선택한 번호가 ')\r\n \r\ndef write_with_index_all(user_festival_list):\r\n\r\n tuto_db['A3'].value = user_festival_list\r\n db.save(EXCEL_FILE_NAME)\r\n print('엑셀파일에 저장되었따 원데이타가!')\r\n \r\n \r\ndef read_with_index(loc):\r\n read_result = tuto_db[loc].value\r\n return read_result\r\n \r\ndef parse_message(data):\r\n chat_id = data['message']['chat']['id']\r\n msg = data['message']['text']\r\n \r\n return chat_id, msg\r\n\r\ndef pick_list_back(pick_list):\r\n return pick_list\r\n\r\ndef send_message(chat_id, text='bla-bla-bla'):\r\n \"\"\"\r\n Takes a chat_id and text and sends the message,\r\n attaching a keyboard configured inside params.\r\n \r\n https://core.telegram.org/bots/api#keyboardbutton\r\n \"\"\"\r\n url = 'https://api.telegram.org/bot{token}/sendMessage'.format(token=API_KEY) #sendMessage\r\n keyboard = { # Keyboard format\r\n 'keyboard':[[{\r\n 'text': '기간'\r\n },\r\n {'text': 
'종류'\r\n }]\r\n ],\r\n 'one_time_keyboard' : True\r\n }\r\n \r\n \r\n if text[5:].isdigit():\r\n print('###@@@###사용자가날짜를입력했다.###### #####')\r\n params = {'chat_id':chat_id, 'text': text}\r\n requests.post(url, json=params)\r\n elif (text=='기간') & (len(text)==2): \r\n print('#######기간버튼누름###########')\r\n params = {'chat_id':chat_id, 'text': '입력방식=[기간검색!+원하는 기간]을 입력 예)기간검색!2019080120191230 오타 조심..' }\r\n requests.post(url, json=params)\r\n elif (text=='종류') & (len(text)==2):\r\n undercon='누가 종류를 눌렀는가..누르지 말라고 했거늘..종류는 아직 준비중일세;'\r\n keyboard = {\r\n 'keyboard':[[{'text': '여기를 클릭하시오 ....;;'}]],\r\n 'one_time_keyboard' : True\r\n } \r\n text=text+undercon\r\n params = {'chat_id':chat_id, 'text': text, 'reply_markup' : keyboard}\r\n requests.post(url, json=params)\r\n return 0 \r\n elif len(text)>100: #기간입력하면 여기서 리스트 뿌려줌.\r\n #엑셀에 쓴다. \r\n write_with_index(text) \r\n print('100개이상이니까 출력해줌')\r\n text=' 번호 하나 선택 바람, 입력방식=[원하는번호+번] 예)2번 '+'\\n'+text\r\n params = {'chat_id':chat_id, 'text': text}\r\n requests.post(url, json=params)\r\n elif len(text)==2 : # 리스트중 한개선택했다면\r\n print('리스트중 번호하나 선택했으면')\r\n write_user_choice_num(text) #선택한 번호 엑셀에 저장하였다. \r\n read_result=read_with_index('A1') #엑셀에서 읽어 들어온다 \r\n print('엑셀서읽어들어왔따.') \r\n show_list=read_result.split('\\n') # 줄바꿈으로 구분자해서 리스트로 바꿔준다. \r\n for item in show_list:\r\n if text in item:\r\n final_decision_title=item\r\n print(final_decision_title) \r\n keyboard = {\r\n 'keyboard':[[{'text': '네'},\r\n {'text': '아니요'}]],\r\n 'one_time_keyboard' : True\r\n } \r\n #params = {'chat_id':chat_id, 'text': '\"'+final_decision_title+'\"'+'이라.. 탁월한 선택이군..상세정보도 원하는가? 네 라고 답해야만 한다네..흠', 'reply_markup' : keyboard}\r\n params = {'chat_id':chat_id, 'text': '탁월한 선택이군..상세정보도 원하는가? 네 라고 답해야만 한다네..흠', 'reply_markup' : keyboard}\r\n requests.post(url, json=params)\r\n elif text=='네': #디테일정보 뿌려줌 이 축제가 맞다고 하면\r\n print('네 라고했다!!')\r\n read_result_all=read_with_index('A3') #엑셀에서 읽어 들어온다 \r\n print('엑셀서읽어들어왔따 모든데이타를!.')\r\n show_list_detail=read_result_all.split('\\n') # 줄바꿈으로 구분자해서 리스트로 바꿔준다. \r\n print('show_list_detail.',show_list_detail)\r\n read_user_num=read_with_index('A5')#엑셀에서 읽어 들어온다 사용자가 선택한 넘버를\r\n for item in show_list_detail:\r\n if read_user_num in item:\r\n final_decision_detail=item\r\n # print(final_decision_detail)\r\n final_decision_detail=final_decision_detail+'.....이게 내가 가진 모든 정보라네.. 더이상은 무리야..축제에서 즐거운 시간 보내길 바라.. 흠흠..그럼 난 이만 도봉산으로 퇴근 총총..날 다시 부르려면 [나와라]라고 입력해줘...'\r\n params = {'chat_id':chat_id, 'text': final_decision_detail}\r\n requests.post(url, json=params)\r\n elif text=='나와라':\r\n params = {'chat_id':chat_id, 'text': '누가 날 부르는가...축제를 가려나보구먼..여기서는 축제를 기간 또는 종류로 검색할 수 있다네. 아니다. 종류는 아직 준비중이다 기간 클릭하시게.. . ', 'reply_markup' : keyboard}\r\n requests.post(url, json=params)\r\n elif text=='아니요':\r\n params = {'chat_id':chat_id, 'text': '아니라니...대화가 종료되었다네... 날 다시 부르려면 [나와라]라고 입력하시오..'}\r\n requests.post(url, json=params)\r\n else:\r\n params = {'chat_id':chat_id, 'text': '누가 날 부르는가...축제를 가려나보구먼..여기서는 축제를 기간 또는 종류로 검색할 수 있다네. 아니다. 종류는 아직 준비중이다 기간 클릭하시게.. . 
', 'reply_markup' : keyboard}\r\n requests.post(url, json=params)\r\n # Bundle the variables into a dictionary\r\n #params = {'chat_id':chat_id, 'text': '호잇', 'reply_markup' : keyboard}\r\n \r\n # Convert params to JSON and send them to the URL\r\n # This is the part that sends the message\r\n #response = requests.post(url, json=params)\r\n \r\n return 0\r\n \r\n# Route and URL setup\r\n@app.route('/', methods=['POST', 'GET'])\r\ndef index():\r\n if request.method == 'POST':\r\n message = request.get_json()\r\n\r\n \r\n chat_id, msg = parse_message(message)\r\n send_message(chat_id, msg)\r\n \r\n if msg[:5] == \"기간검색!\": \r\n userDate = msg[5:] #userDate= 2019090920191010\r\n print(userDate,'=====userdate ,기간검색!으로 찾았으니까 함수불러')\r\n festivalListShow,user_festival_list = festival_list_date(userDate)\r\n #Write the raw data to Excel.\r\n #The raw data is a list and will not write to Excel directly, so convert it to str first \r\n user_all_data=''\r\n i = 1\r\n for item in user_festival_list:\r\n user_all_data = user_all_data + f'{i}'+'번 '+ item['title'] +item['firstimage']+' '+' 주 소 : '+item['addr1']+' 축제기간 : '+str(item['eventstartdate'])+'~'+str(item['eventenddate'])+'.'+ '\\n'\r\n i += 1\r\n \r\n write_with_index_all(user_all_data)#Code that writes to Excel \r\n \r\n send_message(chat_id, festivalListShow)\r\n \r\n return Response('ok', status=200)\r\n else:\r\n return 'Hello World!'\r\n\r\n\r\n# When executed directly, Python assigns the value __main__\r\n# to the variable __name__\r\nif __name__ == '__main__':\r\n app.run(port = 5000)\r\n","sub_path":"alone_festival.py","file_name":"alone_festival.py","file_ext":"py","file_size_in_byte":8418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"481779421","text":"import pygame\nimport game\nimport imp\nimp.reload(game)\n\n#Initialization\npygame.mixer.init()\npygame.init()\n\n#Create the screen\nSCREEN_WIDTH, SCREEN_HEIGHT = 1000, 650\nsize = [SCREEN_WIDTH, SCREEN_HEIGHT]\nscreen = pygame.display.set_mode(size)\ntitle = \"Teemo Adventure\"\npygame.display.set_caption(title)\nicon = pygame.image.load('images/title_icon.png')\npygame.display.set_icon(icon)\n\n#Create the clock\nclock = pygame.time.Clock()\nFPS = 60\n\n#Intro assets\nintro_animation = []\nintro_animation.append(pygame.transform.scale(pygame.image.load('images/intro_1.png'), [SCREEN_WIDTH, SCREEN_HEIGHT]))\nintro_animation.append(pygame.transform.scale(pygame.image.load('images/intro_2.png'), [SCREEN_WIDTH, SCREEN_HEIGHT]))\nintro_animation.append(pygame.transform.scale(pygame.image.load('images/intro_3.png'), [SCREEN_WIDTH, SCREEN_HEIGHT]))\nintro_animation.append(pygame.transform.scale(pygame.image.load('images/intro_4.png'), [SCREEN_WIDTH, SCREEN_HEIGHT]))\nintro_animation.append(pygame.transform.scale(pygame.image.load('images/intro_5.png'), [SCREEN_WIDTH, SCREEN_HEIGHT]))\n\nintro_font = pygame.font.SysFont(None, 35)\ncurrent_intro = 0\nintro_comment_var = 0\nintro = True\n\nsfx_intro_bgm = pygame.mixer.Sound('sounds/teemo_song.wav')\nsfx_intro_bgm.set_volume(0.2)\nsfx_intro_bgm.play()\n\n#Result screen assets\nresult_img = pygame.transform.scale(pygame.image.load('images/result.png'), (600, 450))\nresult_font_info = pygame.font.SysFont(None, 35)\nresult_font_button = pygame.font.SysFont(None, 50)\nresult_clear_or_not = pygame.font.SysFont(None, 70)\nhp = pygame.transform.scale(pygame.image.load('images/HP_no.png'), (32, 32))\n\nreplay_button = pygame.Surface((230, 78))\nreplay_button.set_alpha(0)\nreplay_button.fill((255, 255, 255))\nexit_button = pygame.Surface((230, 78))\nexit_button.set_alpha(0)\nexit_button.fill((255, 255, 255))\n\nsfx_clear = pygame.mixer.Sound('sounds/game_clear.wav')\nsfx_clear.set_volume(0.075)\nclear_sound_var = True\n\n#Game controls\ninfo_img = pygame.transform.scale(pygame.image.load('images/info.png'), [SCREEN_WIDTH, SCREEN_HEIGHT])\ninfo = True\ninfo_comment_var = 0\n\n#Main loop\nplaying = True\nwhile playing:\n dt = clock.tick(FPS)\n #Intro\n while intro:\n current_intro += 0.1\n current_intro %= 5\n intro_comment_var += 1\n if int(intro_comment_var) % 120 < 90:\n intro_comment = \"Please press any key\"\n else:\n intro_comment = None\n intro_text = intro_font.render(intro_comment, True, (255, 255, 255))\n for event in pygame.event.get():#Get the event list (queue)\n if event.type == pygame.QUIT:\n playing = False\n pygame.quit() \n if event.type == pygame.KEYDOWN:\n intro = False\n screen.blit(intro_animation[int(current_intro)], (0, 0))\n screen.blit(intro_text, (370, 600))\n pygame.display.update()\n #Controls\n while info:\n info_comment_var += 1\n if int(info_comment_var) % 120 < 90:\n info_comment = \"If you're ready, please press any key to start!!\"\n else:\n info_comment = None\n info_text = intro_font.render(info_comment, True, (255, 255, 255))\n for event in pygame.event.get():#Get the event list (queue)\n if event.type == pygame.QUIT:\n playing = False\n pygame.quit() \n if event.type == pygame.KEYDOWN:\n info = False\n sfx_intro_bgm.stop()\n screen.blit(info_img, (0, 0))\n screen.blit(info_text, (280, 600))\n pygame.display.update()\n \n # Run the game\n result, result_time, result_hp, clear = game.game()\n #Result screen\n total_result_text = ''\n total_result_color = [0, 0 ,0]\n while result:\n replay_rect = screen.blit(replay_button, (260,380))\n exit_rect = screen.blit(exit_button, (506,380))\n for event in pygame.event.get():#Get the event list (queue)\n if event.type == pygame.QUIT:\n pygame.quit() \n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n pos = pygame.mouse.get_pos()\n if replay_rect.collidepoint(pos):\n playing = True\n elif exit_rect.collidepoint(pos):\n playing = False\n pygame.quit()\n result = False\n if clear is True:\n if clear_sound_var is True:\n sfx_clear.play(0)\n clear_sound_var = False\n total_result_text = 'C L E A R'\n total_result_color = [0, 200, 0]\n elif clear is False:\n total_result_text = 'F A L S E'\n total_result_color = [200, 0, 0]\n result_info_time_text = 'Play Time : ' + str(int(result_time)) + ' sec'\n result_info_time = result_font_info.render(result_info_time_text, True, (0, 0, 0))\n result_info_hp = result_font_info.render('Spent HP : ', True, (0, 0, 0))\n result_button_replay = result_font_button.render('replay()', True, (255, 255, 255))\n result_button_exit = result_font_button.render('exit()', True, (255, 255, 255))\n total_result = result_clear_or_not.render(total_result_text, True, total_result_color)\n \n screen.fill((0,0,0))\n screen.blit(result_img, (200, 50))\n screen.blit(result_info_time, (280, 190))\n screen.blit(result_info_hp, (280, 250))\n for i in range(0, result_hp):\n screen.blit(hp, (485+(40*i), 250))\n screen.blit(total_result,(400, 310))\n screen.blit(result_button_replay,(315, 400))\n screen.blit(result_button_exit,(580, 400))\n \n pygame.display.update()\n \npygame.quit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"208622672","text":"\n# which chromecasts to use\nchromecasts = (\"This Room\", \"Another Room\")\n\n# use external display or not\nuse_display = False\n\n# which GPIO pins your rotary encoder is using\nclk = 17\ndt = 18\nsw = 23\n\n# how big the font should be on the screen\nfont_size = 55\n\n# set to 'raspberry' for 
raspberry pi or 'orange' or orange pi zero\nboard_type = 'raspberry'\n\ndebug = True\n","sub_path":"config.dist.py","file_name":"config.dist.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"466978000","text":"# coding=utf-8\nimport threading\n\nfrom src.base.log4py import logger\nfrom src.service.art_of_war import ArtOfWar\n\n\"\"\"\n多线程启动不同设备\n\"\"\"\n\n\nclass ProcessJimmy(threading.Thread):\n def __init__(self, device_id, frequency=1, debug=False):\n threading.Thread.__init__(self)\n self.device_id = str(device_id)\n self.debug = debug\n self.frequency = frequency\n self.is_stop = False\n\n def stop(self):\n '''\n stop thread\n :return:\n '''\n logger.debug(\"stop thread ! \")\n self.is_stop = True\n self.join()\n\n def run(self):\n '''\n running thread\n :return:\n '''\n logger.debug(\"running thread for device [\" + self.device_id + \"] \")\n\n try:\n ArtOfWar(self.device_id).run_sand_jimmy()\n # ArtOfWar(self.device_id).run_jimmy()\n logger.debug(\"connected to device:\" + self.device_id)\n\n except:\n logger.debug(\"lost device: \" + self.device_id)\n","sub_path":"src/service/art_of_war_thread_jimmy.py","file_name":"art_of_war_thread_jimmy.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"164247130","text":"import string\n#########################\n# LanguageHelper class\n#########################\n\nclass LanguageHelper:\n \"\"\"A simple spell checking class that gives suggestions\"\"\"\n\n def __init__ (self, languageFilename):\n \"\"\"Initializes the set of all words in the english dictionary\"\"\"\n if not isinstance(languageFilename, str): # Checks if the filename is entered as a string.\n raise TypeError('The filename must be a string')\n self._words = set()\n try:\n with open(languageFilename) as data:\n line = data.readline()\n while line:\n line = line.rstrip()\n self._words.add(line)\n line = data.readline()\n except IOError:\n print('Please specify the correct name for the dictionary')\n\n def __contains__(self, query):\n \"\"\"Checks whether the query is a legitimate word\n \n The query should be a string.\n \"\"\"\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n if query in self._words:\n return True\n elif query.lower() in self._words:\n return True\n else:\n return False\n\n def getSuggestions(self,query):\n \"\"\"Returns a sorted list of all legitimate language words that are precisely one edit away from the query.\n The query should be a string.\n \"\"\"\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n self._possible = [] #List of strings one change away\n self._final = [] #Final list of suggestions\n self._alphabet = list(string.ascii_lowercase) # Produces a list of all lowercase letters.\n self._alphabet.extend(('-',' '))\n self._query = query.lower()\n for i in range((len(query))-1):\n possible = self._query[:i]+self._query[i+1]+self._query[i]+self._query[(i+2):] #Add cases of inverting two letters\n self._possible.append(possible)\n for i in range(len(query)):\n possible = self._query[:i] + self._query[(i+1):] #Add cases of deleting one letter\n self._possible.append(possible)\n for g in range(len(self._alphabet)):\n possible = self._query[:i]+self._alphabet[g]+self._query[(i+1):] #Add cases of inserting one 
letter\n possibleAlso = self._query[:i]+self._alphabet[g]+self._query[i:] #Add cases of replacing one letter\n self._possible.append(possible)\n self._possible.append(possibleAlso)\n suggestionLength = len(self._possible)\n for i in range(suggestionLength):\n self._possible.append(self._possible[i].capitalize()) #Add all possible strings, capitalized (doubles list length)\n for i in self._possible:\n if i in self._words:\n if i not in self._final: #Removes duplicates from final list\n if i != query: \n self._final.append(i)\n if query.islower() == True:\n for i in self._final:\n if i[0].isupper() == True:\n if i[0] != query[0].upper():\n self._final.remove(i)\n if query.istitle() == True:\n self._final = [i.capitalize() for i in self._final]\n self._final.sort()\n return self._final\n \n#########################\n# Unit Testing\n#########################\n\nif __name__ == '__main__':\n helper = LanguageHelper ('English.txt')\n \n if ('dogs' in helper):\n print('Found \"dogs\"')\n\n if ('missouri' in helper):\n print('Wrong')\n \n # Should print out Missouri\n print(helper.getSuggestions('Missouri'))\n print(helper.getSuggestions('missouri'))\n \n # Should print out a list containing words that are similar to 'test'\n print(helper.getSuggestions('tess'))\n\n # Should only print out a list containing capital words that are similar to 'test'\n print(helper.getSuggestions('Tess'))\n \n # Scans through all the words in a file. If are incorrect words, suggestions are given.\n def fileChecker(filename):\n \"\"\"Checks through all the words in a file\"\"\"\n translator = str.maketrans('', '', string.punctuation)\n allWords = []\n wrongWords = []\n listOfSuggestions = []\n try:\n with open(filename) as data:\n for line in data:\n line = line.translate(translator) #Removes all punctuation\n words = line.split()\n for word in words:\n allWords.append(word)\n except IOError:\n print('Please enter a valid filename')\n for i in allWords:\n if i not in helper:\n wrongWords.append(i)\n for x in wrongWords:\n listOfSuggestions.append(helper.getSuggestions(x))\n for i in range(len(wrongWords)):\n print('Word: ' + \"'\" + wrongWords[i] + \"', \" + 'List of Suggestions:', str(listOfSuggestions[i]))\n \n \n # Should give an error\n fileChecker('sampl.txt')\n\n # Checks for spelling errors in files\n fileChecker('sample.txt')\n fileChecker('sampleLetter.txt')\n\n","sub_path":"Programming/Program05/LanguageHelper.py","file_name":"LanguageHelper.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"311995203","text":"#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nwith open('README.rst', encoding='utf-8') as f:\n readme = f.read()\n\nsetup(\n name='flask-http-client',\n version='0.0.4',\n description='HTTP client extension for Flask.',\n long_description=readme,\n author='codeif',\n author_email='me@codeif.com',\n url='https://github.com/codeif/flask-http-client',\n license='MIT',\n packages=find_packages(),\n install_requires=['Flask', 'requests'],\n classifiers=[\n 'Programming Language :: Python :: 3',\n ]\n)\n","sub_path":"pypi_install_script/flask-http-client-0.0.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"63636758","text":"'''\nDESCRIPTION:\n\nANALYSIS:\n\nAUTOR: \n\nDATE: 03.05.2017\n\n'''\n\n# ------ LIBRARIES IMPORT ------ #\nfrom fenics import 
*\nfrom mshr import *\n\n########################################################\n# ------ ------ 01) FOWARD PROBLEM SOLVE ------ ------ #\n########################################################\n\n# ------ GEOMETRICAL PARAMETERS ------ #\nresolution = 70\n\ndim_0 = 0.0\ndim_L = 5.0\ndim_D = 1.0\n\n# ------ SIMULATION PARAMETERS CONFIGURATION ------ #\ncons_rh1 = 1.0E+3\ncons_rh2 = 1.3E+3\ncons_mu1 = 1.0E-3\ncons_mu2 = 1.0E-3\ncons_gg = 1E-12\n\nv_0 = 1E-6\n\n# ------ MESH CONFIGURATION ------ #\npart1 = Rectangle(\n Point( dim_0, dim_0),\n Point( dim_L, dim_D),\n )\ndomain = part1\nmesh = generate_mesh(domain, resolution)\n\n# ------ VARIATIONAL FORMULATION ------ #\nFE_u = FiniteElement('P', mesh.ufl_cell(), 2)\nFE_p = FiniteElement('P', mesh.ufl_cell(), 1)\nFE_a = FiniteElement('P', mesh.ufl_cell(), 1)\nelem = MixedElement([FE_u, FE_u, FE_u, FE_u, FE_p, FE_a])\nU = FunctionSpace(mesh, elem)\n\nans = Function(U)\nux1,uy1,ux2,uy2,p1,a1 = split(ans)\nvx1,vy1,vx2,vy2,q1,b1 = TestFunctions(U)\n\nu1 = as_vector([ux1,uy1])\nu2 = as_vector([ux2,uy2])\nv1 = as_vector([vx1,vy1])\nv2 = as_vector([vx2,vy2])\n\nN1 = Constant(1)\nRH1 = Constant(cons_rh1)\nRH2 = Constant(cons_rh2)\nMU1 = Constant(cons_mu1)\nMU2 = Constant(cons_mu2)\nBx = Constant(0.0)\nBy = Constant(-cons_gg)\nBB = as_vector([Bx, By])\n\na2 = N1-a1\nTAU_1 = MU1*(grad(u1)+grad(u1).T)\nTAU_2 = MU2*(grad(u2)+grad(u2).T)\nx,y = 0,1\n\nF = div(u1*a1)*q1*dx \\\n + div(u2*a2)*b1*dx \\\n + inner( a1*RH1*dot(u1,grad(u1).T), v1 ) *dx \\\n + inner( a1*TAU_1, grad(v1) ) *dx \\\n - a1*p1*div(v1) *dx \\\n - inner( BB*RH1*a1, v1) *dx \\\n + inner( a2*RH2*dot(u2,grad(u2).T), v1 ) *dx \\\n + inner( a2*TAU_2, grad(v2) ) *dx \\\n - a2*p1*div(v2) *dx \\\n - inner( BB*RH2*a2, v2) *dx\n\n# ------ BOUNDARY CONDITIONS AND SOLVE ------ #\ninlet = '(near(x[0],'+str(dim_0)+') && on_boundary)'\noutlet = '(near(x[0],'+str(dim_L)+') && on_boundary)'\nwalls = 'on_boundary && !'+inlet+'&& !'+outlet\np_ref = 'x[0]==0 && x[1]==0'\na_in = Constant(0.5)\n#v_in = Expression('4*v_in*x[1]*(1-x[1])', v_in=v_0, degree=2)\nv_in = Constant(v_0)\nv_null = Constant(0)\np_ux1,p_uy1,p_ux2,p_uy2,p_p1,p_aa = 0,1,2,3,4,5\nBC = [\n DirichletBC(U.sub(p_aa) , a_in , inlet ),\n DirichletBC(U.sub(p_ux1), v_in , inlet ),\n DirichletBC(U.sub(p_uy1), v_null, inlet ),\n DirichletBC(U.sub(p_ux2), v_in , inlet ),\n DirichletBC(U.sub(p_uy2), v_null, inlet ),\n DirichletBC(U.sub(p_ux1), v_null, walls ),\n DirichletBC(U.sub(p_uy1), v_null, walls ),\n DirichletBC(U.sub(p_ux2), v_null, walls ),\n DirichletBC(U.sub(p_uy2), v_null, walls ),\n #DirichletBC(U.sub(p_uy1), v_null, outlet ),\n #DirichletBC(U.sub(p_uy2), v_null, outlet ),\n #DirichletBC(U.sub(p_p1), Constant(0), p_ref, method='pointwise'),\n ]\n\nassign(ans.sub(p_ux1), project(Constant(v_0 ), FunctionSpace(mesh, FE_u)))\nassign(ans.sub(p_uy1), project(Constant(0 ), FunctionSpace(mesh, FE_u)))\nassign(ans.sub(p_ux2), project(Constant(v_0 ), FunctionSpace(mesh, FE_u)))\nassign(ans.sub(p_uy2), project(Constant(0 ), FunctionSpace(mesh, FE_u)))\nassign(ans.sub(p_p1) , project(Constant(1e-5), FunctionSpace(mesh, FE_p)))\nassign(ans.sub(p_aa) , project(Constant(0.5 ), FunctionSpace(mesh, FE_a)))\n\ndF = derivative(F, ans)\nproblem = NonlinearVariationalProblem(F, ans, BC, dF)\nsolver = NonlinearVariationalSolver(problem)\nprm = solver.parameters[\"newton_solver\"]\n#prm[\"convergence_criterion\" ] = \"residual\"\n#prm[\"linear_solver\" ] = \"mumps\"\n#prm[\"method\" ] = \"full\"\n#prm[\"preconditioner\" ] = 
\"none\"\nprm[\"error_on_nonconvergence\" ] = True\nprm[\"maximum_iterations\"    ] = 8\nprm[\"absolute_tolerance\"    ] = 6E-16\nprm[\"relative_tolerance\"    ] = 8E-16\nprm[\"relaxation_parameter\"   ] = 1.0\n#prm[\"report\"          ] = True\n#set_log_level(PROGRESS)\n\nfoldername = 'results.Pgrad_Separation_Galerkin'\nvtk_ua1 = File(foldername+'/velocity_intrinsic1.pvd')\nvtk_ua2 = File(foldername+'/velocity_intrinsic2.pvd')\nvtk_u1 = File(foldername+'/velocity1.pvd')\nvtk_u2 = File(foldername+'/velocity2.pvd')\nvtk_pp = File(foldername+'/pressure.pvd')\nvtk_aa = File(foldername+'/fraction.pvd')\nFE_vector = FunctionSpace(mesh, VectorElement('P', mesh.ufl_cell(), 1))\nFE_scalar = FunctionSpace(mesh, FiniteElement('P', mesh.ufl_cell(), 1))\n\ndef save_results():\n    ua1_viz = project(u1*a1, FE_vector); vtk_ua1 << ua1_viz\n    ua2_viz = project(u2*a2, FE_vector); vtk_ua2 << ua2_viz\n    u1_viz = project(u1 , FE_vector); vtk_u1 << u1_viz\n    u2_viz = project(u2 , FE_vector); vtk_u2 << u2_viz\n    pp_viz = project(p1 , FE_scalar); vtk_pp << pp_viz\n    aa_viz = project(a1 , FE_scalar); vtk_aa << aa_viz\n\ng_exp_init = -12.0\ndelta_exp = 0.05\nsteps = 1000\ngg_list = [10**(g_exp_init+i*delta_exp) for i in range(steps)]\nfor g_value in gg_list:\n    print('Solving for g={}'.format(g_value))\n    By.assign( -g_value )\n    solver.solve()\n    save_results()\n\n","sub_path":"620.Project_IC/130.TwoFluidModel/131.Separation_PGrad_Galerkin.py","file_name":"131.Separation_PGrad_Galerkin.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"40565193","text":"import SCons\nimport os\n\n\ndef _gen_com_str(action, toolchain):\n    return \"[%s] [%s] ${SOURCE} ${TARGET}\" % (action, toolchain)\n\n\ndef _set_toolchain(env, toolchain):\n    if \"CT_NG_DIR\" not in env:\n        env[\"CT_NG_DIR\"] = env.Dir(os.path.expanduser(\"~/x-tools\"))\n    bin_dir = env[\"CT_NG_DIR\"].Dir(toolchain).Dir(\"bin\")\n    sysroot = env[\"CT_NG_DIR\"].Dir(toolchain).Dir(toolchain).Dir(\"sysroot\")\n    env[\"CC\"] = bin_dir.File(toolchain + \"-gcc\")\n    env[\"LD\"] = bin_dir.File(toolchain + \"-ld\")\n    env[\"AR\"] = bin_dir.File(toolchain + \"-ar\")\n    env[\"RANLIB\"] = bin_dir.File(toolchain + \"-ranlib\")\n\n    env[\"CCCOMSTR\"] = _gen_com_str(\"compile\", toolchain)\n    env[\"LINKCOMSTR\"] = _gen_com_str(\"link\", toolchain)\n    env[\"ARCOMSTR\"] = _gen_com_str(\"archive\", toolchain)\n    env[\"RANLIBCOMSTR\"] = _gen_com_str(\"ranlib\", toolchain)\n    env.Append(CCFLAGS=[\"--sysroot=%s\" % sysroot])\n\n\ndef generate(env):\n    env.AddMethod(_set_toolchain, \"SetToolchain\")\n\n\ndef exists(env):\n    return True\n","sub_path":"site_scons/site_tools/ct_ng.py","file_name":"ct_ng.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"359313599","text":"n = int(input(\"Enter the no. of elements : \"))\r\na = 0\r\nb = 1\r\nprint(\"List of \",n, \" Fibonacci numbers : \")\r\nprint(\"0 1\",end=\" \") \r\nfor i in range (2,n):\r\n    sum = a + b\r\n    print(sum , end=\" \")\r\n    a = b\r\n    b = sum\r\n","sub_path":"Fibonacci_Numbers.py","file_name":"Fibonacci_Numbers.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"324990555","text":"import numpy as np\nimport cv2\n\n\nclass WindowSlider:\n    def __init__(self, transformer, classifier, extractor):\n        self.svc = classifier\n        self.X_scaler = transformer\n        self.extractor = extractor\n        self.bounding_boxes = []\n        self.window = 64\n        self.pix_per_cell = 8\n        self.cells_per_window = (self.window//self.pix_per_cell)\n\n    # find all windows in region with specified overlap\n    # assume that region is scaled already\n    def slide_window(self, region_shape, xy_overlap):\n        window_list = []\n\n        # Instead of overlap, define how many cells to step\n        x_cells_per_step = np.int(self.cells_per_window*(1-xy_overlap[0]))\n        y_cells_per_step = np.int(self.cells_per_window*(1-xy_overlap[1]))\n        # print('per step', x_cells_per_step, y_cells_per_step)\n\n        # Define blocks and steps as above\n        # print('region', region_shape[1], region_shape[0])\n        n_x_cells = (region_shape[1]//self.pix_per_cell)\n        n_y_cells = (region_shape[0]//self.pix_per_cell)\n        # print('cells', n_x_cells, n_y_cells)\n\n        # Compute the number of windows in x/y\n        n_x_steps = (n_x_cells-self.cells_per_window)//x_cells_per_step+1\n        n_y_steps = (n_y_cells-self.cells_per_window)//y_cells_per_step+1\n        # print('steps', n_x_steps, n_y_steps)\n\n        # Loop through finding x and y window positions\n        # Note: you could vectorize this step, but in practice\n        # you'll be considering windows one by one with your\n        # classifier, so looping makes sense\n        for y in range(n_y_steps):\n            for x in range(n_x_steps):\n                # Calculate window position (for HOG)\n                x_pos = x*x_cells_per_step\n                y_pos = y*y_cells_per_step\n                # Calculate window scaled coordinates\n                x_start = x_pos*self.pix_per_cell\n                y_start = y_pos*self.pix_per_cell\n\n                window_list.append(((x_pos, y_pos), (x_start, y_start)))\n\n        return window_list\n\n    @staticmethod\n    def get_scaled_image_region(converted_image, scale, x_start_stop, y_start_stop):\n        img_to_search = converted_image[y_start_stop[0]:y_start_stop[1], x_start_stop[0]:x_start_stop[1], :]\n        if scale != 1:\n            im_shape = img_to_search.shape\n            scaled_image = cv2.resize(img_to_search, (np.int(im_shape[1]/scale), np.int(im_shape[0]/scale)))\n        else:\n            scaled_image = img_to_search\n        return scaled_image\n\n    # function find windows on particular area which presumably contains cars\n    def find_hot_windows(self, scale, converted_image, x_start_stop=[None, None], y_start_stop=[None, None],\n                         xy_overlap=(0.5, 0.5)):\n        # If x and/or y start/stop positions not defined, set to image size\n        if x_start_stop[0] is None:\n            x_start_stop[0] = 0\n        if x_start_stop[1] is None:\n            x_start_stop[1] = converted_image.shape[1]\n        if y_start_stop[0] is None:\n            y_start_stop[0] = 0\n        if y_start_stop[1] is None:\n            y_start_stop[1] = converted_image.shape[0]\n\n        scaled_image = self.get_scaled_image_region(converted_image, scale, x_start_stop, y_start_stop)\n\n        # pre-calculate HOG features\n        self.extractor.prepare_hog_features(scaled_image)\n\n        windows = self.slide_window(scaled_image.shape, xy_overlap=xy_overlap)\n        win_draw = np.int(self.window*scale)\n\n        for (x_pos, y_pos), (x_start, y_start) in windows:\n            if (x_start+self.window > 
scaled_image.shape[1]) or (y_start+self.window > scaled_image.shape[0]):\n continue\n\n window_img = cv2.resize(scaled_image[y_start:y_start+self.window, x_start:x_start+self.window],\n (self.window, self.window))\n features = self.extractor.extract_features(window_img, y_pos, x_pos, self.cells_per_window)\n\n scaled_features = self.X_scaler.transform(features)\n test_prediction = self.svc.predict(scaled_features)\n\n if test_prediction == 1:\n x_top_left = np.int(x_start*scale)+x_start_stop[0]\n y_top_left = np.int(y_start*scale)+y_start_stop[0]\n\n self.bounding_boxes.append([\n (x_top_left, y_top_left),\n (x_top_left+win_draw, y_top_left+win_draw)\n ])\n\n def find_cars(self, img):\n self.bounding_boxes = []\n scale_and_region = [\n [1.0, (300, img.shape[1]-300), (380, 450)],\n [1.2, (0, img.shape[1]), (380, 530)],\n [1.3, (0, img.shape[1]), (400, 580)],\n [1.5, (0, img.shape[1]), (400, 600)],\n [2.0, (0, img.shape[1]), (380, 650)]\n ]\n converted_image = self.extractor.convert(img)\n for scale, x_region, y_region in scale_and_region:\n self.find_hot_windows(scale, converted_image, x_region, y_region, xy_overlap=(0.75, 0.75))\n\n return self.bounding_boxes\n","sub_path":"src/WindowSlider.py","file_name":"WindowSlider.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"470361329","text":"\"\"\"Dependency Resolution\n\nThe dependency resolution in pip is performed as follows:\n\nfor top-level requirements:\n a. only one spec allowed per project, regardless of conflicts or not.\n otherwise a \"double requirement\" exception is raised\n b. they override sub-dependency requirements.\nfor sub-dependencies\n a. \"first found, wins\" (where the order is breadth first)\n\"\"\"\n\nimport logging\nimport os\nfrom itertools import chain\n\nfrom pip._vendor import pkg_resources, requests\n\nfrom pip.download import (\n is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path\n)\nfrom pip.exceptions import (\n BestVersionAlreadyInstalled, DirectoryUrlHashUnsupported,\n DistributionNotFound, HashError, HashErrors, HashUnpinned,\n InstallationError, PreviousBuildDirError, UnsupportedPythonVersion,\n VcsHashUnsupported\n)\nfrom pip.req.req_install import InstallRequirement\nfrom pip.utils import display_path, dist_in_usersite, ensure_dir\nfrom pip.utils.hashes import MissingHashes\nfrom pip.utils.logging import indent_log\nfrom pip.utils.packaging import check_dist_requires_python\nfrom pip.vcs import vcs\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_abstract_dist(req_to_install):\n \"\"\"Factory to make an abstract dist object.\n\n Preconditions: Either an editable req with a source_dir, or satisfied_by or\n a wheel link, or a non-editable req with a source_dir.\n\n :return: A concrete DistAbstraction.\n \"\"\"\n if req_to_install.editable:\n return IsSDist(req_to_install)\n elif req_to_install.link and req_to_install.link.is_wheel:\n return IsWheel(req_to_install)\n else:\n return IsSDist(req_to_install)\n\n\nclass DistAbstraction(object):\n \"\"\"Abstracts out the wheel vs non-wheel Resolver.resolve() logic.\n\n The requirements for anything installable are as follows:\n - we must be able to determine the requirement name\n (or we can't correctly handle the non-upgrade case).\n - we must be able to generate a list of run-time dependencies\n without installing any additional packages (or we would\n have to either burn time by doing temporary isolated installs\n or alternatively violate pips 
'don't start installing unless\n all requirements are available' rule - neither of which are\n desirable).\n - for packages with setup requirements, we must also be able\n to determine their requirements without installing additional\n packages (for the same reason as run-time dependencies)\n - we must be able to create a Distribution object exposing the\n above metadata.\n \"\"\"\n\n def __init__(self, req_to_install):\n self.req_to_install = req_to_install\n\n def dist(self, finder):\n \"\"\"Return a setuptools Dist object.\"\"\"\n raise NotImplementedError(self.dist)\n\n def prep_for_dist(self):\n \"\"\"Ensure that we can get a Dist for this requirement.\"\"\"\n raise NotImplementedError(self.dist)\n\n\nclass IsWheel(DistAbstraction):\n\n def dist(self, finder):\n return list(pkg_resources.find_distributions(\n self.req_to_install.source_dir))[0]\n\n def prep_for_dist(self):\n # FIXME:https://github.com/pypa/pip/issues/1112\n pass\n\n\nclass IsSDist(DistAbstraction):\n\n def dist(self, finder):\n dist = self.req_to_install.get_dist()\n # FIXME: shouldn't be globally added:\n if dist.has_metadata('dependency_links.txt'):\n finder.add_dependency_links(\n dist.get_metadata_lines('dependency_links.txt')\n )\n return dist\n\n def prep_for_dist(self):\n self.req_to_install.run_egg_info()\n self.req_to_install.assert_source_matches_version()\n\n\nclass Installed(DistAbstraction):\n\n def dist(self, finder):\n return self.req_to_install.satisfied_by\n\n def prep_for_dist(self):\n pass\n\n\nclass Resolver(object):\n \"\"\"Resolves which packages need to be installed/uninstalled to perform \\\n the requested operation without breaking the requirements of any package.\n \"\"\"\n\n _allowed_strategies = {\"eager\", \"only-if-needed\", \"to-satisfy-only\"}\n\n def __init__(self, session, finder, use_user_site,\n ignore_dependencies, ignore_installed, ignore_requires_python,\n force_reinstall, isolated, upgrade_strategy):\n super(Resolver, self).__init__()\n assert upgrade_strategy in self._allowed_strategies\n\n self.finder = finder\n self.session = session\n\n self.require_hashes = None # This is set in resolve\n\n self.upgrade_strategy = upgrade_strategy\n self.force_reinstall = force_reinstall\n self.isolated = isolated\n self.ignore_dependencies = ignore_dependencies\n self.ignore_installed = ignore_installed\n self.ignore_requires_python = ignore_requires_python\n self.use_user_site = use_user_site\n\n def resolve(self, requirement_set):\n \"\"\"Resolve what operations need to be done\n\n As a side-effect of this method, the packages (and their dependencies)\n are downloaded, unpacked and prepared for installation.\n\n Once PyPI has static dependency metadata available, it would be\n possible to move this side-effect to become a step separated from\n dependency resolution.\n \"\"\"\n # make the wheelhouse\n if requirement_set.wheel_download_dir:\n ensure_dir(requirement_set.wheel_download_dir)\n\n # If any top-level requirement has a hash specified, enter\n # hash-checking mode, which requires hashes from all.\n root_reqs = (\n requirement_set.unnamed_requirements +\n requirement_set.requirements.values()\n )\n self.require_hashes = (\n requirement_set.require_hashes or\n any(req.has_hash_options for req in root_reqs)\n )\n\n # Display where finder is looking for packages\n locations = self.finder.get_formatted_locations()\n if locations:\n logger.info(locations)\n\n # Actually prepare the files, and collect any exceptions. 
Most hash\n # exceptions cannot be checked ahead of time, because\n # req.populate_link() needs to be called before we can make decisions\n # based on link type.\n discovered_reqs = []\n hash_errors = HashErrors()\n for req in chain(root_reqs, discovered_reqs):\n try:\n discovered_reqs.extend(\n self._resolve_one(requirement_set, req)\n )\n except HashError as exc:\n exc.req = req\n hash_errors.append(exc)\n\n if hash_errors:\n raise hash_errors\n\n def _is_upgrade_allowed(self, req):\n if self.upgrade_strategy == \"to-satisfy-only\":\n return False\n elif self.upgrade_strategy == \"eager\":\n return True\n else:\n assert self.upgrade_strategy == \"only-if-needed\"\n return req.is_direct\n\n # XXX: Stop passing requirement_set for options\n def _check_skip_installed(self, req_to_install, requirement_set):\n \"\"\"Check if req_to_install should be skipped.\n\n This will check if the req is installed, and whether we should upgrade\n or reinstall it, taking into account all the relevant user options.\n\n After calling this req_to_install will only have satisfied_by set to\n None if the req_to_install is to be upgraded/reinstalled etc. Any\n other value will be a dist recording the current thing installed that\n satisfies the requirement.\n\n Note that for vcs urls and the like we can't assess skipping in this\n routine - we simply identify that we need to pull the thing down,\n then later on it is pulled down and introspected to assess upgrade/\n reinstalls etc.\n\n :return: A text reason for why it was skipped, or None.\n \"\"\"\n # Check whether to upgrade/reinstall this req or not.\n req_to_install.check_if_exists()\n if req_to_install.satisfied_by:\n upgrade_allowed = self._is_upgrade_allowed(req_to_install)\n\n # Is the best version is installed.\n best_installed = False\n\n if upgrade_allowed:\n # For link based requirements we have to pull the\n # tree down and inspect to assess the version #, so\n # its handled way down.\n should_check_possibility_for_upgrade = not (\n self.force_reinstall or req_to_install.link\n )\n if should_check_possibility_for_upgrade:\n try:\n self.finder.find_requirement(\n req_to_install, upgrade_allowed)\n except BestVersionAlreadyInstalled:\n best_installed = True\n except DistributionNotFound:\n # No distribution found, so we squash the\n # error - it will be raised later when we\n # re-try later to do the install.\n # Why don't we just raise here?\n pass\n\n if not best_installed:\n # don't uninstall conflict if user install and\n # conflict is not user install\n if not (self.use_user_site and not\n dist_in_usersite(req_to_install.satisfied_by)):\n req_to_install.conflicts_with = \\\n req_to_install.satisfied_by\n req_to_install.satisfied_by = None\n\n # Figure out a nice message to say why we're skipping this.\n if best_installed:\n skip_reason = 'already up-to-date'\n elif self.upgrade_strategy == \"only-if-needed\":\n skip_reason = 'not upgraded as not directly required'\n else:\n skip_reason = 'already satisfied'\n\n return skip_reason\n else:\n return None\n\n def _resolve_one(self, requirement_set, req_to_install):\n \"\"\"Prepare a single requirements file.\n\n :return: A list of additional InstallRequirements to also install.\n \"\"\"\n # Tell user what we are doing for this requirement:\n # obtain (editable), skipping, processing (local url), collecting\n # (remote url or package name)\n if req_to_install.constraint or req_to_install.prepared:\n return []\n\n req_to_install.prepared = True\n\n # ###################### #\n # # print log messages # 
#\n        # ###################### #\n        if req_to_install.editable:\n            logger.info('Obtaining %s', req_to_install)\n        else:\n            # satisfied_by is only evaluated by calling _check_skip_installed,\n            # so it must be None here.\n            assert req_to_install.satisfied_by is None\n            if not self.ignore_installed:\n                skip_reason = self._check_skip_installed(\n                    req_to_install, requirement_set\n                )\n\n            if req_to_install.satisfied_by:\n                assert skip_reason is not None, (\n                    '_check_skip_installed returned None but '\n                    'req_to_install.satisfied_by is set to %r'\n                    % (req_to_install.satisfied_by,))\n                logger.info(\n                    'Requirement %s: %s (%s)', skip_reason,\n                    req_to_install,\n                    req_to_install.satisfied_by.version)\n            else:\n                if (req_to_install.link and\n                        req_to_install.link.scheme == 'file'):\n                    path = url_to_path(req_to_install.link.url)\n                    logger.info('Processing %s', display_path(path))\n                else:\n                    logger.info('Collecting %s', req_to_install)\n\n        assert self.require_hashes is not None, \\\n            \"This should have been set in resolve()\"\n\n        with indent_log():\n            # ################################ #\n            # # vcs update or unpack archive # #\n            # ################################ #\n            if req_to_install.editable:\n                if self.require_hashes:\n                    raise InstallationError(\n                        'The editable requirement %s cannot be installed when '\n                        'requiring hashes, because there is no single file to '\n                        'hash.' % req_to_install)\n                req_to_install.ensure_has_source_dir(requirement_set.src_dir)\n                req_to_install.update_editable(not requirement_set.is_download)\n                abstract_dist = make_abstract_dist(req_to_install)\n                abstract_dist.prep_for_dist()\n                if requirement_set.is_download:\n                    req_to_install.archive(requirement_set.download_dir)\n                req_to_install.check_if_exists()\n            elif req_to_install.satisfied_by:\n                if self.require_hashes:\n                    logger.debug(\n                        'Since it is already installed, we are trusting this '\n                        'package without checking its hash. To ensure a '\n                        'completely repeatable environment, install into an '\n                        'empty virtualenv.')\n                abstract_dist = Installed(req_to_install)\n            else:\n                # @@ if filesystem packages are not marked\n                # editable in a req, a non deterministic error\n                # occurs when the script attempts to unpack the\n                # build directory\n                req_to_install.ensure_has_source_dir(requirement_set.build_dir)\n                # If a checkout exists, it's unwise to keep going. version\n                # inconsistencies are logged later, but do not fail the\n                # installation.\n                # FIXME: this won't upgrade when there's an existing\n                # package unpacked in `req_to_install.source_dir`\n                if os.path.exists(\n                        os.path.join(req_to_install.source_dir, 'setup.py')):\n                    raise PreviousBuildDirError(\n                        \"pip can't proceed with requirements '%s' due to a\"\n                        \" pre-existing build directory (%s). This is \"\n                        \"likely due to a previous installation that failed\"\n                        \". pip is being responsible and not assuming it \"\n                        \"can delete this. Please delete it and try again.\"\n                        % (req_to_install, req_to_install.source_dir)\n                    )\n                req_to_install.populate_link(\n                    self.finder,\n                    self._is_upgrade_allowed(req_to_install),\n                    self.require_hashes\n                )\n                # We can't hit this spot and have populate_link return None.\n                # req_to_install.satisfied_by is None here (because we're\n                # guarded) and upgrade has no impact except when satisfied_by\n                # is not None.\n                # Then inside find_requirement existing_applicable -> False\n                # If no new versions are found, DistributionNotFound is raised,\n                # otherwise a result is guaranteed.\n                assert req_to_install.link\n                link = req_to_install.link\n\n                # Now that we have the real link, we can tell what kind of\n                # requirements we have and raise some more informative errors\n                # than otherwise. (For example, we can raise VcsHashUnsupported\n                # for a VCS URL rather than HashMissing.)\n                if self.require_hashes:\n                    # We could check these first 2 conditions inside\n                    # unpack_url and save repetition of conditions, but then\n                    # we would report less-useful error messages for\n                    # unhashable requirements, complaining that there's no\n                    # hash provided.\n                    if is_vcs_url(link):\n                        raise VcsHashUnsupported()\n                    elif is_file_url(link) and is_dir_url(link):\n                        raise DirectoryUrlHashUnsupported()\n                    if (not req_to_install.original_link and\n                            not req_to_install.is_pinned):\n                        # Unpinned packages are asking for trouble when a new\n                        # version is uploaded. This isn't a security check, but\n                        # it saves users a surprising hash mismatch in the\n                        # future.\n                        #\n                        # file:/// URLs aren't pinnable, so don't complain\n                        # about them not being pinned.\n                        raise HashUnpinned()\n                hashes = req_to_install.hashes(\n                    trust_internet=not self.require_hashes)\n                if self.require_hashes and not hashes:\n                    # Known-good hashes are missing for this requirement, so\n                    # shim it with a facade object that will provoke hash\n                    # computation and then raise a HashMissing exception\n                    # showing the user what the hash should be.\n                    hashes = MissingHashes()\n\n                try:\n                    download_dir = requirement_set.download_dir\n                    # We always delete unpacked sdists after pip ran.\n                    autodelete_unpacked = True\n                    if req_to_install.link.is_wheel \\\n                            and requirement_set.wheel_download_dir:\n                        # when doing 'pip wheel` we download wheels to a\n                        # dedicated dir.\n                        download_dir = requirement_set.wheel_download_dir\n                    if req_to_install.link.is_wheel:\n                        if download_dir:\n                            # When downloading, we only unpack wheels to get\n                            # metadata.\n                            autodelete_unpacked = True\n                        else:\n                            # When installing a wheel, we use the unpacked\n                            # wheel.\n                            autodelete_unpacked = False\n                    unpack_url(\n                        req_to_install.link, req_to_install.source_dir,\n                        download_dir, autodelete_unpacked,\n                        session=self.session, hashes=hashes,\n                        progress_bar=requirement_set.progress_bar)\n                except requests.HTTPError as exc:\n                    logger.critical(\n                        'Could not install requirement %s because '\n                        'of error %s',\n                        req_to_install,\n                        exc,\n                    )\n                    raise InstallationError(\n                        'Could not install requirement %s because '\n                        'of HTTP error %s for URL %s' %\n                        (req_to_install, exc, req_to_install.link)\n                    )\n                abstract_dist = make_abstract_dist(req_to_install)\n                abstract_dist.prep_for_dist()\n                if requirement_set.is_download:\n                    # Make a .zip of the source_dir we already created.\n                    if req_to_install.link.scheme in vcs.all_schemes:\n                        req_to_install.archive(requirement_set.download_dir)\n                # req_to_install.req is only avail after unpack for URL\n                # pkgs repeat check_if_exists to uninstall-on-upgrade\n                # (#14)\n                if not self.ignore_installed:\n                    req_to_install.check_if_exists()\n                if 
req_to_install.satisfied_by:\n should_modify = (\n self.upgrade_strategy != \"to-satisfy-only\" or\n self.ignore_installed\n )\n if should_modify:\n # don't uninstall conflict if user install and\n # conflict is not user install\n if not (self.use_user_site and not\n dist_in_usersite(req_to_install.satisfied_by)):\n req_to_install.conflicts_with = \\\n req_to_install.satisfied_by\n req_to_install.satisfied_by = None\n else:\n logger.info(\n 'Requirement already satisfied (use '\n '--upgrade to upgrade): %s',\n req_to_install,\n )\n\n # register tmp src for cleanup in case something goes wrong\n requirement_set.reqs_to_cleanup.append(req_to_install)\n\n # ###################### #\n # # parse dependencies # #\n # ###################### #\n\n dist = abstract_dist.dist(self.finder)\n try:\n check_dist_requires_python(dist)\n except UnsupportedPythonVersion as err:\n if self.ignore_requires_python:\n logger.warning(err.args[0])\n else:\n raise\n more_reqs = []\n\n def add_req(subreq, extras_requested):\n sub_install_req = InstallRequirement.from_req(\n str(subreq),\n req_to_install,\n isolated=self.isolated,\n wheel_cache=requirement_set._wheel_cache,\n )\n more_reqs.extend(\n requirement_set.add_requirement(\n sub_install_req, req_to_install.name,\n extras_requested=extras_requested\n )\n )\n\n # We add req_to_install before its dependencies, so that we\n # can refer to it when adding dependencies.\n if not requirement_set.has_requirement(req_to_install.name):\n # 'unnamed' requirements will get added here\n requirement_set.add_requirement(req_to_install, None)\n\n if not self.ignore_dependencies:\n if req_to_install.extras:\n logger.debug(\n \"Installing extra requirements: %r\",\n ','.join(req_to_install.extras),\n )\n missing_requested = sorted(\n set(req_to_install.extras) - set(dist.extras)\n )\n for missing in missing_requested:\n logger.warning(\n '%s does not provide the extra \\'%s\\'',\n dist, missing\n )\n\n available_requested = sorted(\n set(dist.extras) & set(req_to_install.extras)\n )\n for subreq in dist.requires(available_requested):\n add_req(subreq, extras_requested=available_requested)\n\n if not req_to_install.editable and not req_to_install.satisfied_by:\n # XXX: --no-install leads this to report 'Successfully\n # downloaded' for only non-editable reqs, even though we took\n # action on them.\n requirement_set.successfully_downloaded.append(req_to_install)\n\n return more_reqs\n","sub_path":"pip/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":23594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"152848836","text":"import unittest\nimport lib\n\nclass TestCase(unittest.TestCase):\n def test_welcome_message(self):\n # given\n expected_msg = \"welcome to py_app built from bazel!\"\n # when\n actual_msg = lib.welcome_message()\n # then\n self.assertEqual(expected_msg, actual_msg)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"projects/py_app/lib_test.py","file_name":"lib_test.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"604533764","text":"import random\nfrom morestrategy_too.Util import getRandomMonsterName\nfrom morestrategy.GameObjects import Item\n\n\nSkeleton_Body = 0\nSkeleton_Head = 1\nSkeleton_Arm = 2\nSkeleton_Leg = 3\n\nclass MinionStats(object):\n HEALTH, ATTACK, DEFENSE, AGILITY = range (4)\n \n def __init__(self, health, attack, defense, agility):\n self.health = 
health\n        self.attack = attack\n        self.defense = defense\n        self.agility = agility\n    \n    def __getitem__ (self, index):\n        if index == 0:\n            return self.health\n        if index == 1:\n            return self.attack\n        if index == 2:\n            return self.defense\n        if index == 3:\n            return self.agility\n    \n    def __setitem__ (self, index, value):\n        if index == 0:\n            self.health = value\n        if index == 1:\n            self.attack = value\n        if index == 2:\n            self.defense = value\n        if index == 3:\n            self.agility = value\n    \n    def __str__(self, *args, **kwargs):\n        return \"(Health: \"+str(self.health)+\", Attack: \"+str(self.attack)+\", Defense: \"+str(self.defense)+\", Agility: \"+str(self.agility)+\")\"\n    \nclass BasicMinionComponent (object):\n    \n    def __init__ (self, baseItem):\n        self.baseItem = baseItem\n    \n    def isValid (self, rootObj = False):\n        if rootObj:\n            return False\n        return True\n    \n    def iterateOverChildren (self):\n        raise StopIteration \n    \nclass StatsOnlyComp (BasicMinionComponent):\n    \n    def __init__(self, baseItem):\n        BasicMinionComponent.__init__(self, baseItem)\n        self.statModifier = baseItem.statsEffect\n    \nclass LeafComp (StatsOnlyComp):\n    \n    def __init__(self, baseItem):\n        StatsOnlyComp.__init__(self, baseItem)\n        ib = baseItem.itemBody\n        self.augSlots = ib.augSlots\n        self.weaponSlots = ib.weaponSlots\n        self.armorSlots = ib.armorSlots\n        self.augments = []\n        self.weapons = []\n        self.armor = []\n    \n    def isValid (self, rootObj = False):\n        if rootObj:\n            return False\n        if len(self.augments) > self.augSlots:\n            return False\n        if len(self.weapons) > self.weaponSlots:\n            return False\n        if len(self.armor) > self.armorSlots:\n            return False\n        return True\n    \n    def iterateOverChildren (self):\n        for a in self.augments:\n            yield a\n            for c in a.iterateOverChildren():\n                yield c\n        for w in self.weapons:\n            yield w\n            for c in w.iterateOverChildren():\n                yield c\n        for a in self.armor:\n            yield a\n            for c in a.iterateOverChildren():\n                yield c\n        raise StopIteration\n    \nclass RootComp (LeafComp):\n    \n    def __init__(self, baseItem):\n        LeafComp.__init__(self, baseItem)\n        ib = baseItem.itemBody\n        self.headSlots = ib.headSlots\n        self.armSlots = ib.armSlots\n        self.legSlots = ib.legSlots\n        self.legs = []\n        self.heads = []\n        self.arms = []\n    \n    def isValid (self, rootObj = False):\n        \n        if not rootObj:\n            return False\n        \n        #self valid\n        if not LeafComp.isValid(self, False):\n            return False\n        \n        #children valid\n        for child in self.iterateOverChildren():\n            if not child.isValid():\n                return False\n        \n        #meta valid\n        headCount = 0\n        armCount = 0\n        legCount = 0\n        for c in self.iterateOverChildren():\n            if c.baseItem.iClass == Item.HEAD:\n                headCount += 1\n            elif c.baseItem.iClass == Item.ARM:\n                armCount += 1\n            elif c.baseItem.iClass == Item.LEG:\n                legCount += 1\n        \n        if headCount == 0:\n            return False\n        \n        if headCount > self.headSlots:\n            return False\n        \n        if armCount > self.armSlots:\n            return False\n        \n        if legCount > self.legSlots:\n            return False\n        \n        return True\n    \n    def iterateOverChildren (self):\n        for l in self.legs:\n            yield l\n            for c in l.iterateOverChildren():\n                yield c\n        for h in self.heads:\n            yield h\n            for c in h.iterateOverChildren():\n                yield c\n        for a in self.arms:\n            yield a\n            for c in a.iterateOverChildren():\n                yield c\n        \n        LeafComp.iterateOverChildren(self)\n        raise StopIteration\n\nclass MinionStatsModifier (MinionStats):\n    \n    def __init__(self, health, attack, defense, agility, isPercentage):\n        MinionStats.__init__(self, health, attack, defense, agility)\n        self.isPercentage = isPercentage\n    \n    def modifyAllStatsOnlyAbs (self, stats):\n        for i in range(4):\n            if self.isPercentage[i] == \"0\":\n                stats[i] = self[i] + stats[i]\n    \n    def modifyAllStatsOnlyPer (self, stats):\n        for i in range(4):\n            if self.isPercentage[i] == \"1\":\n                stats[i] = stats[i] + (self[i] / 100) * stats[i]\n\nclass Minion (object):\n\n    def __init__ (self, skeleton, stats, name = getRandomMonsterName()):\n        self.name = name\n        self.originalStats = stats\n        self.cStats = MinionStats(stats.health, stats.attack, stats.defense, stats.agility)\n        self.skeleton = skeleton\n    \n    def takeAHit (self, attack):\n        damage = attack * attack / (self.cStats.defense + attack)\n        print (\"Raw Attack: \"+str(attack)+\", Real Damage: \"+str(damage))\n        self.cStats.health -= damage\n        if self.cStats.health < 0:\n            self.cStats.health = 0\n    \n    def __str__(self, *args, **kwargs):\n        return str(self.originalStats)\n    \n    def getPrettyStatStr (self, statIndex):\n        orgStat = self.originalStats[statIndex]\n        cStat = self.cStats[statIndex]\n        \n        if orgStat != cStat:\n            return str(cStat)+\"/\"+str(orgStat)\n        return str(orgStat)\n\ndef createMinion (minionSkelet):\n    if not minionSkelet.isValid(True):\n        return\n    \n    stats = MinionStats(0,0,0,0)\n    for b in minionSkelet.iterateOverChildren():\n        b.statModifier.modifyAllStatsOnlyAbs(stats)\n    \n    for b in minionSkelet.iterateOverChildren():\n        b.statModifier.modifyAllStatsOnlyPer(stats)\n    \n    return Minion(minionSkelet, stats)\n\ndef convertItemToMinionComp (item):\n    if item.iClass == Item.TRASH:\n        print (\"Trash items cant become Minion parts!\")\n        return None\n    if item.iClass == Item.BODY:\n        return RootComp(item)\n    if item.iClass == Item.HEAD or item.iClass == Item.ARM or item.iClass == Item.LEG:\n        return LeafComp(item)\n    if item.iClass == Item.AUG or item.iClass == Item.WEAPON or item.iClass == Item.ARMOR:\n        return StatsOnlyComp(item)\n    \n    print (\"Couldn't convert item to minion part. Returning None\")\n    \ndef letMinionFightMinion (minionA, minionB):\n    ms = [minionA, minionB]\n    if minionA.cStats.agility > minionB.cStats.agility:\n        cIMin = 0\n    elif minionA.cStats.agility == minionB.cStats.agility:\n        cIMin = random.choice([0,1])\n    else:\n        cIMin = 1\n    nIMin = (cIMin + 1) % 2\n    while (minionA.cStats.health > 0 and minionB.cStats.health > 0):\n        ms[nIMin].takeAHit(ms[cIMin].cStats.attack)\n        if nIMin == 0:\n            print (\"B hit A with \"+str(ms[cIMin].cStats.attack)+\", A.health = \"+str(ms[nIMin].cStats.health))\n        else:\n            print (\"A hit B with \"+str(ms[cIMin].cStats.attack)+\", B.health = \"+str(ms[nIMin].cStats.health))\n        nIMin = cIMin\n        cIMin = (cIMin + 1) % 2\n    \n    \n    \n    ","sub_path":"morestrategy/Minions.py","file_name":"Minions.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"644733971","text":"import logging,json,sys,requests,pprint\n\ndef main():\n    logging.basicConfig(level=logging.INFO,format='%(asctime)s - %(levelname)s- %(message)s')\n\n    #get location \n    location=\"\"\n    if len(sys.argv) <2:\n        location=input('Type locaton: ').replace(' ','+')\n    else:\n        location='+'.join(sys.argv[1:])\n    #print(location)\n\n    #download JSON\n    #url='https://api.openweathermap.org/data/2.5/weather?q=%s&appid=542ffd081e67f4512b705f89d2a611b2' %location\n    url='http://api.openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3&appid=542ffd081e67f4512b705f89d2a611b2' %location\n\n    #print(url)\n    response = requests.get(url)\n    response.raise_for_status()\n    #print(response.text)\n    w=json.loads(response.text)\n    #print(response.text)\n    #pprint.pprint(w)\n\n    print('Current weather in %s:' % (location))\n    print('Today: '+w['list'][0]['weather'][0]['description']+'\\n')\n    print('Tomorrow: '+w['list'][1]['weather'][0]['description']+'\\n')\n    print('Day after tomorrow: '+w['list'][2]['weather'][0]['description'])\n\n    #print(w['weather'][0]['description'])\nif __name__ == \"__main__\":\n    main()","sub_path":"automate_with_python/chp14_JSON_weather.py","file_name":"chp14_JSON_weather.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"169989053","text":"import numpy as np\ndef lr(X, Y):\n    # This function perform linear regression on X and Y,\n    # return slope m and intercept c\n    A = np.array([[X[i], 1] for i in range(len(X))]) # A = [X, 1]\n    b = np.array([[Y[i]] for i in range(len(X))]) # b = Y in column form\n    AT = np.transpose(A)\n    ATA = np.dot(AT, A)\n    ATA_inv = np.linalg.inv(ATA)\n    ATb = np.dot(AT, b)\n    C = np.transpose(np.dot(ATA_inv, ATb)) # [m,c] = (AT*A)_inv*AT*b\n    [m, c] = C[0]\n    return [m, c]","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"335434195","text":"from utils.path_finder import resolve_path_from_project_dir\nimport json\n\n\ndef get_class_from_name(input_name):\n    entity_config = json.load(open(resolve_path_from_project_dir('configs/entity_configuration.json')))\n    for (key, value) in entity_config.items():\n        if value['name'] == input_name:\n            return key\n\n\ndef get_index(value, input_list):\n    try:\n        index_value = input_list.index(value)\n    except ValueError:\n        index_value = -1\n    return 
index_value\n","sub_path":"utils/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"96569625","text":"\"\"\" Handles the fetching of requests on the web.\n\"\"\"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys \nfrom selenium.webdriver.chrome.options import Options\nfrom boilerpipe.extract import Extractor\nfrom multiprocessing import RLock\n\nimport requests\n# Specify a specific build using the following two arguments\n#\"deviceMetrics\": { \"width\": 360, \"height\": 640, \"pixelRatio\": 3.0 },\n #\"userAgent\": \"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19\" }\n\n\nclass FetchService:\n def __init__(self, url, desk_width=1920, desk_height=1080, mobile_width=360, mobile_height=640, mobile_pixel_ratio=3.0):\n self.url = url\n\n # Make sure status code is 200\n r = requests.head(url)\n\n # Attempt to follow redirects\n attempts = 0\n while r.status_code == 301 and attempts < 3:\n # If no location to follow through, give up\n if 'Location' not in r.headers:\n raise requests.ConnectionError\n # Navigate to new location\n r = requests.head(r.headers['Location'])\n attempts = attempts + 1\n\n if r.status_code != 200:\n # Throw exception with status code\n raise requests.ConnectionError(str(r.status_code))\n\n chrome_options = Options() \n chrome_options.add_argument(\"--hide-scrollbars\")\n chrome_options.set_headless()\n\n # Start chrome driver, and set window to initial width and height\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_window_size(desk_width, desk_height)\n\n # Set load timeouts to load for 20s, 10s is default\n driver.implicitly_wait(20)\n\n # Set script timeouts to prevent malicious execution\n driver.set_script_timeout(20)\n\n # Grab desktop view\n driver.get(self.url)\n\n # Expand height to be the full length of the page for image processing\n height = driver.execute_script(\"return Math.max(document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight);\")\n driver.set_window_size(desk_width, height + 100)\n\n # Extract text from desktop view\n self.__html = driver.page_source\n extractor = Extractor(extractor='ArticleExtractor', html=self.__html)\n self.__text = extractor.getText()\n\n # Add a lock to the driver so there are no access colisions during threaded execution\n driver.lock = RLock()\n \n # We need to save this driver in order for heuristics to analyse the DOM\n self.__desk_driver = driver\n\n # Start chrome for mobile view\n mobile_emulation = {\"deviceMetrics\": { \"width\": mobile_width, \"height\": mobile_height, \"pixelRatio\": mobile_pixel_ratio },\n \"userAgent\": \"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19\" }\n\n # Enable mobile emulation for headless\n chrome_options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n\n # Set load timeouts to load for 20s, 10s is default\n driver.implicitly_wait(20)\n\n # Set script timeouts to prevent malicious execution\n driver.set_script_timeout(20)\n\n\n # Nav to page and expand to full view\n driver.get(self.url)\n height = 
driver.execute_script(\"return Math.max(document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight);\")\n \n # Add a lock to the driver so there are no access colisions during threaded execution\n driver.lock = RLock()\n\n # We need to save this driver in order for heuristics to analyse the mobile DOM\n self.__mobile_driver = driver\n\n def extract_text(self):\n return self.__text\n\n def get_desk_driver(self):\n return self.__desk_driver\n\n def get_mobile_driver(self):\n return self.__mobile_driver\n\n def get_html(self):\n return self.__html\n","sub_path":"api/common/webfetch.py","file_name":"webfetch.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"538990761","text":"from django.core.urlresolvers import reverse, reverse_lazy\nfrom django.shortcuts import render, HttpResponseRedirect\nfrom bookmarks.models import Bookmark\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom . import forms\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef index(request):\n bookmarks = Bookmark.objects.all().order_by('-id').filter(user_id=request.user.id)\n context = {'form': forms.UrlForm, 'bookmarks': bookmarks}\n return render(request, 'index.html', context)\n\n\ndef login(request):\n if not request.user.is_authenticated():\n if request.method == 'POST':\n form = forms.LoginForm(request.POST)\n if form.is_valid():\n user = auth.authenticate(username=form.cleaned_data['login'], password=form.cleaned_data['password'])\n if user:\n auth.login(request, user)\n return HttpResponseRedirect(reverse('index'))\n else:\n form = forms.LoginForm\n return render(request, 'login.html', {'form': form})\n else:\n return HttpResponseRedirect(reverse('index'))\n\n\ndef logout(request):\n if request.user.is_authenticated():\n auth.logout(request)\n return HttpResponseRedirect(reverse('index'))\n\n\ndef register(request):\n if not request.user.is_authenticated():\n if request.method == 'POST':\n form = forms.RegisterForm(request.POST)\n if form.is_valid():\n User.objects.create_user(username=form.cleaned_data['login'], password=form.cleaned_data['password'])\n return HttpResponseRedirect(reverse('index'))\n else:\n form = forms.RegisterForm\n return render(request, 'register.html', {'form': form})\n else:\n return HttpResponseRedirect(reverse('index'))\n","sub_path":"async_bookmarks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"585045708","text":"import logging\nimport os\nimport sys\n\n# Add '/opt' to the PATH for Lambda Layers\nsys.path.append('/opt')\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom security_helpers import validate_token\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nBLACKLIST_TABLE = os.getenv('BLACKLIST_TABLE')\n\n\ndef generate_policy(principal_id, effect=None, resource=None, context=None):\n auth_response = {\n 'principalId': principal_id\n }\n\n if effect and resource:\n auth_response['policyDocument'] = {\n 'Version': '2012-10-17',\n 'Statement': [\n {\n 'Action': 'execute-api:Invoke',\n 'Effect': effect,\n 'Resource': '/'.join(resource.split('/')[:2] + ['*'])\n }\n ]\n }\n\n if context:\n auth_response['context'] = 
dict()\n for k, v in context.items():\n auth_response['context'][k] = str(v)\n\n logger.info(auth_response)\n return auth_response\n\n\ndef read_token_from_header(event):\n try:\n type_, token = event['authorizationToken'].split()\n except (ValueError, AttributeError):\n logger.error(\"No token found\")\n raise Exception('Unauthorized')\n\n if type_ != 'Bearer':\n logger.error(\"Incorrect authentication type: \"\n f\"{event['authorizationToken']}\")\n raise Exception('Unauthorized')\n\n return token\n\n\ndef is_token_blacklisted(token_id):\n blacklist_table = boto3.resource('dynamodb').Table(BLACKLIST_TABLE)\n\n try:\n resp = blacklist_table.get_item(Key={'token_id': token_id})\n except ClientError:\n logger.exception('Unable to read from DynamoDB!')\n raise Exception('Internal Server Error')\n\n return bool(resp.get('Item'))\n\n\ndef lambda_handler(event, context):\n method_arn = event['methodArn']\n token = read_token_from_header(event)\n\n logger.info('Validating token...')\n decoded_token = validate_token(token)\n\n logger.info('Checking token blacklist...')\n\n if is_token_blacklisted(decoded_token['jti']):\n return generate_policy(token, 'Deny', method_arn)\n\n return generate_policy(token, 'Allow', method_arn, decoded_token)\n","sub_path":"src/functions/authorizer/authorizer.py","file_name":"authorizer.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"632435417","text":"# Copyright © by Mohammad Mahmud\n# Problem 2\ndef main():\n\n # This file read's it!\n\n word_list = open('test.txt').read().split()\n\n final_word = []\n\n # this loop replaced a four letter word into *** & save into list\n for x, y in enumerate(word_list):\n if len(y) == 4:\n a = word_list[x].replace(str(y), '****')\n final_word.append(a)\n else:\n final_word.append(y)\n\n a = \" \".join(final_word)\n\n # create the new file and save's it.\n final_file = open('final_file.txt','w')\n\n final_file.writelines(a)\n\n\nmain()\n","sub_path":"Data Structures/Assignment 2/Data Structures - Assignment 2.py","file_name":"Data Structures - Assignment 2.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"289198782","text":"#!/home/hmsjwzb/python/bin/python3.5\n\nimport sys\nfrom DirectedCycle import DirectedCycle\nfrom DepthFirstOrder import DepthFirstOrder\nfrom SymbolDigraph import SymbolDigraph\n\nclass Topological:\n def __init__(self, G):\n finder = DirectedCycle(G)\n if not finder.hasCycle():\n dfs = DepthFirstOrder(G)\n self.order = dfs.reversePost()\n self.rank = [0] * G.Vertex()\n i = 0\n for v in self.order:\n i += 1\n self.rank[v] = i\n\n def Order(self):\n return self.order\n\n def hasOrder(self):\n return self.order != None\n\n def isDAG(self):\n return self.hasOrder()\n\n def Rank(self, v):\n self.validateVertex(v)\n if self.hasOrder():\n return self.rank[v]\n else:\n return -1\n\n def validateVertex(self, v):\n V = len(self.rank)\n if v < 0 or v >= V:\n raise(\"vertex %d is not between 0 and %d \" % (v, V-1))\n\nif __name__ == '__main__':\n filename = sys.argv[1]\n delimiter = sys.argv[2]\n sg = SymbolDigraph(filename, delimiter)\n topological = Topological(sg.digraph())\n for v in topological.Order():\n 
print(sg.nameOf(v))\n","sub_path":"ch4/Topological/Topological.py","file_name":"Topological.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"78917123","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport logging\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n self.response.write('Hello, world!')\n\nclass UserHandler(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.write(\"User Handler\")\nclass AdminHandler(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.write(\"Admin Handler\")\nclass ProductHandler(webapp2.RequestHandler):\n\tdef get(self, product_id):\n\t\t self.response.write('This is the ProductHandler. '\n 'The product id is %s' % product_id)\n\ndef handle_404(request, response, exception):\n logging.exception(exception)\n response.write(\"This page is not found !!!!\")\n response.set_status(404)\n\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/user', UserHandler),\n ('/admin', AdminHandler),\n ('/product/(\\d+)', ProductHandler),\n], debug=True)\napp.error_handlers[404] = handle_404","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"267229836","text":"#!/usr/bin/python\nimport os.path\nfrom exception import *\nfrom pyPdf import PdfFileReader\n\nclass Builder(object):\n def __init__(self, lite, save, outputFile, inputFiles):\n self.lite = lite\n self.save = save\n self.outputFile = outputFile\n self.inputFiles = inputFiles\n\n def checkOutput(self):\n if self.outputFile == '':\n raise MissingFileError('Missing output file name')\n\n def checkInput(self):\n num = len(self.inputFiles)\n if num == 0:\n raise MissingFileError('Missing input files')\n if num == 1:\n raise MissingFileError('At least two input files are required')\n\n for inputFile in self.inputFiles:\n if not os.path.isfile(inputFile):\n raise FileError(inputFile + ' is not a file')\n try:\n pdf = PdfFileReader(file(inputFile, 'rb'))\n except:\n raise FileError(inputFile + ' is not a PDF file')\n\n def checkOverwrite(self):\n raise NotImplementedError('checkOverwrite() was not implemented')\n\n def merge(self):\n raise NotImplementedError('merge() was not implemented')\n","sub_path":"pdfm/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"80632834","text":"#!/usr/bin/env python\n\"\"\"\nentry of mongodb connection\n\"\"\"\nimport threading\nfrom .mongodb import connect_mongodb\n\nlock = threading.Lock()\n\n\ndef save_blog_to_mongodb(blog):\n \"\"\"\n get the instance of mongodb collection and save blog-data\n :param blog: dict data\n :return: None\n \"\"\"\n lock.acquire()\n mongodb = connect_mongodb()\n 
mongodb.save_blog(blog)\n    lock.release()\n","sub_path":"save_data/save_data.py","file_name":"save_data.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"486079761","text":"import argparse\nimport os\nimport sys\nimport numpy as np\nimport cv2\n\nfrom deeplab_resnet import decode_labels\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"Prepare segmentation data\")\n    parser.add_argument(\"data_list\", type=str,\n                        help=\"list with (image, label) pairs\")\n    parser.add_argument(\"data_dir\", type=str, default=\"\",\n                        help=\"Path to the directory of the dataset.\")\n    args = parser.parse_args()\n\n    with open(args.data_list, 'r') as f:\n        lines = f.read().split('\\n')\n\n    print(len(lines))\n\n    i = 0\n    for line in lines:\n        img_path, seg_path = line.split('\\t')\n        img_path = os.path.join(args.data_dir, img_path)\n        seg_path = os.path.join(args.data_dir, seg_path)\n\n        if not os.path.exists(seg_path):\n            print(\"skip non-existent: \"+ seg_path)\n            continue\n        if not os.path.exists(img_path):\n            print(\"skip non-existent: \"+ img_path)\n            continue\n\n        seg = cv2.imread(seg_path, cv2.IMREAD_UNCHANGED)\n\n        seg = np.expand_dims(np.expand_dims(seg, 0), -1)\n\n        msk = decode_labels(seg, num_classes=np.max(seg) + 1)\n        im = msk[0]\n        img_o = cv2.imread(img_path)\n\n        img_path = str(img_path)\n\n        print(im.shape, im.dtype)\n        print(img_o.shape, img_o.dtype)\n        # img = np.array(im) * 0.9 + np.array(img_o) * 0.7\n        img = np.hstack([im, img_o])\n        img[img > 255] = 255\n\n        cv2.imshow(\"labels\", img.astype(np.uint8))\n        cv2.waitKey(0)\n","sub_path":"visualize_seg_labels.py","file_name":"visualize_seg_labels.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"475476987","text":"'''\ncifar10 test\n'''\nimport os\nimport random\nfrom pathlib import Path\nimport tensorflow as tf\nimport numpy as np\nimport vgg16_inference\nimport cifar10_reader\n\n# clear any stale summary directories from a previous run\nDELDIR1 = 'train'\nDELDIR2 = 'test'\nif Path(DELDIR1).exists():\n    for f in os.listdir(DELDIR1):\n        filePath = os.path.join(DELDIR1, f)\n        os.remove(filePath)\n    os.rmdir(DELDIR1)\nif Path(DELDIR2).exists():\n    for f in os.listdir(DELDIR2):\n        filePath = os.path.join(DELDIR2, f)\n        os.remove(filePath)\n    os.rmdir(DELDIR2)\n\nlayerName_list_vgg16 = iter([\n    'block1_conv1', 'block1_conv2', 'block1_pool', 'block2_conv1',\n    'block2_conv2', 'block2_pool', 'block3_conv1', 'block3_conv2',\n    'block3_conv3', 'block3_pool', 'block4_conv1', 'block4_conv2',\n    'block4_conv3', 'block4_pool', 'block5_conv1', 'block5_conv2',\n    'block5_conv3', 'block5_pool'\n])\nparameters_vgg16 = []\ndefault_initializer = tf.contrib.layers.xavier_initializer\nLAYER1_NODES = 200\nLAYER2_NODES = 200\nLAYER3_NODES = 200\nDROP_OUT_RATE = 1\nLAMBDA = 0.01\nBATCH_SIZE = 100\nEPOCH = 100\nLEARNING_RATE = 0.01\n\n# Define the network inputs\nx = tf.placeholder(tf.float32, [None, 32, 32, 3])\ny_ = tf.placeholder(tf.int32, [\n    None,\n])\nkeep_prob = tf.placeholder(tf.float32)\n\n# Build the VGG16 convolutional layers\ny_block5_pool, _, _ = vgg16_inference.inference(x, layerName_list_vgg16,\n                                                parameters_vgg16)\n\n# Flatten operation\nshp = y_block5_pool.get_shape()\nflattened_shape = shp[1].value * shp[2].value * shp[3].value\nresh1 = tf.reshape(y_block5_pool, [-1, flattened_shape], name=\"reshape\")\n\n# Build the fully connected layers\nlayer1_weights = tf.get_variable(\n    name='layer1_weights',\n    shape=[flattened_shape, LAYER1_NODES],\n 
initializer=default_initializer(),\n    trainable=True)\nlayer1_biases = tf.get_variable(\n    name='layer1_biases',\n    shape=[LAYER1_NODES],\n    initializer=tf.constant_initializer(0.1),\n    trainable=True)\ny = tf.nn.relu_layer(resh1, layer1_weights, layer1_biases, name='layer1')\ny_drop = tf.nn.dropout(y, keep_prob, name=\"layer1_drop\")\n\nlayer2_weights = tf.get_variable(\n    name='layer2_weights',\n    shape=[LAYER1_NODES, LAYER2_NODES],\n    initializer=default_initializer(),\n    trainable=True)\nlayer2_biases = tf.get_variable(\n    name='layer2_biases',\n    shape=[LAYER2_NODES],\n    initializer=tf.constant_initializer(0.1),\n    trainable=True)\ny = tf.nn.relu_layer(y_drop, layer2_weights, layer2_biases, name='layer2')\ny_drop = tf.nn.dropout(y, keep_prob, name=\"layer2_drop\")\n\nlayer3_weights = tf.get_variable(\n    name='layer3_weights',\n    shape=[LAYER2_NODES, LAYER3_NODES],\n    initializer=default_initializer(),\n    trainable=True)\nlayer3_biases = tf.get_variable(\n    name='layer3_biases',\n    shape=[LAYER3_NODES],\n    initializer=tf.constant_initializer(0.1),\n    trainable=True)\ny = tf.nn.relu_layer(y_drop, layer3_weights, layer3_biases, name='layer3')\ny_drop = tf.nn.dropout(y, keep_prob, name=\"layer3_drop\")\noutput_weights = tf.get_variable(\n    name='output_weights',\n    shape=[LAYER3_NODES, 10],  # input y_drop comes from layer3\n    initializer=default_initializer(),\n    trainable=True)\noutput_biases = tf.get_variable(\n    name='output_biases',\n    shape=[10],\n    initializer=tf.constant_initializer(0.1),\n    trainable=True)\ny = tf.add(tf.matmul(y_drop, output_weights), output_biases, 'output')\n\ntf.summary.histogram('layer1_weights', layer1_weights)\ntf.summary.histogram('layer2_weights', layer2_weights)\ntf.summary.histogram('layer3_weights', layer3_weights)\ntf.summary.histogram('output_weights', output_weights)\n\n# Define the loss\nregularizer = tf.contrib.layers.l2_regularizer(LAMBDA)\nloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_)) +\\\n    regularizer(layer1_weights) +\\\n    regularizer(layer2_weights) + regularizer(layer3_weights) + \\\n    regularizer(output_weights)\ntf.summary.scalar('loss', loss)\ntrain_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n\n# Define the accuracy metric\naccuracy = tf.reduce_mean(\n    tf.cast(tf.equal(tf.argmax(y, 1, output_type=tf.int32), y_), tf.float32))\ntf.summary.scalar('accuracy', accuracy)\n\n# Read the CIFAR-10 data\ndict_data, dict_label = cifar10_reader.read('cifar-10-batches-py')\ntest_data, test_label = cifar10_reader.read('cifar-10-batches-py', False)\ntest_data = test_data[0:500]\ntest_label = test_label[0:500]\ntotallist = list(range(50000))\n\n# Start the session\nsess = tf.Session()\nmerged = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter('./train', sess.graph, flush_secs=5)\ntest_writer = tf.summary.FileWriter('./test', flush_secs=5)\nsess.run(tf.global_variables_initializer())\n\n# Load the pretrained network parameters\nweights = np.load('vgg16_weights.npz')\nkeys = sorted(weights.keys())\nfor i, k in enumerate(keys):\n    # sess.run(parameters_vgg16[i].assign(weights[k]))\n    if i < 26:\n        parameters_vgg16[i].load(weights[k], sess)\n\n# Start training\nfor i in range(EPOCH):\n    randomlist = random.sample(totallist, BATCH_SIZE)\n    x_train = dict_data[randomlist]\n    y_train = dict_label[randomlist]\n    sess.run(train_step, {x: x_train, y_: y_train, keep_prob: DROP_OUT_RATE})\n    train_summary, train_accuracy = sess.run([merged, accuracy], {\n        x: x_train,\n        y_: y_train,\n        keep_prob: 1\n    })\n    train_writer.add_summary(train_summary, i)\n    test_summary, test_accuracy = sess.run([merged, accuracy], {\n        x: test_data,\n        y_: test_label,\n        keep_prob: 
1\n    })\n    test_writer.add_summary(test_summary, i)\n    print('step %d, train accuracy is %f, test accuracy is %f' %\n          (i, train_accuracy, test_accuracy))\n\n# feed keep_prob for inference; the logits tensor y is fetched, not fed\npredict_y = sess.run(y, feed_dict={x: [test_data[0]], keep_prob: 1})\nprint(predict_y)\n","sub_path":"cifar10_test.py","file_name":"cifar10_test.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"437061785","text":"n=int(input())\nres=[]\nfor _ in range(n):\n    k=int(input())\n    nums=list(map(int, input().split(\" \")))\n    temp=1\n    for h in nums:\n        temp=temp*h\n    ans=[]\n    for h in nums:\n        ans.append(temp//h)\n    res.append(ans)\nfor t in res:\n    for h in t:\n        print(str(h)+\" \", end=\"\")\n    print()","sub_path":"Code/CodeRecords/2468/60749/286235.py","file_name":"286235.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"442652351","text":"import msgpack\nimport pytest\n\nimport salt.config\nimport salt.transport.zeromq\n\n\nasync def test_req_server_garbage_request(io_loop):\n    \"\"\"\n    Validate invalid msgpack messages will not raise exceptions in the\n    RequestServer's message handler.\n    \"\"\"\n    opts = salt.config.master_config(\"\")\n    request_server = salt.transport.zeromq.RequestServer(opts)\n\n    def message_handler(payload):\n        return payload\n\n    request_server.post_fork(message_handler, io_loop)\n\n    byts = msgpack.dumps({\"foo\": \"bar\"})\n    badbyts = byts[:3] + b\"^M\" + byts[3:]\n\n    try:\n        ret = await request_server.handle_message(None, badbyts)\n    except Exception as exc:  # pylint: disable=broad-except\n        pytest.fail(f\"Exception was raised {exc}\")\n    finally:\n        request_server.close()\n\n    assert ret == {\"msg\": \"bad load\"}\n","sub_path":"tests/pytests/unit/transport/test_zeromq.py","file_name":"test_zeromq.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"117671585","text":"class Walker(object):\n    def __init__(self):\n        self.x = width/2\n        self.y = height/2\n    \n    def display(self):\n        stroke(0)\n        point(self.x, self.y)\n    \n    def step(self):\n        r = random(0, 1)\n        if r < 0.4:\n            self.x += 1\n        elif r < 0.6:\n            self.x -= 1\n        elif r < 0.8:\n            self.y += 1\n        else:\n            self.y -= 1\n    ","sub_path":"NOC_Chp0/NOC_0_3/walker.py","file_name":"walker.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"462115445","text":"\"\"\"Categorical GRU Policy.\n\nA policy represented by a Categorical distribution\nwhich is parameterized by a Gated Recurrent Unit (GRU).\n\"\"\"\n# pylint: disable=wrong-import-order\nimport akro\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage.experiment import deterministic\nfrom garage.tf.models import CategoricalGRUModel\nfrom garage.tf.policies.policy import Policy\n\n\n# pylint: disable=too-many-ancestors\nclass CategoricalGRUPolicy(CategoricalGRUModel, Policy):\n    \"\"\"Categorical GRU Policy.\n\n    A policy represented by a Categorical distribution\n    which is parameterized by a Gated Recurrent Unit (GRU).\n\n    It only works with an akro.Discrete action space.\n\n    Args:\n        env_spec (garage.envs.env_spec.EnvSpec): Environment specification.\n        name (str): Policy name, also the variable scope.\n        hidden_dim (int): Hidden dimension for the GRU cell.\n        hidden_nonlinearity (callable): Activation function for intermediate\n            dense layer(s). 
It should return a tf.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n recurrent_nonlinearity (callable): Activation function for recurrent\n layers. It should return a tf.Tensor. Set it to None to\n maintain a linear activation.\n recurrent_w_init (callable): Initializer function for the weight\n of recurrent layer(s). The function should return a\n tf.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a tf.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n tf.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n tf.Tensor.\n hidden_state_init (callable): Initializer function for the\n initial hidden state. The functino should return a tf.Tensor.\n hidden_state_init_trainable (bool): Bool for whether the initial\n hidden state is trainable.\n state_include_action (bool): Whether the state includes action.\n If True, input dimension will be\n (observation dimension + action dimension).\n layer_normalization (bool): Bool for using layer normalization or not.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n name='CategoricalGRUPolicy',\n hidden_dim=32,\n hidden_nonlinearity=tf.nn.tanh,\n hidden_w_init=tf.initializers.glorot_uniform(\n seed=deterministic.get_tf_seed_stream()),\n hidden_b_init=tf.zeros_initializer(),\n recurrent_nonlinearity=tf.nn.sigmoid,\n recurrent_w_init=tf.initializers.glorot_uniform(\n seed=deterministic.get_tf_seed_stream()),\n output_nonlinearity=tf.nn.softmax,\n output_w_init=tf.initializers.glorot_uniform(\n seed=deterministic.get_tf_seed_stream()),\n output_b_init=tf.zeros_initializer(),\n hidden_state_init=tf.zeros_initializer(),\n hidden_state_init_trainable=False,\n state_include_action=True,\n layer_normalization=False):\n if not isinstance(env_spec.action_space, akro.Discrete):\n raise ValueError('CategoricalGRUPolicy only works'\n 'with akro.Discrete action space.')\n\n self._env_spec = env_spec\n self._obs_dim = env_spec.observation_space.flat_dim\n self._action_dim = env_spec.action_space.n\n\n self._hidden_dim = hidden_dim\n self._hidden_nonlinearity = hidden_nonlinearity\n self._hidden_w_init = hidden_w_init\n self._hidden_b_init = hidden_b_init\n self._recurrent_nonlinearity = recurrent_nonlinearity\n self._recurrent_w_init = recurrent_w_init\n self._output_nonlinearity = output_nonlinearity\n self._output_w_init = output_w_init\n self._output_b_init = output_b_init\n self._hidden_state_init = hidden_state_init\n self._hidden_state_init_trainable = hidden_state_init_trainable\n self._layer_normalization = layer_normalization\n self._state_include_action = state_include_action\n\n if state_include_action:\n self._input_dim = self._obs_dim + self._action_dim\n else:\n self._input_dim = self._obs_dim\n\n self._f_step_prob = None\n\n super().__init__(\n output_dim=self._action_dim,\n hidden_dim=self._hidden_dim,\n name=name,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n recurrent_nonlinearity=recurrent_nonlinearity,\n recurrent_w_init=recurrent_w_init,\n 
hidden_state_init=hidden_state_init,\n hidden_state_init_trainable=hidden_state_init_trainable,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization)\n\n self._prev_actions = None\n self._prev_hiddens = None\n self._init_hidden = None\n\n self._initialize_policy()\n\n def _initialize_policy(self):\n \"\"\"Initialize policy.\"\"\"\n state_input = tf.compat.v1.placeholder(shape=(None, None,\n self._input_dim),\n name='state_input',\n dtype=tf.float32)\n step_input_var = tf.compat.v1.placeholder(shape=(None,\n self._input_dim),\n name='step_input',\n dtype=tf.float32)\n step_hidden_var = tf.compat.v1.placeholder(shape=(None,\n self._hidden_dim),\n name='step_hidden_input',\n dtype=tf.float32)\n (_, step_out, step_hidden,\n self._init_hidden) = super().build(state_input, step_input_var,\n step_hidden_var).outputs\n\n self._f_step_prob = tf.compat.v1.get_default_session().make_callable(\n [step_out, step_hidden],\n feed_list=[step_input_var, step_hidden_var])\n\n # pylint: disable=arguments-differ\n def build(self, state_input, name=None):\n \"\"\"Build policy.\n\n Args:\n state_input (tf.Tensor) : State input.\n name (str): Name of the policy, which is also the name scope.\n\n Returns:\n tfp.distributions.OneHotCategorical: Policy distribution.\n tf.Tensor: Step output, with shape :math:`(N, S^*)`.\n tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.\n tf.Tensor: Initial hidden state , used to reset the hidden state\n when policy resets. Shape: :math:`(S^*)`.\n\n \"\"\"\n _, step_input_var, step_hidden_var = self.inputs\n return super().build(state_input,\n step_input_var,\n step_hidden_var,\n name=name)\n\n @property\n def input_dim(self):\n \"\"\"int: Dimension of the policy input.\"\"\"\n return self._input_dim\n\n def reset(self, do_resets=None):\n \"\"\"Reset the policy.\n\n Note:\n If `do_resets` is None, it will be by default np.array([True]),\n which implies the policy will not be \"vectorized\", i.e. 
the number of\n            parallel environments for training data sampling is 1.\n\n        Args:\n            do_resets (numpy.ndarray): Bool that indicates terminal state(s).\n\n        \"\"\"\n        if do_resets is None:\n            do_resets = [True]\n        do_resets = np.asarray(do_resets)\n        if self._prev_actions is None or len(do_resets) != len(\n                self._prev_actions):\n            self._prev_actions = np.zeros(\n                (len(do_resets), self.action_space.flat_dim))\n            self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))\n\n        self._prev_actions[do_resets] = 0.\n        self._prev_hiddens[do_resets] = self._init_hidden.eval()\n\n    def get_action(self, observation):\n        \"\"\"Return a single action.\n\n        Args:\n            observation (numpy.ndarray): Observations.\n\n        Returns:\n            int: Action given input observation.\n            dict(numpy.ndarray): Distribution parameters.\n\n        \"\"\"\n        actions, agent_infos = self.get_actions([observation])\n        return actions[0], {k: v[0] for k, v in agent_infos.items()}\n\n    def get_actions(self, observations):\n        \"\"\"Return multiple actions.\n\n        Args:\n            observations (numpy.ndarray): Observations.\n\n        Returns:\n            list[int]: Actions given input observations.\n            dict(numpy.ndarray): Distribution parameters.\n\n        \"\"\"\n        if not isinstance(observations[0],\n                          np.ndarray) or len(observations[0].shape) > 1:\n            observations = self.observation_space.flatten_n(observations)\n        if self._state_include_action:\n            assert self._prev_actions is not None\n            all_input = np.concatenate([observations, self._prev_actions],\n                                       axis=-1)\n        else:\n            all_input = observations\n        probs, hidden_vec = self._f_step_prob(all_input, self._prev_hiddens)\n        actions = list(map(self.action_space.weighted_sample, probs))\n        prev_actions = self._prev_actions\n        self._prev_actions = self.action_space.flatten_n(actions)\n        self._prev_hiddens = hidden_vec\n        agent_info = dict(prob=probs)\n        if self._state_include_action:\n            agent_info['prev_action'] = np.copy(prev_actions)\n        return actions, agent_info\n\n    @property\n    def env_spec(self):\n        \"\"\"Policy environment specification.\n\n        Returns:\n            garage.EnvSpec: Environment specification.\n\n        \"\"\"\n        return self._env_spec\n\n    @property\n    def state_info_specs(self):\n        \"\"\"State info specification.\n\n        Returns:\n            List[str]: keys and shapes for the information related to the\n                policy's state when taking an action.\n\n        \"\"\"\n        if self._state_include_action:\n            return [\n                ('prev_action', (self._action_dim, )),\n            ]\n        return []\n\n    def clone(self, name):\n        \"\"\"Return a clone of the policy.\n\n        It copies the configuration of the primitive and also the parameters.\n\n        Args:\n            name (str): Name of the newly created policy. 
It has to be\n different from source policy if cloned under the same\n computational graph.\n\n Returns:\n garage.tf.policies.CategoricalGRUPolicy: Newly cloned policy.\n\n \"\"\"\n new_policy = self.__class__(\n name=name,\n env_spec=self._env_spec,\n hidden_dim=self._hidden_dim,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n recurrent_nonlinearity=self._recurrent_nonlinearity,\n recurrent_w_init=self._recurrent_w_init,\n output_nonlinearity=self._output_nonlinearity,\n output_w_init=self._output_w_init,\n output_b_init=self._output_b_init,\n hidden_state_init=self._hidden_state_init,\n hidden_state_init_trainable=self._hidden_state_init_trainable,\n state_include_action=self._state_include_action,\n layer_normalization=self._layer_normalization)\n new_policy.parameters = self.parameters\n return new_policy\n\n def __getstate__(self):\n \"\"\"Object.__getstate__.\n\n Returns:\n dict: the state to be pickled for the instance.\n\n \"\"\"\n new_dict = super().__getstate__()\n del new_dict['_f_step_prob']\n del new_dict['_init_hidden']\n return new_dict\n\n def __setstate__(self, state):\n \"\"\"Object.__setstate__.\n\n Args:\n state (dict): Unpickled state.\n\n \"\"\"\n super().__setstate__(state)\n self._initialize_policy()\n","sub_path":"src/garage/tf/policies/categorical_gru_policy.py","file_name":"categorical_gru_policy.py","file_ext":"py","file_size_in_byte":12935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"127937412","text":"import scipy.stats\n\ndef parse(inp):\n\tif len(inp) == 1:\n\t\tif inp[0].strip().isdigit():\n\t\t\treturn [int(inp[0].strip()[:4])]\n\t\telse:\n\t\t\treturn []\n\tif not inp[0]:\n\t\tinp[0] = '1927'\n\tif not inp[1]:\n\t\tinp[1] = '2018'\n\tif inp[0] <= inp[1] and (inp[0].strip().isdigit() and inp[1].strip().isdigit()):\n\t\tlst = [int(entry[:4].strip()) for entry in inp]\n\t\tif lst[0] > 2018 or lst[1] < 1900:\n\t\t\treturn []\n\t\telse:\n\t\t\treturn [int(entry[:4].strip()) for entry in inp]\n\telse:\n\t\treturn []\n\ndef parse_single(inp):\n\treturn int(inp[-4:].strip())\n\ndef filter_hard(movie_dict,low_bound, high_bound,):\n\trtn_movie = {}\n\tfor movie in movie_dict:\n\t\tif int(movie_dict[movie]['release_date'][-4:]) >= low_bound and int(movie_dict[movie]['release_date'][-4:]) <= high_bound:\n\t\t\trtn_movie[movie] = movie_dict[movie]\n\treturn rtn_movie\n\ndef gaussian_release_score(movie_dict,mean,high_val,low_val):\n score_dict = {}\n movie_to_weight = {}\n\n dist = scipy.stats.norm(mean,20)\n for movie in movie_dict:\n movie_to_weight[movie] = dist.pdf(int(movie_dict[movie]['release_date'][-4:]))\n\n # movie -> weight value between 0 and 1\n max_val,min_val = max(movie_to_weight.values()), min(movie_to_weight.values())\n if min_val < max_val:\n \tmovie_to_weight = {k:((v - min_val)/(max_val - min_val)) for k,v in movie_to_weight.iteritems()}\n\n # movie -> weight value between high and low\n for movie in movie_to_weight:\n score_dict[movie] = (movie_to_weight[movie]*(high_val + low_val) - low_val)\n return score_dict\n\n\ndef main(movie_dict, inp):\n\tvals = parse(inp)\n\tif vals == []:\n\t\treturn {}\n\tif len(vals) == 2:\n\t\treturn filter_hard(movie_dict,vals[0],vals[1])\n\treturn 
filter_hard(movie_dict,vals[0],vals[0])","sub_path":"app/irsystem/controllers/user_release.py","file_name":"user_release.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"294272359","text":"#!/usr/bin/python3\n\n\"\"\"\nTHIS IS A PROTOTYPE.\n\nAs a result the code is quite awful. Next up is rewriting it with tests and\nabstractions.\n\"\"\"\n\nfrom json import loads\nfrom os import setuid, environ, rename\nfrom subprocess import Popen, check_output\nfrom sys import argv, stdout, exit\nimport time\n\n\ndef _get_service_keys(environment):\n # XXX duplicated in remote-telepresence\n # XXX also check for TCPness.\n # Order matters for service_keys, need it to be consistent with port\n # forwarding order in remote container.\n result = [key for key in environment if key.endswith(\"_SERVICE_HOST\")]\n result.sort(key=lambda s: s[:-len(\"_SERVICE_HOST\")])\n return result\n\n\ndef get_remote_env(pod_name):\n env = str(check_output([\"kubectl\", \"exec\", pod_name, \"env\"]), \"utf-8\")\n result = {}\n for line in env.splitlines():\n key, value = line.split(\"=\", 1)\n result[key] = value\n return result\n\n\ndef get_env_variables(pod_name):\n \"\"\"Generate environment variables that match kubernetes.\"\"\"\n remote_env = get_remote_env(pod_name)\n filter_keys = set()\n result = {}\n # XXX we're recreating the port generation logic\n i = 0\n for i, service_key in enumerate(_get_service_keys(remote_env)):\n port = str(2000 + i)\n ip = \"127.0.0.1\"\n # XXX bad abstraction\n name = service_key[:-len(\"_SERVICE_HOST\")]\n # XXX ugh\n filter_prefix = \"{}_PORT_{}_TCP\".format(name, remote_env[name + \"_SERVICE_PORT\"])\n filter_keys |= set([filter_prefix + s for s in (\"\", \"_PROTO\", \"_PORT\", \"_ADDR\")])\n # XXX will be wrong for UDP\n full_address = \"tcp://{}:{}\".format(ip, port)\n result[name + \"_SERVICE_HOST\"] = ip\n result[name + \"_SERVICE_PORT\"] = port\n result[name + \"_PORT\"] = full_address\n port_name = name + \"_PORT_\" + port + \"_TCP\"\n result[port_name] = full_address\n # XXX will break for UDP\n result[port_name + \"_PROTO\"] = \"tcp\"\n result[port_name + \"_PORT\"] = port\n result[port_name + \"_ADDR\"] = ip\n for key, value in remote_env.items():\n # We don't want env variables that are service addresses (did those\n # above) nor those that are already present in this container.\n # XXX we're getting env variables from telepresence that are image-specific, not coming from the Deployment. 
figure out way to differentiate.\n if key not in result and key not in environ and key not in filter_keys:\n result[key] = value\n return result\n\n\ndef write_env(pod_name):\n with open(\"/output/out.env.tmp\", \"w\") as f:\n for key, value in get_env_variables(pod_name).items():\n f.write(\"{}={}\\n\".format(key, value))\n rename(\"/output/out.env.tmp\", \"/output/out.env\")\n\n\ndef write_etc_hosts(additional_hosts):\n \"\"\"Update /etc/hosts with records that match k8s DNS entries for services.\"\"\"\n services_json = loads(str(\n check_output([\"kubectl\", \"get\", \"service\", \"-o\", \"json\"]), \"utf-8\"))\n with open(\"/etc/hosts\", \"a\") as hosts:\n for service in services_json[\"items\"]:\n name = service[\"metadata\"][\"name\"]\n namespace = service[\"metadata\"][\"namespace\"]\n hosts.write(\"127.0.0.1 {}\\n\".format(name))\n hosts.write(\"127.0.0.1 {}.{}.svc.cluster.local\\n\".format(name, namespace))\n for host in additional_hosts:\n hosts.write(\"127.0.0.1 {}\\n\".format(host))\n\n\ndef get_pod_name(deployment_name):\n \"\"\"Given the deployment name, return the name of its pod.\"\"\"\n pods = [line.split()[0] for line in\n str(check_output([\"kubectl\", \"get\", \"pod\"]), \"utf-8\").splitlines()]\n for pod in pods:\n if pod.startswith(deployment_name + \"-\"):\n return pod\n raise RuntimeError(\"Telepresence pod not found for Deployment '{}'.\".format(\n deployment_name))\n\n\ndef print_status(deployment_name, ports):\n message = \"\"\"\nAn environment file named {}.env has been written out to $PWD.\n\nYou can now run your own code locally and have it be exposed within Kubernetes, e.g.:\n\n telepresence run-local --deployment {} \\\\\n --rm -i -t busybox\"\"\".format(deployment_name, deployment_name)\n if ports:\n message += \" nc -l -p {}\".format(ports[0])\n\n print(message + \"\\n\")\n stdout.flush()\n\n\ndef main(uid, deployment_name, local_exposed_ports, custom_proxied_hosts):\n processes = []\n pod_name = get_pod_name(deployment_name)\n proxied_ports = set(range(2000, 2020)) | set(map(int, local_exposed_ports))\n proxied_ports.add(22)\n custom_ports = [int(s.split(\":\", 1)[1]) for s in custom_proxied_hosts]\n for port in custom_ports:\n if port in proxied_ports:\n exit((\"OOPS: Can't proxy port {} more than once. \"\n \"Currently mapped ports: {}.This error is due \"\n \"to a limitation in Telepresence, see \"\n \"https://github.com/datawire/telepresence/issues/6\").format(\n port, proxied_ports))\n else:\n proxied_ports.add(int(port))\n\n # 1. write /etc/hosts\n write_etc_hosts([s.split(\":\", 1)[0] for s in custom_proxied_hosts])\n # 2. forward remote port to here, by tunneling via remote SSH server:\n processes.append(Popen([\"kubectl\", \"port-forward\", pod_name, \"22\"]))\n time.sleep(2) # XXX lag until port 22 is open; replace with retry loop\n for port_number in local_exposed_ports:\n processes.append(Popen([\n \"sshpass\", \"-phello\",\n \"ssh\", \"-q\",\n \"-oStrictHostKeyChecking=no\", \"root@localhost\",\n \"-R\", \"*:{}:127.0.0.1:{}\".format(port_number, port_number), \"-N\"]))\n\n # 3. start proxies for custom-mapped hosts:\n for host, port in [s.split(\":\", 1) for s in custom_proxied_hosts]:\n processes.append(Popen([\n \"sshpass\", \"-phello\",\n \"ssh\", \"-q\",\n \"-oStrictHostKeyChecking=no\", \"root@localhost\",\n \"-L\", \"{}:{}:{}\".format(port, host, port), \"-N\"]))\n # 4. 
start proxies for Services:\n # XXX maybe just do everything via SSH, now that we have it?\n for port in range(2000, 2020):\n # XXX what if there is more than 20 services\n p = Popen([\"kubectl\", \"port-forward\", pod_name, str(port)])\n processes.append(p)\n time.sleep(5)\n # 5. write docker envfile, which tells CLI we're ready:\n setuid(uid)\n write_env(pod_name)\n for p in processes:\n p.wait()\n\n\nif __name__ == '__main__':\n main(int(argv[1]), argv[2], argv[3].split(\",\") if argv[3] else [],\n argv[4].split(\",\") if argv[4] else [])\n","sub_path":"local/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"102110595","text":"__author__ = 'tri'\nfrom controller.event_controller import EventController\nfrom controller.env_controller import EnvController\nfrom controller.main_controller import MainController\nimport pygame\n\n\nenv_controller = EnvController()\nevent_controller = EventController()\nmain_controller = MainController(event_controller, env_controller)\n\nmain_controller.init_game()\n\n# Control fps\nclock = pygame.time.Clock()\n\nwhile not main_controller.quit_game:\n # Listen events\n event_controller.run()\n\n # Update screen\n main_controller.run()\n\n pygame.display.flip()\n\n # Approximately 60fps\n clock.tick(60)\n\n\n\npygame.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"490080542","text":"from tests.RegressionTestCase import RegressionTestCase\nfrom subprocess import Popen, PIPE\nfrom shutil import rmtree\nfrom os import makedirs\nfrom os.path import dirname, exists\n\nTIMEOUT_SECONDS = 5\nOUT_DIR = 'tests/tmp'\n\ndef makeDirsForFile(filepath):\n dirs = dirname(filepath)\n if not exists(dirs):\n makedirs(dirs)\n\nclass test_fileLinker(RegressionTestCase):\n '''\n Test fileLinker.py\n '''\n\n def setUp(self):\n try:\n rmtree(OUT_DIR)\n except:\n pass\n makedirs(OUT_DIR)\n\n def tearDown(self):\n rmtree(OUT_DIR)\n\n def test_simple(self):\n command1 = './slurp.py tests/testData/test_fileLinker tests/tmp'\n command2 = './fileLinker.py --fileExtensions=css,html'\n command3 = './spit.py'\n\n proc1 = Popen(command1, stdin=None, stdout=PIPE, stderr=PIPE, shell=True)\n proc2 = Popen(command2, stdin=proc1.stdout, stdout=PIPE, stderr=PIPE, shell=True)\n proc3 = Popen(command3, stdin=proc2.stdout, stdout=PIPE, stderr=PIPE, shell=True)\n proc3.communicate()\n\n proc1.wait(TIMEOUT_SECONDS)\n proc2.wait(TIMEOUT_SECONDS)\n proc3.wait(TIMEOUT_SECONDS)\n\n self.regressionTest(\n 'tests/testData/test_fileLinker/testSimpleOutput', 'tests/tmp/file1.html')\n self.regressionTest(\n 'tests/testData/test_fileLinker/testDifferentFileTypesOutput', 'tests/tmp/file1.css')\n self.regressionTest(\n 'tests/testData/test_fileLinker/testRelativeImports', 'tests/tmp/test_dir/file3.html')\n # self.regressionTest(\n # 'tests/testData/test_fileLinker/test_html_import_output', 'tests/tmp/html_import_syntax.html')","sub_path":"tests/test_FileLinker.py","file_name":"test_FileLinker.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"76162436","text":"import requests\nimport json\n\n#dicionário com as informações de autenticação\n\nbase_url='http://34.95.216.159:8080'\n\ndata = {\n \"username\": \"gmonteiro\",\n \"password\": 
\"teste123\",\n \"firstname\": \"Guilherme\",\n \"lastname\": \"Monteiro\",\n}\n\n#dicionário que sera passado na requisição do token\ncredentials = {\n \"username\": data[\"username\"],\n \"password\": data[\"password\"]\n}\n\n#post na endpoint que valida as credenciais e retona o token\nauth = requests.post(f'{base_url}/authorization', data=credentials)\nprint(auth.status_code)\n\n#caso o usuário não exista (status code 401), ele é criado no endpoint register e em seguida o token é recuperado\nif auth.status_code == 401: \n response = requests.post(url=f'{base_url}/users/register', data=data)\n auth = requests.post(f'{base_url}/authorization', data=credentials)\n\n#exibe o token que foi colocado no cabeçalho da próxima requisição\naccess_token = auth.json()['access_token']\nheaders={'Authorization': f'Bearer {access_token}' }\nprint(headers)\n\n\n#lista que irá armazenar a reposta\nauthors = []\n\n#lista de autores que será enviada\nauthors_json = {\n \"firstname\": \"Brad\", \n \"lastname\": \"Stone\"\n }\n\nfor author_json in authors_json:\n print(author_json)\n post_author = requests.post(url=f'{base_url}/authors', data=author_json, headers=headers)\n print(post_author.status_code)\n print(post_author.json())\n authors.append(post_author.json())\n\n\n#após ter cadastrado os autores, é possível cadastrar o livro e a lista de autores do mesmo\nbook_json = {\n \"title\": \"A loja de tudo\",\n \"subtitle\" : \"Jeff Bezos e a era da Amazon\",\n \"booktype\": \"Livro\", \n \"authors\": authors_json\n\n}\n\nprint(book_json)\n\npost_book = requests.post(url=f'{base_url}/books', json=book_json, headers=headers)\nprint(post_book.status_code)\nprint(post_book.json())\n\n\n\n#após efetuar o cadastro do livro, usamos o seu id para efetuar o cadastro da edição\nedtions = requests.get(f'{base_url}/edtions')\nedtion_json = {\n \"isbn\": \"9788551004739\",\n \"nedtion\": 1,\n \"volume\": 1,\n \"printnumber\": 1,\n \"tome\": None,\n \"year\": 2019,\n \"length\": 2.2,\n \"width\": 16,\n \"height\": 23,\n \"pages\": 400,\n \"subject\": \"Biografias\",\n \"language\": \"Português\",\n \"bookbinding\": \"Brochura\",\n \"publisher\": \"Intrínseca \",\n \"synopsis\": \"Pioneira no comércio de livros pela internet, a Amazon esteve à frente da primeira grande febre das pontocom. \\\n Mas Jeff Bezos, seu visionário criador, não se contentaria com uma livraria virtual descolada: ele queria que a Amazon dispusesse de \\\n uma seleção ilimitada de produtos a preços radicalmente baixos – e se tornasse “a loja de tudo”. 
Para pôr em prática essa visão, Bezos \\\n    desenvolveu uma cultura corporativa de ambição implacável e alto sigilo que poucos conheciam de verdade.\",\n    \"fk_tb_book_id\": post_book.json()['book']['id_book']\n    }\n\n#print(edtion_json)\n\n\npost_edtion = requests.post(url=f'{base_url}/edtions', data=edtion_json, headers=headers)\nprint(post_edtion.status_code)\nprint(post_edtion.json())\n\n","sub_path":"testes/testeapi.py","file_name":"testeapi.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"127928655","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('UserManager', '0009_auto_20160316_1447'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='userinfo',\n            name='ustate',\n            field=models.CharField(default=b'T', max_length=50, verbose_name=b'\\xe7\\x94\\xa8\\xe6\\x88\\xb7\\xe7\\x8a\\xb6\\xe6\\x80\\x81', choices=[(b'T', b'\\xe6\\xad\\xa3\\xe5\\xb8\\xb8'), (b'F', b'\\xe5\\xb7\\xb2\\xe9\\x94\\x81')]),\n        ),\n    ]\n","sub_path":"monitorOS/UserManager/migrations/0010_auto_20160316_1451.py","file_name":"0010_auto_20160316_1451.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"306061448","text":"import autoaim\nimport cv2\nimport os\nw = 1280\nh = 720\ncamera = autoaim.Camera(1)\ncapture = camera.capture\ncapture.set(3, w)\ncapture.set(4, h)\ncapture.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)\ncapture.set(cv2.CAP_PROP_EXPOSURE, 1)\nprint(os.path.abspath(__file__ + '/../../data/capture'))\ncamera.snapshot('00:00:00', '00:20:00', 200,\n                os.path.abspath(__file__ + '/../../data/capture')+'/')\n","sub_path":"app/senary.py","file_name":"senary.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"262831369","text":"import struct\nimport numpy as np\n\n\ndef read_images(filename):\n    with open(filename,'rb') as f:\n        # unpack the big-endian IDX header\n        magic_number, num_images, dim1, dim2 = struct.unpack('>IIII',f.read(16))\n        \n        return np.frombuffer(f.read(),dtype=np.uint8).reshape((num_images,dim1,dim2))\n\ndef read_labels(filename):\n    with open(filename,'rb') as f:\n        magic_number, num_labels = struct.unpack('>II',f.read(8))\n        return np.frombuffer(f.read(),dtype=np.uint8)\n\nif __name__==\"__main__\":\n    train_images = read_images('../data/train-images-idx3-ubyte')\n    train_labels = read_labels('../data/train-labels-idx1-ubyte')\n    \n    print(train_images.shape,train_labels.shape)\n","sub_path":"3-mnist-logistic-regression/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"532723632","text":"# Python3 program to find missing k numbers in an array.\n\n# Returns the first k natural numbers missing from arr[0..n-1]\ndef printKMissing(arr, n, k):\n    # de-duplicate and sort so the scan below sees increasing unique values\n    arr = sorted(set(arr))\n    n = len(arr)\n\n    # Find first positive number\n    i = 0\n    while (i < n) and (arr[i] <= 0):\n        i = i + 1\n\n    # Now find missing numbers between array elements\n    count = 0\n    curr = 1\n    res = []\n    while (count < k) and (i < n):\n        if arr[i] != curr:\n            res.append(curr)\n            count = count + 1\n        else:\n            i = i + 1\n        curr = curr + 1\n\n    # Find missing numbers after maximum.\n    while count < k:\n 
res.append(curr)\n        curr = curr + 1\n        count = count + 1\n    return res\n\n# Driver code\narr = [2, 5, 6, 3, 5]\nn = len(arr)\nk = 5\nprint(printKMissing(arr, n, k))","sub_path":"assignments/missing_int.py","file_name":"missing_int.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"74697667","text":"import random\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--count',\n                    type=int,\n                    required=True,\n                    help='Number of products to generate.')\nparser.add_argument('--output',\n                    required=True,\n                    help='Output file to write results to.')\nargs = parser.parse_args()\n\ncount = args.count\noutput_file = args.output\n\nadj_file = open('dict/data.adj', 'r')\nadjs = adj_file.readlines()\nadj_file.close()\n\nnoun_file = open('dict/data.noun', 'r')\nnouns = noun_file.readlines()\nnoun_file.close()\n\nnum_adjs = len(adjs)\nnum_nouns = len(nouns)\n\nf = open(output_file, 'w')\n\nfor ii in range(0,count):\n    adj1 = adjs[random.randint(0, num_adjs - 1)].rstrip('\\n')\n    adj2 = adjs[random.randint(0, num_adjs - 1)].rstrip('\\n')\n    noun = nouns[random.randint(0, num_nouns - 1)].rstrip('\\n')\n    score = random.randint(0, 10000)\n    f.write('%s %s %s: %s\\n' % (adj1, adj2, noun, score))\n\nf.close()\n","sub_path":"generate_products/generate_products.py","file_name":"generate_products.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"427101087","text":"# Ivan Carvalho\n# Solution to https://www.beecrowd.com.br/judge/problems/view/1248\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCode your solution here\n\"\"\"\nordem = int(input())\nfor i in range(ordem):\n    entrada1 = [k for k in input()]\n    entrada2 = [l for l in input()]\n    entrada3 = [q for q in input()]\n    try:\n        for j in entrada2:\n            entrada1.remove(j)\n        for j in entrada3:\n            entrada1.remove(j)\n        print(\"\".join(sorted(entrada1)))\n    except ValueError:\n        print(\"CHEATER\")\n","sub_path":"beecrowd/1248.py","file_name":"1248.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"644451283","text":"'''\r\nAccepted on leetcode(55)\r\ntime - O(N)\r\nspace - O(1)\r\nApproach:\r\n1. Initialize lastIndex to the last index of the given array.\r\n2. Iterate over the nums array from the second-last index, moving backwards, to verify that the last index is reachable from each position.\r\n3. If index + nums[index] >= lastIndex at that position, the last index is reachable from there, so set lastIndex to the current index.\r\n4. 
If the lastIndex is 0 at the end then there is a way from begin to end.\r\n'''\r\n\r\n\r\nclass Solution:\r\n def canJump(self, nums) -> bool:\r\n lastIndex = len(nums) - 1\r\n\r\n for i in range(len(nums) - 2, -1, -1):\r\n if i + nums[i] >= lastIndex:\r\n lastIndex = i\r\n return lastIndex == 0","sub_path":"134_JumpGame.py","file_name":"134_JumpGame.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"623374436","text":"from __future__ import print_function\nimport time, datetime\nimport dateutil.parser\nimport requests\nimport random\nimport json\nimport websocket\nimport sys\nfrom websocket import create_connection\n\nfrom pprint import pprint\n\nfrom graphenebase.types import *\nfrom graphenebase.objects import GrapheneObject, isArgsThisClass\nfrom steembase.operations import Amount\nfrom piston.steem import Steem\nimport os\n\n# Config\n\ninterval_init = 60*60*float(os.environ['feed_interval_init'])\nrand_level = float(os.environ['feed_rand_level'])\nfreq = int(os.environ['feed_freq'])\nmin_change = float(os.environ['feed_min_change'])\nmax_age = 60*60*int(os.environ['feed_max_age'])\nmanual_conf = float(os.environ['feed_manual_conf'])\ndiscount = float(os.environ['feed_discount'])\nuse_telegram = os.environ['feed_use_telegram']\ntelegram_token = os.environ['feed_telegram_token']\ntelegram_id = os.environ['feed_telegram_id']\nbts_ws = [\"wss://dele-puppy.com/ws\", \"wss://bitshares.openledger.info/ws\", \"wss://valen-tin.fr:8090/ws\"]\n\n# Piston/Account Configuration\nsteemnode = os.environ['feed_node'] # The steemnode to connect to\nwitness = os.environ['feed_account'] # Your witness name\nwif = os.environ['feed_wif'] # Your active WIF key\n\n# New Classes, should be migrated to xeroc's library\nclass Exchange_rate(GrapheneObject):\n def __init__(self, *args, **kwargs) :\n if isArgsThisClass(self, args):\n self.data = args[0].data\n else:\n if len(args) == 1 and len(kwargs) == 0:\n kwargs = args[0]\n\n super().__init__(OrderedDict([\n ('base', Amount(kwargs[\"base\"])),\n ('quote', Amount(kwargs[\"quote\"])),\n ]))\n\nclass Feed_publish(GrapheneObject) :\n def __init__(self, *args, **kwargs) :\n if isArgsThisClass(self, args):\n self.data = args[0].data\n else:\n if len(args) == 1 and len(kwargs) == 0:\n kwargs = args[0]\n super().__init__(OrderedDict([\n ('publisher', String(kwargs[\"publisher\"])),\n ('exchange_rate', Exchange_rate(kwargs[\"exchange_rate\"])),\n ]))\n\ndef publish_feed(account, amount):\n op = Feed_publish(\n **{ \"publisher\": account,\n \"exchange_rate\": {\n \"base\": amount + \" SBD\",\n \"quote\": \"1.000 STEEM\"\n }}\n )\n steem.executeOp(op, wif)\n\ndef rand_interval(intv):\n intv += intv*rand_level*random.uniform(-1, 1)\n if intv < 60*60:\n intv = 60*60\n elif intv > 60*60*24*7:\n intv = 60*60*24*7\n return(int(intv))\n\ndef confirm(pct, p, last_update_id=None):\n if use_telegram == 0:\n conf = input(\"Your price feed change is over \" + format(pct*100, \".1f\") + \"% (\" + p + \" USD/STEEM) If you confirm this, type 'confirm': \")\n if conf.lower() == \"confirm\":\n return True\n else:\n reconf = input(\"You denied to publish this feed. Are you sure? 
(Y/n): \")\n if reconf.lower() == \"n\":\n conf = input(\"If you confirm this, type 'confirm': \")\n if conf.lower() == \"confirm\":\n return True\n else:\n print(\"Publishing denied\")\n return False\n else:\n print(\"Publishing denied\")\n return False\n elif use_telegram == 1:\n custom_keyboard = [[\"deny\"]]\n reply_markup = json.dumps({\"keyboard\":custom_keyboard, \"resize_keyboard\": True})\n conf_msg = (\"Your price feed change is over \" + format(pct*100, \".1f\") + \"% (\" + p + \" USD/STEEM) If you confirm this, type 'confirm'\")\n payload = {\"chat_id\":telegram_id, \"text\":conf_msg, \"reply_markup\":reply_markup}\n m = telegram(\"sendMessage\", payload)\n while True:\n try:\n updates = telegram(\"getUpdates\", {\"offset\":last_update_id+1})[\"result\"][-1]\n chat_id = updates[\"message\"][\"from\"][\"id\"]\n update_id = updates[\"update_id\"]\n cmd = updates[\"message\"][\"text\"]\n except:\n update_id = 0\n cmd = \"\"\n if update_id > last_update_id and cmd != \"\":\n if chat_id == telegram_id and cmd.lower() == \"confirm\":\n payload = {\"chat_id\":telegram_id, \"text\":\"Publishing confirmed\"}\n m = telegram(\"sendMessage\", payload)\n last_update_id = update_id\n return True\n elif chat_id == telegram_id and cmd.lower() == \"deny\":\n payload = {\"chat_id\":telegram_id, \"text\":\"Publishing denied\"}\n m = telegram(\"sendMessage\", payload)\n last_update_id = update_id\n return False\n else:\n payload = {\"chat_id\":telegram_id, \"text\":\"Wrong command. Please select confirm or deny\"}\n m = telegram(\"sendMessage\", payload)\n last_update_id = update_id\n time.sleep(3)\n\ndef telegram(method, params=None):\n url = \"https://api.telegram.org/bot\"+telegram_token+\"/\"\n params = params\n r = requests.get(url+method, params = params).json()\n return r\n\ndef btc_usd():\n prices = {}\n try:\n r = requests.get(\"https://api.bitfinex.com/v1/pubticker/BTCUSD\").json()\n prices['bitfinex'] = {'price': float(r['last_price']), 'volume': float(r['volume'])}\n except:\n pass\n try:\n r = requests.get(\"https://api.exchange.coinbase.com/products/BTC-USD/ticker\").json()\n prices['coinbase'] = {'price': float(r['price']), 'volume': float(r['volume'])}\n except:\n pass\n try:\n r = requests.get(\"https://www.okcoin.com/api/v1/ticker.do?symbol=btc_usd\").json()[\"ticker\"]\n prices['okcoin'] = {'price': float(r['last']), 'volume': float(r['vol'])}\n except:\n pass\n try:\n r = requests.get(\"https://www.bitstamp.net/api/v2/ticker/btcusd/\").json()\n prices['bitstamp'] = {'price': float(r['last']), 'volume': float(r['volume'])}\n except:\n pass\n if not prices:\n return 0\n total_usd = 0\n total_btc = 0\n for p in prices.values():\n total_usd += p['price'] * p['volume']\n total_btc += p['volume']\n avg_price = total_usd / total_btc\n return avg_price\n\ndef bts_dex_hist(address):\n for s in address:\n try:\n ws = create_connection(s)\n login = json.dumps({\"jsonrpc\": \"2.0\", \"id\":1,\"method\":\"call\",\"params\":[1,\"login\",[\"\",\"\"]]})\n hist_api = json.dumps({\"jsonrpc\": \"2.0\", \"id\":2, \"method\":\"call\",\"params\":[1,\"history\",[]]})\n btc_hist = json.dumps({\"jsonrpc\": \"2.0\", \"id\": 3, \"method\": \"call\", \"params\": [2, \"get_fill_order_history\", [\"1.3.861\", \"1.3.973\", 50]]})\n bts_hist = json.dumps({\"jsonrpc\": \"2.0\", \"id\": 4, \"method\": \"call\", \"params\": [2, \"get_fill_order_history\", [\"1.3.0\", \"1.3.973\", 50]]})\n bts_feed = json.dumps({\"jsonrpc\": \"2.0\", \"id\": 5, \"method\": \"call\", \"params\": [0, \"get_objects\", 
[[\"2.4.3\"]]]})\n ws.send(login)\n ws.recv()\n ws.send(hist_api)\n ws.recv()\n ws.send(btc_hist)\n dex_btc_h = json.loads(ws.recv())[\"result\"]\n ws.send(bts_hist)\n dex_bts_h = json.loads(ws.recv())[\"result\"]\n ws.send(bts_feed)\n bts_btc_feed = json.loads(ws.recv())[\"result\"][0][\"current_feed\"][\"settlement_price\"]\n bts_btc_p = bts_btc_feed[\"base\"][\"amount\"]/bts_btc_feed[\"quote\"][\"amount\"]/10**3\n ws.close()\n return (dex_btc_h, dex_bts_h, bts_btc_p)\n except:\n return (0, 0, 0)\n\n\nif __name__ == '__main__':\n print(\"Connecting to Steem RPC\")\n\n steem = Steem(node=steemnode, wif=wif)\n info = steem.info()\n try:\n bh = steem.info()[\"head_block_number\"]\n print(\"Connected. Current block height is \" + str(bh))\n except:\n print(\"Connection error. Check your cli_wallet\")\n quit()\n\n if use_telegram == 1:\n try:\n print(\"Connecting to Telegram\")\n test = telegram(\"getMe\")\n except:\n print(\"Telegram connection error\")\n quit()\n\n steem_q = 0\n btc_q = 0\n last_update_t = 0\n try:\n last_update_id = telegram(\"getUpdates\")[\"result\"][-1][\"update_id\"]\n except:\n last_update_id = 0\n interval = rand_interval(interval_init)\n time_adj = time.time() - datetime.utcnow().timestamp()\n start_t = (time.time()//freq)*freq - freq\n last_t = start_t - 1\n my_info = steem.rpc.get_witness_by_account(witness)\n if float(my_info[\"sbd_exchange_rate\"][\"quote\"].split()[0]) == 0:\n last_price = 0\n else:\n last_price = float(my_info[\"sbd_exchange_rate\"][\"base\"].split()[0]) / float(my_info[\"sbd_exchange_rate\"][\"quote\"].split()[0]) \n print(\"Your last feed price is \" + format(last_price, \".3f\") + \" USD/STEEM\")\n\n while True:\n curr_t = (time.time()//freq)*freq - freq\n if curr_t > last_t:\n# Bittrex\n try:\n bt_h = requests.get(\"https://bittrex.com/api/v1.1/public/getmarkethistory?market=BTC-STEEM\")\n bt_hist = bt_h.json()\n for i in range(200):\n strf_t = bt_hist[\"result\"][i][\"TimeStamp\"]\n unix_t = dateutil.parser.parse(strf_t).timestamp()\n unix_t += time_adj\n if unix_t >= curr_t:\n steem_q += bt_hist[\"result\"][i][\"Quantity\"]\n btc_q += bt_hist[\"result\"][i][\"Total\"]\n pass\n else:\n break\n except:\n print(\"Error in fetching Bittrex market history \")\n pass\n\n# Poloniex\n try:\n po_h = requests.get(\"https://poloniex.com/public?command=returnTradeHistory¤cyPair=BTC_STEEM&start=\"+str(curr_t))\n po_hist = po_h.json()\n for i in range(len(po_hist)):\n steem_q += float(po_hist[i][\"amount\"])\n btc_q += float(po_hist[i][\"total\"])\n pass\n except:\n print(\"Error in fetching Poloniex market history\")\n pass\n\n# Bitshares DEX\n dex_btc_h, dex_bts_h, bts_btc_p = bts_dex_hist(bts_ws)\n for i in range(50):\n if (isinstance(dex_btc_h, list) and dateutil.parser.parse(dex_btc_h[i][\"time\"]).timestamp() + time_adj) >= curr_t:\n if dex_btc_h[i][\"op\"][\"pays\"][\"asset_id\"] == \"1.3.973\":\n steem_q += float(dex_btc_h[i][\"op\"][\"pays\"][\"amount\"])/10**3\n btc_q += float(dex_btc_h[i][\"op\"][\"receives\"][\"amount\"])/10**8\n else:\n steem_q += float(dex_btc_h[i][\"op\"][\"receives\"][\"amount\"])/10**3\n btc_q += float(dex_btc_h[i][\"op\"][\"pays\"][\"amount\"])/10**8\n for i in range(50):\n if (isinstance(dex_btc_h, list) and dateutil.parser.parse(dex_bts_h[i][\"time\"]).timestamp() + time_adj) >= curr_t:\n if dex_bts_h[i][\"op\"][\"pays\"][\"asset_id\"] == \"1.3.973\":\n steem_q += float(dex_bts_h[i][\"op\"][\"pays\"][\"amount\"])/10**3\n btc_q += (float(dex_bts_h[i][\"op\"][\"receives\"][\"amount\"])/10**5)*bts_btc_p\n else:\n 
steem_q += float(dex_bts_h[i][\"op\"][\"receives\"][\"amount\"])/10**3\n btc_q += (float(dex_bts_h[i][\"op\"][\"pays\"][\"amount\"])/10**5)*bts_btc_p\n last_t = curr_t\n\n if curr_t - start_t >= interval:\n if steem_q > 0:\n price = btc_q/steem_q*btc_usd()\n price_str = format(price*(1-discount), \".3f\")\n # If this is our first price submission, just execute\n if last_price == 0:\n publish_feed(witness, price_str)\n print(\"Published price feed: \" + price_str + \" (-\" + str(discount * 100) + \"\\%) USD/STEEM at \" + time.ctime()+\"\\n\")\n last_price = price\n steem_q = 0\n btc_q = 0\n last_update_t = curr_t\n # otherwise perform normally\n else:\n if (abs(1 - price/last_price) < min_change) and ((curr_t - last_update_t) < max_age):\n print(\"No significant price change and last feed is still valid\")\n print(\"Last price: \" + format(last_price, \".3f\") + \" Current price: \" + price_str + \" \" + format((price/last_price*100 - 100), \".1f\") + \"% / Feed age: \" + str(int((curr_t - last_update_t)/3600)) + \" hours\")\n steem_q = 0\n btc_q = 0\n else:\n if abs(1 - price/last_price) > manual_conf:\n if confirm(manual_conf, price_str, last_update_id) is True:\n publish_feed(witness, price_str)\n print(\"Published price feed: \" + price_str + \" USD/STEEM at \" + time.ctime()+\"\\n\")\n last_price = price\n else:\n publish_feed(witness, price_str)\n print(\"Published price feed: \" + price_str + \" USD/STEEM at \" + time.ctime()+\"\\n\")\n last_price = price\n steem_q = 0\n btc_q = 0\n last_update_t = curr_t\n else:\n print(\"No trades occured during this period\")\n interval = rand_interval(interval_init)\n start_t = curr_t\n left_min = (interval - (curr_t - start_t))/60\n print(str(int(left_min)) + \" minutes to next update / Volume: \" + format(btc_q, \".4f\") + \" BTC \" + str(int(steem_q)) + \" STEEM\\r\")\n sys.stdout.flush()\n time.sleep(freq*0.7)\n","sub_path":"steemfeed.py","file_name":"steemfeed.py","file_ext":"py","file_size_in_byte":14101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"612165345","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Jul 29\r\n\r\n@author: Abysswalker\r\n\"\"\"\r\n\r\ndef red25(number):\r\n\tfor reducers in [2,5]:\r\n\t\twhile number>1:\r\n\t\t\tif number%reducers==0:\r\n\t\t\t\tnumber/=reducers\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\treturn int(number)\r\n\r\nd=0\r\nl=0\r\nfor i in range(2,1000):\r\n\ta=red25(i)\r\n\tif a!=1:\r\n\t\tj=1\r\n\t\twhile True:\r\n\t\t\tif (10**j)%a==1:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tj+=1\r\n\t\t\r\n\t\tif j>l:\r\n\t\t\tl=j\r\n\t\t\td=i\r\n\t\t\t\r\nprint(\"length\", l, \"for denom\",d)","sub_path":"Euler Projekt 026 - Reciprocal Cycles/EulerProjekt_26.py","file_name":"EulerProjekt_26.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"46149849","text":"\"\"\"\nLoads and pre-processes a bAbI dataset into TFRecords.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport tqdm\nimport json\nimport random\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('output_dir', '/home/rmm120030/working/eeg/relnet/data/feature', 'Dataset destination.')\ntf.app.flags.DEFINE_string('run_name', 'relnet', 'Run name')\ntf.app.flags.DEFINE_string('input_file', '/home/rmm120030/working/eeg/relnet/data/seed.json', 'Input json 
datafile.')\ntf.app.flags.DEFINE_boolean('only_relevant_sections', False,\n 'Only use the two sections the subj/obj entities are from instead of the full record?')\ntf.app.flags.DEFINE_integer('max_record_length', 20, 'maximum number of sentences in a record')\ntf.app.flags.DEFINE_integer('max_sentence_length', 200, 'maximum number of sentences in a record')\ntf.app.flags.DEFINE_integer('max_entities', 50, 'maximum number of entities in a record')\n\nPAD_TOKEN = '_PAD'\nPAD_ID = 0\nrel2id = {u'NONE': 0,\n u'EVOKES': 1,\n u'EVIDENCES': 2,\n u'TREATMENT_FOR': 3,\n u'OCCURS_WITH': 4,\n u'OCCURS_WITH_P': 4,\n u'OCCURS_WITH_A': 4,\n u'OCCURS_WITH_Te': 4,\n u'OCCURS_WITH_Tr': 4}\n\n\ndef parse_records(json_datafile, only_relevant_sections=False):\n \"\"\"\n Parse the json data file\n\n records is a list of (story, entity_indexes, entities, label) tuples where:\n - story is a list of lists of token strings\n - entity_indexes is list of ints\n - entities is a list of entity id strings\n - label is a string\n \"\"\"\n records = []\n records_json = json.load(json_datafile)['records']\n for record in records_json:\n section_dict = {}\n for section_json in record['sections']:\n sentences = [[s.strip() for s in sentence.split(' ')] for sentence in section_json['sentences']]\n entities = section_json['concepts']\n section_dict[section_json['name']] = (sentences, entities)\n if not only_relevant_sections:\n full_story = []\n all_entities = set()\n for (sentences, entities) in section_dict.values():\n full_story.extend(sentences)\n all_entities.update(entities)\n all_entities = list(all_entities)\n for label_json in record['labels']:\n if only_relevant_sections:\n entities = set()\n (subj_sec, subj_ents) = section_dict[label_json['section1']]\n (obj_sec, obj_ents) = section_dict[label_json['section2']]\n entities.update(subj_ents)\n story = subj_sec\n if not label_json['section1'] == label_json['section2']:\n story.extend(obj_sec)\n entities.update(obj_ents)\n entities = list(entities)\n else:\n entities = all_entities\n story = full_story\n entity_indexes = [0 for _ in entities]\n entity_indexes[entities.index(label_json['subject'])] = 1\n entity_indexes[entities.index(label_json['object'])] = 1\n assert (sum(entity_indexes) == 2)\n records.append((story, entity_indexes, entities, label_json['label']))\n\n return records\n\n\ndef save_dataset(records, path):\n \"\"\"\n Save the stories into TFRecords.\n\n NOTE: Since each sentence is a consistent length from padding, we use\n `tf.train.Example`, rather than a `tf.train.SequenceExample`, which is\n _slightly_ faster.\n \"\"\"\n writer = tf.python_io.TFRecordWriter(path)\n for story, entity_indexes, entities, label in tqdm.tqdm(records, \"writing\"):\n story_flat = [token_id for sentence in story for token_id in sentence]\n\n story_feature = tf.train.Feature(int64_list=tf.train.Int64List(value=story_flat))\n entity_indexes_feature = tf.train.Feature(int64_list=tf.train.Int64List(value=entity_indexes))\n keys_feature = tf.train.Feature(int64_list=tf.train.Int64List(value=entities))\n label_feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))\n\n features = tf.train.Features(feature={\n 'story': story_feature,\n 'entity_indexes': entity_indexes_feature,\n 'keys': keys_feature,\n 'label': label_feature\n })\n\n example = tf.train.Example(features=features)\n writer.write(example.SerializeToString())\n writer.close()\n\n\ndef tokenize_records(records, token_to_id):\n \"\"\"\n Convert all tokens into their unique ids.\n :param records: list 
of (story, entity_indexes, entities, label) tuples where\n story is a list of lists of tokens\n ...\n :param token_to_id: dict\n :return: list of (story, query, answer) with tokens converted to unique integer ids\n \"\"\"\n story_ids = []\n for story, entity_indexes, entities, label in records:\n story = [[token_to_id[token] for token in sentence] for sentence in story]\n entities = [token_to_id[ent] for ent in entities]\n label = rel2id[label]\n story_ids.append((story, entity_indexes, entities, label))\n return story_ids\n\n\ndef get_tokenizer(records):\n \"Recover unique tokens as a vocab and map the tokens to ids.\"\n tokens_all = []\n for story, entity_indexes, entities, label in records:\n tokens_all.extend([token for sentence in story for token in sentence] + entities)\n vocab = [PAD_TOKEN] + sorted(set(tokens_all))\n token_to_id = {token: i for i, token in enumerate(vocab)}\n print('Vocab size: %d' % len(vocab))\n return vocab, token_to_id\n\n\ndef pad_records(records, max_sentence_length, max_story_length, max_entities_length):\n \"Pad sentences, stories, and queries to a consistent length.\"\n for story, entity_indexes, entities, label in records:\n for sentence in story:\n for _ in range(max_sentence_length - len(sentence)):\n sentence.append(PAD_ID)\n assert len(sentence) == max_sentence_length\n\n for _ in range(max_entities_length - len(entity_indexes)):\n entity_indexes.append(0)\n assert len(entity_indexes) == max_entities_length\n\n for _ in range(max_story_length - len(story)):\n story.append([PAD_ID for _ in range(max_sentence_length)])\n assert len(story) == max_story_length\n\n for _ in range(max_entities_length - len(entities)):\n entities.append(PAD_ID)\n assert len(entities) == max_entities_length\n\n return records\n\n\ndef truncate_records(stories, max_length):\n \"Truncate a story to the specified maximum length.\"\n stories_truncated = []\n for story, a, b, c in stories:\n story_truncated = story[-max_length:]\n stories_truncated.append((story_truncated, a, b, c))\n return stories_truncated\n\n\ndef create_dataset(config):\n \"Main entrypoint.\"\n\n json_path = config.input_file\n print('Reading json data from %s...' 
% json_path)\n\n # read stories\n with open(json_path, 'r') as jsonfile:\n records = parse_records(jsonfile, config.only_relevant_sections)\n task_size = len(records)\n\n # truncate stories to max story length\n records = truncate_records(records, config.max_record_length)\n\n vocab, token_to_id = get_tokenizer(records)\n vocab_size = len(vocab)\n\n # convert stories from strings to indexes into the vocab set\n records = tokenize_records(records, token_to_id)\n\n story_lengths = [len(sentence) for story, _, _, _ in records for sentence in story]\n max_sentence_length = max(story_lengths)\n max_story_length = max([len(story) for story, _, _, _ in records])\n max_entities_length = max([len(entities) for _, _, entities, _ in records])\n\n print('real max_sentence_length: %s tokens' % max_sentence_length)\n max_sentence_length = min(max_sentence_length, config.max_sentence_length)\n print('real max_story_length: %s sentences' % max_story_length)\n max_story_length = min(max_story_length, config.max_record_length)\n print('real max_entities_length: %s entities' % max_entities_length)\n max_entities_length = min(max_entities_length, config.max_entities)\n metadata = {\n 'run_name': config.run_name,\n 'task_size': task_size,\n 'max_entities_length': max_entities_length,\n 'max_story_length': max_story_length,\n 'max_sentence_length': max_sentence_length,\n 'vocab': vocab,\n 'vocab_size': vocab_size\n }\n\n # pad each story sentence\n records_pad = pad_records(records, max_sentence_length, max_story_length, max_entities_length)\n\n return records_pad, metadata\n\n\ndef main():\n if not os.path.exists(FLAGS.output_dir):\n print('config output dir: %s' % FLAGS.output_dir)\n os.makedirs(FLAGS.output_dir)\n metadata_path = os.path.join(FLAGS.output_dir, '%s.json' % FLAGS.run_name)\n dataset_path_train = os.path.join(FLAGS.output_dir, '%s_train.tfrecords' % FLAGS.run_name)\n dataset_path_test = os.path.join(FLAGS.output_dir, '%s_test.tfrecords' % FLAGS.run_name)\n data, metadata = create_dataset(FLAGS)\n metadata['filename'] = {\n 'train': os.path.basename(dataset_path_train),\n 'test': os.path.basename(dataset_path_test)\n }\n with open(metadata_path, 'w') as f:\n json.dump(metadata, f)\n\n pair2list = {}\n for story, entity_indexes, entities, label in data:\n pair = tuple([entities[i] for i, e in enumerate(entity_indexes) if e == 1])\n list_ = pair2list[pair] if pair in pair2list else []\n list_.append((story, entity_indexes, entities, label))\n pair2list[pair] = list_\n print('%d different relations in dataset: %s...' % (len(pair2list.keys()), pair2list.keys()[:5]))\n\n num_train = int(0.75 * len(pair2list.keys()))\n train = [v for k in pair2list.keys()[:num_train] for v in pair2list[k]]\n random.seed(1337)\n random.shuffle(train)\n test = [v for k in pair2list.keys()[num_train:] for v in pair2list[k]]\n random.seed(1337)\n random.shuffle(test)\n\n save_dataset(train, dataset_path_train)\n save_dataset(test, dataset_path_test)\n\nif __name__ == '__main__':\n main()\n","sub_path":"eeg/eeg-report-annotations/src/main/python/relation/relnet/prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":10212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"262345047","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# #########################################################################\n# Copyright (c) 2016, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2016. UChicago Argonne, LLC. 
This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. #\n# #########################################################################\n\n\"\"\"\nPlease make sure the installation :ref:`pre-requisite-reference-label` are met.\n\nThis module feeds the data coming from detector to a process using queue. It interracts with a channel access\nplug in of area detector. The read of frame data from channel access happens on event of frame counter change.\nThe change is detected with a callback. 
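(For illustration only: such a configuration might set 'detector' to a made-up area-detector prefix like 'BL99:DET1:' and 'no_frames' to 100; the parameter names are the ones documented here, while the example values are invented.)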
The data is passed to the consuming process.\nThis module requires configuration file with the following parameters:\n'detector', a string defining the first prefix in area detector.\n'no_frames', number of frames that will be fed\n'args', optional, list of process specific parameters, they need to be parsed to the desired format in the wrapper\n\"\"\"\n\nfrom multiprocessing import Process, Queue\nimport json\nimport sys\nimport time\nimport dquality.common.utilities as utils\nimport dquality.common.report as report\nfrom dquality.feeds.pv_feed import Feed\nimport dquality.common.constants as const\nimport dquality.clients.fb_client.feedback as fb\nimport dquality.feeds.adapter as adapter\nfrom dquality.feeds.pv_feed_decorator import FeedDecorator\n\n\n__author__ = \"Barbara Frosik\"\n__copyright__ = \"Copyright (c) 2016, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = ['init',\n 'RT.verify',\n 'RT.finish']\n\n\nclass RT:\n\n def init(self, config):\n \"\"\"\n This function initializes variables according to configuration.\n\n It gets values from the configuration file, evaluates and processes the values. If mandatory parameter is missing,\n the script logs an error and exits.\n\n Parameters\n ----------\n config : str\n configuration file name, including path\n\n Returns\n -------\n logger : Logger\n logger instance\n\n limits : dictionary\n a dictionary containing limit values read from the configured 'limit' file\n\n quality_checks : dict\n a dictionary containing quality check functions ids\n\n feedback : list\n a list of strings defining real time feedback of quality checks errors. Currently supporting 'PV', 'log', and\n 'console'\n\n report_type : int\n report type; currently supporting 'none', 'error', and 'full'\n\n consumers : dict\n a dictionary parsed from json file representing consumers\n\n \"\"\"\n conf = utils.get_config(config)\n if conf is None:\n print ('configuration file is missing')\n exit(-1)\n\n logger = utils.get_logger(__name__, conf)\n\n feed_args = []\n feed_kwargs = {}\n\n limitsfile = utils.get_file(conf, 'limits', logger)\n if limitsfile is None:\n sys.exit(-1)\n\n with open(limitsfile) as limits_file:\n limits = json.loads(limits_file.read())\n feed_args.append(limits)\n\n qcfile = utils.get_file(conf, 'quality_checks', logger)\n if qcfile is None:\n sys.exit(-1)\n\n with open(qcfile) as qc_file:\n dict = json.loads(qc_file.read())\n feed_args.append(dict)\n\n try:\n no_frames = int(conf['no_frames'])\n except KeyError:\n print ('no_frames parameter not configured. 
Continuous mode.')\n no_frames = -1\n feed_args.append(no_frames)\n\n try:\n callback_pv = conf['callback_pv']\n feed_kwargs['callback_pv'] = callback_pv\n except KeyError:\n pass\n\n try:\n detector = conf['detector']\n feed_kwargs['detector'] = detector\n except KeyError:\n print ('detector parameter not configured.')\n sys.exit(-1)\n\n try:\n consumers = conf['zmq_snd_port']\n feed_kwargs['consumers'] = consumers\n except KeyError:\n pass\n\n try:\n aggregate_limit = int(conf['aggregate_limit'])\n except KeyError:\n aggregate_limit = no_frames\n feed_kwargs['aggregate_limit'] = aggregate_limit\n\n try:\n feedback = conf['feedback_type']\n if len(feedback) == 0:\n feedback = None\n except KeyError:\n feedback = None\n\n try:\n decor_conf = conf['decor']\n decor_map = {}\n for entry in decor_conf:\n entry = entry.split('>')\n decor_map[entry[0].strip()] = entry[1].strip()\n if len(decor_map) == 0:\n decor_map = None\n except KeyError:\n decor_map = None\n\n try:\n report_type = conf['report_type']\n except KeyError:\n report_type = const.REPORT_FULL\n\n return feed_args, feed_kwargs, feedback, decor_map, logger, report_type\n\n\n def verify(self, config, report_file=None, sequence = None):\n \"\"\"\n This function starts real time verification process according to the given configuration.\n\n This function reads configuration and initiates variables accordingly.\n It creates a Feed instance and starts data_feed and waits to receive results in aggregateq.\n The results are then written into a report file.\n\n Parameters\n ----------\n conf : str\n configuration file name, including path\n\n report_file : file\n a file where the report will be written, defaulted to None, if no report wanted\n\n sequence : list or int\n information about data sequence or number of frames\n\n Returns\n -------\n boolean\n\n \"\"\"\n feed_args, feed_kwargs, feedback, decor_map, logger, report_type = self.init(config)\n\n # init the pv feedback\n if not feedback is None:\n feedbackq = Queue()\n feedback_pvs = utils.get_feedback_pvs(feed_args[1])\n fb_args = {'feedback_pvs':feedback_pvs, 'detector':feed_kwargs['detector']}\n feedback_obj = fb.Feedback(feedbackq, feedback, **fb_args)\n # put the logger to args\n if const.FEEDBACK_LOG in feedback:\n feedback_obj.set_logger(logger)\n feed_kwargs['feedbackq'] = feedbackq\n\n self.p = Process(target=feedback_obj.deliver, args=())\n self.p.start()\n\n reportq = Queue()\n\n # address the special cases of quality checks when additional arguments are required\n if decor_map is None:\n self.feed = Feed()\n else:\n self.feed = FeedDecorator(decor_map)\n\n ack = self.feed.feed_data(logger, reportq, *feed_args, **feed_kwargs)\n if ack == 1:\n bad_indexes = {}\n aggregate = reportq.get()\n\n if report_file is not None:\n report.report_results(logger, aggregate, None, report_file, report_type)\n report.add_bad_indexes(aggregate, bad_indexes)\n\n return bad_indexes\n\n\n def finish(self):\n try:\n self.feed.finish()\n time.sleep(1)\n except:\n pass\n\n try:\n self.p.terminate()\n except:\n pass\n self.feed = None\n\n","sub_path":"dquality/real_time_pv.py","file_name":"real_time_pv.py","file_ext":"py","file_size_in_byte":10495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"105913911","text":"import urllib.request\nimport re\n\"\"\"\nurl = \"http://74.41.0.1:8080/index.html#modules/tips/custquery.html\"\nheaders=(\"User-Agent\",\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0\")\nopener = 
urllib.request.build_opener()\nopener.addheaders = [headers]\nurllib.request.install_opener(opener)\n\ndata = urllib.request.urlopen(url).read()\npath = \"C:/Users/diaozhende/Desktop/data.html\"\nfile = open(path,\"wb\")\nfile.write(data)\nfile.close()\n\"\"\"\nurl = \"http://74.41.0.1:8080/login.html\"\nheaders = (\"User-Agent\",\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36\")\nopener = urllib.request.build_opener()\nopener.addheaders = [headers]\nurllib.request.install_opener(opener)\ndata = urllib.request.urlopen(url).read().decode(\"utf-8\",\"ignore\")\n# path = \"C:/Users/diaozhende/Desktop/login.html\"\n# file = open(path,\"wb\")\n# file.write(data)\npat = \"background-image: url(.*?);\"\nresult = re.compile(pat).findall(data)\nfor i in range(0,len(result)):\n img = result[i]\n imgPath = img[1:len(img)-1]\n imgUrl = \"http://74.41.0.1:8080/\"+imgPath\n print(imgUrl)\n path = \"C:/Users/diaozhende/Desktop/login.jpg\"\n urllib.request.urlretrieve(imgUrl,filename=path)\n","sub_path":"pythonAnalysis/firstProject/dataAnalysisDemo/myCrmAnalysisDemo.py","file_name":"myCrmAnalysisDemo.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"29055843","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@author: loopgan\n@time: 9/7/17 10:39 AM\n\"\"\"\nimport sys\nfrom pathlib import Path\n\nworkspace = './'\nsys.path.append(workspace)\n\n\nfrom model import mlp_model, rf_model\nfrom utils.plots import *\n\nif __name__ == '__main__':\n\n # Load inputs\n if len(sys.argv) == 3:\n cell_line = sys.argv[1]\n date = sys.argv[2]\n feature_pos= Path(\"/net/data.isilon/ag-cherrmann/echernova/model_input/{0}/positives_{1}.csv\".format(cell_line, cell_line))\n feature_neg= Path(\"/net/data.isilon/ag-cherrmann/echernova/model_input/{0}/negatives_{1}.csv\".format(cell_line, cell_line))\n else:\n print(\"Incorrect number of inputs!\")\n\n # Testing the full model and the model with features available for microglia\n exclude = False # the list is included\n data_list = [False, ['H3K9me3', 'H3K4me1', 'H3K36me3', 'H3K27me3', 'H3K27ac', 'CTCF']] # data which is available for microglia\n \n # MLP model\n # print(\"MLP...\")\n # result_folder_mlp = Path('/net/data.isilon/ag-cherrmann/echernova/model_output/{0}'.format(cell_line))\n # for exluded_features in data_list:\n # print(exluded_features)\n # mlp_model.mlp_result((feature_pos, feature_neg), result_folder_mlp, hist_list=exluded_features, exclude=exclude, date=date, input_type='csv')\n # plot_roc_folder(result_folder_mlp, result_folder_mlp/'{0}_mlp_ROC_curve_{1}_only_available_mods.png'.format(date, cell_line), 'ROC comparison: MLP on {0}'.format(cell_line))\n \n # RF\n print(\"RF...\")\n result_folder_rf = Path('/net/data.isilon/ag-cherrmann/echernova/model_output/{0}'.format(cell_line))\n for exluded_features in data_list:\n print(exluded_features)\n rf_model.rf_result((feature_pos, feature_neg), result_folder_rf, hist_list=exluded_features, exclude=exclude, date=date, input_type='csv')\n plot_roc_folder(result_folder_rf, result_folder_rf/'{0}_rf_ROC_curve_{1}_only_available_mods.png'.format(date, cell_line), 'ROC comparison: RF on {0}'.format(cell_line))\n","sub_path":"src/tad_lactuca.py","file_name":"tad_lactuca.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"171686924","text":"class Solution:\n def 
solveSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: void Do not return anything, modify board in-place instead.\n \"\"\"\n self.board = board\n self.solve()\n\n def solve(self):\n self.solve_caller()\n\n def solve_caller(self):\n row, col = self.findBlankBlock()\n if row == -1:\n return True\n\n # back tracking\n for fill_num in map(lambda x: str(x), range(1, 10)):\n if self.isValid(row, col, fill_num):\n self.board[row][col] = fill_num\n if self.solve_caller():\n return True\n self.board[row][col] = '.'\n\n return False\n\n def findBlankBlock(self):\n for row in range(9):\n for col in range(9):\n if self.board[row][col] == '.':\n return row, col\n return -1, -1\n\n def isValid(self, row, col, fill_num):\n if self.checkRow(row, fill_num) and self.checkCol(col, fill_num) and self.checkBox(row, col, fill_num):\n return True\n else:\n return False\n\n def checkRow(self, row, fill_num):\n for col in range(9):\n if self.board[row][col] == fill_num:\n return False\n return True\n\n def checkCol(self, col, fill_num):\n for row in range(9):\n if self.board[row][col] == fill_num:\n return False\n return True\n\n def checkBox(self, row, col, fill_num):\n if 0 <= row <= 2:\n row_start = 0\n elif 3 <= row <= 5:\n row_start = 3\n else:\n row_start = 6\n\n if 0 <= col <= 2:\n col_start = 0\n elif 3 <= col <= 5:\n col_start = 3\n else:\n col_start = 6\n\n for r in range(row_start, row_start + 3):\n for c in range(col_start, col_start + 3):\n if self.board[r][c] == fill_num:\n return False\n return True\n\n\ns = Solution()\nboard = [[\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"], [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"], [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"], [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"], [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"]]\ns.solveSudoku(board)\n","sub_path":"python/37. Sudoku Solver.py","file_name":"37. Sudoku Solver.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"602358753","text":"#!/usr/bin/env python3\n\nimport sys\n#import time\n\ndef bsearch(a, q):\n low = 0\n high = len(a)\n while low < high:\n mid = (low + high)//2\n\n if a[mid] < q:\n low = mid + 1\n else:\n high = mid\n\n return low\n\ndef contains(a, q, i):\n return i < len(a) and a[i] == q\n\ndef reverse(s):\n return s[::-1]\n\ndef with_reverse(a, s):\n srev = reverse(s)\n i = bsearch(a, srev)\n return contains(a, srev, i)\n\ndef main():\n #t0 = time.time()\n\n words = [line.rstrip() for line in sys.stdin]\n words_caseless = [w.casefold() for w in words]\n w_revs = [w for w in words if 5 <= len(w) and with_reverse(words_caseless, w.casefold())]\n print(w_revs)\n\n #t1 = time.time()\n #print(t1 - t0)\n\nif __name__ == '__main__':\n main()","sub_path":"year1_1718/computer_programming_2/scripts/20180722180629/2018-02-23/reversecomp_031.py","file_name":"reversecomp_031.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"81450330","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('',views.board,name=\"board\"),\n path('detail/<int:blog_id>',views.detail,name=\"detail\"),\n # <int:blog_id> <- path converter (useful for handling multiple objects and auto-generating hierarchical URLs)\n # passes blog_id to blog.views.detail as an argument\n path('detail2/',views.detail2,name=\"detail2\"),\n path('detail3/',views.detail3,name=\"detail3\"),\n path('new/',views.new,name=\"new\"),\n path('create',views.create,name=\"create\"),\n path('place/',views.place,name=\"place\"),\n path('java/',views.java,name=\"java\"),\n path('android/',views.android,name=\"android\"),\n # path('when this URL comes in', run that function (wherever it lives))\n]","sub_path":"project/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"375058152","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport sys, getopt\r\nimport os \r\n#print(\"current dir \", os.getcwd())\r\n#raise SystemExit()\r\n\r\ndef func(x):\r\n x = 1 \r\n return x\r\n\r\n#if __name__ == '__main__':\r\n# func()\r\n\r\n \r\nopt, args = getopt.getopt(sys.argv[1:],'hi:o:Ir:')\r\nprint(dict(opt))\r\n\r\n\r\nf = open(dict(opt)['-r'], 'w')\r\nsys.stdout = f\r\n\r\nwith open(dict(opt)['-i'], 'r') as w:\r\n for key in dict(opt):\r\n if key != '-r' and key != '-i':\r\n if True:\r\n for line in w:\r\n f.write(line)\r\n\r\n","sub_path":"get_opt_cmd_boring_task.py","file_name":"get_opt_cmd_boring_task.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"437368491","text":"import random\n\nmember = ['성우', '재서', '희제', '호찬']\ntoday = ['React', 'PS', 'OS/NET', 'JS']\n\nrandom_q = []\n\nwhile True:\n\trandom_q = [i for i in range(4)]\n\trandom.shuffle(random_q)\n\tis_dup = False\n\tfor i in range(4):\n\t\tif random_q[i] == i:\n\t\t\tis_dup = True\n\tif not is_dup:\n\t\tbreak\n\nfor idx, ran_num in enumerate(random_q):\n\tprint(f\"{member[idx]} gets the {today[ran_num]} question!\")\n","sub_path":"member_random.py","file_name":"member_random.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"420568339","text":"\"\"\"empty message\n\nRevision ID: 6326f169b312\nRevises: 30591aaff1a6\nCreate Date: 2019-08-18 09:44:40.538853\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6326f169b312'\ndown_revision = '30591aaff1a6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('name',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=200), nullable=True),\n sa.Column('surname', sa.String(length=300), nullable=True),\n sa.Column('age', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('username', sa.String(length=50), nullable=False),\n sa.Column('email', sa.String(length=100), nullable=False),\n sa.Column('password_hash', sa.String(length=100), nullable=False),\n sa.Column('created_on', sa.DateTime(), nullable=True),\n sa.Column('updated_on', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('username')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('users')\n op.drop_table('name')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/6326f169b312_.py","file_name":"6326f169b312_.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"425657710","text":"#coding=utf-8\n'''\nfrom collections import Iterable\nx = isinstance('abc',Iterable)\nprint(x)\n\nfor i, value in enumerate(['A', 'B', 'C']):\n print(i,value)\n\ny = list(range(1, 11))\nprint(y)\n\nL = []\nfor x in range(1, 11):\n L.append(x * x)\nprint(L)\n\nprint([x * x for x in range(1, 11)])\nz = [x * x for x in range(1, 11) if x % 2 == 0]\nprint(z)\na = [m + n for m in 'ABC' for n in 'XYZ']\nprint(a)\n\nimport os\nprint([d for d in os.listdir('.')])\n\nd = {'x': 'A', 'y': 'B', 'z': 'C' }\nfor k, v in d.items():\n print(k, '=', v)\n \nprint([k + '=' + v for k, v in d.items()])\n\nL = ['Hello', 'World', 'IBM', 'Apple']\nprint([s.lower() for s in L])\n'''\n# Yang Hui's (Pascal's) triangle\n# treat each row and the row above it as lists of the same length, padded with 0 at both ends\ndef triangles():\n a = [1]\n while True:\n yield a\n a = [sum(i) for i in zip([0]+a,a+[0])]\nn=0\nfor t in triangles():\n print(t)\n n=n+1\n if n == 10:\n break\n\n# same staggering approach as above, except indexes -1 and i point at the appended 0 at the same time\n# each number of the new row then equals the sum of the two adjacent numbers in the row above (counting from the first number of the previous row, starting at positions 0 and 1)\ndef Triangles():\n N=[1]\n while True:\n yield N\n N.append(0)\n N=[N[i-1] + N[i] for i in range(len(N))]\nn=0\nfor t in Triangles():\n print(t)\n n=n+1\n if n == 10:\n break\n'''\nx = [1,2,3]\ny = [2,3,4]\nz = zip(x,y) # implements the staggering\nfor t in z:\n print(t)\n print(sum(t)) # adds the numbers at the same positions\n'''","sub_path":"Learning/base5.py","file_name":"base5.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"64677594","text":"from PIL import Image\nimport argparse\nimport os\n\nBASE_PATH = 'test'\n\n\nclass ImageHandler():\n\t\"\"\" \"\"\"\n\n\tdef __init__(self, imagename):\n\t\tself.imgname = imagename\n\t\tself.img = Image.open(imagename)\n\n\tdef flip(self):\n\t\timg = self.img.transpose(Image.FLIP_LEFT_RIGHT)\n\t\tnew_name = self.imgname.split('.')[-2] + '_flipped.' 
+ self.imgname.split('.')[-1]\n\t\tprint(new_name)\n\t\tself.write_image(img, new_name)\n\n\t\t#raise saturation and value\n\t\t#color jittering\n\t\t#potentially random crops\n\n\tdef write_image(self, img, name):\n\t\timg.save(name)\n\n\ndef run_images(path):\n\tfor file in os.listdir(path):\n\t\ti = ImageHandler(path+'/'+file)\n\t\ti.flip()\n\n\n\nif __name__ == \"__main__\":\n\trun_images(BASE_PATH)\n","sub_path":"increase_data(1).py","file_name":"increase_data(1).py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"377014627","text":"\"\"\"\nThis module contains the class for detecting\nthe presence of keywords in an audio stream\n\"\"\"\nimport logging\nimport os\n\nimport numpy as np # type: ignore\n\nfrom spokestack.context import SpeechContext\nfrom spokestack.models.tensorflow import TFLiteModel\nfrom spokestack.ring_buffer import RingBuffer\n\n\n_LOG = logging.getLogger(__name__)\n\n\nclass WakewordTrigger:\n \"\"\"Detects the presence of a wakeword in the audio input\n\n Args:\n pre_emphasis (float): The value of the pre-emmphasis filter\n sample_rate (int): The number of audio samples per second of audio (kHz)\n fft_window_type (str): The type of fft window. (only support for hann)\n fft_hop_length (int): Audio sliding window for STFT calculation (ms)\n model_dir (str): Path to the directory containing .tflite models\n posterior_threshold (float): Probability threshold for if a wakeword\n was detected\n \"\"\"\n\n def __init__(\n self,\n pre_emphasis: float = 0.0,\n sample_rate: int = 16000,\n fft_window_type: str = \"hann\",\n fft_hop_length: int = 10,\n model_dir: str = \"\",\n posterior_threshold: float = 0.5,\n **kwargs,\n ) -> None:\n\n self.pre_emphasis: float = pre_emphasis\n self.hop_length: int = int(fft_hop_length * sample_rate / 1000)\n\n if fft_window_type != \"hann\":\n raise ValueError(\"Invalid fft_window_type\")\n\n self.filter_model: TFLiteModel = TFLiteModel(\n model_path=os.path.join(model_dir, \"filter.tflite\")\n )\n self.encode_model: TFLiteModel = TFLiteModel(\n model_path=os.path.join(model_dir, \"encode.tflite\")\n )\n self.detect_model: TFLiteModel = TFLiteModel(\n model_path=os.path.join(model_dir, \"detect.tflite\")\n )\n\n # window size calculated based on fft\n # the filter inputs are (fft_size - 1) / 2\n # which makes the window size (post_fft_size - 1) * 2\n self._window_size = (self.filter_model.input_details[0][\"shape\"][-1] - 1) * 2\n self._fft_window = np.hanning(self._window_size)\n\n # retrieve the mel_length and mel_width based on the encoder model metadata\n # these allocate the buffer to the correct size\n self.mel_length: int = self.encode_model.input_details[0][\"shape\"][1]\n self.mel_width: int = self.encode_model.input_details[0][\"shape\"][-1]\n\n # initialize the first state input for autoregressive encoder model\n # retrieve the encode_length and encode_width from the model detect_model\n # metadata. 
We get the dimensions from the detect_model inputs because the\n # encode_model runs autoregressively and outputs a single encoded sample.\n # the detect_model input is a collection of these samples.\n #self.state = np.zeros(self.encode_model.input_details[1][\"shape\"], np.float32)\n self.encode_length: int = self.detect_model.input_details[0][\"shape\"][1]\n self.encode_width: int = self.detect_model.input_details[0][\"shape\"][-1]\n\n self.sample_window: RingBuffer = RingBuffer(shape=[self._window_size])\n self.frame_window: RingBuffer = RingBuffer(\n shape=[self.mel_length, self.mel_width]\n )\n self.encode_window: RingBuffer = RingBuffer(\n shape=[1, self.encode_length, self.encode_width]\n )\n\n # initialize the frame and encode windows with zeros\n # this minimizes the delay caused by filling the buffer\n self.frame_window.fill(0.0)\n self.encode_window.fill(-1.0)\n\n self._posterior_threshold: float = posterior_threshold\n self._posterior_max: float = 0.0\n self._prev_sample: float = 0.0\n self._is_speech: bool = False\n\n def __call__(self, context: SpeechContext, frame) -> None:\n \"\"\"Entry point of the trigger\n\n Args:\n context (SpeechContext): current state of the speech pipeline\n frame (np.ndarray): a single frame of an audio signal\n\n Returns: None\n\n \"\"\"\n\n # detect vad edges for wakeword deactivation\n vad_fall = self._is_speech and not context.is_speech\n self._is_speech = context.is_speech\n\n # sample frame to detect the presence of wakeword\n if not context.is_active:\n self._sample(context, frame)\n\n # reset on vad fall deactivation\n if vad_fall:\n if not context.is_active:\n _LOG.info(f\"wake: {self._posterior_max}\")\n self.reset()\n\n def _sample(self, context: SpeechContext, frame) -> None:\n # convert the PCM-16 audio to float32 in (-1.0, 1.0)\n frame = frame.astype(np.float32) / (2 ** 15 - 1)\n frame = np.clip(frame, -1.0, 1.0)\n\n # pull out a single value from the frame and apply pre-emphasis\n # with the previous sample then cache the previous sample\n # to be use in the next iteration\n prev_sample = frame[-1]\n frame -= self.pre_emphasis * np.append(self._prev_sample, frame[:-1])\n self._prev_sample = prev_sample\n\n # fill the sample window to analyze speech containing samples\n # after each window fill the buffer advances by the hop length\n # to produce an overlapping window\n for sample in frame:\n self.sample_window.write(sample)\n if self.sample_window.is_full:\n if context.is_speech:\n self._analyze(context)\n self.sample_window.rewind().seek(self.hop_length)\n\n def _analyze(self, context: SpeechContext) -> None:\n # read the full contents of the sample window to calculate a single frame\n # of the STFT by applying the DFT to a real-valued input and\n # taking the magnitude of the complex DFT\n frame = self.sample_window.read_all()\n frame = np.fft.rfft(frame * self._fft_window, n=self._window_size)\n frame = np.abs(frame).astype(np.float32)\n\n # compute mel spectrogram\n self._filter(context, frame)\n\n def _filter(self, context: SpeechContext, frame) -> None:\n # add the batch dimension and compute the mel spectrogram with filter model\n frame = np.expand_dims(frame, 0)\n frame = self.filter_model(frame)[0]\n\n # advance the window by 1 and write mel frame to the frame buffer\n self.frame_window.rewind().seek(1)\n self.frame_window.write(frame)\n\n # encode the mel spectrogram\n self._encode(context)\n\n def _encode(self, context: SpeechContext) -> None:\n # read the full contents of the frame window and add the batch dimension\n # 
run the encoder and save the output state for autoregression\n frame = self.frame_window.read_all()\n frame = np.expand_dims(frame, 0)\n #frame, self.state = self.encode_model(frame, self.state)\n frame = self.encode_model(frame)\n\n # accumulate encoded samples until size of detection window\n self.encode_window.rewind().seek(1)\n self.encode_window.write(np.squeeze(frame))\n self._detect(context)\n\n def _detect(self, context: SpeechContext) -> None:\n # read the full contents of the encode window and add the batch dimension\n # calculate a scalar probability of if the frame contains the wakeword\n # with the detect model\n frame = self.encode_window.read_all()\n #frame = np.expand_dims(frame, 0)\n posterior = self.detect_model(frame)[0][0][1]\n\n if posterior > self._posterior_max:\n self._posterior_max = posterior\n if posterior > self._posterior_threshold:\n context.is_active = True\n _LOG.info(f\"wake: {self._posterior_max}\")\n\n def reset(self) -> None:\n \"\"\" Resets the currect WakewordDetector state \"\"\"\n self.sample_window.reset()\n self.frame_window.reset().fill(0.0)\n self.encode_window.reset().fill(-1.0)\n #self.state[:] = 0.0\n self._posterior_max = 0.0\n\n def close(self) -> None:\n \"\"\" Close interface for use in the pipeline \"\"\"\n self.reset()\n","sub_path":"spokestack/wakeword/tflite_orig.py","file_name":"tflite_orig.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"57286323","text":"# coding:utf-8\n\nclass PlayerAI:\n\n def __init__(self, team_index, player_index, jockey):\n self.team_index = team_index\n self.player_index = player_index\n self.player_id = jockey.get_player_id(team_index, player_index)\n self.jockey = jockey\n self.money = jockey.get_player_money(team_index, player_index)\n # # print(self.jockey)\n # self.check_turn_data()\n\n def check_market(self):\n if not self.jockey.market_state:\n # print('market is closed')\n return False\n\n return True\n\n\n def assign_items(self):\n for player_item in self.jockey.itp_list:\n if self.player_index is player_item['player']:\n self.items = player_item\n return self.items\n\n def buy(self, saled_item):\n\n # item에 관련된 정보, 이름, 초기가격, 변화가\n # 시나리오\n # 플레이어가 특정 아이템을 산다.\n # 어떤 아이템인가? 그 시점의 가격은 얼마인가?\n # 마켓 상태를 먼저 확인해야 한다. 마켓이 열려 있으면 구매가 가능하고 닫혀 있으면 구매가 불가하다.\n # 마켓이 열려있다면, 플레이어가 가진 돈에서 클릭 당시의 돈이 깎여야 한다.\n # 그리고 플레이어가 클릭한 아이템의 수량이 +1 증가한다.\n if not self.check_market():\n # print('market is closed')\n return\n\n current_item_price = self.jockey.get_item_price(self.jockey.turn_index, saled_item)\n\n if self.money < current_item_price:\n # print('buy failed')\n return\n\n if self.money > current_item_price:\n self.money = self.money - current_item_price\n self.jockey.update_item_count(saled_item, self.player_id, 1)\n self.jockey.update_money(self.player_id, self.money)\n\n\n # 가격 공식에 따라 가격을 얻어 온다. 
From where?\n\n # print(\"buy\")\n return\n\n def sell(self, saled_item):\n\n # pre-condition (early-return)\n # robust code\n # coverage\n if not self.check_market():\n return\n\n current_item_price = self.jockey.get_item_price(self.jockey.turn_index, saled_item)\n item_count = self.jockey.get_item_count(saled_item, self.player_id)\n\n if item_count == 0:\n # print('sell failed')\n return\n\n self.money = self.money + current_item_price\n self.jockey.update_item_count(saled_item, self.player_id, -1)\n self.jockey.update_money(self.team_index, self.player_index, self.money)\n\n\n # print(\"sell\")\n return\n\n def trade(self, turn_index, item, chip):\n if not self.check_market():\n return\n\n item_count = self.jockey.get_item_count(item, self.player_id)\n exchange_rate = self.jockey.get_exchange_rate(turn_index)\n\n if chip > item_count/exchange_rate:\n # print('trade failed', chip, item_count, exchange_rate, 'item name is', item)\n return\n\n # in the end, a chip having been exchanged means the item count has decreased.\n count = chip*exchange_rate\n\n self.jockey.update_item_count(item, self.player_id, -count)\n result = self.jockey.populate_trade_requests(self.team_index, self.player_index, self.player_id, item, count)\n\n # print(\"trade\")\n return result\n","sub_path":"db/player_ai.py","file_name":"player_ai.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"299611845","text":"import logging\nimport re\nimport sys\nimport time\nimport unittest\nimport xmlrpclib\n\nSWIFT_DISK_SIZE = 10\nSWIFT_HASH_PATH_SUFFIX = 'lota'\nGLANCE_STORE = 'swift'\nBRIDGE_INTERFACE = 'eth3'\nGUEST_NW_VIF_MODE = 'noip'\n\ndef conditional_skip():\n \"\"\" Decorator for skipping a test according to preferences set by the\n user on the command line when the test run was launched\n \"\"\" \n def deco_conditional_skip(f):\n def f_conditional_skip(*args,**kwargs):\n testObject = args[0] # self in the test\n if hasattr(testObject, 'skip_list'):\n match=re.match(\"^test_([0-9][0-9]).*$\",testObject._testMethodName)\n if len(match.groups())>0:\n idx = match.group(1)\n if idx in testObject.skip_list:\n testObject.skipTest(\"Skipped by user\")\n return\n return f(*args, **kwargs)\n return f_conditional_skip\n \n return deco_conditional_skip\n\ndef read_rolemappings_file(filename):\n roles_file = open(filename)\n vpx_roles_data = roles_file.read().split('\\n')\n vpx_roles = {}\n roles_vpx = {}\n for vpx in vpx_roles_data:\n if vpx != '':\n fields = vpx.split(' ')\n vpx_roles[fields[0]] = dict( host=fields[1],\n roles=fields[2:])\n for role in fields[2:]:\n if not role in roles_vpx:\n roles_vpx[role] = []\n roles_vpx[role].append(fields[0])\n return vpx_roles, roles_vpx\n\nclass GeppettoAPISetupTestCase(unittest.TestCase):\n\n def setUp(self):\n self.log = logging.getLogger(\"guest_networking.setup_cloud\")\n self.log.debug(\"Running %s\", self._testMethodName)\n self.proxy = xmlrpclib.ServerProxy(master_url)\n\n @conditional_skip()\n def test_00_hypervisor_password(self):\n #set password\n self.proxy.Config.set(\"HAPI_PASS\", xs_root_password)\n #TODO: remotely execute hapi_check on hypervisor\n \n @conditional_skip()\n def test_01_setup_rabbit_mysql(self):\n mysql_worker = \"%s.%s\" %(roles_vpx['mysqld'][0], dns_suffix)\n rabbit_worker = \"%s.%s\" %(roles_vpx['rabbitmq-server'][0], dns_suffix)\n self.log.debug(\"MySQL worker:%s\", mysql_worker)\n self.log.debug(\"rabbit worker:%s\", rabbit_worker)\n self.proxy.Compute.add_database(mysql_worker, {\"MYSQL_PASS\": 
\"citrix\"})\n self.proxy.Compute.add_message_queue(rabbit_worker)\n self.log.debug(\"test_01_setup_rabbit_mysql completed\")\n \n @conditional_skip()\n def test_02_setup_identity(self):\n keystone_worker = \"%s.%s\" %(roles_vpx['openstack-keystone-auth'][0],\n dns_suffix)\n self.log.debug(\"Keystone worker:%s\", keystone_worker)\n self.proxy.Identity.add_auth(keystone_worker, {})\n self.log.debug(\"test_02_setup_identity completed\")\n \n @conditional_skip()\n def test_03_setup_object_store_and_imaging(self):\n storage_workers = [ \"%s.%s\" %(role, dns_suffix)\n for role in roles_vpx['openstack-swift-container']]\n proxy_worker = \"%s.%s\" %(roles_vpx['openstack-swift-proxy'][0],\n dns_suffix)\n self.log.debug(\"Swift container workers:%s\", storage_workers)\n self.log.debug(\"Swift proxy worker:%s\", proxy_worker)\n \n disk_size = SWIFT_DISK_SIZE\n hash_suff = SWIFT_HASH_PATH_SUFFIX\n self.proxy.Config.set(\"SWIFT_HASH_PATH_SUFFIX\", hash_suff)\n self.proxy.ObjectStorage.add_apis([proxy_worker], {})\n self.proxy.ObjectStorage.add_workers(storage_workers,\n {\"SWIFT_DISK_SIZE_GB\":disk_size})\n # wait for the tasks to start executing and populate the swift address\n time.sleep(30)\n glance_worker = \"%s.%s\" %(roles_vpx['openstack-glance-registry'][0],\n dns_suffix)\n config = {}\n config[\"GLANCE_STORE\"] = GLANCE_STORE\n config[\"GLANCE_SWIFT_ADDRESS\"] = proxy_worker\n self.proxy.Imaging.add_registry(glance_worker, config)\n self.log.debug(\"test_03_setup_object_store_and_imaging completed\")\n \n @conditional_skip()\n def test_04_setup_api(self):\n api_worker = \"%s.%s\" %(roles_vpx['openstack-nova-api'][0], dns_suffix)\n scheduler_worker = \"%s.%s\" %(roles_vpx['openstack-nova-scheduler'][0],\n dns_suffix)\n self.proxy.Compute.add_apis([api_worker], {})\n self.proxy.Scheduling.add_workers([scheduler_worker], {})\n self.log.debug(\"test_04_setup_api completed\")\n \n @conditional_skip()\n def test_05_setup_network(self):\n config = {}\n config['MODE'] = networking_mode.split('-')[0]\n self.log.debug(\"Configuring networking mode:%s\", config['MODE'])\n config['GUEST_NETWORK_BRIDGE'] = guest_network_bridge\n config['BRIDGE_INTERFACE'] = BRIDGE_INTERFACE\n config['GUEST_NW_VIF_MODE'] = GUEST_NW_VIF_MODE\n if networking_mode.lower().endswith('ha'):\n config[\"MULTI_HOST\"] = True\n self.log.debug(\"Performing HA setup\")\n self.proxy.Network.configure_ha(config)\n else:\n config[\"MULTI_HOST\"] = False\n # Pick first worker only\n network_worker = \"%s.%s\" %(roles_vpx['openstack-nova-network'][0], dns_suffix)\n self.log.debug(\"Network worker:%s\", network_worker)\n self.proxy.Network.add_workers([network_worker], config)\n self.log.debug(\"test_05_setup_network completed\")\n\n @conditional_skip()\n def test_06_setup_compute(self):\n # MUST ADD ALL THE WORKERS!!!\n compute_workers = [ \"%s.%s\" %(role, dns_suffix)\n for role in roles_vpx['openstack-nova-compute']]\n self.log.debug(\"Compute workers:%s\", compute_workers)\n self.proxy.Compute.add_workers(compute_workers, {})\n self.log.debug(\"test_06_setup_compute completed\")\n \n @conditional_skip()\n def test_07_wait_for_stable_cloud(self):\n self.log.debug(\"Waiting for deployment to become stable\")\n fqdns = self.proxy.Node.get_all()\n details=self.proxy.Node.get_details(fqdns)\n \n max_retries = 20\n retries = 0\n \n while True:\n details=self.proxy.Node.get_details(fqdns)\n all_stable = all([details[fqdn]['report_status']=='u'\n for fqdn in fqdns])\n self.log.debug(\"All stable:%s\", all_stable)\n if all_stable:\n break\n 
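# not stable yet: poll once per minute; with max_retries = 20 this gives\n # the deployment up to max_retries * 60 s (about 20 minutes) to settle\n 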
time.sleep(60)\n retries = retries + 1\n self.log.debug(\"retries:%s\", retries)\n self.assertNotEqual(retries, max_retries,\n \"The cloud was unstable after %d seconds.\"\n \"Test failed\"\n %(max_retries*60))\n self.log.debug(\"test_07_wait_for_stable_cloud completed\")\n \nif __name__ == \"__main__\":\n master_url = '%s/openstack/geppetto/v1' % sys.argv[1]\n xs_root_password = sys.argv[2]\n vpx_roles_file = sys.argv[3]\n networking_mode = sys.argv[4]\n man_network_bridge = sys.argv[5]\n public_network_bridge = sys.argv[6] \n guest_network_bridge = sys.argv[7]\n floating_ip_range = sys.argv[8]\n dns_suffix=sys.argv[9]\n logging.basicConfig( stream=sys.stdout )\n logging.getLogger(\"guest_networking.setup_cloud\").\\\n setLevel( logging.DEBUG )\n vpx_roles, roles_vpx = read_rolemappings_file(vpx_roles_file)\n unittest.main(argv=[sys.argv[0]])\n","sub_path":"jenkins/guest_networking/setup_cloud.py","file_name":"setup_cloud.py","file_ext":"py","file_size_in_byte":7650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"498870829","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\n\nclass Solution(object):\n def flatten(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n \"\"\"\n\n # next_stack stores the next node\n p, next_stack = head, []\n while p is not None:\n # if node p has child, then it should be flatten\n if p.child is not None:\n # if p has child and next, then the next node of p should be the next node of p.child\n # so store it in next stack(may have nested children)\n if p.next:\n next_stack.append(p.next)\n # establish the relationship of p and p.child, set p.child = None\n p.next, p.child.prev, p.child = p.child, p, None\n # if p.next is None and next_stack is not empty\n # means p should be the prev node of the last element of next stack\n if p.next is None and next_stack:\n p.next = next_stack.pop()\n p.next.prev = p\n p = p.next\n\n return head\n","sub_path":"flatten_a_multilevel_doubly_linked_list.py","file_name":"flatten_a_multilevel_doubly_linked_list.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"544689141","text":"class Solution:\r\n \"\"\"\r\n @param A: An integer array.\r\n @param k: A positive integer (k <= length(A))\r\n @param target: Integer\r\n @return a list of lists of integer \r\n \"\"\"\r\n def kSumII(self, A, k, target):\r\n length = len(A)\r\n if length == 0 or length < k:\r\n return None\r\n result = []\r\n n = k\r\n def helper(A, n, target, temp):\r\n length = len(A)\r\n if sum(temp) > target or length< n:\r\n return\r\n if len(temp) == k and sum(temp) == target:\r\n result.append(temp)\r\n for i in xrange(length):\r\n helper(A[i+ 1:], n- 1, target, temp+[A[i]])\r\n helper(A, n, target, [])\r\n return result\r\n","sub_path":"kSumII.py","file_name":"kSumII.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"638321722","text":"from tkinter import *\n\nclass trainSchedWindow2():\n def __init__(self, rootwin):\n self.trainSchedWindow2 = rootwin\n self.titleFrame = None\n self.titleLabel = None\n self.tableFrame = None\n\n self.make_view_train_sched2()\n\n def make_view_train_sched2(self):\n self.trainSchedWindow2.title(\"View Train Schedule\")\n\n # title label 
frame\n self.titleFrame = Frame(rootwin)\n self.titleFrame.pack(side = TOP)\n\n # View Train Schedule title label\n self.titleLabel = Label(self.titleFrame, text=\"View Train Schedule\", font=(\"\",20))\n self.titleLabel.grid(row=0, column=0, sticky=N+E+W, padx=50, pady=10)\n\n # Train Schedule Table frame\n self.tableFrame = Frame(rootwin)\n self.tableFrame.pack(side=TOP)\n\nrootwin = Tk()\napp = trainSchedWindow2(rootwin)\nrootwin.mainloop()","sub_path":"Done/view_train_schedule2.py","file_name":"view_train_schedule2.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"499231962","text":"\"\"\"\n============\n3D animation\n============\n\nA simple example of an animated plot... In 3D!\n\"\"\"\nfrom primes import *\nfrom random import random\nfrom math import floor,pi,e\ntimestep = .002 #timestep in natural unit of time\nphasestep = .01\nnumphases = int(1./phasestep)\nnumts = 500 #number of timesteps\nnumTori = 6\ntdim = 8\n#thMt = [1.,1.] + [0.]*(tdim-2)\n#thMs = [2.] + [1.] + [0.]*(tdim-3) + [1.]\n#thMs = [0.]*tdim\n#thMt = [0.,0.] + [2.]*(tdim-3) + [0.]\nthMt = [0.]*tdim\nphases = [0]*tdim\nthMs = [0.]*(tdim-1) + [1.]\n#thMt = [int(round(random())) for i in range(tdim)]\noffsets = [0.]*tdim\noffsett = [0.]*tdim\n#offsets = [6,5,4,3,2,1,1,2,3,4,5,6]\n#offsett = [1,2,1] + [0.]*(tdim-3)\n#offsett = getNRandomPrimes(range(tdim),tdim)\nnat = (1,1)\n#speeds = [[(2,1),nat,(1,2)],[(1,2),nat,(2,1)]]\n#speeds = [[nat,nat,nat,nat,nat,nat,nat,(8,1)]]\n#speeds = [[(1,1.5),(1,1.5),nat],[(1,1.5),(1,1.5),(2,1)]]\n#speeds = [[(7,1),(5,1),(1,3)]]\n#speeds = [[(7,1),(1,4),(1,2)]]\n#speeds = [[(1,1)]*(tdim-3) + [(1,1),(2,1),(2,1)], [(1,1)]*(tdim-3) + [(1,1),(1,2),(1,2)]]\n#numTori = len(speeds)\n#offset = [int(round(random()))]*tdim\n#thM = [3.,2.,1.,0.,0.,1.,2.,3.]\n#thM = [0.]*tdim\n#thM = getPrimes(tdim)\n#offset = [0.,0.,0.,0.,0.,0.,0.,0.]\n#thM = [0.,0.,1.]\n#offset = [0.,2.,0.]\n#offset = getNRandomPrimes(range(tdim),tdim)\n#offset = [o-1 for o in offset]\n#thM = [1.,0.]\n#offset = [0.,1.]\n\n#proj = [[1,1] + [0]*(tdim-2) + [1]]*numTori\n#proj = [[1,1,1] + [0]*(tdim-3)]*numTori\nproj = [[1,1,1,0,0,0,0,0,0]]*numTori\nsection = [None,None,None,0.,0.,0.,0.]\nuseproj = True\n#useproj = False\nusesection = False\n#usesection = True\n\n\n#primes = getNRandomPrimes(range(6),numTori/2)\nprimes = getNRandomPrimes(range(numTori),numTori)\nstartind = int(floor(random()*(len(primes)-numTori)))\n#speeds = [[1/(1+offset1+th1M*i),1/(1+offset2+th2M*i)] for i in range(numTori/2)]\n#speeds += [[(1+offset1+th1M*i),(1+offset2+th2M*i)] for i in range(numTori/2)]\n#speeds = [[(1+offset1+th1M*i),(1+offset2+th2M*i)] for i in range(numTori)]\n#print speeds\nif 'speeds' not in locals():\n speeds = [[(1+offsets[j]+thMs[j]*i, 1+offsett[j]+thMt[j]*i) for j in range(tdim)] for i in range(numTori)]\ntols = [[(1/(2*pi))*timestep*(s/t) for s,t in sp] + [(1/(2*pi))*timestep] for sp in speeds]\n#speeds = [[float(p),1.] for p in primes]\n#speeds += [[1.,float(p)] for p in primes]\n#speeds = [[float(p),1.] 
for p in primes]\n#speeds = [[1/2.,1],[1/3.,1],[1/4.,1],[2,1],[3,1],[4,1]]\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport matplotlib.animation as animation\nfrom horntorus import *\n\ndef genTorusPath(t,ts,nts):\n #expects 2D horn torus\n path = np.empty((3,nts))\n for i in range(nts):\n x,y,z = t.timestepGetCart(ts)\n path[0][i] = x\n path[1][i] = y\n path[2][i] = z\n return path\n\ndef genNDTorusSectionPath(ndt,sec,tol,ts,nts):\n path = np.empty((3,nts))\n for i in range(nts):\n arr = ndt.timestepGetCart(ts,'section',sec,tol)\n if(not arr):\n path[0][i] = 0\n path[1][i] = 0\n path[2][i] = 0\n else:\n path[0][i] = arr[0]\n path[1][i] = arr[1]\n path[2][i] = arr[2]\n return path\n\ndef genNDTorusProjPath(ndt,proj,ts,nts):\n ndim = 0\n for p in proj:\n if(p is not 0):\n ndim += 1\n path = np.empty((ndim,nts))\n for i in range(nts):\n coords = ndt.timestepGetCart(ts,'proj',proj)\n for j in range(ndim):\n path[j][i] = coords[j]\n return path\n\n\ndef update_lines(num, dataLines, lines):\n for line, data in zip(lines, dataLines):\n # NOTE: there is no .set_data() for 3 dim data...\n '''\n if(num > 4):\n line.set_data(data[0:2, num-4:num])\n line.set_3d_properties(data[2, num-4:num])\n else:\n line.set_data(data[0:2, :num])\n line.set_3d_properties(data[2, :num])\n '''\n line.set_data(data[0:2, :num])\n line.set_3d_properties(data[2, :num])\n return lines\n\ndef next_line(num, dataLines, lines):\n for line, data in zip(lines, dataLines[num]):\n # NOTE: there is no .set_data() for 3 dim data...\n '''\n if(num > 4):\n line.set_data(data[0:2, num-4:num])\n line.set_3d_properties(data[2, num-4:num])\n else:\n line.set_data(data[0:2, :num])\n line.set_3d_properties(data[2, :num])\n '''\n line.set_data(data[0:2, :])\n line.set_3d_properties(data[2, :])\n return lines\n\ndef next_pcollection(num, dataPts, pcolls):\n for pcoll, data in zip(pcolls, dataPts[num]):\n import ipdb; ipdb.set_trace()\n pcoll._offsets3d(data[0:3,:])\n pcoll.set_color(data[3,:])\n return pcolls\n\ndef minandmax(data):\n '''returns minimum and maximum along each axis for\n list of arrays'''\n xmax = 0\n xmin = 2e32\n ymax = 0\n ymin = 2e32\n zmax = 0\n zmin = 2e32\n for arr in data:\n maxes = np.amax(arr,1)\n if(maxes[0] > xmax):\n xmax = maxes[0]\n if(maxes[1] > ymax):\n ymax = maxes[1]\n if(maxes[2] > zmax):\n zmax = maxes[2]\n mins = np.amin(arr,1)\n if(mins[0] < xmin):\n xmin = mins[0]\n if(mins[1] < ymin):\n ymin = mins[1]\n if(mins[2] < zmin):\n zmin = mins[2]\n return ([xmin,xmax],[ymin,ymax],[zmin,zmax])\n\n# Attaching 3D axis to the figure\nfig = plt.figure()\nax = p3.Axes3D(fig)\n\n# Fifty lines of random 3-D lines\n#data = [Gen_RandLine(25, 3) for index in range(50)]\n\n#first generate the tori\ntoruses = [NDHornTorus(tdim,phases=phases) for i in range(numTori)]\n#toruses = [HornTorus() for i in range(1)]\n#then set the speeds for each torus\n#speeds = [[th1M*i+1.,th2M*i+1.] 
for i in range(numTori)]\nfor i,torus in enumerate(toruses):\n torus.setSpeeds(speeds[i])\n#now generate their paths to be animated\n#data = [genTorusPath(torus,timestep,numts) for torus in toruses]\n\nfor step in range(numphases):\n newphases = [ph + step*phasestep*2.*pi for ph in phases]\n for i, torus in enumerate(toruses):\n torus.setAngles(newphases)\n if(useproj):\n if 'data' in locals():\n data += [[genNDTorusProjPath(torus,proj[i],timestep,numts) for i, torus in\n enumerate(toruses)]]\n else:\n data = [[genNDTorusProjPath(torus,proj[i],timestep,numts) for i, torus in\n enumerate(toruses)]]\n elif(usesection):\n if 'data' in locals():\n data += [[genNDTorusSectionPath(torus,section,tols[i],timestep,numts) for i, torus in\n enumerate(toruses)]]\n else:\n data = [[genNDTorusSectionPath(torus,section,tols[i],timestep,numts) for i, torus in\n enumerate(toruses)]]\n else:\n if 'data' in locals():\n data += [[genTorusPath(torus,timestep,numts) for torus in toruses]]\n else:\n data = [[genTorusPath(torus,timestep,numts) for torus in toruses]]\n# Creating fifty line objects.\n# NOTE: Can't pass empty arrays into 3d version of plot()\nneeds_color = False\nif(len(data[0][0]) > 3):\n needs_color = True\n maximum = np.amax(data)\n minimum = np.amin(data)\n lines = [ax.scatter3D(dat[0, :], dat[1, :], zs=dat[2,\\\n :],c=dat[3,:],vmin=minimum,vmax=maximum) for dat in data[0]]\nelse:\n lines = [ax.plot(dat[0, :], dat[1, :], dat[2,:])[0] for dat in data[0]]\n\n# Setting the axes properties\n'''\nxlim,ylim,zlim = minandmax(data[0])\nif(xlim[0]*-1 > xlim[1]):\n xlim = [xlim[0],-xlim[0]]\nelse:\n xlim = [-xlim[1],xlim[1]]\nif(ylim[0]*-1 > ylim[1]):\n ylim = [ylim[0],-ylim[0]]\nelse:\n ylim = [-ylim[1],ylim[1]]\nif(zlim[0]*-1 > zlim[1]):\n zlim = [zlim[0],-zlim[0]]\nelse:\n zlim = [-zlim[1],zlim[1]]\ncoord = max(zlim[1],xlim[1],ylim[1])\ncoords = [-coord,coord]\n'''\nmaximum = np.amax(data)\nminimum = np.amin(data)\nif(-minimum > maximum):\n coords = [minimum,-minimum]\nelse:\n coords = [-maximum,maximum]\n\n#ax.set_xlim3d([data,1/pi])\n#ax.set_xlim3d(xlim)\nax.set_xlim3d(coords)\nax.set_xlabel('X')\n\n#ax.set_ylim3d([-1/pi,1/pi])\n#ax.set_ylim3d(ylim)\nax.set_ylim3d(coords)\nax.set_ylabel('Y')\n\n#ax.set_zlim3d([-1/(2*pi), 1/(2*pi)])\n#ax.set_zlim3d([-1/(pi), 1/(pi)])\n#ax.set_zlim3d(zlim)\nax.set_zlim3d(coords)\nax.set_zlabel('Z')\n\nax.set_title('3D Test')\nax.legend\n\n# Creating the Animation object\n#line_ani = animation.FuncAnimation(fig, update_lines, numts, fargs=(data[0], lines),\n# interval=30, blit=False)\nif(needs_color):\n line_ani = animation.FuncAnimation(fig, next_pcollection, numphases, fargs=(data, lines),\n interval=10, blit=False)\nelse:\n line_ani = animation.FuncAnimation(fig, next_line, numphases, fargs=(data, lines),\n interval=10, blit=False)\n\n#line_ani.save('horntori_{0}_{1}s_{2}t'.format(numTori,thMs,thMt),fps=30,extra_args=['-vcodec','libx264'])\nplt.show()\n","sub_path":"ex_animator.py","file_name":"ex_animator.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"284366924","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\n\nfrom torch.optim import Adam, SGD, LBFGS\nfrom torch.utils.data import DataLoader\n\nfrom models import FNN1d, FNN2d\nfrom utils import PDELoss, zero_grad\n\n\ndef predict(model, x, t, nu):\n '''\n Params:\n - model: model\n - xt: (N, 2) tensor\n Return: \n - u: (N, 1) 
tensor\n - residual: (N, 1) tensor\n '''\n model.eval()\n\n x.requires_grad = True\n t.requires_grad = True\n\n u = model(torch.cat([x, t], dim=1))\n\n grad_x, grad_t = autograd.grad(outputs=u.sum(), inputs=[x, t],\n create_graph=True)\n gradgrad_x, = autograd.grad(outputs=grad_x.sum(), inputs=[x])\n\n residual = grad_t + u * grad_x - nu * gradgrad_x\n return u.detach(), residual.detach()\n\n\ndef train(model, X_u, u, X_f,\n nu=1.0, num_epoch=100,\n device=torch.device('cpu'), optim='LBFGS'):\n model.to(device)\n model.train()\n optimizer = LBFGS(model.parameters(),\n lr=1.0,\n max_iter=50000,\n max_eval=50000,\n history_size=50,\n tolerance_grad=1e-5,\n tolerance_change=1.0 * np.finfo(float).eps,\n line_search_fn=\"strong_wolfe\")\n mse = nn.MSELoss()\n # training stage\n xts = torch.from_numpy(X_u).float().to(device)\n us = torch.from_numpy(u).float().to(device)\n\n xs = torch.from_numpy(X_f[:, 0:1]).float().to(device)\n ts = torch.from_numpy(X_f[:, 1:2]).float().to(device)\n xs.requires_grad = True\n ts.requires_grad = True\n iter = 0\n\n def loss_closure():\n nonlocal iter\n iter = iter + 1\n\n optimizer.zero_grad()\n\n zero_grad(xs)\n zero_grad(ts)\n # print(xs.grad)\n # MSE loss of prediction error\n pred_u = model(xts)\n mse_u = mse(pred_u, us)\n\n # MSE loss of PDE constraint\n f = PDELoss(model, xs, ts, nu)\n\n mse_f = torch.mean(f ** 2)\n loss = mse_u + mse_f\n loss.backward()\n\n if iter % 200 == 0:\n print('Iter: {}, total loss: {}, mse_u: {}, mse_f: {}'.\n format(iter, loss.item(), mse_u.item(), mse_f.item()))\n return loss\n\n optimizer.step(loss_closure)\n\n return model\n\n\nif __name__ == '__main__':\n\n # # net = FNN1d(modes=16, width=64)\n net = FNN2d(modes1=16, modes2=16, width=64)\n # # net2 = FNNd(modes1=16, modes2=16, width=64)\n data = torch.randn((1, 100, 256, 2))\n pred = net(data)\n print(pred.shape)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"393887126","text":"from dataclasses import dataclass, field\nfrom typing import Any, Union\n\nimport numpy as np\n\nfrom napari.layers.base._slice import _next_request_id\nfrom napari.layers.utils._slice_input import _SliceInput\n\n\n@dataclass(frozen=True)\nclass _VectorSliceResponse:\n \"\"\"Contains all the output data of slicing an Vectors layer.\n\n Attributes\n ----------\n indices : array like\n Indices of the sliced Vectors data.\n alphas : array like or scalar\n Used to change the opacity of the sliced vectors for visualization.\n Should be broadcastable to indices.\n dims : _SliceInput\n Describes the slicing plane or bounding box in the layer's dimensions.\n request_id : int\n The identifier of the request from which this was generated.\n \"\"\"\n\n indices: np.ndarray = field(repr=False)\n alphas: Union[np.ndarray, float] = field(repr=False)\n dims: _SliceInput\n request_id: int\n\n\n@dataclass(frozen=True)\nclass _VectorSliceRequest:\n \"\"\"A callable that stores all the input data needed to slice a Vectors layer.\n\n This should be treated a deeply immutable structure, even though some\n fields can be modified in place. 
It is like a function that has captured\n all its inputs already.\n\n In general, the calling an instance of this may take a long time, so you may\n want to run it off the main thread.\n\n Attributes\n ----------\n dims : _SliceInput\n Describes the slicing plane or bounding box in the layer's dimensions.\n data : Any\n The layer's data field, which is the main input to slicing.\n dims_indices : tuple of ints or slices\n The slice indices in the layer's data space.\n others\n See the corresponding attributes in `Layer` and `Vectors`.\n \"\"\"\n\n dims: _SliceInput\n data: Any = field(repr=False)\n dims_indices: Any = field(repr=False)\n length: float = field(repr=False)\n out_of_slice_display: bool = field(repr=False)\n id: int = field(default_factory=_next_request_id)\n\n def __call__(self) -> _VectorSliceResponse:\n # Return early if no data\n if len(self.data) == 0:\n return _VectorSliceResponse(\n indices=np.empty(0, dtype=int),\n alphas=np.empty(0),\n dims=self.dims,\n request_id=self.id,\n )\n\n not_disp = list(self.dims.not_displayed)\n if not not_disp:\n # If we want to display everything, then use all indices.\n # alpha is only impacted by not displayed data, therefore 1\n return _VectorSliceResponse(\n indices=np.arange(len(self.data), dtype=int),\n alphas=1,\n dims=self.dims,\n request_id=self.id,\n )\n\n # We want a numpy array so we can use fancy indexing with the non-displayed\n # indices, but as self.dims_indices can (and often/always does) contain slice\n # objects, the array has dtype=object which is then very slow for the\n # arithmetic below. As Vectors._round_index is always False, we can safely\n # convert to float to get a major performance improvement.\n not_disp_indices = np.array(self.dims_indices)[not_disp].astype(float)\n\n if self.out_of_slice_display and self.dims.ndim > 2:\n slice_indices, alphas = self._get_out_of_display_slice_data(\n not_disp, not_disp_indices\n )\n else:\n slice_indices, alphas = self._get_slice_data(\n not_disp, not_disp_indices\n )\n\n return _VectorSliceResponse(\n indices=slice_indices,\n alphas=alphas,\n dims=self.dims,\n request_id=self.id,\n )\n\n def _get_out_of_display_slice_data(self, not_disp, not_disp_indices):\n \"\"\"This method slices in the out-of-display case.\"\"\"\n data = self.data[:, 0, not_disp]\n distances = abs(data - not_disp_indices)\n # get the scaled projected vectors\n projected_lengths = abs(self.data[:, 1, not_disp] * self.length)\n # find where the distance to plane is less than the scaled vector\n matches = np.all(distances <= projected_lengths, axis=1)\n alpha_match = projected_lengths[matches]\n alpha_match[alpha_match == 0] = 1\n alpha_per_dim = (alpha_match - distances[matches]) / alpha_match\n alpha_per_dim[alpha_match == 0] = 1\n alpha = np.prod(alpha_per_dim, axis=1).astype(float)\n slice_indices = np.where(matches)[0].astype(int)\n return slice_indices, alpha\n\n def _get_slice_data(self, not_disp, not_disp_indices):\n \"\"\"This method slices in the simpler case.\"\"\"\n data = self.data[:, 0, not_disp]\n distances = np.abs(data - not_disp_indices)\n matches = np.all(distances <= 0.5, axis=1)\n slice_indices = np.where(matches)[0].astype(int)\n return slice_indices, 1\n","sub_path":"napari/layers/vectors/_slice.py","file_name":"_slice.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"156931284","text":"# 6.0001/6.00 Problem Set 5 - RSS Feed Filter\n# Name:\n# Collaborators:\n# Time:\n\nimport 
feedparser\nimport string\nimport time\nimport threading\nfrom project_util import translate_html\nfrom mtTkinter import *\nfrom datetime import datetime\nimport pytz\n\n\n#-----------------------------------------------------------------------\n\n#======================\n# Code for retrieving and parsing\n# Google and Yahoo News feeds\n# Do not change this code\n#======================\n\ndef process(url):\n \"\"\"\n Fetches news items from the rss url and parses them.\n Returns a list of NewsStory-s.\n \"\"\"\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n description = translate_html(entry.description)\n pubdate = translate_html(entry.published)\n\n try:\n pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n # pubdate.replace(tzinfo=None)\n except ValueError:\n pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n newsStory = NewsStory(guid, title, description, link, pubdate)\n ret.append(newsStory)\n return ret\n\n#======================\n# Data structure design\n#======================\n\n# Problem 1\n\nclass NewsStory():\n def __init__(self, guid, title, description, link, pubdate):\n self._guid = guid\n self._title = title\n self._description = description\n self._link = link\n self._pubdate = pubdate\n def get_guid(self):\n return self._guid\n def get_title(self):\n return self._title\n def get_description(self):\n return self._description\n def get_link(self):\n return self._link\n def get_pubdate(self):\n return self._pubdate\n\n#======================\n# Triggers\n#======================\n\nclass Trigger(object):\n def evaluate(self, story):\n \"\"\"\n Returns True if an alert should be generated\n for the given news item, or False otherwise.\n \"\"\"\n # DO NOT CHANGE THIS!\n raise NotImplementedError\n\n# PHRASE TRIGGERS\n\n# Problem 2\n\nclass PhraseTrigger(Trigger):\n def __init__(self, phrase):\n self._words = tuple(phrase.lower().split(' '))\n def evaluate_text(self, text):\n text = text.lower()\n # Replace all punctuation with space.\n for p in string.punctuation:\n text = text.replace(p, ' ')\n words = [w for w in text.split(' ') if w]\n for i, w in enumerate(words):\n if w == self._words[0]:\n return tuple(words[i: i + len(self._words)]) == self._words\n return False\n# Problem 3\n\nclass TitleTrigger(PhraseTrigger):\n def evaluate(self, story):\n return self.evaluate_text(story.get_title())\n\n# Problem 4\n\nclass DescriptionTrigger(PhraseTrigger):\n def evaluate(self, story):\n return self.evaluate_text(story.get_description())\n\n# TIME TRIGGERS\n\n# Problem 5\n# Constructor:\n# Input: Time has to be in EST and in the format of \"%d %b %Y %H:%M:%S\".\n# Convert time from string to a datetime before saving it as an attribute.\n\nclass TimeTrigger(Trigger):\n def __init__(self, time_string):\n self._time = datetime.strptime(time_string, '%d %b %Y %H:%M:%S')\n self._time = self._time.replace(tzinfo=pytz.UTC)\n\n# Problem 6\n# TODO: BeforeTrigger and AfterTrigger\n\nclass BeforeTrigger(TimeTrigger):\n def evaluate(self, story):\n pubtime = story.get_pubdate()\n pubtime = pubtime.replace(tzinfo=pytz.UTC)\n return pubtime < self._time\n \nclass AfterTrigger(TimeTrigger):\n def evaluate(self, story):\n pubtime = story.get_pubdate()\n pubtime = pubtime.replace(tzinfo=pytz.UTC)\n return pubtime >= self._time\n\n# 
COMPOSITE TRIGGERS\n\n# Problem 7\nclass NotTrigger(Trigger):\n def __init__(self, trigger):\n self._trigger = trigger\n def evaluate(self, story):\n return not self._trigger.evaluate(story)\n\n# Problem 8\nclass AndTrigger(Trigger):\n def __init__(self, left_trigger, right_trigger):\n self._left_trigger = left_trigger\n self._right_trigger = right_trigger\n def evaluate(self, story):\n return self._left_trigger.evaluate(story) \\\n and self._right_trigger.evaluate(story)\n\n# Problem 9\nclass OrTrigger(Trigger):\n def __init__(self, left_trigger, right_trigger):\n self._left_trigger = left_trigger\n self._right_trigger = right_trigger\n def evaluate(self, story):\n return self._left_trigger.evaluate(story) \\\n or self._right_trigger.evaluate(story)\n\n\n#======================\n# Filtering\n#======================\n\n# Problem 10\ndef filter_stories(stories, triggerlist):\n \"\"\"\n Takes in a list of NewsStory instances.\n\n Returns: a list of only the stories for which a trigger in triggerlist fires.\n \"\"\"\n return [s for s in stories\n if any([t.evaluate(s) for t in triggerlist])]\n\n\n\n#======================\n# User-Specified Triggers\n#======================\n# Problem 11\ndef read_trigger_config(filename):\n \"\"\"\n filename: the name of a trigger configuration file\n\n Returns: a list of trigger objects specified by the trigger configuration\n file.\n \"\"\"\n # We give you the code to read in the file and eliminate blank lines and\n # comments. You don't need to know how it works for now!\n trigger_file = open(filename, 'r')\n lines = []\n for line in trigger_file:\n line = line.rstrip()\n if not (len(line) == 0 or line.startswith('//')):\n lines.append(line)\n\n # line is the list of lines that you need to parse and for which you need\n # to build triggers\n\n triggers = {}\n trigger_class_map = {\n 'TITLE': TitleTrigger, \n 'DESCRIPTION': DescriptionTrigger, \n 'AFTER': AfterTrigger,\n 'BEFORE': BeforeTrigger,\n 'NOT': NotTrigger,\n 'AND': AndTrigger,\n 'OR': OrTrigger,\n }\n two_params_trigger = set([\n 'AND', 'OR', \n ])\n triggerlist = []\n def get_trigger(inputs):\n trigger_class = trigger_class_map[inputs[0]]\n if inputs[0] in two_params_trigger:\n return trigger_class(triggers[inputs[1]],\n triggers[inputs[2]])\n return trigger_class(inputs[1])\n\n for i, line in enumerate(lines):\n inputs = line.split(',')\n if inputs[0] == 'ADD':\n triggerlist.extend([triggers[n] for n in inputs[1:]])\n continue\n triggers[inputs[0]] = get_trigger(inputs[1:])\n return triggerlist\n\n\nSLEEPTIME = 120 #seconds -- how often we poll\n\ndef main_thread(master):\n # A sample trigger list - you might need to change the phrases to correspond\n # to what is currently in the news\n try:\n t1 = TitleTrigger(\"election\")\n t2 = DescriptionTrigger(\"Trump\")\n t3 = DescriptionTrigger(\"Clinton\")\n t4 = AndTrigger(t2, t3)\n t5 = DescriptionTrigger(\"car\")\n triggerlist = [t1, t4, t5]\n\n # Problem 11\n # TODO: After implementing read_trigger_config, uncomment this line \n triggerlist = read_trigger_config('triggers.txt')\n \n # HELPER CODE - you don't need to understand this!\n # Draws the popup window that displays the filtered stories\n # Retrieves and filters the stories from the RSS feeds\n frame = Frame(master)\n frame.pack(side=BOTTOM)\n scrollbar = Scrollbar(master)\n scrollbar.pack(side=RIGHT,fill=Y)\n\n t = \"Google & Yahoo Top News\"\n title = StringVar()\n title.set(t)\n ttl = Label(master, textvariable=title, font=(\"Helvetica\", 18))\n ttl.pack(side=TOP)\n cont = Text(master, 
font=(\"Helvetica\",14), yscrollcommand=scrollbar.set)\n cont.pack(side=BOTTOM)\n cont.tag_config(\"title\", justify='center')\n button = Button(frame, text=\"Exit\", command=root.destroy)\n button.pack(side=BOTTOM)\n guidShown = []\n def get_cont(newstory):\n if newstory.get_guid() not in guidShown:\n cont.insert(END, newstory.get_title()+\"\\n\", \"title\")\n cont.insert(END, \"\\n---------------------------------------------------------------\\n\", \"title\")\n cont.insert(END, newstory.get_description())\n cont.insert(END, \"\\n*********************************************************************\\n\", \"title\")\n guidShown.append(newstory.get_guid())\n\n while True:\n\n print(\"Polling . . .\", end=' ')\n # Get stories from Google's Top Stories RSS news feed\n stories = process(\"http://news.google.com/news?output=rss\")\n\n # Get stories from Yahoo's Top Stories RSS news feed\n stories.extend(process(\"http://news.yahoo.com/rss/topstories\"))\n\n stories = filter_stories(stories, triggerlist)\n\n list(map(get_cont, stories))\n scrollbar.config(command=cont.yview)\n\n\n print(\"Sleeping...\")\n time.sleep(SLEEPTIME)\n\n except Exception as e:\n import traceback\n traceback.print_exc()\n print(e)\n\n\nif __name__ == '__main__':\n root = Tk()\n root.title(\"Some RSS parser\")\n t = threading.Thread(target=main_thread, args=(root,))\n t.start()\n root.mainloop()\n\n","sub_path":"ps5/ps5.py","file_name":"ps5.py","file_ext":"py","file_size_in_byte":9494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"339230701","text":"\nfrom kde import KDE\nfrom scipy.stats import ks_2samp\nfrom matplotlib import pyplot as plt\n\n\nfrom IPython.terminal.embed import InteractiveShellEmbed\nipshell = InteractiveShellEmbed()\n\nclass KSTest:\n\n\tdef compare(self, dist_one, dist_two, plot_path=None, \n\t\t\tdist_one_label=\"Dist_One\", dist_two_label=\"Dist_Two\", resolution=50):\n\n\t\t\"\"\"\n\t\tPerforms KS-test\n\n\t\tParameters:\n\t\t-----------\n\n\t\tdist_one : pandas DataFrame \n\n\t\tdist_two : pandas DataFrame\n\n\t\tplot_path : String \n\n\t\tdist_one_label : String \n\n\t\tdist_two_label : String \n\n\t\tresolution : int \n\n\t\tReturns:\n\t\t--------\n\n\t\tks_statistic\n\n\t\tp_value \n\n\t\t\"\"\"\n\n\t\tmin_lat = min(dist_one['lat'].min(), dist_two['lat'].min())\n\t\tmax_lat = max(dist_one['lat'].max(), dist_two['lat'].max())\n\n\t\tmin_lng = min(dist_one['lng'].min(), dist_two['lng'].min())\n\t\tmax_lng = max(dist_one['lng'].max(), dist_two['lng'].max())\n\n\t\ttest_bl = (min_lat, min_lng)\n\t\ttest_tr = (max_lat, max_lng)\n\n\t\ty_one = KDE().get_heatmap(dist_one, test_resolution=resolution, test_bl=test_bl, test_tr=test_tr)[:,2]\n\t\t\n\t\ty_two = KDE().get_heatmap(dist_two, test_resolution=resolution, test_bl=test_bl, test_tr=test_tr)[:,2]\n\n\t\tks_statistic, pvalue = ks_2samp(y_two, y_one)\n\n\t\t# ipshell()\n\n\t\tif plot_path is not None:\n\n\t\t\tax = plt.subplot(211, title=\"Number of %s: %s\\nNumber of %s: %s\\nKS-Statistic: %s\\nP-Value: %s \"\\\n\t\t\t\t %(dist_one_label, len(dist_one), dist_two_label, len(dist_two), ks_statistic, pvalue))\n\n\t\t\tplt.pcolor(y_one.reshape(resolution,resolution))\n\n\t\t\tax = plt.subplot(212, title=\"%s(above) %s(below)\" %(dist_one_label, dist_two_label))\n\n\t\t\tplt.pcolor(y_two.reshape(resolution,resolution))\n\n\t\t\tplt.tight_layout()\n\n\t\t\tplt.savefig(str(plot_path))\n\n\t\treturn ks_statistic, 
pvalue\n\t\t\n","sub_path":"analysis/kstest.py","file_name":"kstest.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"165052961","text":"from rest_framework import serializers\n\nfrom my_apps.monitor_manager.models import MonitorTask, MonitorItem, MonitorResult\n\n\nclass MonitorResultSerializer(serializers.ModelSerializer):\n id = serializers.UUIDField(read_only=True)\n\n class Meta:\n model = MonitorResult\n fields = ('id',\n 'monitor_item',\n 'time',\n 'value')\n\n\nclass MonitorItemSerializer(serializers.ModelSerializer):\n id = serializers.UUIDField(read_only=True)\n monitor_results = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n\n class Meta:\n model = MonitorItem\n fields = ('id',\n 'name',\n 'monitor_task',\n 'host_ip',\n 'host_user',\n 'host_passwd',\n 'host_port',\n 'command',\n 'interval_time',\n 'is_running',\n 'show_result_pattern',\n 'monitor_results')\n\n\nclass MonitorTaskSerializer(serializers.ModelSerializer):\n id = serializers.UUIDField(read_only=True)\n monitor_items = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n\n class Meta:\n model = MonitorTask\n fields = ('id',\n 'name',\n 'create_time',\n 'owner',\n 'related_users',\n 'project',\n 'is_send_email',\n 'email_receivers',\n 'is_running',\n 'monitor_items')\n","sub_path":"python/work_helper/my_apps/monitor_manager/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"511429520","text":"\ndef convert(num, b):\n \"\"\"Recursive function that returns a string representing num in the base b\"\"\"\n if num == 0:\n return str(num)\n if num % b >= 10:\n newnum = chr(num % b + 55)\n else:\n newnum = num % b\n if num//b != 0:\n return '{}{}'.format(convert(num//b,b), newnum)\n else:\n return newnum\n","sub_path":"project1/base_convert.py","file_name":"base_convert.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"229573681","text":"#!/bin/python3\n\n# parse.py\n# Parsing file content into a usable form\n\nimport re\n\nsymbol_table = {\"exp\": {}, \"var\": {}, \"eval\": []}\n\nassignment_pat = \"let (.*) = (.*)\"\nassignment_exp = re.compile(assignment_pat)\neval_pat = \"eval (.*)\"\neval_exp = re.compile(eval_pat)\nop_pat = r\"\\\\(OR|AND|NOT|IMP|EQU)(.*)\"\nop_exp = re.compile(op_pat)\n\n\n# Replaces expression variables with the expanded expression\n# The language requires that things are defined prior to usage\ndef clean_args(string):\n match = op_exp.match(string)\n if match:\n if match.groups()[0] == \"NOT\":\n arg = clean_args(match.groups()[1][1:-1])\n return \"\\\\NOT{{{0}}}\".format(arg)\n else:\n op = match.groups()[0]\n remaining = match.groups()[1]\n start_arg_1 = 1\n end_arg_1 = count_block(remaining) - 1\n start_arg_2 = end_arg_1 + 1\n end_arg_2 = count_block(remaining[start_arg_2:]) + start_arg_2\n arg1 = clean_args(remaining[start_arg_1:end_arg_1])\n arg2 = clean_args(remaining[start_arg_2+1:end_arg_2-1])\n return \"\\\\{0}{{{1}}}{{{2}}}\".format(op, arg1, arg2)\n else:\n if string in symbol_table['exp']:\n return symbol_table['exp'][string]\n elif string in symbol_table['var']:\n return string\n else:\n print(\"Undefined Symbol:\", string)\n return string\n\n\n# Loads the contents of the file\n# Produces the symbol table for all the variables 
and expression\ndef load_file(fname):\n line_number = 0\n with open(fname, \"r\") as f:\n for l in f:\n line_number += 1\n if len(l) == 0 or l[0] == \"#\" or l[0] == '\\n':\n continue\n if \"let\" in l:\n match = assignment_exp.match(l)\n if not match:\n print(\"Assignment Error, line {0}: {1}\"\n .format(line_number, l))\n break\n else:\n if match.groups()[1].lower() == \"true\" \\\n or match.groups()[1].lower() == \"false\":\n # Variable\n symbol_table['var'][match.groups()[0]] = \\\n match.groups()[1].lower() == \"true\"\n else:\n args = clean_args(match.groups()[1])\n symbol_table['exp'][match.groups()[0]] = args\n elif \"eval\" in l:\n match = eval_exp.match(l)\n if match:\n symbol_table['eval'].append(match.groups()[0])\n return symbol_table\n\n\n# Finds distance to closing block\ndef count_block(string):\n count = 0\n stack = 0\n for c in string:\n if c == \"{\":\n stack += 1\n elif c == \"}\":\n stack -= 1\n count += 1\n if stack == 0:\n break\n return count\n\n\n# Converts the implications to an OR(NOT(A), B)\ndef break_impl(string):\n if \"\\IMP\" not in string:\n return string\n starting = string.find(\"\\IMP\") + len(\"\\IMP\")\n start_2 = starting + count_block(string[starting:-1])\n end = start_2 + 1 + count_block(string[start_2:-1])\n arg_1 = string[starting:start_2]\n arg_2 = string[start_2:start_2 + 1 + count_block(string[start_2:-1])]\n old = string[starting - len(\"\\IMP\"):end]\n new = r\"\\OR{\\NOT\"+arg_1 + \"}\" + arg_2\n string = string.replace(old, new)\n string = break_impl(string)\n return string\n\n\n# Converts the equalities\ndef break_equ(string):\n if \"\\EQU\" not in string:\n return string\n starting = string.find(\"\\EQU\") + len(\"\\EQU\")\n start_2 = starting + count_block(string[starting:-1])\n end = start_2 + 1 + count_block(string[start_2:-1])\n arg_1 = string[starting:start_2]\n arg_2 = string[start_2:start_2+1+count_block(string[start_2:-1])]\n old = string[starting-len(\"\\EQU\"):end]\n new = r\"\\AND{\\IMP\"+arg_1+\"\"+arg_2+\"}{\\IMP\"+arg_2 + arg_1+\"}\"\n string = string.replace(old, new)\n string = break_equ(string)\n return string\n\n\n# Performs all the conversions as necessary\ndef fix(string):\n return break_impl(break_equ(string))\n\n\n# For testing purposes!\ndef main():\n a = r\"\\AND{\\OR{A}{B}}{\\OR{\\NOT{C}}{D}}\"\n b = r\"\\IMP{A}{B}\"\n c = r\"\\IMP{\\OR{A}{B}}{\\OR{\\IMP{C}{D}}{E}}\"\n d = r\"\\AND{\\IMP{A}{B}}{C}\"\n e = r\"\\EQU{A}{B}\"\n\n print(a, \"\\t=>\\t\", fix(a))\n print(b, \"\\t=>\\t\", fix(b))\n print(c, \"\\t=>\\t\", fix(c))\n print(d, \"\\t=>\\t\", fix(d))\n print(e, \"\\t=>\\t\", fix(e))\n\n load_file(\"file.logi\")\n print(symbol_table)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"assignment_3/part2/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"599822102","text":"import glob\nimport platform\nimport shutil\nfrom subprocess import call\n\nPLATFORM_WINDOWS = 'windows'\n\nis_windows = platform.platform().lower().startswith(PLATFORM_WINDOWS)\n\n\ndef os_call(cmd_win, cmd_other=None):\n \"\"\"\n Switch commands based on OS\n :param cmd_win: The command to run if windows is detected\n :param cmd_other: The command to run otherwise\n :return: The return code\n \"\"\"\n cmd_run = cmd_win if cmd_other is None or is_windows else cmd_other\n assert cmd_run is not None\n\n cmd_list = cmd_run\n if isinstance(cmd_run, str):\n cmd_list = cmd_run.split(' ')\n assert isinstance(cmd_list, 
list)\n\n return call(cmd_list)\n\n\ndef main():\n # Clean up ENV and .egg-info folders\n shutil.rmtree('ENV', ignore_errors=True)\n for p in glob.glob('src/*.egg-info'):\n shutil.rmtree(p, ignore_errors=True)\n\n os_call('python3 -m venv ENV')\n\n os_call(\n cmd_win='ENV\\\\Scripts\\\\pip install -e .',\n cmd_other='ENV/bin/pip install -e .'\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/setup-venv.py","file_name":"setup-venv.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"254330774","text":"import numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tensorflow as tf\nimport pathlib\nfrom collections import defaultdict\nfrom matplotlib import pyplot as plt\nimport cv2\nimport imutils\nimport time\nfrom sklearn.metrics import pairwise\nfrom imutils.video import FPS\nimport copy\n\n\nfrom utils import ops as utils_ops\nfrom utils import label_map_util\n\n\n\nutils_ops.tf = tf.compat.v1\ntf.gfile = tf.io.gfile\nPATH_TO_LABELS = '../bigdata/data/mscoco_label_map.pbtxt'\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n\n\n\nmodel_name = 'ssdlite_mobilenet_v2_coco_2018_05_09'\nmodel_dir = \"../bigdata/models/\" + model_name + \"/saved_model\"\ndetection_model = tf.saved_model.load(str(model_dir))\ndetection_model = detection_model.signatures['serving_default']\n\n\n\n# print(category_index)\ncolors = np.random.uniform(0, 255, size=(len(category_index), 3))\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\nprint(detection_model.inputs)\nprint(detection_model.output_dtypes)\nprint(detection_model.output_shapes)\n\n\n\n\n\n# original\n# startRedLower = (0 , 180 , 50)\n# startRedUpper = (15 , 255, 255)\n# endRedLower = (165 , 180 , 50)\n# endRedUpper = (180 , 255 , 255)\n\n# a.mp4 659(changed from above to this) b.mp4(147) not-working \n# startRedLower = (0 , 130 , 50)\n# startRedUpper = (15 , 255, 255)\n# endRedLower = (165 , 130 , 50)\n# endRedUpper = (180 , 255 , 255)\n\n# b.mp4 147 (changed from above to this)\n# startRedLower = (0 , 130 , 50)\n# startRedUpper = (13 , 255, 255)\n# endRedLower = (167 , 130 , 50)\n# endRedUpper = (180 , 255 , 255)\n\n# d.mp4 164(changed from above to this)\n# startRedLower = (0 , 130 , 50)\n# startRedUpper = (13 , 255, 255)\n# endRedLower = (150 , 130 , 50)\n# endRedUpper = (180 , 255 , 255)\n\n\n \n\n\n\n\nstartRedLower = (0 , 130 , 50)\nstartRedUpper = (13 , 255, 255)\nendRedLower = (150 , 130 , 50)\nendRedUpper = (180 , 255 , 255)\nsignalCounter = -99999\nflagSignal = [0] * 10\ndef signalDetection(image_np , indexesLights , boxesLights):\n global signalCounter , flagSignal\n maskRed = np.zeros_like(image_np)\n fr = copy.deepcopy(image_np)\n trafficLights = []\n areas = []\n boxes = []\n for j in indexesLights:\n i = j[0]\n x, y, w, h = boxesLights[i]\n label = (w * h)\n if label < 450:\n label = \"less\"\n else:\n cv2.rectangle(image_np, (x, y), (x + w, y + h), (255,255,0), 2)\n cv2.putText(image_np, str(label), (x, y - 5), font, 3, (255,255,0), 2)\n trafficLights.append([x , y , w , h , str(label)])\n crop = image_np[y:y+h , x:x+w , :]\n maskRed[y:y+h , x:x+w , :] = crop\n color = colors[i]\n cv2.rectangle(fr, (x, y), (x + w, y + h), (255,255,0), 2)\n cv2.putText(fr, str(label), (x, y - 5), font, 3, (255,255,0), 2)\n\n cv2.imshow(\"light boxes\" , fr)\n cv2.imshow(\"crop\",maskRed)\n \n blurred = cv2.GaussianBlur(maskRed, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, 
cv2.COLOR_BGR2HSV)\n mask1 = cv2.inRange(hsv, startRedLower, startRedUpper)\n mask2 = cv2.inRange(hsv, endRedLower, endRedUpper)\n maskRed = mask1 + mask2\n maskRed = cv2.erode(maskRed, None, iterations=2)\n maskRed = cv2.dilate(maskRed, None, iterations=2)\n\n (_, contours , hierarchy) = cv2.findContours(maskRed.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n hull = []\n redcircles = []\n flagSignal.pop(0) \n flag = 0\n for i in range(len(contours)):\n chull = cv2.convexHull(contours[i], False)\n extreme_top = tuple(chull[chull[:, :, 1].argmin()][0])\n extreme_bottom = tuple(chull[chull[:, :, 1].argmax()][0])\n extreme_left = tuple(chull[chull[:, :, 0].argmin()][0])\n extreme_right = tuple(chull[chull[:, :, 0].argmax()][0])\n cX = int((extreme_left[0] + extreme_right[0]) / 2)\n cY = int((extreme_top[1] + extreme_bottom[1]) / 2)\n distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]\n radius = int(distance[distance.argmax()])\n if radius >= 4:\n hull.append(chull)\n redcircles.append([radius , cX , cY]) \n flag = 1\n if flag == 1:\n flagSignal.append(1)\n else:\n flagSignal.append(0)\n if sum(flagSignal) > 5:\n cv2.putText(image_np, \"Hey !! traffic signal is red\", (30,30), font, 1.2, (0,255,255), 2,cv2.LINE_AA)\n signalCounter = 8\n else:\n signalCounter = signalCounter - 1\n if -20 < signalCounter <= 0:\n cv2.putText(image_np, \"You can move now\", (30,30), font, 1.2, (0,255,255), 2,cv2.LINE_AA)\n\n # print(len(hull))\n\n # draw contours and hull points\n for i in range(len(hull)):\n color_contours = (0, 255, 0) # green - color for contours\n color_hull = (0, 255, 255) # blue - color for convex hull\n # draw ith contour\n cv2.putText(image_np, str(redcircles[i][0]), (redcircles[i][1] - 5, redcircles[i][2] - 5), font, 2, (255,255,255), 2)\n cv2.drawContours(image_np, contours, i, color_contours, 1, 8, hierarchy)\n cv2.drawContours(image_np, hull, i, color_hull, 2, 8) \n return image_np\n\n\ndef visualize(output_dict,image_np,height,width):\n class_ids , confidences , boxes = [] , [] , []\n boxesLights , confidencesLights = [] , []\n num = output_dict['num_detections']\n for ind in range(num):\n scr = output_dict['detection_scores'][ind]\n classId = output_dict['detection_classes'][ind] \n box = output_dict['detection_boxes'][ind]\n if classId ==10:\n ymin, xmin, ymax, xmax = box\n confidencesLights.append(float(scr))\n boxesLights.append([int(xmin*width) , int(ymin*height) , int((xmax-xmin)*width) , int((ymax-ymin)*height)])\n elif classId==2 or classId==3 or classId==4 or classId==6 or classId==8:\n pass\n\n indexesLights = cv2.dnn.NMSBoxes(boxesLights, confidencesLights, 0.5, 0.4)\n maskRed = signalDetection(image_np , indexesLights , boxesLights)\n return maskRed\n\n\n\ndef show_inference(model, image_path,ctt):\n image_np = np.array(image_path)\n height,width,channel = image_np.shape\n input_tensor = tf.convert_to_tensor(image_np)\n input_tensor = input_tensor[tf.newaxis,...]\n\n # output_dict is a dict with keys detection_classes , num_detections , detection_boxes(4 coordinates of each box) , detection_scores for 100 boxes\n output_dict = model(input_tensor)\n\n # num_detections gives number of objects in current frame\n num_detections = int(output_dict.pop('num_detections'))\n # output_dict is a dict with keys detection_classes , detection_boxes(4 coordinates of each box) , detection_scores for num_detections boxes\n output_dict = {key:value[0, :num_detections].numpy() \n for key,value in output_dict.items()}\n # 
adding num_detections that was earlier popped out\n    output_dict['num_detections'] = num_detections\n    # converting all values in detection_classes as ints.\n    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n\n    maskRed = visualize(output_dict,image_np,height,width)\n\n    # cv2.imshow(\"traffic light\", image_np)\n    cv2.imshow(\"red\",maskRed)\n\n\n\n\n# cap=cv2.VideoCapture(0)\ncap=cv2.VideoCapture('../videos/i.mp4')\ncap.set(1,537*24)\n\n# fourcc = cv2.VideoWriter_fourcc(*'XVID')\n# out1 = cv2.VideoWriter('i.avi', fourcc, 3.0, (int(cap.get(3)),int(cap.get(4))))\n\nfps = FPS().start()\n\nctt = 0\nwhile True:\n    (grabbed, frame) = cap.read()\n    if grabbed != True:\n        break\n    # print(ctt)\n    ctt = ctt + 1\n\n    show_inference(detection_model, frame,ctt)\n    \n\n    # out1.write(frame)\n    fps.update()\n    key=cv2.waitKey(1)\n    if key & 0xFF == ord(\"q\"):\n        break\n    \n# stop the timer and display FPS information\nfps.stop()\nprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\ncap.release()\n# out1.release()\ncv2.destroyAllWindows() \n\n\n\n# size of traffic lights\n# size of radius\n# number of hulls inside tf light\n# clear to go\n\n# a.mp4 210*25(red) 238*25(green) 273*25(red) 291*25(red-green-orange) 358*25(red) 659*25(red) 903*25(red) 937(red)\n# b.mp4 6*24(green) 147*24(yellow red doubt ) 339*24\n# c.mp4 90*24 342(no-light) 441(red) 525\n# d.mp4 0(green) 164(red) 221 233 379(green-red)-d 467\n# e.mp4 44(red-green) 74(red) 416\n# f.mp4 0(red) 128(red) 178 311(green)\n# g.mp4 110 141 209 285\n# h.mp4 139 401\n# i.mp4 27 231(red-green)-d 252 378 537\n\n\n","sub_path":"tf-signal.py","file_name":"tf-signal.py","file_ext":"py","file_size_in_byte":8308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} {"seq_id":"525278772","text":"#!/usr/bin/env python\n'''String searching naive way'''\nimport sys\n\ndef findmatch(pattern,text):\n\tm = len(pattern)\n\tn = len(text)\n\tfor i in range(0,n-m+1):\n\t\tj = 0 \n\t\twhile j < m and text[i+j] == pattern[j]:\n\t\t\tj = j+1\n\t\tif j == m:\n\t\t\treturn i\n\treturn -1\n\nif __name__ == '__main__':\n\tif len(sys.argv) < 3:\n\t\tprint('Usage: stringsearchnaive.py <pattern> <text>')\n\t\tsys.exit(0)\n\tpattern = sys.argv[1]\n\ttext = sys.argv[2]\n\tindex = findmatch(pattern,text)\n\tif index == -1:\n\t\tprint(\"Pattern not found in text\")\n\telse:\n\t\tprint(\"Pattern found in text at \"+str(index))","sub_path":"strings/stringsearchnaive.py","file_name":"stringsearchnaive.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} {"seq_id":"567115777","text":"from models.spectral_nn import CayleySignedNet\nfrom mains.train_spectral import train_spectralmodel, test_spectral\nfrom utils.graph_operators import *\nfrom scipy.linalg import eigh\nimport time\nfrom utils.utils_bis import *\nfrom utils.graphic import *\nfrom sklearn.metrics import adjusted_rand_score\n\nimport torch.nn as nn\nimport torch.optim as optim\n\n\ndef weighted_euclidean_loss(X, A):\n    dist = torch.pdist(X, p=2).reshape(1,-1)\n    #print(dist.size(), A.size())\n    out = A * dist\n    loss = torch.mean(out)\n    return loss\n\n\ndef train_cayley(model, labels, features, optimizer, A):\n    \"\"\"Trains model for one epoch using criterion loss and optimizer\"\"\"\n\n    model.train()\n    optimizer.zero_grad()\n\n    output = model(features)\n\n    loss = weighted_euclidean_loss(output, A)\n\n    loss.backward()\n    optimizer.step()\n\n    return loss, output\n\n\n\"\"\" \ndef train_cayley(model, labels, features, idx_train, optimizer, validate=False,\n                 idx_val=None, method=\"supervised\", *A):\n    \n    model.train()\n    
optimizer.zero_grad()\n\n output = model(features)\n\n # Loss function (Semi-supervised or unsupervised setting)\n if method == \"unsupervised\":\n loss = weighted_euclidean_loss(output, A)\n print('loss_train: {:.4f}'.format(loss))\n else:\n loss = nn.CrossEntropyLoss(output[idx_train], labels[idx_train])\n acc = accuracy(output[idx_train], labels[idx_train])\n print('loss_train: {:.4f}'.format(loss.item()), 'acc_train: {:.4f}'.format(acc.item()))\n\n loss.backward()\n optimizer.step()\n\n if method == \"unsupervised\":\n return loss\n\n if validate:\n model.eval()\n output = model(features)\n\n loss_val = nn.CrossEntropyLoss(output[idx_val], labels[idx_val])\n acc_val = accuracy(output[idx_val], labels[idx_val])\n\n return loss.item(), acc.item(), loss_val.item(), acc_val.item()\n\n return loss.item(), acc.item()\n\"\"\"\n\n\ndef main_cayley(n, k, p, eta, nconv, nconv_hid, nfc_layers, nfeat, p_train, p_valid, max_epochs, lr,\n method=\"supervised\"):\n # Split training, validation, test\n n_train = math.floor(n * p_train)\n n_valid = math.floor(n * p_valid)\n idx_train = torch.LongTensor(range(n_train))\n idx_valid = torch.LongTensor(range(n_train, n_train + n_valid))\n idx_test = torch.LongTensor(range(n_train + n_valid, n))\n\n # Network generation\n data = SSBM_boundary(n, k, p, p, eta)\n labels = data[1]\n Apos = data[0][0]\n Aneg = data[0][1]\n Asig = signed_adj(Apos, Aneg)\n #Aup = ss.coo_matrix(Asig[np.triu_indices(n, k=1)])\n Aup = Asig[np.triu_indices(n, k=1)]\n Lpos = signed_laplacian(Apos)\n Lneg = signed_laplacian(Aneg)\n features = np.eye(n) # featureless setting\n (w_pos, U_pos) = eigh(Lpos.todense())\n (w_neg, U_neg) = eigh(Lneg.todense())\n\n # Cast to torch tensors\n features = torch.FloatTensor(features)\n labels = torch.LongTensor(labels)\n U_pos = torch.FloatTensor(U_pos)\n w_pos = torch.FloatTensor(w_pos)\n U_neg = torch.FloatTensor(U_neg)\n w_neg = torch.FloatTensor(w_neg)\n Aup = torch.FloatTensor(Aup)\n\n \"\"\"#Visualization\n plt.figure()\n visualise(labels, labels, Asig, k, True)\n \"\"\"\n\n model = CayleySignedNet(U_pos=U_pos, U_neg=U_neg, w_pos=w_pos, w_neg=w_neg, r=r, dim_input=features.shape[1],\n n_conv=nconv, n_convhid=nconv_hid, n_fclayers=nfc_layers, n_feat=nfeat, nclass=k,\n method=method)\n\n # Optimization method\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.0)\n\n # Training with early stopping\n epoch = 0\n window = np.zeros(10)\n eps = 1e-3\n train_results = [] # train loss, accuracy/ari\n val_results = [] # validation loss, accuracy\n t_train = time.time()\n while epoch < max_epochs:\n\n if method == \"unsupervised\":\n loss, output = train_cayley(model, labels, features, optimizer, Aup)\n window = np.delete(np.insert(window, 0, loss.item()), -1)\n x = sl.KMeans(n_clusters=k).fit(output.data.numpy())\n score = adjusted_rand_score(x.labels_, labels)\n\n train_results.append([loss.item(), score])\n\n if epoch % 1 == 0:\n print('Epoch: {:04d}'.format(epoch + 1),\n 'loss_train: {:.4f}'.format(loss.item()),\n 'adjusted rand index: {:04f}'.format(score),\n 'time: {:.4f}s'.format(time.time() - t_train))\n\n else:\n criterion = nn.CrossEntropyLoss()\n loss, acc, loss_val, acc_val = train_spectralmodel(model, labels, features, idx_train, criterion, optimizer,\n validate=True, idx_val=idx_valid)\n window = np.delete(np.insert(window, 0, loss_val), -1)\n train_results.append([loss, acc])\n val_results.append([loss_val, acc_val])\n\n if epoch % 1 == 0:\n print('Epoch: {:04d}'.format(epoch + 1),\n 'loss_train: {:.4f}'.format(loss),\n 
'acc_train: {:.4f}'.format(acc),\n 'time: {:.4f}s'.format(time.time() - t_train))\n\n if epoch > 40 and (np.max(window) - np.min(window) < eps\n or (epoch > 10 and np.argmax(window) < np.argmin(window))):\n max_epochs = epoch\n\n epoch = epoch + 1\n t_train = time.time() - t_train\n\n if method != \"unsupervised\":\n # Test\n loss_test, acc_test, ari_test = test_spectral(model, labels, features, idx_test, criterion, optimizer)\n perf = np.array([loss_test, acc_test, ari_test])\n\n\n # Output\n return perf, t_train, train_results, val_results\n\n return loss, score\n\n\n\"\"\"\nn = 100\nk = 3\np = 0.1\nq = 0.1\neta = 0.05\np_train = 0.7 # proportion of labelled nodes for training\np_valid = 0.1 # proportion of labelled nodes for validation\nn_epoch = 40\nr = 15\nnconv = 3\nnconv_hid = 64\nnfc_layers = 1\nnfeat = 32\nlr = 0.003\n\nmain_cayley(n, k, p, eta, nconv, nconv_hid, nfc_layers, nfeat, p_train, p_valid, n_epoch, lr, method=\"unsupervised\")\n\"\"\"","sub_path":"mains/main_cayley.py","file_name":"main_cayley.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"47782765","text":"from random import randint\n\nclass World:\n\n LIFES_ON_MAP = 3\n\n _world = []\n hearts_positions = [[]]\n exit_position = []\n size_y = 0\n size_x = 0\n\n def __init__(self, size_y, size_x):\n \n self.size_y = size_y\n self.size_x = size_x\n \n self.hearts_positions = self.generate_empty_positions(self.LIFES_ON_MAP)\n \n self.exit_position = self.generate_empty_positions()[0]\n \n \n def _set_positions(self, symbol):\n\n\n def return_function(*positions):\n \n for position in positions:\n self._world[position[0]][position[1]] = symbol\n\n return None\n\n\n return return_function\n\n \n def _occupied_positions(self):\n\n # If new occupied places created than this method should be updated\n return_val = self.hearts_positions.copy()\n return_val.append(self.exit_position)\n \n return return_val \n\n def generate_empty_positions(self, number=1):\n\n positions = []\n \n i = 0\n while i < number:\n\n position = [randint(0, self.size_y - 1),randint(0, self.size_x - 1)]\n\n if (position not in positions and\n position not in self._occupied_positions()):\n\n positions.append(position)\n i += 1\n\n return positions\n\n def heart_check(self, position):\n\n if position in self.hearts_positions:\n self.hearts_positions.remove(position)\n return 1\n else:\n return 0\n\n def exit_check(self, position):\n\n if position == self.exit_position:\n return True\n else:\n return False\n\n def get_world(self):\n \"\"\"\n function get_world() returns visual representation of world\n \"\"\"\n\n set_heart = self._set_positions('H')\n set_exit = self._set_positions('E')\n\n self._world = [[' ' for _ in range(self.size_x)] for _ in range(self.size_y)]\n\n set_heart(*self.hearts_positions)\n set_exit(self.exit_position)\n \n return self._world\n","sub_path":"rusalovskyi_oleksandr/06/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"233420229","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom datetime import datetime, timedelta\nimport calendar, pytz\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT\n\nclass HRAttendanceList(models.Model):\n _name = 'hr.attendance.list'\n _rec_name = 'employee_id'\n \n employee_id = fields.Many2one('hr.employee', 
string=\"Employee\")\n attendance_list_line = fields.One2many('attendance.list.line','attendance_list_id', string=\"Attendance List\")\n \nclass AttendanceListLine(models.Model):\n _name = 'attendance.list.line'\n \n def convert_tz_to_utz(self, date):\n check_in_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)\n local_tz = pytz.timezone(self.env.user.tz or 'UTC')\n check_in_local = check_in_dt.replace(tzinfo = pytz.utc).astimezone(local_tz)\n check_in = datetime.strftime(check_in_local, DEFAULT_SERVER_DATETIME_FORMAT)\n return check_in\n \n @api.multi\n @api.depends('o_timein','o_timeout')\n def _compute_time(self):\n for rec in self:\n day_list = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\n day = day_list.index(rec.day) if rec.day in day_list else False\n rec.total_hours = rec.o_timeout - rec.o_timein\n for work_day in rec.attendance_list_id.employee_id.calendar_id.attendance_ids:\n contract_id = self.env['hr.contract'].search([('employee_id', '=', rec.attendance_list_id.employee_id.id),('state','=','open')], order='date_start desc', limit=1)\n if contract_id and contract_id.rate_per_hour:\n if day == int(work_day.dayofweek):\n rec.over_time = rec.o_timeout - work_day.hour_to if (rec.o_timeout - work_day.hour_to) > 0.0 else 0.0\n rec.ot_1_0 = rec.o_timeout - work_day.hour_to if (rec.o_timeout - work_day.hour_to) > 0.0 else 0.0\n else:\n rec.over_time = rec.o_timeout - 17.83 if (rec.o_timeout - 17.83) > 0.0 else 0.0\n rec.ot_1_0 = rec.o_timeout - 17.83 if (rec.o_timeout - 17.83) > 0.0 else 0.0\n else:\n if int(work_day.dayofweek) not in [5,6]:\n if day == int(work_day.dayofweek):\n rec.over_time = rec.o_timeout - work_day.hour_to if (rec.o_timeout - work_day.hour_to) > 0.0 else 0.0\n rec.ot_1_5 = rec.o_timeout - work_day.hour_to if (rec.o_timeout - work_day.hour_to) > 0.0 else 0.0\n else:\n rec.over_time = rec.o_timeout - 17.83 if (rec.o_timeout - 17.83) > 0.0 else 0.0\n rec.ot_2_0 = rec.o_timeout - 17.83 if (rec.o_timeout - 17.83) > 0.0 else 0.0\n\n @api.multi\n def _related_hod(self):\n for rec in self:\n hod_id = self.env['hr.employee'].search([('user_id', '=', self.env.user.id), ('job_id.name', '=', 'HOD')])\n if hod_id:\n rec.hod_id = hod_id[0].id\n \n attendance_list_id = fields.Many2one('hr.attendance.list',string='Attendance List')\n attendance_id = fields.Many2one('hr.attendance', string=\"Attendance\")\n hod_id = fields.Many2one(\"hr.employee\", string=\"HOD\", compute='_related_hod')\n check_in = fields.Datetime('Check In')\n check_out = fields.Datetime('Check Out')\n date_dt = fields.Date(\"Date\")\n o_timein = fields.Float(\"O Timein\")\n o_timeout = fields.Float(\"O Timeout\")\n adj_timein =fields.Float(\"adj_timein\")\n adj_timeout = fields.Float(\"adj_timeout\")\n total_hours = fields.Float('Total Hours', compute='_compute_time', store=True)\n over_time = fields.Float('Over Time', compute='_compute_time', store=True)\n ot_1_0 = fields.Float('OT #1.0', compute='_compute_time', store=True)\n ot_1_5 = fields.Float('OT #1.5', compute='_compute_time', store=True)\n ot_2_0 = fields.Float('OT #2.0', compute='_compute_time', store=True)\n day = fields.Char(\"Day\")\n emp_remark = fields.Char(\"Emp Remark\")\n lev_remark = fields.Char(\"Lev Remark\")\n sup_remark = fields.Char(\"Sup Remark\")\n state = fields.Selection([('draft','Draft'),('approved','Approved')], string=\"status\", default='draft')\n \nclass HRAttendance(models.Model):\n _inherit = 'hr.attendance'\n \n @api.model\n def create(self, values):\n res = super(HRAttendance, 
self).create(values)\n attendance_list_ids = self.env['hr.attendance.list'].search([('employee_id','=',res.employee_id.id)])\n if attendance_list_ids:\n attendance_list_id = attendance_list_ids[0]\n else:\n attendance_list_id = self.env['hr.attendance.list'].create({'employee_id': res.employee_id.id})\n list_line_id = self.env['attendance.list.line'].create({'attendance_list_id': attendance_list_id.id,\n 'attendance_id': res.id,\n 'check_in': res.check_in,\n 'check_out': res.check_out,\n 'day': res.day,\n 'date_dt': res.date_dt,\n 'o_timein': res.o_timein,\n 'o_timeout': res.o_timeout,\n 'adj_timein': res.adj_timein,\n 'adj_timeout': res.adj_timeout,\n 'state': 'draft',\n })\n return res\n \n @api.multi\n def write(self, values):\n attendance_line_ids = self.env['attendance.list.line'].search([('attendance_id','=',self.id)], limit=1)\n if attendance_line_ids:\n attendance_line_ids.write(values)\n return super(HRAttendance, self).write(values)\n","sub_path":"beta-dev1/teo_hr_attendance_list/models/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"16877092","text":"import numpy as np\nimport cv2\nimport pyautogui\n\ncap = cv2.VideoCapture(0)\n\nH_MIN = 0\nH_MAX = 256\nS_MIN = 0\nS_MAX = 256\nV_MIN = 0\nV_MAX = 256\nFRAME_WIDTH = 640.0\nFRAME_HEIGHT = 480.0\nSCREEN_WIDTH = 1600.0\nSCREEN_HEIGHT = 900.0\nHEIGHT_RATIO = SCREEN_HEIGHT / FRAME_HEIGHT\nWIDTH_RATIO = SCREEN_WIDTH / FRAME_WIDTH\nMAX_NUM_OBJECTS = 10\nMIN_OBJECT_AREA = 20*20\nMAX_OBJECT_AREA = int(FRAME_HEIGHT * FRAME_WIDTH / 1.5)\n\n\ndef nothing(x):\n pass\n\ncv2.namedWindow('image')\ncv2.namedWindow('image2')\ncv2.createTrackbar('H_MIN','image',77,255,nothing)\ncv2.createTrackbar('S_MIN','image',69,255,nothing)\ncv2.createTrackbar('V_MIN','image',0,255,nothing)\ncv2.createTrackbar('H_MAX','image',91,255,nothing)\ncv2.createTrackbar('S_MAX','image',255,255,nothing)\ncv2.createTrackbar('V_MAX','image',174,255,nothing)\ncv2.createTrackbar('H_MIN2','image',16,255,nothing)\ncv2.createTrackbar('S_MIN2','image',79,255,nothing)\ncv2.createTrackbar('V_MIN2','image',116,255,nothing)\ncv2.createTrackbar('H_MAX2','image',44,255,nothing)\ncv2.createTrackbar('S_MAX2','image',157,255,nothing)\ncv2.createTrackbar('V_MAX2','image',225,255,nothing)\n\nswitch = '0: OFF \\n1 : ON'\ncv2.createTrackbar(switch,'image',0,1,nothing)\n\nswitch2 = '0: Movement \\n1: Clicking'\ncv2.createTrackbar(switch2,'image',0,1,nothing)\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n #blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # Display the resulting frame\n h_min = cv2.getTrackbarPos('H_MIN', 'image')\n h_max = cv2.getTrackbarPos('H_MAX', 'image')\n s_min = cv2.getTrackbarPos('S_MIN', 'image')\n s_max = cv2.getTrackbarPos('S_MAX', 'image')\n v_min = cv2.getTrackbarPos('V_MIN', 'image')\n v_max = cv2.getTrackbarPos('V_MAX', 'image')\n\n h_min2 = cv2.getTrackbarPos('H_MIN2', 'image')\n h_max2 = cv2.getTrackbarPos('H_MAX2', 'image')\n s_min2 = cv2.getTrackbarPos('S_MIN2', 'image')\n s_max2 = cv2.getTrackbarPos('S_MAX2', 'image')\n v_min2 = cv2.getTrackbarPos('V_MIN2', 'image')\n v_max2 = cv2.getTrackbarPos('V_MAX2', 'image')\n\n\n lower = np.array([h_min, s_min, v_min])\n upper = np.array([h_max, s_max, v_max])\n lower2 = np.array([h_min2, s_min2, v_min2])\n upper2 = np.array([h_max2, s_max2, v_max2])\n\n on = 
cv2.getTrackbarPos(switch,'image')\n\n mask = cv2.inRange(hsv, lower, upper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n mask2 = cv2.inRange(hsv, lower2, upper2)\n mask2 = cv2.erode(mask2, None, iterations=2)\n mask2 = cv2.dilate(mask2, None, iterations=2)\n\n if cv2.waitKey(1) & 0xFF == ord('q') or on:\n break\n\n if not cv2.getTrackbarPos(switch2,'image'):\n cv2.imshow('image2', mask)\n else:\n cv2.imshow('image2',mask2)\n # When everything done, release the capture\ncv2.destroyAllWindows()\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n #blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n mask = cv2.inRange(hsv, lower, upper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n mask2 = cv2.inRange(hsv, lower2, upper2)\n mask2 = cv2.erode(mask2, None, iterations=2)\n mask2 = cv2.dilate(mask2, None, iterations=2)\n\n contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n contours2 = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n if len(contours) > 0:\n c = max(contours, key=cv2.contourArea)\n m = cv2.moments(c)\n center = (int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))\n screen_center = [int((FRAME_WIDTH - center[0]) * WIDTH_RATIO), int(center[1] * HEIGHT_RATIO)]\n if on:\n pyautogui.moveTo(screen_center[0],screen_center[1])\n\n if len(contours2) > 0:\n c2 = max(contours2, key=cv2.contourArea)\n if cv2.contourArea(c2) > MIN_OBJECT_AREA:\n pyautogui.mouseDown()\n else:\n pyautogui.mouseUp()\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"modified_colormouse.py","file_name":"modified_colormouse.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"533163917","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 6 12:39:04 2021\n\n@author: padma carstens\n\"\"\"\n\nimport os\nfrom os.path import exists\nfrom figshare.figshare import Figshare\nfrom ldcoolp.curation.UPack_v2 import ObjFormatter\nfrom ldcoolp.curation import retrieve\nimport json\n\n\n#Enter article id: this is the last number in the \"cite\" on data.lib.vt.edu\narticle_id=\"XYZ\"\n# Enter Ingest Accession Number from the spreadsheet:\nIngestAccessionNumber= \"I1234\" \n#Enter Requestor name\nRequestor=\"XYZ\"\n#Enter corresponding author name\nCorrespondingAuthor=\"XYZ\"\n#Enter version number\nVersion=\"01\"\n#Enter date ingested in YYYYMMDD format\nDateIngested= \"20211018\" #in YYYYMMDD format\n#Enter your token\ntoken='1234'\n\n#Create Ingest folder to store dataset\ndata_directory1=f\"{IngestAccessionNumber}\"\ndata_directory2=f\"{IngestAccessionNumber}_{Requestor}_{CorrespondingAuthor}_v{Version}_{DateIngested}\"\ndata_directory_path=os.path.join(data_directory1, data_directory2)\nmetadata_directory_path=f\"{IngestAccessionNumber}_DownloadedFileMetadata\"\n#-----Download dataset for private article under review using LD-Cool-P and save it as Ingest metadata in json file format\nfs=Figshare(token=token,private=True)\nFileDownload=retrieve.download_files(article_id, fs, data_directory=data_directory_path, 
metadata_directory=metadata_directory_path)\nprivatefigshare_url='https://api.figshare.com/v2/account/articles/'+str(article_id)\n#-----Get article details for private article under review using LD-Cool-P and save it as Ingest metadata in json file format\njson_out_file=f\"{data_directory_path}/{IngestAccessionNumber}_IngestedMetadata.json\"\njson_response=fs.get_article_details(article_id,version=None)\n\n# NOTE: 'overwrite' was referenced below but never defined; an assumed default of False is added here so the existing-file branch cannot raise NameError\noverwrite=False\n\nif not os.path.exists(json_out_file):\n    with open(json_out_file, 'w') as f:\n        json.dump(json_response,f,indent=4)\nelse:\n    print(f\"File exists: {json_out_file}\")\n    if overwrite:\n        print(\"Overwriting!\")\n        with open(json_out_file, 'w') as f:\n            json.dump(json_response,f,indent=4)\n\n#Call parts of modified UPACK_v2 code written by Luke. I. Menzies(lmenzies@uab.edu) to bag and tar ingest record\n\nmyobj=ObjFormatter#()\ntarfiledir=data_directory1\ningest_bag=myobj.run_bagit(bagsdir=tarfiledir)\ningest_bag_tar=myobj.run_tar(tarfolder=tarfiledir)\n\n","sub_path":"create_ingest_bag.py","file_name":"create_ingest_bag.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} {"seq_id":"618823243","text":"# memoized version of UVa 11450: training with dynamic programming\n\n\nimport math\n\n\ndef main():\n    test_cases = int(input())\n    for test in range(test_cases):\n        M, C = [int(x) for x in input().split(\" \")]\n        prices = {} # dictionary that holds for each garment the prices of different models\n        for k in range(C):\n            values = [int(x) for x in input().split(\" \")]\n            prices[k] = values[1:]\n        ans = ws(M, C, prices)\n        if ans == -1:\n            print(\"impossible\")\n        else:\n            print(ans)\n\n\ndef ws(M, C, prices):\n    T = init_ws(M, C)\n    return rec_ws(M, C, M, 0, prices, T)\n\n\ndef init_ws(M, C): # create M+1 x C memo table with all values set to -1\n    return [[-1 for j in range(C)] for i in range(M + 1)]\n\n\ndef rec_ws(M, C, money, g, prices, T):\n    if money < 0: # base case no.1\n        return -1\n    if g == C: # base case no.2\n        return M - money\n    if T[money][g] != -1: # lookup on the table\n        return T[money][g]\n    else:\n        k_g = len(prices[g]) # number of models of garment g\n        ans = -math.inf\n        for i in range(k_g): # using OSP to find answer\n            ans = max(ans, rec_ws(M, C, money - prices[g][i], g + 1, prices, T))\n        T[money][g] = ans\n        return ans\n\n\nmain()\n","sub_path":"Coding Training/weddingshopping_memoized.py","file_name":"weddingshopping_memoized.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} {"seq_id":"564930652","text":"#!/usr/bin/env python3\nimport inspect\nimport configparser\nimport logging\nimport os\nimport socket\n\nlogging.basicConfig()\n\nconfig = configparser.ConfigParser()\nconfig.read(['/etc/conf.d/cellaserv'])\n\n\ndef make_setting(name, default, cfg_section, cfg_option, env, coerc=str):\n    val = default\n    try:\n        val = config.get(cfg_section, cfg_option)\n    except:\n        pass\n    val = coerc(os.environ.get(env, val))\n    # Inject in the current global namespace\n    globals()[name] = val\n\ndef make_logger(name):\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.DEBUG if DEBUG >= 1 else logging.INFO)\n    return logger\n\nmake_setting('HOST', 'evolutek.org', 'client', 'host', 'CS_HOST')\nmake_setting('PORT', 4200, 'client', 'port', 'CS_PORT', int)\nmake_setting('DEBUG', 0, 'client', 'debug', 'CS_DEBUG', int)\n\n\ndef get_socket():\n    \"\"\"Open a socket to cellaserv using user configuration.\"\"\"\n    return 
socket.create_connection((HOST, PORT))\n\nlogger = make_logger(__name__)\nlogger.debug(\"DEBUG: %s\", DEBUG)\nlogger.debug(\"HOST: %s\", HOST)\nlogger.debug(\"PORT: %s\", PORT)\n","sub_path":"cellaserv/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"341264135","text":"def elementNum(N):\n\n cnt = 1\n for i in range(1, N // 2 + 1):\n if not N % i:\n \n cnt += 1\n return cnt\n\ndef solution(left, right):\n \n answer = 0\n for i in range(left, right+1):\n\n if not elementNum(i) % 2:\n answer += i\n else:\n answer -= i\n\n\n return answer\n\nif '__main__' == __name__:\n print(solution(13, 17))\n print(solution(24, 27))\n print(solution(1, 1))","sub_path":"python/programmers/월간 코드 첼린지 시즌2/5월/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"512483093","text":"# File to download FEC primary election data and merge with DIME's dime_final.csv\nimport pandas as pd\nimport os\n\n\ndef download_primary_data():\n \"\"\"\n Download the primary election data from the FEC website and put into dataframe.\n \"\"\"\n election_files = [\n \"https://www.fec.gov/documents/1700/federalelections2014.xls\",\n \"https://www.fec.gov/documents/1890/federalelections2016.xlsx\",\n \"https://www.fec.gov/documents/2706/federalelections2018.xlsx\",\n ]\n\n df_primaries = None\n\n for election_file in election_files:\n year = election_file.split(\".\")[-2][-4:]\n df_year = pd.read_excel(\n election_file,\n sheet_name=\"{year} US House Results by State\".format(year=year),\n index_col=0,\n )\n df_year.rename(\n columns={\n \"FEC ID#\": \"Cand.ID\",\n \"PRIMARY VOTES\": \"votes_primary\",\n \"PRIMARY %\": \"pct_primary\",\n \"RUNOFF VOTES\": \"votes_runoff\",\n \"RUNOFF %\": \"pct_runoff\",\n \"GENERAL VOTES \": \"votes_general\",\n \"GENERAL %\": \"pct_general\",\n \"GE WINNER INDICATOR\": \"won_general\",\n \"PARTY\": \"party\",\n \"DISTRICT\": \"district\",\n \"D\": \"district\",\n },\n inplace=True,\n errors=\"ignore\",\n )\n\n df_year = df_year.dropna(subset=[\"Cand.ID\"])\n df_year[\"Cand.ID\"] = df_year[\"Cand.ID\"].str.strip()\n df_year[\"party\"] = df_year[\"party\"].str.strip()\n # Minnesota's Dem party is officially the Democratic-Farmer-Labor party...\n df_year.loc[df_year[\"party\"] == \"DFL\", \"party\"] = \"D\"\n df_year = df_year[(df_year[\"party\"] == \"D\") | (df_year[\"party\"] == \"R\")]\n\n # A couple of manual fixes:\n if year == \"2016\":\n # Dave Koller has an extra random row\n df_year.drop(752, inplace=True)\n\n if year == \"2018\":\n # John Chrin is two rows for some reason\n df_year.loc[3480, \"votes_primary\"] = df_year.loc[3481][\"votes_primary\"]\n df_year.loc[3480, \"pct_primary\"] = df_year.loc[3481][\"pct_primary\"]\n df_year.drop(3481, inplace=True)\n\n # Jennifer Zordani has the wrong ID\n df_year.loc[1141, \"Cand.ID\"] = \"H8IL06105\"\n\n # Anya Tynia is listed multiple times for different parties, only gets >30 votes once\n df_year.drop([4293], inplace=True)\n\n df_year = df_year[\n [\n \"Cand.ID\",\n # \"party\",\n \"votes_primary\",\n \"pct_primary\",\n \"votes_runoff\",\n \"pct_runoff\",\n \"votes_general\",\n \"pct_general\",\n \"won_general\",\n # \"district\",\n ]\n ]\n df_year[\"cycle\"] = int(year)\n\n # Remove special election if there's already a regular election\n df_year.drop_duplicates(subset=[\"Cand.ID\", 
\"cycle\"], inplace=True)\n\n if df_primaries is None:\n df_primaries = df_year\n else:\n df_primaries = pd.concat([df_primaries, df_year])\n\n return df_primaries\n\n\ndef merge_primary_data():\n df_dime = pd.read_csv(\"../../data/dime_final.csv\")\n df_primary = download_primary_data()\n df_merged = pd.merge(df_dime, df_primary, how=\"left\", on=[\"Cand.ID\", \"cycle\"])\n # 11/6/2020: 10510 pre-2020 DIME candidates, 4826 primary results of which 51 unmatched\n df_merged.drop(columns=[\"Unnamed: 0\", \"Unnamed: 0.1\"], inplace=True)\n df_merged.to_csv(\"../../data/dime_with_primaries.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n merge_primary_data()\n","sub_path":"process_data/dime/merge_primary_data.py","file_name":"merge_primary_data.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"442033400","text":"# the main UI\n# check : Gtk-Message: 20:05:27.575: GtkDialog mapped without a transient parent. This is discouraged.\n## \tpyhton 3.67\n## Gedit\n##\tCrypto\n##\t116CS0177\n\n''''\nvariable declaration:-\n\n1) self.main_flag:- \tstatus -class string\n\t\t\t\t\t\tit is used to notify the process is encryption or decryption\n\n2) self.key:-\tstatus - class member variable string\n\t\t\t\tIt contains the 16 digit key for both enc and dec\n\n3) self.in_path:-\tstatus- class member string var\n\t\t\t\t\tstring input file adr with file name\n\n\n4) self.op_path:- \tstatus- class member string var\n\t\t\t\t\top path only , file name not included\n\n5)self.log_path:-\tstatus- class member string var\n\t\t\t\t\tlog file path\n\n6)self.process_flag\n'''\n\n\n#from fbs_runtime.application_context import ApplicationContext\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import Qt\n\nfrom Crypto.Cipher import AES,DES3\nfrom Crypto import Random\nfrom random import randint,seed,shuffle\n\nfrom time import ctime,sleep\nimport sys\nimport os\nimport struct\nimport csv\n\n\npadd=['@','!','$','%','&','?','>','<','+','~','*','1','2','3','4','5','6','7','8','9','0','(',')','{','}']\n\nclass App(QWidget):\n\t\"\"\"\n\n \"\"\"\n\tdef selector(self,text): # Selects either its encryption or decryption\n\t\tif text=='ENCRYPTION':\n\t\t\tself.main_flag='e'\n\t\telif text=='DECRYPTION' :\n\t\t\tself.main_flag='d'\n\t\t#print(self.main_flag)\n\t\tself.myMessageBox.setText(\"Now select the i/p file and o/p folder\")\n\n\tdef check(self): # A check function // Can be removed\n\t\tprint(\"hello\")\n\n\n\tdef onActivated(self,text):\n\t\t\"\"\"\n\t\tThis function is activated by the second combo-box\n\t\tIt selects the methode we are using for encryption/decryption\n\t\t\t\t\t\t\t\t\t\t\tlike :-AES,DES3\n\t\t\"\"\"\n\t\t#print(self.in_path)\n\t\tif self.main_flag=='e':\n\t\t\tl=self.in_path.split('/')\n\t\t\tfname=l[-1]\n\t\t\t#print(fname)\n\t\t\tself.op_path=self.op_path+'/'+fname+'.enc'\n\t\telif self.main_flag=='d':\n\t\t\top=os.path.splitext(self.in_path)[0]\n\t\t\top=op.split('/')\n\t\t\tself.op_path=self.op_path+'/'+op[-1]\n\t\t\t#print(self.op_path)\n\t\tif text=='AES':\n\t\t\tself.process_flag='AES'\n\t\t\tself.myMessageBox.setText(\"AES is selected, MODE:-CBC , Now enter the key \")\n\t\telif text=='DES3':\n\t\t\tself.process_flag='DES3'\n\t\t\tself.myMessageBox.setText(\"DES3 is selected, MODE:-OFB , Now enter the key\")\n\n\n\tdef key_generator(self,text): # generates and validates the key // 8+8 for now\n\t\tif len(text)==8 and self.main_flag=='e' and 
(self.process_flag=='DES3' or self.process_flag=='AES'): ## this part is for encryption\n\t\t\tself.key=text;\n\t\t\tkey_length=len(self.key)\n\n\t\t\tpadding_length=16-key_length;\n\t\t\ti=0\n\t\t\t## randomness generator seed()\n\t\t\tseed()\n\t\t\tfor i in range(padding_length):\n\t\t\t\tl=len(padd)\n\t\t\t\tx=randint(0,l-1)\n\t\t\t\tself.key=self.key+padd[x]\n\n\t\t\t#print(key) # now the 16 byte key is ready\n\t\t\t'''\n\t\t\t# shufflling\n\t\t\tself.key=list(self.key)\n\t\t\tshuffle(self.key)\n\t\t\tself.key=''.join(self.key)\n\t\t\t## shuffled key is ready\n\t\t\t'''\n\t\t\t#print(self.key)\n\t\t\tself.myMessageBox.setText(\"Keep the key \"+self.key+\" safe !!!\")\n\t\t\tself.key=self.key.encode('utf-8')\n\n\t\tif self.main_flag=='d'\tand len(text)==16 and (self.process_flag=='DES3' or self.process_flag=='AES'):\n\t\t\t## this part is for decryption\n\t\t\tself.key=text\n\t\t\t#print(self.key)\n\t\t\tself.myMessageBox.setText(\"The key received is \"+self.key)\n\t\t\tself.key=self.key.encode('utf-8')\n\n\n\tdef open_files(self): # browser Button to select i/p file\n\t\tdialog = QFileDialog()\n\t\tfname = dialog.getOpenFileName(self, \"Open file\")\n\t\tfilename=fname[0]\n\t\tself.in_path=filename\n\t\tself.myTextBox.setText(self.in_path)\n\t\tself.myMessageBox.setText(\"Input file selected.May jump to o/p folder selection\")\n\n\tdef open_files_log(self): # log file browser Button\n\t\tdialog = QFileDialog()\n\t\tfname = dialog.getOpenFileName(self, \"Open log file\")\n\t\tfilename=fname[0]\n\t\tself.log_path=filename\n\t\tself.logTextBox.setText(filename)\n\t\tself.myMessageBox.setText(\"log file changed\")\n\n\tdef op_files(self): # Selects the o/p folder\n\t\tself.op_path= str(QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n\t\tself.opTextBox.setText(self.op_path)\n\t\tself.myMessageBox.setText(\"Now select the methode\")\n\n\n\tdef startP(self): # the Function\n\t\t#print(self.process_flag)\n\t\tif(self.process_flag=='' or self.key=='' or self.in_path=='' or self.in_path==None or self.in_path==''):\n\t\t\t#ERROR\n\t\t\t# have a message box\n\t\t\tself.myMessageBox.setText(\"Please enter all the credential for the program to run\")\n\t\t\tpass\n\t\tif self.process_flag=='AES':\n\t\t\tif len(self.key)!=16:\n\t\t\t\t#print(self.key)\n\t\t\t\tlen(self.key)\n\t\t\t\tself.myMessageBox.setText(\"You have to enter key\")\n\t\t\telif len(self.key)==16 and self.main_flag=='e':\n\t\t\t\t#\tnow let's do the encryption\n\t\t\t\tself.myMessageBox.setText(\"Encryption is going on\")\n\t\t\t\tkey=self.key\n\t\t\t\tin_filename=self.in_path\n\t\t\t\tlog_path=self.log_path\n\t\t\t\tout_filename=self.op_path\n\t\t\t\tchunksize=self.chunksize;\n\t\t\t\t# **** #\n\t\t\t\tlog=[]\n\t\t\t\tcurr_time=ctime()\n\t\t\t\tcTime=curr_time.split(' ')\n\t\t\t\tdate=cTime[1]+' '+cTime[2]+' '+cTime[-1]\n\t\t\t\tday=cTime[0]\n\t\t\t\tclk=cTime[3]\n\n\t\t\t\tlog.append(day)\n\t\t\t\tlog.append(date)\n\t\t\t\tlog.append(clk)\n\n\t\t\t\ttemp_key=key.decode(\"utf-8\")\n\t\t\t\tlog.append(temp_key)\n\t\t\t\tlog.append(in_filename)\n\n\t\t\t\tif out_filename=='':\n\t\t\t\t\tout_filename = in_filename + '.enc'\n\n\t\t\t\t# system key file\n\t\t\t\tiv =Random.new().read(AES.block_size)\n\t\t\t\tencryptor = AES.new(key, AES.MODE_CBC, iv)\n\t\t\t\tfilesize = os.path.getsize(in_filename)\n\n\t\t\t\twith open(in_filename,'rb') as infile:\n\t\t\t\t\twith open(out_filename,'wb') as outfile:\n\t\t\t\t\t\toutfile.write(struct.pack('Choice <\\b>\")\n\t\tself.select = QComboBox(self)\n\t\tself.select.addItem(' select 
')\n\t\tself.select.addItem('ENCRYPTION')\n\t\tself.select.addItem('DECRYPTION')\n\t\tself.select.activated[str].connect(self.selector)\n\n\t\t## 2.1) browse button to select file to be encrypted or decrypted\n\t\tself.bil1=QLabel(\"Define input file <\\b>\")\n\t\tbrowserButton=QPushButton(\"browse files \",self)\n\t\tbrowserButton.resize(browserButton.sizeHint())\n\t\tbrowserButton.setToolTip(\"Press to select the file you want \")\n\t\tbrowserButton.clicked.connect(self.open_files)\n\t\t#print(self.result)\n\t\t## 2.2) this is a text box to show the file selected\n\t\tself.myTextBox=QTextEdit(self)\n\n\t\t## 3)log file\n\t\tself.bil_log=QLabel(\"log<\\b>\")\n\t\tself.log_browser=QPushButton(\"select log file \",self)\n\t\tself.log_browser.resize(browserButton.sizeHint())\n\t\tself.log_browser.setToolTip(\"Press to select the file you want \")\n\t\tself.log_browser.clicked.connect(self.open_files_log)\n\t\tself.logTextBox=QTextEdit(self)\n\t\tself.logTextBox.setText(\"Selected BY default\")\n\n\t\t## 4) op path\n\t\tself.bil_op=QLabel(\"Define output path <\\b>\")\n\t\topButton=QPushButton(\"select the path \",self)\n\t\topButton.resize(opButton.sizeHint())\n\t\topButton.setToolTip(\"select the output folder \")\n\t\topButton.clicked.connect(self.op_files)\n\t\t## 4.2) this is a text box to show the file selected\n\t\tself.opTextBox=QTextEdit(self)\n\n\t\t## 5) combobox to select type of encryption\n\t\tself.bil_type=QLabel(\"Select the type of encryption<\\b>\")\n\t\tcombo = QComboBox(self)\n\t\tcombo.addItem(\"Select\")\n\t\tcombo.addItem('AES')\n\t\tcombo.addItem('DES3')\n\t\t#combo.addItem('XOR')\n\t\tcombo.activated[str].connect(self.onActivated)\n\n\t\t##6) key enter\n\t\tself.bil_key=QLabel(\"Key <\\b>\")\n\t\tself.keyBox=QLineEdit(self)\n\t\ttry:\n\t\t\tself.keyBox.textChanged[str].connect(self.key_generator)\n\t\texcept:\n\t\t\tpass\n\n\t\t## 7) start the process\n\t\tstartButton= QPushButton(\"START\",self)\n\t\tstartButton.resize(startButton.sizeHint())\n\t\tstartButton.setToolTip(\"Press to run the Encryption \")\n\t\tstartButton.clicked.connect(self.startP)\n\n\t\t## 8) message box\n\t\tself.bil_msg=QLabel(\"Message Box<\\b>\")\n\t\tself.myMessageBox=QTextEdit(self)\n\t\tself.myMessageBox.setText(\"Please enter all the credential for the program to run\")\n\n\n\t\t## grid\n\t\tgrid=QGridLayout()\n\t\tgrid.setSpacing(10)\n\t\tgrid.addWidget(self.bil_main,1,0)\n\t\tgrid.addWidget(self.select,2,0)\n\t\tgrid.addWidget(self.bil1,3,0)\n\t\tgrid.addWidget(browserButton,4,0) ## browse file\n\t\tgrid.addWidget(self.myTextBox,4,1)\n\t\tgrid.addWidget(self.bil_log,5,0)\n\t\tgrid.addWidget(self.log_browser,6,0)\n\t\tgrid.addWidget(self.logTextBox,6,1)\n\t\tgrid.addWidget(self.bil_op,7,0)\n\t\tgrid.addWidget(opButton,8,0)\n\t\tgrid.addWidget(self.opTextBox,8,1)\n\t\tgrid.addWidget(self.bil_type,9,0)\n\t\tgrid.addWidget(combo,10,0)\n\t\tgrid.addWidget(self.bil_key,11,0)\n\t\tgrid.addWidget(self.keyBox,12,0)\n\t\tgrid.addWidget(self.bil_msg,13,0)\n\t\tgrid.addWidget(self.myMessageBox,14,0)\n\t\tgrid.addWidget(startButton,15,0)\n\t\tself.setLayout(grid)\n\n\t\tself.show()\n\n\tdef closeEvent(self,event):\n\t\treply=QMessageBox.question(self,\"Message\",\"Close ?\",QMessageBox.Yes | QMessageBox.No,QMessageBox.No)\n\t\tif reply==QMessageBox.Yes:\n\t\t\tevent.accept()\n\t\tif reply==QMessageBox.No :\n\t\t\tevent.ignore()\n\n\n# ['Breeze', 'Oxygen', 'QtCurve', 'Windows', 'Fusion']\nif __name__ == 
'__main__':\n\tapp=QApplication(sys.argv)\n\tapp.setStyle('Fusion')\n\tr=App()\n\tsys.exit(app.exec_())\n","sub_path":"Encrypto_Windows.py","file_name":"Encrypto_Windows.py","file_ext":"py","file_size_in_byte":15460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"606017520","text":"class SegmentTreeNode:\n def __init__(self, start, end, sum):\n self.start = start\n self.end = end\n self.sum = sum\n \nclass NumArray(object):\n def __init__(self, nums):\n \"\"\"\n initialize your data structure here.\n :type nums: List[int]\n \"\"\"\n self.nums = nums\n self.root = self.build(nums, 0, len(nums)-1)\n \n def build(self, nums, start, end):\n if start > end: return None\n if start == end: return SegmentTreeNode(start, end, nums[start])\n \n left_child = self.build(nums, start, (start+end)/2)\n right_child = self.build(nums, (start+end)/2+1, end)\n root = SegmentTreeNode(start, end, left_child.sum + right_child.sum)\n root.left = left_child\n root.right = right_child\n return root\n \n\n def update(self, i, val):\n \"\"\"\n :type i: int\n :type val: int\n :rtype: int\n \"\"\"\n \n self._update(self.root, i, val)\n \n def _update(self, root, i, val):\n if root and i >= root.start and i <= root.end:\n stack = []\n curr = root\n while i != curr.start or i != curr.end:\n stack.append(curr)\n if i <= curr.left.end:\n curr = curr.left\n else:\n curr = curr.right\n \n diff = val - curr.sum\n curr.sum = val\n while stack:\n out = stack.pop()\n out.sum += diff\n \n \n \n \n def query(self, root, i, j):\n \n if root is None: return 0\n if i > j: return 0\n if i == root.start and j == root.end: return root.sum\n \n if j <= root.left.end:\n return self.query(root.left, i, j)\n elif i >= root.right.start:\n return self.query(root.right, i, j)\n else:\n return self.query(root.left, i, root.left.end) + self.query(root.right, root.right.start, j)\n\n def sumRange(self, i, j):\n \"\"\"\n sum of elements nums[i..j], inclusive.\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n return self.query(self.root, i, j)\n \n \n\n\n# Your NumArray object will be instantiated and called as such:\n# numArray = NumArray(nums)\n# numArray.sumRange(0, 1)\n# numArray.update(1, 10)\n# numArray.sumRange(1, 2)","sub_path":"307-Range-Sum-Query---Mutable/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"559895701","text":"import numpy as np\nimport lal\nfrom scipy.interpolate import interp1d\nfrom scipy.special import i0, i0e\nfrom matplotlib import pyplot as pl\n\n\nclass Interferometer(object):\n \"\"\"Class for the Interferometer & strain data frequency series \"\"\"\n\n\n def __init__(self, name, sensitivity, f_min, T, Fs, t_start, seed):\n \"\"\"\n Instantiate an Interferometer object.\n\n Parameters\n ----------\n name: str\n Interferometer name, e.g., 'H1'.\n sensitivity: str\n Detector sensitivity, e.g. 
'O1'\n minimum_frequency: float\n Minimum frequency to analyse for detector.\n \"\"\"\n self.name = name\n self.sensitivity = sensitivity\n self.laldetector = lal.cached_detector_by_prefix[self.name]\n self.seed=seed\n \n asd_data = np.genfromtxt('data/' + self.name + '_'+ self.sensitivity + '_strain.txt')\n self.asd = interp1d(asd_data[:, 0], asd_data[:, 1])\n self.f_max = asd_data[-1, 0]\n \n self.f_min = f_min\n self.T = T #data segment duration\n self.Fs = Fs #sampling frequency \n self.df = 1/T #frequency resolution in frequency domain\n self.Fn = Fs/2 #Nyquist frequency\n self.t_start = t_start #start time of the analyzed data segment for the detector \n \n self.noise = 0\n self.signal = 0\n self.strain = 0\n \n #get the frequency array for the complex frequency series\n N = int(self.T*self.Fs) #number of time domain data points \n dt = 1/self.Fs #sample spacing in the time domain\n\n freqs = np.fft.rfftfreq(N,dt)\n \n start=np.where(freqs==self.f_min)[0][0]\n self.freqs=freqs[start:]\n \n self.psd = self.asd(self.freqs)**2\n \n\n def get_time_delay(self, ra, dec, t_gps):\n \"\"\" get time delay from geocenter \n \"\"\"\n return lal.TimeDelayFromEarthCenter(self.laldetector.location, ra, dec, t_gps)\n \n def get_antenna_response(self, ra, dec, psi, t_gps):\n \"\"\" get the plus and cross polarizations antenna response \n \"\"\"\n gps = lal.LIGOTimeGPS(t_gps)\n gmst_rad = lal.GreenwichMeanSiderealTime(gps)\n \n response = self.laldetector.response\n\n # computation of plus and cross antenna factors\n fp, fc = lal.ComputeDetAMResponse(response, ra, dec, psi, gmst_rad)\n\n return fp, fc\n\n def get_frequency_array(self):\n \"\"\" get frequency array corresponding to the complex frequency series\n \"\"\"\n N = int(self.T*self.Fs) #number of time domain data points \n dt = 1/self.Fs #sample spacing in the time domain\n \n freqs = np.fft.rfftfreq(N,dt)\n \n start=np.where(freqs==self.f_min)[0][0]\n freqs=freqs[start:]\n \n return freqs\n \n def get_noise_realization_from_psd(self):\n \"\"\" a complex frequency series noise realization corresponding to the detector psd\n \"\"\"\n state=np.random.get_state()\n np.random.seed(self.seed)\n \n noise_psd = self.psd #noise psd \n sigma_noise = 1/np.sqrt(2) * np.sqrt(self.T/2 * noise_psd) #noise stds array, one per freq bin\n \n noise = np.zeros(len(self.freqs)) + 0j #array to hold the noise realization\n \n for i in range(len(noise)):\n noise[i] = sigma_noise[i]*np.random.randn() + 1j * sigma_noise[i]*np.random.randn() #random noise ralization\n \n np.random.set_state(state)\n return noise\n \n def inject_signal_into_noise(self, signal):\n self.noise = self.get_noise_realization_from_psd() \n self.signal = signal\n self.strain = self.signal + self.noise\n \n def get_signal_snr(self):\n\n SNR_squared=4*self.df*np.sum((np.abs(self.signal)**2/self.asd(self.freqs)**2))\n SNR=np.sqrt(SNR_squared)\n \n return SNR\n \n def plot_signal(self):\n freqs = self.freqs\n fig, ax = pl.subplots(1, 1, figsize=(9,6))\n ax.set_title(self.name)\n ax.loglog(freqs, self.asd(freqs), 'b')\n ax.loglog(freqs, abs(self.signal), 'g', label = 'SNR =' + str(self.get_signal_snr()))\n ax.set_ylim([5e-25, 1e-20])\n ax.set_xlim([self.f_min, self.Fn])\n ax.set_xlabel('frequency /Hz')\n ax.set_ylabel(r'h(f)')\n ax.legend()\n \n def loglikelihood(self, model):\n \"\"\" calculates loglikelihood\n \"\"\" \n return - 2 * self.df * np.sum(np.abs(self.strain - model)**2 / self.psd)\n \n def h_inner_h_plus_d_inner_d(self, model):\n return np.sum((np.abs(model)**2 + np.abs(self.strain)**2) 
/ self.psd) \n \n def complex_h_inner_d(self, model):\n return np.sum(model * np.conj(self.strain) / self.psd) \n \n \n \nclass Network(object):\n \"\"\"Class for the Interferometer Network \"\"\"\n\n def __init__(self, detectors):\n \"\"\"\n Instantiate an interferometer Network object.\n\n Parameters\n ----------\n detectors: list\n List of Interferometer objects that make up the network.\n \"\"\"\n self.detectors = detectors\n \n def network_phase_marg_loglikelihood(self, models):\n \"\"\" calculates network phase marginalized loglikelihood\n \"\"\"\n Y = -2 * self.detectors[0].df * sum([det.h_inner_h_plus_d_inner_d(models[det.name]) for det in self.detectors])\n X = 4 * self.detectors[0].df * np.abs(sum([det.complex_h_inner_d(models[det.name]) for det in self.detectors]))\n \n #print(sum([det.complex_h_inner_d(models[det.name]) for det in self.detectors]))\n\n return Y + np.log(i0e(X)) + X #i0e(X) = exp(-abs(X))*i0(X), where i0 is the modified Bessel function of the first kind\n \n def snr(self):\n return np.sqrt(sum([det.get_signal_snr()**2 for det in self.detectors]))\n\n \n \n \n \n","sub_path":"interferometer.py","file_name":"interferometer.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"526930437","text":"# %% --------------------------------------- Imports -------------------------------------------------------------------\nimport numpy as np\nfrom keras.models import load_model\n\nimport os\nos.system(\"sudo pip install \")\n\n\ndef predict(x):\n # Here x is a NumPy array. On the actual exam it will be a list of paths.\n # %% --------------------------------------------- Data Prep -------------------------------------------------------\n x = x.reshape(len(x), -1)\n x = x / 255\n # Write any data prep you used during training\n # %% --------------------------------------------- Predict ---------------------------------------------------------\n model = load_model('mlp_ajafari.hdf5')\n # If using more than one model to get y_pred, they need to be named as \"mlp_ajafari1.hdf5\", \"mlp_ajafari2.hdf5\", etc.\n y_pred = np.argmax(model.predict(x), axis=1)\n return y_pred, model\n # If using more than one model to get y_pred, do the following:\n # return y_pred, model1, model2 # If you used two models\n # return y_pred, model1, model2, model3 # If you used three models, etc.\n","sub_path":"Exam_MiniProjects/7-Keras_Exam1_Sample_Codes_S20/3-predict_ajafari.py","file_name":"3-predict_ajafari.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"535420733","text":"# jl\n# 2018-04-27\n# library ANTs\n\nfrom nipype.interfaces import ants\n\ndef ants_extract_brain(dataIn,out_prefix,keep_temp_files):\n\tbrainextraction = ants.segmentation.BrainExtraction()\n\tbrainextraction.inputs.dimension = 3\n\tbrainextraction.inputs.anatomical_image = dataIn\n\tbrainextraction.inputs.brain_template = \\\n'/Users/Shared/MICCAI2012-Multi-Atlas-Challenge-Data/T_template0.nii.gz'\n\tbrainextraction.inputs.brain_probability_mask = \\\n'/Users/Shared/MICCAI2012-Multi-Atlas-Challenge-Data/T_template0_BrainCerebellumProbabilityMask.nii.gz'\n\tbrainextraction.inputs.out_prefix = out_prefix\n\tbrainextraction.inputs.keep_temporary_files = keep_temp_files\n\tbrainextraction.inputs.extraction_registration_mask = 
\\\n'/Users/Shared/MICCAI2012-Multi-Atlas-Challenge-Data/T_template0_BrainCerebellumRegistrationMask.nii.gz'\n\tcmd = brainextraction.cmdline\n\t#brainextraction.run()\n\treturn cmd\n\t\ndef ants_registration(dataIn,dataRef,output_transform_prefix,dataOut,transforms,\\\n metric,transform_parameters,number_of_iterations,\\\n convergence_threshold,smoothing_sigmas,\\\n radius_or_number_of_bins,sampling_strategy,sampling_percentage):\n\treg = ants.Registration()\n\treg.inputs.fixed_image = dataRef\n\treg.inputs.moving_image = dataIn\n\treg.inputs.output_transform_prefix = output_transform_prefix\n\treg.inputs.output_warped_image = dataOut\n\treg.inputs.collapse_output_transforms = True\n\treg.inputs.dimension = 3\n\treg.inputs.transforms = transforms #['Rigid', 'Affine', 'SyN']\n\treg.inputs.metric = metric #['MI','MI','CC']\n\treg.inputs.transform_parameters = transform_parameters #[(0.1,), (0.1,), (0.1, 3.0, 0.0)]\n\treg.inputs.number_of_iterations = number_of_iterations #[[1000, 500, 250, 0], [1000, 500, 250, 0], [100, 100, 70, 20]]\n\treg.inputs.convergence_threshold = convergence_threshold #[1.e-6, 1.e-6, 1.e-9]\n\treg.inputs.convergence_window_size = [10]*len(transforms) #[10]*3\n\treg.inputs.shrink_factors = [[6,4,2,1]]*len(transforms) #[[6,4,2,1]]*3\n\treg.inputs.smoothing_sigmas = smoothing_sigmas #[[4,2,1,0], [4,2,1,0], [3,2,1,0]]\n\treg.inputs.use_histogram_matching = [True]*len(transforms) #[True]*3 # This is the default\n\treg.inputs.float = True\n\treg.inputs.verbose = True\n\treg.inputs.winsorize_lower_quantile = 0.01\n\treg.inputs.winsorize_upper_quantile = 0.99\n\treg.inputs.write_composite_transform = False\n\treg.inputs.initialize_transforms_per_stage = False\n\treg.inputs.metric_weight = [1]*len(transforms) #[1]*3 # Default (value ignored currently by ANTs)\n\treg.inputs.radius_or_number_of_bins = radius_or_number_of_bins #[32,32,4]\n\treg.inputs.sampling_strategy = sampling_strategy #['Regular', 'Regular', None]\n\treg.inputs.sampling_percentage = sampling_percentage #[0.25, 0.25, None]\n\tcmd = reg.cmdline\n\t#reg.run()\n\treturn cmd\t \t\n# \treg.inputs.sigma_units = ['vox'] * 2\n# \treg.inputs.use_estimate_learning_rate_once = [True, True]\n\ndef ants_applytransform(dataIn,dataRef,transform,interpolation,dataOut):\n\tat = ants.ApplyTransforms()\n\tat.inputs.input_image = dataIn\n\tat.inputs.reference_image = dataRef\n\tat.inputs.transforms = transform\n\tat.inputs.interpolation = interpolation # 'Linear','NearestNeighbor','CosineWindowedSinc','WelchWindowedSinc','HammingWindowedSinc','LanczosWindowedSinc','MultiLabel','Gaussian','BSpline'\n\tat.inputs.output_image = dataOut\n\tat.inputs.dimension = 3\n\tat.inputs.float = True\n\tcmd = at.cmdline\n\t#at.run()\n\treturn cmd","sub_path":"ants_library.py","file_name":"ants_library.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"310601244","text":"# Copyright (c) 2020\n# @Author: xiaoweixiang\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def recoverTree(self, root: TreeNode) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n \"\"\"\n First find the two swapped nodes, then traverse all the nodes and swap the two values back so that the tree satisfies the BST property again\n \"\"\"\n l = self.inorder(root)\n x, y = self.findTwoSwapped(l)\n print(x)\n print(y)\n\n def re(r: TreeNode) -> None:\n if not r:\n return\n re(r.left)\n if r.val == x:\n r.val = y\n elif r.val == y:\n r.val = x\n 
re(r.right)\n\n re(root)\n\n def inorder(self, r: TreeNode) -> List[int]:\n if r:\n return self.inorder(r.left) + [r.val] + self.inorder(r.right)\n else:\n return []\n\n def findTwoSwapped(self, nums: List[int]) -> (int, int):\n n = len(nums)\n x = y = -1\n k = 0\n for i in range(n - 1):\n if nums[i] > nums[i + 1]:\n if k == 0:\n x = nums[i]\n y = nums[i + 1]\n k += 1\n elif k == 1:\n y = nums[i + 1]\n break\n return x, y\n\n def isValidBST(self, root: TreeNode) -> bool:\n \"\"\"\n Validate a binary search tree; the simplest way is an in-order traversal\n :param root:\n :return:\n \"\"\"\n\n def helper(node, lower=float('-inf'), upper=float('inf')):\n if not node:\n return True\n val = node.val\n if val <= lower or val >= upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\n\n return helper(root)\n","sub_path":"src/finished/no99_恢复二叉搜索树.py","file_name":"no99_恢复二叉搜索树.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"313812347","text":"import yaml\nimport numpy as np\nimport pandas as pd\nfrom kmerprediction.utils import parse_metadata, convert_feature_name\nfrom kmerprediction import constants\nimport matplotlib\n\nmatplotlib.use('agg')\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef get_distribution(feature, ova, label_header):\n args = {'metadata': constants.OMNILOG_METADATA,\n 'fasta_header': 'Strain',\n 'train_header': None,\n 'label_header': label_header,\n 'one_vs_all': ova\n }\n x_train, y_train, x_test, y_test = parse_metadata(**args)\n\n all_data = list(x_train) + list(x_test)\n all_labels = list(y_train) + list(y_test)\n\n sample_distributions = {k: [] for k in np.unique(all_labels)}\n\n for index, value in enumerate(all_data):\n sample_distributions[all_labels[index]].append(value)\n\n plate_number, well_index = convert_feature_name(feature)\n pattern = r'%s\\s(.+)%s' % (plate_number, well_index)\n data = pd.read_csv(constants.OMNILOG_DATA, index_col=0)\n row = data.loc[data.index.str.match(pattern)]\n\n output = {}\n for key in sample_distributions.keys():\n genomes = [x for x in sample_distributions[key] if x in row]\n data = row[genomes].values\n output[key] = data.reshape(data.shape[1])\n return output\n\ndef gather_distribution_data(feature_data, ova, label_header):\n cols = ['Feature', 'Distribution', 'Sample Type']\n data = pd.DataFrame(columns=cols)\n feature_names = feature_data['Feature']\n seen_features = []\n count = 0\n for name in feature_names:\n if name not in seen_features:\n seen_features.append(name)\n distributions = get_distribution(name, ova, label_header)\n for key in distributions:\n for elem in distributions[key]:\n data.loc[count] = [name, elem, key]\n count += 1\n return data\n\ndef plot_bars(data, palette, ova, p):\n ova = ova or 'All'\n ax = sns.barplot(x='Score', y='Feature', hue='Model', data=data, palette=palette)\n\n ax.set_ylabel(ax.get_ylabel(), fontsize=18)\n ax.set_yticklabels(ax.get_yticklabels(), fontsize=15)\n\n ax.set_xlim(0, 1.01)\n ax.set_xlabel(ax.get_xlabel(), fontsize=18)\n ax.set_xticks(np.arange(0, 1.05, 0.05))\n ax.set_xticklabels(np.arange(0, 1.05, 0.05), fontsize=15)\n\n ax.set_title('Features Important for predicting {ova} {p}'.format(ova=ova, p=p),\n fontsize=24)\n legend = ax.legend(title='Model', fontsize=15, loc='lower right')\n plt.setp(legend.get_title(), fontsize=15)\n\n return ax\n\ndef plot_distributions(data, palette):\n ax = sns.stripplot(x='Distribution', 
y='Feature', hue='Sample Type',\n data=data, alpha=0.35, size=10, palette=palette)\n ax.set_ylabel('')\n ax.set_yticklabels([])\n\n x_ticks = np.arange(0, 910, 100)\n ax.set_xticks(x_ticks)\n ax.set_xticklabels(x_ticks, fontsize=15)\n ax.set_xlim(min(x_ticks)-10, max(x_ticks)+10)\n ax.set_xlabel('Omnilog Are Under the Curve', fontsize=18)\n\n legend = ax.legend(fontsize=15, loc='lower right', ncol=2)\n plt.setp(legend.get_title(), fontsize=15)\n\n ax.set_title('Sample Distribution', fontsize=24)\n return ax\n\ndef main():\n bar_data = pd.read_csv(snakemake.input[0])\n if snakemake.wildcards.ova == 'all':\n ova = False\n else:\n ova = snakemake.wildcards.ova\n\n if snakemake.wildcards.prediction == 'Otype':\n label_header = 'O type'\n elif snakemake.wildcards.prediction == 'Htype':\n label_header = 'H type'\n elif snakemake.wildcards.prediction == 'Lineage':\n label_header = 'LSPA6'\n else:\n label_header = snakemake.wildcards.prediction\n\n dist_data = gather_distribution_data(bar_data, ova, label_header)\n\n palette1 = sns.color_palette('deep')\n palette2 = sns.color_palette('Set1')\n sns.set(context='paper')\n\n fig = plt.figure(1, figsize=(25, 12.5))\n\n plt.subplot(1, 2, 1)\n plot_bars(bar_data, palette1, ova, label_header)\n\n plt.subplot(1, 2, 2)\n plot_distributions(dist_data, palette2)\n\n plt.tight_layout()\n\n plt.savefig(snakemake.output[0])\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","sub_path":"scripts/omni_features_figs.py","file_name":"omni_features_figs.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"206638128","text":"\"\"\"\nФункция sum_numbers.\n\nПринимает строку string содержащую целое число больше или равное 0.\nПример: '123', '00', '0603', '0054310003323566767'.\nВернуть сумму этих чисел.\nПример: string='0603', result=9 (0+6+0+3).\n\nСтрока не должна содержать пробелов или любых других символов,\nто есть должна корректно конвертироваться в int.\nПодсказка: isdigit()\nЕсли строка не соответствует этим требованиям, то вернуть None.\nПример (с ошибкой): '765eew', '5 57 767', '$ewe23', '664.232', ''.\n\"\"\"\n\n\ndef sum_numbers(string):\n # if not string:\n # return None\n\n if not string.isdigit():\n return None\n\n my_sum = 0\n for my_symbol in string:\n # if not my_symbol.isdigit():\n # return None\n my_sum += int(my_symbol)\n\n return my_sum\n\n\nif __name__ == '__main__':\n print(sum_numbers(\"4*6\"))\n\n print(\"0603\".isdigit())\n","sub_path":"topic_02_syntax/practice/loop_2_sum_numbers.py","file_name":"loop_2_sum_numbers.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"487915595","text":"import exchangelayer as el\nimport genconfig as gc\nimport genutils as gu\nimport strategies as st\nimport trader as trd\n\nLastOrder = 'N'\nOrderPrice = 0\nMarketAskPrice = 0\nMarketBidPrice = 0\n\n\ndef GetTradeAmount(order):\n if order == 'buy':\n ta = gu.RoundIfGreaterThan((el.GetTradeAmount('currency')\n / MarketAskPrice), 3)\n elif order == 'sell':\n ta = gu.RoundIfGreaterThan(el.GetTradeAmount('asset'), 3)\n else:\n ta = 0\n return ta\n\n\ndef SetMarketPrice(order):\n if order == 'buy':\n trd.MarketAskPrice = el.GetMarketPrice('ask')\n elif order == 'sell':\n trd.MarketBidPrice = el.GetMarketPrice('bid')\n\n\ndef TradeFromStrategy():\n # Wait until we have enough data to trade off\n if len(st.Trade_list) >= gc.Trader.TradeDelay:\n if st.Trade_list[-1] == 'Buy':\n 
el.CancelLastOrderIfExist()\n # Get fresh ask price\n SetMarketPrice('buy')\n TradeAmount = GetTradeAmount('buy')\n if TradeAmount > gc.API.AssetTradeMin:\n el.Trade('buy', trd.MarketAskPrice, TradeAmount,\n gc.API.TradePair)\n print('BUYING', TradeAmount, gc.API.Asset, 'at',\n trd.MarketAskPrice, gc.API.Currency)\n if gc.Trader.Enabled:\n gu.RecordTrades('BOUGHT', trd.MarketAskPrice, TradeAmount)\n # KISS method...\n trd.OrderPrice = trd.MarketAskPrice\n trd.LastOrder = 'buy'\n else:\n print('Wanted to BUY', TradeAmount, gc.API.Asset,\n 'at', trd.MarketAskPrice, 'but needed more', gc.API.Currency)\n elif st.Trade_list[-1] == 'Sell':\n el.CancelLastOrderIfExist()\n TradeAmount = GetTradeAmount('sell')\n # Get fresh bid price\n SetMarketPrice('sell')\n if TradeAmount > gc.API.AssetTradeMin:\n el.Trade(\n 'sell', trd.MarketBidPrice, TradeAmount, gc.API.TradePair)\n print('SELLING', TradeAmount, gc.API.Asset, 'at',\n trd.MarketBidPrice, gc.API.Currency)\n if gc.Trader.Enabled:\n gu.RecordTrades('SOLD', trd.MarketBidPrice, TradeAmount)\n # KISS method...\n trd.OrderPrice = trd.MarketBidPrice\n trd.LastOrder = 'sell'\n else:\n print('Wanted to SELL', TradeAmount, gc.API.Asset, 'at',\n trd.MarketBidPrice, 'but needed more', gc.API.Asset)\n\n\ndef ReIssueTrade():\n if el.OrderExist():\n el.CancelLastOrderIfExist()\n if LastOrder == 'sell':\n CurrPrice = el.GetMarketPrice('bid')\n if LastOrder == 'buy':\n CurrPrice = el.GetMarketPrice('ask')\n Prices = [CurrPrice, OrderPrice]\n PriceDelta = max(Prices) / min(Prices)\n if not PriceDelta == 1.0:\n if PriceDelta <= (gc.Trader.ReIssueSlippage / 100) + 1:\n TradeAmount = GetTradeAmount(LastOrder)\n if TradeAmount > gc.API.AssetTradeMin:\n el.Trade(LastOrder, CurrPrice, TradeAmount,\n gc.API.TradePair)\n print('Re-', LastOrder.upper(), 'at ', CurrPrice)\n","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"350082183","text":"#! 
python3\n\nimport praw\nimport sys, os, glob, shutil\nimport urllib.request\nimport sqlite3\nimport time\nimport keyboard\nimport fetcher_main.move_images as move_images\n\nexit = 0\n\ndef retrieve():\n global exit\n print(\"Acquiring Data\")\n #Connect to test.db\n conn = sqlite3.connect('data.db')\n #print(\"Opened database successfully\")\n cursor = conn.cursor()\n\n #Create Table DETAILS\n conn.execute('''CREATE TABLE IF NOT EXISTS DETAILS \n (ID INT PRIMARY KEY NOT NULL,\n TITLE TEXT NOT NULL,\n URL TEXT NOT NULL,\n CREATED INT NOT NULL); ''')\n\n #Checking if the user pressed 'q'\n if keyboard.is_pressed('q'): # if key 'q' is pressed \n exit = 1\n\n #Initialize RedditBot\n reddit = praw.Reddit(client_id='aVFs_elcBcouwg',\n client_secret='xEVULnu-o7oU28BoxEqw13LrHkTntg',\n user_agent='RepostBot',\n username='ReponstBot',\n password='Reponst123')\n\n\n #Choose subreddit to download images\n subreddit = reddit.subreddit('final_projekt')\n top_subreddit = subreddit.new(limit=25)\n\n for submission in top_subreddit:\n\n #print(submission)\n cursor.execute(\"SELECT ID FROM DETAILS WHERE ID = ?\", (submission.id,))\n data = cursor.fetchall()\n\n if len(data) != 0:\n continue\n elif ((submission.url.endswith(\".png\")) or (submission.url.endswith(\".jpg\"))):\n #Insert details in DB\n cursor.execute(\"INSERT INTO DETAILS (ID, TITLE, URL, CREATED) VALUES (?, ?, ?, ?)\",\n (submission.id, submission.title, submission.url, submission.created))\n print(\"New Post Added to Database\")\n\n #Select only .png and .jpg files to download\n if submission.url.endswith(\".png\"):\n name = submission.id + \".png\"\n elif submission.url.endswith(\".jpg\"):\n name = submission.id + \".jpg\"\n else:\n continue\n #Download images\n urllib.request.urlretrieve(submission.url, name)\n\n conn.commit()\n conn.close()\n move_images.move()\n print(\"Acquiring Complete\")\n\n #To make the code loop after a specific interval\n \"\"\"while True:\n #Your script here\n time.sleep(Amount of time in seconds)\"\"\"\n\ndef loop():\n global exit\n while exit != 1:\n if keyboard.is_pressed('q'): # if key 'q' is pressed \n exit = 1\n retrieve()\n time.sleep(45) #seconds\n\n\n","sub_path":"fetcher_main/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"446633912","text":"import numpy as np\nimport random as rand\nimport math\nfrom functools import reduce\nfrom keras.models import Sequential\nfrom keras.layers import Activation, Dense, Reshape, Merge\n\n##################################\n# Genomes are represented as fixed-width lists of integers corresponding\n# to sequential layers and properties. A model with 2 convolutional layers\n# and 1 dense layer would look like:\n#\n# [<conv layer><conv layer><dense layer><optimizer>]\n#\n# The makeup of the convolutional layers and dense layers is defined in the\n# GenomeConfig below under self.convolutional_layer_shape and\n# self.dense_layer_shape. 
The <optimizer> element consists of just one property.\n###################################\n\nclass Genome:\n __slots__ = ['input_shape', 'output_nodes','final_activation','loss_func','genes', 'optimizer', 'activation','metrics']\n def __init__(self, input_shape, output_nodes, final_activation_func, loss_func, metrics=['acc'],\n optimizers=None, activations=None):\n\n # Input and Output\n self.input_shape = input_shape\n self.output_nodes = output_nodes\n self.final_activation = final_activation_func\n self.loss_func = loss_func\n self.metrics = metrics \n\n # Genes\n self.genes = []\n\n self.optimizer = optimizers or [\n 'adam',\n 'rmsprop',\n 'adagrad',\n 'adadelta'\n ]\n self.activation = activations or [\n 'relu',\n 'sigmoid',\n 'linear'\n ]\n\n def mutate(self, genome, num_mutations):\n num_mutations = np.random.choice(num_mutations)\n for i in range(num_mutations):\n index = np.random.choice(list(range(1, len(genome))))\n\n offset = 0\n for gene in self.genes:\n next_offset = offset + gene.genome_size\n if offset <= index < next_offset:\n gene.mutate(genome[offset:next_offset], index-offset)\n break\n offset = next_offset\n else:\n genome[index] = np.random.choice(list(range(len(self.optimizer)))) \n return genome\n\n def add(self, gene):\n self.genes.append(gene)\n\n def decode(self, genome):\n if not self.is_compatible_genome(genome):\n raise ValueError(\"Invalid genome for specified configs\")\n\n model = Sequential()\n\n # Simplify input layer\n model.add(Reshape(self.input_shape, input_shape = self.input_shape))\n \n offset = 0\n for gene in self.genes:\n n = gene.genome_size\n gene.decode(genome[offset:offset+n], model)\n offset += n\n\n model.add(Dense(self.output_nodes, activation=self.final_activation))\n model.compile(loss=self.loss_func,\n optimizer=self.optimizer[genome[offset]],\n metrics=self.metrics)\n\n return model\n\n @property\n def representation(self):\n encodings = []\n for gene in self.genes:\n encodings += gene.representation\n \n encodings.append(\"<Optimizer>\")\n encodings.append(\"\")\n return encodings\n\n def generate(self):\n genome = []\n\n offset = 0\n for gene in self.genes:\n genome += gene.generate()\n genome.append(np.random.choice(list(range(len(self.optimizer)))))\n genome[0] = 1\n return genome\n\n def is_compatible_genome(self, genome):\n expected_len = reduce(lambda x,y: x + y.genome_size, self.genes, 1)\n if len(genome) != expected_len:\n return False\n offset = 0\n for gene in self.genes:\n n = gene.genome_size\n if not gene.is_compatible(genome[offset:offset+n]):\n return False\n offset += n\n return True\n\n # metrics = accuracy or loss\n def best_genome(self, csv_path, metric=\"accuracy\", include_metrics=True):\n best = max if metric == \"accuracy\" else min\n col = -1 if metric == \"accuracy\" else -2\n data = np.genfromtxt(csv_path, delimiter=\",\")\n row = list(data[:, col]).index(best(data[:, col]))\n genome = list(map(int, data[row, :-2]))\n if include_metrics:\n genome += list(data[row, -2:])\n return genome\n\n # metric = accuracy or loss\n def decode_best(self, csv_path, metric=\"accuracy\"):\n return self.decode(self.best_genome(csv_path, metric, False))\n","sub_path":"genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"520393968","text":"import psycopg2\nfrom psycopg2 import pool\nimport os\nfrom dotenv import load_dotenv\nimport simplejson as json\nfrom psycopg2.extras import RealDictCursor\n\nimport 
settings\n\n############################################################\n# Establish Connection Pool\n############################################################\n\npool = None\n\ndef connect():\n global pool\n pool = psycopg2.pool.ThreadedConnectionPool(1,10,\n user = os.environ.get('SQL_USER'),\n password = os.environ.get('SQL_PASS'),\n host = os.environ.get('SQL_IP'),\n port = os.environ.get('SQL_PORT'),\n database = os.environ.get('SQL_DB'))\n\n############################################################\n# DB Query Functions\n############################################################\n\n# General DB View function\ndef getResultSetFromDB(funcName, params):\n conn = pool.getconn()\n with conn, conn.cursor(cursor_factory=RealDictCursor) as cursor:\n cursor.callproc(funcName, params)\n result = json.dumps(cursor.fetchall(), default=str)\n pool.putconn(conn)\n return result\n\n# View without js encoding\ndef getResultSetFromDBNoJS(funcName, params):\n conn = pool.getconn()\n with conn, conn.cursor(cursor_factory=RealDictCursor) as cursor:\n cursor.callproc(funcName, params)\n # Convert from RealDict => json => Python list\n result = json.loads(json.dumps(cursor.fetchall(), default=str))\n pool.putconn(conn)\n return result\n\n# Modify function\ndef modifyDB(funcName, params):\n conn = pool.getconn()\n with conn, conn.cursor(cursor_factory=RealDictCursor) as cursor:\n cursor.callproc(funcName, params)\n result=json.dumps(cursor.fetchall())\n pool.putconn(conn)\n # Return status and error message\n return result\n\n# Call at end of application\ndef closeDB():\n if pool:\n pool.closeall()\n","sub_path":"src/postgres_client.py","file_name":"postgres_client.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"584991380","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 14 09:58:49 2019\n\n@author: jean\n\"\"\"\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Dropout\nfrom keras.utils import np_utils\nfrom keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\nfrom sklearn.model_selection import StratifiedKFold\n\nseed = 5\nnp.random.seed(seed)\n\n(X,y),(X_trein, y_trein) = mnist.load_data()\n\nprevisores = X.reshape(X.shape[0],28,28,1)\nprevisores = previsores.astype('float32')\n\nprevisores /= 255\n\nclasse = np_utils.to_categorical(y,10)\n\n# controls the cross-validation\nkfold = StratifiedKFold(n_splits = 5, shuffle=True, random_state=seed)\nresults = []\n\nb = np.zeros(shape=(classe.shape[0], 1))\n\nfor indice_treinamento, indice_teste in kfold.split(previsores, \n np.zeros(shape=(classe.shape[0], 1))):\n #print('Training indices: ',indice_treinamento, 'Test indices', indice_teste)\n classificador = Sequential()\n classificador.add(Conv2D(32,(3,3),input_shape=(28,28,1),activation='relu'))\n classificador.add(MaxPooling2D(pool_size=(2,2)))\n classificador.add(Flatten())\n classificador.add(Dense(units=128,activation='relu'))\n classificador.add(Dense(units=10,activation='softmax'))\n classificador.compile(loss='categorical_crossentropy',optimizer='adam',\n metrics=['accuracy'])\n classificador.fit(previsores[indice_treinamento],classe[indice_treinamento],\n batch_size=128,epochs=5)\n precisao = classificador.evaluate(previsores[indice_teste],classe[indice_teste])\n results.append(precisao[1])\n \n\n#media = results.mean()\nmedia = 
sum(results)/len(results)\n\n","sub_path":"cnn/mnist/mnist_valid_cruzada.py","file_name":"mnist_valid_cruzada.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"459122204","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 13:56:57 2020\n\n@author: pavan\n\"\"\"\nimport pandas as pd\ndf_in = pd.read_csv('glassdoor_jobs.csv')\n\ndf_in.columns\n\n# Removing unnecessary columns\ndf = df_in.drop(['Unnamed: 0'],axis = 1)\n\n\n# Salary Estimate cleaning\ndf['hourly'] = df['Salary Estimate'].apply(lambda x: 1 if 'per hour' in x.lower() else 0)\ndf['emp_provid_sal'] = df['Salary Estimate'].apply(lambda x: 1 if 'employer provided salary:' in x.lower() else 0)\n\ndf =df[df['Salary Estimate'] != '-1']\n\nsalary = df['Salary Estimate'].apply(lambda x: x.split('(')[0])\n\nsalary_dk = salary.apply(lambda x: x.replace('K','').replace('$',''))\nsalary_hr_est = salary_dk.apply(lambda x: x.lower().replace('per hour','').replace('employer provided salary:',''))\n\ndf['min_salary'] = salary_hr_est.apply(lambda x: int(x.split('-') [0]))\ndf['max_salary'] = salary_hr_est.apply(lambda x: int(x.split('-') [1]))\n\ndf['avg_salary'] = (df['min_salary'] + df['max_salary']) / 2\n\n#Location : extract only State names\n\ndf['job_location'] = df['Location'].apply(lambda x: x.split(',')[1])\n\ndf.columns\n# Check whether Location and Headquarters are the same\ndf['same_location'] = df.apply(lambda x: 1 if x['Location'] == x['Headquarters'] else 0,axis = 1)\n\n\n#Company Name: remove ratings from the company name\ndf['company_txt'] = df.apply(lambda x: x['Company Name'] if x.Rating < 0 else x['Company Name'][:-3],axis = 1)\n\n# company age\ndf['age'] = df.Founded.apply(lambda x: x if x < 1 else 2020 - x)\n\n#Revenue : Cleanse the Revenue with the avg Revenue\n#df['avg_revenue'] = df['Revenue'].apply(lambda x: x.lower().replace('million (usd)','').replace('$','').split('to')([0]+[1])/2)\n#revenue = df['Revenue'].apply(lambda x: x.split('(')[0])\n#revenue_dk = revenue.apply(lambda x: x.replace('to','-').replace('$',''))\n#revenue_hr_est = salary_dk.apply(lambda x: x.lower().replace('unknown / non-applicable',''))\n\n\n#Job Description parsing for specific skills like Python,spark,aws,java,R,sql\ndf['python'] = df['Job Description'].apply(lambda x: 1 if 'python' in x.lower() else 0)\ndf['python'].value_counts()\n\ndf['spark'] = df['Job Description'].apply(lambda x: 1 if 'spark' in x.lower() else 0)\ndf['spark'].value_counts()\n\ndf['sql'] = df['Job Description'].apply(lambda x: 1 if 'sql' in x.lower() else 0)\ndf['sql'].value_counts()\n\ndf['aws'] = df['Job Description'].apply(lambda x: 1 if 'aws' in x.lower() else 0)\ndf['aws'].value_counts()\n\ndf.to_csv('salary_data_cleansed.csv',index = False)","sub_path":"Salary_estimator_data_cleaning.py","file_name":"Salary_estimator_data_cleaning.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"73019021","text":"#Universidade Federal do Amazonas\n#Daiara Colpani - 21600544\n\nfrom math import*\n\nn = int(input(\"Enter n:\"))\nh0 = sqrt(2)\nrep = 1\n\nwhile (rep<=n):\n\tx = sqrt(1-(h0/2)**2)\n\thn = sqrt(2-2*(x))\n\tsoma = 2**(rep)* h0\n\trep = rep + 1\n\th0 = hn\n\t\nprint(round(soma,8))\n\n","sub_path":"exs/1496-1140.py","file_name":"1496-1140.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"520120776","text":"import MeCab, math\n\ndef test(strings, N):\n p_li=[]\n for i in xrange(N):\n f_di = count_word(strings)\n p_li.append(get_prob(f_di))\n return aggregate_dict(p_li)\n\ndef count_word(strings):\n dict={}\n t = MeCab.Tagger()\n node = t.parseToNode(strings)\n while node:\n if node.surface in dict:\n dict[node.surface] += 1\n else:\n dict[node.surface] = 1\n node = node.next\n return dict\n\ndef get_prob(freq_dic):\n probdic = {}\n word_sum = sum(freq_dic.itervalues())\n for word in freq_dic:\n probdic[word] = math.log(1.0 * freq_dic[word] / word_sum)\n return probdic\n\ndef aggregate_dict(dict_list):\n word_set=set([])\n word_dic={}\n for dic1 in dict_list:\n for word in [w_li for w_li in dic1 if not w_li in word_set]:\n word_set = word_set.union(word)\n value=0\n for dic in [di_li for di_li in dict_list if word in di_li]:\n value += dic[word]\n word_dic[word] = value\n return word_dic","sub_path":"compile_test/py_mecab.py","file_name":"py_mecab.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"337005803","text":"from sklearn.neural_network import MLPRegressor\nfrom sklearn.model_selection import cross_val_score\nfrom hyperopt import hp, fmin, tpe, Trials, STATUS_OK\n\nx = 0\ny = 0\nreg = 0\n\ndef set_hidden_layer(param):\n layer_one = int(param[\"layer_one\"])\n layer_two = int(param[\"layer_two\"])\n layer_three = int(param[\"layer_three\"])\n hidden_layer = (layer_one, layer_two, layer_three)\n return MLPRegressor(hidden_layer_sizes=hidden_layer, max_iter=1200)\n\n\ndef f(param):\n global reg\n reg = set_hidden_layer(param)\n acc = cross_val_score(reg, x, y, n_jobs=-1).mean()\n return {\"loss\": -acc, \"status\": STATUS_OK}\n\n\ndef calc(data_x, data_y):\n global x\n global y\n global reg\n x = data_x\n y = data_y\n\n space_rf = {\n 'layer_one': hp.quniform('layer_one', 1, 100, 1),\n 'layer_two': hp.quniform('layer_two', 1, 100, 1),\n 'layer_three': hp.quniform('layer_three', 1, 100, 1),\n }\n\n trials = Trials()\n best = fmin(\n f,\n space=space_rf,\n algo=tpe.suggest,\n max_evals=100,\n trials=trials\n )\n best_reg = set_hidden_layer(best)\n best_score = cross_val_score(best_reg, x, y, cv=5, n_jobs=-1).mean()\n return best_reg, best_score","sub_path":"ai4chem/automl/multi_layer_perception.py","file_name":"multi_layer_perception.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"561782128","text":"\"\"\"\nPlot the Keras model as an image.\n\"\"\"\nfrom keras.models import model_from_json\nfrom keras.utils import plot_model\n\nMODEL_NAME = 'cnn_mfcc'\n\nwith open('{}.json'.format(MODEL_NAME), 'r') as fd:\n model = model_from_json(fd.read())\n\nplot_model(model, to_file='{}.png'.format(MODEL_NAME),\n show_shapes=True, show_layer_names=True)\n","sub_path":"hw1/plot_model.py","file_name":"plot_model.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"381574893","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nACTION_BACK = 0\nACTION_END = 1\n\nclass Env:\n def __init__(self):\n self.prob = 0.9\n\n def step(self, action):\n \"\"\"\n 0: left, 1: right\n \"\"\"\n next_state = 0\n reward = 0.0\n if action == ACTION_BACK:\n result = np.random.binomial(1, 0.9) #\n if result == 0: #\n next_state = -1\n reward = 1.0\n elif 
action == ACTION_END:\n next_state = -1\n return next_state, reward\n\n \nclass policy:\n def __init__(self, left_prob):\n self.left_prob = left_prob\n\n def action(self):\n action = np.random.binomial(1, 1 - self.left_prob)\n return action\n\n\ndef figure_5_4():\n env = Env()\n target_action_prob = 1.0\n behaviour_action_prob = 0.5\n target_policy = policy(target_action_prob)\n behaviour_policy = policy(behaviour_action_prob)\n ratios_list = []\n returns_list = []\n values_list = []\n runs = 10\n episodes = 100000\n for i in range(runs):\n for j in range(episodes):\n returns = 0\n ratio = 1.0\n # run every episode\n while True:\n action = behaviour_policy.action()\n next_state, reward = env.step(action)\n returns += reward\n\n if action == ACTION_BACK:\n ratio = ratio * target_policy.left_prob / behaviour_policy.left_prob\n elif action == ACTION_END: #\n target_action_end_prob = 1 - target_policy.left_prob\n behaviour_action_end_prob = 1 - behaviour_policy.left_prob\n ratio = ratio * target_action_end_prob / behaviour_action_end_prob\n break\n\n if next_state == -1:\n break\n \n ratios_list.append(ratio)\n returns_list.append(returns)\n values_list.append(ratio * returns)\n values = np.add.accumulate(values_list)\n estimations = np.asarray(values) / np.arange(1, episodes + 1)\n plt.plot(estimations)\n \n ratios_list = []\n returns_list = []\n values_list = []\n \n # plt.ion()\n # plt.pause(0.1)\n # plt.ioff()\n plt.xlabel('Episodes (log scale)')\n plt.ylabel('Ordinary Importance Sampling')\n plt.xscale('log')\n\n plt.savefig('../images/figure_5_4.png')\n plt.show()\n plt.close()\n\n\nif __name__ == \"__main__\":\n figure_5_4()\n","sub_path":"rl/5_chapter/figure_5_5.py","file_name":"figure_5_5.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"289593052","text":"import logging\nimport os\nimport urllib.parse\nfrom datetime import timedelta\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom typing import List, MutableMapping, Tuple\n\nfrom background_task import background\nfrom background_task.models import Task\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom rest_framework.status import HTTP_207_MULTI_STATUS, HTTP_208_ALREADY_REPORTED\n\nfrom mail import requests as mail_requests\nfrom mail.enums import ChiefSystemEnum, ReceptionStatusEnum, SourceEnum\nfrom mail.libraries.builders import build_licence_data_mail\nfrom mail.libraries.data_processors import build_request_mail_message_dto\nfrom mail.libraries.lite_to_edifact_converter import EdifactValidationError\nfrom mail.libraries.routing_controller import check_and_route_emails, send, update_mail\nfrom mail.libraries.usage_data_decomposition import build_json_payload_from_data_blocks, split_edi_data_by_id\nfrom mail.models import LicenceIdMapping, LicencePayload, Mail, UsageData\nfrom mail.servers import smtp_send\n\nlogger = logging.getLogger(__name__)\n\n\nMANAGE_INBOX_TASK_QUEUE = \"manage_inbox_queue\"\nNOTIFY_USERS_TASK_QUEUE = \"notify_users_queue\"\nLICENCE_DATA_TASK_QUEUE = \"licences_updates_queue\"\nUSAGE_FIGURES_QUEUE = \"usage_figures_queue\"\nTASK_BACK_OFF = 3600 # Time, in seconds, to wait before scheduling a new task (used after MAX_ATTEMPTS is reached)\n\n\n# Send Usage Figures to LITE API\ndef get_lite_api_url():\n \"\"\"The URL for the licence usage callback, from the LITE_API_URL setting.\n\n If the configured URL has no 
path, use `/licences/hmrc-integration/`.\n \"\"\"\n url = settings.LITE_API_URL\n components = urllib.parse.urlparse(url)\n\n if components.path in (\"\", \"/\"):\n components = components._replace(path=\"/licences/hmrc-integration/\")\n url = urllib.parse.urlunparse(components)\n\n return url\n\n\n@background(queue=USAGE_FIGURES_QUEUE, schedule=0)\ndef send_licence_usage_figures_to_lite_api(lite_usage_data_id):\n \"\"\"Sends HMRC Usage figure updates to LITE\"\"\"\n\n logger.info(\"Preparing LITE UsageData [%s] for LITE API\", lite_usage_data_id)\n\n try:\n lite_usage_data = UsageData.objects.get(id=lite_usage_data_id)\n licences = UsageData.licence_ids\n except UsageData.DoesNotExist: # noqa\n _handle_exception(\n f\"LITE UsageData [{lite_usage_data_id}] does not exist.\",\n lite_usage_data_id,\n )\n return\n\n # Extract usage details of Licences issued from LITE\n _, data = split_edi_data_by_id(lite_usage_data.mail.edi_data, lite_usage_data)\n payload = build_json_payload_from_data_blocks(data)\n\n # We only process usage data for active licences so below error is unlikely\n if len(payload[\"licences\"]) == 0:\n logger.error(\"Licences is blank in payload for %s\", lite_usage_data, exc_info=True)\n return\n\n payload[\"usage_data_id\"] = lite_usage_data_id\n lite_api_url = get_lite_api_url()\n logger.info(\"Sending LITE UsageData [%s] figures for Licences [%s] to LITE API\", lite_usage_data_id, licences)\n\n try:\n lite_usage_data.lite_payload = payload\n lite_usage_data.save()\n\n response = mail_requests.put(\n lite_api_url,\n lite_usage_data.lite_payload,\n hawk_credentials=settings.HAWK_LITE_HMRC_INTEGRATION_CREDENTIALS,\n timeout=settings.LITE_API_REQUEST_TIMEOUT,\n )\n except Exception as exc: # noqa\n _handle_exception(\n f\"An unexpected error occurred when sending LITE UsageData [{lite_usage_data_id}] to LITE API -> \"\n f\"{type(exc).__name__}: {exc}\",\n lite_usage_data_id,\n )\n return\n\n if response.status_code not in [HTTP_207_MULTI_STATUS, HTTP_208_ALREADY_REPORTED]:\n _handle_exception(\n f\"An unexpected response was received when sending LITE UsageData [{lite_usage_data_id}] to \"\n f\"LITE API -> status=[{response.status_code}], message=[{response.text}]\",\n lite_usage_data_id,\n )\n return\n\n if response.status_code == HTTP_207_MULTI_STATUS:\n try:\n response, accepted_licences, rejected_licences = parse_response(response)\n except Exception as exc: # noqa\n _handle_exception(\n f\"An unexpected error occurred when parsing the response for LITE UsageData \"\n f\"[{lite_usage_data_id}] -> {type(exc).__name__}: {exc}\",\n lite_usage_data_id,\n )\n return\n save_response(lite_usage_data, accepted_licences, rejected_licences, response)\n\n logger.info(\"Successfully sent LITE UsageData [%s] to LITE API\", lite_usage_data_id)\n\n\ndef schedule_licence_usage_figures_for_lite_api(lite_usage_data_id):\n logger.info(\"Scheduling UsageData '%s' for LITE API\", lite_usage_data_id)\n task = Task.objects.filter(queue=USAGE_FIGURES_QUEUE, task_params=f'[[\"{lite_usage_data_id}\"], {{}}]')\n\n if task.exists():\n logger.info(\"UsageData '%s' has already been scheduled\", lite_usage_data_id)\n else:\n send_licence_usage_figures_to_lite_api(lite_usage_data_id)\n logger.info(\"UsageData '%s' has been scheduled\", lite_usage_data_id)\n\n\ndef parse_response(response) -> Tuple[MutableMapping, List[str], List[str]]:\n response = response.json()\n licences = response[\"licences\"]\n\n accepted_licences = [\n LicenceIdMapping.objects.get(lite_id=licence.get(\"id\")).reference\n for 
licence in licences[\"accepted\"]\n if licence.get(\"id\")\n ]\n rejected_licences = [\n LicenceIdMapping.objects.get(lite_id=licence.get(\"id\")).reference\n for licence in licences[\"rejected\"]\n if licence.get(\"id\")\n ]\n\n return response, accepted_licences, rejected_licences\n\n\ndef save_response(lite_usage_data: UsageData, accepted_licences, rejected_licences, response):\n lite_usage_data.lite_accepted_licences = accepted_licences\n lite_usage_data.lite_rejected_licences = rejected_licences\n lite_usage_data.lite_sent_at = timezone.now()\n lite_usage_data.lite_response = response\n\n if not lite_usage_data.has_spire_data:\n lite_usage_data.mail.status = ReceptionStatusEnum.REPLY_RECEIVED\n lite_usage_data.mail.save()\n\n lite_usage_data.save()\n\n\ndef schedule_max_tried_task_as_new_task(lite_usage_data_id):\n \"\"\"\n Used to schedule a max-tried task as a new task (starting from attempts=0);\n Abstracted from 'send_licence_usage_figures_to_lite_api' to enable unit testing of a recursive operation\n \"\"\"\n\n logger.warning(\n \"Maximum attempts of %s for LITE UsageData [%s] has been reached\", settings.MAX_ATTEMPTS, lite_usage_data_id\n )\n\n schedule_datetime = timezone.now() + timedelta(seconds=TASK_BACK_OFF)\n logger.info(\n \"Scheduling new task for LITE UsageData [%s] to commence at [%s]\", lite_usage_data_id, schedule_datetime\n )\n send_licence_usage_figures_to_lite_api(lite_usage_data_id, schedule=TASK_BACK_OFF) # noqa\n\n\ndef _handle_exception(message, lite_usage_data_id):\n error_message = f\"Failed to send LITE UsageData [{lite_usage_data_id}] to LITE API -> {message} \"\n\n try:\n task = Task.objects.get(queue=USAGE_FIGURES_QUEUE, task_params=f'[[\"{lite_usage_data_id}\"], {{}}]')\n except Task.DoesNotExist:\n logger.error(\"No task was found for UsageData [%s]\", lite_usage_data_id)\n else:\n # Get the task's current attempt number by retrieving the previous attempts and adding 1\n current_attempt = task.attempts + 1\n\n # Schedule a new task if the current task has been attempted MAX_ATTEMPTS times;\n # HMRC Integration tasks need to be resilient and keep retrying post-failure indefinitely.\n # This logic will make MAX_ATTEMPTS attempts to send licence changes according to the Django Background Task\n # Runner scheduling, then wait TASK_BACK_OFF seconds before starting the process again.\n if current_attempt >= settings.MAX_ATTEMPTS:\n schedule_max_tried_task_as_new_task(lite_usage_data_id)\n\n # Raise an exception\n # this will cause the task to be marked as 'Failed' and retried if there are retry attempts left\n raise Exception(error_message)\n\n\n@background(queue=LICENCE_DATA_TASK_QUEUE, schedule=0)\ndef send_licence_data_to_hmrc():\n \"\"\"Sends LITE (or ICMS) licence updates to HMRC\n\n Return: True if successful\n \"\"\"\n source = SourceEnum.ICMS if settings.CHIEF_SOURCE_SYSTEM == ChiefSystemEnum.ICMS else SourceEnum.LITE\n logger.info(f\"Sending {source} licence updates to HMRC\")\n\n if Mail.objects.exclude(status=ReceptionStatusEnum.REPLY_SENT).count():\n logger.info(\n \"Currently we are either waiting for a reply or next one is ready to be processed,\\n\"\n \"so we cannot send this update now and will be picked up in the next cycle\"\n )\n return\n\n try:\n with transaction.atomic():\n licences = LicencePayload.objects.filter(is_processed=False).select_for_update(nowait=True)\n\n if not licences.exists():\n logger.info(\"There are currently no licences to send\")\n return\n\n mail = build_licence_data_mail(licences, source)\n mail_dto = 
build_request_mail_message_dto(mail)\n licence_references = [licence.reference for licence in licences]\n logger.info(\n \"Created Mail [%s] with subject %s from licences [%s]\", mail.id, mail_dto.subject, licence_references\n )\n\n send(mail_dto)\n update_mail(mail, mail_dto)\n\n licences.update(is_processed=True)\n logger.info(\"Licence references [%s] marked as processed\", licence_references)\n\n except EdifactValidationError as err: # noqa\n raise err\n except Exception as exc: # noqa\n logger.error(\n \"An unexpected error occurred when sending %s licence updates to HMRC -> %s\",\n source,\n type(exc).__name__,\n exc_info=True,\n )\n else:\n logger.info(\"Successfully sent %s licences updates in Mail [%s] to HMRC\", source, mail.id)\n return True\n\n\n# Notify Users of Rejected Mail\n@background(queue=NOTIFY_USERS_TASK_QUEUE, schedule=0)\ndef notify_users_of_rejected_mail(mail_id, mail_response_date):\n \"\"\"If a rejected email is found, this task notifies users of the rejection\"\"\"\n\n logger.info(\"Notifying users of rejected Mail [%s, %s]\", mail_id, mail_response_date)\n\n try:\n multipart_msg = MIMEMultipart()\n multipart_msg[\"From\"] = settings.EMAIL_USER\n multipart_msg[\"To\"] = \",\".join(settings.NOTIFY_USERS)\n multipart_msg[\"Subject\"] = \"Mail rejected\"\n body = MIMEText(f\"Mail [{mail_id}] received at [{mail_response_date}] was rejected\")\n multipart_msg.attach(body)\n\n smtp_send(multipart_msg)\n except Exception as exc: # noqa\n error_message = (\n f\"An unexpected error occurred when notifying users of rejected Mail \"\n f\"[{mail_id}, {mail_response_date}] -> {type(exc).__name__}: {exc}\"\n )\n\n # Raise an exception\n # this will cause the task to be marked as 'Failed' and retried if there are retry attempts left\n raise Exception(error_message)\n else:\n logger.info(\"Successfully notified users of rejected Mail [%s, %s]\", mail_id, mail_response_date)\n\n\n# Manage Inbox\n\n\n@background(queue=MANAGE_INBOX_TASK_QUEUE, schedule=0)\ndef manage_inbox():\n \"\"\"Main task which scans inbox for SPIRE and HMRC emails\"\"\"\n\n logger.info(\"Polling inbox for updates\")\n\n try:\n check_and_route_emails()\n except Exception as exc: # noqa\n logger.error(\n \"An unexpected error occurred when polling inbox for updates -> %s\",\n type(exc).__name__,\n exc_info=True,\n )\n raise exc\n\n\n@background(queue=\"test_queue\", schedule=0)\ndef emit_test_file():\n test_file_path = os.path.join(settings.BASE_DIR, \".background-tasks-is-ready\")\n with open(test_file_path, \"w\") as test_file:\n test_file.write(\"OK\")\n","sub_path":"mail/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":12133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"309182676","text":"#!/usr/bin/env python3\n\"\"\"PetBG: Track pets' glucose levels.\n\nhttps://github.com/Effenberg0x0/PetBG\nAlvaro Leal , 2019\n\"\"\"\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QSize\nfrom PyQt5.QtWidgets import QCalendarWidget\nfrom PyQt5.QtWidgets import QCheckBox\nfrom PyQt5.QtWidgets import QDateTimeEdit\nfrom PyQt5.QtWidgets import QGridLayout\nfrom PyQt5.QtWidgets import QGroupBox\nfrom PyQt5.QtWidgets import QHeaderView\nfrom PyQt5.QtWidgets import QMenu\nfrom PyQt5.QtWidgets import QPushButton\nfrom PyQt5.QtWidgets import QTableWidget\nfrom PyQt5.QtWidgets import QTableWidgetItem\nfrom PyQt5.QtWidgets import QVBoxLayout\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import 
QAbstractItemView\nfrom PyQt5.QtWidgets import QHBoxLayout\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtWidgets import QTabWidget\nfrom PyQt5.QtWidgets import QSplitter\n\nimport csv\nimport json\nimport xlsxwriter\nimport datetime\n\nfrom glucometers.alphakat.driver import buffer_format\nfrom glucometers.alphakat.driver import Glucometer\nfrom ui.widgets.glucose_chart2 import GlucoseChart\nfrom ui.widgets.record_selector import RecordSelector\nfrom ui.windows.select_pet_window import SelectPetWindow\nfrom ui.windows.import_data_to_pet_window import ImportDataToPetWindow\n\n\nclass GmDataWindow(QDialog):\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.test_buffer = kwargs.get(\"test_buffer\", None)\n self.db = kwargs.get(\"database\", None)\n self.records = []\n\n self.setWindowTitle(\"PetBG - Glucometer data\")\n\n self.load()\n\n self.start_date_index = 0\n self.stop_date_index = 0\n\n if self.records:\n if len(self.records) > 0:\n self.stop_date_index = len(self.records) # Non-inclusive\n\n self.set_default_record_visibility()\n self.data_window_layout = QVBoxLayout()\n self.setLayout(self.data_window_layout)\n self.data_window_layout.addWidget(self.create_splitters())\n self.update_calendar_group()\n self.update()\n self.show()\n\n def sizeHint(self):\n return QSize(850, 650)\n\n def set_default_record_visibility(self):\n if self.records and len(self.records) > 0:\n for record in self.records:\n record.show = True\n\n def create_splitters(self):\n lower_splitter: QSplitter = QSplitter(Qt.Horizontal)\n lower_splitter.setHandleWidth(1)\n lower_splitter.setContentsMargins(0, 10, 0, 0)\n lower_splitter.addWidget(self.create_calendar_group())\n lower_splitter.addWidget(self.create_chart_control_group())\n lower_splitter.addWidget(self.create_actions_groupbox())\n\n lower_splitter.widget(0).setBaseSize(180, 160)\n lower_splitter.widget(0).setMinimumSize(180, 160)\n lower_splitter.widget(0).setMaximumSize(235, 160)\n\n lower_splitter.widget(2).setBaseSize(240, 160)\n lower_splitter.widget(2).setMinimumSize(240, 160)\n lower_splitter.widget(2).setMaximumSize(300, 160)\n\n right_splitter = QSplitter(Qt.Vertical)\n right_splitter.setHandleWidth(1)\n right_splitter.setContentsMargins(10, 10, 0, 0)\n right_splitter.addWidget(self.create_glucose_chart_groupbox())\n right_splitter.addWidget(lower_splitter)\n\n main_splitter = QSplitter(Qt.Horizontal)\n main_splitter.setHandleWidth(1)\n main_splitter.addWidget(self.create_tabs())\n main_splitter.addWidget(right_splitter)\n\n main_splitter.widget(0).setBaseSize(500, 150)\n main_splitter.widget(0).setMinimumSize(500, 150)\n # lower_splitter.widget(0).setMaximumSize(300, 160)\n\n return main_splitter\n\n def load(self):\n self.load_glucometer_data()\n self.setWindowTitle(\"PetBG - Glucometer data (no pet selected)\")\n\n def create_tabs(self):\n # Initialize tab screen\n tabs = QTabWidget(objectName=\"tabs\")\n tab_records = QWidget(objectName=\"records_tab\")\n tab_records.needs_update = True\n\n tab_curves = QWidget(objectName=\"curves_tab\")\n tab_curves.needs_update = True\n\n # Add tabs\n tabs.addTab(tab_records, \"Records\")\n tabs.addTab(tab_curves, \"Curves\")\n\n # Create records tab\n tab_records.layout = QGridLayout(self)\n records_table1_groupbox: QGroupBox = self.create_records_table1_groupbox() # noqa\n records_table1_groupbox.setBaseSize(350, -1)\n\n tab_records.layout.addWidget(records_table1_groupbox, 0, 0)\n tab_records.setLayout(tab_records.layout)\n\n # Create curves 
tab\n tab_curves.layout = QVBoxLayout()\n curves_tab_splitter = QSplitter(Qt.Vertical)\n curves_tab_splitter.setHandleWidth(1)\n curves_tab_splitter.addWidget(self.create_curves_table_groupbox())\n curves_tab_splitter.addWidget(self.create_records_table2_groupbox())\n # curves_tab_splitter.setStretchFactor(4, 1)\n tab_curves.layout.addWidget(curves_tab_splitter)\n tab_curves.setLayout(tab_curves.layout)\n\n # Update specific tables when tab is selected.\n tabs.currentChanged.connect(self.tab_changed)\n return tabs\n\n def tab_changed(self, tab_id):\n tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n if not tabs:\n print(\"tabs not found.\")\n return\n if tabs.currentWidget().needs_update:\n self.update()\n\n def create_records_table1_groupbox(self):\n title = \"Records (glucometer data)\"\n\n groupBox = QGroupBox(title, objectName=\"r_tab1_group\")\n\n records_table1 = QTableWidget(0, 5, objectName=\"records_table1\")\n records_table1.cellClicked.connect(self.table_clicked)\n records_table1.setEditTriggers(QAbstractItemView.NoEditTriggers)\n header: QHeaderView = records_table1.horizontalHeader()\n header.setStretchLastSection(1)\n # header.setSectionResizeMode(QHeaderView.Interactive)\n header.setSectionResizeMode(0, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QHeaderView.Stretch)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(3, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(4, QHeaderView.ResizeToContents)\n\n records_table1.setHorizontalHeaderLabels([\"Record id\", \"Date\", \"Time\",\n \"Level\", \"Select\"])\n\n vbox = QVBoxLayout()\n vbox.addWidget(records_table1)\n groupBox.setLayout(vbox)\n return groupBox\n\n def table_clicked(self, row, col):\n print(f\"{row}, {col}\")\n if col == 5:\n table: QTableWidget = self.findChild(QTableWidget,\n \"records_table1\")\n checkbox_item: QTableWidgetItem = table.item(row, 4)\n record_id_item: QTableWidgetItem = table.item(row, 0)\n record_id = int(record_id_item.text())\n print(\"id {0} state: {1}\"\n .format(record_id, checkbox_item.checkState()))\n\n def create_records_table2_groupbox(self):\n title = \"Records (glucometer data)\"\n\n groupBox = QGroupBox(title, objectName=\"r_tab2_group\")\n\n self.records_table2 = QTableWidget(0, 4, objectName=\"records_table2\")\n # self.records_table2.setBaseSize(350, -1)\n self.records_table2.setEditTriggers(QAbstractItemView.NoEditTriggers)\n header: QHeaderView = self.records_table2.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Interactive)\n header.setSectionResizeMode(0, QHeaderView.Stretch)\n header.setSectionResizeMode(1, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n\n self.records_table2.setHorizontalHeaderLabels([\"Date\", \"Time\", \"Level\",\n \"Select\"])\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.records_table2)\n groupBox.setLayout(vbox)\n return groupBox\n\n def create_curves_table_groupbox(self):\n title = \"Detected curves (glucometer data)\"\n\n groupBox = QGroupBox(title, objectName=\"curves_group\")\n curves_table = QTableWidget(0, 4, objectName=\"curves_table\")\n # curves_table.setBaseSize(350, -1)\n curves_table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n header = curves_table.horizontalHeader()\n header.setSectionResizeMode(0, QHeaderView.Stretch)\n header.setSectionResizeMode(1, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n\n curves_table.accessibleName = \"table\"\n 
curves_table.setHorizontalHeaderLabels([\"Date\", \"Time\", \"Level\",\n \"Select\"])\n\n vbox = QVBoxLayout()\n vbox.addWidget(curves_table)\n groupBox.setLayout(vbox)\n return groupBox\n\n def create_glucose_chart_groupbox(self):\n title = \"Glucose chart (glucometer data)\"\n groupBox = QGroupBox(title, objectName=\"glucose_chart_group\")\n\n if not self.records or len(self.records) == 0:\n self.chart = GlucoseChart(self.records) # noqa\n else:\n self.chart = GlucoseChart(self.records[self.start_date_index:self.stop_date_index]) # noqa\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.chart)\n groupBox.setLayout(vbox)\n return groupBox\n\n def load_glucometer_data(self):\n if self.test_buffer:\n glucometer = Glucometer(test_buffer=self.test_buffer)\n else:\n glucometer = Glucometer()\n glucometer.load_device_data()\n\n glucometer.record_count = glucometer._get_record_count()\n self.records = glucometer._get_records(buffer_format.first_record,\n glucometer.record_count)\n self.set_records_source()\n\n self.set_default_record_visibility()\n print(\"Loaded {0} records from the glucometer.\"\n .format(len(self.records)))\n\n def set_records_source(self):\n for record in self.records:\n record.source = 1 # TODO: Anything better than this aberration.\n\n @pyqtSlot(int, int, name=\"update_records_visibility\")\n def update_records_visibility(self, record_id, status):\n print(\"Updating visibility for record {0} to {1}\"\n .format(record_id, status))\n\n for record in self.records:\n if record.id == record_id:\n record.show = bool(status)\n # self.records[record_id].show = bool(status)\n # tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n # if not tabs:\n # print(\"Tabs not found.\")\n # return\n # tabs.currentWidget().needs_update = True\n self.chart.needs_update = True\n tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n if not tabs:\n print(\"tabs not found.\")\n return\n\n tabs.currentWidget().needs_update = True\n self.update()\n\n def update_records_table(self, table):\n while table.rowCount() > 0:\n table.removeRow(0)\n\n for i, record in enumerate(self.records[self.start_date_index:self.stop_date_index+1]): # noqa\n table.insertRow(i)\n date = (f\"{record.datetime:%m}/\"\n f\"{record.datetime:%d}/\"\n f\"{record.datetime:%y}\")\n\n time = f\"{record.datetime:%H}:{record.datetime:%M}\"\n\n r = RecordSelector(record, initial_state=record.show)\n r.state_changed.connect(self.update_records_visibility)\n table.setItem(i, 0, QTableWidgetItem(str(record.id)))\n table.setItem(i, 1, QTableWidgetItem(date))\n table.setItem(i, 2, QTableWidgetItem(time))\n table.setItem(i, 3, QTableWidgetItem(str(record.level)))\n table.setCellWidget(i, 4, r)\n\n def update_curves_table(self, table):\n while table.rowCount() > 0:\n table.removeRow(0)\n\n for i, record in enumerate(self.records[self.start_date_index:self.stop_date_index]): # noqa\n table.insertRow(i)\n date = \"{0}/{1}/{2}\".format(record.datetime.month,\n record.datetime.day,\n record.datetime.year)\n time = \"{0}:{1}\".format(record.datetime.hour,\n record.datetime.minute)\n\n table.setItem(i, 0, QTableWidgetItem(date))\n table.setItem(i, 1, QTableWidgetItem(time))\n table.setItem(i, 2, QTableWidgetItem(record.level))\n\n checkbox_cell_layout = QHBoxLayout()\n checkbox_cell_layout.setAlignment(Qt.AlignCenter)\n checkbox_cell_layout.setContentsMargins(0, 0, 0, 0)\n checkbox_cell_layout.addWidget(QCheckBox())\n checkbox_cell = QWidget()\n checkbox_cell.setLayout(checkbox_cell_layout)\n\n table.setCellWidget(i, 3, checkbox_cell)\n\n def 
create_calendar_group(self):\n groupBox = QGroupBox(\"Filter by date\", objectName=\"calendar_groupbox\")\n groupBox.setCheckable(True)\n groupBox.setChecked(False)\n\n start_date = QDateTimeEdit(objectName=\"start_date\")\n start_date.setCalendarPopup(True)\n start_calwidget: QCalendarWidget = start_date.calendarWidget()\n start_calwidget.setDateEditEnabled(True)\n start_date.dateTimeChanged.connect(self.set_min_date_index)\n\n stop_date = QDateTimeEdit(objectName=\"stop_date\")\n stop_date.setReadOnly(False)\n stop_date.setCalendarPopup(True)\n stop_calwidget: QCalendarWidget = stop_date.calendarWidget()\n stop_calwidget.setDateEditEnabled(True)\n stop_date.dateTimeChanged.connect(self.set_max_date_index)\n\n vbox = QVBoxLayout()\n # vbox.setContentsMargins(20, 20, 20, 20)\n vbox.addWidget(start_date)\n vbox.addWidget(stop_date)\n groupBox.setLayout(vbox)\n groupBox.clicked.connect(self.toggle_calendar)\n return groupBox\n\n def update_calendar_group(self):\n start_date_widget: QDateTimeEdit = self.findChild(QDateTimeEdit,\n \"start_date\")\n if not start_date_widget:\n print(\"Could not find start_date_widget.\")\n return\n\n if self.records and len(self.records) > 0:\n start_date_widget.setDateTime(self.records[0].datetime)\n start_date_widget.setMinimumDate(self.records[0].datetime)\n start_date_widget.setMaximumDate(self.records[-1].datetime)\n\n else:\n start_date_widget.setDateTime(datetime.datetime.now())\n start_date_widget.setMinimumDate(datetime.datetime.now())\n start_date_widget.setMaximumDate(datetime.datetime.now())\n\n stop_date_widget: QDateTimeEdit = self.findChild(QDateTimeEdit,\n \"stop_date\")\n if not stop_date_widget:\n print(\"Could not find stop_date_widget.\")\n return\n\n if self.records and len(self.records) > 0:\n stop_date_widget.setDateTime(self.records[-1].datetime)\n stop_date_widget.setMinimumDate(self.records[0].datetime)\n stop_date_widget.setMaximumDate(self.records[-1].datetime)\n\n else:\n stop_date_widget.setDateTime(datetime.datetime.now())\n stop_date_widget.setMinimumDate(datetime.datetime.now())\n stop_date_widget.setMaximumDate(datetime.datetime.now())\n\n def toggle_calendar(self):\n calendar_group = self.findChild(QGroupBox, \"calendar_groupbox\")\n if not calendar_group:\n print(\"Calendar group not found.\")\n return\n\n if calendar_group.isChecked():\n # Filter by date was ENABLED: Update start/stop calendar widgets to\n # show records 0 and -1.\n print(\"Calendar is checked\")\n self.start_date_index = 0\n self.stop_date_index = 0\n if self.records:\n if len(self.records) > 0:\n self.stop_date_index = len(self.records)\n\n start: QDateTimeEdit = calendar_group.findChild(QDateTimeEdit,\n \"start_date\")\n if not start:\n print(\"Could not find start_date widget.\")\n return\n\n start.setDateTime(self.records[0].datetime)\n start.setMinimumDateTime(self.records[0].datetime)\n start.setMaximumDateTime(self.records[-1].datetime)\n\n stop: QDateTimeEdit = calendar_group.findChild(QDateTimeEdit,\n \"stop_date\")\n if not stop:\n print(\"Could not find stop_date widget.\")\n return\n\n stop.setDateTime(self.records[-1].datetime)\n stop.setMinimumDateTime(self.records[0].datetime)\n stop.setMaximumDateTime(self.records[-1].datetime)\n self.update()\n else:\n print(\"Calendar is unchecked.\")\n # Filter by date was disabled: Reset start/stop controls AND show\n # all records.\n\n self.start_date_index = 0\n self.stop_date_index = len(self.records)\n tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n if not tabs:\n print(\"tabs not 
found.\")\n return\n tabs.currentWidget().needs_update = True\n self.chart.needs_update = True\n self.update_calendar_group()\n self.update()\n\n @pyqtSlot()\n def set_min_date_index(self):\n start_date_widget: QDateTimeEdit = self.findChild(QDateTimeEdit,\n \"start_date\")\n if not start_date_widget:\n print(\"Could not find start_date calendar control.\")\n return\n start_date = start_date_widget.dateTime()\n\n print(\"Received min_date: {0}\".format(start_date.date()))\n start_index = 0\n if not self.records:\n print(\"No records loaded.\")\n return\n\n for record in self.records:\n if start_index == len(self.records):\n break\n if record.datetime < start_date.date():\n start_index += 1\n else:\n break\n # print(\"Start date: {0}. Index: {1}\"\n # .format(self.records[start_index].date, start_index))\n\n # Make sure stop index is >= start_index.\n self.start_date_index = start_index\n if self.stop_date_index < start_index:\n self.stop_date_index = start_index\n\n tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n if not tabs:\n print(\"tabs not found.\")\n return\n tabs.currentWidget().needs_update = True\n self.chart.needs_update = True\n self.update()\n\n @pyqtSlot()\n def set_max_date_index(self):\n stop_date_widget: QDateTimeEdit = self.findChild(QDateTimeEdit,\n \"stop_date\")\n if not stop_date_widget:\n print(\"Could not find stop_date calendar control.\")\n return\n stop_date = stop_date_widget.dateTime()\n\n print(\"Received max_date: {0}\".format(stop_date.date()))\n stop_index = 0\n for record in self.records:\n if stop_index == len(self.records) - 1:\n break\n if record.datetime <= stop_date.date():\n stop_index += 1\n else:\n break\n # print(\"Stop date: {0}. Index: {1}\"\n # .format(self.records[stop_index].date))\n\n # Make sure start index is <= stop_index.\n self.stop_date_index = stop_index\n if self.start_date_index > stop_index:\n self.start_date_index = stop_index\n\n tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n if not tabs:\n print(\"tabs not found.\")\n return\n tabs.currentWidget().needs_update = True\n self.chart.needs_update = True\n self.update()\n\n @pyqtSlot()\n def update(self):\n # The values set at self.start_date_index, self.stop_date_index and\n # self.excLude_indexes are assumed to be true and all other\n # controls must be updated according to them.\n\n tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n if not tabs:\n print(\"tabs not found.\")\n return\n\n if tabs.currentWidget().needs_update:\n tables = tabs.findChildren(QTableWidget)\n if not tables:\n print(\"Tables not found.\")\n return\n table: QTableWidget = None\n for table in tables:\n if \"curves\" in table.objectName():\n self.update_curves_table(table)\n else:\n self.update_records_table(table)\n\n tabs.currentWidget().needs_update = False\n\n if self.chart.needs_update:\n self.chart.update_series(self.records[self.start_date_index:self.stop_date_index]) # noqa\n self.chart.needs_update = False\n\n @pyqtSlot()\n def reload_data_from_glucometer(self):\n self.selected_pet_id = None\n self.load()\n\n # Reset date filters and calendar widgets\n self.start_date_index = 0\n self.stop_date_index = -1\n calendar_group: QGroupBox = self.findChild(QGroupBox,\n \"calendar_groupbox\")\n if not calendar_group:\n print(\"Calendar group not found.\")\n return\n calendar_group.setChecked(False)\n\n # Resets chart\n chart_group: QGroupBox = self.findChild(QGroupBox,\n \"glucose_chart_group\")\n chart_group.setTitle(\"Glucose Level X Date\")\n self.chart.needs_update = True\n\n # 
Reset tabs groupboxes labels\n        tabs: QTabWidget = self.findChild(QTabWidget, \"tabs\")\n        if not tabs:\n            print(\"Tabs not found.\")\n            return\n\n        r_tab1_group = self.findChild(QGroupBox, \"r_tab1_group\")\n        r_tab1_group.setTitle(\"Records (glucometer data)\")\n\n        r_tab2_group = self.findChild(QGroupBox, \"r_tab2_group\")\n        r_tab2_group.setTitle(\"Records (glucometer data)\")\n\n        curves_group = self.findChild(QGroupBox, \"curves_group\")\n        curves_group.setTitle(\"Detected curves (glucometer data)\")\n\n        chart_group = self.findChild(QGroupBox, \"glucose_chart_group\")\n        chart_group.setTitle(\"Glucose chart (glucometer data)\")\n\n        tabs.widget(0).needs_update = True\n        tabs.widget(1).needs_update = True\n        self.chart.needs_update = True\n        self.update()\n\n    def create_actions_groupbox(self):\n        groupBox = QGroupBox(\"&Actions\")\n        vbox = QVBoxLayout()\n\n        load_gc_title = \"&Reload Data From Glucometer\"\n        load_data_button = QPushButton(load_gc_title)\n        load_data_button.clicked.connect(self.reload_data_from_glucometer)\n        vbox.addWidget(load_data_button)\n\n        export_data_button = QPushButton(\"Ex&port Data\")\n        menu = QMenu(self)\n        menu.addAction(\"&Excel (xls)\", self.export_xls)\n        menu.addAction(\"&CSV\", self.export_csv)\n        menu.addAction(\"&JSON\", self.export_json)\n        export_data_button.setMenu(menu)\n        vbox.addWidget(export_data_button)\n\n        import_data_button = QPushButton(\"&Import Data to pet\")\n        import_data_button.clicked.connect(self.import_to_pet)\n        vbox.addWidget(import_data_button)\n\n        back_button = QPushButton(\"&Back\")\n        back_button.clicked.connect(self.back)\n        vbox.addWidget(back_button)\n\n        vbox.addStretch(1)\n        groupBox.setLayout(vbox)\n\n        return groupBox\n\n    @pyqtSlot()\n    def import_to_pet(self):\n        pet_select_window: QDialog = SelectPetWindow(self.db)\n        pet_id = pet_select_window.exec()\n        if not pet_id:\n            print(\"Failed to obtain pet_id\")\n            return\n        import_data_to_pet_window: QDialog = ImportDataToPetWindow(self.db,\n                                                                   pet_id,\n                                                                   self.records)\n        ret = import_data_to_pet_window.exec()\n        print(\"Received code {0}\".format(ret))\n\n        if ret == 1:\n            self.done(pet_id)\n\n    def export_json(self):\n        data = []\n        for record in self.records:\n            data.append({\"level\": record.level,\n                         \"date\": record.datetime.isoformat()})\n        json_data = json.dumps(data)\n        filename = QFileDialog.getSaveFileName(self,\n                                               \"Save File\",\n                                               \"untitled01.json\",\n                                               \"JSON files (*.json)\")\n        if filename:\n            print(filename[0])\n            with open(filename[0], \"w\") as fp:\n                fp.write(json_data)\n\n    def export_xls(self):\n        data = []\n        data.append([\"level\", \"date\"])\n        for record in self.records:\n            data.append([record.level,\n                         record.datetime.isoformat()])\n        filename = QFileDialog.getSaveFileName(self,\n                                               \"Save File\",\n                                               \"untitled01.xlsx\",\n                                               \"Excel files (*.xlsx)\")\n        if filename:\n            print(filename[0])\n\n            workbook = xlsxwriter.Workbook(filename[0])\n            worksheet = workbook.add_worksheet()\n            for r, row in enumerate(data):\n                for c in range(len(row)):\n                    worksheet.write(r, c, row[c])\n            workbook.close()\n\n    def export_xml(self):\n        raise NotImplementedError\n\n    def export_csv(self):\n        data = []\n        data.append([\"level\", \"date\"])\n        for record in self.records:\n            data.append([record.level,\n                         record.datetime.isoformat()])\n        filename = QFileDialog.getSaveFileName(self,\n                                               \"Save File\",\n                                               \"untitled01.csv\",\n                                               \"Comma Separated Values files (*.csv)\") # noqa\n        if filename:\n            print(filename[0])\n            with open(filename[0], \"w\") as fp:\n                csv_writer = csv.writer(fp, dialect=\"excel\")\n                csv_writer.writerows(data)\n\n    @pyqtSlot()\n    def back(self):\n        
self.close()\n\n def create_chart_control_group(self):\n groupBox = QGroupBox(\"Chart controls\", objectName=\"chart_controls\")\n groupBox.setCheckable(True)\n\n vbox = QVBoxLayout()\n groupBox.setLayout(vbox)\n return groupBox\n","sub_path":"ui/windows/gm_data_window.py","file_name":"gm_data_window.py","file_ext":"py","file_size_in_byte":27059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"423424045","text":"\nimport os\nimport yaml\nimport logging\n\n\nclass Manifest(object):\n \"\"\"\n Cloud Foundry utilizes a MANIFEST.yml file as the source\n of application configuration. As we setup services and\n run our applications the manifest is a place to store\n important configuration details.\n \"\"\"\n def __init__(self, manifest_path, app_name='my-predix-app', debug=False):\n self.manifest_path = os.path.expanduser(manifest_path)\n self.app_name = app_name\n\n # App may have a client\n self.client_id = None\n self.client_secret = None\n\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Read or Generate a manifest file\n if os.path.exists(self.manifest_path):\n manifest = self.read_manifest()\n else:\n manifest = self.create_manifest()\n\n # Probably always want manifest loaded into environment\n self.set_os_environ()\n\n def read_manifest(self):\n \"\"\"\n Read an existing manifest.\n \"\"\"\n with open(self.manifest_path, 'r') as input_file:\n self.manifest = yaml.safe_load(input_file)\n if 'env' not in self.manifest:\n self.manifest['env'] = {}\n if 'services' not in self.manifest:\n self.manifest['services'] = []\n\n input_file.close()\n\n def create_manifest(self):\n \"\"\"\n Create a new manifest and write it to\n disk.\n \"\"\"\n self.manifest = {}\n self.manifest['applications'] = [{'name': self.app_name}]\n self.manifest['services'] = []\n self.manifest['env'] = {}\n\n self.write_manifest()\n\n def create_manifest_from_space(self):\n \"\"\"\n Populate a manifest file generated from details from the\n cloud foundry space environment.\n \"\"\"\n import predix.admin.cf.spaces\n space = predix.admin.cf.spaces.Space()\n\n summary = space.get_space_summary()\n for instance in summary['services']:\n service_type = instance['service_plan']['service']['label']\n name = instance['name']\n if service_type == 'predix-uaa':\n import predix.admin.uaa\n uaa = predix.admin.uaa.UserAccountAuthentication(name=name)\n uaa.add_to_manifest(self.manifest_path)\n elif service_type == 'predix-acs':\n import predix.admin.acs\n acs = predix.admin.acs.AccessControl(name=name)\n acs.add_to_manifest(self.manifest_path)\n elif service_type == 'predix-asset':\n import predix.admin.asset\n asset = predix.admin.asset.Asset(name=name)\n asset.add_to_manifest(self.manifest_path)\n elif service_type == 'predix-timeseries':\n import predix.admin.timeseries\n timeseries = predix.admin.timeseries.TimeSeries(name=name)\n timeseries.add_to_manifest(self.manifest_path)\n elif service_type == 'predix-blobstore':\n import predix.admin.blobstore\n blobstore = predix.admin.blobstore.BlobStore(name=name)\n blobstore.add_to_manifest(self.manifest_path)\n elif service_type == 'us-weather-forecast':\n import predix.admin.weather\n weather = predix.admin.weather.WeatherForecast(name=name)\n weather.add_to_manifest(self.manifest_path)\n else:\n logging.warn(\"Unsupported service type: %s\" % service_type)\n\n def write_manifest(self):\n \"\"\"\n Write manifest to disk.\n \"\"\"\n with open(self.manifest_path, 'w') as output_file:\n yaml.safe_dump(self.manifest, 
output_file,\n                           default_flow_style=False, explicit_start=True)\n            output_file.close()\n\n    def add_env_var(self, key, value):\n        \"\"\"\n        Add the given key / value as another environment\n        variable.\n        \"\"\"\n        self.manifest['env'][key] = value\n\n    def add_service(self, service_name):\n        \"\"\"\n        Add the given service to the manifest.\n        \"\"\"\n        if service_name not in self.manifest['services']:\n            self.manifest['services'].append(service_name)\n\n    def set_os_environ(self):\n        \"\"\"\n        Will load any environment variables found in the\n        manifest file into the current process for use\n        by applications.\n\n        When apps run in cloud foundry this would happen\n        automatically.\n        \"\"\"\n        for key in self.manifest['env'].keys():\n            os.environ[key] = self.manifest['env'][key]\n\n    def get_client_id(self):\n        \"\"\"\n        Return the client id that should have all the\n        needed scopes and authorities for the services\n        in this manifest.\n        \"\"\"\n        key = 'PREDIX_SECURITY_UAA_CLIENT_ID'\n        if key not in self.manifest['env']:\n            raise ValueError(\"%s undefined in manifest.\" % key)\n\n        self.client_id = self.manifest['env'][key]\n        return self.client_id\n\n    def get_client_secret(self):\n        \"\"\"\n        Return the client secret that should correspond with\n        the client id.\n        \"\"\"\n        key = 'PREDIX_SECURITY_UAA_CLIENT_SECRET'\n        if key not in self.manifest['env']:\n            raise ValueError(\"%s must be added to manifest.\" % key)\n\n        self.client_secret = self.manifest['env'][key]\n        return self.client_secret\n\n    def create_timeseries(self):\n        \"\"\"\n        Creates an instance of the Time Series Service.\n        \"\"\"\n        import predix.admin.timeseries\n        ts = predix.admin.timeseries.TimeSeries()\n        ts.create()\n\n        client_id = self.get_client_id()\n        if client_id:\n            ts.grant_client(client_id)\n\n        ts.add_to_manifest(self.manifest_path)\n        return ts\n\n    def get_timeseries(self, *args, **kwargs):\n        \"\"\"\n        Returns an instance of the Time Series Service.\n        \"\"\"\n        import predix.data.timeseries\n        ts = predix.data.timeseries.TimeSeries(*args, **kwargs)\n        return ts\n\n    def create_asset(self):\n        \"\"\"\n        Creates an instance of the Asset Service.\n        \"\"\"\n        import predix.admin.asset\n        asset = predix.admin.asset.Asset()\n        asset.create()\n\n        client_id = self.get_client_id()\n        if client_id:\n            asset.grant_client(client_id)\n\n        asset.add_to_manifest(self.manifest_path)\n        return asset\n\n    def get_asset(self):\n        \"\"\"\n        Returns an instance of the Asset Service.\n        \"\"\"\n        import predix.data.asset\n        asset = predix.data.asset.Asset()\n        return asset\n\n    def create_uaa(self, admin_secret):\n        \"\"\"\n        Creates an instance of UAA Service.\n        \"\"\"\n        import predix.admin.uaa\n        uaa = predix.admin.uaa.UserAccountAuthentication()\n        if not uaa.exists():\n            uaa.create(admin_secret)\n        uaa.add_to_manifest(self.manifest_path)\n        return uaa\n\n    def create_client(self, client_id, client_secret):\n        \"\"\"\n        Create a client and add it to the manifest.\n        \"\"\"\n        import predix.admin.uaa\n        uaa = predix.admin.uaa.UserAccountAuthentication()\n        uaa.create_client(client_id, client_secret)\n        uaa.add_client_to_manifest(client_id, client_secret,\n                                   self.manifest_path)\n\n    def get_uaa(self):\n        \"\"\"\n        Returns an instance of the UAA Service.\n        \"\"\"\n        import predix.security.uaa\n        uaa = predix.security.uaa.UserAccountAuthentication()\n        return uaa\n\n    def create_acs(self):\n        \"\"\"\n        Creates an instance of the Access Control Service.\n        \"\"\"\n        import predix.admin.acs\n        acs = predix.admin.acs.AccessControl()\n        acs.create()\n\n        client_id = self.get_client_id()\n        if client_id:\n            acs.grant_client(client_id)\n\n        acs.add_to_manifest(self.manifest_path)\n        return acs\n\n    def get_acs(self):\n        \"\"\"\n        Returns an instance of the Access Control Service.\n        \"\"\"\n        import predix.security.acs\n        acs = predix.security.acs.AccessControl()\n        return acs\n\n    def create_weather(self):\n        \"\"\"\n        Creates an instance of the Weather Forecast Service.\n        \"\"\"\n        import predix.admin.weather\n        weather = predix.admin.weather.WeatherForecast()\n        weather.create()\n\n        client_id = self.get_client_id()\n        if client_id:\n            weather.grant_client(client_id)\n\n        weather.add_to_manifest(self.manifest_path)\n        return weather\n\n    def get_weather(self):\n        \"\"\"\n        Returns an instance of the Weather Service.\n        \"\"\"\n        import predix.data.weather\n        weather = predix.data.weather.WeatherForecast()\n        return weather\n\n    def create_blobstore(self):\n        \"\"\"\n        Creates an instance of the BlobStore Service.\n        \"\"\"\n        import predix.admin.blobstore\n        blobstore = predix.admin.blobstore.BlobStore()\n        blobstore.create()\n\n        blobstore.add_to_manifest(self.manifest_path)\n        return blobstore\n\n    def get_blobstore(self):\n        import predix.data.blobstore\n        blobstore = predix.data.blobstore.BlobStore()\n        return blobstore\n","sub_path":"predix/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"198535203","text":"import os\nimport you_get\nimport pymongo\nimport urllib.parse\nimport time\nimport datetime\n\nhtml_folder = '/home/cwyalpha/html/acfun/'\nacfun_site = 'http://www.acfun.tv/'\nMONGODB_SERVER = \"localhost\"\nMONGODB_PORT = 27017\nMONGODB_DB = \"youku\"\nMONGODB_VIDEO_COLLECTION = \"youku_video_page\"\nthres = 30\ndays = 7\ndel_days = 14\nconnection = pymongo.MongoClient(MONGODB_SERVER, MONGODB_PORT)\ndb = connection[MONGODB_DB]\ndb.authenticate('youku', '318yazhang')\n\nvideo_collection = db[MONGODB_VIDEO_COLLECTION]\nvideo_collection.ensure_index([('dyn.views', pymongo.DESCENDING)])\nvideo_collection.ensure_index([('dyn.favors', pymongo.DESCENDING)])\nvideo_collection.ensure_index([('dyn.comments', pymongo.DESCENDING)])\nvideo_collection.ensure_index([('time', pymongo.DESCENDING)])\n\ndef id2folder(_id):\n    s = str(_id)\n    if len(s) > 3:\n        return os.path.join(s[:-3], s[-3:])\n    else:\n        return s\n\ndef del_video():\n    start_time = datetime.datetime.now() - datetime.timedelta(days=abs(int(del_days)))\n    for video in video_collection.find({'$and':[{'html.timestamp':{'$lt':start_time}}, {'video_download_status':2}]}):\n        try:\n            print('del ', video['video_filename'])\n            os.remove(video['video_filename'])\n\n            video_collection.update({\"_id\" : video['_id']}, \\\n                {'$set':{'video_download_status':3}})\n        except:\n            print(video['_id'], 'not deleted')\n\ndef findAndDownload():\n    retries = 0\n    video = None\n    while 1:\n        try:\n            del_video()\n            start_time = datetime.datetime.now() - datetime.timedelta(days=abs(int(days)))\n            cursor = video_collection.find({'$and':[{'$or':[{'video_failed':{'$exists':False}},\\\n                                                            {'video_failed':0}]}, \\\n                                                    {'$or':[{'video_download_status':{'$exists':False}}, \\\n                                                            {'video_download_status':0}, \\\n                                                            {'video_download_status':1}]}, \\\n                                                    {'time':{'$gte':start_time}}]})\\\n                                     .sort([('dyn.favors', pymongo.DESCENDING)])\n\n            video = None\n            video = cursor.__next__()\n            print(video['dyn'][-1]['favors'])\n            if int(video['dyn'][-1]['favors']) < thres:\n                #pass\n                continue\n            href = video['href']\n            url = urllib.parse.urljoin(acfun_site, href)\n            print(url)\n            if retries > 5:\n                
video_collection.update({\"_id\" : video['_id']}, \\\n {'$set':{'video_failed':1}})\n retries = 0\n continue\n folder = os.path.join(html_folder, id2folder(video['_id']))\n folderInMongo = id2folder(video['_id'])\n if not os.path.exists(folder):\n os.makedirs(folder)\n video_collection.update({\"_id\" : video['_id']}, \\\n {'$set':{'video_download_status':1}})\n you_get.acfun_download(url = url, output_dir = folder)\n video_filename = None\n for filename in os.listdir(str(folder)):\n if filename.split('.')[-1] in set(['hd2', 'mp4', 'flv', '3gp', \\\n 'f4v', 'asf', 'wmv', 'mp3', 'mp3']):\n video_filename = os.path.join(folderInMongo, filename)\n break\n print(video_filename)\n if video_filename != None:\n video_collection.find_and_modify({\"_id\" : video['_id']}, \\\n update={'$set':{'video_download_status':2, \\\n 'video_filename':video_filename}}, \\\n upsert=True, new=True)\n json_filename = None\n for filename in os.listdir(str(folder)):\n if filename.split('.')[-1] in set(['json']):\n json_filename = os.path.join(folderInMongo, filename)\n break\n print(json_filename)\n if json_filename != None:\n video_collection.find_and_modify({\"_id\" : video['_id']}, \\\n update={'$set':{'json_filename':video_filename}}, \\\n upsert=True, new=True)\n except (KeyboardInterrupt, SystemExit):\n if video:\n video_collection.update({\"_id\" : video['_id']}, \\\n {'$set':{'video_download_status':0}})\n break\n except:\n if video:\n retries += 1\n video_collection.update({\"_id\" : video['_id']}, \\\n {'$set':{'video_download_status':0}})\n\n#findAndDownload()\ndel_video()\n\n","sub_path":"crawlerScript/AcFun/video/del.py","file_name":"del.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"168882270","text":"import requests, bs4, os\nfrom lxml import html\n\npage_num = 5\nusername = 'XXX@XXX.com'\npassword = 'AAABBBCCC'\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; '\n 'Intel Mac OS X 10_11_2) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/47.0.2526.111 Safari/537.36'\n}\n\nsession = requests.Session()\n\nresponse = session.get('https://www.tumblr.com/login')\nr = html.fromstring(response.text)\nform_key1 = r.xpath(\"//meta[@id='tumblr_form_key']/@content\")[0]\nform_key2 = r.xpath(\"//input[@name='form_key']\")[0].value\n\npayload = {\n 'user[email]': username,\n 'user[password]': password,\n 'tumblelog[name]': '',\n 'user[age]': '',\n 'context': 'other',\n 'version': 'STANDARD',\n 'follow': '',\n 'http_referer': 'https://www.tumblr.com/',\n 'form_key': form_key1,\n 'seen_suggestion': '0',\n 'used_suggestion': '0',\n 'used_auto_suggestion': '0',\n 'about_tumblr_slide': '',\n 'random_username_suggestions': '[\"DarkGlitterCollective\", \"ShinyCrusadeWolf\", \"ZanyStrawberryGlitter\", \"HerPainterNacho\",\"TenderlySpookyFlower\"]',\n}\n\nsession.post('https://www.tumblr.com/login', headers=headers, data=payload)\n\npage = session.get('https://www.tumblr.com/dashboard')\nsoup = bs4.BeautifulSoup(page.text, features='html5lib')\n\nfor j in range(0, page_num):\n\n for i in soup.select('.post_media_photo'):\n # download every image in the post list\n print(i.get('width'), i.get('height'), i.get('src'))\n\n try:\n img_url = i.get('src')\n # Download the image.\n print('Downloading image %s...' 
% (img_url))\n            res = requests.get(img_url)\n            res.raise_for_status()\n        except requests.exceptions.MissingSchema:\n            # skip this image\n            continue\n\n        try:\n            # make folder for every page\n            folder = os.path.join('tumblr', str(j))\n            os.makedirs(folder, exist_ok=True)\n\n            print('Saving image:', folder, os.path.basename(img_url))\n\n            imageFile = open(os.path.join(folder, os.path.basename(img_url)), 'wb')\n        except FileExistsError:\n            continue\n\n        for chunk in res.iter_content():\n            imageFile.write(chunk)\n\n        imageFile.close()\n\n    # get next page\n    next_page_link = 'https://www.tumblr.com' + soup.select('#next_page_link')[0].get('href')\n    page = session.get(next_page_link)\n    soup = bs4.BeautifulSoup(page.text, features='html5lib')\n","sub_path":"all-gists/faf747ec5406997683ee/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"283941803","text":"import math\nfrom math import *\n\nf = eval(\"lambda x: \" + input(\"Enter the function: \"))\ndf = eval(\"lambda x: \" + input(\"Enter the derivative of the function: \"))\ndf2 = eval(\"lambda x: \" + input(\"Enter the second derivative of the function: \"))\ntol = float(input(\"Enter the tolerance: \"))\nx0 = float(input(\"Enter x0: \"))\nniter = float(input(\"Enter the maximum number of iterations: \"))\n\nfx = f(x0)\ndfx = df(x0)\ndfx2 = df2(x0)\ncont = 0\nerr = tol + 1\nprint(\"\"\"\nMultiple Roots\n\nResults table:\n\n|i|      xi      |     f(xi)     |       E       |\n\"\"\")\n\n\nwhile (err > tol) and (fx != 0) and (dfx != 0) and (dfx2 != 0) and (cont < niter):\n    if err == tol + 1:\n        print(f\" {cont} {x0:.10e} {fx:.10e}\")\n    else:\n        print(f\" {cont} {x0:.10e} {fx:.10e} {err:.10e}\")\n    x1 = x0 - ((fx*dfx)/((dfx)**2-(fx*dfx2)))\n    fx = f(x1)\n    dfx = df(x1)\n    dfx2 = df2(x1)\n    err = abs(x1 - x0)\n    x0 = x1\n    cont += 1\nprint(f\" {cont} {x0:.10e} {fx:.10e} {err:.10e}\")\nif fx == 0:\n    print(f\"{x0} is a root\")\nelif err < tol:\n    print(f\"{x1} is an approximation to a root with tolerance:\", tol)\nelif dfx == 0 or dfx2 == 0:\n    print(f\"{x1} is a possible multiple root\")\nelse:\n    print(f\"Failed after {niter} iterations\")\n","sub_path":"analisis_numerico/simple_scripts/raicesMultiples.py","file_name":"raicesMultiples.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"159763876","text":"# -*- coding: utf-8 -*-\n\nimport collections\nimport cProfile\nimport functools\nimport math\nimport operator\nimport os\nimport shutil\nimport sys\nimport timeit\nimport typing\n\n\nassert sys.version_info[:2] >= (3, 7)\n\n\nDICE_FACES = 6\nLRU_CACHE_SIZE = int(1e6)\nMAX_SUBSEGMENT_FIRST_PASS = 110  # Drop large values as they are generally not needed and exacerbate the combinatorics\nSUBSOLUTION_CACHE = {}\n\n# These are used for rendering formulas in human readable formats.\nMODE_FORMATS = [\"{a} + {b}\", \"{a} - {b}\", \"{b} - {a}\", \"{a} * {b}\", \"{a} / {b}\", \"{b} / {a}\"]\nMODE_PRIORITY = [0, 1, 1, 2, 3, 3]\nMAX_PRIORITY = max(MODE_PRIORITY) + 1\n\nTARGETS = [(3, 5, 7), (11, 13, 17), (19, 23, 29), (31, 37, 41), (43, 47, 53), (59, 61, 67), (71, 73, 79),\n           (83, 89, 97), (101, 103, 107)]\nTARGET_SETS = [set(i) for i in TARGETS]\nOUTPUT_DIR = os.path.join(os.getcwd(), 
f\"results_{DICE_FACES}_faces\")\n\n\n@functools.lru_cache(maxsize=100)\ndef fact(x):\n # type: (int) -> int\n \"\"\"Wrap math.factorial in a cache since it will be repeatedly called for small values.\n \"\"\"\n return math.factorial(x)\n\n\ndef vector_to_indexed_counts(x):\n # type: (typing.Iterable) -> (typing.Tuple[int], typing.Tuple[int])\n \"\"\"Convert a standard vector of integers to a compact and representation.\n \"\"\"\n count_dict = collections.defaultdict(int)\n for i in x:\n count_dict[i] += 1\n\n keys = sorted(count_dict.keys())\n return tuple(keys), tuple(count_dict[i] for i in keys)\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef partition_counts(counts):\n # type: (tuple) -> typing.List[tuple]\n \"\"\"Returns all possible partitions of a vector.\n\n The inputs are expected to be in compact form (see vector_to_indexed_counts) and will return a list of\n partitions. (conjugates are implied)\n \"\"\"\n if len(counts) == 0:\n return [()]\n\n subsequence = partition_counts(counts[1:])\n\n output = []\n for i in range(counts[0] + 1):\n new_entries = [(i,) + seq for seq in subsequence]\n new_entries = [i for i in new_entries if sum(i)]\n output.extend(new_entries)\n\n return output or [counts]\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef group_contiguous(x):\n # type: (tuple) -> typing.List[tuple]\n \"\"\"Separate tuple into blocks of contiguous elements.\n \"\"\"\n x_grouped = []\n for i in sorted(x):\n if not x_grouped:\n x_grouped.append([i])\n elif i == x_grouped[-1][-1] + 1:\n x_grouped[-1].append(i)\n else:\n x_grouped.append([i])\n return [tuple(i) for i in x_grouped]\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef int_combine(a, b):\n # type: (int, int) -> typing.Tuple[typing.Tuple[int, int], ...]\n \"\"\"Enumerate all legal combinations of two values. 
(either raw values or subgroups)\n \"\"\"\n output = [a + b, a - b, b - a, a * b, None if (b == 0 or a % b) else int(a // b),\n None if (a == 0 or b % a) else int(b // a)]\n return tuple((k, mode) for mode, k in enumerate(output) if k is not None)\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef _vector_combine(x, y, max_subsegment):\n # type: (typing.Iterable[int], typing.Iterable[int], int) -> typing.ItemsView[int, tuple]\n \"\"\"Inner cached vector combination function.\n\n This function expects to be given contiguous blocks of integers, and as a result picks up some additional cache\n hits on combination tasks which have a small number of missing elements.\n \"\"\"\n results = {}\n for i in x:\n for j in y:\n if max_subsegment is not None:\n results.update({k: (i, j, mode) for k, mode in int_combine(i, j) if (0 <= k <= max_subsegment)})\n else:\n results.update({k: (i, j, mode) for k, mode in int_combine(i, j)})\n\n return results.items()\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef vector_combine(x, y, max_subsegment):\n # type: (tuple, tuple, int) -> dict\n \"\"\"Enumerate all ways that two vectors of candidates can be combined up to an optional maximum value.\n \"\"\"\n x_grouped = group_contiguous(x)\n y_grouped = group_contiguous(y)\n\n results = {}\n for x_chunk in x_grouped:\n for y_chunk in y_grouped:\n results.update({k: v for k, v in _vector_combine(x_chunk, y_chunk, max_subsegment)})\n if max_subsegment is not None and len(results) == max_subsegment + 1:\n return results\n return results\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef conj_vector(partition, counts):\n # type: (tuple, tuple) -> tuple\n \"\"\"Convenience function to compute the conjugate set of a given partition.\n \"\"\"\n return tuple(ct - partition[i] for i, ct in enumerate(counts))\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef all_computable(keys, counts, max_subsegment):\n \"\"\"Recursively compute the set of numbers that a combination of rolls can compute.\n \"\"\"\n # type: (typing.Tuple[int], typing.Tuple[int], int) -> tuple\n if sum(counts) == 1:\n return tuple(keys[i] for i, ct in enumerate(counts) if ct)\n\n partitions = partition_counts(counts)\n seen = set()\n out_set = set()\n for partition in partitions:\n conjugate = conj_vector(partition, counts)\n if conjugate in seen or not sum(conjugate):\n continue\n seen.update([partition, conjugate])\n\n partition_computable = all_computable(keys, partition, max_subsegment)\n conjugate_computable = all_computable(keys, conjugate, max_subsegment)\n\n combined_results = vector_combine(\n partition_computable, conjugate_computable, max_subsegment\n )\n for k, v in combined_results.items():\n SUBSOLUTION_CACHE[(k, counts, keys)] = (partition, conjugate, v)\n combined_computable = combined_results.keys()\n\n out_set.update(combined_computable)\n if max_subsegment is not None and len(out_set) == max_subsegment + 1:\n break # all values are computable\n\n return tuple(sorted(out_set))\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef code_op_to_str(args):\n if isinstance(args, (int, str)):\n return args, MAX_PRIORITY\n elif len(args) == 1:\n return args[0], MAX_PRIORITY\n a, b, mode = args\n [a, a_priority], [b, b_priority] = code_op_to_str(a), code_op_to_str(b)\n if a_priority <= MODE_PRIORITY[mode]:\n a = f\"({a})\"\n if b_priority <= MODE_PRIORITY[mode]:\n b = f\"({b})\"\n\n return MODE_FORMATS[mode].format(a=a, b=b), MODE_PRIORITY[mode]\n\n\n@functools.lru_cache(maxsize=LRU_CACHE_SIZE)\ndef _lookup_formula(cache_key):\n 
(_, counts, keys) = cache_key\n if sum(counts) == 1:\n return tuple(keys[i] for i, ct in enumerate(counts) if ct)[0]\n\n partition, conjugate, [partition_target, conjugate_target, mode] = SUBSOLUTION_CACHE[cache_key]\n cache_key0 = (partition_target, partition, keys)\n cache_key1 = (conjugate_target, conjugate, keys)\n return _lookup_formula(cache_key0), _lookup_formula(cache_key1), mode\n\n\ndef lookup_formula(keys, counts, target):\n if sum(counts) == 1:\n return str(target) if tuple(keys[i] for i, ct in enumerate(counts) if ct)[0] == target else None\n\n cache_key = (target, counts, keys)\n if cache_key not in SUBSOLUTION_CACHE:\n return None\n\n result_str = code_op_to_str(_lookup_formula(cache_key))[0]\n result_str_eval = eval(result_str)\n\n assert int(result_str_eval) == result_str_eval\n assert int(result_str_eval) == target\n\n return code_op_to_str(_lookup_formula(cache_key))[0]\n\n\ndef generate_all_rolls(max_num_dice):\n output = []\n rolls = [(i,) for i in range(1, DICE_FACES + 1)]\n output.append(rolls)\n for i in range(max_num_dice - 1):\n rolls_new = []\n for subroll in rolls:\n for j in range(1, DICE_FACES + 1):\n if j < subroll[-1]:\n continue\n rolls_new.append(subroll + (j,))\n rolls = rolls_new\n output.append(rolls)\n\n return output\n\n\ndef get_formulas(roll, max_subsegment):\n keys, counts = vector_to_indexed_counts(roll)\n\n omega_denom = functools.reduce(operator.mul, [fact(i) for i in counts], 1)\n assert not fact(sum(counts)) % omega_denom\n omega = fact(sum(counts)) // omega_denom\n x = {i for i in all_computable(keys, counts, max_subsegment) if i > 0}\n targets = [i.intersection(x) for i in TARGET_SETS]\n targets = [i.pop() if i else None for i in targets]\n\n return [f\"{lookup_formula(keys, counts, i)} = {i}\" if i else \"\" for i in targets], omega\n\n\nCACHED_FNS = [partition_counts, int_combine, group_contiguous, _vector_combine, vector_combine, all_computable,\n conj_vector, fact, code_op_to_str, _lookup_formula]\ndef clear_caches():\n for fn in CACHED_FNS:\n if hasattr(fn, \"cache_clear\"):\n fn.cache_clear()\n\n\ndef cache_info(write_to_summary=False):\n name_len = max([len(fn.__name__) for fn in CACHED_FNS])\n for fn in CACHED_FNS:\n if not hasattr(fn, \"cache_info\"):\n continue\n info = fn.cache_info()\n hit_rate = info.hits / (info.hits + info.misses)\n info_str = \"{} {} {}\".format(fn.__name__.ljust(name_len + 3), f\"hit_rate={hit_rate*100:.1f}% \", info)\n print(info_str)\n if write_to_summary:\n with open(os.path.join(OUTPUT_DIR, \"summary.txt\"), \"at\") as f:\n f.write(info_str + \"\\n\")\n\n\ndef main(max_num_dice=4):\n if os.path.exists(OUTPUT_DIR):\n shutil.rmtree(OUTPUT_DIR)\n os.makedirs(OUTPUT_DIR)\n summary_file = os.path.join(OUTPUT_DIR, \"summary.txt\")\n\n st = timeit.default_timer()\n compute_time = 0\n overshoot_solutions = []\n for i, rolls in enumerate(generate_all_rolls(max_num_dice)):\n loop_st = timeit.default_timer()\n num_dice = i + 1\n results = []\n omegas = []\n for roll in rolls:\n result, omega = get_formulas(roll, MAX_SUBSEGMENT_FIRST_PASS)\n omegas.append(omega)\n\n # A more efficient ordering would be to do all of the rigorous passes at the end to reduce cache eviction,\n # but this is easier to read.\n if not all(result):\n prior_result = result.copy()\n result, _ = get_formulas(roll, None)\n overshoot_solutions.extend([r for i, r in enumerate(result) if r and not prior_result[i]])\n results.append(result)\n compute_time += timeit.default_timer() - loop_st\n results = list(zip(*results))\n widths = [max([len(r) 
for r in result]) for result in results]\n results = list(zip(*results))\n\n perfect_count = 0\n omega_len = max([len(str(w)) for w in omegas])\n assert sum(omegas) == DICE_FACES ** num_dice\n lines, impossible_lines = [], []\n for roll, omega, result in zip(rolls, omegas, results):\n perfect_count += all(result)\n omega_str = f\"(ω = {omega})\".ljust(omega_len + 7)\n formulas = \" | \".join([r.replace(\"=\", \"{}=\").format(\" \" * (widths[i] - len(r)))\n if r else \" \" * widths[i] for i, r in enumerate(result)])\n\n lines.append(f\"{list(roll)} {omega_str} {formulas}\")\n if not all(result):\n impossible_str = \" \".join([\" \" if r else str(i+1) for i, r in enumerate(result)])\n impossible_lines.append(f\"{list(roll)} {omega_str} {impossible_str}\")\n\n omega_perfect = sum([omega if all(result) else 0 for omega, result in zip(omegas, results)])\n\n table_path = os.path.join(OUTPUT_DIR, f\"solutions_{str(num_dice).zfill(2)}_dice.txt\")\n\n with open(table_path, \"wt\", encoding=\"utf-8\") as f:\n for line in lines:\n f.write(line + \"\\n\")\n\n if impossible_lines:\n impossible_path = os.path.join(OUTPUT_DIR, f\"impossible_{str(num_dice).zfill(2)}_dice.txt\")\n with open(impossible_path, \"wt\", encoding=\"utf-8\") as f:\n for line in impossible_lines:\n f.write(line + \"\\n\")\n\n summary = [\n f\"{num_dice} dice complete:\",\n f\" {timeit.default_timer() - st:.4f} seconds ({compute_time:.4f} in primary compute)\",\n f\" {perfect_count} / {len(results)} rolls succeed at all 9 levels.\",\n f\" {omega_perfect} / {sum(omegas)} ({omega_perfect / sum(omegas) * 100:.9f}%) that a roll will succeed \"\n f\"at all 9 levels\",\n \"\",\n ]\n with open(summary_file, \"at\", encoding=\"utf-8\") as f:\n f.write(\"\\n\".join(summary))\n f.write(\"\\n\")\n print(\"\\n\".join(summary))\n\n with open(os.path.join(OUTPUT_DIR, \"honorable_mentions.txt\"), \"wt\", encoding=\"utf-8\") as f:\n f.write(\"Overshoot:\\n\")\n f.write(f\"Cases where an intermediate result > {MAX_SUBSEGMENT_FIRST_PASS} is needed to hit a given level.\\n\")\n f.write(\"\\n\".join(overshoot_solutions))\n\n\nif __name__ == \"__main__\":\n # cProfile.run(\"main(12)\")\n main(20 if DICE_FACES <= 6 else 14)\n cache_info(True)\n","sub_path":"sacred_geometry.py","file_name":"sacred_geometry.py","file_ext":"py","file_size_in_byte":13049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"495704271","text":"from django.db import IntegrityError\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import permission_required\n\nfrom .models import Tag, TagGroup\n\n@permission_required('tags.view_tag_control_panel')\ndef edit(request, group_id=0):\n group_id = int(group_id)\n\n context = {\n 'groups': TagGroup.objects.all()\n }\n\n if group_id > 0:\n try:\n context['group'] = TagGroup.objects.get(pk=group_id)\n context['tags'] = context['group'].tag_set.all().order_by('label')\n except TagGroup.DoesNotExist:\n return HttpResponseRedirect('/tags/edit/')\n\n return render(request, 'tags/edit.html', context)\n\n@permission_required('tags.add_tag')\ndef new(request, group_id):\n group_id = int(group_id)\n\n try:\n group = TagGroup.objects.get(pk=group_id)\n except (TagGroup.DoesNotExist):\n return JsonResponse({\n 'success': False,\n 'error': 'Unable to find category'\n })\n\n if 'tag' in request.POST and request.POST['tag']:\n label = request.POST['tag'].strip().lower()\n try:\n tag = Tag.objects.create(label=label, 
group=group)\n            return JsonResponse({\n                'success': True,\n                'pk': tag.id,\n            })\n        except IntegrityError:\n            return JsonResponse({\n                'success': False,\n                'error': 'Tag already exists',\n            })\n    else:\n        return JsonResponse({\n            'success': False,\n            'error': 'Tag cannot be empty'\n        })\n\n@permission_required('tags.delete_tag')\ndef delete(request, tag_id):\n    tag_id = int(tag_id)\n    try:\n        tag = Tag.objects.get(pk=tag_id)\n        tag.delete()\n        return JsonResponse({\n            'success': True,\n        })\n    except Tag.DoesNotExist:\n        return JsonResponse({\n            'success': False,\n            'error': 'Cannot delete nonexisting tag',\n        })\n\n@permission_required('tags.view_tag_control_panel')\ndef name(request, name):\n    try:\n        group = TagGroup.objects.get(title=name)\n        return HttpResponseRedirect('/tags/%r/edit/' % group.id)\n    except TagGroup.DoesNotExist:\n        return HttpResponseRedirect('/tags/edit/')\n","sub_path":"tags/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"617027516","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 26 23:57:40 2020\n\n@author: farhad\n\"\"\"\nfrom SignalDto import SignalDto\nfrom Utils import Utils\nfrom time import sleep\nfrom selenium import webdriver\n\n\"\"\"FOR3X_SIGNAL\"\"\"\nclass FOR3X_SIGNAL:\n    \n    \n    def __init__(self,driver):\n        self.driver= driver\n\n    utils= Utils()\n    \n\n    \n    \n    def createSignalDto(self,msg,chName):\n        print('creating signalDto for '+chName+ ' started')\n        lines=str.splitlines(msg)\n        signalDto= SignalDto()\n        \n        signalDto.provider = chName\n        #signalDto.signalTime =self.utils.getDate(msgTime)\n        \n        enter= lines[0].split(\" \")# first line is USDCAD BUY 1.3045\n        \n        signalDto.symbol = enter[0]\n        signalDto.enter_type = 1 if enter[1] == \"BUY\" else 2\n        signalDto.enterPrice = float(enter[2])\n        \n        signalDto.sl = float(lines[1].split(\" \")[1]) #SL 1.2960\n        signalDto.tp = float(lines[2].split(\" \")[1]) #TP 1.2960\n        \n\n        \n        print('creating signalDto for '+chName+ ' finished')    \n        return {0:signalDto}","sub_path":"backup/13990617-2/forex-py/providers/FOR3X_SIGNAL.py","file_name":"FOR3X_SIGNAL.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"34487118","text":"from django.contrib.auth import get_user_model\n\nfrom rest_framework import serializers\n\nfrom rest_auth.serializers import UserDetailsSerializer as DefaultUserDetailsSerializer, LoginSerializer as DefaultLoginSerializer\n\n\nCustomUser = get_user_model()\n\n\nclass LoginSerializer(DefaultLoginSerializer):\n    username = None\n    email = serializers.EmailField(required=True, allow_blank=False)\n\n\nclass UserDetailsSerializer(DefaultUserDetailsSerializer):\n    class Meta:\n        model = CustomUser\n        fields = ('pk', 'username', 'email', 'first_name', 'last_name')\n        read_only_fields = ('email', )\n\n\nclass KnoxSerializer(serializers.Serializer):\n    \"\"\"\n    Serializer for Knox authentication.\n    \"\"\"\n    token = serializers.CharField()\n    user = UserDetailsSerializer()\n","sub_path":"backend/core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"299631893","text":"'''Ninth exercise'''\r\n'''Part three'''\r\n\r\ndef sacar_la_contraseña():\r\n    \"\"\"Program to check the password, using a bool variable\"\"\"\r\n    # Set what the password is\r\n    contasena = 
\"admin\"\r\n    contrasena_de_usuario = (input(\"enter the password: \"))\r\n    # Use the if and the bool variable to decide which answer to show\r\n    if contrasena_de_usuario == contasena:\r\n        correcto = True\r\n    else:\r\n        correcto = False\r\n\r\n    if correcto == True:\r\n        return \"congratulations, you entered the correct password\"\r\n    elif correcto == False:\r\n        return \"you are not entering the correct password \"\r\nprint(sacar_la_contraseña())\r\n","sub_path":"Tp9_3.py","file_name":"Tp9_3.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"204006451","text":"import http.client\nimport urllib.parse\nfrom config import app_token, user_token\n\n\n\ndef send_push(name):\n    conn = http.client.HTTPSConnection(\"api.pushover.net:443\")\n    conn.request(\"POST\", \"/1/messages.json\",\n        urllib.parse.urlencode({\n            \"token\": app_token,\n            \"user\": user_token,\n            \"message\": name+ \" is online.\"\n        }), {\"Content-type\": \"application/x-www-form-urlencoded\"})\n    conn.getresponse()\n    \ndef send_startup_push():\n    conn = http.client.HTTPSConnection(\"api.pushover.net:443\")\n    conn.request(\"POST\", \"/1/messages.json\",\n        urllib.parse.urlencode({\n            \"token\": app_token,\n            \"user\": user_token,\n            \"message\": \"IP notification has started\",\n            \"title\": \"IP Checker Started\",\n            \"priority\": 0\n        }), {\"Content-type\": \"application/x-www-form-urlencoded\" })\n    conn.getresponse()\n    print(\"startup push sent\")\n\n","sub_path":"ip_notification/send_push.py","file_name":"send_push.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"4264819","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# lstm_sizes = [32, 64, 128, 192, 256, 320, 448, 600]\n\ndir = '01/'\nstart = 2\none_epoch = 23\nmean_size = 3\noffset = 9\ndef get_X_Y(dir, lstm_size):\n    file_name = dir + 'out_lstm_size_' + str(lstm_size) + '.txt'\n    data = []\n    validation_data = []\n    test_data = []\n    X = []\n    Y = []\n    Y1 = []\n    with open(file_name, encoding='UTF-8') as f:\n        cnt = 0\n        for line in f:\n            # if(cnt >= start):\n            #     print(cnt - start, line)\n            if(cnt >= start and (cnt - start) % one_epoch >= one_epoch - mean_size):\n                # print(cnt - start, line)\n                if((cnt - start) % one_epoch == one_epoch - 1):\n                    test_data.append(float(line.split()[6]))\n                else:\n                    data.append(float(line.split()[offset]))\n                    validation_data.append(float(line.split()[offset+8]))\n            cnt = cnt + 1\n    for i in range(0, len(data), mean_size - 1):\n        X.append(i // mean_size)\n        Y.append(np.mean(data[i: i+mean_size]))\n        Y1.append(np.mean(validation_data[i: i+mean_size]))\n    return X, Y, Y1\nplt.ylabel('Accuracy')\nplt.xlabel('Layer Number')\nfor lstm_size in [32, 64, 128]:\n    X, Y, _ = get_X_Y(dir, lstm_size)\n    plt.plot(X, Y, label='lstm-size='+str(lstm_size))\nplt.legend(loc='lower left')\nplt.show()\n","sub_path":"model3/increase_layer_compare.py","file_name":"increase_layer_compare.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"572502753","text":"# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\n\nfrom oslo_config import cfg\n\nfrom rally.benchmark.scenarios import base\nfrom rally.benchmark import utils as bench_utils\n\n\nMANILA_BENCHMARK_OPTS = [\n cfg.FloatOpt(\n \"manila_share_create_prepoll_delay\",\n default=2.0,\n help=\"Delay between creating Manila share and polling for its \"\n \"status.\"),\n cfg.FloatOpt(\n \"manila_share_create_timeout\",\n default=300.0,\n help=\"Timeout for Manila share creation.\"),\n cfg.FloatOpt(\n \"manila_share_create_poll_interval\",\n default=3.0,\n help=\"Interval between checks when waiting for Manila share \"\n \"creation.\"),\n cfg.FloatOpt(\n \"manila_share_delete_timeout\",\n default=180.0,\n help=\"Timeout for Manila share deletion.\"),\n cfg.FloatOpt(\n \"manila_share_delete_poll_interval\",\n default=2.0,\n help=\"Interval between checks when waiting for Manila share \"\n \"deletion.\"),\n]\n\nCONF = cfg.CONF\nbenchmark_group = cfg.OptGroup(name=\"benchmark\", title=\"benchmark options\")\nCONF.register_opts(MANILA_BENCHMARK_OPTS, group=benchmark_group)\n\n\nclass ManilaScenario(base.Scenario):\n \"\"\"Base class for Manila scenarios with basic atomic actions.\"\"\"\n\n @base.atomic_action_timer(\"manila.create_share\")\n def _create_share(self, share_proto, size=1, **kwargs):\n \"\"\"Create a share.\n\n :param share_proto: share protocol for new share,\n available values are NFS, CIFS, GlusterFS and HDFS.\n :param size: size of a share in GB\n :param snapshot_id: ID of the snapshot\n :param name: name of new share\n :param description: description of a share\n :param metadata: optional metadata to set on share creation\n :param share_network: either instance of ShareNetwork or str with ID\n :param share_type: either instance of ShareType or str with ID\n :param is_public: defines whether to set share as public or not.\n :returns: instance of :class:`Share`\n \"\"\"\n if not kwargs.get(\"name\"):\n kwargs[\"name\"] = self._generate_random_name()\n\n share = self.clients(\"manila\").shares.create(\n share_proto, size, **kwargs)\n time.sleep(CONF.benchmark.manila_share_create_prepoll_delay)\n share = bench_utils.wait_for(\n share,\n is_ready=bench_utils.resource_is(\"available\"),\n update_resource=bench_utils.get_from_manager(),\n timeout=CONF.benchmark.manila_share_create_timeout,\n check_interval=CONF.benchmark.manila_share_create_poll_interval,\n )\n return share\n\n @base.atomic_action_timer(\"manila.delete_share\")\n def _delete_share(self, share):\n \"\"\"Delete the given share.\n\n :param share: :class:`Share`\n \"\"\"\n share.delete()\n error_statuses = (\"error_deleting\", )\n bench_utils.wait_for_delete(\n share,\n update_resource=bench_utils.get_from_manager(error_statuses),\n timeout=CONF.benchmark.manila_share_delete_timeout,\n check_interval=CONF.benchmark.manila_share_delete_poll_interval)\n\n @base.atomic_action_timer(\"manila.list_shares\")\n def _list_shares(self, detailed=True, search_opts=None):\n \"\"\"Returns user shares list.\n\n :param detailed: defines either to return detailed list of\n objects or not.\n :param search_opts: container of search opts such as\n \"name\", \"host\", 
\"share_type\", etc.\n \"\"\"\n return self.clients(\"manila\").shares.list(\n detailed=detailed, search_opts=search_opts)\n\n @base.atomic_action_timer(\"manila.create_share_network\")\n def _create_share_network(self, neutron_net_id=None,\n neutron_subnet_id=None,\n nova_net_id=None, name=None, description=None):\n \"\"\"Create share network.\n\n :param neutron_net_id: ID of Neutron network\n :param neutron_subnet_id: ID of Neutron subnet\n :param nova_net_id: ID of Nova network\n :param name: share network name\n :param description: share network description\n :returns: instance of :class:`ShareNetwork`\n \"\"\"\n name = name or self._generate_random_name()\n share_network = self.clients(\"manila\").share_networks.create(\n neutron_net_id=neutron_net_id,\n neutron_subnet_id=neutron_subnet_id,\n nova_net_id=nova_net_id,\n name=name,\n description=description)\n return share_network\n\n @base.atomic_action_timer(\"manila.delete_share_network\")\n def _delete_share_network(self, share_network):\n \"\"\"Delete share network.\n\n :param share_network: instance of :class:`ShareNetwork`.\n \"\"\"\n share_network.delete()\n bench_utils.wait_for_delete(\n share_network,\n update_resource=bench_utils.get_from_manager(),\n timeout=CONF.benchmark.manila_share_delete_timeout,\n check_interval=CONF.benchmark.manila_share_delete_poll_interval)\n\n @base.atomic_action_timer(\"manila.list_share_networks\")\n def _list_share_networks(self, detailed=True, search_opts=None):\n \"\"\"List share networks.\n\n :param detailed: defines either to return detailed list of\n objects or not.\n :param search_opts: container of search opts such as\n \"project_id\" and \"name\".\n :returns: list of instances of :class:`ShareNetwork`\n \"\"\"\n share_networks = self.clients(\"manila\").share_networks.list(\n detailed=detailed, search_opts=search_opts)\n return share_networks\n\n @base.atomic_action_timer(\"manila.list_share_servers\")\n def _list_share_servers(self, search_opts=None):\n \"\"\"List share servers. 
Admin only.\n\n :param search_opts: set of key-value pairs to filter share servers by.\n Example: {\"share_network\": \"share_network_name_or_id\"}\n :returns: list of instances of :class:`ShareServer`\n \"\"\"\n share_servers = self.admin_clients(\"manila\").share_servers.list(\n search_opts=search_opts)\n return share_servers\n","sub_path":"rally/plugins/openstack/scenarios/manila/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"276647306","text":"from Utilities import *\nfrom Model_FusionNet import *\nfrom Losses import *\n\nfrom albumentations import (\n PadIfNeeded,\n HorizontalFlip,\n VerticalFlip, \n CenterCrop, \n Crop,\n Compose,\n Transpose,\n RandomRotate90,\n ElasticTransform,\n GridDistortion, \n OpticalDistortion,\n RandomSizedCrop,\n OneOf,\n CLAHE,\n RandomContrast,\n RandomGamma,\n RandomBrightness\n)\nimport cv2\n###############################################################################\ndef time_seed ():\n seed = None\n while seed == None:\n cur_time = time.time ()\n seed = int ((cur_time - int (cur_time)) * 1000000)\n return seed\n\nclass ImageDataFlow(RNGDataFlow):\n def __init__(self, \n imageDir, \n labelDir, \n size, \n dtype='float32', \n isTrain=False, \n isValid=False, \n isTest=False, \n pruneLabel=False, \n shape=[1, 512, 512]):\n\n self.dtype = dtype\n self.imageDir = imageDir\n self.labelDir = labelDir\n self._size = size\n self.isTrain = isTrain\n self.isValid = isValid\n\n imageFiles = natsorted (glob.glob(self.imageDir + '/*.*'))\n labelFiles = natsorted (glob.glob(self.labelDir + '/*.*'))\n print(imageFiles)\n print(labelFiles)\n self.images = []\n self.labels = []\n self.data_seed = time_seed ()\n self.data_rand = np.random.RandomState(self.data_seed)\n self.rng = np.random.RandomState(999)\n for imageFile in imageFiles:\n image = skimage.io.imread (imageFile)\n self.images.append(image)\n for labelFile in labelFiles:\n label = skimage.io.imread (labelFile)\n self.labels.append(label)\n \n self.DIMZ = shape[0]\n self.DIMY = shape[1]\n self.DIMX = shape[2]\n self.pruneLabel = pruneLabel\n\n def size(self):\n return self._size\n\n def AugmentPair(self, src_image, src_label, pipeline, seed=None, verbose=False):\n np.random.seed(seed) if seed else np.random.seed(2015)\n # print(src_image.shape, src_label.shape) #if verbose else ''\n if src_image.ndim==2:\n src_image = np.expand_dims(src_image, 0)\n src_label = np.expand_dims(src_label, 0)\n \n # Create the result\n aug_images = [] \n aug_labels = [] \n \n for z in range(src_image.shape[0]): # Convert image 2 rgb\n image = src_image[z,...]\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n label = src_label[z,...]\n augmented = pipeline(image=image, mask=label)\n image = augmented['image']\n label = augmented['mask']\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n aug_images.append(image)\n aug_labels.append(label)\n\n\n aug_images = np.array(aug_images).astype(np.float32)\n aug_labels = np.array(aug_labels).astype(np.float32)\n # print(aug_images.shape, aug_labels.shape)\n return aug_images, aug_labels\n ###############################################################################\n def random_reverse(self, image, seed=None):\n assert ((image.ndim == 2) | (image.ndim == 3))\n if seed:\n self.rng.seed(seed)\n random_reverse = self.rng.randint(1,3)\n if random_reverse==1:\n reverse = image[::1,...]\n elif random_reverse==2:\n reverse = image[::-1,...]\n image = reverse\n 
return image\n ###############################################################################\n def grow_boundaries(self, gt, steps=1, background=0):\n from scipy import ndimage\n foreground = np.zeros(shape=gt.shape, dtype=np.bool)\n masked = None\n \n for label in np.unique(gt):\n if label == background:\n continue\n label_mask = gt==label\n # Assume that masked out values are the same as the label we are\n # eroding in this iteration. This ensures that at the boundary to\n # a masked region the value blob is not shrinking.\n if masked is not None:\n label_mask = np.logical_or(label_mask, masked)\n eroded_label_mask = ndimage.binary_erosion(label_mask, iterations=steps, \n border_value=1)\n foreground = np.logical_or(eroded_label_mask, foreground)\n\n # label new background\n background = np.logical_not(foreground)\n gt[background] = 0\n \n return gt\n ###############################################################################\n def get_data(self):\n for k in range(self._size):\n #\n # Pick randomly a tuple of training instance\n #\n rand_index = self.data_rand.randint(0, len(self.images))\n image_p = self.images[rand_index]\n label_p = self.labels[rand_index]\n\n seed = time_seed () #self.rng.randint(0, 20152015)\n \n\n dimz, dimy, dimx = image_p.shape\n # The same for pair\n randz = self.data_rand.randint(0, dimz-self.DIMZ+1)\n randy = self.data_rand.randint(0, dimy-self.DIMY+1)\n randx = self.data_rand.randint(0, dimx-self.DIMX+1)\n\n image_p = image_p[randz:randz+self.DIMZ,randy:randy+self.DIMY,randx:randx+self.DIMX]\n label_p = label_p[randz:randz+self.DIMZ,randy:randy+self.DIMY,randx:randx+self.DIMX]\n # image_p = image_p[randz:randz+self.DIMZ,...]\n # label_p = label_p[randz:randz+self.DIMZ,...]\n\n if self.isTrain:\n # Augment the pair image for same seed\n pipeline = Compose([\n OneOf([RandomSizedCrop(min_max_height=(400, 512), height=self.DIMY, width=self.DIMX, interpolation=cv2.INTER_NEAREST, p=0.5),\n PadIfNeeded(min_height=self.DIMY, min_width=self.DIMX, p=0.5)], p=1), \n VerticalFlip(p=0.5), \n RandomRotate90(p=0.5),\n OneOf([\n ElasticTransform(p=0.5, alpha=1, sigma=5, alpha_affine=5, interpolation=cv2.INTER_NEAREST),\n GridDistortion(p=0.5, interpolation=cv2.INTER_NEAREST),\n OpticalDistortion(p=0.5, distort_limit=(0.05, 0.05), shift_limit=(0, 0), interpolation=cv2.INTER_NEAREST) \n ], p=0.8),\n CLAHE(p=0.8),\n RandomContrast(p=0.8),\n RandomBrightness(p=0.8),\n RandomGamma(p=0.8)])\n \n\n image_p, label_p = self.AugmentPair(image_p.copy(), label_p.copy(), pipeline, seed=seed)\n \n image_p = self.random_reverse(image_p, seed=seed)\n label_p = self.random_reverse(label_p, seed=seed)\n\n\n # # Calculate linear label\n if self.pruneLabel:\n label_p, nb_labels_p = skimage.measure.label(label_p.copy(), return_num=True) \n\n # if self.grow_boundaries\n # label_p = self.grow_boundaries(label_p)\n #label_p[0,0,0] = 0 # hack for entire label is 1 due to CB3\n # Expand dim to make single channel\n image_p = np.expand_dims(image_p, axis=-1)\n label_p = np.expand_dims(label_p, axis=-1)\n\n \n yield [image_p.astype(np.float32), \n label_p.astype(np.float32), \n ] \n\n###############################################################################\ndef get_data(dataDir, isTrain=False, isValid=False, isTest=False, shape=[1, 512, 512]):\n # Process the directories \n if isTrain:\n num=500\n names = ['trainA', 'trainB']\n if isValid:\n num=10\n names = ['trainA', 'trainB']\n if isTest:\n num=10\n names = ['validA', 'validB']\n\n \n dset = ImageDataFlow(os.path.join(dataDir, 
names[0]),\n os.path.join(dataDir, names[1]),\n num, \n isTrain=isTrain, \n isValid=isValid, \n isTest =isTest, \n shape=shape, \n pruneLabel=False)\n dset.reset_state()\n return dset\n###############################################################################\nclass Model(ModelDesc):\n @auto_reuse_variable_scope\n def generator(self, img, last_dim=1, nl=INLReLU, nb_filters=64):\n assert img is not None\n ret = arch_fusionnet_translator_2d(img, last_dim=last_dim, nl=nl, nb_filters=nb_filters)\n return ret \n\n def inputs(self):\n return [\n tf.placeholder(tf.float32, (None, args.DIMY, args.DIMX, 1), 'image'),\n tf.placeholder(tf.float32, (None, args.DIMY, args.DIMX, 1), 'label'),\n ]\n\n def build_graph(self, image, label):\n G = tf.get_default_graph()\n pi, pa = image, label\n \n pa = pa/255\n\n # Construct the graph\n with tf.variable_scope('gen'):\n with tf.device('/device:GPU:0'):\n with tf.variable_scope('image2membr'):\n pia = self.generator(tf_2tanh(pi), last_dim=1, nl=tf.nn.tanh, nb_filters=64)\n pia = tf_2imag(pia, maxVal=1.0)\n pia = tf.identity(pia, 'pia')\n \n\n losses = [] \n with tf.name_scope('loss_mae'):\n mae_ia = tf.reduce_mean(tf.abs(pa - pia), name='mae_ia')\n losses.append(1e0*mae_ia)\n add_moving_summary(mae_ia)\n\n with tf.name_scope('loss_dice'):\n dice_ia = tf.identity(1.0 - dice_coe(pia, pa, axis=[0,1,2,3], loss_type='jaccard'), \n name='dice_ia') \n losses.append(1e2*dice_ia)\n add_moving_summary(dice_ia)\n\n \n\n # Aggregate final loss\n cost = tf.reduce_sum(losses, name='cost')\n add_moving_summary(cost)\n\n # Segmentation\n pz = tf.zeros_like(pi)\n viz = tf.concat([tf.concat([pi, 255*pa, 255*pia], axis=2),\n ], axis=1)\n viz = tf.cast(tf.clip_by_value(viz, 0, 255), tf.uint8, name='viz')\n tf.summary.image('labelized', viz, max_outputs=50)\n\n return cost\n\n def optimizer(self):\n lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)\n return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)\n\n###############################################################################\nclass VisualizeRunner(Callback):\n def __init__(self, input, tower_name='InferenceTower', device=0):\n self.dset = input \n self._tower_name = tower_name\n self._device = device\n\n def _setup_graph(self):\n self.pred = self.trainer.get_predictor(\n ['image', 'label'], ['viz'])\n\n def _before_train(self):\n global args\n self.dset = get_data(args.data, isTrain=False, isValid=True, shape=[1, 512, 512])\n self.dset.reset_state()\n\n def _trigger(self):\n for image, label in self.dset.get_data():\n # print(image.shape, label.shape)\n viz_test = self.pred(image, label)\n viz_test = np.squeeze(np.array(viz_test))\n self.trainer.monitors.put_image('viz_test', viz_test)\n\n###############################################################################\ndef sample(dataDir, model_path, prefix='.'):\n print(\"Starting...\")\n print(dataDir)\n imageFiles = glob.glob(os.path.join(dataDir, '*.tif'))\n print(imageFiles)\n # Load the model \n predict_func = OfflinePredictor(PredictConfig(\n model=Model(),\n session_init=get_model_loader(model_path),\n input_names=['image'],\n output_names=['pia']))\n\n for imageFile in imageFiles:\n head, tail = os.path.split(imageFile)\n print(tail)\n affntFile = prefix+tail\n print(affntFile)\n\n # Read the image file\n image = skimage.io.imread(imageFile)\n\n image = np.expand_dims(image, axis=-1)\n # convert to 3 channel image\n #image = np.stack((image, image, image), -1)\n print(image.shape)\n def weighted_map_blocks(arr, inner, outer, 
ghost, func=None): # work for 3D, inner=[1, 3, 3], ghost=[0, 2, 2], \n dtype = np.float32 #arr.dtype\n\n arr = arr.astype(np.float32)\n # param\n if outer==None:\n outer = inner + 2*ghost\n outer = [(i + 2*g) for i, g in zip(inner, ghost)]\n shape = outer\n steps = inner\n \n print(outer)\n print(shape)\n print(inner)\n \n padding=arr.copy()\n print(padding.shape)\n #print(padding)\n \n weights = np.zeros_like(padding)\n results = np.zeros_like(padding)\n \n v_padding = sliding_window_view(padding, shape, steps)\n v_weights = sliding_window_view(weights, shape, steps)\n v_results = sliding_window_view(results, shape, steps)\n \n print('v_padding', v_padding.shape)\n def invert(val):\n #return 255-val \n return val\n\n for z in range(v_padding.shape[0]):\n for y in range(v_padding.shape[1]):\n for x in range(v_padding.shape[2]):\n \n # Get the result\n #v_result = invert(v_padding[z,y,x]) ### Todo function is here\n v_result = np.array(func(\n (v_padding[z,y,x,0][...,0:1]) ) ) ### Todo function is here\n v_result = np.squeeze(v_result, axis=0).astype(np.float32)\n #v_result[0,:,:,0] = v_result[1,:,:,0] \n #v_result[:,0,:,1] = v_result[:,1,:,1] \n #v_result[:,:,0,2] = v_result[:,:,1,2] \n #v_results[z,y,x] += v_result\n #v_results[z,y,x] = np.maximum(v_result, v_results[z,y,x])\n #v_weight = np.ones_like(v_result)\n\n #construct gaussian weight\n zz, yy, xx = np.meshgrid(np.linspace(-1,1,shape[0], dtype=np.float32), \n np.linspace(-1,1,shape[1], dtype=np.float32), \n np.linspace(-1,1,shape[2], dtype=np.float32))\n d = np.sqrt(zz*zz+xx*xx+yy*yy)\n sigma, mu = 0.5, 0.0\n v_weight = 1e-6+np.exp(-( (d-mu)**2 / ( 2.0 * sigma**2 ) ) )\n v_weight = v_weight/v_weight.max()\n \n \n v_weight = np.expand_dims(v_weight, axis=-1)\n v_weights[z,y,x] += v_weight\n\n v_results[z,y,x] += v_result * v_weight\n \n # Divided by the weight param\n results /= weights \n \n \n current_shape = results.shape\n trimmed_shape = [np.arange(ghost[0]),(results.shape[0] - ghost[0]), \n np.arange(ghost[1]),(results.shape[1] - ghost[1]), \n np.arange(ghost[2]),(results.shape[2] - ghost[2]), \n np.arange(ghost[3]),(results.shape[3] - ghost[3]), \n ]\n \n return results.astype(dtype)\n \n\n affnt = weighted_map_blocks(image, inner=[32, 32, 64, 1], \n outer=[128, 128, 256, 1], \n ghost=[32, 32, 64, 0], \n func=predict_func) # inner, ghost\n\n affnt = np.squeeze(affnt)\n skimage.io.imsave(affntFile, affnt)\n return None\n\n\n \n###############################################################################\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default='0', help='comma seperated list of GPU(s) to use.')\n parser.add_argument('--data', default='data/Kasthuri15/3D/', required=True, \n help='Data directory, contain trainA/trainB/validA/validB')\n parser.add_argument('--load', help='Load the model path')\n parser.add_argument('--DIMX', type=int, default=512)\n parser.add_argument('--DIMY', type=int, default=512)\n parser.add_argument('--DIMZ', type=int, default=1)\n parser.add_argument('--sample', help='Run the deployment on an instance',\n action='store_true')\n parser.add_argument('--srcDir', help='srcDir')\n parser.add_argument('--dstDir', help='dstDir')\n args = parser.parse_args()\n \n # python Exp_FusionNet2D_-VectorField.py --gpu='0' --data='arranged/'\n\n \n train_ds = get_data(args.data, isTrain=True, isValid=False, isTest=False, shape=[args.DIMZ, args.DIMY, args.DIMX])\n valid_ds = get_data(args.data, isTrain=False, isValid=True, isTest=False, shape=[args.DIMZ, 
args.DIMY, args.DIMX])\n # test_ds = get_data(args.data, isTrain=False, isValid=False, isTest=True)\n\n\n train_ds = PrefetchDataZMQ(train_ds, 8)\n train_ds = PrintData(train_ds)\n valid_ds = PrintData(valid_ds)\n model = Model()\n\n os.environ['PYTHONWARNINGS'] = 'ignore'\n\n # Set the GPU\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n # Running train or deploy\n if args.sample:\n # TODO\n print(\"Deploy the data\")\n sample(args.data, args.load, prefix='deploy_')\n # pass\n else:\n # Set up configuration\n # Set the logger directory\n logger.auto_set_dir()\n\n # Set up configuration\n config = TrainConfig(\n model = model, \n dataflow = train_ds,\n callbacks = [\n PeriodicTrigger(ModelSaver(), every_k_epochs=100),\n PeriodicTrigger(VisualizeRunner(valid_ds), every_k_epochs=5),\n ScheduledHyperParamSetter('learning_rate', [(0, 2e-4), (100, 1e-4), (200, 2e-5), (300, 1e-5), (400, 2e-6), (500, 1e-6)], interp='linear'),\n ],\n max_epoch = 10000, \n session_init = SaverRestore(args.load) if args.load else None,\n )\n \n # Train the model\n launch_train_with_config(config, QueueInputTrainer())\n","sub_path":"Toy_ISBI2012_FusionNet.py","file_name":"Toy_ISBI2012_FusionNet.py","file_ext":"py","file_size_in_byte":18907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"366037516","text":"from tiling_cpp import (fill_mesh, fill_mesh_function, fill_mesh_valuecollection,\n fill_mf_from_mvc)\nfrom test_periodic import compute_vertex_periodicity\n\nfrom dolfin import (CompiledSubDomain, Mesh, MeshEditor, MeshFunction, MeshValueCollection,\n Timer, info, SubsetIterator)\nfrom collections import defaultdict\nfrom itertools import izip\nimport numpy as np\nimport operator\n\n# FIXME: \n#\n# mesh connectivity to identify entities takes the most time\n#\n# if make_mesh stored MeshValueCollections and there was way to \n# make mvc to MeshFunctions then a lot of space could be saved\n\n\ndef TileMesh(tile, shape, mesh_data={}, TOL=1E-9):\n '''\n [tile tile;\n tile tile;\n tile tile;\n tile tile]\n\n The shape is an ntuple describing the number of pieces put next \n to each other in the i-th axis. 
mesh_data : (tdim, tag) -> [entities] \n is the way to encode mesh data of the tile.\n '''\n # All the axis shapes needs to be power of two\n assert all((((v & (v - 1)) == 0) and v > 0) for v in shape)\n # Sanity for glueing\n gdim = tile.geometry().dim()\n assert len(shape) <= gdim\n # While evolve is general mesh writing is limited to simplices only (FIXME)\n # so we bail out early\n assert str(tile.ufl_cell()) in ('interval', 'triangle', 'tetrahedron')\n\n t = Timer('evolve')\n # Do nothing\n if all(v == 1 for v in shape):\n return tile, mesh_data\n\n # We want to evolve cells, vertices of the mesh using geometry information\n # and periodicity info\n x = tile.coordinates()\n min_x = np.min(x, axis=0)\n max_x = np.max(x, axis=0)\n shifts = max_x - min_x\n \n shifts_x = [] # Geometry\n vertex_mappings = [] # Periodicity\n # Compute geometrical shift for EVERY direction:\n for axis in range(len(shape)):\n shift = shifts[axis]\n # Vector to shift cell vertices\n shift_x = np.zeros(gdim); shift_x[axis] = shift\n shifts_x.append(shift_x)\n\n # Compute periodicity in the vertices\n to_master = lambda x, shift=shift_x: x - shift\n # Mapping facets\n master = CompiledSubDomain('near(x[i], A, tol)', i=axis, A=min_x[axis], tol=TOL)\n slave = CompiledSubDomain('near(x[i], A, tol)', i=axis, A=max_x[axis], tol=TOL)\n\n error, vertex_mapping = compute_vertex_periodicity(tile, master, slave, to_master)\n # Fail when exended direction is no periodic\n assert error < 10*TOL, error\n \n vertex_mappings.append(vertex_mapping)\n # The final piece of information is cells\n cells = np.fromiter(tile.cells().flat, dtype='uintp').reshape(tile.cells().shape)\n \n # Evolve\n while shape:\n # Evolve is a bang method on vertex_mappings, shifts_x\n x, cells, shape = evolve(x, cells, vertex_mappings, shifts_x, shape, mesh_data=mesh_data)\n info('\\tEvolve took %g s ' % t.stop())\n\n # Mesh data is evolved, (x cells) -> to mesh\n mesh = make_mesh(x, cells, tdim=tile.topology().dim(), gdim=gdim)\n\n return mesh, mesh_data\n\n \ndef evolve(x, cells, vertex_mappings, shifts_x, shape, mesh_data={}):\n '''Evolve tile along the last exis'''\n axis, gdim = len(shape) - 1, x.shape[1]\n assert gdim > axis >= 0\n\n # We're done evolving if only one tile is to be plae in the axis dir\n if shape[axis] == 1:\n vertex_mappings.pop() # No longer needed\n shifts_x.pop() # Do not touch x and cells\n return x, cells, shape[:-1]\n\n # Use the axis's periodicity and shifting.\n # NOTE: used only here and discarded\n vertex_mapping, shift_x = vertex_mappings.pop(), shifts_x.pop()\n\n master_vertices = vertex_mapping.values()\n slave_vertices = vertex_mapping.keys()\n\n refine = shape[axis]\n while refine > 1: \n n = len(x)\n # To make the tile piece we add all but the master vertices\n new_vertices = np.fromiter(sorted(set(range(n)) - set(master_vertices)),\n dtype=int, count=n-len(master_vertices))\n # Verices of the glued tiles\n x = np.vstack([x, x[new_vertices] + shift_x])\n\n # NOTE: using getitem and arrays seems to be on par in efficiency\n # with dicts. 
So then I keep translate as array because efficiency\n translate = np.arange(n)\n # Offset the free\n translate[new_vertices] = n + np.arange(len(new_vertices))\n # Those at master positions take slave values\n translate[master_vertices] = slave_vertices\n\n # Cells of the glued tiles\n new_cells = np.zeros_like(cells)\n new_cells.ravel()[:] = translate[cells.flatten()]\n\n cells = np.vstack([cells, new_cells])\n # Update the periodicty mapping - slaves are new\n slave_vertices = translate[slave_vertices]\n # For the directions that do not evolve we add the periodic pairs\n for vm in vertex_mappings:\n vm.update(dict(izip(translate[vm.keys()], translate[vm.values()])))\n # Add the entities defined in terms of the vertices\n if mesh_data:\n evolve_data(mesh_data, translate)\n \n # Iterate\n refine /= 2\n shift_x *= 2\n # Discard data not needed in next evolution\n return x, cells, shape[:-1]\n\n\ndef evolve_data(data, mapping):\n '''\n If mapping holds (tdim, tag) -> [tuple of indices]) where indices are \n w.r.t of old numbering and mapping is old to new we simply add the mapped \n entities.\n '''\n for key in data.keys():\n old = data[key]\n \n new = np.zeros_like(old)\n new.ravel()[:] = mapping[old.flatten()]\n data[key] = np.vstack([old, new])\n return data\n\n\ndef make_mesh(coordinates, cells, tdim, gdim):\n '''Mesh by MeshEditor from vertices and cells'''\n mesh = Mesh()\n assert mesh.mpi_comm().tompi4py().size == 1\n\n fill_mesh(coordinates.flatten(), cells.flatten(), tdim, gdim, mesh)\n \n return mesh\n\n\ndef mf_from_data(mesh, data):\n '''Build tdim -> mesh function from the data of TileMesh'''\n return _mx_from_data(mesh, data,\n fill=fill_mesh_function,\n init_container=lambda m, t: MeshFunction('size_t', m, t, 0))\n\n\ndef mvc_from_data(mesh, data):\n '''Build tdim -> mesh value collection from data of TileMesh'''\n return _mx_from_data(mesh, data,\n fill=fill_mesh_valuecollection,\n init_container=lambda m, t: MeshValueCollection('size_t', m, t))\n\n\ndef groupby(pairs, index):\n '''Organize pairs by pairs[index]'''\n groups = defaultdict(list)\n for pair in pairs: groups[pair[index]].append(pair)\n\n for item in groups.iteritems():\n yield item\n\n \ndef _mx_from_data(mesh, data, fill, init_container):\n '''Fill the contained over mesh by data'''\n assert mesh.mpi_comm().tompi4py().size == 1\n\n containers = {}\n # We have define entities in terms of vertex numbering\n # Order keys such by tdim (the first key)\n for tdim, keys in groupby(data.keys(), 0):\n # So we'll be getting the entity index by lookup\n mesh.init(tdim)\n mesh.init(0, tdim)\n # Build the meshfunction from data\n f = init_container(mesh, tdim)\n for key in keys:\n indices = data[key]\n # These entity indices get the 'color'\n fill(mesh, indices.flatten(), tdim, key[1], f)\n containers[tdim] = f\n\n return containers\n\n\ndef as_meshf(mvc, init_value=0):\n '''Make a mesh function out of mesh value collection'''\n if isinstance(mvc, (tuple, list)):\n return [as_meshf(x, init_value) for x in mvc]\n\n if isinstance(mvc, dict):\n return dict(zip(mvc.keys(), as_meshf(mvc.values())))\n\n # Base case\n mesh_f = MeshFunction('size_t', mvc.mesh(), mvc.dim(), init_value)\n fill_mf_from_mvc(mvc, mesh_f)\n\n return mesh_f\n\n\ndef load_data(mesh, h5_file, data_set, dim, data):\n '''\n Fill the data dictionary with data_set representing mesh function with \n dim over mesh read from h5_file according to key spec expected by tiling \n algorithm.\n '''\n mf = MeshFunction('size_t', mesh, dim, 0)\n h5_file.read(mf, 
data_set)\n \n # Data to evolve\n mesh.init(dim, 0)\n e2v = tile.topology()(dim, 0)\n\n tags = set(mf.array())\n # Don't evolve zero - we initialize to it\n if 0 in tags: tags.remove(0)\n info('%s evolves tags %r' % (data_set, tags))\n\n for tag in tags:\n data[(dim, tag)] = np.array([e2v(e.index()) for e in SubsetIterator(mf, tag)],\n dtype='uintp')\n return data\n\n# ------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n from dolfin import mpi_comm_world, HDF5File, Timer, File\n import argparse, os\n\n parser = argparse.ArgumentParser(description='Put n tiles in x axis, m in y axis.')\n parser.add_argument('tile', type=str, help='H5 file that is the file')\n parser.add_argument('-n', type=int, default=1)\n parser.add_argument('-m', type=int, default=1)\n parser.add_argument('-facet_tags', type=str, default='surfaces',\n help='name under which H5 stores facet tags')\n parser.add_argument('-cell_tags', type=str, default='volumes',\n help='name under which H5 stores volume tags')\n\n save_pvd_parser = parser.add_mutually_exclusive_group(required=False)\n save_pvd_parser.add_argument('--save_pvd', dest='save_pvd', action='store_true')\n save_pvd_parser.add_argument('--no_save_pvd', dest='save_pvd', action='store_false')\n parser.set_defaults(save_pvd=False)\n\n args = parser.parse_args()\n\n # Some sanity\n root, ext = os.path.splitext(args.tile)\n assert ext == '.h5'\n\n shape = (args.n, args.m)\n assert all((((v & (v - 1)) == 0) and v > 0) for v in shape)\n \n # Load the tile mesh\n comm = mpi_comm_world()\n h5 = HDF5File(comm, args.tile, 'r')\n tile = Mesh()\n h5.read(tile, 'mesh', False)\n\n data = {}\n cell_dim = tile.topology().dim()\n facet_dim = cell_dim - 1\n\n if args.facet_tags: \n data = load_data(tile, h5, args.facet_tags, facet_dim, data)\n \n if args.cell_tags: \n data = load_data(tile, h5, args.cell_tags, cell_dim, data)\n\n t = Timer('tile')\n mesh, mesh_data = TileMesh(tile, shape, mesh_data=data)\n info('\\nTiling took %g s; nvertices %d, ncells %d' % (t.stop(),\n mesh.num_vertices(),\n mesh.num_cells()))\n\n # Saving\n t = Timer('save')\n h5_file = '%s_%d_%d.h5' % (root, shape[0], shape[1])\n \n out = HDF5File(mesh.mpi_comm(), h5_file, 'w')\n out.write(mesh, 'mesh')\n \n tt = Timer('data')\n # To mesh functions\n if mesh_data:\n mfs = mf_from_data(mesh, mesh_data)\n\n for dim, name in (zip((facet_dim, cell_dim), (args.facet_tags, args.cell_tags))):\n if name:\n out.write(mfs[dim], name)\n \n if args.save_pvd:\n File('%s_%d_%d_%s.pvd' % (root, shape[0], shape[1], name)) << mfs[dim]\n \n info('\\t\\tGetting data as MeshFoo took %g s' % tt.stop())\n \n info('\\tSaving took %g' % t.stop())\n","sub_path":"gmsh_cad/tiling.py","file_name":"tiling.py","file_ext":"py","file_size_in_byte":11273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"334536612","text":"import discord\nfrom discord.ext import commands\n\n\nintents = discord.Intents(messages=True, guilds=True, reactions=True, members=True, presences=True)\nclient = commands.Bot(command_prefix='=', intents=intents); client.remove_command('help')\nYOUR_TOKEN = open(\"token.txt\", \"r\").readline()\n\n\n@client.event\nasync def on_ready():\n print(f'Launched: {client.user.name} // {client.user.id}')\n\n\n \n@client.command()\nasync def check(ctx, user : discord.Member):\n count = 0\n messages = await ctx.message.channel.history().flatten()\n for msg in messages:\n if msg.author == user and msg.attachments:\n count += 
1\n    await ctx.send(embed=discord.Embed(description=f'{user} has uploaded **{count}** images', color=65535))\n\n\n\n    \nclient.run(YOUR_TOKEN)\n","sub_path":"image check/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"595785848","text":"def collatz(n):\n    while n > 1:\n        yield n\n        if n % 2 == 0:\n            n = int(n / 2)\n        else:\n            n = 3 * n + 1\n    yield n\n\nif __name__ == \"__main__\":\n    k = input(\"upper bound for starting points in the collatz sequence \")\n    k = int(k)\n    len_max = 0\n    start_max = 0\n    for start in range(k):\n        tmp = 0\n        for _ in collatz(start):\n            tmp += 1\n        if tmp > len_max:\n            start_max = start\n            len_max = tmp\n    print(start_max)\n","sub_path":"11-20/p14.py","file_name":"p14.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"525879522","text":"import pygame\nfrom Constants import *\nfrom Projective import *\nfrom Mob import *\nfrom Character import *\nfrom Skills import *\n\nclass Player(Character):\n    def __init__(self, game, name):\n        Character.__init__(self, game, name, START_X, START_Y, RIGHT, PLAYER_IMAGE_PACK, PLAYER_SPEED)\n        self.skill_list.append(Aimed_Shot(self))\n\n    def render_ui(self, screen):\n        screen.blit(pygame.image.load('img\\\\hpframe.png'), (self.x + 12, self.y + 58))\n        screen.blit(pygame.image.load('img\\\\mpframe.png'), (self.x + 12, self.y + 64))\n\n        m = 1\n        z = self.hp // 5\n\n        hptick = pygame.image.load('img\\\\hptick.png')\n        mptick = pygame.image.load('img\\\\mptick.png')\n\n        while m <= z:\n            screen.blit(hptick, (self.x + 11 + m * 2, self.y + 59))\n            m += 1\n\n        m = 1\n        z = self.mp // 5\n\n        while m <= z:\n            screen.blit(mptick, (self.x + 11 + m * 2, self.y + 65))\n            m += 1\n\n        [i.render(self.game.screen) for i in self.skill_list]\n\n    def tick(self):\n        if self.state != DEAD:\n            self.mp += MP_REGEN\n            self.hp += HP_REGEN\n            if self.mp > MAX_MP:\n                self.mp = MAX_MP\n            if self.hp > MAX_HP:\n                self.hp = MAX_HP\n            if pygame.time.get_ticks() > self.spell_casted + 1000:\n                self.state = ALIVE\n            if self.hp <= 0:\n                self.kill()\n            for i in self.skill_list:\n                if i.cd > 0:\n                    i.cd -= self.previous_tick\n                    if i.cd < 0:\n                        i.cd = 0\n\n        self.previous_tick = pygame.time.get_ticks() - self.previous_tick\n\n    def shoot_z(self):\n        if self.mp >= SKILL1_COST and self.state != SHOOT:\n            self.mp -= SKILL1_COST\n            self.state = SHOOT\n            self.spell_casted = pygame.time.get_ticks()\n\n\n    def __shoot__(self):\n        if self.direction == RIGHT:\n            sx = self.x + 12\n            sy = self.y\n        elif self.direction == DOWN:\n            sx = self.x\n            sy = self.y + 12\n        elif self.direction == LEFT:\n            sx = self.x - 12\n            sy = self.y\n        elif self.direction == UP:\n            sx = self.x\n            sy = self.y - 12\n        self.game.projective.append(Arrow(self.game, sx, sy, self.direction))\n\n    def __str__(self):\n        return '{0} ({1}, {2})'.format(self.name, self.x, self.y)","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"169539133","text":"# -*- coding: utf-8 -*-\nimport spider.taobao.taobao_spider.taobao_list_spider as tlg\nfrom store.store_mysql import StoreMysql\nimport datetime\nfrom spider import config\n\nclass ResultStore(object):\n    def __init__(self):\n        self.db = StoreMysql(**config.TAOBAO_DB)\n        # self.db = StoreMysql(host=\"127.0.0.1\", user=\"root\",password=\"\", db=\"eb_monitor\")\n\n    def store(self, results, device, complete_num):\n        
if len(results) <= 0:\n return\n s = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n keyword_id = results[0][\"keyword_id\"];\n for result in results:\n result[\"insert_date\"] = s\n result[\"device\"] = device\n self.db.save(table=\"eb_result_taobao\", data=result, mapping_fields={})\n # self.db.close()\n self.db.do(\"update keyword_task set complete_info=complete_info|{0} where id = {1}\".format(complete_num,keyword_id))\n # self.db.update(table=\"keyword_info_new\", data={\"last_query_date\": s, \"keyword_id\": keyword_id},\n # field=\"keyword_id\")\n\n\ndef main():\n results = tlg.get(\"牛仔裤\", 1)\n re = ResultStore()\n re.store(results)\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"store/tb_result_store/result_store.py","file_name":"result_store.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"300854349","text":"#!/usr/bin/python3\n\nimport os\nimport argparse\nimport sys\n\ndef testpings(ip_addr_list):\n\tos.system('touch pingtest.txt')\n\tfor ip in ip_addr_list:\n\t\tos.system('ping -c 3 %s > pingtest.txt'%ip)\n\n\t\tif \"100.0% packet loss\" in open('pingtest.txt').read():\n\t\t\tprint('Node %s Unreachable!!'%ip)\n\n\t\t#re-open file to place the file pointer to beginning\n\t\telif \"0.0% packet loss\" in open('pingtest.txt').read():\n\t\t\tprint('Node %s is Reachable.'%ip)\n\n\t\telse:\n\t\t\tprint('packet loss detected!! for %s'%ip)\n\n\t\t\n\texit()\n\nif len(sys.argv) < 2:\n\tprint (\"Usage: pingtest.py destination_node_addr\")\nelse :\n\ttestpings(sys.argv[1:])\n","sub_path":"pingtest.py","file_name":"pingtest.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"169673460","text":"import math\nimport numpy as np\nimport mdpClass\nimport rl2Class\nimport matplotlib.pyplot as plt\n\n# Test bandit implementations from the RL2 class (rl2Class.py) using transition (T),\n# reward (R) matrix, and discount factor initializations below. Performs nTrials trials\n# to eliminate noise. 
Plots the average reward in each iteration for each bandit method.\n\ndef sampleBernoulli(mean):\n    ''' function to obtain a sample from a Bernoulli distribution\n\n    Input: mean -- mean of the Bernoulli\n    \n    Output: sample -- sample (0 or 1)\n    '''\n\n    if np.random.rand(1) < mean: return 1\n    else: return 0\n\n    # Multi-arm bandit problems (3 arms with probabilities 0.3, 0.5 and 0.7)\nT = np.array([[[1]],[[1]],[[1]]])\nR = np.array([[0.3],[0.5],[0.7]])\ndiscount = 0.999\nmdp = mdpClass.MDP(T,R,discount)\nbanditProblem = rl2Class.RL2(mdp,sampleBernoulli)\n\navgrewards = np.zeros([3,200])\nnTrials = 1000\nseq = np.arange(1,201)\n\nfor i in range(nTrials):\n    # Test epsilon greedy strategy\n    avgrewards[0,:] += banditProblem.epsilonGreedyBandit(nIterations=200)\n    # Test UCB strategy\n    avgrewards[1,:] += banditProblem.UCBbandit(nIterations=200)\n    # Test Thompson sampling strategy\n    avgrewards[2,:] += banditProblem.thompsonSamplingBandit(prior=np.ones([mdp.nActions,2]),\n                                                            nIterations=200)\n    \navgrewards = avgrewards/nTrials\nplt.plot(seq,avgrewards[0,:],'b',label=\"Epsilon-greedy bandit\")\nplt.plot(seq,avgrewards[1,:],'r',label=\"UCB bandit\")\nplt.plot(seq,avgrewards[2,:],'g',label=\"Thompson sampling bandit\")\nplt.xlabel('Iteration')\nplt.ylabel('Average reward over 1000 trials')\nplt.legend(loc='upper left')\nplt.ylim(ymax=0.9)\nplt.show()","sub_path":"banditTests.py","file_name":"banditTests.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"474904009","text":"\"\"\"\nhttps://github.com/ozgur/python-linkedin\n\"\"\"\n\nfrom linkedin import linkedin\n\nAPI_KEY = 'wFNJekVpDCJtRPFX812pQsJee-gt0zO4X5XmG6wcfSOSlLocxodAXNMbl0_hw3Vl'\nAPI_SECRET = 'daJDa6_8UcnGMw1yuq9TjoO_PMKukXMo8vEMo7Qv5J-G3SPgrAV0FqFCd0TNjQyG'\nRETURN_URL = 'http://localhost:8000'\n\nauthentication = linkedin.LinkedInAuthentication(API_KEY, API_SECRET, RETURN_URL, linkedin.PERMISSIONS.enums.values())\nprint(authentication.authorization_url)  # open this url on your browser\napplication = linkedin.LinkedInApplication(authentication)\n\nauthentication.authorization_code = 'AQTXrv3Pe1iWS0EQvLg0NJA8ju_XuiadXACqHennhWih7iRyDSzAm5jaf3R7I8'\ntoken = authentication.get_access_token()\n\n# application = linkedin.LinkedInApplication(token='AQTFtPILQkJzXHrHtyQ0rjLe3W0I')\napplication = linkedin.LinkedInApplication(token=token)\n\n","sub_path":"linkedin_test.py","file_name":"linkedin_test.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"341738156","text":"# Write a function is_prime that takes one argument — a number from 0 to 1000 — and returns True if it is prime, and False otherwise.\n\ndef check_number(number:int):\n    if(number < 0 or number > 1000):\n        raise ValueError(\"The value must be between 0 and 1000\")\n\ndef is_prime(number):\n    check_number(number)\n    for i in range(2, number):\n        if((number%i)==0):\n            return False\n    return True\n\ndef is_prime2(number: int):\n    \"\"\"Returns 0 if the number is prime, and its smallest factor if it is composite\n    \"\"\"\n    check_number(number)\n    for i in range(2, number//2+1):\n        if((number%i)==0):\n            return i\n    return 0\n\nwhile True:\n    s = input()\n    if s=='':\n        break\n    number = int(s)\n    #print(\"{0} {1}\".format(number, \"prime\" if is_prime(number) else \"(hmm, what are those numbers called again?)\"))\n    try:\n        mul = is_prime2(number)\n    except ValueError as e:\n        if(len(e.args) > 0):\n            print(e.args[0])\n            
continue\n    print(\"{0} {1}\".format(number, \"prime\" if mul==0 else \"composite. Smallest factor - {}\".format(mul)))\n","sub_path":"PyHelloWorld/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"390568523","text":"import json\nimport logging\nfrom api.tweet.models import Tweet\n# For this demo, let's log ALL THE THINGS!\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndef respond(err, res=None):\n    # In a real app error messages shouldn't be sent to the end user.\n    # This is a security concern. However, in a demo, for debugging, it's okay.\n    body = {}\n    if err:\n        body = json.dumps({'error': err})\n    else:\n        body = json.dumps(res)\n    return {\n        'statusCode': '400' if err else '200',\n        'body': body,\n        'headers': {\n            'Content-Type': 'application/json',\n        },\n    }\n\ndef handler(event, context):\n    tweet = None\n    exception = None\n    try:\n        tweet = Tweet()\n        tweet.init_from_dict(json.loads(event['body']))\n        tweet.save()\n        logging.info(\"Tweet with id {} saved!\".format(tweet.id))\n    except Exception as ex:\n        exception = ex.args[0]\n        tweet = None\n        logging.error(\"Error saving tweet in post.py. Message: {}\".format(exception))\n\n    return respond(exception, tweet.to_dict() if tweet else {})\n\n\n    \n","sub_path":"api/tweet/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"163078924","text":"# -*- coding: utf-8 -*-\nfrom .base import *\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '3k37ir3&11a2of5d(ro6p)=bau9pgsq(@+p#b#ci7pbehm0au*'\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n\nALLOWED_HOSTS = ['*']\n\n\nTHIRD_PARTY_APPS_LOCAL = (\n    'debug_toolbar',\n    'django_extensions',\n    'crispy_forms',\n)\n\n\nINSTALLED_APPS += THIRD_PARTY_APPS_LOCAL\n\n\nMIDDLEWARE_CLASSES += (\n    'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        'NAME': 'postgres',\n        'USER': 'postgres',\n        'HOST': 'db',\n        'PORT': 5432,\n    }\n}\n\n\n# Pipeline configuration\nPIPELINE = {\n    'PIPELINE_ENABLED': False,\n}\n\nGRAPH_MODELS = {\n    'all_applications': True,\n    'group_models': True,\n}\n","sub_path":"horarios/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"53561792","text":"# -*- coding: utf-8 -*-\n\nfrom flask.ext.script 
import Manager\n\nfrom fbone import create_app\nfrom fbone.extensions import db\nfrom fbone.utils import MALE\n\n\napp = create_app()\nmanager = Manager(app)\n\n\n@manager.command\ndef run():\n \"\"\"Run in local machine.\"\"\"\n\n app.run()\n\n\n@manager.command\ndef initdb():\n \"\"\"Init/reset database.\"\"\"\n\n db.drop_all()\n db.create_all()\n\nmanager.add_option('-c', '--config',\n dest=\"config\",\n required=False,\n help=\"config file\")\n\nif __name__ == \"__main__\":\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"14588107","text":"class Media(object):\n\n def __init__(self, media_id, kindle_asin, media_type, priority, platform, video_id, original_width,\n original_height, image_url, metadata_url):\n self.id = media_id\n self.kindle_asin = kindle_asin\n self.media_type = media_type\n self.priority = priority\n self.platform = platform\n self.video_id = video_id\n self.original_width = original_width\n self.original_height = original_height\n self.image_url = image_url\n self.metadata_url = metadata_url\n","sub_path":"ph_py/models/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"469196220","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for recurring_tasks.\"\"\"\n\nimport unittest\nfrom unittest import mock\n\nimport flask\nimport webtest\n\nfrom clusterfuzz._internal.datastore import data_types\nfrom clusterfuzz._internal.tests.test_libs import helpers\nfrom clusterfuzz._internal.tests.test_libs import test_utils\nfrom handlers.cron import recurring_tasks\n\n\n@test_utils.with_cloud_emulators('datastore')\nclass OpenReproducibleTestcaseTasksSchedulerTest(unittest.TestCase):\n \"\"\"Tests OpenReproducibleTestcaseTasksScheduler.\"\"\"\n\n def setUp(self):\n flaskapp = flask.Flask('testflask')\n flaskapp.add_url_rule(\n '/schedule-open-reproducible-testcase-tasks',\n view_func=recurring_tasks.OpenReproducibleTestcaseTasksScheduler.\n as_view('/schedule-open-reproducible-testcase-tasks'))\n self.app = webtest.TestApp(flaskapp)\n\n self.testcase_0 = data_types.Testcase(\n open=True,\n one_time_crasher_flag=False,\n status='Processed',\n job_type='job',\n queue='jobs-linux')\n self.testcase_0.put()\n\n self.testcase_1 = data_types.Testcase(\n open=False,\n one_time_crasher_flag=False,\n status='Processed',\n job_type='job',\n queue='jobs-linux')\n self.testcase_1.put()\n\n self.testcase_2 = data_types.Testcase(\n open=True,\n one_time_crasher_flag=True,\n status='Processed',\n job_type='job',\n queue='jobs-linux')\n self.testcase_2.put()\n\n self.testcase_3 = data_types.Testcase(\n open=True,\n one_time_crasher_flag=False,\n status='NA',\n job_type='job',\n queue='jobs-linux')\n self.testcase_3.put()\n\n self.testcase_4 = data_types.Testcase(\n 
open=True,\n        one_time_crasher_flag=False,\n        status='Processed',\n        job_type='job_windows',\n        queue='jobs-windows')\n    self.testcase_4.put()\n\n    data_types.Job(name='job', environment_string='', platform='LINUX').put()\n    data_types.Job(\n        name='job_windows', environment_string='', platform='WINDOWS').put()\n\n    helpers.patch(self, [\n        'handlers.base_handler.Handler.is_cron',\n    ])\n\n  def test_execute(self):\n    \"\"\"Tests that we don't directly use this scheduler.\"\"\"\n    with self.assertRaises(webtest.AppError):\n      self.app.get('/schedule-open-reproducible-testcase-tasks')\n\n\nclass ProgressionTasksSchedulerTest(OpenReproducibleTestcaseTasksSchedulerTest):\n  \"\"\"Tests ProgressionTasksScheduler.\"\"\"\n\n  def setUp(self):\n    super().setUp()\n    flaskapp = flask.Flask('testflask')\n    flaskapp.add_url_rule(\n        '/schedule-progression-tasks',\n        view_func=recurring_tasks.ProgressionTasksScheduler.as_view(\n            '/schedule-progression-tasks'))\n    self.app = webtest.TestApp(flaskapp)\n\n    helpers.patch(self, [\n        'clusterfuzz._internal.base.tasks.add_task',\n    ])\n\n  def test_execute(self):\n    \"\"\"Tests scheduling of progression tasks.\"\"\"\n    self.app.get('/schedule-progression-tasks')\n    self.mock.add_task.assert_has_calls([\n        mock.call('progression', 1, 'job', queue='jobs-linux'),\n        mock.call('progression', 5, 'job_windows', queue='jobs-windows')\n    ])\n\n\nclass ImpactTasksSchedulerTest(OpenReproducibleTestcaseTasksSchedulerTest):\n  \"\"\"Tests ImpactTasksScheduler.\"\"\"\n\n  def setUp(self):\n    super().setUp()\n    flaskapp = flask.Flask('testflask')\n    flaskapp.add_url_rule(\n        '/schedule-impact-tasks',\n        view_func=recurring_tasks.ImpactTasksScheduler.as_view(\n            '/schedule-impact-tasks'))\n    self.app = webtest.TestApp(flaskapp)\n    helpers.patch(self, [\n        'clusterfuzz._internal.base.tasks.add_task',\n    ])\n\n  def test_execute(self):\n    \"\"\"Tests scheduling of impact tasks.\"\"\"\n    self.app.get('/schedule-impact-tasks')\n    self.mock.add_task.assert_has_calls([\n        mock.call('impact', 1, 'job', queue='jobs-linux'),\n        mock.call('impact', 5, 'job_windows', queue='jobs-windows'),\n    ])\n","sub_path":"src/clusterfuzz/_internal/tests/appengine/handlers/cron/recurring_tasks_test.py","file_name":"recurring_tasks_test.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"611847732","text":"def encode(message, key):\r\n    \r\n    key_line = ''\r\n    encode_str = ''\r\n\r\n    while (len(key_line) < len(message)):\r\n        key_line += key\r\n    \r\n    key_line = key_line[:len(message)]\r\n\r\n    for i in range(0, len(message)):\r\n        index = (ord(message[i]) + ord(key_line[i])) % 1104\r\n        encode_str += chr(index)\r\n\r\n    print('\\nENCRYPTED MESSAGE: \\n{}'.format(encode_str))\r\n\r\n    f = open('enc.txt', 'w')\r\n    f.write(encode_str)\r\n    f.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    print('\\nMESSAGE:\\n')\r\n    message = open('message.txt', 'r').read()\r\n\r\n    print(message)\r\n\r\n    key = input('\\nENTER THE KEY:')\r\n    \r\n    encode(message, key)\r\n","sub_path":"VII/MAiSABPO/LAB_1/vigenere_enc.py","file_name":"vigenere_enc.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"367732963","text":"from flask import abort\nfrom flask_restx import Resource, Namespace, Model, fields, reqparse\nfrom backend.infraestructura.planes_repo import planesRepo\n\nrepo = planesRepo()\n\nnsPlan = Namespace('planes', description='Administrador de 
planes')\n\nmodeloPlanSinID = Model('PlanSinCod',{\n    'tipo': fields.String(),\n    'descripcion': fields.String(),\n    'porcentaje_ganancia': fields.Integer(),\n    'costo': fields.Float()\n})\n\nmodeloPlan = modeloPlanSinID.clone('Plan',{\n    'codigo': fields.Integer(),\n\n})\n\nnsPlan.models[modeloPlan.name] = modeloPlan\nnsPlan.models[modeloPlanSinID.name] = modeloPlanSinID\n\nnuevoPlanParser = reqparse.RequestParser(bundle_errors=True)\nnuevoPlanParser.add_argument('tipo', type=str, required=True)\nnuevoPlanParser.add_argument('descripcion', type=str)\nnuevoPlanParser.add_argument('costo', type=float)\nnuevoPlanParser.add_argument('porcentaje_ganancia', type=int, required=True)\n\neditarPlanParser = nuevoPlanParser.copy()\neditarPlanParser.add_argument('codigo',type=int, required=True)\n\n@nsPlan.route('/')\nclass PlanResource(Resource):\n    @nsPlan.marshal_list_with(modeloPlan)\n    def get(self):\n        return repo.get_all()\n\n    @nsPlan.expect(modeloPlanSinID)\n    @nsPlan.marshal_with(modeloPlan)\n    def post(self):\n        data = nuevoPlanParser.parse_args()\n        p = repo.agregar(data)\n        if p:\n            return p, 200\n        abort(500)\n\n@nsPlan.route('/<int:id>')\nclass PlanItemResource(Resource):\n    @nsPlan.marshal_with(modeloPlan)\n    def get(self, id):\n        p = repo.get_by_id(id)\n        if p:\n            return p, 200\n        abort(404)\n\n    def delete(self, id):\n        if repo.borrar(id):\n            return 'Plan Eliminado', 200\n        abort(400)\n\n    @nsPlan.expect(modeloPlan)\n    def put(self, id):\n        data = editarPlanParser.parse_args()\n        if repo.modificar(id,data):\n            return 'Plan actualizado', 200\n        abort(404)","sub_path":"backend/api/planes_api.py","file_name":"planes_api.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"164226061","text":"import numpy as np\r\nfrom scipy import integrate\r\nimport matplotlib.pyplot as plt\r\n\r\ndef population(t, x): \r\n    '''\r\n    Malthusian model\r\n    dx/dt = gamma*x\r\n    '''\r\n    f = np.zeros_like(x)\r\n    gamma = 0.03\r\n    f[0] = gamma*x[0]\r\n    return f\r\n\r\nx0 = np.zeros(1)\r\nx0[0] = 5500\r\n\r\nsyear = 1920\r\neyear = 2008\r\nn = 3000\r\n\r\nmethods = ['RK45','Radau']\r\n\r\nfor s in methods:\r\n    sol = integrate.solve_ivp(population, [syear, eyear], x0, method=s, dense_output=True)\r\n    t = np.linspace(syear, eyear, n)\r\n    z = sol.sol(t)\r\n    plt.subplot(2, 1, methods.index(s)+1)\r\n    plt.plot(t, z.T)\r\n    plt.legend(['N'])\r\n    plt.title(s)\r\n    \r\nplt.savefig(\"malthusian.png\")","sub_path":"l11/malthusian_model.py","file_name":"malthusian_model.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"53333091","text":"# Import Required Libraries\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append('../Scripts')\n\n# Make 'PLAYER_DICT'\nplayers = pd.read_csv('../Resources/active_players.csv')\nplayers.index = players['Lineup_name']\nplayers.drop(columns=['Lineup_name'], inplace=True)\nPLAYER_DICT = players.T.to_dict('list')\n\n# Make 'PAST_DICT'\nPAST_DICT = {'ATLANTA_HAWKS': 'ATL',\n             'BOSTON_CELTICS': 'BOS',\n             'BROOKLYN_NETS': 'BRK',\n             'CHARLOTTE_BOBCATS': 'CHO',\n             'CHARLOTTE_HORNETS': 'CHO',\n             'CHICAGO_BULLS': 'CHI',\n             'CLEVELAND_CAVALIERS': 'CLE',\n             'DALLAS_MAVERICKS': 'DAL',\n             'DENVER_NUGGETS': 'DEN',\n             'DETROIT_PISTONS': 'DET',\n             'GOLDEN_STATE_WARRIORS': 'GSW',\n             'HOUSTON_ROCKETS': 'HOU',\n             'INDIANA_PACERS': 'IND',\n             'LOS_ANGELES_CLIPPERS': 'LAC',\n             'LOS_ANGELES_LAKERS': 'LAL',\n             'MEMPHIS_GRIZZLIES': 'MEM',\n             'MIAMI_HEAT': 'MIA',\n 
'MILWAUKEE_BUCKS': 'MIL',\n 'MINNESOTA_TIMBERWOLVES': 'MIN',\n 'NEW_ORLEANS_HORNETS': 'NOP',\n 'NEW_ORLEANS_PELICANS': 'NOP',\n 'NEW_YORK_KNICKS': 'NYK',\n 'OKLAHOMA_CITY_THUNDER': 'OKC',\n 'ORLANDO_MAGIC': 'ORL',\n 'PHILADELPHIA_76ERS': 'PHI',\n 'PHOENIX_SUNS': 'PHO',\n 'PORTLAND_TRAIL_BLAZERS': 'POR',\n 'SACRAMENTO_KINGS': 'SAC',\n 'SAN_ANTONIO_SPURS': 'SAS',\n 'TORONTO_RAPTORS': 'TOR',\n 'UTAH_JAZZ': 'UTA',\n 'WASHINGTON_WIZARDS': 'WAS'}\n\n\n# Make 'CURR_DICT'\nCURR_DICT = {'ATLANTA_HAWKS': 'ATLANTA',\n'BOSTON_CELTICS': 'BOSTON',\n 'BROOKLYN_NETS': 'BROOKLYN',\n 'CHARLOTTE_HORNETS': 'CHARLOTTE',\n 'CHICAGO_BULLS': 'CHICAGO',\n 'CLEVELAND_CAVALIERS': 'CLEVELAND',\n 'DALLAS_MAVERICKS': 'DALLAS',\n 'DENVER_NUGGETS': 'DENVER',\n 'DETROIT_PISTONS': 'DETROIT',\n 'GOLDEN_STATE_WARRIORS': 'GOLDEN STATE',\n 'HOUSTON_ROCKETS': 'HOUSTON',\n 'INDIANA_PACERS': 'INDIANA',\n 'LOS_ANGELES_CLIPPERS': 'LA CLIPPERS',\n 'LOS_ANGELES_LAKERS': 'LA LAKERS',\n 'MEMPHIS_GRIZZLIES': 'MEMPHIS',\n 'MIAMI_HEAT': 'MIAMI',\n 'MILWAUKEE_BUCKS': 'MILWAUKEE',\n 'MINNESOTA_TIMBERWOLVES': 'MINNESOTA',\n 'NEW_ORLEANS_PELICANS': 'NEW ORLEANS',\n 'NEW_YORK_KNICKS': 'NEW YORK',\n 'OKLAHOMA_CITY_THUNDER': 'OKLAHOMA CITY',\n 'ORLANDO_MAGIC': 'ORLANDO',\n 'PHILADELPHIA_76ERS': 'PHILADELPHIA',\n 'PHOENIX_SUNS': 'PHOENIX',\n 'PORTLAND_TRAIL_BLAZERS': 'PORTLAND',\n 'SACRAMENTO_KINGS': 'SACRAMENTO',\n 'SAN_ANTONIO_SPURS': 'SAN ANTONIO',\n 'TORONTO_RAPTORS': 'TORONTO',\n 'UTAH_JAZZ': 'UTAH',\n 'WASHINGTON_WIZARDS': 'WASHINGTON'}\n\n\n\n\n\ndef starter_fetcher(df, team_location):\n df.TEAM = df.TEAM.str.upper()\n df.index = df.TEAM\n df = df[['PG','SG','SF','PF','C']]\n df = df.T\n players = list(df[team_location])\n return players\n\ndef teams_fetcher(df):\n teams = list(df.TEAM.str.upper())\n return teams\n\ndef lineup_name_converter(string):\n name = PLAYER_DICT[string]\n return name\n\ndef schedule_past_team_converter(string):\n name = PAST_DICT[string]\n return name\n\ndef schedule_curr_team_converter(string):\n name = CURR_DICT[string]\n return name","sub_path":"Scripts/starting_lineup_functions.py","file_name":"starting_lineup_functions.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"475721236","text":"from load_merge_otu_mf import OtuMfHandler\r\nfrom preprocess import preprocess_data\r\nfrom pca import *\r\nimport scipy\r\nfrom plot_confusion_matrix import *\r\nimport pandas as pd\r\nimport math\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom matplotlib.ticker import PercentFormatter\r\nimport math\r\nimport seaborn as sns; sns.set(color_codes=True)\r\nimport operator\r\nfrom sklearn.model_selection import train_test_split, RepeatedStratifiedKFold,LeaveOneOut, KFold\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import metrics, svm\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom xgboost import XGBClassifier\r\n\r\notu = 'C:/Users/Anna/Desktop/docs/otu_psc2.csv'\r\nmapping = 'C:/Users/Anna/Desktop/docs/mapping_psc.csv'\r\nmax_num_of_pcas = 35\r\nOtuMf = OtuMfHandler(otu, mapping, from_QIIME=False)\r\npreproccessed_data = preprocess_data(OtuMf.otu_file, visualize_data=False, taxnomy_level=7)\r\nmapping_file = OtuMf.mapping_file\r\n\r\nmapping_disease = {'Control':0,'Cirrhosis ':1, 'HCC':1, 'PSC+IBD':2,'PSC':2}\r\nmapping_file['DiagnosisGroup'] = mapping_file['DiagnosisGroup'].map(mapping_disease)\r\nmappin_boolean = {'yes' :1, 'no': 0, 'Control': 0, '0':0, 
'1':1}\r\nmapping_file['FattyLiver'] = mapping_file['FattyLiver'].map(mappin_boolean)\r\nmapping_file['RegularExercise'] = mapping_file['RegularExercise'].map(mappin_boolean)\r\nmapping_file['Smoking'] = mapping_file['Smoking'].map(mappin_boolean)\r\n\r\ncols = [col for col in preproccessed_data.columns if len(preproccessed_data[col].unique()) !=1]\r\ndict_bact ={'else':[]}\r\nfor col in preproccessed_data[cols]:\r\n col_name = preproccessed_data[col].name.split(';')\r\n # if 'c__' in col_name[-1]:\r\n # if col_name[-1] in dict_bact:\r\n # dict_bact[col_name[-1]].append(preproccessed_data[col].name)\r\n # else:\r\n # dict_bact[col_name[-1]] = [preproccessed_data[col].name]\r\n # else:\r\n # dict_bact['else'].append(preproccessed_data[col].name)\r\n if len(col_name)>2:\r\n if col_name[2] in dict_bact:\r\n dict_bact[col_name[2]].append(preproccessed_data[col].name)\r\n else:\r\n dict_bact[col_name[2]] = [preproccessed_data[col].name]\r\n else:\r\n dict_bact['else'].append(preproccessed_data[col].name)\r\n print(col_name[-1])\r\n\r\nnew_df = pd.DataFrame(index = preproccessed_data.index)\r\ncol = 0\r\nfor key, values in dict_bact.items():\r\n new_data = preproccessed_data[values]\r\n pca = PCA(n_components=round(new_data.shape[1] / 2)+1)\r\n pca.fit(new_data)\r\n sum = 0\r\n num_comp = 0\r\n for (i, component) in enumerate(pca.explained_variance_ratio_):\r\n if sum <= 0.5:\r\n sum += component\r\n else:\r\n num_comp = i\r\n break\r\n if num_comp == 0:\r\n num_comp += 1\r\n otu_after_pca_new, pca_components = apply_pca(new_data, n_components=num_comp)\r\n for j in range(otu_after_pca_new.shape[1]):\r\n new_df[col+j] = otu_after_pca_new[j]\r\n col += num_comp\r\n\r\n\r\n#visualize_pca(new_df)\r\npcas =[]\r\ntrain_accuracy = []\r\ntest_accuracy = []\r\nfor n_comp in range(1, max_num_of_pcas):\r\n pcas.append(n_comp)\r\n otu_ap, _ = apply_pca(new_df, n_components=n_comp)\r\n new_df2 = otu_ap.join(mapping_file[['Age', 'BMI', 'FattyLiver','RegularExercise', 'Smoking', 'DiagnosisGroup']], how='inner')\r\n new_df2 = new_df2.fillna(0)\r\n\r\n X= new_df2.drop(['DiagnosisGroup'], axis =1)\r\n regex = re.compile(r\"\\[|\\]|<\", re.IGNORECASE)\r\n X.columns = [regex.sub(\"_\", col) if any(x in str(col) for x in set(('[', ']', '<'))) else col for col in X.columns.values]\r\n\r\n y = new_df2['DiagnosisGroup']\r\n\r\n loo = LeaveOneOut()\r\n y_pred_list = []\r\n auc = []\r\n auc_train = []\r\n for train_index, test_index in loo.split(X):\r\n train_index = list(train_index)\r\n # print(\"%s %s\" % (train_index, test_index))\r\n X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]\r\n y_train, y_test = y[train_index], y[test_index]\r\n model = XGBClassifier(max_depth=4, n_estimators=150, learning_rate=15 / 100,\r\n objective='multi:softmax', reg_lambda=150\r\n #objective='binary:logistic',\r\n #scale_pos_weight=(np.sum(y_train == -1) / np.sum(y_train == 1)),\r\n )\r\n model.fit(X_train, y_train)\r\n pred_train = model.predict(X_train)\r\n auc_train.append(metrics.accuracy_score(y_train, pred_train))\r\n y_pred = model.predict(X_test)\r\n y_pred_list.append(y_pred[0])\r\n\r\n auc = metrics.accuracy_score(y, y_pred_list)\r\n scores = round(auc, 2)\r\n scores_train = round(np.array(auc_train).mean(), 2)\r\n train_accuracy.append(scores_train)\r\n test_accuracy.append(round(scores.mean(), 2))\r\n\r\ntrain_accuracy_all = []\r\ntest_accuracy_all = []\r\ndef pca_graph(max_num_of_pcas = max_num_of_pcas):\r\n for i in range (1,max_num_of_pcas):\r\n otu_after_pca, _ = apply_pca(preproccessed_data, 
n_components=i)\r\n merged_data = otu_after_pca.join(mapping_file[['Age', 'BMI', 'FattyLiver','RegularExercise', 'Smoking','DiagnosisGroup']])\r\n\r\n merged_data.fillna(0)\r\n\r\n # mapping_disease = {'Control':0,'Cirrhosis ':1, 'HCC':1, 'PSC+IBD':2,'PSC':2}\r\n # merged_data['DiagnosisGroup'] = merged_data['DiagnosisGroup'].map(mapping_disease)\r\n # merged_data = merged_data.join(OtuMf.mapping_file[['Age', 'BMI', 'FattyLiver','RegularExercise', 'Smoking']])\r\n # mappin_boolean = {'yes' :1, 'no': 0, 'Control': 0, '0':0, '1':1}\r\n # merged_data['FattyLiver'] = merged_data['FattyLiver'].map(mappin_boolean)\r\n # merged_data['RegularExercise'] = merged_data['RegularExercise'].map(mappin_boolean)\r\n # merged_data['Smoking'] = merged_data['Smoking'].map(mappin_boolean)\r\n\r\n X = merged_data.loc[:, merged_data.columns != 'DiagnosisGroup']\r\n y = merged_data['DiagnosisGroup']\r\n\r\n loo = LeaveOneOut()\r\n y_pred_list = []\r\n x_indx = []\r\n y_pred_train =[]\r\n for train_index, test_index in loo.split(X):\r\n train_index = list(train_index)\r\n # print(\"%s %s\" % (train_index, test_index))\r\n X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]\r\n y_train, y_test = y[train_index], y[test_index]\r\n model = XGBClassifier(max_depth=4, n_estimators=150, learning_rate=15/ 100,\r\n objective='multi:softmax', reg_lambda=150)#, reg_lambda=550)\r\n # # # #objective= 'binary:logistic')\r\n model.fit(X_train, y_train)\r\n x_indx.append(X_test.index[0])\r\n y_pred = model.predict(X_test)\r\n y_pred_list.append(y_pred[0])\r\n y_pre_tr = model.predict(X_train)\r\n accuracy_train = metrics.accuracy_score(y_train,y_pre_tr)\r\n y_pred_train.append(accuracy_train)\r\n scores = np.array(metrics.accuracy_score(y, y_pred_list))\r\n scores_train = round(np.array(y_pred_train).mean(), 2)\r\n train_accuracy_all.append(scores_train)\r\n test_accuracy_all.append(round(scores.mean(), 2))\r\npca_graph(max_num_of_pcas = max_num_of_pcas)\r\ndef plot_graph(test_accuracy, train_accuracy, train_accuracy_all, test_accuracy_all, pcas):\r\n plt.plot(pcas,test_accuracy, color ='red', label ='test_fs')\r\n plt.plot(pcas,train_accuracy, color='blue', label ='train_fs')\r\n plt.plot(pcas, test_accuracy_all, color='orange', label='test')\r\n plt.plot(pcas, train_accuracy_all, color='black', label='train')\r\n plt.legend( loc=1,ncol=1)\r\n plt.show()\r\nplot_graph(test_accuracy,train_accuracy,train_accuracy_all, test_accuracy_all, pcas)\r\n\r\nprint('done')\r\n","sub_path":"anna/microbiome/distance_learning_PSC.py","file_name":"distance_learning_PSC.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"425570912","text":"import decimal\nfrom datetime import date, datetime, timedelta\nfrom unittest.mock import patch\n\nimport pytest\nfrom django.test import TestCase\n\nfrom api.booking import booking_service\nfrom api.booking.booking_model import Payment, HotelBookingRequest, Customer, Traveler, PaymentMethod, SubmitErrorType\nfrom api.common import cache_storage\nfrom api.common.models import RoomOccupancy, RoomRate, RateType, Address\nfrom api.hotel.adapters import hotel_service\nfrom api.hotel.adapters.stub.stub import StubHotelAdapter\nfrom api.hotel.hotel_model import HotelLocationSearch\nfrom api.models import models\nfrom api.models.models import Booking, BookingStatus, HotelBooking\nfrom api.tests import to_money, test_objects\nfrom api.view.exceptions import PaymentException\n\n\nclass 
TestBookingService(TestCase):\n def test_booking_request_validation(self):\n address = {\n \"city\": \"San Francisco\",\n \"province\": \"CA\",\n \"postal_code\": \"94111\",\n \"country\": \"US\",\n \"address1\": \"One Market Street\",\n }\n\n with pytest.raises(ValueError) as e:\n Payment.Schema().load({\"payment_method\": \"CREDIT_CARD\", \"billing_address\": address})\n\n self.assertIn(\"Must set payment_card_parameters when payment_method is CREDIT_CARD\", str(e))\n\n with pytest.raises(ValueError) as e:\n Payment.Schema().load({\"payment_method\": \"PAYMENT_TOKEN\", \"billing_address\": address})\n\n self.assertIn(\"Must set payment_token when payment_method is PAYMENT_TOKEN\", str(e))\n\n def test_stub_booking(self):\n booking_request = HotelBookingRequest(\n api_version=1,\n transaction_id=\"foo\",\n hotel_id=\"ABC123\",\n checkin=date(2020, 1, 1),\n checkout=date(2020, 1, 1),\n language=\"en_US\",\n customer=Customer(\n first_name=\"John\", last_name=\"Doe\", phone_number=\"5558675309\", email=\"john@doe.foo\", country=\"US\"\n ),\n traveler=Traveler(\"John\", \"Doe\", occupancy=RoomOccupancy(adults=1)),\n room_rates=[\n RoomRate(\n \"foo-rate-key\",\n RateType.BOOKABLE,\n description=\"Room Booking\",\n additional_detail=[],\n total_base_rate=to_money(\"100.99\"),\n total_tax_rate=to_money(\"20.00\"),\n total=to_money(\"120.99\"),\n daily_rates=[],\n )\n ],\n payment=Payment(\n billing_address=Address(\n city=\"San Francisco\", province=\"CA\", postal_code=\"94111\", country=\"US\", address1=\"One Market Street\"\n ),\n payment_method=PaymentMethod.PAYMENT_TOKEN,\n payment_token=\"token_foo\",\n ),\n crs=StubHotelAdapter.CRS_NAME,\n )\n\n cache_storage.set(\"foo-rate-key\", booking_request.room_rates[0])\n\n with patch(\"api.payments.payment_service.authorize_payment\"):\n response = booking_service.book(booking_request)\n\n self.assertEqual(1, response.api_version)\n self.assertIsNotNone(response.transaction_id)\n self.assertTrue(response.status.success)\n self.assertEqual(\"Success\", response.status.message)\n self.assertEqual(\"ABC123\", response.reservation.hotel_id)\n self.assertIsNotNone(response.reservation.locator)\n self.assertEqual(\"2020-01-01\", str(response.reservation.checkin))\n self.assertEqual(\"2020-01-01\", str(response.reservation.checkout))\n self.assertEqual(\"John\", response.reservation.customer.first_name)\n self.assertEqual(\"Doe\", response.reservation.customer.last_name)\n self.assertEqual(\"5558675309\", response.reservation.customer.phone_number)\n\n booking = Booking.objects.get(transaction_id=response.transaction_id)\n self.assertEqual(response.transaction_id, booking.transaction_id)\n self.assertIsNotNone(booking.booking_id)\n self.assertEqual(BookingStatus.BOOKED.value, booking.booking_status)\n\n self.assertEqual(\"John\", booking.lead_traveler.first_name)\n self.assertEqual(\"Doe\", booking.lead_traveler.last_name)\n self.assertEqual(\"US\", booking.lead_traveler.country)\n self.assertEqual(\"john@doe.foo\", booking.lead_traveler.email_address)\n self.assertEqual(\"5558675309\", booking.lead_traveler.phone_number)\n\n hotel_bookings = models.HotelBooking.objects.filter(booking__booking_id=booking.booking_id)\n\n self.assertEqual(\"Hotel Name\", hotel_bookings[0].hotel_name)\n self.assertEqual(\"stub\", hotel_bookings[0].crs_name)\n self.assertEqual(\"ABC123\", hotel_bookings[0].hotel_code)\n self.assertIsNotNone(\"foo\", hotel_bookings[0].record_locator)\n self.assertEqual(decimal.Decimal(\"120.99\"), hotel_bookings[0].total_price)\n 
self.assertEqual(\"USD\", hotel_bookings[0].currency)\n\n def test_stub_booking_with_invalid_payment(self):\n invalid_card_number_payment = test_objects.payment(\"4000000000000002\")\n booking_request = test_objects.booking_request(payment_obj=invalid_card_number_payment)\n\n with pytest.raises(PaymentException) as e:\n booking_service.book(booking_request)\n\n assert e.value.error_type == SubmitErrorType.PAYMENT_DECLINED\n\n def test_hotelbeds_booking(self):\n checkin = datetime.now().date() + timedelta(days=30)\n checkout = datetime.now().date() + timedelta(days=35)\n search = HotelLocationSearch(\n start_date=checkin,\n end_date=checkout,\n occupancy=RoomOccupancy(adults=1),\n location_name=\"SFO\",\n crs=\"hotelbeds\",\n )\n\n availability_response = hotel_service.search_by_location(search)\n room_types = [room_type for hotel in availability_response for room_type in hotel.room_types]\n room_rates = [rate for room_type in room_types for rate in room_type.rates]\n bookable_rates = [rate for rate in room_rates if rate.rate_type == RateType.BOOKABLE]\n\n room_rate_to_book = bookable_rates[0]\n\n booking_request = test_objects.booking_request(rate=room_rate_to_book)\n booking_response = booking_service.book(booking_request)\n\n crs_rate: RoomRate = cache_storage.get(room_rate_to_book.rate_key)\n assert booking_response.reservation.room_rates[0].rate_key == crs_rate.rate_key\n\n hotel_booking = HotelBooking.objects.filter(record_locator=booking_response.reservation.locator.id).first()\n assert hotel_booking.crs_total_price == crs_rate.total.amount\n assert hotel_booking.total_price == room_rate_to_book.total.amount\n","sub_path":"api/tests/integration/test_booking_service.py","file_name":"test_booking_service.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"410384065","text":"from admm_research import LOGGER, flags, app, config_logger\nfrom admm_research.dataset import MedicalImageDataset, segment_transform, augment, get_dataset_root\nfrom admm_research.method import get_method, get_method_class, AdmmSize, AdmmGCSize, ADMM_size_inequality,fullysupervised,ADMM_reg_size_inequality\nfrom admm_research.loss import get_loss_fn\nfrom admm_research.arch import get_arch\nfrom admm_research.trainer import ADMM_Trainer\nfrom admm_research.utils import extract_from_big_dict\nimport torch\nimport warnings\n\nwarnings.filterwarnings('ignore')\ntorch.set_num_threads(1)\n\ndef build_datasets(hparams):\n root_dir = get_dataset_root(hparams['dataroot'])\n train_dataset = MedicalImageDataset(root_dir, 'train', transform=segment_transform((256, 256)),\n augment=augment if hparams['data_aug'] else None, equalize=hparams['data_equ'])\n val_dataset = MedicalImageDataset(root_dir, 'val', transform=segment_transform((256, 256)), augment=None, equalize=hparams['data_equ'])\n\n return train_dataset, val_dataset\n\ndef check_consistance(hparams):\n if hparams['method']=='fullysupervised':\n assert hparams['loss']=='cross_entropy'\n assert hparams['num_admm_innerloop']==1\n else:\n assert hparams['loss']=='partial_ce'\n assert hparams['num_admm_innerloop']>1,hparams['num_admm_innerloop']\n assert hparams['batch_size']==1,hparams['batch_size']\n\ndef run(argv):\n del argv\n\n hparams = flags.FLAGS.flag_values_dict()\n check_consistance(hparams)\n train_dataset, val_dataset = build_datasets(hparams)\n\n arch_hparams = extract_from_big_dict(hparams, AdmmGCSize.arch_hparam_keys)\n torchnet = get_arch(arch_hparams['arch'], 
arch_hparams)\n\n admm = get_method(hparams['method'], torchnet, **hparams)\n criterion = get_loss_fn(hparams['loss'])\n trainer = ADMM_Trainer(admm, [train_dataset, val_dataset], criterion, hparams)\n trainer.start_training()\n\n\nif __name__ == '__main__':\n torch.manual_seed(41)\n flags.DEFINE_string('dataroot', default='cardiac', help='the name of the dataset')\n flags.DEFINE_boolean('data_aug', default=False, help='data_augmentation')\n flags.DEFINE_string('loss',default='partial_ce',help='loss used in admm loop')\n flags.DEFINE_boolean('data_equ',default=False, help='data equalization')\n # AdmmSize.setup_arch_flags()\n # AdmmGCSize.setup_arch_flags()\n # ADMM_size_inequality.setup_arch_flags()\n ADMM_reg_size_inequality.setup_arch_flags()\n ADMM_Trainer.setup_arch_flags()\n app.run(run)\n","sub_path":"train_in.py","file_name":"train_in.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"587382629","text":"import numpy as np\n\n\n#filename='../../../configs/hydro/3D_128_spic_asc.ini'\n\n#called as\n#from sacio import *\n#alldat,modelinfo=read_sac_ascii('../../../configs/hydro/3D_128_spic_asc.ini')\n\n\ndef read_sac_ascii(filename):\n file = open(filename,'rb')\n \n #read 5 sac file header lines\n \n #1 opozmf_mhd22 #name line \n header=file.readline() \n #2 0 0.00000E+00 2 6 10\n head1=file.readline()\n head1=head1.strip()\n head1col=head1.split()\n \n #3 252 252 252 #2D has 2 values\n head2=file.readline()\n head2=head2.strip()\n head2col=head2.split()\n \n #4 1.66667E+00 0.00000E+00 1.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00 #2D\n #4 1.66667E+00 0.00000E+00 1.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00 #3D\n head3=file.readline()\n head3=head3.strip()\n head3col=head3.split()\n \n #5 x y h m1 m2 e b1 b2 eb rhob bg1 bg2 gamma eta grav1 grav2 #2D\n #5 x y h m1 m2 m3 e b1 b2 b3 eb rhob bg1 bg2 bg3 gamma eta grav1 grav2 grav3 #3D\n head4=file.readline()\n head4=head4.strip()\n head4col=head4.split()\n\n \n #2 0 0.00000E+00 2 6 10\n nits=int(head1col[0])\n time=float(head1col[1])\n ndim=int(head1col[2])\n nvar=int(head1col[3])\n nfields=int(head1col[4])\n \n \n #3 252 252\n \n if ndim==2:\n \tdim=[0,0]\n \tdim[0]=int(head2col[0])\n \tdim[1]=int(head2col[1])\n \n if ndim==3:\n dim=[0,0,0]\n dim[0]=int(head2col[0])\n dim[1]=int(head2col[1])\n dim[2]=int(head2col[2])\n \n modelinfo=(header,nits, time, ndim, nvar, nfields,dim,head3,head4)\n #extract useful information from header lines\n \n if ndim==2:\n alldat=np.zeros((dim[0]*dim[1],ndim+nfields))\n elif ndim==3:\n alldat=np.zeros((dim[0]*dim[1]*dim[2],ndim+nfields)) \n \n #extract components from each line\n count=0\n for line in file:\t\n line=line.strip()\n columns=line.split()\n for i in range(ndim+nfields):\n alldat[count,i]=float(columns[i])\n count=count+1\n \n #using fortran ordering\n #original sac is fortran and same ordering has been adopted\n if ndim==3:\n alldat=np.reshape(alldat,(dim[0],dim[1],dim[2],nfields+ndim),order='F')\n elif ndim==2:\n alldat=np.reshape(alldat,(dim[0],dim[1],nfields+ndim),order='F')\n \n \n \t\n file.close()\n \n return alldat,modelinfo\n\n\n\n#filename='../../../configs/sac_test_asc.ini'\n\n#called as\n#alldat,modelinfo=read_sac_ascii('../../../configs/sac_test_asc.ini')\n\n\n\ndef write_sac_ascii(filename, alldat, modelinfo):\n \n file = open(filename,'wb') \n \n #this script assumes data has been read using a routine such as sac-read3-ascii.py\n #the following variables are assumed\n #nits\n 
#time\n #ndim\n #nvar\n #nfields\n \n #dim[2] or dim[3]\n \n #gamma\n #eta\n #grav1\n #grav2\n #grav3\n \n #all data is contained in an array alldat of shape nfields+ndim,dim[0],dim[1]\n \n \n #write header lines\n \n #header='sac_test_asc'\n header=modelinfo[0]\n #modelinfo=(header,nits, time, ndim, nvar, nfields,dim,head3,head4)\n #dim=[128,128]\n #ndim=2\n #nfields=12\n dim=modelinfo[6]\n ndim=modelinfo[3]\n nfields=modelinfo[5]\n time=modelinfo[2]\n nits=modelinfo[1]\n nvar=modelinfo[4]\n \n \n head1=str(nits)+\" \"+str(time)+\" \"+str(ndim)+\" \"+str(nvar)+\" \"+str(nfields)\n \n if ndim==2:\n head2=str(dim[0])+\" \"+str(dim[1])\n elif ndim==3:\n head2=str(dim[0])+\" \"+str(dim[1])+\" \"+str(dim[2]) \n \n #warning may need to explicityly write the adiabatic parameter and correct gravitational parameters here\n head3=\"1.66667E+00 0.00000E+00 1.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00\"\n \n if ndim==2:\n head4=\"x y h m1 m2 e b1 b2 eb rhob bg1 bg2 gamma eta grav1 grav2\"\n elif ndim==3:\n head4=\"x y z h m1 m2 m3 e b1 b2 b3 eb rhob bg1 bg2 bg3 gamma eta grav1 grav2 grav3\"\n \n file.write(header)\n file.write(head1+\"\\n\") \n file.write(head2+\"\\n\")\n file.write(head3+\"\\n\")\n file.write(head4+\"\\n\") \n \n if ndim==3: \n for i3 in range(dim[2]):\n for i2 in range(dim[1]):\n for i1 in range(dim[0]):\n line=\"\"\n for j in range(ndim+nfields):\n line=line+str(alldat[i1,i2,i3,j])\n line=line+\"\\n\"\n file.write(line)\n \n if ndim==2: \n for i2 in range(dim[1]):\n for i1 in range(dim[0]):\n line=\"\"\n for j in range(ndim+nfields): \n line=line+\" \"+str(alldat[i1,i2,j])\n line=line+\"\\n\"\n file.write(line) \n \ndef read_sac_bin(filename):\n file = open(filename,'rb')\n \n file.seek(0,2)\n eof = file.tell()\n file.seek(0,0)\n \n \n header = file.read(79)\n \n nits = np.fromfile(file,dtype=np.int32,count=1)\n \n time = np.fromfile(file,dtype=np.float64,count=1)\n ndim=np.fromfile(file,dtype=np.int32,count=1)\n nvar=np.fromfile(file,dtype=np.int32,count=1)\n nfields=np.fromfile(file,dtype=np.int32,count=1)\n \n dim = np.fromfile(file,dtype=np.int32,count=ndim)[:ndim]\n \n varbuf = np.fromfile(file,dtype=float,count=7)[:7]\n \n #if ndim=2\n head4 = file.read(79)\n \n #if ndim=3\n head3=''\n for i in range(7):\n head3=head3+str(varbuf[i])\n \n #typedef enum vars {rho, mom1, mom2, mom3, energy, b1, b2, b3,energyb,rhob,b1b,b2b,b3b} CEV;\n \n if ndim==3:\n alldat=np.fromfile(file,dtype=float,count=(nfields+ndim)*dim[0]*dim[1]*dim[2])[:(nfields+ndim)*dim[0]*dim[1]*dim[2]]\n #if size(alldat)<(nw+ndim)*ndata[0]*ndata[1]*ndata[2]:\n # alldat=resize(alldat,(nw+ndim)*ndata[0]*ndata[1]*ndata[2])\n alldat=np.reshape(alldat,(nfields+ndim,dim[0],dim[1],dim[2],),'C') \n \n file.close()\n modelinfo=(header,nits, time, ndim, nvar, nfields,dim,head3,head4)\n\n return alldat,modelinfo\n\n\ndef write_sac_bin(filename, alldat, modelinfo):\n \n file = open(filename,'wb') \n \n #this script assumes data has been read using a routine such as sac-read3-ascii.py\n #the following variables are assumed\n #nits\n #time\n #ndim\n #nvar\n #nfields\n \n #dim[2] or dim[3]\n \n #gamma\n #eta\n #grav1\n #grav2\n #grav3\n \n #all data is contained in an array alldat of shape nfields+ndim,dim[0],dim[1]\n \n \n #write header lines\n \n #header='sac_test_asc'\n header=modelinfo[0]\n #modelinfo=(header,nits, time, ndim, nvar, nfields,dim,head3,head4)\n #dim=[128,128]\n #ndim=2\n #nfields=12\n dim=modelinfo[6]\n ndim=modelinfo[3]\n nfields=modelinfo[5]\n time=modelinfo[2]\n nits=modelinfo[1]\n nvar=modelinfo[4]\n \n 
\n    head1=str(nits)+\" \"+str(time)+\" \"+str(ndim)+\" \"+str(nvar)+\" \"+str(nfields)+\"\\n\"\n    \n    if ndim==2:\n        head2=str(dim[0])+\" \"+str(dim[1])+\"\\n\"\n    elif ndim==3:\n        head2=str(dim[0])+\" \"+str(dim[1])+\" \"+str(dim[2])+\"\\n\" \n    \n    #warning may need to explicitly write the adiabatic parameter and correct gravitational parameters here\n    head3=\"1.66667E+00 0.00000E+00 1.00000E+00 0.00000E+00 0.00000E+00 0.00000E+00\"+\"\\n\"\n    \n    # header strings stay plain str here; they are encoded to utf-8 just before being written\n    if ndim==2:\n        head4=\"x y h m1 m2 e b1 b2 eb rhob bg1 bg2 gamma eta grav1 grav2\"+\"\\n\"\n    elif ndim==3:\n        head4=\"x y z h m1 m2 m3 e b1 b2 b3 eb rhob bg1 bg2 bg3 gamma eta grav1 grav2 grav3\"+\"\\n\"\n    \n    file.write(header.encode('utf-8'))\n    file.write(head1.encode('utf-8')) \n    file.write(head2.encode('utf-8'))\n    file.write(head3.encode('utf-8'))\n    file.write(head4.encode('utf-8')) \n    \n    if ndim==3: \n        for i3 in range(dim[2]):\n            for i2 in range(dim[1]):\n                for i1 in range(dim[0]):\n                    line=\"\"\n                    for j in range(ndim+nfields):\n                        line=line+str(alldat[i1,i2,i3,j])\n                    line=line+\"\\n\"\n                    file.write(line.encode('utf-8'))\n    \n    if ndim==2: \n        for i2 in range(dim[1]):\n            for i1 in range(dim[0]):\n                line=\"\"\n                for j in range(ndim+nfields): \n                    line=line+\" \"+str(alldat[i1,i2,j])\n                line=line+\"\\n\"\n                file.write(line.encode('utf-8'))\n    \n","sub_path":"smaug/python/sacio.py","file_name":"sacio.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"565999487","text":"# coding=gbk\n#import numpy module\n\nimport os\nimport codecs\nimport datetime\n\nimport matplotlib.pyplot as py_plot\nimport matplotlib.finance as finance_plot\n\n\nclass Stock():\n# This is the class for Stock\n\n    # daily quotes of a stock, each quote contains the sequence\n    # [date, open price, highest price, lowest price, close price, daily volume, total trading amount]\n    quotes = []\n\n    def __init__(self):\n        # Initializer\n        print(\"A stock object is created.\")\n\n    def import_stock_data_from_zszq(self, path, identifier):\n        # This method loads trading data of a specified stock from Zhaoshang Securities data\n\n        # path of the data source\n        if path[-1] == \"/\":\n            path = path + identifier + \".txt\"\n        else:\n            path = path + \"/\" + identifier + \".txt\"\n\n        # read data\n        if os.path.isfile(path):\n            file_input = codecs.open(path, \"r\", \"gbk\").readlines()\n        else:\n            raise IOError(\"File Not Found.\")\n\n        # header - id, stock name\n        ##header1 = input[0].split(\" \")\n\n        # header - date, open, highest, lowest, close, volume, total\n        header2 = file_input[1].split(\"\\t\")\n\n        # price data\n        prices = file_input[2:-1]\n\n        header = {}\n        # header indices\n        for x in header2:\n            if codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"日期\", \"gbk\"):\n                header[\"date\"] = header2.index(x)\n            elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"开盘\", \"gbk\"):\n                header[\"open\"] = header2.index(x)\n            elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"最高\", \"gbk\"):\n                header[\"highest\"] = header2.index(x)\n            elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"最低\", \"gbk\"):\n                header[\"lowest\"] = header2.index(x)\n            elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"收盘\", \"gbk\"):\n                header[\"close\"] = header2.index(x)\n            elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"成交量\", \"gbk\"):\n                header[\"volume\"] = header2.index(x)\n            elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"成交额\", \"gbk\"):\n                header[\"total\"] = header2.index(x)\n\n        # generate price vectors\n        for day in 
prices:\n data = day.split(\"\\t\")\n pdate = finance_plot.date2num(datetime.datetime.strptime(data[header[\"date\"]].strip(), \"%Y/%m/%d\"))\n popen = float(data[header[\"open\"]].strip())\n phighest = float(data[header[\"highest\"]].strip())\n plowest = float(data[header[\"lowest\"]].strip())\n pclose = float(data[header[\"close\"]].strip())\n pvolume = float(data[header[\"volume\"]].strip())\n ptotal = float(data[header[\"total\"]].strip())\n\n self.quotes.append([pdate, popen, phighest, plowest, pclose, pvolume, ptotal])\n\n print(\"Quotes of stock {0} from date {1} to {2} have been loaded\".format(identifier, datetime.date.fromordinal(int(self.quotes[0][0])), datetime.date.fromordinal(int(self.quotes[-1][0]))))\n return 0\n\n\n # def import_stock_data_from_zszq(path, identifier):\n #\n # # path of the data source\n # path = path + \"/\" + identifier + \".txt\"\n #\n # # read data\n # #try:\n # #file_input = codecs.open(path, \"r\", \"gbk\").readlines()\n # file_input = codecs.open(path, \"r\", \"gbk\").readlines()\n # #except:\n # # print(\"failed to open file\")\n #\n # priceDate = []\n # priceOpen = []\n # priceHighest = []\n # priceLowest = []\n # priceClose = []\n # volume = []\n # total = []\n #\n # # header - id, stock name\n # ##header1 = input[0].split(\" \")\n #\n # # header - date, open, highest, lowest, close, volume, total\n # header2 = file_input[1].split(\"\\t\")\n #\n # # price data\n # prices = file_input[2:-1]\n #\n # header = {}\n # # header indices\n # for x in header2:\n # if codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"日期\", \"gbk\"):\n # header[\"date\"] = header2.index(x)\n # elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"开盘\", \"gbk\"):\n # header[\"open\"] = header2.index(x)\n # elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"最高\", \"gbk\"):\n # header[\"highest\"] = header2.index(x)\n # elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"最低\", \"gbk\"):\n # header[\"lowest\"] = header2.index(x)\n # elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"收盘\", \"gbk\"):\n # header[\"close\"] = header2.index(x)\n # elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"成交量\", \"gbk\"):\n # header[\"volume\"] = header2.index(x)\n # elif codecs.encode(x.strip(), \"gbk\") == codecs.encode(\"成交额\", \"gbk\"):\n # header[\"total\"] = header2.index(x)\n #\n # # generate price vectors\n # for day in prices:\n # data = day.split(\"\\t\")\n # ##priceDate.append(finance_plot.date2num(datetime.strptime(data[header[\"date\"]].strip(), \"%Y/%m/%d\")))\n # priceDate.append(datetime.strptime(data[header[\"date\"]].strip(), \"%Y/%m/%d\"))\n # priceOpen.append(float(data[header[\"open\"]].strip()))\n # priceHighest.append(float(data[header[\"highest\"]].strip()))\n # priceLowest.append(float(data[header[\"lowest\"]].strip()))\n # priceClose.append(float(data[header[\"close\"]].strip()))\n # volume.append(float(data[header[\"volume\"]].strip()))\n # total.append(float(data[header[\"total\"]].strip()))\n #\n # return [priceDate, priceOpen, priceHighest, priceLowest, priceClose, volume, total]\n\n def japanese_candle_chart(self, date_start, date_end):\n # This method takes stock price data and generates the japanese candle chart\n\n #quotes = []\n #for i in range(len(pdate)):\n # quotes.append([py_dates.date2num(pdate[i]), popen[i], phighest[i], plowest[i], pclose[i]])\n\n fig = py_plot.figure()\n ax_candlestick = fig.add_subplot(2, 1, 1)\n finance_plot.candlestick_ohlc(ax_candlestick, self.quotes, width=5, colorup=\"r\", colordown=\"g\")\n\n 
ax_volume = fig.add_subplot(2, 1, 2)\n\n #py_plot.bar(pdate, volume)\n\n ax_candlestick.xaxis_date()\n ax_candlestick.autoscale_view()\n py_plot.setp(py_plot.gca().get_xticklabels(), rotation=45, horizontalalignment='right')\n py_plot.show()\n\n\nif __name__ == \"__main__\":\n stock = Stock()\n stock.import_stock_data_from_zszq(\"/home/fei/Documents/Me/Computing/Data\", \"SH600674\")\n #stock.import_stock_data_from_zszq(\"/wrong\", \"SH600674\")\n stock.japanese_candle_chart()","sub_path":"AssetClasses/Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"494970875","text":"import pandas as pd\nimport numpy as np\nimport scipy.spatial.distance as ssd\nimport operator\n\n\nclass Point:\n def __init__(self, xy, _id, capacity):\n self.id = _id\n self.xy = xy\n self.capacity = capacity\n self.warehouse = False\n self.enabled = True\n\n def set_warehouse(self):\n self.warehouse = True\n self.enabled = False\n\n def __repr__(self):\n return 'capacity is {}'.format(self.capacity)\n\n\nclass Points(list):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._distance_matrix = self._get_distance_matrix()\n\n def do_have_enable_points(self):\n return len(self.get_enable_points()) > 1\n\n def get_enable_points(self):\n return list(filter(lambda x: x.enabled and not x.warehouse, self))\n\n def get_disable_points(self):\n return list(filter(lambda x: not x.enabled and not x.warehouse, self))\n\n def get_point_by_id(self, _id):\n return self[_id]\n\n def get_warehouse_point(self):\n return filter(lambda x: x.warehouse, self)\n\n def _get_xy_coords(self):\n x_coord = [point.xy[0] for point in self]\n y_coord = [point.xy[1] for point in self]\n return x_coord, y_coord\n\n def get_xy(self):\n x_coord, y_coord = self._get_xy_coords()\n return pd.DataFrame({'x': x_coord, 'y': y_coord})\n\n def get_xy_depots(self):\n x_coord = [point.xy[0] for point in self if point.warehouse]\n y_coord = [point.xy[1] for point in self if point.warehouse]\n return pd.DataFrame({'x': x_coord, 'y': y_coord})\n\n def _get_euclidean_matrix(self):\n x_coord, y_coord = self._get_xy_coords()\n xy_coord = np.column_stack((x_coord, y_coord))\n mat = ssd.cdist(xy_coord, xy_coord, 'euclidean')\n return mat\n\n def _get_distance_matrix(self):\n distance_matrix = self._get_euclidean_matrix()\n length = len(self)\n dist = pd.DataFrame(distance_matrix,\n columns=list(range(length)),\n index=list(range(length))\n )\n # add winkm\n for y in range(length):\n for x in range(y):\n dist.loc[y, x] = dist.loc[0, x] + dist.loc[0, y] - dist.loc[y, x]\n return dist\n\n def get_max_winkm(self):\n enabled_ids = list(map(lambda x: x.id, self.get_enable_points()))\n length = len(self)\n max, current_x, current_y = 0, 0, 0\n for x in range(length):\n for y in range(x):\n xy_value = self._distance_matrix.loc[x, y]\n if xy_value > max and (x in enabled_ids and y in enabled_ids):\n current_x = x\n current_y = y\n max = xy_value\n return self.get_point_by_id(current_x), self.get_point_by_id(current_y)\n\n def find_neighbor(self, current_route):\n length_of_points = len(self)\n\n route_outside_ids = list(map(lambda x: x.id, current_route.get_outside()))\n disabled_ids = list(map(lambda x: x.id, self.get_disable_points()))\n\n metrics = []\n for x in range(length_of_points):\n for y in range(1, x):\n\n if x in disabled_ids and y in disabled_ids:\n continue\n\n if x not in route_outside_ids and y not in 
route_outside_ids:\n continue\n\n point_id = x if y in route_outside_ids else y\n point = self.get_point_by_id(point_id)\n\n if not point or point.capacity > current_route.lost:\n continue\n\n route_point_id = y if y in route_outside_ids else x\n target_point_id = x if y in route_outside_ids else y\n target_capacity = point.capacity\n winkm = self._distance_matrix.loc[x, y]\n km = self._distance_matrix.loc[y, x]\n\n metric = Metric(route_point_id, target_point_id, target_capacity, km, winkm)\n metrics.append(metric)\n\n if len(metrics) > 0:\n metrics.sort()\n\n return metrics[0].target_point_id, metrics[0].route_point_id\n return None, None\n\n\nclass Metric:\n def __init__(self, route_point: int, target_point: int, target_capacity: float, km: float, winkm: float):\n self.route_point_id = route_point\n self.target_point_id = target_point\n self.target_capacity = target_capacity\n self.km = km\n self.winkm = winkm\n\n def _less(self, other):\n score = 0\n score += 2 if self.km < other.km else 0\n score += 2 if self.winkm > other.winkm else 0\n score += 1 if self.target_capacity > other.target_capacity else 0\n return score\n\n def __lt__(self, other):\n # return self.km < other.km\n return self._less(other) >= 3","sub_path":"point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"389687217","text":"from flask import Flask, request, render_template\r\nimport requests\r\nimport logging, os\r\nimport string\r\nimport random\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.before_request\r\ndef log_request():\r\n transaction_id = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(6)])\r\n request.environ['transaction_id'] = transaction_id\r\n\r\n log.info('Method=BeforeRequest Transaction=%s RequestMethod=%s URL=%s ClientIP=%s Method=%s Proto=%s UserAgent=%s Arguments=%s Form=%s Data=%s'\r\n % (transaction_id,\r\n request.method,\r\n request.url,\r\n request.headers.environ['REMOTE_ADDR'] if 'REMOTE_ADDR' in request.headers.environ else 'NULL',\r\n request.headers.environ['REQUEST_METHOD'],\r\n request.headers.environ['SERVER_PROTOCOL'],\r\n request.headers.environ['HTTP_USER_AGENT'] if 'HTTP_USER_AGENT' in request.headers.environ else 'NULL',\r\n request.args,\r\n request.form,\r\n request.data.decode('utf-8')))\r\n\r\n@app.route('/')\r\ndef start():\r\n return basic_render('start')\r\n\r\n@app.route('/step/')\r\ndef step(name):\r\n return basic_render(name)\r\n\r\ndef basic_render(step):\r\n body = render_template('%s.html' % step)\r\n return render_template('index.html', body=body)\r\n\r\ndef fields_render(step, fields):\r\n body = render_template('%s.html' % step, fields=fields)\r\n return render_template('index.html', body=body)\r\n\r\n\r\nlog = logging.getLogger('frontend')\r\n\r\n\r\n\r\n","sub_path":"view/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"301174880","text":"from challenge.default.fileInput import *\nfrom challenge.default.fileOutput import *\nimport math\n\nimport numpy as np\n\nimport operator\n\ninputPath = '../input/'\noutputPath = './outputv3/'\n\nkittens = 'kittens'\nme_at_the_zoo = 'me_at_the_zoo'\ntrending_today = 'trending_today'\nvideos_worth_spreading = 'videos_worth_spreading'\n\ncurrentFile = kittens\n\n# info, videos, endpoints, requests = readAndParseInputFile(inputPath + 
currentFile + '.in')\n\nnpArr = np.load(inputPath + currentFile + '.dump')\n\ninfo = npArr[0]\nvideos = npArr[1]\nendpoints = npArr[2]\nrequests = npArr[3]\n\nprint(info)\n# print(videos)\n# print(endpoints)\n# print(requests)\n\n# print(len(videos))\n# print(len(endpoints))\n# print(len(requests))\n\nendpointVideoRanking = {}\n\n# endpointVideoRanking = np.array(endpointVideoRanking)\n\nfor request in requests:\n    Re = request['Re']\n    Rv = request['Rv']\n    Rn = request['Rn']\n\n    if str(Re) in endpointVideoRanking:\n        dict = endpointVideoRanking[str(Re)]\n    else:\n        dict = endpointVideoRanking[str(Re)] = {}\n\n    # keys are stored as str, so the membership test must also use str(Rv)\n    if str(Rv) in dict:\n        dict[str(Rv)] += Rn\n    else:\n        dict[str(Rv)] = Rn\n\n# print(endpointVideoRanking)\n\n# endpointVideoRankingTemp = np.copy(endpointVideoRanking)\n\npossibleCaches = []\n\nfor c in range(info['C']):\n    possibleCaches.append({'videos':[], 'size': 0})\n\nfor e in range(info['E']):\n    endpointVideoRanking[str(e)] = sorted(endpointVideoRanking[str(e)].items(), key=operator.itemgetter(1),\n                                          reverse=True)\n\nfor cacheNum in range(info['C']):\n    print(cacheNum, info['C'])\n    for e in range(info['E']):\n\n        # print(endpointVideoRanking[str(e)])\n\n        # find best cache\n\n        # sortedCaches = newlist = sorted(endpoints[e]['Caches'], key=lambda k: k['Lc'])\n        sortedCaches = sorted(endpoints[e]['Caches'], key=operator.itemgetter('Lc'))\n\n        if (len(sortedCaches) > cacheNum):\n\n            bestCache = possibleCaches[sortedCaches[cacheNum]['c']]\n\n            for video in endpointVideoRanking[str(e)]:\n                if bestCache['size'] + videos[int(video[0])] <= info['X'] and not (video[0] in bestCache['videos']):\n                    bestCache['videos'].append(video[0])\n                    endpointVideoRanking[str(e)].remove(video)\n                    bestCache['size'] += videos[int(video[0])]\n\n\n# chachesOutput = list(np.array(possibleCaches)[:info['C']])\n\noutputCaches = []\n\nfor c in possibleCaches:\n    outputCaches.append(c['videos'])\n\nparseAndSaveOutputFile(outputCaches, outputPath + currentFile + '.out')\n\n\n# print(endpointVideoRanking)\n\n","sub_path":"challenge/default/mainAlbertv4.py","file_name":"mainAlbertv4.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"230567587","text":"from extract.handlers import handleBaseClass\n\nclass handle(handleBaseClass):\n\n    def _libarchive(self):\n        try:\n            # Do the actual extraction; config lives on the instance\n            with open(self.config['fileName'],\"rb\") as f: \n                for entry in libarchive.public.memory_pour(f.read()):\n                    pass\n            return True\n\n        except:\n            return False\n    \n    def _lrunzip(self):\n        # Make sure it exists\n        if not shutil.which(\"lrunzip\"):\n            logger.warn(\"lrunzip not found. 
try installing it.\")\n return False\n\n try:\n subprocess.check_output([\"lrunzip\",self.config['fileName']])\n return True\n\n except:\n return False\n\n\n def extract(self):\n # List of preferred extraction options\n extract_options = [\n self._libarchive,\n self._lrunzip,\n ]\n\n config = self.config\n\n # Find the base directory of the file\n directory = os.path.dirname(os.path.abspath(config['fileName']))\n\n # Default to extracting to the same directory\n os.chdir(directory)\n\n # Try different options\n if not any(option() for option in extract_options):\n logger.error(\"Extraction attempts failed!\")\n # Return before we accidentally remove the file\n return False\n\n # Call parent handler\n handleBaseClass.extract(self)\n \n\nimport logging\nimport os\nimport libarchive.public\nimport subprocess\nimport shutil\n\nlogger = logging.getLogger('extract.handlers.application.x_lrzip')\n","sub_path":"extract/handlers/application/x_lrzip/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"162898377","text":"from flask import Blueprint, request, jsonify\nfrom flask_security import current_user\nfrom homefinance.handlers import result, no_content\nfrom homefinance.exceptions import BadRequest,AccessDenied,ResourceNotFound\nfrom homefinance.operations.services.operation_manager_service import OperationManagerService\nfrom .schemas import finance_operation_schema, finance_operations_schema, move_money_schema\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom homefinance.accounts.exceptions import AccountAccessDenied, NotEnoughMoney\n\noperation_blueprint = Blueprint('operation', __name__)\n\n\n@operation_blueprint.route('/', methods=['POST'])\ndef create_operation():\n try:\n new_operation = finance_operation_schema.load(request.json)\n except NoResultFound:\n raise ResourceNotFound\n if new_operation.errors:\n raise BadRequest(message=new_operation.errors)\n\n try:\n saved_operation = OperationManagerService.add_operation_to_account(new_operation.data, current_user)\n except AccountAccessDenied:\n raise AccessDenied\n except NotEnoughMoney:\n raise BadRequest(message='not enough money for operation')\n return jsonify({'result': finance_operation_schema.dump(saved_operation).data})\n\n\n@operation_blueprint.route('/', methods=['GET'])\ndef list_operations():\n try:\n operations = OperationManagerService.get_operations(current_user, request.args)\n except AccountAccessDenied:\n raise AccessDenied\n return result(finance_operations_schema.dump(operations).data)\n\n\n@operation_blueprint.route('/move/', methods=['POST'])\ndef move_money():\n move_operation = move_money_schema.load(request.json)\n if move_operation.errors:\n raise BadRequest(message=move_operation.errors)\n\n OperationManagerService.move_to_account(move_operation.data, current_user)\n\n return no_content()\n","sub_path":"backend/homefinance/operations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"312883331","text":"def findOrder(numCourses, prerequisites):\n inDegree = [0] * numCourses\n \n for i in range(len(prerequisites)):\n inDegree[prerequisites[i][0]] += 1\n\n stack = []\n for i in range(len(inDegree)):\n if inDegree[i] == 0:\n stack.append(i)\n\n count = 0\n order = []\n while stack:\n learned = stack.pop()\n order.append(learned)\n count += 1\n\n for i in 
range(len(prerequisites)):\n            if prerequisites[i][1] == learned:\n                inDegree[prerequisites[i][0]] -= 1\n\n                if inDegree[prerequisites[i][0]] == 0:\n                    stack.append(prerequisites[i][0])\n    \n    return order if count == numCourses else []\n\n# this one is faster\nfrom collections import defaultdict \ndef findOrder1(num, p):\n    e = defaultdict(list)\n    d = defaultdict(int)\n    \n    for b, a in p:\n        e[a].append(b)\n        d[b] += 1\n    \n    res = []\n    for i in range(num):\n        if not d[i]:\n            res.append(i)\n    \n    for ele in res:\n        for end in e[ele]:\n            d[end] -= 1\n            if not d[end]:\n                res.append(end)\n    \n    return (res if len(res) == num else [])\n\n\nprint(findOrder(2, [[1,0]]))\nprint(findOrder(4, [[1,0],[2,0],[3,1],[3,2]]))","sub_path":"july-leetcoding-challenge/D18-CourseSchedule2.py","file_name":"D18-CourseSchedule2.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"179744089","text":"import logging\nimport praw\nimport os\nimport re\nimport sqlite3\nimport sys\nimport time\nimport traceback\n\n# Requests' exceptions live in .exceptions and are called errors.\nfrom requests.exceptions import ConnectionError, HTTPError\n# Praw's exceptions live in .errors and are called exceptions.\nfrom praw.errors import APIException, ClientException, RateLimitExceeded\n\nfrom urllib.parse import urlparse\n\nfrom settings import IGNORED_BOTH, IGNORED_LINKS, IGNORED_SOURCES, IGNORED_USERS\n\nTEST = True\nDEBUG = True\n\nUSER_AGENT = 'TotesMessenger v0.x by /u/justcool393'\nDOMAIN = 'api.reddit.com'\n\nARCHIVE_TIME = 6 * 30 * 24 * 60 * 60  # currently 6 months (in seconds)\n\nloglevel = logging.DEBUG if DEBUG else logging.INFO\n\nlogging.basicConfig(level=loglevel,\n                    format='[%(asctime)s] [%(levelname)s] %(message)s')\n\nlog = logging.getLogger('totes')\nlogging.getLogger('requests').setLevel(loglevel)\n\ndb = sqlite3.connect('totes.sqlite3')\ncur = db.cursor()\n\nr = praw.Reddit(USER_AGENT, domain=DOMAIN)\n\nPATH_REGEX = re.compile(r'^/r/([^/]+)/comments/([a-z0-9]{6,8})(/[^/]+/([a-z0-9]{6,8}))?')\n\ndef log_error(e):\n    log.error(\"Unexpected {}:\\n{}\".format(e.__class__.__name__,\n                                          traceback.format_exc()))\n\ndef np(url):\n    url = urlparse(url)\n    return \"//np.reddit.com{}\".format(url.path)\n\n\nclass RecoverableException(Exception):\n    pass\n\n\nclass SubmissionNotFound(RecoverableException):\n    def __init__(self, id):\n        self.id = id\n\n    def __str__(self):\n        return \"Could not find submission {}\".format(self.id)\n\n\nclass NotAComment(RecoverableException):\n    pass\n\n\nRECOVERABLE_EXC = (RecoverableException,\n                   ConnectionError,\n                   HTTPError,\n                   APIException,\n                   ClientException,\n                   RateLimitExceeded)\n\n\nclass Source:\n    \"\"\"\n    Comment or thread that has been linked to from somewhere else on reddit.\n    \"\"\"\n    def __init__(self, url):\n        self.path = urlparse(url.lower()).path\n        self.id, self.subreddit = self._parse_path()\n\n        self._submission = None\n        self.author = None\n        self.title = None\n        self.reply = None\n        self.skip = False\n        self.is_new = True\n\n    def __eq__(self, other):\n        if isinstance(other, Source):\n            return self.id == other.id\n\n        return False\n\n    def __hash__(self):\n        return hash(self.id)\n\n    @property\n    def submission(self):\n        if self._submission:\n            return self._submission\n\n        self._submission = r.get_info(thing_id=self.id)\n\n        if not self._submission:\n            raise SubmissionNotFound(self.id)\n\n        return self._submission\n\n\n\n    @property\n    def is_comment(self):\n        return self.id.startswith('t1')\n\n    @property\n    def is_post(self):\n        return 
self.id.startswith('t3')\n\n def check_skip(self):\n if self.skip:\n return True\n\n cur.execute(\n \"SELECT * FROM users WHERE name = ? AND skip_source = ? LIMIT 1\",\n (self.author, True))\n\n if cur.fetchone():\n self.skip = True\n return True\n\n cur.execute(\n \"SELECT * FROM subreddits WHERE name = ? AND skip_source = ? LIMIT 1\",\n (self.subreddit, True))\n\n if cur.fetchone():\n self.skip = True\n return True\n\n return False\n\n def save(self):\n cur.execute(\"\"\"\n REPLACE INTO sources (id, reply, subreddit, author, title, skip)\n VALUES (?, ?, ?, ?, ?, ?)\n \"\"\", (self.id, self.reply, self.subreddit, self.author, self.title, self.skip))\n\n # Maybe commit?\n db.commit()\n\n def load(self):\n \"\"\"\n Populate attributes from database and fetch corresponding submission.\n \"\"\"\n cur.execute(\"\"\"\n SELECT id, reply, subreddit, author, title, skip FROM sources\n WHERE id=? LIMIT 1\n \"\"\", (self.id,))\n\n source = cur.fetchone()\n\n if source:\n self.id, self.reply, self.subreddit, self.author, self.title, self.skip = source\n self.is_new = False\n return # Return early cuz we don't need to perform an api call.\n\n if not self.author and self.submission.author:\n self.author = self.submission.author.name.lower()\n else:\n self.author = '[deleted]'\n\n if hasattr(self.submission, 'title'):\n self.title = self.submission.title\n else:\n self.title = '[comment]'\n\n def _parse_path(self):\n # Comments have path /r/sub/comments/xxx/title/xxx/\n # Posts have path /r/sub/comments/xxx/title/\n match = PATH_REGEX.match(self.path)\n\n if match:\n subreddit, post, _, comment = match.groups()\n else:\n raise NotAComment(\"The source {} is not a comment or post.\".format(self.path))\n\n if comment:\n id = \"t1_{}\".format(comment)\n else:\n id = \"t3_{}\".format(post)\n\n return (id, subreddit)\n\n\nclass Link:\n def __init__(self, submission, source):\n self.submission = submission\n self.id = submission.name\n self.subreddit = submission.subreddit.display_name.lower()\n\n if submission.author:\n self.author = submission.author.name.lower()\n else:\n self.author = '[deleted]'\n\n self.title = submission.title\n self.permalink = submission.permalink\n self.source = source\n self.skip = False\n self.is_new = True\n\n def check_skip(self):\n if self.skip:\n return True\n\n cur.execute(\n \"SELECT * FROM users WHERE name = ? AND skip_link = ? LIMIT 1\",\n (self.author, True))\n\n if cur.fetchone():\n self.skip = True\n return True\n\n cur.execute(\n \"SELECT * FROM subreddits WHERE name = ? AND skip_link = ? LIMIT 1\",\n (self.subreddit, True))\n\n if cur.fetchone():\n self.skip = True\n return True\n\n return False\n\n def save(self):\n cur.execute(\"\"\"\n REPLACE INTO links (id, source, permalink, subreddit, skip, author, title)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n \"\"\", (self.id, self.source, self.permalink, self.subreddit, self.skip, self.author, self.title))\n\n # Maybe commit less often?\n db.commit()\n\n\n def load(self):\n \"\"\"\n Populate attributes from database and fetch corresponding submission.\n \"\"\"\n cur.execute(\"\"\"\n SELECT id, source, permalink, subreddit, skip, author, title FROM links\n WHERE id=? 
LIMIT 1\n \"\"\", (self.id,))\n\n link = cur.fetchone()\n\n if link:\n self.id, self.source, self.permalink, self.subreddit, self.skip, self.author, self.title = link\n self.is_new = False\n\nclass Notification:\n def __init__(self, source):\n self.source = source\n self.id = source.id\n self.reply = source.reply\n self.links = []\n\n def should_notify(self):\n query = cur.execute(\"\"\"\n SELECT subreddit, title, permalink FROM links\n WHERE source=? AND skip=?\n ORDER BY subreddit ASC, title ASC\n \"\"\", (self.id, False))\n\n for row in query:\n self.links.append(row)\n\n return any(self.links)\n\n def post_reply(self):\n # Render body.\n body = self._render_comment()\n\n if TEST:\n log.debug(\"\"\"\n========== COMMENT ============\nSource: {}\n{}\n========== /COMMENT ===========\n\"\"\".format(self.source.path, body))\n return True\n\n if self.reply:\n reply = r.get_info(thing_id=self.reply)\n reply.edit(body)\n return True\n\n if self.source.is_comment:\n reply = self.source.submission.reply(body)\n self.reply = reply.name\n\n elif self.source.is_post:\n reply = self.source.submission.add_comment(body)\n self.reply = reply.name\n\n self.source.reply = self.reply\n self.source.save()\n\n return True\n\n def _render_comment(self):\n parts = []\n parts.append(\"This thread has been linked to from another place on reddit.\")\n\n for subreddit, title, permalink in self.links:\n parts.append(\"- [/r/{}] [{}]({})\".format(subreddit, title, np(permalink)))\n\n parts.append(\"\"\"\n[](#footer)*^(If you follow any of the above links, respect the rules of reddit and don't vote.)\n ^\\([Info](/r/TotesMessenger/wiki/)\n ^/\n ^[Contact](/message/compose/?to=\\/r\\/TotesMessenger))* [](#bot)\n \"\"\")\n\n return \"\\n\\n\".join(parts)\n\n\n\nclass Totes:\n\n def __init__(self, username, password, limit=25):\n self.username = username\n self.password = password\n self.limit = limit\n\n self._setup = False\n\n def run(self):\n \"\"\"\n Comment replies notifying posts and comments that they have been linked\n to from somewhere else on reddit.\n \"\"\"\n if not self._setup:\n raise Exception(\"Totes not ready yet!!!\")\n\n log.info(\"Running.\")\n\n sources = set()\n\n submissions = r.get_domain_listing('reddit.com', sort='new', limit=self.limit)\n\n for submission in submissions:\n try:\n source = Source(submission.url)\n source.load()\n except RECOVERABLE_EXC as e:\n log_error(e)\n continue\n\n log.debug(\"Got source: {}\".format(submission.url))\n\n source.check_skip()\n source.save()\n\n try:\n link = Link(submission, source.id)\n link.load()\n except RECOVERABLE_EXC as e:\n log_error(e)\n continue\n\n log.debug(\"Got link: {}\".format(submission.permalink))\n\n link.check_skip()\n link.save()\n\n skip_any = source.skip or link.skip\n any_new = source.is_new or link.is_new\n\n log.debug(\"Skip any: {}\".format(skip_any))\n log.debug(\"Any new: {}\".format(any_new))\n\n if any_new and not skip_any:\n sources.add(source)\n\n for source in sources:\n notification = Notification(source)\n\n if notification.should_notify():\n try:\n notification.post_reply()\n except RECOVERABLE_EXC as e:\n log_error(e)\n continue\n\n log.info(\"Done.\")\n\n def setup(self):\n \"\"\"\n Load settings and perform setup.\n \"\"\"\n self._setup_db()\n self._login()\n\n self._setup = True\n log.info(\"Totes set up.\")\n\n def quit(self):\n \"\"\"\n Teardown.\n \"\"\"\n log.info(\"Totes goodbye!\")\n\n def _login(self):\n \"\"\"\n Create reddit session.\n \"\"\"\n r.login(self.username, self.password)\n log.info(\"Logged in to 
reddit.\")\n\n def _setup_db(self):\n \"\"\"\n Create tables.\n \"\"\"\n\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS subreddits (\n name TEXT PRIMARY KEY,\n skip_source BOOLEAN DEFAULT 0,\n skip_link BOOLEAN DEFAULT 0,\n t TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\")\n\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS users (\n name TEXT PRIMARY KEY,\n skip_source BOOLEAN DEFAULT 0,\n skip_link BOOLEAN DEFAULT 0,\n t TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\")\n\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS sources (\n id TEXT PRIMARY KEY,\n reply TEXT UNIQUE,\n subreddit TEXT,\n author TEXT,\n title TEXT,\n skip BOOLEAN DEFAULT 0,\n t TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\")\n\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS links (\n id TEXT PRIMARY KEY,\n source TEXT,\n subreddit TEXT,\n author TEXT,\n title TEXT,\n permalink TEXT,\n skip BOOLEAN DEFAULT 0,\n t TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\")\n\n cur.execute(\"\"\"\n CREATE INDEX IF NOT EXISTS link_sources ON links (source)\n \"\"\")\n\n db.commit()\n log.info(\"Tables ready.\")\n\n for sub in IGNORED_SOURCES:\n cur.execute(\"\"\"\n INSERT OR IGNORE INTO subreddits (name, skip_source)\n VALUES (?, ?)\n \"\"\", (sub, True))\n\n for sub in IGNORED_BOTH:\n cur.execute(\"\"\"\n INSERT OR IGNORE INTO subreddits (name, skip_source, skip_link)\n VALUES (?, ?, ?)\n \"\"\", (sub, True, True))\n\n for sub in IGNORED_LINKS:\n cur.execute(\"\"\"\n INSERT OR IGNORE INTO subreddits (name, skip_link)\n VALUES (?, ?)\n \"\"\", (sub, True))\n\n for user in IGNORED_USERS:\n cur.execute(\"\"\"\n INSERT OR IGNORE INTO users (name, skip_link) VALUES (?, ?)\n \"\"\", (user, True))\n\n db.commit()\n log.info(\"Default settings setup.\")\n\n\nif __name__ == \"__main__\":\n username = os.environ.get(\"REDDIT_USERNAME\")\n password = os.environ.get(\"REDDIT_PASSWORD\")\n wait = int(os.environ.get(\"WAIT\", 30))\n limit = int(os.environ.get(\"LIMIT\", 25))\n\n totes = Totes(username, password, limit)\n totes.setup()\n\n try:\n while True:\n try:\n totes.run()\n except RECOVERABLE_EXC as e:\n log_error(e)\n\n time.sleep(wait)\n except KeyboardInterrupt:\n pass\n\n totes.quit()\n db.close()\n exit(0)\n\n","sub_path":"totes.py","file_name":"totes.py","file_ext":"py","file_size_in_byte":13919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"652526418","text":"import functools\nimport logging\nimport os\nimport pydoc\nimport sys\n\nfrom .logs import get_log_file\n\n\nlog = logging.getLogger('tvrenamr.history')\n\n\ndef parse_history(log_file_location):\n log_file = get_log_file(log_file_location)\n\n if not os.path.getsize(log_file):\n log.critical('No log file found, exiting.')\n sys.exit(1)\n\n with open(log_file, 'r') as f:\n logs = f.readlines()\n\n shows = list(filter(lambda x: 'Renamed:' in x, logs))\n\n def show_len(show):\n return len(show.split('Renamed: ')[1].split(' - ')[0]) - 1\n\n longest = max(map(show_len, shows))\n\n def sanitise_log(log, longest):\n dt, name = log.split('Renamed: ')\n dt = dt.split(' ')[0].replace('T', ' ')\n show, number, title = name.split(' - ')\n name = (name.replace(show, show.lstrip('\"').strip().ljust(longest), 1)\n .replace(number, number.ljust(4), 1)\n .replace(' - ', ' | '))\n return '{0} | {1}'.format(dt, name.rstrip('\"\\n'))\n\n sanitise = functools.partial(sanitise_log, longest=longest)\n\n shows = map(sanitise, shows)\n return 
pydoc.pager('\\n'.join(shows))\n","sub_path":"tvrenamr/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"56417702","text":"from config import DB\n\nfrom decogres.decorator import postgres\n\nimport json\nimport datetime\nimport os\n\n# collects scraped products whose type could not be determined (used by _predict_type below)\nquestionable_content = []\n\n\n@postgres(**DB)\ndef get_brand_exists(brand_name):\n    with db.cursor() as c:\n        c.execute(\"SELECT * FROM brands WHERE name=%(brand)s\",\n                  {'brand': brand_name})\n        ret = c.fetchone()\n        return ret\n\n\n@postgres(**DB)\ndef get_type_exists(type_name):\n    with db.cursor() as c:\n        c.execute(\"SELECT * FROM apparel_types WHERE type=%(type)s\",\n                  {'type': type_name})\n        ret = c.fetchone()\n        return ret\n\n\n@postgres(**DB)\ndef get_apparel_exists(apparel_name, brand_name):\n    with db.cursor() as c:\n        c.execute(\"SELECT * FROM apparel WHERE name=%(name)s and brand_name=%(brand_name)s\",\n                  {'name': apparel_name,\n                   'brand_name': brand_name})\n        ret = c.fetchone()\n        return ret\n\n\"\"\"\n     Column     |  Type   |                      Modifiers\n----------------+---------+------------------------------------------------------\n id             | integer | not null default nextval('apparel_id_seq'::regclass)\n name           | text    |\n apparel_type   | text    |\n brand_name     | text    |\n alt_name       | text    |\n date_added     | date    |\n reference_link | text    |\n image_url      | text    |\n\"\"\"\n\ndef crupdate_apparel(apparel_properties):\n    # get_apparel_exists takes both a name and a brand\n    apparel = get_apparel_exists(apparel_properties['name'], apparel_properties['brand_name'])\n    if apparel:\n        new_apparel_dict = apparel.copy()\n        new_apparel_dict.update(apparel_properties)\n        # update the existing row by its id, using the merged properties\n        update_apparel(apparel['id'], new_apparel_dict)\n    else:\n        insert_apparel(apparel_properties)\n\n\n@postgres(**DB)\ndef update_apparel(apparel_id, apparel_properties):\n    with db.cursor(commit_on_close=True) as c:\n        c.execute(\"\"\"\n            UPDATE apparel \n            SET name=%(name)s, \n                alt_name=%(alt_name)s,\n                brand_name=%(brand_name)s,\n                apparel_type=%(apparel_type)s,\n                product_year=%(product_year)s,\n                reference_link=%(reference_link)s,\n                image_url=%(image_url)s\n            WHERE id=%(id)s\n            RETURNING *\n            \"\"\",\n            {'id':apparel_id, \n             'name': apparel_properties['name'],\n             'alt_name': apparel_properties['alt_name'],\n             'brand_name': apparel_properties['brand_name'],\n             'apparel_type': apparel_properties['apparel_type'],\n             'product_year': apparel_properties['product_year'],\n             'reference_link': apparel_properties['reference_link'],\n             'image_url': apparel_properties['image_url']})\n        ret = c.fetchone()\n        return ret\n\n\n\n@postgres(**DB)\ndef insert_apparel(apparel_properties):\n    with db.cursor(commit_on_close=True) as c:\n        c.execute(\"\"\"INSERT INTO apparel \n                     (name,\n                      alt_name,\n                      brand_name,\n                      apparel_type,\n                      date_added,\n                      reference_link,\n                      image_url,\n                      product_year) \n                     values \n                     (%(name)s,\n                      %(alt_name)s,\n                      %(brand_name)s,\n                      %(apparel_type)s,\n                      %(date_added)s,\n                      %(reference_link)s,\n                      %(image_url)s,\n                      %(product_year)s)\n                     RETURNING *\"\"\",\n                  {'name': apparel_properties['name'],\n                   'alt_name': apparel_properties['alt_name'],\n                   'brand_name': apparel_properties['brand_name'],\n                   'apparel_type': apparel_properties['apparel_type'],\n                   'date_added': datetime.date.today(),\n                   'product_year': apparel_properties['product_year'],\n                   'reference_link': apparel_properties['reference_link'],\n                   'image_url': apparel_properties['image_url']})\n        ret = c.fetchone()\n        return ret\n\n\n@postgres(**DB)\ndef _insert_brand(brand_name):\n    with db.cursor(commit_on_close=True) as c:\n        c.execute(\"INSERT INTO brands (name) values (%(name)s)\",\n                  {'name': 
brand_name})\n\n\n@postgres(**DB)\ndef _insert_type(apparel_type):\n    with db.cursor(commit_on_close=True) as c:\n        c.execute(\"INSERT INTO apparel_types (type) values (%(type)s)\",\n                  {'type': apparel_type})\n\n\n@postgres(**DB)\ndef _insert_variety(item):\n    with db.cursor(commit_on_close=True) as c:\n        c.execute(\"INSERT INTO varieties (apparel_id, variation) values (%(apparel_id)s, %(variation)s)\",\n                  {'apparel_id': item['apparel_id'],\n                   'variation': item['variation']})\n\n\ndef _predict_type(product):\n    # fall back to 'Shoes' and remember the product for manual review when no type was scraped\n    if product['type'] is None:\n        questionable_content.append(product)\n        return 'Shoes'\n    return product['type']\n\n\ndef _insert_product(product):\n    if not get_brand_exists(product['brand']):\n        _insert_brand(product['brand'])\n\n    item_type = _predict_type(product)\n    if not get_type_exists(item_type):\n        _insert_type(item_type)\n\n    # build the dict with the keys insert_apparel expects; no image URL is scraped here\n    item = {'name': product['title'],\n            'alt_name': product['title_alt'],\n            'brand_name': product['brand'],\n            'apparel_type': item_type,\n            'product_year': product['year'],\n            'reference_link': 'http://lolibrary.org{}'.format(product['link']),\n            'image_url': None}\n\n    insert_apparel(item)\n","sub_path":"backend/frillex/store/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"232932387","text":"from __future__ import print_function\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nimport time, pprint, numpy as np, pandas as pd\nfrom pprint import pprint\n\nimport intrinio_sdk\nfrom intrinio_sdk.rest import ApiException\n\nfrom stock_screener.api_keys.secrets import API_KEY_TEST, API_KEY_PRODUCTION\n\n# Comment out one of the following to choose API_KEY_TEST or API_KEY_PRODUCTION\nintrinio_sdk.ApiClient().configuration.api_key['api_key'] = API_KEY_TEST\n# intrinio_sdk.ApiClient().configuration.api_key['api_key'] = API_KEY_PRODUCTION\nsecurity_api = intrinio_sdk.SecurityApi()\ncompany_api = intrinio_sdk.CompanyApi()\ndata_tag_api = intrinio_sdk.DataTagApi()\n\n\ndef search_companies(query):\n    \"\"\"query # str | Search parameters\"\"\"\n    page_size = 10 # int | The number of results to return (optional) (default to 100)\n\n    try:\n        api_response = company_api.search_companies(query, page_size=page_size)\n        # pprint(api_response) # Debug\n        return api_response\n    except ApiException as e:\n        print(\"Exception when calling CompanyApi->search_companies: %s\\n\" % e)\n        return\n\n\ndef get_company_fundamentals(identifier, filed_after='', filed_before='', reported_only=False, fiscal_year=None, statement_code='', type_param='', start_date='', end_date='', page_size=100, next_page=''):\n    \"\"\"\n    False required for async\n    Args:\n        [REQUIRED] identifier = 'AAPL' # str | A Company identifier\n        filed_after = '' # date | Filed on or after this date (optional)\n        filed_before = '' # date | Filed on or before this date (optional)\n        reported_only = False # bool | Only as-reported fundamentals (optional)\n        fiscal_year = \"~null\" # int | Only for the given fiscal year (REQUIRED)\n        statement_code = '' # str | Only of the given statement code (optional)\n        type = '' # str | Only of the given type (optional)\n        start_date = '' # date | Only on or after the given date (optional)\n        end_date = '' # date | Only on or before the given date (optional)\n        page_size = 100 # int | The number of results to return (optional) (default to 100)\n        next_page = '' # str | Gets the next page of data from a previous API call (optional)\"\"\"\n\n    try:\n        api_response = 
company_api.get_company_fundamentals(identifier, filed_after=filed_after, filed_before=filed_before, reported_only=reported_only, fiscal_year=fiscal_year, statement_code=statement_code, type=type_param, start_date=start_date, end_date=end_date, page_size=page_size, next_page=next_page)\n pprint(api_response) # DEBUG\n return api_response\n except ApiException as e:\n print(\"Exception when calling CompanyApi->get_company_fundamentals: %s\\n\" % e)\n\n\ndef search_data_tags(query):\n \"\"\"Interesting tags: (better formatting/search here: https://data.intrinio.com/data-tags)\n altmanzscore: A score below 1.8 means the company is probably headed for bankruptcy, while companies with scores above 3 are not likely to go bankrupt. \n next_yr_ave_revenue_est: Zacks Sales Estimate # Appears not to work # maybe I'm not paying for it?\n Exception AAPL, AAXN, SQ, ROKU, TTD, TWLO\n {'id': 'tag_4zrnoz',\n 'name': 'Revenue Q/Q Growth',\n 'statement_code': 'calculations',\n 'statement_type': 'financial',\n 'tag': 'revenueqoqgrowth',\n 'type': 'growth',\n 'unit': 'percentage'}\n {'id': 'tag_Dg2aoy',\n 'name': 'Revenue Growth',\n 'statement_code': 'calculations',\n 'statement_type': 'financial',\n 'tag': 'revenuegrowth',\n 'type': 'growth',\n 'unit': 'percentage'},\n {'id': 'tag_5ydVRX',\n 'name': 'Price to Revenue (P/Rev)',\n 'statement_code': 'calculations',\n 'statement_type': 'financial',\n 'tag': 'pricetorevenue',\n 'type': 'valuation',\n 'unit': 'multiple'},\n {'id': 'tag_Qz8Evy',\n 'name': 'Current Year Average Revenue Estimate',\n 'statement_code': 'zacks_sales_estimate',\n 'tag': 'current_yr_ave_revenue_est',\n 'type': 'sequenced_sales_est_stat',\n 'unit': 'usd'},\n {'id': 'tag_Eg4L4g',\n 'name': 'Next Year Average Revenue Estimate',\n 'statement_code': 'zacks_sales_estimate',\n 'tag': 'next_yr_ave_revenue_est',\n 'type': 'sequenced_sales_est_stat',\n 'unit': 'usd'},\n {id': 'tag_2z93Og',\n 'name': 'Price to Current Year Forecasted Revenue',\n 'statement_code': 'zacks_sales_estimate',\n 'tag': 'pricetocurrentyearrevenue',\n 'type': 'valuation',\n 'unit': 'multiple'},\n {\"id\": \"tag_qgeNXo\",\n \"name\": \"Weighted Average Basic Shares Outstanding\",\n \"tag\": \"weightedavebasicsharesos\",\n \"parent\": \"basiceps\",\n \"sequence\": 37,\n \"factor\": \"/\",\n \"balance\": null,\n \"unit\": \"shares\"\n },\n {\"id\": \"tag_0yb7gR\",\n \"name\": \"Weighted Average Diluted Shares Outstanding\",\n \"tag\": \"weightedavedilutedsharesos\",\n \"parent\": \"dilutedeps\",\n \"sequence\": 39,\n \"factor\": \"/\",\n \"balance\": null,\n \"unit\": \"shares\"\n }\n \"\"\"\n page_size = 20 # int | The number of results to return (optional) (default to 100)\n try:\n api_response = data_tag_api.search_data_tags(query, page_size=page_size)\n # pprint(api_response) # Debug\n return api_response\n except ApiException as e:\n print(\"Exception when calling CompanyApi->search_companies: %s\\n\" % e)\n return\n\n\ndef get_company_data_point_number(identifier, tag):\n try:\n api_response = company_api.get_company_data_point_number(identifier, tag)\n pprint(api_response)\n return api_response\n except ApiException as e:\n print(\"Exception\", str(identifier), str(tag))\n # print(\"Exception when calling CompanyApi->get_company_data_point_number: %s\\n\" % e)\n return\n","sub_path":"stock_screener/intrinio_functions.py","file_name":"intrinio_functions.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} 
+{"seq_id":"344186120","text":"from service.analyticService.core.analyticCore.classificationBase import classification\nfrom keras.models import Sequential\nfrom keras.layers import *\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\nfrom math import ceil\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\nclass in_oneLayerRNN(classification):\n def trainAlgo(self):\n self.customObj[\"tokenizer\"]=Tokenizer()\n self.customObj[\"tokenizer\"].fit_on_texts(self.inputData['X'].reshape(-1))\n word_index=self.customObj[\"tokenizer\"].word_index\n sequences=self.customObj[\"tokenizer\"].texts_to_sequences(self.inputData['X'].reshape(-1))\n x=pad_sequences(sequences,maxlen=self.param['max_seq_len'])\n self.model=Sequential()\n self.model.add(\n Embedding(len(word_index)+1,\n self.param['embed_dim'],\n input_length=self.param['max_seq_len'],\n trainable=True)\n )\n self.model.add(LSTM(self.param[\"LSTM_hidden_neuron\"],activation=self.param[\"LSTM_hidden_activation\"]))\n self.model.add(Dense(self.outputData['Y'].shape[1],activation='softmax'))\n self.model.compile(loss='categorical_crossentropy',optimizer=self.param['optimizer'])\n self.model.fit(\n x,\n self.outputData['Y'],\n batch_size=self.param['batch_size'],\n epochs=self.param['epochs']\n )\n def predictAlgo(self):\n # self.customObj[\"tokenizer\"]=Tokenizer()\n # self.customObj[\"tokenizer\"].fit_on_texts(self.inputData['X'].reshape(-1))\n word_index=self.customObj[\"tokenizer\"].word_index\n sequences=self.customObj[\"tokenizer\"].texts_to_sequences(self.inputData['X'].reshape(-1))\n x=pad_sequences(sequences,maxlen=self.param['max_seq_len'])\n r=self.model.predict(x)\n self.result['Y']=r\n","sub_path":"src/service/analyticService/core/analyticCore/nlp/classification/in_oneLayerRNN.py","file_name":"in_oneLayerRNN.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"339961686","text":"from tools import QueryScript\nfrom math import *\nfrom scipy.stats import binom\nfrom collections import Counter\nfrom datetime import date\nimport env\n\nclass Reprotoxicity:\n\n @staticmethod\n # retourne dict_nbr_days_exposition = {mp: nbrdays}\n def number_days_exposition(dict_pack):\n\n nature = 'reproduction'\n list_mp_repro = []\n for mp in dict_pack:\n try:\n dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n\n # Récupération des dates de début et de fin\n output_dates_debut = QueryScript(\n f\" SELECT measurepoint_id, date FROM {env.DATABASE_TREATED}.key_dates where date_id=1 and measurepoint_id IN {tuple(list_mp_repro) if len(list_mp_repro)>1 else '('+(str(list_mp_repro[0]) if len(list_mp_repro) else '0')+')'} and version= {env.CHOSEN_VERSION()};\"\n ).execute()\n output_dates_fin = QueryScript(\n f\" SELECT measurepoint_id, date FROM {env.DATABASE_TREATED}.key_dates where date_id=4 and measurepoint_id IN {tuple(list_mp_repro) if len(list_mp_repro)>1 else '('+(str(list_mp_repro[0]) if len(list_mp_repro) else '0')+')'} and version= {env.CHOSEN_VERSION()};\"\n ).execute()\n\n output_mp_debut = [x[0] for x in output_dates_debut]\n output_mp_fin = [x[0] for x in output_dates_fin]\n\n dict_dates_debut_fin = {} # {mp: [date_debut, date_fin]}\n for mp in list_mp_repro:\n try : \n idx_debut = output_mp_debut.index(mp)\n idx_fin = output_mp_fin.index(mp)\n dict_dates_debut_fin[mp] = [output_dates_debut[idx_debut][1], output_dates_fin[idx_fin][1]]\n 
except ValueError:\n dict_dates_debut_fin[mp] = [None, None]\n # Initialisation du dictionnaire de sortie\n dict_nbr_days_exposition = {mp: None for mp in dict_pack} # {mp: nbrdays}\n\n # Calcul\n for mp in list_mp_repro:\n [date_debut, date_fin] = dict_dates_debut_fin[mp]\n if date_debut is not None and date_fin is not None:\n if date_debut is None and date_fin is None:\n dict_nbr_days_exposition[mp] = \"NA\"\n else:\n date_fin_sans_heure = date(date_fin.year, date_fin.month, date_fin.day)\n date_debut_sans_heure = date(date_debut.year, date_debut.month, date_debut.day)\n nbrdays = (date_fin_sans_heure - date_debut_sans_heure).days\n dict_nbr_days_exposition[mp] = nbrdays\n else:\n dict_nbr_days_exposition[mp] = \"NA\"\n\n return dict_nbr_days_exposition\n\n @staticmethod\n # retourne dict_index_fecundity = {pack_id: {'list_molting_stage': [...], 'list_index_fecundity': [...]}\n def index_fecundity_female(list_pack_repro):\n SQL_request = f\" SELECT pack_id, female, molting_stage, embryo_stage, specimen_size_mm, specimen_size_px, embryo_total FROM {env.DATABASE_RAW}.MeasureReprotoxicity where pack_id IN {tuple(list_pack_repro) if len(list_pack_repro)>1 else '('+(str(list_pack_repro[0]) if len(list_pack_repro) else '0')+')'};\"\n output = QueryScript(SQL_request).execute()\n\n # Initialisation du dictionnaire de la requête mise en forme\n # {pack_id: {'px_to_mm': int, 'data': [[molting_stage, embryo_stage, specimen_size_mm, specimen_size_px, embryo_total], [...]] }}\n dict_result = {pack_id: {'px_to_mm': None, 'data': []} for pack_id in list_pack_repro}\n\n pack_errors = []\n\n for row in output:\n [pack_id, female, molting_stage, embryo_stage, specimen_size_mm, specimen_size_px, embryo_total] = row\n\n if female == '' or female == '0bis':\n continue\n\n if int(female) == 0: # Valeur étalon\n try:\n px_to_mm = specimen_size_mm/specimen_size_px\n except (TypeError, ZeroDivisionError):\n pack_errors.append(pack_id)\n continue\n dict_result[pack_id]['px_to_mm'] = px_to_mm\n elif female is not None: # Données à traiter ensuite\n data = [molting_stage, embryo_stage, specimen_size_mm, specimen_size_px, embryo_total]\n dict_result[pack_id]['data'].append(data)\n\n # Initialisation du dictionnaire de sortie\n dict_index_fecundity = {pack_id: {'list_molting_stage': [], 'list_index_fecundity': []} for pack_id in list_pack_repro}\n\n for pack_id in dict_result.keys():\n data = dict_result[pack_id]['data']\n px_to_mm = dict_result[pack_id]['px_to_mm']\n for row in data:\n [molting_stage, embryo_stage, specimen_size_mm, specimen_size_px, embryo_total] = row\n\n if len(row) == 0:\n dict_index_fecundity[pack_id]['list_index_fecundity'].append(0)\n else:\n dict_index_fecundity[pack_id]['list_molting_stage'].append(molting_stage)\n if embryo_stage in [2, 3, 4]:\n if embryo_total == 0:\n dict_index_fecundity[pack_id]['list_index_fecundity'].append(0)\n else:\n if specimen_size_mm is None or specimen_size_mm == 0:\n if specimen_size_px == 0:\n continue\n try:\n specimen_size_mm = specimen_size_px * px_to_mm\n except TypeError:\n pack_errors.append(pack_id)\n continue\n if specimen_size_mm == 0:\n pack_errors.append(pack_id)\n continue\n dict_index_fecundity[pack_id]['list_index_fecundity'].append(embryo_total/(specimen_size_mm-5))\n else:\n dict_index_fecundity[pack_id]['list_index_fecundity'].append(0)\n return dict_index_fecundity # {pack_id: {'list_molting_stage': [...], 'list_index_fecundity': [...]}\n\n @staticmethod\n # retourne dict_fecundity = {mp: {'nbr_femelles_analysées': int, 
'nbr_femelles_concernées': int, 'fécondité_moyenne': float}}\n def fecundity(dict_pack):\n nature = 'reproduction'\n list_pack_repro = []\n list_mp_repro = []\n for mp in dict_pack:\n try:\n pack_id = dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n list_pack_repro.append(pack_id)\n\n dict_index_fecundity = Reprotoxicity.index_fecundity_female(list_pack_repro) # {pack_id: {'list_molting_stage': [...], 'list_index_fecundity': [...]}\n\n # Initialisation du dictionnaire de sortie\n # {mp: {{'nbr_femelles_analysées': int, 'nbr_femelles_concernées': int, 'fécondité_moyenne': float}}\n dict_fecundity = {mp: {'nbr_femelles_analysées': None, 'nbr_femelles_concernées': None, 'fécondité_moyenne': None} for mp in dict_pack.keys()}\n\n for pack_id in dict_index_fecundity.keys():\n list_index_fecundity_not_clean = dict_index_fecundity[pack_id]['list_index_fecundity']\n list_molting_stage_not_clean = dict_index_fecundity[pack_id]['list_molting_stage']\n\n list_index_fecundity = [x for x in list_index_fecundity_not_clean if x != 0]\n list_molting_stage = [x for x in list_molting_stage_not_clean if x is not None]\n\n mp = list_mp_repro[list_pack_repro.index(pack_id)]\n\n nbr_femelles_concernees = len(list_index_fecundity) - list_index_fecundity.count(0)\n dict_fecundity[mp]['nbr_femelles_concernées'] = nbr_femelles_concernees\n\n cpt_molting_stage = Counter(list_molting_stage)\n cpt_filtre = [cpt_molting_stage.get(molting_stage) for molting_stage in ['b', 'c1', 'c2', 'd1', 'd2']]\n\n nbr_femelles_analysees = 0\n for x in cpt_filtre:\n if x is not None:\n nbr_femelles_analysees += x\n\n if nbr_femelles_analysees == 0:\n dict_fecundity[mp]['nbr_femelles_analysées'] = 'NA'\n else:\n dict_fecundity[mp]['nbr_femelles_analysées'] = nbr_femelles_analysees\n\n if nbr_femelles_analysees >= 10 and len(list_index_fecundity) != 0:\n fecondite_moyenne = sum(list_index_fecundity)/len(list_index_fecundity)\n else:\n fecondite_moyenne = \"NA\"\n dict_fecundity[mp]['fécondité_moyenne'] = fecondite_moyenne\n\n return dict_fecundity\n\n @staticmethod\n # retourne dict_molting = {mp: {'cycle de mue': ..%, 'cycle de mue attendu': ..%, 'nb_femelles_retard': int}}\n def molting_cycle(dict_pack):\n nature = 'reproduction'\n list_pack_repro = []\n list_mp_repro = []\n for mp in dict_pack:\n try:\n pack_id = dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n list_pack_repro.append(pack_id)\n\n SQL_request = f\" SELECT pack_id, molting_stage FROM {env.DATABASE_RAW}.MeasureReprotoxicity where pack_id IN {tuple(list_pack_repro) if len(list_pack_repro)>1 else '('+(str(list_pack_repro[0]) if len(list_pack_repro) else '0')+')'};\"\n SQL_request_2 = f\" SELECT measurepoint_id, expected_C2,expected_D2 FROM {env.DATABASE_TREATED}.temperature_repro where measurepoint_id IN {tuple(list_mp_repro) if len(list_mp_repro)>1 else '('+(str(list_mp_repro[0]) if len(list_mp_repro) else '0')+')'};\"\n resultat_molting_stage = QueryScript(SQL_request).execute()\n resultat_expected_stage = QueryScript(SQL_request_2).execute()\n\n dict_molting_stage = {pack_id: [] for pack_id in list_pack_repro}\n for row in resultat_molting_stage:\n [pack_id, molting_stage] = row\n dict_molting_stage[pack_id].append(molting_stage)\n\n dict_expected_stage = {mp: {'expected C2': None, 'expected D2': None} for mp in list_mp_repro}\n for row in resultat_expected_stage:\n [measurepoint_id, expected_C2, expected_D2] = row\n dict_expected_stage[measurepoint_id]['expected C2'] = expected_C2\n 
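# hypothetical example (added): a row (7, 80.0, 20.0) would store 80% expected C2 and 20% expected D2 for measurepoint 7\n            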
dict_expected_stage[measurepoint_id]['expected D2'] = expected_D2\n\n # Initialisation du dictionnaire de sortie\n dict_molting = {mp: {'cycle de mue': None, 'cycle de mue attendu': None, 'nb_femelles_retard': None} for mp in dict_pack.keys()}\n\n # Remplissage du dictionnaire de sortie\n for i, mp in enumerate(list_mp_repro):\n pack_id = list_pack_repro[i]\n expected_C2 = dict_expected_stage[mp]['expected C2']\n expected_D2 = dict_expected_stage[mp]['expected D2']\n dict_molting[mp]['cycle de mue attendu'] = expected_C2 if (expected_C2 == 'NA' or expected_C2 is None) else round(expected_C2-expected_D2)\n\n list_molting_stage = dict_molting_stage[pack_id]\n cpt_molting_stage = Counter(list_molting_stage)\n cpt_analysees = [cpt_molting_stage.get(molting_stage) for molting_stage in ['b', 'c1', 'c2', 'd1', 'd2']]\n cpt_c2_d1 = [cpt_molting_stage.get(molting_stage) for molting_stage in ['c2', 'd1']]\n\n nbr_femelles_analysees = 0\n for x in cpt_analysees:\n if x is not None:\n nbr_femelles_analysees += x\n\n nbr_femelles_c2_d1 = 0\n for x in cpt_c2_d1:\n if x is not None:\n nbr_femelles_c2_d1 += x\n\n if nbr_femelles_analysees == 0:\n molting_percent = 'NA'\n else:\n molting_percent = nbr_femelles_c2_d1/nbr_femelles_analysees\n\n dict_molting[mp]['cycle de mue'] = molting_percent if (molting_percent == 'NA' or molting_percent is None) else round(molting_percent*100)\n dict_molting[mp]['nb_femelles_retard'] = nbr_femelles_c2_d1\n\n return dict_molting # {mp: {'cycle de mue': ..%, 'cycle de mue attendu': ..%, 'nb_femelles_retard': int}}\n\n @staticmethod\n # retourne dict_surface_femelles_concernees, dict_surface_des_retards\n # dict_surface_femelles_concernees = {mp: nbr_femelles_concernees}\n # dict_surface_des_retards = {pack_id: [oocyte_area_mm, ...]}\n def number_female_concerned_area(dict_pack):\n nature = 'reproduction'\n list_pack_repro = []\n list_mp_repro = []\n for mp in dict_pack:\n try:\n pack_id = dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n list_pack_repro.append(pack_id)\n\n output = QueryScript(\n f\" SELECT pack_id, female, molting_stage, oocyte_area_pixel, oocyte_area_mm FROM {env.DATABASE_RAW}.MeasureReprotoxicity WHERE pack_id IN {tuple(list_pack_repro) if len(list_pack_repro)>1 else '('+(str(list_pack_repro[0]) if len(list_pack_repro) else '0')+')'};\"\n ).execute()\n\n # Reformatage des données de la requête\n dict_surface_ovocytaire = {pack_id: {'px_to_mm': None, 'data': []} for pack_id in list_pack_repro}\n\n for row in output:\n [pack_id, female, molting_stage, oocyte_area_pixel, oocyte_area_mm] = row\n\n if female == '' or female == '0bis':\n continue\n\n if int(female) == 0:\n try:\n dict_surface_ovocytaire[pack_id]['px_to_mm'] = oocyte_area_pixel / (oocyte_area_mm * 97.82) # Formule écrite en dure dans l'excel contenant les macros\n except TypeError:\n pass\n else:\n data = [molting_stage, oocyte_area_pixel, oocyte_area_mm]\n dict_surface_ovocytaire[pack_id]['data'].append(data)\n\n # Calcul des surfaces des retards\n dict_surface_des_retards = {pack_id: [] for pack_id in list_pack_repro}\n\n for pack_id in dict_surface_ovocytaire.keys():\n px_to_mm = dict_surface_ovocytaire[pack_id]['px_to_mm']\n data = dict_surface_ovocytaire[pack_id]['data']\n\n if len(data) == 0:\n continue\n\n else:\n for [molting_stage, oocyte_area_pixel, oocyte_area_mm] in data:\n if oocyte_area_mm is not None:\n surface_retard = oocyte_area_mm\n dict_surface_des_retards[pack_id].append(surface_retard)\n continue\n\n if molting_stage in ['c1', 'b'] and 
oocyte_area_pixel is not None:\n if px_to_mm is None:\n continue\n surface_retard = oocyte_area_pixel * px_to_mm\n dict_surface_des_retards[pack_id].append(surface_retard)\n else:\n continue\n\n # Calcul nbr_femelles_concernées\n dict_surface_femelles_concernees = {mp: 0 for mp in dict_pack.keys()}\n\n for mp in dict_surface_femelles_concernees.keys():\n if mp not in list_mp_repro:\n continue\n else:\n pack_id = dict_pack[mp]['reproduction']\n nbr_femelles_concernees = len(dict_surface_des_retards[pack_id])\n dict_surface_femelles_concernees[mp] = nbr_femelles_concernees\n\n return dict_surface_femelles_concernees, dict_surface_des_retards\n\n @staticmethod\n # Identique à la fonction LOI.BINOMIALE.INVERSE() de Excel\n def binom_inv(n, p, s):\n for k in range(n+1):\n if binom.cdf(k, n, p) > s:\n return k\n if k == n:\n return None\n\n @staticmethod\n # retourne dict_conform_resultat_mue = {pack_id: 'NA', 'Retard fort', 'Retard modéré' ou 'Conforme'}\n def conform_resultat_mue(dict_pack):\n\n nature = 'reproduction'\n list_pack_repro = []\n list_mp_repro = []\n for mp in dict_pack:\n try:\n pack_id = dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n list_pack_repro.append(pack_id)\n\n # Récupération du nombre de retard et du nombre de femelles analysées\n output_molting = QueryScript(\n f\" SELECT pack_id, molting_stage FROM {env.DATABASE_RAW}.MeasureReprotoxicity where pack_id IN {tuple(list_pack_repro) if len(list_pack_repro)>1 else '('+(str(list_pack_repro[0]) if len(list_pack_repro) else '0')+')'};\"\n ).execute()\n\n dict_molting_stage = {pack_id: [] for pack_id in list_pack_repro}\n for row in output_molting:\n [pack_id, molting_stage] = row\n dict_molting_stage[pack_id].append(molting_stage)\n\n dict_nombre_femelles = {pack_id: {'nbr_retards': 0, 'nbr_analysées': 0} for pack_id in list_pack_repro}\n for pack_id in list_pack_repro:\n list_molting_stage = dict_molting_stage[pack_id]\n cpt_molting_stage = Counter(list_molting_stage)\n cpt_analysees = [cpt_molting_stage.get(molting_stage) for molting_stage in ['b', 'c1', 'c2', 'd1', 'd2']]\n cpt_retards = [cpt_molting_stage.get(molting_stage) for molting_stage in ['b', 'c1']]\n\n nbr_analysees = sum([x for x in cpt_analysees if x is not None])\n nbr_retards = sum([x for x in cpt_retards if x is not None])\n\n dict_nombre_femelles[pack_id]['nbr_retards'] = nbr_retards\n dict_nombre_femelles[pack_id]['nbr_analysées'] = nbr_analysees\n\n ## Calcul des valeurs de test unilatéral\n # Récupération du pourcentage attendu en B/C1\n output_expected = QueryScript(\n f\" SELECT measurepoint_id, expected_C2 FROM {env.DATABASE_TREATED}.temperature_repro WHERE measurepoint_id IN {tuple(list_mp_repro) if len(list_mp_repro)>1 else '('+(str(list_mp_repro[0]) if len(list_mp_repro) else '0')+')'} and version= {env.CHOSEN_VERSION()};\"\n ).execute()\n dict_expected_BC1 = {pack_id: 0 for pack_id in list_pack_repro}\n\n for row in output_expected:\n [mp, expected_C2] = row\n pack_id = dict_pack[mp]['reproduction']\n dict_expected_BC1[pack_id] = 100-expected_C2\n\n # Récupération des seuils de référence\n output_reference = QueryScript(\n f\" SELECT name, value FROM {env.DATABASE_TREATED}.r2_constant WHERE name IN ('Risque 1 Mue', 'Risque 2 Mue') and version= {env.CHOSEN_VERSION()};\"\n ).execute()\n for row in output_reference:\n [name, value] = row\n if name == 'Risque 1 Mue':\n seuil_test_5percent = value\n else:\n seuil_test_1percent = value\n\n # Calcul des valeurs de test unilatéral\n dict_test_unilateral = {pack_id: 
{'test_5percent': 0, 'test_1percent': 0} for pack_id in list_pack_repro}\n\n for pack_id in dict_test_unilateral.keys():\n nbr_analysees = dict_nombre_femelles[pack_id]['nbr_analysées']\n expected_BC1 = dict_expected_BC1[pack_id]\n\n test_5percent = Reprotoxicity.binom_inv(nbr_analysees, expected_BC1/100, seuil_test_5percent)\n test_1percent = Reprotoxicity.binom_inv(nbr_analysees, expected_BC1/100, seuil_test_1percent)\n\n dict_test_unilateral[pack_id]['test_5percent'] = test_5percent\n dict_test_unilateral[pack_id]['test_1percent'] = test_1percent\n\n ## Calcul de la conformité des mues\n dict_conform_resultat_mue = {mp: \"NA\" for mp in dict_pack.keys()}\n\n for pack_id in list_pack_repro:\n mp = list_mp_repro[list_pack_repro.index(pack_id)]\n nbr_analysees = dict_nombre_femelles[pack_id]['nbr_analysées']\n if nbr_analysees < 10:\n dict_conform_resultat_mue[mp] = \"NA\"\n continue\n\n nbr_retards = dict_nombre_femelles[pack_id]['nbr_retards']\n test_5percent = dict_test_unilateral[pack_id]['test_5percent']\n test_1percent = dict_test_unilateral[pack_id]['test_1percent']\n if nbr_retards > test_5percent:\n dict_conform_resultat_mue[mp] = \"Retard fort\"\n elif nbr_retards > test_1percent:\n dict_conform_resultat_mue[mp] = \"Retard modéré\"\n else:\n dict_conform_resultat_mue[mp] = \"Conforme\"\n\n return dict_conform_resultat_mue\n\n @staticmethod\n # retourne dict_conform_surface_retard = {pack_id: 'NA', 'PE', 'Conforme BC1' ou 'Conforme'}\n def conform_surface_retard(dict_pack, dict_surface_femelles_concernees, dict_surface_des_retards, dict_fecundity):\n nature = 'reproduction'\n list_pack_repro = []\n list_mp_repro = []\n for mp in dict_pack:\n try:\n pack_id = dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n list_pack_repro.append(pack_id)\n\n ## Surface moyenne des retards\n dict_surface_moyenne_retards = {mp: None for mp in dict_pack.keys()}\n for pack_id in list_pack_repro:\n mp = list_mp_repro[list_pack_repro.index(pack_id)]\n nbr_analysees = dict_fecundity[mp]['nbr_femelles_analysées']\n list_surface_retards = dict_surface_des_retards[pack_id]\n if nbr_analysees == 'NA':\n continue\n if nbr_analysees >= 10:\n try:\n dict_surface_moyenne_retards[mp] = sum(list_surface_retards)/len(list_surface_retards)\n except ZeroDivisionError:\n pass\n\n\n\n ## Seuil unilatéral 5%\n # Récupération des références\n names = ['Constante surface des retards 1', 'Moyenne des surfaces de référence C2', 'SD des surfaces de référence C2']\n\n output_ref = QueryScript(\n f\" SELECT name, value FROM {env.DATABASE_TREATED}.r2_constant WHERE name IN {tuple(names) if len(names)>1 else '('+(str(names[0]) if len(names) else '0')+')'} and version= {env.CHOSEN_VERSION()};\"\n ).execute()\n for row in output_ref:\n [name, value] = row\n if name == 'Constante surface des retards 1':\n cst_surface_des_retards = value\n if name == 'Moyenne des surfaces de référence C2':\n moyenne_surface_refC2 = value\n if name == 'SD des surfaces de référence C2':\n SD_surface_refC2 = value\n\n # Calcul des seuils\n dict_seuil_unilateral_5percent = {pack_id: None for pack_id in list_pack_repro}\n for pack_id in list_pack_repro:\n mp = list_mp_repro[list_pack_repro.index(pack_id)]\n nbr_concernees = dict_surface_femelles_concernees[mp]\n try:\n seuil_5percent = moyenne_surface_refC2 - cst_surface_des_retards * SD_surface_refC2 / sqrt(nbr_concernees)\n except ZeroDivisionError:\n pass\n else:\n dict_seuil_unilateral_5percent[pack_id] = seuil_5percent\n\n ## Calcul de la conformité des surfaces 
des retards\n # Récupération de données\n dict_conform_resultat_mue = Reprotoxicity.conform_resultat_mue(dict_pack)\n\n # Initialisation du dictionnaire de sortie\n dict_conform_surface_retard = {mp: \"NA\" for mp in dict_pack.keys()}\n\n for pack_id in list_pack_repro:\n mp = list_mp_repro[list_pack_repro.index(pack_id)]\n nbr_analysees = dict_fecundity[mp]['nbr_femelles_analysées']\n\n if nbr_analysees == 'NA' or nbr_analysees < 10:\n continue\n\n conform_mue = dict_conform_resultat_mue[mp]\n if conform_mue == \"Retard fort\" or conform_mue == \"Retard modéré\":\n surface_moyenne_retards = dict_surface_moyenne_retards[mp]\n seuil_5percent = dict_seuil_unilateral_5percent[pack_id]\n if surface_moyenne_retards is None or seuil_5percent is None:\n continue\n if surface_moyenne_retards > seuil_5percent:\n dict_conform_surface_retard[mp] = \"PE\"\n else:\n dict_conform_surface_retard[mp] = \"Conforme BC1\"\n else:\n dict_conform_surface_retard[mp] = \"Conforme\"\n\n return dict_conform_surface_retard, dict_surface_moyenne_retards\n\n @staticmethod\n # retourne dict_perturbation_endocrinienne = {mp: 'NA' ou moyenne des surfaces des retards}\n def perturbation_endocrinienne(dict_pack, dict_surface_femelles_concernees, dict_surface_des_retards, dict_fecundity):\n nature = 'reproduction'\n list_pack_repro = []\n list_mp_repro = []\n for mp in dict_pack:\n try:\n pack_id = dict_pack[mp][nature]\n except KeyError:\n pass\n else:\n list_mp_repro.append(mp)\n list_pack_repro.append(pack_id)\n\n # Récupération de données\n dict_conform_surface_retard, dict_surface_moyenne_retards = Reprotoxicity.conform_surface_retard(dict_pack, dict_surface_femelles_concernees, dict_surface_des_retards, dict_fecundity)\n\n # Initialisation du dictionnaire de sortie\n dict_perturbation_endocrinienne = {mp: \"NA\" for mp in dict_pack.keys()}\n\n for mp in dict_pack.keys():\n if mp not in list_mp_repro:\n continue\n conform_surface = dict_conform_surface_retard[mp]\n moyenne_surface = dict_surface_moyenne_retards[mp]\n\n if conform_surface == \"Conforme\":\n continue\n else:\n dict_perturbation_endocrinienne[mp] = moyenne_surface\n\n return dict_perturbation_endocrinienne\n\n\n \n \n\n\n\n\n \n \n\n\n \n\n\n\n\n\n\n \n \n \n\n\n\n\n\n\n\n\n\n\n \n \n\n\n\n \n \n \n \n\n \n","sub_path":"calcul/toxicity/reprotoxicity.py","file_name":"reprotoxicity.py","file_ext":"py","file_size_in_byte":26198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"496699657","text":"\"Test undumping a substantial Anubis dump into jsondocdb.\"\n\nimport json\nimport os.path\nimport random\nimport tarfile\nimport time\n\nimport jsondocdb\n\nimport tqdm\n\n\ndef create_indexes(db):\n require_call = {\"==\": [{\"var\": \"doctype\"}, \"call\"]}\n db.index(\"call_identifier\", \"identifier\", require=require_call)\n db.index(\"call_closes\", \"closes\", require=require_call)\n db.index(\"call_opens\", \"opens\", require=require_call)\n db.index(\"call_owner\", \"owner\", require=require_call)\n require_proposal = {\"==\": [{\"var\": \"doctype\"}, \"proposal\"]}\n db.index(\"proposal_identifier\", \"identifier\", require=require_proposal)\n db.index(\"proposal_call\", \"call\", require=require_proposal)\n db.index(\"proposal_user\", \"user\", require=require_proposal)\n require_review = {\"==\": [{\"var\": \"doctype\"}, \"review\"]}\n db.index(\"review_call\", \"call\", require=require_review)\n db.index(\"review_proposal\", \"proposal\", require=require_review)\n db.index(\"review_reviewer\", 
\"reviewer\", require=require_review)\n require_decision = {\"==\": [{\"var\": \"doctype\"}, \"decision\"]}\n db.index(\"decision_call\", \"call\", require=require_decision)\n db.index(\"decision_proposal\", \"proposal\", require=require_decision)\n require_grant = {\"==\": [{\"var\": \"doctype\"}, \"grant\"]}\n db.index(\"grant_identifier\", \"identifier\", require=require_grant)\n db.index(\"grant_call\", \"call\", require=require_grant)\n db.index(\"grant_proposal\", \"proposal\", require=require_grant)\n db.index(\"grant_user\", \"user\", require=require_grant)\n require_user = {\"==\": [{\"var\": \"doctype\"}, \"user\"]}\n db.index(\"user_username\", \"username\", require=require_user)\n db.index(\"user_email\", \"email\", require=require_user)\n db.index(\"user_orcid\", \"orcid\", require=require_user)\n db.index(\"user_role\", \"role\", require=require_user)\n db.index(\"user_status\", \"status\", require=require_user)\n db.index(\"user_last_login\", \"last_login\", require=require_user)\n\ndef undump(filepath, db):\n \"\"\"Load the `tar` file given by the path into the database.\n It must have been produced by `db.dump`.\n\n Returns a tuple `(ndocs, nfiles)` giving the number of documents\n and attached files read from the file.\n\n NOTE: The documents are just added to the database, ignoring any\n `_rev` items. This means that no document with the same identifier\n must exist in the database.\n \"\"\"\n ndocs = 0\n nfiles = 0\n atts = dict()\n with tarfile.open(filepath, mode=\"r\") as infile:\n total = sum(1 for member in infile if member.isreg())\n with tarfile.open(filepath, mode=\"r\") as infile:\n iterator = tqdm.tqdm(infile, total=total)\n for item in iterator:\n itemfile = infile.extractfile(item)\n itemdata = itemfile.read()\n itemfile.close()\n if item.name in atts:\n # An attachment follows its document.\n a = atts.pop(item.name)\n with db:\n db.attachments(doc[\"_id\"]).put(a[\"filename\"], itemdata, a[\"content_type\"])\n nfiles += 1\n else:\n doc = json.loads(itemdata.decode(\"utf-8\"))\n doc.pop(\"_rev\", None)\n atts = doc.pop(\"_attachments\", dict())\n with db:\n db[doc[\"_id\"]] = doc\n ndocs += 1\n for attname, attinfo in list(atts.items()):\n key = u\"{}_att/{}\".format(doc[\"_id\"], attname)\n atts[key] = dict(filename=attname,\n content_type=attinfo[\"content_type\"])\n return (ndocs, nfiles)\n\n\nif __name__ == \"__main__\":\n dbfilepath = \"dump.db\"\n db = jsondocdb.Database(dbfilepath)\n if len(db) == 0:\n time0 = time.perf_counter()\n print(undump(\"anubis_dump_2023-01-17.tar.gz\", db))\n print(time.perf_counter() - time0, \"seconds\")\n time0 = time.perf_counter()\n create_indexes(db)\n print(time.perf_counter() - time0, \"seconds\")\n print(db)\n identifiers = list(db)\n time0 = time.perf_counter()\n for identifier in random.sample(identifiers, 10000):\n doc = db[identifier]\n a = db.attachments(identifier)\n if a:\n atts = list(a.items())\n print(time.perf_counter() - time0, \"seconds\")\n","sub_path":"undump.py","file_name":"undump.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"641858560","text":"from django.conf.urls import patterns, url\nfrom search import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='search'),\n url(r'^aol$', views.aol, name='aol'),\n url(r'^spoe$', views.spoe, name='spoe'),\n url(r'^postcode$', views.postcode, name='postcode'),\n url(r'^results$', views.results, name='results'),\n\n url(r'^address$', 
views.address, name='address'),\n\n    url(r'^results.json$', views.results_json, name='api-results'),\n    url(r'^datastatus$', views.data_status, name='data-status'),\n)\n","sub_path":"courtfinder/search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"631727615","text":"import numpy as np\nfrom pylab import *\nimport urllib2\nimport os\n\nclass readsounding:\n    def __init__(self,loc,year,month,day,time):\n        # path to sounding (remote)\n        path = 'http://weather.uwyo.edu/cgi-bin/sounding?region=europe&TYPE=TEXT:LIST&YEAR=%04i&MONTH=%02i&FROM=%02i%02i&TO=%02i%02i&STNM=%05i'%(year, month, day, time, day, time, loc)\n\n        try: \n            resp = urllib2.urlopen(path)\n            self.success = True\n        except: \n            self.success = False\n        \n        if(self.success): \n            self.p = []\n            self.z = []\n            self.T = []\n            self.Td = []\n            self.RH = []\n            self.q = []\n            self.V = []\n            self.Vd = []\n\n            # Holy @#$@$# how ugly\n            sound = resp.read().split('\\n')\n            skipped = 0\n            for i in range(10,len(sound)):\n                if(len(sound[i].split())==11):\n                    try:\n                        vals = sound[i].split()\n                        self.p.append(float(vals[0]))\n                        self.z.append(float(vals[1]))\n                        self.T.append(float(vals[2]))\n                        self.Td.append(float(vals[3]))\n                        self.RH.append(float(vals[4]))\n                        self.q.append(float(vals[5]))\n                        self.V.append(float(vals[7]))\n                        self.Vd.append(float(vals[6]))\n                    except ValueError:\n                        skipped += 1\n            #print('Skipped %2i lines in reading sounding'%skipped)\n\n            self.p = np.array(self.p )\n            self.z = np.array(self.z )\n            self.T = np.array(self.T )\n            self.Td = np.array(self.Td)\n            self.RH = np.array(self.RH)\n            self.q = np.array(self.q )\n            self.V = np.array(self.V )\n            self.Vd = np.array(self.Vd)\n\n#d = readsounding(10410,2008,12,12,00)\n","sub_path":"tools/readsounding.py","file_name":"readsounding.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"321962040","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\n\nimport hdf5storage\nimport pandas as pd\n\n\ndataname='alldata_1316_fixed.mat'\nmat = hdf5storage.loadmat(dataname)\npdata = pd.DataFrame(mat['Data'],columns = ['DATE','PERMNO','VOL','SHROUT','retadj','LME','ret','prca','OPENPRC','divamt','facpra','facshr','SHRCD','EXCHCD','DLRET','DLPRC','DISTCD','PERMCO','weight_port','lprc','ME','ticker_idx','SIZEPORT','BTMPORT','OPPORT','INVPORT','RRGRP'])\n# startdate = 20150901\n# enddate = 20150901\n# ind=(pdata['DATE'] >= startdate) & (pdata['DATE'] <= enddate)\nDataPc=pdata\nentry=[1,2,19,8,5,10,11,12,6,15,16,17,20,21,23,25,9]\n#entry = [1,2,19,8,5,9,10,11,12,6,15,16,17,20,21,22,23,24,25,26,27]\nentryew=[i-1 for i in entry]\nDataPcnew=DataPc.iloc[:,entryew].copy()\n#DataPcnew.columns.values.tolist()\nDataPcnew['prca']=DataPcnew['prca'].abs()\nDataPcnew['lprc']=DataPcnew['lprc'].abs()\n\n\n# In[11]:\n\n\n\na = DataPcnew['DATE'].astype(str).copy()\na =a.map(lambda x:x[0:4]+'-'+x[4:6]+'-'+x[6:8])\nDataPcnew['datadate']=pd.to_datetime(a)\nDataPcnew\n\n\n# In[8]:\n\n\nimport numpy as np\nDataPcnew['nonmissport']=np.where((DataPcnew['OPPORT']!=''), 1, 0)\n\n\n# In[12]:\n\n\ndef sz_bucket(row):\n    # `row['SIZEPORT'] == np.nan` is always False; pd.isnull() actually detects missing values\n    if pd.isnull(row['SIZEPORT']):\n        value=''\n    elif row['SIZEPORT']==1:\n        value='S'\n    else:\n        value='B'\n    return value\n\ndef rw_bucket(row):\n    if row['OPPORT']==1:\n        value = 'W'\n    elif row['OPPORT']==2:\n        value='M'\n    elif row['OPPORT']==3:\n        value='R'\n    else:\n        value=''\n    return value\n\n# assign size 
portfolio\nDataPcnew['szport']=np.where((DataPcnew['ME']>0), DataPcnew.apply(sz_bucket, axis=1), '')\n# assign book-to-market portfolio\nDataPcnew['rwport']=np.where((DataPcnew['ME']>0), DataPcnew.apply(rw_bucket, axis=1), '')\n\n\n# In[15]:\n\n\n############################\n# Form Fama French Factors #\n############################\n\n# function to calculate value weighted return\ndef wavg(group, avg_name, weight_name):\n d = group[avg_name]\n w = group[weight_name]\n try:\n return (d * w).sum() / w.sum()\n except ZeroDivisionError:\n return np.nan\n\n# value-weigthed return\nvwret=DataPcnew.groupby(['datadate','szport','rwport']).apply(wavg, 'retadj','weight_port').to_frame().reset_index().rename(columns={0: 'vwret'})\nvwret['sbport']=vwret['szport']+vwret['rwport']\n\n# # firm count\n# vwret_n=ccm4.groupby(['jdate','szport','rwport'])['retadj'].count().reset_index().rename(columns={'retadj':'n_firms'})\n# vwret_n['sbport']=vwret_n['szport']+vwret_n['rwport']\n\n# tranpose\nff_factors=vwret.pivot(index='datadate', columns='sbport', values='vwret').reset_index()\n# ff_nfirms=vwret_n.pivot(index='jdate', columns='sbport', values='n_firms').reset_index()\n\n# create SMB and HML factors\nff_factors['WW']=(ff_factors['BW']+ff_factors['SW'])/2\nff_factors['WR']=(ff_factors['BR']+ff_factors['SR'])/2\nff_factors['matRMW'] = ff_factors['WR']-ff_factors['WW']\n\n# ff_factors['WB']=(ff_factors['BW']+ff_factors['BM']+ff_factors['BR'])/3\n# ff_factors['WS']=(ff_factors['SW']+ff_factors['SM']+ff_factors['SR'])/3\n# ff_factors['WSMB'] = ff_factors['WS']-ff_factors['WB']\nff_factors=ff_factors.rename(columns={'datadate':'date'})\n\n\n# In[16]:\n\n\nff_factors\n\n\n# In[20]:\n\n\nimport matplotlib.pyplot as plt\nCSV_FILE_PATH = 'F:\\\\RA_Fama_French_Factor\\\\five_factor_model\\\\SIZE_RMW\\\\FF_Model_RMW8018.xlsx'\npydata = pd.read_excel(CSV_FILE_PATH, index_col=0)\n\n\n# In[23]:\n\n\nmat=ff_factors[['date','matRMW']]\n\n\n# In[27]:\n\n\nstart=pd.to_datetime('2016-12-30')\nend=pd.to_datetime('2013-07-01')\npy=pydata[(pydata.date<=start) & (pydata.date>= end)]\npy=py[['date','WRMW']].reset_index(drop='true')\n\n\n# In[29]:\n\n\ncomparison = pd.merge(mat,py,how='inner',on='date')\ncomparison\n\n\n# In[36]:\n\n\n\nimport pandas_datareader.data as web # module for reading datasets directly from the web\n#pip install pandas-datareader (in case you haven't install this package)\nfrom pandas_datareader.famafrench import get_available_datasets\nimport pickleshare\nimport pandas as pd\nfrom pandas.core.frame import DataFrame\nimport numpy as np\nimport datetime as dt\nimport psycopg2 \nimport matplotlib.pyplot as plt\nfrom dateutil.relativedelta import *\nfrom pandas.tseries.offsets import *\nfrom scipy import stats\nfrom matplotlib.backends.backend_pdf import PdfPages\nfactor='matRMW'\nwfactor='WRMW'\nprint(stats.pearsonr(comparison[factor], comparison[wfactor]))\n\n\n# In[37]:\n\n\n\npdf1=plt.figure(figsize=(12,4)) \nplt.suptitle(\"Comparison of Results\", fontsize=14)\nplt.ylabel(\"Return\")\nplt.title(factor)\nplt.plot(comparison['date'],comparison[factor],label = factor,color='red')\nplt.plot(comparison['date'],comparison[wfactor], label = 'py_'+wfactor,color='blue')\nplt.legend(loc=\"best\")\n\n\n# In[38]:\n\n\npp = 
PdfPages(factor+\".pdf\")\npp.savefig(pdf1)\npp.close()\n\n","sub_path":"five_factor_model/HF_Package/Functions/Python_realization/Daily_mat_py_rmw.py","file_name":"Daily_mat_py_rmw.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"402552804","text":"import simpleaudio as sa\nimport numpy as np\n\n# filename = 'testfile.wav'\n#wave_obj = sa.WaveObject.from_wave_file(filename)\n#play_obj = wave_obj.play()\n#play_obj.wait_done() # Wait until sound has finished playing\n\nfrequency = 440 # Our played note will be 440 Hz\nfs = 44100 # 44100 samples per second\nseconds = 3 # Note duration of 3 seconds\n\n# Generate array with seconds*sample_rate steps, ranging between 0 and seconds\nt = np.linspace(0, seconds, seconds * fs, False)\n\n# Generate a 440 Hz sine wave\nnote = np.sin(frequency * t * 2 * np.pi)\n\n# Ensure that highest value is in 16-bit range\naudio = note * (2**15 - 1) / np.max(np.abs(note))\n# Convert to 16-bit data\naudio = audio.astype(np.int16)\n\n# Start playback\nplay_obj = sa.play_buffer(audio, 1, 2, fs)\n\n# Wait for playback to finish before exiting\nplay_obj.wait_done()\n","sub_path":"simplesound_test.py","file_name":"simplesound_test.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"400879763","text":"'''\n Copyright 2015 University of Auckland\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\nimport json\n\nfrom opencmiss.neon.core.neonsceneviewer import NeonSceneviewer\nfrom opencmiss.neon.settings import mainsettings\nfrom opencmiss.neon.core.neonregion import NeonRegion\nfrom opencmiss.neon.core.neonspectrums import NeonSpectrums\nfrom opencmiss.neon.core.neontessellations import NeonTessellations\nfrom opencmiss.zinc.context import Context\nfrom opencmiss.zinc.material import Material\nfrom opencmiss.neon.core.misc.neonerror import NeonError\nfrom opencmiss.neon.core.neonlogger import NeonLogger\nfrom opencmiss.neon.core.neonproject import NeonProject\n\n\nclass NeonDocument(object):\n\n def __init__(self):\n self._project = None\n self._zincContext = None\n self._rootRegion = None\n self._spectrums = None\n self._tessellations = None\n self._sceneviewer = None\n\n def initialiseVisualisationContents(self):\n self._zincContext = Context(\"Neon\")\n\n sceneviewermodule = self._zincContext.getSceneviewermodule()\n sceneviewermodule.setDefaultBackgroundColourRGB([1.0, 1.0, 1.0])\n\n # set up standard materials and glyphs\n materialmodule = self._zincContext.getMaterialmodule()\n materialmodule.beginChange()\n materialmodule.defineStandardMaterials()\n # make default material black\n defaultMaterial = materialmodule.getDefaultMaterial()\n defaultMaterial.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.0, 0.0, 0.0])\n defaultMaterial.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.0, 0.0, 0.0])\n # still want surfaces to default to white material\n white = 
materialmodule.findMaterialByName(\"white\")\n materialmodule.setDefaultSurfaceMaterial(white)\n materialmodule.endChange()\n glyphmodule = self._zincContext.getGlyphmodule()\n glyphmodule.defineStandardGlyphs()\n\n zincRootRegion = self._zincContext.getDefaultRegion()\n self._rootRegion = NeonRegion(name=None, zincRegion=zincRootRegion, parent=None)\n self._rootRegion.connectRegionChange(self._regionChange)\n\n self._spectrums = NeonSpectrums(self._zincContext)\n self._tessellations = NeonTessellations(self._zincContext)\n self._sceneviewer = NeonSceneviewer(self._zincContext)\n NeonLogger.setZincContext(self._zincContext)\n\n def freeVisualisationContents(self):\n \"\"\"\n Deletes subobjects of document to help free memory held by Zinc objects earlier.\n \"\"\"\n self._rootRegion.freeContents()\n del self._sceneviewer\n del self._tessellations\n del self._spectrums\n del self._rootRegion\n del self._zincContext\n\n def initialiseProject(self):\n self._project = NeonProject()\n\n def freeProject(self):\n self._project = None\n\n def _regionChange(self, changedRegion, treeChange):\n \"\"\"\n If root region has changed, set its new Zinc region as Zinc context's default region.\n :param changedRegion: The top region changed\n :param treeChange: True if structure of tree, or zinc objects reconstructed\n \"\"\"\n if treeChange and (changedRegion is self._rootRegion):\n zincRootRegion = changedRegion.getZincRegion()\n self._zincContext.setDefaultRegion(zincRootRegion)\n\n def deserialize(self, state):\n '''\n :param state: string serialisation of Neon JSON document\n '''\n d = json.loads(state)\n if not ((\"OpenCMISS-Neon Version\" in d) and (\"RootRegion\" in d)):\n raise NeonError(\"Invalid Neon file\")\n neon_version = d[\"OpenCMISS-Neon Version\"]\n if neon_version > mainsettings.VERSION_LIST:\n raise NeonError(\"File version is greater than this version of Neon (\" + mainsettings.VERSION_STRING + \"). Please update your Neon application.\")\n # Ideally would enclose following in:\n # try: zincRegion.beginHierarchicalChange() ... 
finally: zincRegion.endHierarchicalChange()\n # Can't do this due to Zinc issue 3924 which prevents computed field wrappers being created, so graphics can't find fields\n if \"Project\" in d:\n self._project.deserialize(d[\"Project\"])\n if \"Tessellations\" in d:\n self._tessellations.deserialize(d[\"Tessellations\"])\n if \"Spectrums\" in d:\n self._spectrums.deserialize(d[\"Spectrums\"])\n if \"Sceneviewer\" in d:\n self._sceneviewer.deserialize(d[\"Sceneviewer\"])\n self._rootRegion.deserialize(d[\"RootRegion\"])\n if neon_version == '0.1.0':\n self._problem.setName('Generic')\n\n def serialize(self, basePath=None):\n dictOutput = {}\n dictOutput[\"OpenCMISS-Neon Version\"] = mainsettings.VERSION_LIST\n dictOutput[\"Project\"] = self._project.serialize()\n dictOutput[\"Spectrums\"] = self._spectrums.serialize()\n dictOutput[\"Tessellations\"] = self._tessellations.serialize()\n dictOutput[\"RootRegion\"] = self._rootRegion.serialize(basePath)\n dictOutput[\"Sceneviewer\"] = self._sceneviewer.serialize()\n return json.dumps(dictOutput, default=lambda o: o.__dict__, sort_keys=True, indent=2)\n\n def getZincContext(self):\n return self._zincContext\n\n def getRootRegion(self):\n return self._rootRegion\n\n def getSpectrums(self):\n return self._spectrums\n\n def getTessellations(self):\n return self._tessellations\n\n def setProject(self, project):\n self._project = project\n\n def getProject(self):\n return self._project\n\n def getSceneviewer(self):\n return self._sceneviewer\n","sub_path":"src/opencmiss/neon/core/neondocument.py","file_name":"neondocument.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"301630520","text":"import fnmatch\nimport os\nimport sys\nimport stat\nimport hashlib\nimport base64\nimport pathlib\n\ncurrent_working_directory = os.getcwd()\nfile_pattern = ('*.csv*', '*.txt*', '*.zip*', '*.xml*', '*.ini*')\nphp_pattern = '*.php'\ndisabled_php_functions = (\"exec(\", \"passthru(\", \"shell_exec(\", \"system(\", \"proc_open(\",\n\"popen(\", \"curl_exec(\", \"curl_multi_exec(\", \"parse_ini_file(\", \"show_source(\")\n\ndef EncodeFunctions(functions):\n encoded_dpf = list()\n for encode in functions:\n encode = str(base64.b64encode(encode))\n encoded_dpf.append(encode)\n return encoded_dpf\n\ndef SearchFiletypes(pattern, counter = 0):\n extension_list = list()\n # Change this to make the root path default to /var/www/html or have user input\n rootPath = \"/var/www/html\"\n if type(pattern) is tuple:\n for x in pattern:\n for root, dirs, files in os.walk(rootPath):\n for filename in fnmatch.filter(files, pattern[counter]):\n extension_list.append((os.path.join(root, filename)))\n counter = counter + 1\n if type(pattern) is str:\n for root, dirs, files in os.walk(rootPath):\n for filename in fnmatch.filter(files, pattern):\n extension_list.append((os.path.join(root, filename)))\n return extension_list\n\ndef FilePermissions(files):\n permissions_list = list()\n for q in files:\n files_to_check = q\n st = os.stat(files_to_check)\n permissions = oct(st.st_mode)\n permissions = str(permissions.strip(\"o100\"))\n permissions_list.append(files_to_check + \" \" + permissions)\n return permissions_list\n\ndef UserCommandHistory():\n command_list = list()\n users_history_path_list = list()\n bash_check = \"/home\"\n counter = 0\n for root, dirs, files in os.walk(bash_check):\n for filename in fnmatch.filter(files, \"*.bash_history\"):\n users_history_path = os.path.join(root, 
filename)\n users_history_path_list.append(users_history_path)\n for users in users_history_path_list:\n command_list.append(\"Below is the history for this user \" + users)\n f = open(users_history_path_list[counter], \"r\")\n for x in f:\n command_list.append(x)\n counter = counter + 1\n return command_list\n\ndef RootCommandHistory():\n command_list = list()\n bash_check = \"/root\"\n for root, dirs, files in os.walk(bash_check):\n for filename in fnmatch.filter(files, \"*.bash_history\"):\n users_history_path = os.path.join(root, filename)\n f = open(users_history_path, \"r\")\n for x in f:\n command_list.append(x)\n return command_list\n\ndef FindShell(files):\n file_counter = 0\n lines_found = list()\n for each_file in files:\n for lines in open(files[file_counter], \"r\", errors=\"ignore\"):\n for found in disabled_php_functions:\n if lines.startswith(found):\n lines_found.append(lines + \"in \" + each_file)\n file_counter = file_counter + 1\n return lines_found\n\ndef HTMLOutput():\n file_search = SearchFiletypes(file_pattern)\n permission_search = FilePermissions(file_search)\n php_scan = SearchFiletypes(php_pattern)\n shellscan =FindShell(php_scan)\n user_command_history = UserCommandHistory()\n try:\n root_command_history = RootCommandHistory()\n except:\n pass\n f = open(\"howdy.html\", \"w\")\n f.write(\"
<html><head><title>Hack Checker</title></head><body>\")\n    f.write(\"<h2>The Following Files may contain sensitive information and are accessible to the web</h2>\")\n    for x in permission_search:\n        f.write(x)\n    f.write(\"<br>\")\n    f.write(\"<h2>Below is all commands entered by users</h2>\")\n    for x in user_command_history:\n        f.write(\"<br>\" + x)\n    try:\n        f.write(\"<h2>Below is all commands entered by root</h2>\")\n        for x in root_command_history:\n            f.write(\"<br>\" + x)\n    except:\n        pass\n    f.write(\"<h2>The below files have dangerous php code in them, they may be shells</h2>\")\n    for x in shellscan:\n        f.write(\"<br>\" + x)\n    f.close()\n\nHTMLOutput()\n","sub_path":"detectit.py","file_name":"detectit.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"56236835","text":"import asyncio\nfrom signal import SIGINT, SIGTERM\n\n\nDELAY = 3\nSIGNALS = (SIGINT, SIGTERM)\n\n\nasync def hello():\n    print('Hello ')\n    for _ in range(DELAY):\n        print('...')\n        await asyncio.sleep(1.0)\n\n\nasync def world():\n    print('World!\\n')\n\n\nasync def hello_world():\n    try:\n        while True:\n            print('