diff --git "a/2779.jsonl" "b/2779.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2779.jsonl"
@@ -0,0 +1,658 @@
+{"seq_id":"311508421","text":"from django.db import IntegrityError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom generic import BbotuiModelTestCase\n\nfrom bbotui import settings\nfrom bbotui.models import Project\nfrom bbotui.models import Builder\nfrom bbotui.models import BuildSlave\nfrom bbotui.models import ShellCommandStep, RawShellCommandStep\n\nclass TestBuilder(BbotuiModelTestCase):\n \"\"\"\n Test Builder model\n \"\"\" \n def test_completeness(self):\n \"\"\"\n Check that the is_complete property returns the right values\n \"\"\"\n p = self.project\n self.assertFalse(p.has_builder)\n \n \n # create builder\n b = Builder(project = p, name=\"builder\")\n b.save()\n \n # new builders are incomplete\n self.assertFalse(p.has_builder) # no slaves yet\n self.assertFalse(b.is_complete)\n \n # with one slave, still false\n slave = BuildSlave(project=p, name=\"sl\", password=\"slp\")\n slave.save()\n b.slaves.add(slave)\n self.assertEqual(b.slaves.count(), 1)\n self.assertFalse(b.is_complete)\n self.assertFalse(p.has_builder) # no steps yet\n\n # with slave and step, now true\n step = ShellCommandStep(builder=b, name='step', command='./run')\n step.save()\n self.assertEqual(b.step_set.count(), 1)\n self.assertTrue(b.is_complete)\n self.assertTrue(p.has_builder)\n \n # see if status toggles sensibly\n step.delete()\n p = Project.objects.get(id=p.id) # get new ref to p\n self.assertFalse(p.has_builder)\n self.assertFalse(b.is_complete)\n step = ShellCommandStep(builder=b, name='step', command='./run')\n step.save()\n p = Project.objects.get(id=p.id) # get new ref to p\n self.assertTrue(p.has_builder)\n self.assertTrue(b.is_complete)\n \n # remove slave (only step). false\n slave.delete()\n p = Project.objects.get(id=p.id) # get new ref to p\n self.assertEqual(b.slaves.count(), 0)\n self.assertFalse(b.is_complete)\n self.assertFalse(p.has_builder) # no slave\n \n def test_name_uniqueness(self):\n \"\"\"\n Ensure that a project cannot have multiple builders with the same name\n \"\"\"\n p = self.project\n b1 = Builder( # first builder\n project = p,\n name = \"My Builder\",\n )\n b1.save()\n self.assertNotEqual(b1.id, None)\n\n b = Builder( # another builder with the same name\n project = p,\n name = \"My Builder\",\n )\n self.assertRaises(IntegrityError, b.save)\n self.assertEqual(b.id, None)\n \n # ... 
but the same name can be used in a different project\n np = Project(\n name = \"Another Test Project\",\n owner = self.user,\n )\n np.save()\n b2 = Builder( # first builder\n project = np,\n name = \"My Builder\",\n )\n b2.save()\n self.assertNotEqual(b2.id, None)\n \n \n def test_creation(self):\n \"\"\"\n Ensure that generated config object can be instantiated\n \"\"\"\n p = self.project\n b = Builder(project = p, name = \"my builder\")\n b.save()\n \n self.assertEqual(unicode(b), \"my builder\")\n self.assertEqual(b.get_config_type(), _(\"builder\"))\n args = b.get_config_args()\n self.assertEqual(args.get(\"name\", None), \"my builder\")\n slave_list = args.get(\"slavenames\", None)\n self.assertEqual(type(slave_list), type([]))\n self.assertEqual(len(slave_list), 0)\n \n # add a slave\n slave = BuildSlave(project=p, name=\"sl\", password=\"slp\")\n slave.save()\n b.slaves.add(slave)\n self.assertEqual(b.slaves.count(), 1)\n \n slave_list = b.get_config_args().get(\"slavenames\", None)\n self.assertEqual(len(slave_list), 1)\n self.assertEqual(slave_list[0], \"sl\")\n \n # add a step\n step = ShellCommandStep(builder=b, name='step', command='./run')\n step.save()\n self.assertEqual(b.step_set.count(), 1)\n \n # add a step\n step = RawShellCommandStep(builder=b, name='step2', command='./run_again')\n step.save()\n self.assertEqual(b.step_set.count(), 2)\n \n # instantiate factory obj\n f_config = b.get_factory_class()\n class_name, mod_path = f_config\n try:\n mod = __import__(mod_path, globals(), locals(), class_name)\n klass = getattr(mod, class_name)\n except: # pragma: no cover\n self.fail(\"invalid factory class return (%s,%s)\"%f_config)\n try:\n f_obj = klass()\n except: # pragma: no cover\n self.fail(\"could not instantiate factory class\") \n \n # add steps to factory\n for step in b.step_set.all():\n s_config = step.cast().get_config_class()\n s_args = step.cast().get_config_args()\n class_name, mod_path = s_config\n try:\n mod = __import__(mod_path, globals(), locals(), class_name)\n klass = getattr(mod, class_name)\n except: # pragma: no cover\n self.fail(\"invalid step class return (%s,%s)\"%s_config)\n try:\n s_obj = klass(**s_args)\n except: # pragma: no cover\n self.fail(\"could not instantiate step class\") \n f_obj.addStep(s_obj)\n \n # finally, we can check the builder class\n args = b.get_config_args()\n args[\"factory\"] = f_obj\n self.assert_valid_buildbot_config(b.get_config_class(), args)\n \n # Check that the resulting config string is sensible\n self.assert_config_string_executable(b)\n \n \n def test_step_reordering(self):\n \"\"\"\n Test Builder.reorder_steps()\n \"\"\"\n p = self.project\n b = Builder(project = p, name = \"my builder\")\n b.save()\n \n # check that argument must be a list of ints\n self.assertRaises(ValueError, b.reorder_steps, 1)\n self.assertRaises(ValueError, b.reorder_steps, ())\n self.assertRaises(ValueError, b.reorder_steps, None)\n self.assertRaises(ValueError, b.reorder_steps, \"1,2,3\")\n \n # Should not raise error for empty list with no steps\n b.reorder_steps([])\n # but raises ValueError if id entered\n self.assertRaises(ValueError, b.reorder_steps, [1])\n \n step_ids = []\n \n # add a step\n step = ShellCommandStep(builder=b, name='step', command='./run')\n step.save()\n self.assertEqual(b.step_set.count(), 1)\n step_ids.append(step.id)\n \n # no error single item\n b.reorder_steps(step_ids)\n # step still there\n self.assertEqual(b.step_set.count(), 1)\n \n # add another step\n step = RawShellCommandStep(builder=b, 
name='step2', command='./run_again')\n step.save()\n self.assertEqual(b.step_set.count(), 2)\n step_ids.append(step.id)\n \n # no error if no change\n b.reorder_steps(step_ids)\n # ValueError if invalid values\n self.assertRaises(ValueError, b.reorder_steps, [])\n self.assertRaises(ValueError, b.reorder_steps, [10,1])\n self.assertRaises(ValueError, b.reorder_steps, step_ids + [10])\n \n # swap step positions\n step_ids[0], step_ids[1] = step_ids[1], step_ids[0]\n # should not raise error\n b.reorder_steps(step_ids)\n self.assertListEqual(step_ids, [x.id for x in b.step_set.all()])\n \n # add another step\n step = ShellCommandStep(builder=b, name='step3', command='./run')\n step.save()\n self.assertEqual(b.step_set.count(), 3)\n step_ids.append(step.id)\n \n # no error if no change\n b.reorder_steps(step_ids)\n # ValueError if invalid values\n self.assertRaises(ValueError, b.reorder_steps, [])\n self.assertRaises(ValueError, b.reorder_steps, [1])\n self.assertRaises(ValueError, b.reorder_steps, step_ids + [1])\n \n # swap step positions\n step_ids[0], step_ids[2] = step_ids[2], step_ids[0]\n # should not raise error\n b.reorder_steps(step_ids)\n self.assertListEqual(step_ids, [x.id for x in b.step_set.all()])\n \n # swap step positions\n step_ids[1], step_ids[2] = step_ids[2], step_ids[1]\n # should not raise error\n b.reorder_steps(step_ids)\n self.assertListEqual(step_ids, [x.id for x in b.step_set.all()])\n \n ","sub_path":"web/bbotui/tests/models/test_model_builder.py","file_name":"test_model_builder.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"237051802","text":"from os import environ\nfrom os.path import dirname, realpath, join, expanduser\n\nHOME_DIR = expanduser(\"~\")\nPROJ_ROOT = dirname(dirname(dirname(realpath(__file__))))\nANSIBLE_ROOT = join(PROJ_ROOT, \"ansible\")\n\n_FAABRIC_BUILD_DIR = environ.get(\"FAABRIC_BUILD_DIR\", \"/build/faabric\")\n\nFAABRIC_SHARED_BUILD_DIR = join(_FAABRIC_BUILD_DIR, \"shared\")\nFAABRIC_STATIC_BUILD_DIR = join(_FAABRIC_BUILD_DIR, \"static\")\n\nFAABRIC_INSTALL_PREFIX = join(_FAABRIC_BUILD_DIR, \"install\")\n\n\ndef get_version():\n ver_file = join(PROJ_ROOT, \"VERSION\")\n\n with open(ver_file, \"r\") as fh:\n version = fh.read()\n\n version = version.strip()\n return version\n","sub_path":"tasks/util/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"286941378","text":"import os\nimport pathlib\nimport platform\n\nfrom daskperiment.backend import init_backend\nfrom daskperiment.core.errors import TrialIDNotFoundError\nfrom daskperiment.util.diff import unified_diff\nfrom daskperiment.util.log import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Environment(object):\n\n def __init__(self, backend):\n self.backend = init_backend(backend=backend)\n\n self.platform_info = platform.platform()\n self.python_info = \"{} {}\".format(platform.python_implementation(),\n platform.python_version())\n self.python_shell = self.get_python_mode()\n self.python_packages = self.get_python_packages()\n\n def log_environment_info(self):\n for msg in self.get_device_info():\n logger.info(msg)\n\n def check_environment_change(self, previous):\n current = os.linesep.join(self.get_device_info())\n if current != previous:\n msg = 'Environment information has been changed'\n logger.warning(msg)\n for d in unified_diff(previous, current, 
n=0):\n logger.warning(d)\n else:\n logger.debug(\"Environment information is not changed\")\n\n def check_python_packages_change(self, previous):\n current = os.linesep.join(self.get_python_packages())\n if current != previous:\n msg = 'Installed Python packages have been changed'\n logger.warning(msg)\n for d in unified_diff(previous, current, n=0):\n logger.warning(d)\n else:\n logger.debug(\"Installed Python packages are not changed\")\n\n def get_device_info(self):\n from daskperiment.version import version\n p = pathlib.Path(__file__)\n msg = \"Number of installed Python packages: {}\"\n info = [\"Platform: {}\".format(self.platform_info),\n \"Python: {} ({})\".format(self.python_info,\n self.python_shell),\n \"daskperiment version: {}\".format(version),\n \"daskperiment path: {}\".format(p.parent.parent),\n \"Working directory: {}\".format(pathlib.Path.cwd()),\n msg.format(len(self.python_packages))]\n return info\n\n def get_python_mode(self):\n # see https://stackoverflow.com/questions/15411967/\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return 'Jupyter Notebook' # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return 'IPython Terminal' # Terminal running IPython\n else:\n msg = 'Unable to detect python environment'\n raise RuntimeError(msg)\n except NameError:\n import __main__ as main\n if hasattr(main, '__file__'):\n if main.__file__.endswith('pytest'):\n return 'Test'\n else:\n return 'File'\n else:\n return 'Interactive Terminal'\n\n def maybe_file(self):\n return self.python_shell == 'File'\n\n def _get_python_packages(self):\n import pkg_resources\n return pkg_resources.working_set\n\n def get_python_packages(self):\n \"\"\"\n Lists installed python packages\n \"\"\"\n return [\"{}=={}\".format(p.project_name, p.version)\n for p in self._get_python_packages()]\n\n def save(self, trial_id):\n self.save_device_info(trial_id)\n self.save_python_packages(trial_id)\n\n def save_device_info(self, trial_id):\n key = self.backend.get_device_info_key(trial_id)\n msg = 'Saving device info: {}'\n logger.info(msg.format(key))\n\n text = os.linesep.join(self.get_device_info())\n self.backend.save_text(key, text)\n\n def load_device_info(self, trial_id):\n key = self.backend.get_device_info_key(trial_id)\n try:\n return self.backend.load_text(key)\n except TrialIDNotFoundError:\n # overwrite message using trial_id\n raise TrialIDNotFoundError(trial_id)\n\n def save_python_packages(self, trial_id):\n key = self.backend.get_python_package_key(trial_id)\n msg = 'Saving python packages: {}'\n logger.info(msg.format(key))\n\n text = os.linesep.join(self.get_python_packages())\n self.backend.save_text(key, text)\n\n def load_python_packages(self, trial_id):\n key = self.backend.get_python_package_key(trial_id)\n try:\n return self.backend.load_text(key)\n except TrialIDNotFoundError:\n # overwrite message using trial_id\n raise TrialIDNotFoundError(trial_id)\n","sub_path":"daskperiment/core/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593610591","text":"import numpy as np\n\ndef sigmoid(x):\n return 1/(1 + np.exp(-x))\n\ndef init_network():\n network = {}\n network['W1'] = [[0.3, 0.5, 0.4], [0.1, 0.2, 0.3]]\n network['b1'] = [[0.1, 0.2, 0.3]]\n network['W2'] = [[0.3, 0.5], [0.1, 0.2], [0.4, 0.7]]\n network['b2'] = [0.1, 0.2]\n return network\n\ndef network_calc(network, x):\n 
# use the network passed in as an argument instead of re-initializing it\n W1, W2 = network['W1'], network['W2']\n b1, b2 = network['b1'], network['b2']\n layer1 = np.dot(x, W1) + b1\n layer1_f = sigmoid(layer1)\n # feed the activated output of layer 1 into layer 2\n layer2 = np.dot(layer1_f, W2) + b2\n layer2_f = sigmoid(layer2)\n\n return layer2_f\n\nx = np.array([1, 0])\nnetwork = init_network()\nprint(network_calc(network, x))\n\n","sub_path":"DL_learning/section3_4.py","file_name":"section3_4.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"36651860","text":"import time\nfrom myTool.lib import qt\nfrom PySide import QtGui, QtCore\n\ndialog = QtGui.QProgressDialog(\n\t\t\t'Upload files...',\n\t\t\t'Cancel',\n\t\t\t0,\n\t\t\t100,\n\t\t\tqt.getMayaWindow(),\n\t\t\tQtCore.Qt.WindowModal\n\t\t\t)\ndialog.show()\n\nfor i in range(100):\n\t# キャンセルのボタンが押せるおまじない\n\tQtGui.QApplication.processEvents()\n\t\n\t# キャンセルされている場合は、終了\n\tif dialog.wasCanceled():\n\t\tbreak\n\t\t\n\tdialog.setValue(i)\n\t\n\t# サンプルの為、0.1秒待機する\n\ttime.sleep(0.1)\n\ndialog.close()","sub_path":"MayaPy101_DL/MayaPy101_DL/chapter12/00_âTâôâvâïâRü[âh/01_âEâBâWâFâbâg/QProgressDialog.py","file_name":"QProgressDialog.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"470539572","text":"import json\nfrom datetime import datetime\nfrom .time_stamped_model import TimeStampedModel\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models import signals\nfrom core.utils.courses import load_exercise_files_dict\n\nclass ExerciseState(TimeStampedModel):\n course_id = models.SlugField()\n material_id = models.SlugField()\n value = models.TextField(blank=True)\n edit_seconds = models.PositiveIntegerField(default=0)\n open_seconds = models.PositiveIntegerField(default=0)\n user = models.ForeignKey('User', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('course_id', 'material_id', 'user',)\n indexes = [\n models.Index(fields=['course_id']),\n models.Index(fields=['material_id']),\n models.Index(fields=['user']),\n ]\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'user_id': self.user_id,\n 'course_id': self.course_id,\n 'material_id': self.material_id,\n 'value': json.loads(self.value),\n 'edit_seconds': self.edit_seconds,\n 'open_seconds': self.open_seconds,\n 'created_at': datetime.timestamp(self.created_at),\n 'updated_at': datetime.timestamp(self.updated_at),\n }\n\n def get_seconds_dict(self):\n return {\n 'edit_seconds': self.edit_seconds,\n 'open_seconds': self.open_seconds,\n }\n\n def set_default_value(self):\n tree = load_exercise_files_dict(self.course_id, self.material_id)\n self.value = json.dumps(tree)\n self.save()\n\n\n@receiver(signals.post_save, sender=ExerciseState)\ndef load_default_value(sender, instance, created, **kwargs):\n if created:\n instance.set_default_value()\n","sub_path":"core/models/exercise_state.py","file_name":"exercise_state.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"412243882","text":"\"\"\"\nViews for the Initializer objects.\n\"\"\"\nfrom webgnome_api.common.views import (get_object,\n create_object,\n update_object,\n cors_policy)\n\nfrom cornice import Service\n\ninitializer = Service(name='initializer', path='/initializer*obj_id',\n description=\"Initializer API\", cors_policy=cors_policy)\n\nmodule_name = 'gnome.spill.initializers'\nmodule_attrs = 
('InitWindages',\n 'InitMassFromPlume',\n 'InitRiseVelFromDist',\n 'InitRiseVelFromDropletSizeFromDist',\n )\n\nimplemented_types = ['{0}.{1}'.format(module_name, a)\n for a in module_attrs]\n\n\n@initializer.get()\ndef get_initializer(request):\n '''Returns a Gnome Initializer object in JSON.'''\n return get_object(request, implemented_types)\n\n\n@initializer.post()\ndef create_initializer(request):\n '''Creates a Gnome Initializer object.'''\n return create_object(request, implemented_types)\n\n\n@initializer.put()\ndef update_initializer(request):\n '''Updates a Gnome Initializer object.'''\n return update_object(request, implemented_types)\n","sub_path":"webgnome_api/views/initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"469657261","text":"# we use shapely to define pad\nfrom shapely.geometry import LineString, MultiPolygon, MultiPoint, box, Polygon,Point\nfrom shapely.ops import split, unary_union\nimport numpy as np\n\n\nclass myPadArray:\n def __init__(self, side):\n b = box(-side/2, -side/2, side/2, side/2)\n point = b.centroid\n self.side = side\n self.box = b\n self.box_array = None\n self.center_x = point.x\n self.center_y = point.y\n\n def modify_one_n_box(self, start, end, amp):\n \"\"\"\n Create a o box\n\n \"\"\"\n s = self.side\n b = self.box\n start = float(start)\n end = float(end)\n amp = s * float(amp)\n x,y = np.array([start, start, end, end])*s - s/2,np.array([0, amp, amp, 0])-s/2\n line_right = LineString(list(zip(-y,x)))\n line_down = LineString(list(zip(-x,y)))\n poly_left = Polygon(list(zip(-y - s,x)))\n poly_up = Polygon(list(zip(-x, y + s)))\n\n spl = split(split(b, line_right)[0], line_down)[0]\n poly = unary_union(MultiPolygon([spl, poly_up]))\n new_box = unary_union(MultiPolygon([poly, poly_left]))\n self.box = new_box\n point = new_box.centroid\n self.center_x = point.x\n self.center_y = point.y\n\n def modify_one_sin_box(self, step, amp):\n s = self.side\n x_range_left = np.arange(0, 0.5*s, step)\n y_range_left = np.sin((x_range_left * np.pi)/ (0.5*s)) * float(amp) *s\n x_range_right = np.arange(0.5*s, s, step)\n y_range_right = -1*y_range_left\n\n down_left_coords = np.array(list(zip(x_range_left-s, y_range_left)))\n down_right_coords = np.array(list(zip(x_range_right-s, y_range_right)))\n right_down_coords = np.array(list(zip(y_range_right, x_range_left)))\n right_up_coords = np.array(list(zip(y_range_left, x_range_right)))\n\n down_coords = np.array(list(down_left_coords) + list(down_right_coords) + [(0,0)]) + [s/2, -s/2]\n up_coords = down_coords + [0,s]\n right_coords = np.array(list(right_down_coords) + list(right_up_coords) + [(0,s)]) + [s/2, -s/2]\n left_coords = right_coords + [-s,0]\n\n b = Polygon(list(down_coords)+list(right_coords)+list(up_coords)[::-1]+list(left_coords)[::-1])\n self.box = b\n point = b.centroid\n self.center_x = point.x\n self.center_y = point.y\n\n def get_pad_nine(self):\n \"\"\"\n\n purpose: return a list of 9 polygons\n \"\"\"\n s = self.side\n b = self.box\n lists = np.array(list(b.exterior.coords)).tolist()\n off_set = np.array([[-s,s],[0,s],[s,s],[-s,0],[0,0],[s,0],[-s,-s],[0,-s],[s,-s]])\n l_ext = [(x+lists).tolist() for x in off_set]\n list_poly = list([Polygon(x) for x in l_ext])\n self.box_array = list_poly\n\n\nif __name__ == \"__main__\":\n print(\"error: myPadArray is running as main\")\n","sub_path":"statistical 
model/AnodeSimulation/myPadArray.py","file_name":"myPadArray.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496503477","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, current_app, render_template, request\n\nfrom pony.orm import db_session\n\nfrom acid import db\n\nfrom .service import BuildSetsFiltered, BuildSetsPaginated, pagination\n\nbuilds = Blueprint('builds', __name__, template_folder='../../templates')\n\n\n@builds.route('/builds')\n@builds.route('/builds/')\n@db_session\ndef show_builds_history(page=1):\n config = current_app.config\n per_page = config['history']['pagination']['per_page']\n pipeline = config['default']['pipename']\n page_links = config['history']['pagination']['page_links']\n buildset_log_url = config['history']['log_server_url']\n\n db.connect()\n\n branches = request.args.getlist('branch')\n build = request.args.get('build')\n\n if branches or build:\n buildsets = BuildSetsFiltered(pipeline, per_page, branches, build)\n else:\n buildsets = BuildSetsPaginated(pipeline, per_page)\n buildsets.fetch_page(page)\n paginator = pagination(len(buildsets), page, per_page, page_links)\n return render_template('builds_history.html', buildsets=buildsets,\n paginator=paginator,\n buildsets_log_url=buildset_log_url,\n branches=branches)\n","sub_path":"acid/features/history/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"287939552","text":"\n\ndef cipher(sentence):\n encrypted_sentence = \"\"\n for letter in sentence:\n if letter.islower():\n encrypted_sentence += str(ord(letter))\n else:\n encrypted_sentence += letter\n\n return encrypted_sentence\n\n\ndef decrypt(encrypted):\n return_sentence = \"\"\n ascii_code = \"\"\n sentence_type = 0\n for i in range(len(encrypted)):\n if sentence_type > 0:\n sentence_type -= 1\n continue\n if encrypted[i] == \"9\":\n sentence_type = 1\n ascii_code += encrypted[i]\n i += 1\n ascii_code += encrypted[i]\n return_sentence += chr(int(ascii_code))\n elif encrypted[i] == \"1\":\n sentence_type = 2\n ascii_code = encrypted[i:i+3]\n i += 2\n return_sentence += chr(int(ascii_code))\n else:\n return_sentence += encrypted[i]\n return return_sentence\n\nif __name__ == '__main__':\n ci = cipher(\"hELLO\")\n print(cipher(\"hELLO\"))\n print(decrypt(ci))","sub_path":"chap1/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"294844025","text":"#!/usr/bin/env python\n\nfrom brute import Brute\nfrom math import log, exp\n\nclass Water(Brute):\n name = None\n amount = None\n calcium = None\n bicarbonate = None\n sulfate = None\n chloride = None\n sodium = None\n magnesium = None\n ph = None\n notes = None\n\n \"\"\"Shomate parameters for temperatures between 298 and 500 deg K\n \n From Chase, M.W., Jr., NIST-JANAF Themochemical Tables, Fourth Edition,\n J. Phys. Chem. Ref. 
Data, Monograph 9, 1998, 1-1951.\n \"\"\"\n s_a = -203.6060\n s_b = 1523.290\n s_c = -3196.413\n s_d = 2474.455\n s_e = 3.855326\n s_f = -256.5478\n s_g = -488.7163\n s_h = -285.8304\n\n def __init__(self, **kwargs):\n self.name = kwargs.get('name')\n self.amount = kwargs.get('amount')\n self.calcium = kwargs.get('calcium')\n self.bicarbonate = kwargs.get('bicarbonate')\n self.sulfate = kwargs.get('sulfate')\n self.chloride = kwargs.get('chloride')\n self.sodium = kwargs.get('sodium')\n self.magnesium = kwargs.get('magnesium')\n self.ph = kwargs.get('ph')\n self.notes = kwargs.get('notes')\n\n def heat_capacity(self, temperature):\n \"\"\"Specific heat capacity [J/(mol * K)]\n\n Given a temperature in degrees Kelvin, return the specific heat\n capacity of water.\n \"\"\"\n if temperature > 500:\n raise NotImplemented(\"Specific heat cannot be calculated above\" +\n \" 500 degrees Kelvin at this time.\")\n t = temperature / 1000\n return self.s_a + self.s_b * t \\\n + self.s_c * t**2 \\\n + self.s_d * t**3 \\\n + self.s_e / t**2\n\n def enthalpy(self, temperature):\n \"\"\"Standard enthalpy [kJ/mol]\n\n Given a temperature in degrees Kelvin, return the standard enthalphy\n of water.\n \"\"\"\n if temperature > 500:\n raise NotImplemented(\"Enthalpy cannot be calculated above\" +\n \" 500 degrees Kelvin at this time.\")\n t = temperature / 1000\n return self.s_a * t + self.s_b * t**2 / 2 \\\n + self.s_c * t**3 / 3 \\\n + self.s_d * t**4 / 4 \\\n - self.s_e / t \\\n + self.s_f \\\n - self.s_h\n\n def entropy(self, temperature):\n \"\"\"Standard entropy [J/(mol * K)]\n\n Given a temperature in degrees Kelvin, return the standard entropy\n of water.\n \"\"\"\n if temperature > 500:\n raise NotImplemented(\"Entropy cannot be calculated above\" +\n \" 500 degrees Kelvin at this time.\")\n t = temperature / 1000\n return self.s_a * log(t) + self.s_b * t \\\n + self.s_c * t**2 / 2 \\\n + self.s_d * t**3 / 3 \\\n - self.s_e / (2 * t**2) \\\n + self.s_g\n\n def saturation_pressure(self, t):\n \"\"\"Maximum saturation pressure of water in moist air [Pa]\n\n Given the dry bulb temperature of moist air [K], returns the maximum\n saturation pressure of water in moist air in Pascals [Pa].\n \"\"\"\n return exp(77.3450 + 0.0057 * t - 7235 / t) / t**8.2\n\n def vapor_density(self, t):\n \"\"\"Return vapor density [kg/m^3]\n \n \"\"\"\n return 0.0022 * self.saturation_pressure(t) / t\n\n def humidity_saturation(self, pa, t):\n \"\"\"Return humidity ratio as a function of atmospheric pressure [Pa]\n\n x_s\n\n \"\"\"\n pws = self.saturation_pressure(t)\n return 0.62198 * pws / (pa / pws)\n","sub_path":"brute/models/water.py","file_name":"water.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60936237","text":"# Model for user group interaction\n#import mongoengine\n#import pymongo\n#import datetime\n#import json\n#import sys\n\n# Checks if the access level being set is appropriate for the DB\n# 0 : all access, 1 : tier 1 access, 2 : tier 2 access, ..., 5 : admin access.\ndef valid_access_level(access_level):\n if access_level >= 0 and access_level <= 5:\n print(\"Proper access level!\")\n return True\n else:\n print(\"Improper access level! 
Please enter [0, 5].\")\n return False\n\n# Create a new group\ndef create_group(db, owner_id, user_ids, access_level, group_name=None):\n # Check if the access_level is valid.\n if not valid_access_level(access_level):\n print(\"Invalid access level requested!\")\n return False\n\n # Check if a member is being included with this group.\n if user_ids is None:\n print(\"Critical error! Must have at least one member!\")\n return False\n\n # Check for a group_name, otherwise set it to the default\n if group_name is None:\n group_name = 'New Group'\n\n # Next, build the group JSON dictionary.\n new_group_id = db['group'].insert_one({\n 'name' : group_name,\n 'owner_id' : owner_id,\n 'access_level' : access_level,\n 'user_ids' : user_ids,\n 'deleted' : False\n })\n\n # Check if this insert was successful\n if is_group(db, str(new_group_id.inserted_id)):\n print(\"Successful insertion of group!\")\n return True\n else:\n print(\"Error inserting to database! Retry or debug!\")\n return False\n\n# Attempts to delete group: group_id as user: user_id.\ndef delete_group(db, user_id, group_id):\n # Fetch the requested user and group for the interaction.\n user = db['user'].find({\"_id\" : user_id})\n group = db['group'].find({\"_id\" : group_id})\n\n # Check if the user_id was valid and returned a record.\n if len(user) == 0:\n print(\"NO USER FOUND. CANNOT DELETE.\")\n return False\n\n # Check if the group_id was valid and returned a record.\n if len(group) == 0:\n print(\"NO GROUP FOUND. CANNOT DELETE.\")\n return False\n\n # Check if this user has appropriate permissions to delete.\n if user['access_level'] < group['access_level']:\n print(\"Invalid permissions! Cannot delete group! Contact an admin...\")\n return False\n\n # At this point, all data retrieved from the db has been verified, and the\n # delete transaction can take place.\n #db['group'].delete_one({\"_id\" : group_id})\n\n # Don't actually delete the group, but set the deleted flag for the group\n group['deleted'] = True\n db['group'].update_one({\"_id\" : group_id}, {'$set': group})\n\n # Check if the group was successfully deleted.\n if not is_group(db, group_id):\n print(\"Successful deletion!\")\n return True\n # In this case, the group is still queryable and active.\n else:\n print(\"Unsuccessful deletion interaction with the database!\")\n return False\n\n# Get users for a specifc group.\ndef get_users(db, group_id):\n # Check if this group exists.\n if not is_group(db, group_id):\n print(\"Invalid group! Cannot return users!\")\n return False\n\n # Query the groups array for values equivalent to group_id.\n group = db['group'].find({'_id' : group_id})\n users = db['user'].find({'_id' : group['user_ids']})\n\n # Check if the group has users to return.\n # Base user should be the admin - this is a critical error if hit.\n if users is None:\n print(\"Critical error! No users in this group!\")\n return False\n\n # Check if user(s) have the correct group.\n for user in users:\n if group_id not in user['group_ids']:\n print(\"Error fetching \" + group_id + \" members from database.\")\n return False\n\n # Extract user id(s). 
This may be slow?\n #user_ids = []\n #for user in users:\n # user_ids.append(user['user_id'])\n\n # Return the fetched user JSON values.\n return users\n\n# Valid group check.\n# https://stackoverflow.com/questions/25163658/mongodb-return-true-if-document-exists\ndef is_group(db, group_id):\n # Query to see if a single group with this id exists and the deleted flag has not been set.\n if db['group'].count_documents({\"_id\" : group_id}, limit = 1) != 0:\n is_deleted = db['group'].find_one({\"_id\" : group_id})\n if is_deleted['deleted'] == False:\n print(\"Valid group!\")\n return True\n else:\n print('Group has been deleted!')\n return False\n # If the query returned 0, the group/library doesn't exist.\n else:\n print(\"Invalid group!\")\n return False\n\n# Add an existing user to a group.\ndef add_user(db, admin_id, group_id, user_id):\n # First, check if the group exists.\n if not is_group(db, group_id):\n print(\"Attempting to add user to invalid group! Error!\")\n return False\n\n # Check if the user being entered exists\n user = db['user'].find_one({\"_id\" : user_id})\n if user is None:\n print(\"Invalid user_id! Cannot add to group!\")\n return False\n\n # Fetch the admin and group records, and validate.\n admin = db['user'].find_one({\"_id\" : admin_id})\n group = db['group'].find_one({\"_id\": group_id})\n\n # Check if the admin has correct credentials or is the owner.\n if admin is None or admin['access_level'] < group['access_level'] or str(admin['_id']) != group['owner_id']:\n print(\"Invalid credentials to add user to group!\")\n return False\n # If so, add this user to the group.\n else:\n print(\"Valid credentials! Adding...\")\n group['user_ids'].append(user_id)\n db['group'].update(group)\n return True\n\n# Remove a user from a group\ndef remove_user(db, admin_id, group_id, user_id):\n # First, check if the group exists.\n if not is_group(db, group_id):\n print(\"Attempting to add user to invalid group! Error!\")\n return False\n\n # Check if the user being entered exists\n user = db['user'].find_one({\"_id\" : user_id})\n if user is None:\n print(\"Invalid user_id! Cannot add to group!\")\n return False\n\n # Fetch the admin and group records, and validate.\n admin = db['user'].find_one({\"_id\" : admin_id})\n group = db['group'].find_one({\"_id\": group_id})\n\n # Check if the admin has correct credentials or is the owner.\n if admin is None or admin['access_level'] >= group['access_level'] or str(admin['_id']) != group['owner_id']:\n print(\"Valid credentials! Adding...\")\n group['user_ids'].remove(user_id)\n db['group'].update(group)\n return True\n # Otherwise, do not delete.\n else:\n print(\"Invalid credentials to remove user from group!\")\n return False\n\n# Checks if the user has the appropriate permissions, and if so, adds the group to their document.\ndef join_group(db, user_id, group_id):\n # First, check if the group exists.\n if not is_group(db, group_id):\n print(\"Attempting to add user to invalid group! Error!\")\n return False\n\n # Check if the user being entered exists\n user = db['user'].find_one({\"_id\" : user_id})\n if user is None:\n print(\"Invalid user_id! Cannot add to group!\")\n return False\n\n # Fetch the group.\n group = db['group'].find_one({\"_id\": group_id})\n\n # Check if the user has the appropriate access level\n if user['access_level'] >= group['access_level']:\n print(\"Valid credentials! 
Adding...\")\n group['user_ids'].append(user_id)\n db['group'].update(group)\n # Otherwise, do not add.\n else:\n print(\"Invalid permissions to join group! Unsuccessful!\")\n return False\n\n# Grabs all groups that this user is a part of.\ndef get_all_groups(db, user_id):\n # Grab this user to check availability\n user = db['user'].find_one({'_id': str(user_id)})\n print(user)\n if user is None:\n print(\"Error! Invalid user_id given! Cannot retrieve groups!\")\n return False\n \n user_groups = db['group'].find({'user_ids': user_id})\n\n if user_groups is None:\n print('User is not in a group!')\n return False\n return user_groups\n\n\n '''\n # Grab the groups and check for correctness.\n user_access = db['user_library_access'].find_one({\"user_id\": user_id})\n if len(user_access) == 0:\n print(\"Error! Could not retrieve user_library_access!\")\n return False\n \n #Return all the groups the user is apart of\n return user_access[\"library_ids\"]\n '''\n","sub_path":"model/group_manager.py","file_name":"group_manager.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"635961863","text":"from datetime import datetime, timedelta\r\n\r\nfrom telegram import Update, ReplyKeyboardMarkup\r\nfrom telegram.ext import MessageHandler, Filters, CallbackContext, CallbackQueryHandler\r\n\r\nfrom bot.utils import get_analytics, send_analytics\r\nfrom bot.resources import keyboards, messages\r\nfrom bot.states import names\r\nfrom bot.states.common import back\r\nfrom extensions import State\r\n\r\n\r\ndef activator(update: Update, context: CallbackContext):\r\n reply_markup = ReplyKeyboardMarkup(keyboards.ANALYTICS_SELECT_PERIOD, resize_keyboard=True)\r\n update.message.reply_text(messages.WHAT_PERIOD_TO_SEE, reply_markup=reply_markup)\r\n\r\n\r\n\r\ndef to_today(update: Update, context: CallbackContext):\r\n start = datetime.now().date()\r\n end = start + timedelta(days=1)\r\n result = get_analytics.hello_habit(update.message.from_user.id, start, end)\r\n send_analytics.hello_habit(update, result)\r\n\r\ndef to_yesterday(update: Update, context: CallbackContext):\r\n end = datetime.today().date()\r\n start = end - timedelta(days=1)\r\n result = get_analytics.hello_habit(update.message.from_user.id, start, end)\r\n send_analytics.hello_habit(update, result)\r\n\r\ndef to_week(update: Update, context: CallbackContext):\r\n end = datetime.today().date()\r\n start = end - timedelta(days=7)\r\n result = get_analytics.hello_habit(update.message.from_user.id, start, end)\r\n send_analytics.hello_habit(update, result)\r\n\r\ndef to_month(update: Update, context: CallbackContext):\r\n end = datetime.today().date()\r\n start = end - timedelta(days=30)\r\n result = get_analytics.hello_habit(update.message.from_user.id, start, end)\r\n send_analytics.hello_habit(update, result)\r\n\r\ndef to_total(update: Update, context: CallbackContext):\r\n result = get_analytics.hello_habit(update.message.from_user.id, total=True)\r\n send_analytics.hello_habit(update, result)\r\n\r\ndef to_custom_period(update: Update, context: CallbackContext):\r\n return names.SELECT_START_DATE\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nanalytics_menu_state = State(\r\n on_activate=activator,\r\n handlers=[\r\n MessageHandler(Filters.regex(f'({keyboards.BACK_BUTTON})'), back),\r\n MessageHandler(Filters.regex(f'({keyboards.TODAY_BUTTON})'), to_today),\r\n MessageHandler(Filters.regex(f'({keyboards.YESTERDAY_BUTTON})'), to_yesterday),\r\n 
MessageHandler(Filters.regex(f'({keyboards.WEEK_BUTTON})'), to_week),\r\n MessageHandler(Filters.regex(f'({keyboards.MONTH_BUTTON})'), to_month),\r\n MessageHandler(Filters.regex(f'({keyboards.TOTAL_BUTTON})'), to_total),\r\n MessageHandler(Filters.regex(f'({keyboards.PERIOD_BUTTON})'), to_custom_period),\r\n\r\n ]\r\n)\r\n","sub_path":"bot/states/analytics/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46444995","text":"from sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\niris = load_iris()\n\nX = iris.data\ny = iris.target\n\n\n# LOGISTIC REGRESSION\nlogreg = LogisticRegression()\nlogreg.fit(X, y)\ny_pred = logreg.predict(X)\nprint(metrics.accuracy_score(y, y_pred))\n\n\n# KNN\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X, y)\ny_pred = knn.predict(X)\nprint(metrics.accuracy_score(y, y_pred))\n\n# overfitting problem with n_neighbors=1\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X, y)\ny_pred = knn.predict(X)\nprint(metrics.accuracy_score(y, y_pred))\n\n\n# TRAIN/TEST SPLIT\nprint(X.shape)\nprint(y.shape)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4)\n\nprint(X_train.shape)\nprint(X_test.shape)\n\nprint(y_train.shape)\nprint(y_test.shape)\n\n# fit the training data\nlogreg = LogisticRegression()\nlogreg.fit(X_train, y_train)\n\n# make a prediction on the testing data\ny_pred = logreg.predict(X_test)\n\n# compare the actual response values (y_test) with the predicted response values (y_pred)\nprint(metrics.accuracy_score(y_test, y_pred))\n\n# repeat for KNN with K=5\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train, y_train)\ny_pred = knn.predict(X_test)\nprint(metrics.accuracy_score(y_test, y_pred))\n\n# repeat for KNN with K=1\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train, y_train)\ny_pred = knn.predict(X_test)\nprint(metrics.accuracy_score(y_test, y_pred))\n\n# locate the best value for K\nk_range = range(1, 26)\nscores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n scores.append(metrics.accuracy_score(y_test, y_pred))\nprint(scores.index(max(scores)))\n\n# plot the scores on a graph\nplt.plot(k_range, scores)\nplt.xlabel(\"value of K for KNN\")\nplt.ylabel(\"testing accuracy\")\nplt.show(block=True)","sub_path":"ml tutorials/data school/03_comparing_ml_models.py","file_name":"03_comparing_ml_models.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293271362","text":"def login_disponivel(nome, lista):\n i=1\n if nome not in lista:\n return nome\n else:\n for name in range(len(lista)):\n while nome in lista:\n nome = nome +str(i)\n if nome in lista:\n nome= nome[:-1]\n i+=1\n return nome\nstate = True\nlogins = []\nwhile state:\n login = input('Digite o login: ')\n if login != 'fim':\n logins.append(login)\n else:\n state = False\nfor login in logins:\n 
print(login_disponivel(login,logins))","sub_path":"backup/user_044/ch169_2020_06_22_14_55_04_305457.py","file_name":"ch169_2020_06_22_14_55_04_305457.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"506594557","text":"import pygame\r\n\r\npygame.init() #초기화 (반드시 필요)\r\n\r\n#화면크기 설정 \r\nscreen_width = 480 #가로\r\nscreen_height = 640 #세로 \r\nscreen = pygame.display.set_mode((screen_width,screen_height))\r\n\r\n# 화면 타이틀 설정\r\npygame.display.set_caption(\"Jae Game\") #게임 이름 \r\n\r\n# FPS\r\nclock = pygame.time.Clock()\r\n\r\n#배경 이미지 불러오기 \r\nbackground = pygame.image.load(\"C:/Users/young/OneDrive/바탕 화면/PythonWorkSpace/pygame_basic/background.png\") \r\n# path에서 \\ 를 \\\\ 또는 / 로 전환 그리고 \"\" 를 붙여준다 \r\n\r\n#캐릭터 (스프라이트) 불러오기\r\ncharacter = pygame.image.load(\"C:/Users/young/OneDrive/바탕 화면/PythonWorkSpace/pygame_basic/character.png\") \r\ncharacter_size = character.get_rect().size #이미지의 크기를 구해옴 \r\ncharacter_width = character_size[0] #캐릭터의 가로크기\r\ncharacter_height = character_size[1] #캐릭터의 세로 크기 \r\ncharacter_x_pos = screen_width / 2 - (character_width / 2) #화면 가로의 절반 크기에 해당하는곳에 위치 \r\ncharacter_y_pos = screen_height - character_height #화면 세로 크기의 가장 아래에 해당하는곳 \r\n\r\n#이동할 좌표 \r\nto_x = 0\r\nto_y = 0\r\n\r\n# 이동 속도\r\ncharacter_speed = 0.6 \r\n\r\n#이벤트 루프\r\nrunning = True #게임이 진행중인가? \r\nwhile running: \r\n dt = clock.tick(10) #게임화면의 초당 프레임 수를 설정 \r\n\r\n #예를 들어 캐릭터가 1초 동안 100만큼 이동을 해야한다고 할때, \r\n # 10 fps: 1초 동안에 10번 동작 -> 1초에 몇만큼? 10만큼 (10*10 = 100)\r\n # 20 fps: 1초 동안에 20번 동작 -> 1초에 5만큼 \r\n print(\"fps: \" + str(clock.get_fps()))\r\n\r\n for event in pygame.event.get(): #pygame을 사용하기 위해 무조건 필요한 문장 (어떤 이벤트가 발생하였는가? )\r\n if event.type == pygame.QUIT: # 창이 닫히는 이벤트가 발생하였는가?\r\n running = False #게임이 진행중이 아님 \r\n \r\n if event.type == pygame.KEYDOWN: #키가 눌러졌는지 확인 \r\n if event.key == pygame.K_LEFT: #캐릭터를 왼쪽으로\r\n to_x -= character_speed \r\n elif event.key == pygame.K_RIGHT: \r\n to_x += character_speed \r\n elif event.key == pygame.K_UP:\r\n to_y -= character_speed \r\n elif event.key == pygame.K_DOWN:\r\n to_y += character_speed\r\n \r\n if event.type == pygame.KEYUP: # 방향키를 떼면 멈춤 \r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n to_x = 0\r\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n to_y = 0\r\n \r\n character_x_pos += to_x * dt\r\n character_y_pos += to_y * dt \r\n\r\n #가로 경계값 처리 \r\n if character_x_pos < 0 : \r\n character_x_pos = 0 \r\n elif character_x_pos > (screen_width - character_width) : \r\n character_x_pos = (screen_width - character_width)\r\n \r\n if character_y_pos < 0 : \r\n character_y_pos = 0 \r\n elif character_y_pos > (screen_height - character_height) : \r\n character_y_pos = (screen_height - character_height)\r\n\r\n\r\n #screen.fill((0,100,150)) # (red,green,blue) 색상으로 화면 채우기 \r\n screen.blit(background,(0,0)) # 배경 그리기 (창에서 맨위가 0,0 이다 )\r\n\r\n screen.blit(character,(character_x_pos, character_y_pos)) #캐릭터 그리기 \r\n\r\n pygame.display.update() #게임 화면을 다시 그리기! 
\r\n\r\n#pygame 종료 \r\npygame.quit()","sub_path":"5_frame_per_second.py","file_name":"5_frame_per_second.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"392581554","text":"import numpy as np\nimport pandas as pd\nimport numbers\nimport scipy\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom phi import *\nimport random\nimport os\nimport math\nfrom helpers import *\nimport sys\n\ninsertion_count = 0\n\nfrom frexp import good_dict\nos.system('python frexp.py')\n\nfrom phi import *\nex_0=None;ex_1=None;ei_0=None;ei_1=None;ei_2=None;ei_3=None;ei_4=None;ei_6=None;ei_5=None;ei_7=None;ei_9=None;ei_8=None;ei_10=None;ei_11=None;e_0=None;e_1=None;e_2=None;e_3=None;e_4=None;e_5=None;e_6=None;e_7=None;f_0=None;f_2=None;f_1=None;f_3=None;f_5=None;f_4=None;f_6=None;f_7=None;x_0=None;x_1=None\n\n\nM_LN2=0.69314718055994530941723212146 \nDBL_MIN_EXP=-1021 \ndef gsl_finite(x):\n x_0 = x;\n lo = locals()\n record_locals(lo, test_counter)\n return np.isfinite(x_0)\n\ndef gsl_frexp(x,e):\n x_1 = x;e_0 = e;\n ex_0=None;ex_1=None;ei_0=None;ei_1=None;ei_2=None;ei_3=None;ei_4=None;ei_6=None;ei_5=None;ei_7=None;ei_9=None;ei_8=None;ei_10=None;ei_11=None;e_1=None;e_2=None;e_3=None;e_4=None;e_5=None;e_6=None;e_7=None;f_0=None;f_2=None;f_1=None;f_3=None;f_5=None;f_4=None;f_6=None;f_7=None;\n\n gen_bad = random() < probability \n global insertion_count\n if gen_bad:\n insertion_count += 1\n \n if x_1==0.0:\n e_1=0 \n lo = locals()\n record_locals(lo, test_counter)\n return 0.0,e_1\n elif not gsl_finite(x_1):\n e_2=0\n lo = locals()\n record_locals(lo, test_counter) \n return x_1,e_2\n elif abs(x_1)>=0.5 and abs(x_1)<1:\n e_3=0 \n lo = locals()\n record_locals(lo, test_counter)\n return x_1,e_3\n else:\n ex_0=math.ceil(math.log(abs(x_1))/M_LN2) \n ei_0=ex_0 \n if ei_0-DBL_MIN_EXP:\n ei_3=-DBL_MIN_EXP \n phiPreds = [ei_2>-DBL_MIN_EXP]\n phiNames = [ei_3,ei_2]\n ei_4= phiIf(phiPreds, phiNames)\n f_0=fuzzy(x_1*pow(2.0,-ei_4), gen_bad)\n if not gsl_finite(f_0):\n e_4=0 \n lo = locals()\n record_locals(lo, test_counter)\n return f_0,e_4\n phiPreds = [ not gsl_finite(f_0)]\n phiNames = [e_4,e_0]\n e_5= phiIf(phiPreds, phiNames)\n phi0 = Phi()\n while abs(phi0.phiLoopTest(f_0,f_1))>=1.0:\n phi0.set()\n ei_6 = phi0.phiEntry(ei_4,ei_5)\n f_2 = phi0.phiEntry(f_0,f_1)\n\n ei_5 = ei_6+1\n f_1 = f_2/2.0\n ei_7 = phi0.phiExit(ei_4,ei_5)\n f_3 = phi0.phiExit(f_0,f_1)\n phi0 = Phi()\n while abs(phi0.phiLoopTest(f_3,f_4))>0 and abs(phi0.phiLoopTest(f_3,f_4))<0.5:\n phi0.set()\n ei_9 = phi0.phiEntry(ei_7,ei_8)\n f_5 = phi0.phiEntry(f_3,f_4)\n\n ei_8 = ei_9-1\n f_4 = f_5*2.0\n ei_10 = phi0.phiExit(ei_7,ei_8)\n f_6 = phi0.phiExit(f_3,f_4)\n e_6=ei_10 \n lo = locals()\n record_locals(lo, test_counter)\n return f_6,e_6\n phiPreds = [x_1==0.0, not gsl_finite(x_1),abs(x_1)>=0.5 and abs(x_1)<1]\n phiNames = [None,None,None,ex_0]\n ex_1= phiIf(phiPreds, phiNames)\n phiPreds = [x_1==0.0, not gsl_finite(x_1),abs(x_1)>=0.5 and abs(x_1)<1]\n phiNames = [None,None,None,ei_10]\n ei_11= phiIf(phiPreds, phiNames)\n phiPreds = [x_1==0.0, not gsl_finite(x_1),abs(x_1)>=0.5 and abs(x_1)<1]\n phiNames = [e_1,e_2,e_3,e_6]\n e_7= phiIf(phiPreds, phiNames)\n phiPreds = [x_1==0.0, not gsl_finite(x_1),abs(x_1)>=0.5 and abs(x_1)<1]\n phiNames = 
[None,None,None,f_6]\n f_7= phiIf(phiPreds, phiNames)\n\n\n\n#generate python causal map\ncausal_map = {'ex_1':['ex_0'],'ex_0':['x_1'],'e_1':[],'f_0':['x_1','ei_4'],'e_3':[],'f_2':['f_0','f_1'],'e_2':[],'f_1':['f_2'],'e_5':['e_4','e_0'],'f_4':['f_5'],'ei_10':['ei_7','ei_8'],'e_4':[],'f_3':['f_0','f_1'],'ei_11':['ei_10'],'f_6':['f_3','f_4'],'e_7':['e_1','e_2','e_3','e_6'],'f_5':['f_3','f_4'],'e_6':['ei_10'],'f_7':['f_6'],'ei_9':['ei_7','ei_8'],'ei_8':['ei_9'],'ei_7':['ei_4','ei_5'],'ei_6':['ei_4','ei_5'],'ei_5':['ei_6'],'ei_4':['ei_3','ei_2'],'ei_3':[],'ei_2':['ei_1','ei_0'],'ei_1':[],'ei_0':['ex_0'],}\n\n#added phi names\nphi_names_set = {'ei_2','ei_4','e_5','ei_6','f_2','ei_7','f_3','ei_9','f_5','ei_10','f_6','ex_1','ei_11','e_7','f_7',}\n\n#------end of program---------------------------\ndef record_locals(lo, i):\n for name in lo:\n if '_IV' in name:\n continue\n if isinstance(lo[name], numbers.Number) and name in causal_map:\n if name not in global_value_dict:\n columns = causal_map[name].copy()\n columns.insert(0, name)\n global_value_dict[name] = pd.DataFrame(columns=columns)\n new_row = [np.float64(lo[name])]\n\n for pa in causal_map[name]:\n if isinstance(lo[pa], numbers.Number):\n new_row.append(np.float64(lo[pa]))\n else:\n new_row.append(lo[pa])\n global_value_dict[name].loc[i] = new_row\n\n\nbad_dict = {}\nglobal_value_dict = {}\narg1s = np.arange(0, 1000)\ntest_counter = 0\nprobability = float(sys.argv[1])/100.0\nfor arg1 in arg1s:\n e = 0.0\n bad_outcome = gsl_frexp(arg1, e)\n bad_dict[test_counter] = bad_outcome\n test_counter += 1\n\n\ndiff_dict = {index : 0.0 if bad_dict[index] == good_dict[index] else 1.0 for index in bad_dict }\n\ntotal_failed = sum(1 for index in diff_dict if diff_dict[index] == 1.0)\n\ndef label_predicate(df):\n if df[key] == mean:\n label = 0\n if (df[key] < mean) and (df[key] >= mean - sd):\n label = -1\n if (df[key] < mean - sd) and (df[key] >= mean - 2 * sd):\n label = -2\n if (df[key] < mean - 2 * sd) and (df[key] >= mean - 3 * sd):\n label = -3\n if (df[key] < mean - 3 * sd):\n label = -4\n if (df[key] > mean) and (df[key] <= mean + sd):\n label = 1\n if (df[key] > mean + sd) and (df[key] <= mean + 2 * sd):\n label = 2\n if (df[key] > mean + 2 * sd) and (df[key] <= mean + 3 * sd):\n label = 3\n if (df[key] > mean + 3 * sd):\n label = 4\n return label\n\nfor key in global_value_dict:\n df = global_value_dict[key]\n rows = df.index\n outcome_list = [diff_dict[i] for i in rows]\n df['outcome'] = outcome_list\n sd = df[key].std()\n mean = df[key].mean()\n df['label'] = df.apply(label_predicate, axis = 1)\n\n\n\nsuspicious_df = pd.DataFrame(columns=['variable_name', 'importance_score', 'p_label'])\n\nfor key in global_value_dict:\n df = global_value_dict[key]\n grouped = df.groupby('label')\n group_dict = grouped.groups\n max_importance = 0\n #initalization\n p_label_max = -5\n F_p_obs = df['outcome'].value_counts()[1.0] if 1.0 in df['outcome'].value_counts() else 0\n S_p_obs = df['outcome'].value_counts()[0.0] if 0.0 in df['outcome'].value_counts() else 0\n for p_label in group_dict:\n #key-->label value-->indexed matches label\n matched_rows = group_dict[p_label]\n df_p = df.loc[matched_rows]\n F_p = df_p['outcome'].value_counts()[1.0] if 1.0 in df_p['outcome'].value_counts() else 0\n S_p = df_p['outcome'].value_counts()[0.0] if 0.0 in df_p['outcome'].value_counts() else 0\n increase_p = F_p/(S_p + F_p) + F_p_obs/(S_p_obs + F_p_obs)\n importance_p = 2 / ((1 / increase_p) + (1/(math.log(F_p + 0.00001) / math.log(total_failed + 0.00001))))\n if 
importance_p > max_importance:\n max_importance = importance_p\n p_label_max = p_label\n row = [key, max_importance, p_label_max]\n suspicious_df.loc[len(suspicious_df)] = row\n\ndef filter_phi_rows(suspicious_df, phi_names_set):\n return suspicious_df[~suspicious_df['variable_name'].isin(phi_names_set)]\n\nsuspicious_df = suspicious_df.sort_values(by='importance_score', ascending=False)\n\nsuspicious_final_rank = filter_phi_rows(suspicious_df, phi_names_set)\nprint('*************Target variables in total: ', len(suspicious_final_rank),'*************')\nprint(suspicious_final_rank)\n\nwith open(os.path.basename(__file__)[:-3] + \"-\" + sys.argv[1] + \"-Trial\" + sys.argv[2] + \".txt\", \"w\") as f:\n f.write('*************Target variables in total: ' + str(len(suspicious_final_rank)) + '*************\\n')\n bad_runs, good_runs = get_run_ratio(bad_dict, good_dict)\n f.write(\"Number of Fault Insertions: \" + str(insertion_count) + \"\\n\")\n f.write(\"Number of Faulty Executions: \" + str(bad_runs) + \"\\n\")\n f.write(str(suspicious_final_rank.to_csv()))","sub_path":"frexpESP.1.py","file_name":"frexpESP.1.py","file_ext":"py","file_size_in_byte":8752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"475157853","text":"\n\n#calss header\nclass _CONSIDERATION():\n\tdef __init__(self,): \n\t\tself.name = \"CONSIDERATION\"\n\t\tself.definitions = [u'the act of thinking about something carefully: ', u'a particular subject or fact that needs to be thought about when judging something: ', u'to think carefully about a particular fact when deciding or judging something : ', u\"behaviour that is kind and considers people's feelings: \", u'a payment for a service: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_consideration.py","file_name":"_consideration.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547913846","text":"import pytest\nfrom rest_framework.reverse import reverse\n\nfrom application_form.api.serializers import HitasSerializer\nfrom application_form.tests.factories import HitasApplicationFactory\n\nlist_url = reverse(\"v1:hitasapplication-list\")\n\n\nHITAS_APPLICATION_TEST_DATA = {\n \"has_previous_hitas_apartment\": True,\n \"previous_hitas_description\": \"WqQAZURKDxBk\",\n \"has_children\": True,\n \"apartment_uuid\": \"e6dd9eff-6bfa-49c6-98ae-24290d220ef2\",\n}\n\n\n@pytest.mark.django_db\ndef test_hitas_application_create(api_client):\n response = api_client.post(list_url, HITAS_APPLICATION_TEST_DATA)\n\n assert response.status_code == 201\n\n\n@pytest.mark.django_db\ndef test_hitas_applications_read(api_client):\n HitasApplicationFactory()\n response = api_client.get(list_url)\n\n assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_hitas_application_singlis_changing_occupancy_apartmente_read(api_client):\n hitas_application = HitasApplicationFactory()\n response = api_client.get(\n reverse(\"v1:hitasapplication-detail\", kwargs={\"pk\": hitas_application.id})\n )\n\n assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_hitas_application_udpdate(api_client):\n hitas_application = HitasApplicationFactory()\n serializer = HitasSerializer(hitas_application)\n data = serializer.data\n 
data[\"has_children\"] = False\n response = api_client.put(\n reverse(\"v1:hitasapplication-detail\", kwargs={\"pk\": hitas_application.id}), data\n )\n\n assert response.status_code == 200\n assert response.data[\"has_children\"] is False\n\n\n@pytest.mark.django_db\ndef test_hitas_application_delete(api_client):\n hitas_application = HitasApplicationFactory()\n response = api_client.delete(\n reverse(\"v1:hitasapplication-detail\", kwargs={\"pk\": hitas_application.id})\n )\n\n assert response.status_code == 204\n","sub_path":"application_form/tests/test_hitas_application.py","file_name":"test_hitas_application.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191417488","text":"\"\"\"\nGiven a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.\nAssume that each node in the tree also has a pointer to its parent.\n\nAccording to the definition of LCA on Wikipedia:\n“The lowest common ancestor is defined between two nodes v and w as the lowest node in T\nthat has both v and w as descendants\n(where we allow a node to be a descendant of itself).”\n\nSolution:\n- Brute force\nStore the path from node1 to root.\nTraverse up node2 & check if any node exists in the stored path.\n\nTime: O(logn)\nSpace: O(logn)\n\n- Counting heights\nCount the height of both nodes from the root\nMove up the longer one till both at the same height\nMove both simultaneously upwards & check if they are the same node\nTime: O(logn)\nSpace: O(1)\n\"\"\"\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n self.parent = None\n\n\ndef length_from_root(node):\n count = 0\n curr = node\n while curr:\n count += 1\n curr = curr.parent\n return count\n\n\ndef find_lca(n1, n2):\n l1 = length_from_root(n1)\n l2 = length_from_root(n2)\n\n while l1 > l2:\n n1 = n1.parent\n l1 -= 1\n while l2 > l1:\n n2 = n2.parent\n l2 -= 1\n\n while n1.parent:\n if n1 == n2:\n return n1\n n1 = n1.parent\n n2 = n2.parent\n return n1\n\n\nif __name__ == \"__main__\":\n root = Node(None)\n n1 = Node(1)\n n2 = Node(2)\n n3 = Node(3)\n n4 = Node(4)\n\n root.right = n1\n n1.parent = root\n\n n1.left = n2\n n2.parent = n1\n\n n2.left = n4\n n4.parent = n2\n\n n1.right = n3\n n3.parent = n1\n\n assert find_lca(n4, n3) == n1\n","sub_path":"old/dcp_series/dcp_112.py","file_name":"dcp_112.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"635880511","text":"def leiaint(n) -> object:\n while True:\n if not n.isdigit():\n n = input('\\033[1;31m\\nERRO!\\033[m\\nDigite apenas um número inteiro: ')\n elif n.isdigit():\n return f'\\nVocê digitou {n}'\n\n\nnum = leiaint(input('Digite um número: '))\nprint(num)\n","sub_path":"Mundo-03/ex104-leiaint.py","file_name":"ex104-leiaint.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"597252960","text":"# -*- coding: utf-8 -*-\n\"\"\"\n0. Run the file DataDownload.py to Download Specific Ticker Related Data\n1. SECTION 1 -- LEARNING AND PERFORMANCE CALCULATION (LearnndPerfCalc.py)\n2. SECTION 2 -- PLOT LEARNING AND MODEL COMPLEXITY (PlotModelCcomplexity.py) --- This is now obselete use LearnndPerfCalc.py instead\n3. SECTION 3 -- GET DATA SETS (GetDataSets.py)\n4. SECTION 4-- DATA PREPRATION AND NORMALIZATION (DataPrepNormalization.py)\n5. 
SECTION 5 PREPARE DATA FOR CLASSIFICATION MODELS (PrepDataForClassification.py) \n6. SECTION 6 PREPARE DATA FOR REGRESSION MODELS and call regression models (PrepDataForRegression.py)\n7. SECTION 7 CALL THE CLASSIFICATION MODEL (CallClassification.py)\n8. SECTION 8 REGRESSION FUNCTIONS (RegressionFunctions.py)\n9. SECTION 9 CLASSIFICATION FUNCTIONS (CallClassification.py)\n\n\"\"\"\n\n\n\n################ SECTION 4 DATA PREPARATION AND NORMALIZATION ############################################\n\"\"\" SECTION 4 DATA PREPARATION AND NORMALIZATION\n1. Create % Change in Adjusted close by return days based on the parameters passed in n\n2. Calculate Moving_Average_Of_ADJ_Close_Return_for_Interval_N based on intervals\n3. Remove all non-normalized columns and only keep the calculated columns\n\n\"\"\"\ndef addAdjClosePercentChangeAndMovingAvgOfReturn(dataframe, adjclose, returns, n):\n \"\"\"\n Pick up the ticker symbol from AdjClose_Ticker; e.g. if you are analyzing XOM \n (dataset.insert(0,out) where out = XOM in this case), AdjClose_XOM is formed. \n Take XOM, then append the word \"_ADJ_CL_PCT_CHG_DAY\" followed by\n the day range, which is \n defined in delta = range(2, 5); this is the value received in \"n\".\n Let's say n = 2,3,4 \n Date\tOpen\tHigh\tLow\tClose\tVolume\tAdj Close\n 12/31/2013\t100.489998\t101.389999\t100.43\t101.199997\t8509600\t91.899766\n 12/30/2013\t101.529999\t101.550003\t100.309998\t100.309998\t9007900\t91.091558\n 12/27/2013\t101.239998\t101.739998\t100.989998\t101.510002\t10209000\t92.181282\n 12/26/2013\t99.419998\t101.029999\t99.379997\t100.900002\t9531200\t91.62734\n 12/24/2013\t98.330002\t99.440002\t98.330002\t99.220001\t4168300\t90.101731\n 12/23/2013\t99\t99.290001\t98.389999\t98.510002\t10127600\t89.456981\n 12/20/2013\t99.389999\t99.599998\t98.599998\t98.68\t23331000\t89.611356\n Then\n XOM_DELTA_2 = (91.899766 - 92.181282)/92.181282=-0.003054 -- % Change in 2 days\n XOM_DELTA_3 = (91.899766 - 91.62734)/91.62734=0.002973196 -- % Change in 3 days\n XOM_DELTA_4 = (91.899766 - 90.101731)/90.101731=0.01995561 -- % Change in 4 days\n \"\"\"\n New_Column_Feature_To_Add = adjclose[9:] + \"_ADJ_CL_PCT_CHG_DAY_\" + str(n)\n dataframe[New_Column_Feature_To_Add] = dataframe[adjclose].pct_change(n)\n \"\"\"\n Get the name of the ticker from the Return_Ticker name, for example XOM from 'Return_XOM'.\n Then calculate the moving average of the returns (daily % return of Adj Close)\n for intervals received from the value of n (n=2,3,4) and add it to a new column in\n the dataframe. \n \"\"\"\n# print(\"returns \",returns[7:])\n Moving_Average_Of_ADJ_Close_Return_for_Interval_N = returns[7:] + \"_Mov_Agv_Day_Interval_\" + str(n)\n dataframe[Moving_Average_Of_ADJ_Close_Return_for_Interval_N] = dataframe[returns].rolling(n).mean()\n\n \ndef keepCalculatedColumnsOnly(datasets):\n \"\"\"\n This method removes the original columns (everything up to AdjClose),\n i.e. \"Open, High, Low, Close, AdjClose\" \n \n It will first drop the first data set (in this case XOM) and will store the \n remaining datasets in a new dataset called dataset_subset_rest\n \n Find the column name (anything that starts with \"AdjClose\") by using the following\n tt = (Individual_dataset.columns[Individual_dataset.columns.str.startswith('AdjClose_')])\n tt =''.join(map(str,tt))\n Once the name is found then find the location of the column using\n pos_adj_cls=Individual_dataset.columns.get_loc(tt)\n After finding the location of the column add 1, as you want to select all the columns (calculated columns)\n after the AdjClose columns \n Do the same 
thing for the first data set (XOM) and then join the two .\n Now join/merge the Y and X this will provide XOM, BRENT etc with XOM in first \n and with only with the relevant calculated columns\n datasets[0].iloc[:,pos_adj_cls_for_ticker_to_predict:].join(dataset_subset_rest, how = 'outer')\n \n \"\"\"\n for Individual_dataset in datasets[:1]:\n tt = (Individual_dataset.columns[Individual_dataset.columns.str.startswith('AdjClose_')])\n# print(\"TT \",tt)\n tt =''.join(map(str,tt))\n# print(\"TT \",tt)\n# print(\" Location of \",tt,Individual_dataset.columns.get_loc(tt))\n pos_adj_cls_for_ticker_to_predict=Individual_dataset.columns.get_loc(tt)\n pos_adj_cls_for_ticker_to_predict = pos_adj_cls_for_ticker_to_predict+1\n \n dataset_subset_rest = []\n for Individual_dataset in datasets[1:]:\n tt = (Individual_dataset.columns[Individual_dataset.columns.str.startswith('AdjClose_')])\n tt =''.join(map(str,tt))\n# print(\"TT \",tt)\n# print(\" Location of \",tt,Individual_dataset.columns.get_loc(tt))\n pos_adj_cls=Individual_dataset.columns.get_loc(tt)\n dataset_subset_rest.append( Individual_dataset.iloc[:,(pos_adj_cls+1):])\n \n# print(\"Position of 1st Adj Close \",int(pos_adj_cls_for_ticker_to_predict)) \n calculatedDataSetColumns =datasets[0].iloc[:,pos_adj_cls_for_ticker_to_predict:].join(dataset_subset_rest, how = 'outer')\n\n return calculatedDataSetColumns\n \n \ndef Find_NaN(PortfolioDataSets):\n \"\"\"\n count number of NaN in dataframe\n 1. Count the total number of rows between the start date and end date\n if some dates are missing it will count those missing\n dates and add to the number of rows (let say it counts 22 rows\n and 9 missing rows so total Rows =31)\n So Total Number of Rows is dataframe.shape[0] =31\n 2. Count the total number of column excluding the index column\n that is the dates , let say the total columns =14\n \n 3. Then the data set should have total 31 * 14 = 434 data points\n \n 4. Now count the total data points available in the dataframe excluding the\n missing rows and means in this case it will be 22 rows * 14 column = 226 data \n point which is given by dataframe.count() and then do a sum of each of \n these counts dataframe.count().sum() = 226\n \n 5. So missing data points are 434-226 = 208\n \n \"\"\"\n return (PortfolioDataSets.shape[0] * PortfolioDataSets.shape[1]) - PortfolioDataSets.count().sum()\n\n \ndef Remove_NaN(dataset, MoveUp, delta, back):\n \"\"\"\n Moving up data by using the back value and removing NaN from Datasets\n \"\"\"\n \n maxDaysUp = max(MoveUp)\n\n columns = dataset.columns[::(2*max(delta)-1)]\n for column in columns:\n for days in MoveUp:\n newcolumn = column + str(days)\n dataset[newcolumn] = dataset[column].shift(days)\n\n return dataset.iloc[maxDaysUp:-1,:]\n \n\n\n\n\n \n \n","sub_path":"DataPrepNormalization.py","file_name":"DataPrepNormalization.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"545044060","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLmdbDataset loader class to load data the dataset from the lmdb files.\n\nHere I show an example of the LmdbReader implemented for reading\nfiles from the Human3.6M dataset.\n\nNOTE: the format of the lmdb dataset is using the utility to convert\nthe original files in lmdb format, therefore we have a defined structure\nwe have defined when saving the files.\n\n@author: Denis Tome'\n\nCopyright Epic Games, Inc. 
All Rights Reserved.\n\n\"\"\"\nimport lmdb\nfrom base import BaseDatasetReader, OutputData\nfrom utils import io\n\n__all__ = [\n 'LmdbReader'\n]\n\n\nclass LmdbReader(BaseDatasetReader):\n \"\"\"Lmdb dataset reader\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init\"\"\"\n\n self._logger.info('Initializing datasets from lmdbs...')\n self._env, self._txn = self._init_lmdb(args[0])\n\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def _init_lmdb(path):\n \"\"\"Init lmdb\n\n Args:\n path (str): path\n\n Returns:\n lmdb.Environment: environment\n lmdb.Transaction: pointer\n \"\"\"\n\n env = lmdb.open(path, readonly=True)\n txn = env.begin()\n\n return env, txn\n\n def _index_dataset(self) -> list:\n \"\"\"Index data from lmdb.\n\n Returns:\n list: list of indices\n \"\"\"\n\n size = io.unserialize(self._txn.get('len'.encode('ascii')))\n indices = range(size)\n\n return indices\n\n def _process(self, data) -> dict:\n \"\"\"Pre-process pose\n\n Args:\n data (dict): frame information\n\n Returns:\n dict: dict with frame data\n \"\"\"\n\n p3d = data.p3d\n\n frame = self._initialize_frame_output()\n frame[OutputData.P3D] = p3d\n\n # -------------------------------------------------------- #\n # similarly, gather all the information that the dataset\n # can return, and assign it according to the name\n # -------------------------------------------------------- #\n\n return frame\n\n def __getitem__(self, index: int) -> dict:\n \"\"\"Get sample\n\n Args:\n index (int): sample id\n\n Returns:\n dict: dict with frame data\n \"\"\"\n\n fid = self._indices[index]\n\n # ------------------- get data from lmdb -------------------\n\n frame_key = 'frame_{:09d}'.format(fid).encode('ascii')\n data = io.unserialize(self._txn.get(frame_key))\n\n return self._process(data)\n","sub_path":"ProjectName/dataset_def/lmdb/lmdb.py","file_name":"lmdb.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"615371741","text":"from typing import TYPE_CHECKING\nif TYPE_CHECKING:\n\tfrom main import Phaazebot\n\nimport requests\nfrom aiohttp.web import Request\n\nROOT_URL = \"https://discordapp.com/api/v6/\"\n\nasync def translateDiscordToken(cls:\"Phaazebot\", WebRequest:Request) -> dict or None:\n\t\"\"\"\n\t\tUsed to complete an OAuth verification via a token the user provides in his GET query\n\t\t(It has to be there)\n\t\tWe then get all the info we want/need from Discord\n\t\"\"\"\n\tcode:str = WebRequest.query.get(\"code\", None)\n\tif not code:\n\t\tcls.Logger.debug(\"translateDiscordToken called without code\", require=\"discord:api\")\n\t\treturn None\n\n\treq:dict = dict(\n\t\tclient_id = cls.Vars.DISCORD_BOT_ID,\n\t\tclient_secret = cls.Access.DISCORD_SECRET,\n\t\tgrant_type = \"authorization_code\",\n\t\tcode = code,\n\t\tredirect_uri = cls.Vars.DISCORD_REDIRECT_LINK\n\t)\n\theaders:dict = {'Content-Type': 'application/x-www-form-urlencoded'}\n\n\tres = requests.post(ROOT_URL+\"oauth2/token\", data=req, headers=headers)\n\treturn res.json()\n\nasync def getDiscordUser(cls:\"Phaazebot\", access_token:str) -> dict:\n\t\"\"\"\n\t\tget all the info Discord allows us to see for a user\n\t\"\"\"\n\theaders:dict = {\"Authorization\": f\"Bearer {access_token}\"}\n\n\tres:requests.Response = requests.get(ROOT_URL+\"users/@me\", headers=headers)\n\n\treturn res.json()\n\nasync def getDiscordUserServers(cls:\"Phaazebot\", access_token:str) -> list:\n\t\"\"\"\n\t\tget basic info for all the guilds/servers a user is on\n\t\t(requires the 
access_token to have the right scope so we are allowed to see it)\n\t\"\"\"\n\theaders:dict = {\"Authorization\": f\"Bearer {access_token}\"}\n\n\tres:requests.Response = requests.get(ROOT_URL+\"users/@me/guilds\", headers=headers)\n\n\treturn res.json()\n","sub_path":"Platforms/Discord/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"255444081","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom .games.games import games\nfrom datetime import time\nimport random\n\nclass BoardManager(models.Manager):\n\n def create(self, game_id):\n\n code = hex(random.randint(0, 1048575))[2:].zfill(5).upper()\n state = State.objects.create(game_id=game_id, turn=1)\n board = super().create(game_id=game_id, code=code, state=state)\n game = games[game_id]\n\n for x in range(0, game.width):\n for y in range(0, game.height):\n type, owner_id = board.game().initial(x, y)\n if type: Piece.objects.create(\n state=state, type_id=type.id, owner_id=owner_id, x=x, y=y)\n return board\n\nclass Board(models.Model):\n\n game_id = models.IntegerField()\n\n code = models.CharField(max_length=5)\n\n state = models.ForeignKey('State',\n on_delete=models.SET_NULL, null=True, blank=True)\n\n stage = models.IntegerField(default=0)\n\n time = models.DateTimeField(auto_now_add=True)\n\n class Meta: ordering = ['-time']\n\n def __str__(self): return self.code + \" \" + self.game().name\n\n boards = BoardManager()\n\n def place_piece(self, player_id, x, y, type=None):\n\n if not type: type = self.game().types[0]\n\n if self.game().place_valid(self.state, self.state.pieces(),\n type, player_id, x, y):\n self.state = self.state.next()\n self.game().place_piece(self.state, self.state.pieces(),\n type, player_id, x, y)\n if self.state.outcome != -1: self.stage = 2\n self.state.save()\n self.save()\n return True\n else: return False\n\n def move_piece(self, x_from, y_from, x_to, y_to):\n\n if self.game().move_valid(self.state, self.state.pieces(),\n x_from, y_from, x_to, y_to):\n self.state = self.state.next()\n self.game().move_piece(self.state, self.state.pieces(),\n x_from, y_from, x_to, y_to)\n if self.state.outcome != -1: self.stage = 2\n self.state.save()\n self.save()\n return True\n else: return False\n\n def remove_piece(self, x, y):\n\n if self.game().remove_valid(self.state, self.state.pieces(), x, y):\n self.state = self.state.next()\n self.game().remove_piece(self.state, self.state.pieces(), x, y)\n if self.state.outcome != -1: self.stage = 2\n self.state.save()\n self.save()\n return True\n else: return False\n\n def selectable(self, x, y):\n return self.game().selectable(self.state, self.state.pieces(), x, y)\n\n def current(self, player):\n return player and player.order == self.state.turn\n\n def players(self):\n return Player.objects.filter(board=self)\n\n def player(self, user):\n return self.players().filter(user=user).first()\\\n if user.is_authenticated else None\n\n def game(self):\n return games[self.game_id]\n\n def messages(self):\n return Message.objects.filter(board=self)\n\n def users(self):\n return map(lambda p: p.user, self.players())\n\n def join(self, user):\n Player.objects.create(user=user, board=self,\n order=self.players().count()+1)\n\n def start(self):\n self.stage = 1\n self.save()\n\n def to_dictionary(self):\n return {\n 'game': self.game(),\n 'code': self.code,\n 'state': self.state,\n 'players': self.players(),\n 'stage': 
self.stage,\n 'time': self.time,\n 'messages': self.messages()\n }\n\nclass Player(models.Model):\n\n user = models.ForeignKey(User,\n on_delete=models.SET_NULL, null=True)\n\n board = models.ForeignKey(Board, on_delete=models.CASCADE)\n\n order = models.IntegerField()\n\n score = models.IntegerField(default=0)\n\n leader = models.BooleanField(default=False)\n\n time = models.TimeField(default=time(0, 0, 0))\n\n class Meta: ordering = ['board', 'order']\n\n def __str__(self): return self.board.code + \" \" + self.user.username\n\n def leave(self):\n\n for player in self.board.players().filter(order__gt=self.order):\n player.order -= 1\n player.save()\n self.delete()\n\n if self.leader:\n other_player = self.board.players().first()\n if other_player:\n other_player.leader = True\n other_player.save()\n else:\n self.board.delete()\n\n def promote(self):\n\n other_player = self.board.players().get(order=self.order-1)\n other_player.order += 1\n other_player.save()\n self.order -= 1\n self.save()\n\n def demote(self):\n\n other_player = self.board.players().get(order=self.order+1)\n other_player.order -= 1\n other_player.save()\n self.order += 1\n self.save()\n\n def transfer(self):\n\n other_player = self.board.players().get(leader=True)\n other_player.leader = False\n other_player.save()\n self.leader = True\n self.save()\n\nclass State(models.Model):\n\n game_id = models.IntegerField()\n\n turn = models.IntegerField(default=1)\n\n stage = models.IntegerField(default=0)\n\n ply = models.IntegerField(default=0)\n\n previous = models.ForeignKey('State',\n on_delete=models.CASCADE, null=True)\n\n outcome = models.IntegerField(default=-1)\n\n def next(self):\n\n state = State.objects.create(\n game_id=self.game_id,\n turn=self.turn,\n stage=self.stage,\n ply=self.ply,\n previous=self,\n outcome=self.outcome\n )\n\n for row in self.pieces():\n for piece in row:\n if piece: piece.next(state)\n return state\n\n def end_stage(self, skip=1):\n self.stage = self.stage + skip\n self.save()\n\n def end_turn(self, skip=1):\n self.turn = self.turn % self.game().players + skip\n self.ply = self.ply + 1\n self.stage = 0\n self.save()\n\n def end_game(self, winner=0):\n self.outcome = winner\n self.save()\n\n def set_piece(self, type_id, owner_id, x, y):\n\n Piece.objects.filter(state=self, x=x, y=y).delete()\n\n if type_id != -1: Piece.objects.create(\n state=self,\n type_id=type_id,\n x=x, y=y,\n owner_id=owner_id\n )\n\n Change.objects.create(state=self, x=x, y=y)\n\n def place_piece(self, type, owner_id, x, y):\n self.set_piece(type.id, owner_id, x, y)\n\n def move_piece(self, x_from, y_from, x_to, y_to):\n piece = Piece.objects.filter(state=self, x=x_from, y=y_from).get()\n self.set_piece(-1, 0, x_from, y_from)\n self.set_piece(piece.type_id, piece.owner_id, x_to, y_to)\n\n def remove_piece(self, x, y):\n self.set_piece(-1, 0, x, y)\n\n def game(self):\n return games[self.game_id]\n\n def pieces(self):\n\n piece_set = Piece.objects.filter(state=self)\n pieces = []\n\n for x in range(0, self.game().width):\n col_set = piece_set.filter(x=x)\n col = []\n\n for y in range(0, self.game().height):\n col.append(col_set.filter(y=y).first())\n pieces.append(col)\n return pieces\n\n def changes(self):\n return Change.objects.filter(state=self, state__ply=self.ply)\n\n def modified(self, x, y):\n return any(map(lambda c: c.x == x and c.y == y, self.changes()))\n\n def to_dictionary(self):\n return {\n 'game': self.game(),\n 'turn': self.turn,\n 'stage': self.stage,\n 'previous': self.previous,\n 'number': 
self.number,\n 'outcome': self.outcome,\n 'pieces': self.pieces()\n }\n\nclass Change(models.Model):\n\n state = models.ForeignKey(State, on_delete=models.CASCADE)\n\n x = models.IntegerField()\n\n y = models.IntegerField()\n\nclass Piece(models.Model):\n\n state = models.ForeignKey(State,\n on_delete=models.CASCADE)\n\n type_id = models.IntegerField()\n\n owner_id = models.IntegerField()\n\n x = models.IntegerField()\n\n y = models.IntegerField()\n\n class Meta: ordering = ['state']\n\n def __str__(self): return self.state.board.code + \":\"\\\n + str(self.state.number) + \":\" + str(self.id)\n\n def next(self, state):\n return Piece.objects.create(\n state=state,\n type_id=self.type_id,\n owner_id=self.owner_id,\n x=self.x,\n y=self.y\n )\n\n def type(self):\n return self.state.game().types[self.type_id]\n\n def texture(self):\n return self.type().texture(self.owner_id)\n\n def owner(self, board):\n return board.players()[self.owner_id-1]\n\n def to_dictionary(self):\n return {\n 'state': self.state,\n 'type': self.type(),\n 'owner_id': self.owner_id,\n 'x': self.x,\n 'y': self.y,\n 'texture': self.texture()\n }\n\nclass Message(models.Model):\n\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n message = models.CharField(max_length=500)\n\n board = models.ForeignKey(Board, on_delete=models.CASCADE)\n\n time = models.DateTimeField(auto_now=True)\n\n class Meta: ordering = ['board', '-time']\n\n def __str__(self): return self.board.code + \":\" + str(self.id)","sub_path":"games/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522930802","text":"'''\nAuthor : Oguzhan Gencoglu\nContact : oguzhan.gencoglu@tut.fi\nCreated : 25.07.2016\nLatest Version : 25.07.2016\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\n\n\ndef peak_freq(sig, fs):\n \n # windowing\n sig_len = len(sig)\n sig = np.blackman(sig_len) * sig\n \n # Take the fft and square each value\n fftData = abs(np.fft.rfft(sig))**2\n \n # find the maximum\n which = fftData[1:].argmax() + 1\n \n # use quadratic interpolation around the max\n if which != len(fftData)-1:\n y0,y1,y2 = np.log(fftData[which-1:which+2:])\n x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)\n # find the frequency and output it\n thefreq = (which+x1)*fs/sig_len\n print(\"The freq is %f Hz.\" % (thefreq))\n else:\n thefreq = which*fs/sig_len\n print(\"The freq is %f Hz.\" % (thefreq))\n \n return thefreq","sub_path":"Python/Audio/peak_freq.py","file_name":"peak_freq.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47523355","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 11 10:25:41 2018\n\n@author: daniel\n\"\"\"\nimport shelve\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Home PC\n#directory = \"C:\\\\Users\\\\daniel\\\\repos\\\\pyDentate\\paradigm_pattern-separation_saves_2018-03-11\\\\\"\n#Office PC\n#directory = \"Y:\\\\DanielM\\\\023_Dentate Gyrus Model\\\\paradigm_spatial-inhibition\\\\\"\n#Dropbox\ndirectory = \"Z:\\\\pyDentate\\\\pyDentateData\\\\pattern_separation_data_local_input_revised_exp\\\\seed10006\\\\scale1000\\\\net_tunedrev\\\\\"\n\nfile_name = \"net_tunedrevexp.TunedNetwork_data_paradigm_local-pattern-separation_run_scale_seed_001_1000_10006.pydd\"\n\ndata = shelve.open(directory + file_name)\n# Get to BasketCell 
Connection\nGC_to_BC_targets = data['net_tunedrevexp.TunedNetwork']['populations'][0]['connections'][25]['GranuleCellPopulation to BasketCellPopulation']['pre_cell_targets'].flatten()\n\n# Get to HippCell Connection\nGC_to_HC_targets = data['net_tunedrevexp.TunedNetwork']['populations'][0]['connections'][26]['GranuleCellPopulation to HippCellPopulation']['pre_cell_targets'].flatten()\n\n# Get BasketCell to GranuleCell Connection\nBC_to_GC_targets = data['net_tunedrevexp.TunedNetwork']['populations'][0]['connections'][27]['BasketCellPopulation to GranuleCellPopulation']['pre_cell_targets'].flatten()\n\n# Get HippCell to GranuleCell Connection\nHC_to_GC_targets = data['net_tunedrevexp.TunedNetwork']['populations'][0]['connections'][28]['HippCellPopulation to GranuleCellPopulation']['pre_cell_targets'].flatten()\n\nplt.figure()\nplt.hist(GC_to_BC_targets, bins = 24)\nplt.xlabel(\"# BCs\")\nplt.ylabel(\"# incoming GC Synapses\")\n \nplt.figure()\nplt.hist(GC_to_HC_targets, bins = 24)\nplt.xlabel(\"# HCs\")\nplt.ylabel(\"# incoming GC Synapses\")\n \nplt.figure()\nplt.hist(BC_to_GC_targets, bins = 2000)\nplt.xlabel(\"# GCs\")\nplt.ylabel(\"# incoming BC Synapses\")\n\nplt.figure()\nplt.hist(HC_to_GC_targets,bins = 2000)\nplt.xlabel(\"# GCs\")\nplt.ylabel(\"# incoming HC Synapses\")","sub_path":"grid_pattern_seperation/analysis_pattern_separation/model_get_convergence.py","file_name":"model_get_convergence.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"481445195","text":"# Assignment 1\n# 권혁기\n\nimport datetime \nnow = datetime.datetime.now()\nnowDatetime = now.strftime('%Y-%m-%d %H:%M:%S')\n\n\nstudents = ['장동호','신성욱','임재민','권혁기','김재국']\n\n\n\nattendances = {}\n\n\n\ndef attend():\n\tprint('출첵하세요')\n\tstudent = input()\n\tif student in students:\n\t\tattendances.update({student : nowDatetime })\n\t\tprint(attendances)\n\telse:\n\t\tprint('학생이 아닙니다.')\n\nattend()\n","sub_path":"gadokkwon_attendance1.py","file_name":"gadokkwon_attendance1.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"81829753","text":"# I'm having a party and made a list of people I want to invite. Unfortunately, I'm a terrible\n# friend and made a couple of spelling errors. 
Help me correct them!\n\n# - Change Hanna to Hannah\n# - Change Geoffrey to Jeffrey\n# - Change aparna to Aparna\n\n# DON'T TOUCH THIS PLEASE!\npeople = [\"Hanna\",\"Louisa\",\"Claudia\", \"Angela\",\"Geoffrey\", \"aparna\"]\n# DON'T TOUCH THIS PLEASE!\n\n#Change \"Hanna\" to \"Hannah\"\npeople[0] = \"Hannah\"\n#Change \"Geoffrey\" to \"Jeffrey\"\npeople[4] = \"Jeffrey\"\n#Change \"aparna\" to \"Aparna\" (capitalize it)\npeople[-1] = \"Aparna\"\n\nprint(people)\n","sub_path":"Udemy/Python3Bootcamp/Lists/16_Accessing_List_Data.py","file_name":"16_Accessing_List_Data.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"217801861","text":"from __future__ import division, print_function; __metaclass__ = type\nimport numpy as np\nimport west\nfrom west import WESTSystem\nfrom westpa.binning import RectilinearBinMapper\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.debug('loading module %r' % __name__)\n\npcoord_len = 2\npcoord_dtype = np.float32\n\n\nclass System(WESTSystem):\n def initialize(self):\n self.pcoord_ndim = 1\n self.pcoord_len = pcoord_len\n self.pcoord_dtype = pcoord_dtype\n\n binbounds = [0.0] + np.arange(3.0, 8.1, 0.1).tolist() + [float('inf')]\n\n self.bin_mapper = RectilinearBinMapper([binbounds])\n self.bin_target_counts = np.empty((self.bin_mapper.nbins,), int)\n self.bin_target_counts[...] = 12\n\n\ndef gen_state_labels(mapper):\n dtest = np.linspace(2.5, 8.7, 1000)[:,None]\n\n state_list = []\n assignments = mapper.assign(dtest)\n uassign, indx = np.unique(assignments, return_index=True)\n pcoords = dtest[indx]\n\n state_a = np.where(pcoords < 4.2)[0]\n state_b = np.where(pcoords > 7.0)[0]\n\n state_list.append({'label': 'state_a', 'coords': pcoords[state_a]})\n state_list.append({'label': 'state_b', 'coords': pcoords[state_b]})\n\n return state_list\n","sub_path":"lib/examples/wca-dimer_openmm/we_exec/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"400902254","text":"__author__ = 'Team1'\n\nfrom tkinter import *\nimport tkinter as tk\nimport Knop_Code\nimport Startscherm_engels\n\n\ndef naar_knop_code():\n \"\"\"This function closes the current window and opens the scherm() function from the file Knop_Code.py\"\"\"\n root.destroy()\n Knop_Code.scherm()\n\ndef taal_engels():\n \"\"\"This function closes the current window and opens the create_window() function from the file Startscherm_engels.py\"\"\"\n root.destroy()\n Startscherm_engels.create_window()\n\n\ndef create_window():\n \"\"\" Function that builds the layout and full functionality of the main screen.\"\"\"\n global root\n root = Tk() # This is the base window.\n root.title(\"NS Automaat\") # Title of the window.\n root.configure(background='#FECE22') # Background colour\n\n# the code below makes sure the window is shown in the middle of your monitor.\n# Settings for window size and position.\n root.withdraw()\n root.update_idletasks()\n w = 700 # Width of the window.\n h = 500 # Height of the window.\n\n ws = root.winfo_screenwidth() # Width of the screen.\n hs = root.winfo_screenheight() # Height of the screen.\n\n# Compute the x and y coordinates of the window.\n x = (ws/2) - (w/2)\n y = (hs/2) - (h/2)\n\n# Put the window in the right place with the right size.\n root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n root.deiconify()\n 
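# Lock the window size so the absolute widget placement below keeps its layout.\n 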
root.resizable(0, 0)\n\n# the code below creates the top frame and packs it into the root window.\n global topframe\n topframe = Frame(root)\n topframe.pack()\n# the code below creates the bottom frame and packs it into the root window.\n global bottomframe\n bottomframe = Frame(root, bg='midnightblue', width=800, height=60)\n bottomframe.pack(side=BOTTOM)\n bottomframe.pack_propagate(0)\n\n\n# the code below adds the flag buttons for Dutch and English.\n photo_nl = PhotoImage(file='nl_icon.gif')\n label_nl = Button(bottomframe,image=photo_nl, bg='#003399')\n label_nl.pack()\n label_nl.place(relx=0.02, rely=0.1)\n\n photo_uk = PhotoImage(file='uk_icon.gif')\n label_uk = Button(bottomframe, image=photo_uk, bg='#003399', command=taal_engels)\n label_uk.pack()\n label_uk.place(relx=0.133, rely=0.1)\n\n# below, the labels for the text underneath the flags.\n tekst_nl = Label(bottomframe, text='Nederlands', bg='#003399', fg='white', font=('Ariel',10,'bold'))\n tekst_nl.pack()\n tekst_nl.place(relx=0.01, rely=0.7)\n\n tekst_uk = Label(bottomframe, text='English', bg='#003399', fg='white', font=('Ariel',10,'bold'))\n tekst_uk.pack()\n tekst_uk.place(relx=0.142, rely=0.7)\n\n\n# the code below adds the logos of the payment options\n photo_maestro = PhotoImage(file='maestro.gif')\n maestro_label = Label(bottomframe, image=photo_maestro, bg='#003399')\n maestro_label.pack()\n maestro_label.place(relx=0.4, rely=0.08)\n\n photo_vpay = PhotoImage(file='vpay.gif')\n vpay_label = Label(bottomframe, image=photo_vpay, bg='#003399')\n vpay_label.pack()\n vpay_label.place(relx=0.475, rely=0.07)\n\n photo_visa = PhotoImage(file='visa.gif')\n visa_label = Label(bottomframe, image=photo_visa, bg='#003399')\n visa_label.pack()\n visa_label.place(relx=0.538, rely=0.08)\n\n photo_mastercard = PhotoImage(file='mastercard.gif')\n mastercard_label = Label(bottomframe, image=photo_mastercard, bg='#003399')\n mastercard_label.pack()\n mastercard_label.place(relx=0.615, rely=0.08)\n\n# below, the code for the welcome text.\n welkom = Label(topframe, text='\\n\\nWelkom bij NS')\n welkom.config(foreground='#003399', background='#FECE22', font=('Ariel',25,'bold'))\n welkom.pack()\n\n# below, the code for a canvas\n canvas = Canvas(bg='white', height=160, width=290) # Canvas parameters: background colour, height and width.\n canvas.config(highlightbackground='#003399') # here a border is added around the canvas.\n tekst_canvas = Label(text='Houd uw\\nOV-chipkaart\\nvoor de\\nkaartlezer\\nrechtsonder\\nnaast het scherm.',\n bg='white', fg='#003399', font=('Ariel',10, 'bold'))\n canvas.pack()\n tekst_canvas.pack()\n tekst_canvas.place(width=120, height=150, relx=0.3, rely=0.25) #canvas position and dimensions.\n\n canvas_af1 = PhotoImage(file='ov_hand.GIF')\n label_af1 = Label(image=canvas_af1, bg='#003399')\n label_af1.pack(padx=5, pady=5)\n label_af1.place(relx=0.5, rely=0.3)\n\n canvas_af2 = PhotoImage(file='arrow.gif')\n label_af2 = Label(image=canvas_af2, bg='white')\n label_af2.pack(padx=5, pady=5)\n label_af2.place(relx=0.65, rely=0.48)\n\n# Below we create the buttons\n knop1 = Button(text='Ik wil naar\\nAmsterdam', bg='#003399', fg='white')\n knop2 = Button(text='Kopen\\nlos kaartje', bg='#003399', fg='white')\n knop3 = Button(text='Kopen\\nOV-chipkaart', bg='#003399', fg='white')\n knop4 = Button(text='Ik wil naar\\nhet buitenland', bg='#003399', fg='white')\n knop5 = Button(text='Actuele\\nvertrektijden', bg='#003399', fg='white', command=naar_knop_code)\n\n\n 
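# Position the five menu buttons in a row above the footer.\n 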
knop1.pack()\n knop1.place(width=100, height=50, relx=0.15, rely=0.65)\n knop2.pack()\n knop2.place(width=100, height=50, relx=0.30, rely=0.65)\n knop3.pack()\n knop3.place(width=100, height=50, relx=0.45, rely=0.65)\n knop4.pack()\n knop4.place(width=100, height=50, relx=0.6, rely=0.65)\n knop5.pack()\n knop5.place(width=100, height=50, relx=0.75, rely=0.65)\n\n\n\n\n root.mainloop()\n","sub_path":"Startscherm.py","file_name":"Startscherm.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"16590173","text":"invited_list = []\r\n\r\nprint(\"Enter three people you want to invite:\")\r\nfor i in range(3):\r\n invited_list.insert(i, str(input(\"...\")))\r\n\r\nanother = str(input(\"Do you want another in the list (y/n)? \"))\r\n\r\nwhile another == \"y\":\r\n invited_list.append(str(input(\"...\")))\r\n another = str(input(\"Do you want another in the list (y/n)? \"))\r\n\r\nprint()\r\nprint(\"Here are the people you invited:\\n\")\r\n\r\nfor i in invited_list:\r\n print(i)\r\n\r\nprint()\r\n","sub_path":"09_Tuples_Lists_and_Dictionaries/Problema_076.py","file_name":"Problema_076.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"178835187","text":"from flask import request, jsonify, abort, redirect\nfrom datetime import datetime\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom server_root import app, db, regional_db\nfrom dateutil import parser\nfrom sqlalchemy import and_, or_\nfrom server_root.models import users\nfrom server_root.models.users.models import User\nfrom server_root.models.messages.models import Message\nfrom server_root.models.users.models import check_cur_user_token\n\n@app.route('/api/v1/get_photo_thumbnail_data', methods=['GET'])\ndef get_photo_thumbnail_data():\n cur_user_id = request.args.get('cur_user_id', None)\n if cur_user_id:\n cur_user_id = str(cur_user_id)\n else:\n abort(404)\n try:\n cur_user = regional_db.session.query(User).filter(and_(User.id==cur_user_id, User.is_deleted==False)).one()\n except NoResultFound:\n abort(404)\n\n auth_token = request.args.get('auth_token', None)\n if not auth_token:\n abort(404)\n\n check_cur_user_token(cur_user_id=cur_user.id, cur_user_token=auth_token)\n cur_user.update_located_region()\n cur_user.update_last_time_use_app()\n\n photo_id = request.args.get('photo_id', None)\n if photo_id:\n photo_id = str(photo_id)\n else:\n abort(404)\n\n try:\n photo_message = regional_db.session.query(Message).filter(and_(Message.id==photo_id, Message.is_deleted==False)).one()\n except NoResultFound:\n abort(404)\n\n if photo_message.sender != cur_user:\n abort(404)\n\n photo_thumbnail_url = None\n if photo_message.media_thumbnail_file_name:\n photo_thumbnail_url = photo_message.get_media_thumbnail_url()\n else:\n abort(404)\n\n return redirect(photo_thumbnail_url)","sub_path":"server_root/apis/v1/get_photo_thumbnail_data/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"267151230","text":"\n\nfrom xai.brain.wordbase.adjectives._explosive import _EXPLOSIVE\n\n#class header\nclass _EXPLOSIVES(_EXPLOSIVE):\n\tdef __init__(self): \n\t\t_EXPLOSIVE.__init__(self)\n\t\tself.name = \"EXPLOSIVES\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"explosive\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/adjectives/_explosives.py","file_name":"_explosives.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407736039","text":"from rest_framework.decorators import detail_route, list_route\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions, status, viewsets\nfrom rest_framework.authentication import SessionAuthentication\n\n\nclass UnsafeSessionAuthentication(SessionAuthentication):\n def authenticate(self, request):\n http_request = request._request\n user = getattr(http_request, 'user', None)\n if not user or not user.is_active:\n return None\n return (user, None)\n\n def enforce_csrf(self, request):\n return\n\n\nclass EmptyAPIView(viewsets.ViewSet):\n environment = None\n authentication_classes = (UnsafeSessionAuthentication, )\n permission_classes = (permissions.AllowAny, )\n\n\nclass WebAPIView(EmptyAPIView):\n def list(self, request, format=None):\n self.environment.load_data(\n method='list',\n filters=request.data.get(\"filters\", None))\n if len(self.environment.permissions) == 0 or request.user.has_perms(\n self.environment.permissions):\n serial = self.environment.serializer(\n self.environment.query,\n many=True,\n read_only=True)\n return Response(serial.data, status=status.HTTP_200_OK)\n else:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n # Get only one object by its ID or PK\n def retrieve(self, request, pk, format=None):\n self.environment.load_data(method='retrieve', pk=pk)\n if len(self.environment.permissions) == 0 or request.user.has_perms(\n self.environment.permissions):\n serial = self.environment.serializer(\n self.environment.query,\n many=False,\n read_only=True)\n return Response(serial.data, status=status.HTTP_200_OK)\n else:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n # Create a new object from scratch\n @list_route(methods=['post'])\n def create(self, request, format=None):\n self.environment.load_data('create')\n if len(self.environment.permissions) == 0 or request.user.has_perms(\n self.environment.permissions):\n serial = self.environment.serializer(request.data)\n if serial.is_valid():\n serial.save()\n return Response(serial.data, status=status.HTTP_201_CREATED)\n return Response(\n serial.errors,\n status=status.HTTP_412_PRECONDITION_FAILED)\n else:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n # Update an existing object using its ID or PK\n def update(self, request, pk, format=None):\n self.environment.load_data('update', pk=pk)\n if len(self.environment.permissions) == 0 or request.user.has_perms(\n self.environment.permissions):\n serial = self.environment.serializer(\n self.environment.query,\n data=request.data)\n if serial.is_valid():\n serial.save()\n return Response(serial.data, status=status.HTTP_202_ACCEPTED)\n return Response(\n serial.errors,\n status=status.HTTP_412_PRECONDITION_FAILED)\n else:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n # Delete an object by using its ID or PK\n def destroy(self, request, pk, format=None):\n self.environment.load_data('destroy', pk=pk)\n if len(self.environment.permissions) == 0 or request.user.has_perms(\n self.environment.permissions):\n self.environment.query.delete()\n return Response(status=status.HTTP_200_OK)\n else:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n # @list_route(methods=['post'])\n # def destroy1(self, request, pk, format=None):\n # 
self.environment.load_data('delete', pk=pk)\n # if len(self.environment.permissions) == 0 or request.user.has_perms(\n # self.environment.permissions):\n # self.environment.query.delete()\n # return Response(status=status.HTTP_200_OK)\n # else:\n # return Response(status=status.HTTP_403_FORBIDDEN)\n","sub_path":"contrib/django_rest_framework/apiViews.py","file_name":"apiViews.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"88187278","text":"#\nimport json\nimport time\nimport numpy\nimport pandas\nimport asyncio\nfrom data.flat import KernelLoader\n\n\n#\nfrom news_embedder.overhelm import embedding_pool\nfrom news_embedder.configuration import Config\n\n\n#\nk = 'C:/Users/MainUser/Desktop/OpenAPI_sandbox.txt'\ncrs = open(k, \"r\")\napi_key = 'alive_outside'\nfor columns in (raw.strip().split() for raw in crs):\n api_key = columns[0]\n\ntarget_quotes = ['MSFT']\nnews_horizon = 100\neffect_horizon = 100\n\ndb_config = 'C:/Users/MainUser/Desktop/config.json'\nnews_titles_source = './data/data/rex.xlsx'\n\nconfig = Config()\nconfig.model = {'agg': 'mean'}\n\nloader = KernelLoader(api_key, target_quotes, news_horizon, effect_horizon, db_config,\n window_function='ewm', window_function_kwargs={'alpha': 0.1, 'adjust': False},\n reload_quotes=True,\n news_titles_source=news_titles_source, verbose=True, base_option='for_merge', add_time_features=True,\n nlp_treator=embedding_pool, nlp_treator_signature=['sister'], nlp_treator_config=config,\n nlp_ductor='post', export_chunk=100_000)\n# base_option='without'\n\n# data = await loader.read()\ndata = asyncio.run(loader.read())\n\"\"\"\ndata = data.dropna()\n\nd = './result.csv'\ndata.to_csv(d, sep=';', index=False)\n\"\"\"\n\"\"\"\n# data.columns = [x.replace('_PCT1', '') for x in data.columns.values]\ndata.dropna().sort_values(by=['title', 'lag'])\n\nd = './dataset_use_timed.csv'\ndata.dropna().sort_values(by=['title', 'lag']).to_csv(d, index=False)\n\"\"\"\n\"\"\"\ng = 'E:/dataset_use_timed.csv'\ndata = pandas.read_csv(g, sep=';')\n\"\"\"\n","sub_path":"bork_bork__kernel.py","file_name":"bork_bork__kernel.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200806772","text":"\"\"\"\nElastic basis pursuit\n\"\"\"\n\nimport numpy as np\nimport numpy.linalg as nla\nimport leastsqbound as lsq\nimport sklearn.linear_model as lm\nimport scipy.optimize as opt\n\n\ndef err_func(params, x, y, func):\n \"\"\"\n Error function for fitting a function\n \n Parameters\n ----------\n params : tuple\n A tuple with the parameters of `func` according to their order of\n input\n\n x : float array \n An independent variable. \n \n y : float array\n The dependent variable. \n \n func : function\n A function with inputs: `(x, *params)`\n \n Returns\n -------\n The sum of squared marginals of the fit to x/y given the params\n \"\"\"\n # We ravel both, so that we can accomodate multi-d input without having\n # to think about it:\n return np.ravel(y) - np.ravel(func(x, params))\n\n\ndef gaussian_kernel(x, params):\n \"\"\"\n A multi-dimensional Gaussian kernel function\n\n Useful for creating and testing EBP with simple Gaussian Mixture Models\n\n Parameters\n ----------\n x : ndarray\n The independent variable over which the Gaussian is calculated\n\n params : ndarray\n If this is a 1D array, it could have one of few things:\n\n [mu_1, mu_2, ... mu_n, sigma_1, sigma_2, ... 
sigma_n]\n\n Or:\n [mu_1, mu_2, ... mu_n, var_covar_matrix]\n\n where: \n\n var_covar_matrix needs to be reshaped into n-by-n \n \n \"\"\"\n mu = np.asarray(params[:x.shape[0]])\n if len(params) == x.shape[0] * 2:\n sigma = np.diag(params[x.shape[0]:])\n elif len(params) == x.shape[0] + x.shape[0] ** 2:\n mu = params[:x.shape[0]]\n sigma = np.reshape(params[x.shape[0]:], (x.shape[0], x.shape[0]))\n else:\n e_s = \"Inputs to gaussian_kernel don't have the right dimensions\"\n raise ValueError(e_s)\n\n dims = mu.shape[0]\n while len(mu.shape) < len(x.shape): \n mu = mu[..., None]\n\n shape_tuple = x.shape[1:]\n diff = (x - mu).reshape(x.shape[0], -1)\n sigma_inv = nla.inv(sigma)\n mult1 = np.dot(diff.T, sigma_inv)\n mult2 = (np.diag(np.dot(mult1, diff))).reshape(shape_tuple)\n norm_factor = 1/(np.sqrt((2*np.pi)**dims * nla.det(sigma)))\n gauss = norm_factor * np.exp(-0.5 * mult2) \n return gauss\n\n\ndef leastsq_oracle(x, y, kernel, initial=None, bounds=None):\n \"\"\"\n This is a generic oracle function that uses bounded least squares to find\n the parameters in each iteration of EBP, and requires initial parameters. \n\n Parameters\n ----------\n x : ndarray\n Input to the kernel function.\n y : ndarray\n Data to fit to.\n kernel : callalble\n The kernel function to be specified by this oracle.\n initial : list/array\n initial setting for the parameters of the function. This has to be\n something that kernel knows what to do with.\n \"\"\"\n return lsq.leastsqbound(err_func, initial, args=(x, y, kernel),\n bounds=bounds)[0]\n\n\ndef mixture_of_kernels(x, betas, params, kernel):\n \"\"\"\n\n Generate the signal from a mixture of kernels\n\n Parameters\n ----------\n x : ndarray\n\n betas : 1D array\n Coefficients for the linear summation of the kernels\n\n params : list\n A set of parameters for each one of the kernels \n\n kernel : callable\n \n \"\"\"\n betas = np.asarray(betas)\n out = np.zeros(x.shape[1:])\n\n for i in xrange(betas.shape[0]):\n out += np.dot(betas[i], kernel(x, params[i]))\n\n return out\n\ndef kernel_err(y, x, betas, params, kernel):\n \"\"\"\n An error function for a mixture of kernels, each one parameterized by its\n own set of params, and weighted by a beta\n\n\n Note\n ----\n For a given set of betas, params, this can be used as a within set error\n function, or to estimate the cross-validation error against another set of\n y, x values, sub-sampled from the whole original set, or from a left-out\n portion\n \"\"\"\n return y - mixture_of_kernels(x, betas, params, kernel)\n\n \ndef parameters_to_regressors(x, kernel, params):\n \"\"\"\n Maps from parameters to regressors through the kernel function\n\n Parameters\n ----------\n x : ndarray\n Input\n kernel : callable\n The kernel function\n params : list\n The parameters for each one of the kernel functions\n \n \"\"\"\n # Ravel the secondary dimensions of this:\n x = x.reshape(x.shape[0], -1).squeeze()\n regressors = np.zeros((len(params), x.shape[-1]))\n for i, p in enumerate(params):\n regressors[i] = kernel(x, p)\n return regressors.T\n \n\ndef solve_nnls(x, y, kernel=None, params=None, design=None):\n \"\"\"\n Solve the mixture problem using NNLS\n\n Parameters\n ----------\n x : ndarray\n y : ndarray\n\n kernel : callable\n params : list\n\n \"\"\"\n if design is None and (kernel is None or params is None):\n e_s = \"Need to provide either design matrix, or kernel and list of\"\n e_s += \"params for generating the design matrix\"\n raise ValueError(e_s)\n\n if design is None:\n A = parameters_to_regressors(x, 
kernel, params)\n else:\n A = design\n y = y.ravel()\n beta_hat, rnorm = opt.nnls(A, y)\n return beta_hat, rnorm\n \n \ndef elastic_basis_pursuit(x, y, oracle, kernel, initial_theta=None, bounds=None,\n max_iter=1000, beta_tol=10e-6, xval=True):\n \"\"\"\n Elastic basis pursuit\n\n Fit a mixture model::\n\n ..math::\n\n y = \\sum{w_i f_{\\theta_i} (x_i)}\n\n with y data, f a kernel function parameterized by $\\theta_i$ and \\w_i a\n non-negative weight, and x inputs to the kernel function\n\n Parameters\n ----------\n x : 1D/2D array\n The independent variable that produces the data\n\n y : 1D/2D darray\n The data to be fit.\n\n oracle : callable\n This is a function that takes data (`x`/`y`) and a kernel function\n (`kernel`) and returns the params theta for the kernel given x and\n y. The oracle can use any optimization routine, and any cost function\n\n kernel : callable\n A skeleton for the oracle function to optimize. Must take something\n of the dimensions of x (together with params, and with args) and return\n something of the dimensions of y. \n\n initial_theta : list/array\n The initial parameter guess\n\n bounds : the bounds on \n \"\"\"\n fit_x = x[..., ::2]\n validate_x = x[..., 1::2]\n fit_y = y[::2]\n validate_y = y[1::2]\n\n # Initialize a bunch of empty lists to hold the state:\n theta = []\n est = [] \n design_list = []\n r = []\n err = [np.dot(fit_y, fit_y)] # Start with the assumption of \n err_norm = []\n # Initialize the residuals with the fit_data:\n r.append(fit_y)\n\n # Limit this by number of iterations\n for i in range(max_iter):\n theta.append(oracle(fit_x, r[-1], kernel, initial_theta,\n\t\t\t bounds=bounds))\n design = parameters_to_regressors(fit_x, kernel, theta)\n beta_hat, rnorm = solve_nnls(fit_x, fit_y, design=design)\n # Here comes the \"elastic\" bit. 
We exclude kernels with insignificant\n # contributions: \n keep_idx = np.where(beta_hat > beta_tol)\n # We want this to still be a list (so we can 'append'):\n theta = list(np.array(theta)[keep_idx])\n beta_hat = beta_hat[keep_idx]\n design = design[:, keep_idx[0]]\n # Move on with the shrunken basis set:\n est.append(np.dot(design, beta_hat))\n r.append(fit_y - est[-1])\n # Cross-validation:\n xval_design = parameters_to_regressors(validate_x, kernel, theta)\n xval_est = np.dot(xval_design, beta_hat)\n xval_r = validate_y - xval_est\n err.append(np.dot(xval_r, xval_r))\n # If error just grew, we bail:\n if err[i+1] > err[i]:\n break\n\t\n return theta, err, r\n","sub_path":"ebp/elastic_basis_pursuit.py","file_name":"elastic_basis_pursuit.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"648434370","text":"class Solution:\n def firstMissingPositive(self, nums) -> int:\n # Rearrange nums so that nums[k] == k+1 whenever the value k+1 occurs in nums\n # nums[i], nums[nums[i]-1] = nums[nums[i]-1], nums[i]\n # Swapping directly like that breaks: the tuple assignment updates nums[i] first,\n # so the second target nums[nums[i]-1] is resolved against the new value;\n # hence k is computed before each swap below.\n for i in range(len(nums)):\n k = nums[i] - 1\n while 0 <= k < len(nums) and k + 1 != nums[k]:\n nums[i], nums[k] = nums[k], nums[i]\n k = nums[i] - 1\n\n for i in range(len(nums)):\n if nums[i] != i + 1:\n return i + 1\n\n return len(nums) + 1","sub_path":"Problems/041-first-missing-positive/first-missing-positive.py","file_name":"first-missing-positive.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"259641603","text":"##-*************************\n##-* This program is free software; you can use it, redistribute it and/or\n##-* modify it under the terms of the GNU Affero General Public License\n##-* version 3 as published by the Free Software Foundation. The full text of\n##-* the GNU Affero General Public License version 3 can be found in the file\n##-* named 'LICENSE.txt' that accompanies this program. 
This source code is\n##-* (C)copyright Geoffrey French 2011-2014.\n##-*************************\nimport weakref\nimport unittest\nimport gc\n\n\n\nclass IncrementalEvaluationCycleError (Exception):\n\tpass\n\n\n\nclass IncrementalMonitor (object):\n\tUNINITIALISED = 'UNINITIALISED'\n\tREFRESH_REQUIRED = 'REFRESH_REQUIRED'\n\tREFRESH_NOT_REQUIRED = 'REFRESH_NOT_REQUIRED'\n\n\n\t_current_computation = None\n\n\n\tdef __init__(self, owner=None):\n\t\tself._owner = owner\n\t\tself._incremental_state = IncrementalMonitor.UNINITIALISED\n\t\tself._outgoing_dependencies = None\n\t\tself._listeners = None\n\n\n\n\towner = property(lambda self: self._owner)\n\thas_listeners = property(lambda self: (self._listeners is not None and len(self._listeners) > 0))\n\toutgoing_dependences = property(lambda self: (set(self._outgoing_dependencies.keys()) if self._outgoing_dependencies is not None else set()))\n\thas_outgoing_dependences = property(lambda self: (self._outgoing_dependencies is not None and len(self._outgoing_dependencies) > 0))\n\n\n\n\tdef add_listener(self, listener):\n\t\tif self._listeners is None:\n\t\t\tself._listeners = []\n\t\tif listener in self._listeners:\n\t\t\treturn\n\t\tself._listeners.append(listener)\n\n\tdef remove_listener(self, listener):\n\t\tif self._listeners is not None:\n\t\t\ttry:\n\t\t\t\tself._listeners.remove(listener)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\n\n\n\tdef _on_value_access(self):\n\t\tif IncrementalMonitor._current_computation is not None:\n\t\t\tIncrementalMonitor._current_computation._on_incoming_dependency_access(self)\n\n\tdef _notify_changed(self):\n\t\tif self._incremental_state != IncrementalMonitor.REFRESH_REQUIRED:\n\t\t\tself._incremental_state = IncrementalMonitor.REFRESH_REQUIRED\n\t\t\tself._emit_changed()\n\n\t\t\tif self._outgoing_dependencies is not None:\n\t\t\t\tfor dep in self._outgoing_dependencies.keys():\n\t\t\t\t\tdep._notify_changed()\n\n\tdef _notify_refreshed(self):\n\t\tself._incremental_state = IncrementalMonitor.REFRESH_NOT_REQUIRED\n\n\n\tdef _emit_changed(self):\n\t\tif self._listeners is not None:\n\t\t\tfor listener in self._listeners:\n\t\t\t\tlistener(self)\n\n\n\t@staticmethod\n\tdef _push_current_computation(computation):\n\t\tf = IncrementalMonitor._current_computation\n\t\tIncrementalMonitor._current_computation = computation\n\t\treturn f\n\n\t@staticmethod\n\tdef _pop_current_computation(prev_computation):\n\t\tIncrementalMonitor._current_computation = prev_computation\n\n\n\n\t@staticmethod\n\tdef block_access_tracking():\n\t\treturn IncrementalMonitor._push_current_computation(None)\n\n\t@staticmethod\n\tdef unblock_access_tracking(prev_computation):\n\t\tIncrementalMonitor._pop_current_computation(prev_computation)\n\n\n\n\tdef _add_outgoing_dependency(self, dep):\n\t\tif self._outgoing_dependencies is None:\n\t\t\tself._outgoing_dependencies = weakref.WeakKeyDictionary()\n\t\tself._outgoing_dependencies[dep] = None\n\n\tdef _remove_outgoing_dependency(self, dep):\n\t\tif self._outgoing_dependencies is not None:\n\t\t\tdel self._outgoing_dependencies[dep]\n\t\t\tif len(self._outgoing_dependencies) == 0:\n\t\t\t\tself._outgoing_dependencies = None\n\t\telse:\n\t\t\traise KeyError\n\n\n\n\n\n\nclass IncrementalValueMonitor (IncrementalMonitor):\n\tdef on_access(self):\n\t\tself._notify_refreshed()\n\t\tself._on_value_access()\n\n\tdef on_changed(self):\n\t\tself._notify_changed()\n\n\n\n\n\nclass IncrementalFunctionMonitor (IncrementalMonitor):\n\t\"\"\" Incremental Function Monitor\n\n\tMonitor a value 
acquired by evaluating a function.\n\n\tWhen acquiring the value:\n\t\tdef get_value():\n\t\t\ttry:\n\t\t\t\trefresh_value()\n\t\t\tfinally:\n\t\t\t\tinc_fn_mon.on_access()\n\t\t\treturn value_cache\n\n\n\t\tdef refresh_value():\n\t\t\trefresh_state = inc_fn_mon.on_refresh_begin()\n\t\t\ttry:\n\t\t\t\tif refresh_state is not None:\n\t\t\t\t\tvalue_cache = evaluate_function()\n\t\t\tfinally:\n\t\t\t\tinc_fn_mon.on_refresh_end(refresh_state)\n\t\"\"\"\n\t_FLAG_CYCLE_LOCK = 0x1\n\t_FLAG_BLOCK_INCOMING_DEPENDENCIES = 0x2\n\n\tdef __init__(self, owner=None):\n\t\tsuper(IncrementalFunctionMonitor, self).__init__(owner)\n\t\tself._incoming_dependencies = None\n\t\tself.__flags = 0\n\n\n\tincoming_dependencies = property(lambda self: (self._incoming_dependencies if self._incoming_dependencies is not None else set()))\n\n\n\tdef on_access(self):\n\t\tself._on_value_access()\n\n\tdef on_changed(self):\n\t\tself._notify_changed()\n\n\tdef block_and_clear_incoming_dependencies(self):\n\t\tself.__set_flag(self._FLAG_BLOCK_INCOMING_DEPENDENCIES)\n\t\tself._incoming_dependencies = None\n\n\n\tdef on_refresh_begin(self):\n\t\tif self.__test_flag(self._FLAG_CYCLE_LOCK):\n\t\t\traise IncrementalEvaluationCycleError\n\n\t\tself.__clear_flag(self._FLAG_BLOCK_INCOMING_DEPENDENCIES)\n\t\tself.__set_flag(self._FLAG_CYCLE_LOCK)\n\n\t\tif self._incremental_state != IncrementalMonitor.REFRESH_NOT_REQUIRED:\n\t\t\t# Push current computation\n\t\t\told_computation = IncrementalMonitor._push_current_computation(self)\n\n\t\t\trefresh_state = old_computation, self._incoming_dependencies\n\t\t\tself._incoming_dependencies = None\n\t\t\treturn refresh_state\n\t\telse:\n\t\t\treturn None\n\n\n\tdef on_refresh_end(self, refresh_state):\n\t\tif self._incremental_state != IncrementalMonitor.REFRESH_NOT_REQUIRED:\n\t\t\told_computation, prev_incoming_dependencies = refresh_state\n\n\t\t\t# Restore current computation\n\t\t\tIncrementalMonitor._pop_current_computation(old_computation)\n\n\t\t\t# Disconnect the dependencies that are being removed\n\t\t\tif prev_incoming_dependencies is not None:\n\t\t\t\tfor inc in prev_incoming_dependencies:\n\t\t\t\t\tif self._incoming_dependencies is None or inc not in self._incoming_dependencies:\n\t\t\t\t\t\tinc._remove_outgoing_dependency(self)\n\n\t\t\t# Connect new dependencies\n\t\t\tif self._incoming_dependencies is not None:\n\t\t\t\tfor inc in self._incoming_dependencies:\n\t\t\t\t\tif prev_incoming_dependencies is None or inc not in prev_incoming_dependencies:\n\t\t\t\t\t\tinc._add_outgoing_dependency(self)\n\n\n\t\t\tself._incremental_state = IncrementalMonitor.REFRESH_NOT_REQUIRED\n\n\t\tself.__clear_flag(self._FLAG_CYCLE_LOCK)\n\n\n\tdef _on_incoming_dependency_access(self, inc):\n\t\tself._add_incoming_dependency(inc)\n\n\n\tdef _add_incoming_dependency(self, dep):\n\t\tif not self.__test_flag(self._FLAG_BLOCK_INCOMING_DEPENDENCIES):\n\t\t\tif self._incoming_dependencies is None:\n\t\t\t\tself._incoming_dependencies = set()\n\t\t\tself._incoming_dependencies.add(dep)\n\n\n\n\n\n\t#\n\t#\n\t# Flags\n\t#\n\t#\n\n\tdef __clear_flag(self, flag):\n\t\tself.__flags &= ~flag\n\n\n\tdef __set_flag(self, flag):\n\t\tself.__flags |= flag\n\n\n\tdef __set_flag_value(self, flag, value):\n\t\tif value:\n\t\t\tself.__flags |= flag\n\t\telse:\n\t\t\tself.__flags &= ~flag\n\n\n\tdef __test_flag(self, flag):\n\t\treturn (self.__flags & flag) != 0\n\n\n\n\n\nclass Test_IncrementalMonitor (unittest.TestCase):\n\tclass _Counter (object):\n\t\tdef __init__(self):\n\t\t\tself.count = 0\n\n\t\tdef 
__call__(self, *args, **kwargs):\n\t\t\tself.count += 1\n\n\n\tdef signal_counter(self):\n\t\treturn Test_IncrementalMonitor._Counter()\n\n\n\n\t@staticmethod\n\tdef _get_listeners(inc):\n\t\tif inc._listeners is not None:\n\t\t\treturn inc._listeners\n\t\telse:\n\t\t\treturn []\n\n\n\tdef test_listener_refs(self):\n\t\tinc = IncrementalMonitor()\n\n\t\tl1 = self.signal_counter()\n\t\tl2 = self.signal_counter()\n\t\tl3 = self.signal_counter()\n\t\tl4 = self.signal_counter()\n\n\n\t\tinc.add_listener(l1)\n\t\tinc.add_listener(l2)\n\t\tinc.add_listener(l3)\n\t\tinc.add_listener(l4)\n\n\t\tself.assertEqual({l1, l2, l3, l4}, set(self._get_listeners(inc)))\n\n\t\tinc.remove_listener(l4)\n\t\tself.assertEqual({l1, l2, l3}, set(self._get_listeners(inc)))\n\n\t\tinc.remove_listener(l3)\n\t\tself.assertEqual({l1, l2}, set(self._get_listeners(inc)))\n\n\t\tl3 = self.signal_counter()\n\t\tinc.add_listener(l3)\n\t\tinc.add_listener(l3)\n\t\tself.assertEqual({l1, l2, l3}, set(self._get_listeners(inc)))\n\n\t\tinc.remove_listener(l3)\n\t\tinc.remove_listener(l2)\n\t\tself.assertEqual({l1}, set(self._get_listeners(inc)))\n\t\tself.assertTrue(inc.has_listeners)\n\n\t\tinc.remove_listener(l1)\n\t\tself.assertEqual(set(), set(self._get_listeners(inc)))\n\t\tself.assertFalse(inc.has_listeners)\n\n\n\n\n\nclass Test_IncrementalValueMonitor (Test_IncrementalMonitor):\n\tdef test_listener(self):\n\t\tcounter = self.signal_counter()\n\t\tself.assertEqual(0, counter.count)\n\n\t\tinc = IncrementalValueMonitor()\n\n\t\tinc.add_listener(counter)\n\n\t\tinc.on_changed()\n\t\tself.assertEqual(1, counter.count)\n\n\t\tinc.on_changed()\n\t\tself.assertEqual(1, counter.count)\n\n\t\tinc.on_access()\n\t\tinc.on_changed()\n\t\tself.assertEqual(2, counter.count)\n\n\n\n\n\nclass Test_IncrementalFunctionMonitor (Test_IncrementalMonitor):\n\tdef test_listener(self):\n\t\tcounter = self.signal_counter()\n\t\tself.assertEqual(0, counter.count)\n\n\t\tinc = IncrementalFunctionMonitor()\n\n\t\tinc.add_listener(counter)\n\n\t\tinc.on_changed()\n\t\tself.assertEqual(1, counter.count)\n\n\t\tinc.on_changed()\n\t\tself.assertEqual(1, counter.count)\n\n\t\trefresh_state = inc.on_refresh_begin()\n\t\tinc.on_refresh_end(refresh_state)\n\t\tinc.on_changed()\n\t\tself.assertEqual(2, counter.count)\n\n\n\n\n\n\tdef test_chain(self):\n\t\tinc1 = IncrementalFunctionMonitor()\n\t\tinc2 = IncrementalFunctionMonitor()\n\t\tinc3 = IncrementalFunctionMonitor()\n\t\tinc4 = IncrementalFunctionMonitor()\n\n\t\tl1 = self.signal_counter()\n\t\tl2 = self.signal_counter()\n\t\tl3 = self.signal_counter()\n\t\tl4 = self.signal_counter()\n\t\tinc1.add_listener(l1)\n\t\tinc2.add_listener(l2)\n\t\tinc3.add_listener(l3)\n\t\tinc4.add_listener(l4)\n\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\t\tinc1.on_changed()\n\t\tself.assertEqual( 1, l1.count )\n\n\t\trs2 = inc2.on_refresh_begin()\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\t\tinc1.on_access()\n\t\tinc2.on_refresh_end( rs2 )\n\n\t\tself.assertEqual({inc2}, inc1.outgoing_dependences )\n\t\tself.assertEqual({inc1}, inc2.incoming_dependencies )\n\n\t\trs3 = inc3.on_refresh_begin()\n\t\tinc2.on_access()\n\t\tinc3.on_refresh_end( rs3 )\n\n\t\tself.assertEqual({inc3}, inc2.outgoing_dependences )\n\t\tself.assertEqual({inc2}, inc3.incoming_dependencies )\n\n\t\trs4 = inc4.on_refresh_begin()\n\t\tinc2.on_access()\n\t\tinc4.on_refresh_end( rs4 )\n\n\t\tself.assertEqual({inc3, inc4}, inc2.outgoing_dependences )\n\t\tself.assertEqual({inc2}, 
inc4.incoming_dependencies )\n\n\n\t\tinc1.on_changed()\n\t\tself.assertEqual( 2, l1.count )\n\t\tself.assertEqual( 1, l2.count )\n\t\tself.assertEqual( 1, l3.count )\n\t\tself.assertEqual( 1, l4.count )\n\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\t\tinc1.on_changed()\n\t\tself.assertEqual( 3, l1.count )\n\t\tself.assertEqual( 1, l2.count )\n\t\tself.assertEqual( 1, l3.count )\n\t\tself.assertEqual( 1, l4.count )\n\n\t\trs4 = inc4.on_refresh_begin()\n\t\trs2 = inc2.on_refresh_begin()\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\t\tinc1.on_access()\n\t\tinc2.on_refresh_end( rs2 )\n\t\tinc2.on_access()\n\t\tinc4.on_refresh_end( rs4 )\n\t\trs3 = inc3.on_refresh_begin()\n\t\tinc2.on_access()\n\t\tinc3.on_refresh_end( rs3 )\n\n\t\tself.assertEqual({inc2}, inc1.outgoing_dependences )\n\t\tself.assertEqual({inc3, inc4}, inc2.outgoing_dependences )\n\t\tself.assertEqual( set(), inc3.outgoing_dependences )\n\t\tself.assertEqual( set(), inc4.outgoing_dependences )\n\t\tself.assertEqual( set(), inc1.incoming_dependencies )\n\t\tself.assertEqual({inc1}, inc2.incoming_dependencies )\n\t\tself.assertEqual({inc2}, inc3.incoming_dependencies )\n\t\tself.assertEqual({inc2}, inc4.incoming_dependencies )\n\n\t\tinc1.on_changed()\n\t\tself.assertEqual( 4, l1.count )\n\t\tself.assertEqual( 2, l2.count )\n\t\tself.assertEqual( 2, l3.count )\n\t\tself.assertEqual( 2, l4.count )\n\n\n\n\tdef test_block_access_tracking(self):\n\t\tinc1 = IncrementalFunctionMonitor()\n\t\tinc2 = IncrementalFunctionMonitor()\n\t\tinc3 = IncrementalFunctionMonitor()\n\t\tinc4 = IncrementalFunctionMonitor()\n\n\t\tl1 = self.signal_counter()\n\t\tl2 = self.signal_counter()\n\t\tl3 = self.signal_counter()\n\t\tl4 = self.signal_counter()\n\t\tinc1.add_listener(l1)\n\t\tinc2.add_listener(l2)\n\t\tinc3.add_listener(l3)\n\t\tinc4.add_listener(l4)\n\n\n\t\trs4 = inc4.on_refresh_begin()\n\t\trs2 = inc2.on_refresh_begin()\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\t\tinc1.on_access()\n\t\tinc2.on_refresh_end( rs2 )\n\t\tinc2.on_access()\n\t\tinc4.on_refresh_end( rs4 )\n\t\trs3 = inc3.on_refresh_begin()\n\t\tf = IncrementalMonitor.block_access_tracking()\n\t\tinc2.on_access()\n\t\tIncrementalMonitor.unblock_access_tracking( f )\n\t\tinc3.on_refresh_end( rs3 )\n\n\t\tself.assertEqual({inc2}, inc1.outgoing_dependences )\n\t\tself.assertEqual({inc4}, inc2.outgoing_dependences )\n\t\tself.assertEqual(set(), inc3.outgoing_dependences )\n\t\tself.assertEqual(set(), inc4.outgoing_dependences )\n\t\tself.assertEqual(set(), inc1.incoming_dependencies )\n\t\tself.assertEqual({inc1}, inc2.incoming_dependencies )\n\t\tself.assertEqual(set(), inc3.incoming_dependencies )\n\t\tself.assertEqual({inc2}, inc4.incoming_dependencies )\n\n\t\tinc1.on_changed()\n\t\tself.assertEqual(1, l1.count )\n\t\tself.assertEqual(1, l2.count )\n\t\tself.assertEqual(0, l3.count )\n\t\tself.assertEqual(1, l4.count )\n\n\n\n\tdef test_cycle(self):\n\t\tinc1 = IncrementalFunctionMonitor()\n\n\t\trs1 = inc1.on_refresh_begin()\n\n\t\tdef _cycle():\n\t\t\tinc1.on_refresh_begin()\n\n\t\tself.assertRaises(IncrementalEvaluationCycleError, _cycle)\n\n\t\tinc1.on_refresh_end( rs1 )\n\n\n\n\tdef test_unnecessary_refresh(self):\n\t\t# This test is for coverage purposes only\n\t\tinc1 = IncrementalFunctionMonitor()\n\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\n\t\t# Another refresh, without sending an 'on_changed'\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 
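# test_block_access_tracking above relies on block_access_tracking() and
# unblock_access_tracking() being called in a balanced pair. A hedged helper
# (the name untracked is hypothetical) that guarantees the pairing:
def untracked(fn):
	f = IncrementalMonitor.block_access_tracking()
	try:
		return fn()
	finally:
		IncrementalMonitor.unblock_access_tracking(f)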
)\n\n\n\n\tdef test_deps(self):\n\t\tinc1 = IncrementalFunctionMonitor()\n\t\tinc2 = IncrementalFunctionMonitor()\n\t\tinc3 = IncrementalFunctionMonitor()\n\t\tinc4 = IncrementalFunctionMonitor()\n\n\t\trs4 = inc4.on_refresh_begin()\n\t\trs3 = inc3.on_refresh_begin()\n\t\trs2 = inc2.on_refresh_begin()\n\t\trs1 = inc1.on_refresh_begin()\n\t\tinc1.on_refresh_end( rs1 )\n\t\tinc1.on_access()\n\t\tinc2.on_refresh_end( rs2 )\n\t\tinc2.on_access()\n\t\tinc3.on_refresh_end( rs3 )\n\t\tinc3.on_access()\n\t\tinc4.on_refresh_end( rs4 )\n\n\n\t\tself.assertEqual({inc2}, inc1.outgoing_dependences )\n\t\tself.assertEqual({inc3}, inc2.outgoing_dependences )\n\t\tself.assertEqual({inc4}, inc3.outgoing_dependences )\n\t\tself.assertEqual(set(), inc4.outgoing_dependences )\n\t\tself.assertEqual(set(), inc1.incoming_dependencies )\n\t\tself.assertEqual({inc1}, inc2.incoming_dependencies )\n\t\tself.assertEqual({inc2}, inc3.incoming_dependencies )\n\t\tself.assertEqual({inc3}, inc4.incoming_dependencies )\n\n\t\tinc4.on_changed()\n\t\trs4 = inc4.on_refresh_begin()\n\t\tinc4.on_refresh_end( rs4 )\n\n\t\tself.assertEqual({inc2}, inc1.outgoing_dependences )\n\t\tself.assertEqual({inc3}, inc2.outgoing_dependences )\n\t\tself.assertEqual(set(), inc3.outgoing_dependences )\n\t\tself.assertEqual(set(), inc4.outgoing_dependences )\n\t\tself.assertEqual(set(), inc1.incoming_dependencies )\n\t\tself.assertEqual({inc1}, inc2.incoming_dependencies )\n\t\tself.assertEqual({inc2}, inc3.incoming_dependencies )\n\t\tself.assertEqual(set(), inc4.incoming_dependencies )\n\n\t\tdel inc4\n\t\tdel rs4\n\t\tdel rs3\n\t\tdel rs2\n\t\tdel rs1\n\t\tgc.collect()\n\n\t\tself.assertEqual({inc2}, inc1.outgoing_dependences )\n\t\tself.assertEqual({inc3}, inc2.outgoing_dependences )\n\t\tself.assertEqual(set(), inc3.outgoing_dependences )\n\t\tself.assertEqual(set(), inc1.incoming_dependencies )\n\t\tself.assertEqual({inc1}, inc2.incoming_dependencies )\n\t\tself.assertEqual({inc2}, inc3.incoming_dependencies )\n\n","sub_path":"larch/incremental.py","file_name":"incremental.py","file_ext":"py","file_size_in_byte":15181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"118337510","text":"import imgaug as ia\nimport imgaug.augmenters as iaa\nimport numpy as np\nfrom functools import partial\n\ndef val_augs():\n return iaa.SomeOf((0, 1),\n [\n iaa.OneOf([\n iaa.Multiply((0.8, 1.2)),\n iaa.Add((-0.15, 0.15))]),\n iaa.OneOf([\n iaa.AdditiveGaussianNoise(scale=(0, 0.1)),\n iaa.AdditiveLaplaceNoise(scale=(0, 0.1))]),\n iaa.OneOf([\n iaa.GaussianBlur((0.0, 3.0)),\n iaa.AverageBlur(k=(3, 7))])\n ], random_order=True)\n\ndef geo_augs(): \n return iaa.SomeOf((0, 4),\n [ \n iaa.Crop(percent=(0.0, 0.2)),\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5),\n iaa.Affine(scale=(0.8, 2.0), mode=\"symmetric\"),\n iaa.Affine(translate_percent=(-0.4, 0.4), mode=\"symmetric\"),\n # iaa.Affine(rotate=(-180, 180), mode=\"symmetric\"),\n # iaa.Affine(shear=(-25, 25), mode=\"symmetric\")\n ], random_order=True)\n\n\ndef augment(image_batch, mask_batch, dmap_batch, cmap_batch):\n geo_seq = geo_augs().to_deterministic()\n val_seq = val_augs().to_deterministic()\n \n image_aug_batch = geo_seq(images=image_batch)\n image_aug_batch = val_seq(images=image_aug_batch)\n\n geo_augment = partial(geo_seq, images=image_batch)\n _, mask_aug_batch, cmap_aug_batch = geo_augment(segmentation_maps=mask_batch, heatmaps=cmap_batch)\n _, dmap_aug_batch = geo_augment(heatmaps=dmap_batch)\n\n return image_aug_batch, mask_aug_batch, 
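# augment() in augment.py (this snippet) calls .to_deterministic() so the same
# sampled geometric transform is applied to images, masks and heatmaps. A minimal
# self-contained check of that property, using only the public imgaug API; the
# array shapes are illustrative:
import numpy as np
import imgaug.augmenters as iaa

rng = np.random.RandomState(0)
images = rng.randint(0, 255, (2, 32, 32, 3), dtype=np.uint8)
det = iaa.Affine(rotate=(-25, 25)).to_deterministic()
# A deterministic sequence re-samples the same transform on every call:
assert np.array_equal(det(images=images), det(images=images))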
dmap_aug_batch, cmap_aug_batch\n\n \n","sub_path":"QA_Seg_DIS/lib/training/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"462769640","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.views.generic import DetailView\nfrom . import models\nfrom participant import models as participant_models\nfrom projects import models as project_models\nfrom users import models as user_models\n\n\nclass CreateError(Exception):\n    pass\n\n\ndef create(request, project_pk):\n    try:\n        the_project = project_models.Project.objects.get(pk=project_pk)\n        models.Apply.objects.get(Q(project=the_project) & Q(apply_user=request.user) & Q(applyto=the_project.host))\n    except project_models.Project.DoesNotExist:\n        messages.error(request, \"That project does not exist\")\n        return redirect(reverse(\"core:home\"))\n    except models.Apply.DoesNotExist:\n        models.Apply.objects.create(\n            project=the_project, apply_user=request.user, applyto=the_project.host\n        )\n        messages.success(request, \"Your application has been submitted\")\n    return redirect(reverse(\"projects:detail\", kwargs={\"pk\": project_pk}))\n\n\ndef delete(request, project_pk):\n    try:\n        models.Apply.objects.filter(project=project_pk).filter(\n            apply_user=request.user\n        ).delete()\n        messages.success(request, \"Your application has been cancelled\")\n    except models.Apply.DoesNotExist:\n        messages.error(request, \"The application cannot be cancelled\")\n    return redirect(reverse(\"projects:detail\", kwargs={\"pk\": project_pk}))\n\n\nclass ApplyDetailView(DetailView):\n    model = models.Apply\n    template_name = \"applyto/apply_detail.html\"\n\n\ndef toggle_applyto(request, pk):\n    action = request.GET.get(\"action\", None)\n    the_apply = models.Apply.objects.get_or_none(pk=pk)\n    if the_apply is not None and action is not None:\n        if action == \"accept\":\n            ck_dupl = (\n                participant_models.Participant.objects.filter(user=the_apply.apply_user)\n                .filter(project=the_apply.project)\n                .exists()\n            )\n            if ck_dupl:\n                models.Apply.objects.filter(pk=the_apply.pk).delete()\n                messages.error(request, \"Already participating in this project\")\n            else:\n                (\n                    the_participant,\n                    created,\n                ) = participant_models.Participant.objects.get_or_create(\n                    user=the_apply.apply_user, project=the_apply.project\n                )\n                the_participant.user = the_apply.apply_user\n                the_participant.save()\n                models.Apply.objects.filter(pk=the_apply.pk).delete()\n                messages.success(request, \"The application has been accepted\")\n            return redirect(reverse(\"users:profile\", kwargs={\"pk\": request.user.pk}))\n        if action == \"denied\":\n            models.Apply.objects.filter(pk=the_apply.pk).delete()\n            messages.success(request, \"The application has been rejected\")\n            return redirect(reverse(\"users:profile\", kwargs={\"pk\": request.user.pk}))\n\n","sub_path":"applyto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"375648656","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/treadmill/services/localdisk_service.py\n# Compiled at: 2017-04-03 02:32:49\n# Size of source mod 2**32: 15608 bytes\n\"\"\"LVM based local disk management service.\"\"\"\nimport math, errno, logging, os, re, 
subprocess\nfrom .. import cgroups\nfrom .. import exc\nfrom .. import fs\nfrom .. import logcontext as lc\nfrom .. import lvm\nfrom .. import sysinfo\nfrom .. import utils\nfrom .. import subproc\nfrom ._base_service import BaseResourceServiceImpl\n_LOGGER = lc.ContainerAdapter(logging.getLogger(__name__))\nTREADMILL_MIN_VG_SIZE = utils.size_to_bytes('100M')\nTREADMILL_IMG = 'treadmill.img'\n\nclass LocalDiskResourceService(BaseResourceServiceImpl):\n    __doc__ = 'LocalDisk service implementation.\\n    '\n    __slots__ = ('_block_dev', '_default_read_bps', '_default_read_iops', '_default_write_bps',\n                 '_default_write_iops', '_img_location', '_pending', '_reserve',\n                 '_status', '_volumes')\n    PAYLOAD_SCHEMA = (\n     (\n      'size', True, str),)\n    TREADMILL_VG = 'treadmill'\n\n    def __init__(self, block_dev=None, img_location=None, reserve='2G', default_read_bps='20M', default_write_bps='20M', default_read_iops=100, default_write_iops=100):\n        super(LocalDiskResourceService, self).__init__()\n        assert bool(block_dev is None) ^ bool(img_location is None)\n        if block_dev is not None:\n            self._block_dev = block_dev\n            self._reserve = None\n            self._img_location = None\n        else:\n            if img_location is not None:\n                self._reserve = reserve\n                self._img_location = os.path.realpath(img_location)\n                self._block_dev = None\n            else:\n                raise ValueError('Need to provide either a block device or an image location.')\n        self._status = {}\n        self._volumes = {}\n        self._pending = []\n        self._default_read_bps = default_read_bps\n        self._default_write_bps = default_write_bps\n        self._default_read_iops = default_read_iops\n        self._default_write_iops = default_write_iops\n\n    def initialize(self, service_dir):\n        super(LocalDiskResourceService, self).initialize(service_dir)\n        need_init = False\n        try:\n            lvm.vgactivate(group=self.TREADMILL_VG)\n        except subprocess.CalledProcessError:\n            need_init = True\n\n        if need_init:\n            _LOGGER.info('Initializing Volume Group')\n            if self._block_dev is None:\n                self._block_dev = _init_block_dev(self._img_location, self._reserve)\n            _init_vg(self.TREADMILL_VG, self._block_dev)\n        lvs_info = lvm.lvsdisplay(group=self.TREADMILL_VG)\n        for lv in lvs_info:\n            lv['stale'] = True\n            if lv['open_count']:\n                _LOGGER.warning('Logical volume in use: %r', lv['block_dev'])\n                continue\n\n        volumes = {lv['name']:{k:lv[k] for k in ['name', 'block_dev', 'dev_major', 'dev_minor', 'stale']} for lv in lvs_info}\n        self._volumes = volumes\n        self._status = _refresh_vg_status(self.TREADMILL_VG)\n\n    def synchronize(self):\n        \"\"\"Make sure that all stale volumes are removed.\n        \"\"\"\n        modified = False\n        for uniqueid in self._volumes.keys():\n            if not self._volumes[uniqueid].get('stale', False):\n                continue\n            modified = True\n            self._destroy_volume(uniqueid)\n\n        if not modified:\n            return\n        for pending_id in self._pending:\n            self._retry_request(pending_id)\n\n        self._pending = []\n        self._status = _refresh_vg_status(self.TREADMILL_VG)\n\n    def report_status(self):\n        return self._status\n\n    def on_create_request(self, rsrc_id, rsrc_data):\n        app_unique_name = rsrc_id\n        size = rsrc_data['size']\n        read_bps = self._default_read_bps\n        write_bps = self._default_write_bps\n        read_iops = self._default_read_iops\n        write_iops = self._default_write_iops\n        with lc.LogContext(_LOGGER, rsrc_id) as (log):\n            log.logger.info('Processing request')\n            size_in_bytes = utils.size_to_bytes(size)\n            _, uniqueid = app_unique_name.rsplit('-', 1)\n            existing_volume = uniqueid in self._volumes\n            if not existing_volume:\n                needed = math.ceil(size_in_bytes / self._status['extent_size'])\n                if 
needed > self._status['extent_free']:\n                    log.logger.info('Delaying request %r until %d extents are free', rsrc_id, needed)\n                    self._pending.append(rsrc_id)\n                    return\n                lvm.lvcreate(volume=uniqueid, group=self.TREADMILL_VG, size_in_bytes=size_in_bytes)\n                self._status = _refresh_vg_status(self.TREADMILL_VG)\n            lv_info = lvm.lvdisplay(volume=uniqueid, group=self.TREADMILL_VG)\n            cgrp = os.path.join('treadmill', 'apps', app_unique_name)\n            cgroups.create('blkio', cgrp)\n            major, minor = lv_info['dev_major'], lv_info['dev_minor']\n            cgroups.set_value('blkio', cgrp, 'blkio.throttle.write_bps_device', '{major}:{minor} {bps}'.format(major=major, minor=minor, bps=utils.size_to_bytes(write_bps)))\n            cgroups.set_value('blkio', cgrp, 'blkio.throttle.read_bps_device', '{major}:{minor} {bps}'.format(major=major, minor=minor, bps=utils.size_to_bytes(read_bps)))\n            cgroups.set_value('blkio', cgrp, 'blkio.throttle.write_iops_device', '{major}:{minor} {iops}'.format(major=major, minor=minor, iops=write_iops))\n            cgroups.set_value('blkio', cgrp, 'blkio.throttle.read_iops_device', '{major}:{minor} {iops}'.format(major=major, minor=minor, iops=read_iops))\n            volume_data = {k:lv_info[k] for k in ['name', 'block_dev', 'dev_major', 'dev_minor']}\n            self._volumes[lv_info['name']] = volume_data\n            return volume_data\n\n    def on_delete_request(self, rsrc_id):\n        app_unique_name = rsrc_id\n        with lc.LogContext(_LOGGER, rsrc_id):\n            _, uniqueid = app_unique_name.rsplit('-', 1)\n            if not self._destroy_volume(uniqueid):\n                return\n        for pending_id in self._pending:\n            self._retry_request(pending_id)\n\n        self._pending = []\n        self._status = _refresh_vg_status(self.TREADMILL_VG)\n        return True\n\n    def _destroy_volume(self, uniqueid):\n        \"\"\"Try to destroy a volume from LVM.\n        \"\"\"\n        self._volumes.pop(uniqueid, None)\n        try:\n            lvm.lvremove(uniqueid, group=self.TREADMILL_VG)\n        except subprocess.CalledProcessError:\n            _LOGGER.warning('Ignoring unknown volume %r', uniqueid)\n            return False\n\n        _LOGGER.info('Destroyed volume %r', uniqueid)\n        return True\n\n    def _retry_request(self, rsrc_id):\n        \"\"\"Force re-evaluation of a request.\n        \"\"\"\n        request_lnk = os.path.join(self._service_rsrc_dir, rsrc_id)\n        _LOGGER.debug('Updating %r', rsrc_id)\n        try:\n            os.lchown(request_lnk, os.getuid(), os.getgid())\n        except OSError as err:\n            if err.errno != errno.ENOENT:\n                raise\n\n\ndef _refresh_vg_status(group):\n    \"\"\"Query LVM for the current volume group status.\n    \"\"\"\n    vg_info = lvm.vgdisplay(group=group)\n    status = {'name': vg_info['name'], \n     'extent_size': utils.size_to_bytes('{kb}k'.format(kb=vg_info['extent_size'])), \n     'extent_free': vg_info['extent_free'], \n     'extent_nb': vg_info['extent_nb'], \n     'size': utils.size_to_bytes('{kb}k'.format(kb=vg_info['extent_nb'] * vg_info['extent_size']))}\n    _LOGGER.info('Group %r available space: %s (bytes)', group, status['size'])\n    return status\n\n\ndef _init_vg(group, block_dev):\n    \"\"\"Initialize the 'treadmill' volume group.\n\n    :param group:\n        Name of the LVM Volume Group.\n    :type group:\n        ``str``\n    :param block_dev:\n        LVM Physical Volume device backing the Volume Group\n    :type block_dev:\n        ``str``\n    \"\"\"\n    try:\n        lvm.vgactivate(group)\n        return\n    except subprocess.CalledProcessError:\n        pass\n\n    lvm.pvcreate(device=block_dev)\n    lvm.vgcreate(group, device=block_dev)\n    lvm.vgactivate(group)\n\n\ndef _init_block_dev(img_location, reserve='2G'):\n    \"\"\"Initialize a block_dev suitable to back the Treadmill Volume Group.\n\n    The physical volume size will be auto-sized based on the available capacity\n    minus the 
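# Worked example of the extent arithmetic in on_create_request above: with a
# 4 MiB extent size (a common LVM default; the numbers here are illustrative
# only), a 10 MiB volume request needs ceil(10 MiB / 4 MiB) = 3 free extents.
import math
assert math.ceil((10 * 1024 * 1024) / (4 * 1024 * 1024)) == 3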
reserved size.\n\n    :param img_location:\n        Path name to the file which is going to back the new volume group.\n    :type img_location:\n        ``str``\n    :param reserve:\n        Reserved amount of free filesystem space to leave to the OS, in bytes\n        or using a literal qualifier (e.g. \"2G\").\n    :type reserve:\n        ``int`` or ``str``\n    \"\"\"\n    filename = os.path.join(img_location, TREADMILL_IMG)\n    _init_loopback_devices()\n    try:\n        loop_dev = _loop_dev_for(filename)\n    except subprocess.CalledProcessError:\n        loop_dev = None\n        _create_image(TREADMILL_IMG, img_location, reserve)\n\n    if loop_dev is None:\n        subproc.check_call([\n            'losetup',\n            '-f',\n            filename])\n        loop_dev = _loop_dev_for(filename)\n    if loop_dev is None:\n        raise exc.NodeSetupError('Unable to find /dev/loop device')\n    _LOGGER.info('Using %r as backing for the physical volume group', loop_dev)\n    return loop_dev\n\n\ndef _create_image(img_name, img_location, reserve):\n    \"\"\"Create a sparse file of the appropriate size.\n    \"\"\"\n    fs.mkdir_safe(img_location)\n    filename = os.path.join(img_location, img_name)\n    retries = 10\n    while retries > 0:\n        retries -= 1\n        try:\n            stats = os.stat(filename)\n            os.unlink(filename)\n            _LOGGER.info('Disk image found and unlinked: %r; stat: %r', filename, stats)\n        except OSError as err:\n            if err.errno == errno.ENOENT:\n                pass\n            else:\n                raise\n\n        available_size = sysinfo.disk_usage(img_location)\n        reserved_size = utils.size_to_bytes(reserve)\n        image_size_bytes = available_size.free - reserved_size\n        if available_size.free < reserved_size + TREADMILL_MIN_VG_SIZE:\n            raise exc.NodeSetupError('Not enough free disk space')\n        if fs.create_excl(filename, image_size_bytes):\n            break\n\n    if retries == 0:\n        raise exc.NodeSetupError('Something is messing with disk image creation')\n\n\n_TREADMILL_LOOPDEV_NB = 8\n\ndef _init_loopback_devices():\n    \"\"\"Create and initialize loopback devices.\"\"\"\n    for i in range(0, _TREADMILL_LOOPDEV_NB):\n        if not os.path.exists('/dev/loop%s' % i):\n            subproc.check_call(['mknod', '-m660', '/dev/loop%s' % i, 'b',\n                                '7', str(i)])\n            subproc.check_call(['chown', 'root.disk', '/dev/loop%s' % i])\n        continue\n\n\ndef _loop_dev_for(filename):\n    \"\"\"Lookup the loop device associated with a given filename.\n\n    :param filename:\n        Name of the file\n    :type filename:\n        ``str``\n    :returns:\n        Name of the loop device or None if not found\n    :raises:\n        subprocess.CalledProcessError if the file doesn't exist\n    \"\"\"\n    filename = os.path.realpath(filename)\n    loop_dev = subproc.check_output([\n        'losetup',\n        '-j',\n        filename])\n    loop_dev = loop_dev.strip()\n    match = re.match('^(?P<loop_dev>[^:]+):.*\\\\({fname}\\\\)'.format(fname=filename), loop_dev)\n    if match is not None:\n        loop_dev = match.groupdict()['loop_dev']\n    else:\n        loop_dev = None\n    return loop_dev","sub_path":"pycfiles/Treadmill-0.0.2-py3.4/localdisk_service.cpython-34.py","file_name":"localdisk_service.cpython-34.py","file_ext":"py","file_size_in_byte":11713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593981284","text":"import subprocess\nimport sys\nimport unittest\n\nimport dbus\nimport dbus.mainloop.glib\nimport dbusmock\nfrom gi.repository import GLib\n\nfrom bluezero.adapter import Adapter\nfrom bluezero.device import Device\n\n\nclass TestBluezeroDevice(dbusmock.DBusTestCase):\n\n    @classmethod\n    def setUpClass(klass):\n        klass.start_system_bus()\n        klass.dbus_con = klass.get_dbus(True)\n        (klass.p_mock, klass.obj_bluez) = klass.spawn_server_template(\n            'bluez5', {}, stdout=subprocess.PIPE)\n\n    def 
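# Sample of the `losetup -j FILE` output that _loop_dev_for() in
# localdisk_service.py above parses; the exact line format can vary across
# util-linux versions, so this sample is illustrative, not authoritative:
import re
sample = '/dev/loop0: [0802]:131073 (/var/tmp/treadmill/treadmill.img)'
fname = '/var/tmp/treadmill/treadmill.img'
match = re.match('^(?P<loop_dev>[^:]+):.*\\({fname}\\)'.format(fname=fname), sample)
assert match.groupdict()['loop_dev'] == '/dev/loop0'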
setUp(self):\n        self.obj_bluez.Reset()\n        self.dbusmock = dbus.Interface(self.obj_bluez, dbusmock.MOCK_IFACE)\n        self.dbusmock_bluez = dbus.Interface(self.obj_bluez, 'org.bluez.Mock')\n\n    def test_device_name(self):\n        adapter_name = 'hci0'\n        address = '22:22:33:44:55:66'\n        alias = 'Peripheral Device'\n\n        path = self.dbusmock_bluez.AddAdapter(adapter_name, 'my-computer')\n        self.assertEqual(path, '/org/bluez/' + adapter_name)\n        dongle = Adapter('/org/bluez/hci0')\n\n        path = self.dbusmock_bluez.AddDevice(adapter_name,\n                                             address,\n                                             alias)\n        self.assertEqual(path,\n                         '/org/bluez/' + adapter_name + '/dev_' +\n                         address.replace(':', '_'))\n        ble_dev = Device('/org/bluez/hci0/dev_22_22_33_44_55_66')\n        conn_state = ble_dev.name\n        self.assertEqual(conn_state, alias)\n\n    def test_connected(self):\n        adapter_name = 'hci0'\n        address = '22:22:33:44:55:66'\n        alias = 'Peripheral Device'\n\n        path = self.dbusmock_bluez.AddAdapter(adapter_name, 'my-computer')\n        self.assertEqual(path, '/org/bluez/' + adapter_name)\n        dongle = Adapter('/org/bluez/hci0')\n\n        path = self.dbusmock_bluez.AddDevice(adapter_name,\n                                             address,\n                                             alias)\n        self.assertEqual(path,\n                         '/org/bluez/' + adapter_name + '/dev_' +\n                         address.replace(':', '_'))\n        ble_dev = Device('/org/bluez/hci0/dev_22_22_33_44_55_66')\n        conn_state = ble_dev.connected\n        self.assertEqual(conn_state, False)\n        self.dbusmock_bluez.ConnectDevice(adapter_name, address)\n        conn_state = ble_dev.connected\n        self.assertEqual(conn_state, True)\n\n\nif __name__ == '__main__':\n    # avoid writing to stderr\n    unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout,\n                                                     verbosity=2))\n","sub_path":"tests/test_device.py","file_name":"test_device.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"380443132","text":"#In-class activity - Decision statements - 1\n#Rosa Vanessa Palacios Beltran\n#A01652612\n\nlado_x = int(input('Enter the value for side x: '))\nlado_y = int(input('Enter the value for side y: '))\nlado_z = int(input('Enter the value for side z: '))\n\n#All three sides must be positive\nif lado_x <= 0 or lado_y <= 0 or lado_z <= 0:\n    print('Invalid values')\n    quit()\n\n#Triangle inequality: every pair of sides must add up to more than the third side\nif (lado_x + lado_y) <= lado_z or (lado_x + lado_z) <= lado_y or (lado_y + lado_z) <= lado_x:\n    print('Invalid values')\n    quit()\n\nprint(f'The values of (x, y, z) are: {lado_x, lado_y, lado_z}')\n\nif lado_x == lado_y == lado_z:\n    tipo = 'equilateral'#an equilateral triangle has 3 equal sides\nelif lado_x == lado_y or lado_x == lado_z or lado_y == lado_z:\n    tipo = 'isosceles'#an isosceles triangle has 2 equal sides\nelse:\n    tipo = 'scalene'#a scalene triangle has all 3 sides different\n\nprint(f'The triangle type is {tipo}')","sub_path":"semana-1/tarea-semana-1/A01652612-tipos-de-triangulos.py","file_name":"A01652612-tipos-de-triangulos.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"355592006","text":"#!/usr/bin/env python\n'''\nSimulation of electrical stimulations on neurons.\nDetermine the threshold of current delivery needed to elicit an AP on a neuron/axon at various depths.\n'''\nimport LFPy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom 
mpl_toolkits.mplot3d import Axes3D\n# from GeneticOptimization import MEAutility as MEA\nfrom os.path import join\nimport utils\nimport neuron\nimport plotting_convention\n\nplt.close('all')\n\n###############################################################################\n# Main script, set parameters and create cell, synapse and electrode objects\n###############################################################################\n\n# folder = \"morphologies/cell_hallermann_myelin\"\n# folder = \"morphologies/cell_hallermann_unmyelin\"\n# folder = \"morphologies/simple_axon_hallermann\"\nfolder = \"morphologies/HallermannEtAl2012\"\n# folder = \"morphologies/almog\"\n# folder = \"morphologies/hay_model\"\nneuron.load_mechanisms(join(folder))\n# morph = 'patdemo/cells/j4a.hoc', # Mainen&Sejnowski, 1996\nmorph = join(folder, '28_04_10_num19.hoc') # HallermannEtAl2012\n# morph = join(folder, 'A140612.hoc') # Almog model\n# morph = join(folder, 'cell1.hoc') # Hay model\n# morph = join('morphologies', 'axon.hoc') # Mainen&Sejnowski, 1996\n# morph = join(folder, 'cell_simple_long.hoc') # stick model based on Hallermann's\n# custom_code = [join(folder, 'Cell parameters.hoc'),\ncustom_code = [join(folder, 'Cell parameters.hoc'),\n join(folder, 'charge.hoc')]\n # join(folder, 'pruning.hoc')]\n # join(folder, 'custom_code.hoc')]\n # join(folder, 'initialize_mechs.hoc')]\n\n\n# Define cell parameters\ncell_parameters = { # various cell parameters,\n 'morphology': morph, # simplified neuron model from HallermannEtAl2012\n # rm': 30000., # membrane resistance\n 'cm': 1.0, # membrane capacitance\n 'Ra': 150, # axial resistance\n # 'passive_parameters':dict(g_pas=1/30., e_pas=-65),\n 'v_init': -85., # initial crossmembrane potential\n # 'e_pas': -65., # reversal potential passive mechs\n 'passive': False, # switch on passive mechs\n 'nsegs_method': 'lambda_f',\n 'lambda_f': 1000.,\n 'dt': 2.**-6, # [ms] dt's should be in powers of 2 for both,\n 'tstart': -50., # start time of simulation, recorders start at t=0\n 'tstop': 50., # stop simulation at 200 ms. 
These can be overridden\n # by setting these arguments in cell.simulation()\n \"extracellular\": True,\n \"pt3d\": True,\n 'custom_code': custom_code}\n\n\nnames = [\"axon myelin\", \"soma axon myelin\", \"dendrite soma axon myelin\", \"axon nonmyelin\"]\n\nform_name = folder[folder.find('/')+1:]\n\ncell = LFPy.Cell(**cell_parameters)\n# Assign cell positions\n# TEST with different distance between cells\nx_cell_pos = [0, 0, 0, 10]\ny_cell_pos = [0, -10, 10, 0]\n# z_cell_pos = np.zeros(len(x_cell_pos))\nz_cell_pos = [0., -1000 - (np.sum(cell.length)), 0.]\n\n# utils.reposition_stick_horiz(cell)\n# utils.reposition_stick_flip(cell, x_cell_pos[0], y_cell_pos[0], z_cell_pos[0])\n\n# xrot = [0., 2., 1.]\n# cell.set_rotation(y=xrot[cell_id] * np.pi / 2)\ncell.set_rotation(x=np.pi / 2)\ncell.set_pos(x=x_cell_pos[1], y=y_cell_pos[0], z=-np.max(cell.zend))\n\nn_sec, names = utils.get_sections_number(cell)\n\n\nfig = plt.figure(figsize=[10, 8])\nfig.subplots_adjust(wspace=0.1)\n\nax1 = plt.subplot(111, projection=\"3d\",\n title=\"\", aspect=1, xlabel=\"x [$\\mu$m]\",\n ylabel=\"y [$\\mu$m]\", zlabel=\"z [$\\mu$m]\", xlim=[-600, 600], ylim=[-600, 600], zlim=[-1800, 200])\n# c='k', clip_on=False) for idx in range(cell.totnsegs)]\n# [plt.plot([cell.xstart[idx], cell.xend[idx]], [cell.zstart[idx], cell.zend[idx]], '-',\ncmap = plt.cm.viridis\nnorm = mpl.colors.Normalize(vmin=-100, vmax=50)\n# col = (cell.vmem.T[spike_time_loc[0]] + 100) / 150.\n# col = {'soma': 'k', 'axon': 'b', 'dendrite': 'r', }\ncolr = plt.cm.Set2(np.arange(n_sec))\nfor i, sec in enumerate(names):\n [ax1.plot([cell.xstart[idx], cell.xend[idx]],\n [cell.ystart[idx], cell.yend[idx]],\n [cell.zstart[idx], cell.zend[idx]],\n '-', c=colr[i], clip_on=False) for idx in cell.get_idx(sec)]\n if sec != 'soma':\n ax1.plot([cell.xstart[cell.get_idx(sec)[0]], cell.xend[cell.get_idx(sec)[0]]],\n [cell.ystart[cell.get_idx(sec)[0]], cell.yend[cell.get_idx(sec)[0]]],\n [cell.zstart[cell.get_idx(sec)[0]], cell.zend[cell.get_idx(sec)[0]]],\n '-', c=colr[i], clip_on=False, label=sec)\nax1.scatter(cell.xmid[cell.get_idx('soma')[0]], cell.ymid[cell.get_idx('soma')[0]],\n cell.zmid[cell.get_idx('soma')[0]], s=33, marker='o', c='k', alpha=.7, label='soma')\n\nplt.legend()\n# [ax1.plot([cell.xmid[idx]], [cell.ymid[idx]], [cell.zmid[idx]], 'D', c=v_clr(cell.zmid[idx])) for idx in v_idxs]\n# ax1.scatter(source_xs, source_ys, source_zs, c=source_amps)\n# ax1.scatter(cell.xmid[initial], cell.ymid[initial], cell.zmid[initial], '*', c='r')\n# for idx in range(cell.totnsegs):\n# ax1.text(cell.xmid[idx], cell.ymid[idx], cell.zmid[idx], \"{0}.\".format(cell.get_idx_name(idx)[1]))\n\nelev = 15 # Default 30\nazim = 45 # Default 0\nax1.view_init(elev, azim)\n\n\n# ax.axes.set_yticks(yinfo)\n# ax.axes.set_yticklabels(yinfo)\nplt.savefig(\"geo_morph_\" + form_name + \".png\", dpi=200)\nplt.show()\n","sub_path":"geometry_cell.py","file_name":"geometry_cell.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231331766","text":"import numpy as np\r\nfrom classifier import svm, svm_2, svm_test\r\nlambd = .01\r\nX = np.random.rand(4,3)\r\nY = np.array([0,1,2,3])\r\n #np.array([[1,1],[0,0],[2,3]])\r\n#Y = np.array([1,0,2])\r\nbeta, v = svm(X,Y,lambd)\r\n\r\n#Y2 = np.array([1,-1])\r\n#beta2, v2 = svm_2(X,Y2,lambd)\r\n\r\n\r\ntest_error = 
svm_test(X,Y,beta,v)\r\nprint(test_error)","sub_path":"observatory_Anatoly/classifier_test.py","file_name":"classifier_test.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"485984717","text":"from random import randint\n\nfrom django.contrib.auth.models import User\n\nUSERS_NUMBER_RESTRICT = 5\n\n\ndef random_users(request):\n    users_ids = User.objects.values_list('id', flat=True)\n    users_no = len(users_ids)\n    if users_no <= USERS_NUMBER_RESTRICT:\n        return {'random_users': User.objects.all()}\n    random_ids = set()\n    while len(random_ids) != USERS_NUMBER_RESTRICT:\n        i = randint(0, users_no - 1)\n        random_ids.add(users_ids[i])\n    return {'random_users': User.objects.filter(id__in=random_ids)}\n","sub_path":"blog/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"385332900","text":"# Howlongtobeat Custom Class\n# Details available here:\n# https://github.com/ScrappyCocco/HowLongToBeat-PythonAPI/blob/master/howlongtobeatpy/howlongtobeatpy/HowLongToBeatEntry.py\n\nfrom howlongtobeatpy import HowLongToBeat\nimport pandas as pd\n\ndef get_game_data(id):\n    result = HowLongToBeat().search_from_id(id)\n    a = result.__dict__\n    df = pd.DataFrame([a])\n    return df\n\n\n# Load csv\ngames = pd.read_csv('data\\\\mario_games_v2.csv')\n\nmario_game_ids = games['ID']\n\nfor i in range(len(mario_game_ids)):\n    print(i)\n    print(mario_game_ids[i])\n\n    df = get_game_data(mario_game_ids[i])\n\n    if i == 0:\n        output_df = df\n    else:\n        output_df = pd.concat([output_df,df])\n    \n\nprint(output_df)\n\noutput_df.game_id = output_df.game_id.astype(int)\ngames.ID = games.ID.astype(int)\n\noutput_df = pd.merge(output_df, games, left_on='game_id', right_on='ID', how='inner')\n\n# replace halves with 0.5s\noutput_df = output_df.replace('½','.5', regex=True)\n\noutput_df.to_csv('data\\\\fastest_mario.csv', encoding=\"utf-8-sig\", index=False)\n\n","sub_path":"mario_games_completion_times.py","file_name":"mario_games_completion_times.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"180203202","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nThe Redis Client using random.choice(self.redis_servers)\nIt is useful when using proxy like twemproxy\n\nUSAGE:\n    cli = make_redis_proxy_cli(\n        [\"10.4.16.28:6379\", \"10.4.16.29:6379\"],\n        **connection_kwargs, strict_redis=False)\n\n    connection_kwargs can be:\n    db,\n    password,\n    socket_timeout,\n    max_connections (in total, not per server)\n\"\"\"\n\nimport pytools.twemproxy as twemproxy\n\nimport redis\nfrom redis_wrapper import StrictRedisWrapper\n\ndef make_redis_proxy_cli(redis_servers, **connection_kwargs):\n    strict_redis = connection_kwargs.pop('strict_redis', True)\n\n    pool = twemproxy.RandomRedisConnectionPool(redis_servers, **connection_kwargs)\n    if strict_redis:\n        return redis.StrictRedis(connection_pool=pool)\n    else:\n        return redis.Redis(connection_pool=pool)\n\ndef make_redis_proxy_cli2(redis_servers, connection_kwargs=None, redis_kwargs=None, strict_redis=True,\n                          cluster='default', thread_local_pool=False):\n    '''\n    NOTE: set `cluster` to the name of the cluster in use, so that its metrics can be inspected\n\n    :param redis_servers:\n    :param connection_kwargs:\n    :param redis_kwargs:\n    :param strict_redis:\n    :param cluster:\n    :return:\n    '''\n    if connection_kwargs is 
None:\n        connection_kwargs = {}\n    if redis_kwargs is None:\n        redis_kwargs = {}\n    if thread_local_pool:\n        pool = twemproxy.ThreadLocalConnectionPool(redis_servers, **connection_kwargs)\n    else:\n        pool = twemproxy.RandomRedisConnectionPool(redis_servers, **connection_kwargs)\n    if strict_redis:\n        return StrictRedisWrapper(connection_pool=pool, cluster=cluster, **redis_kwargs)\n    else:\n        return redis.Redis(connection_pool=pool, **redis_kwargs)\n\n\ndef make_redis_auto_conf_proxy_cli(cluster, connection_kwargs=None, redis_kwargs=None):\n    '''\n    NOTE: set `cluster` to the name of the cluster in use, so that the server list can be fetched and metrics inspected.\n    When the configuration changes, the client is automatically re-initialized with the new configuration.\n\n    :param cluster:\n    :param connection_kwargs:\n    :param redis_kwargs:\n    :return:\n    '''\n    if connection_kwargs is None:\n        connection_kwargs = {}\n    if redis_kwargs is None:\n        redis_kwargs = {}\n    pool = twemproxy.AutoConfRedisConnectionPool(cluster_name=cluster, **connection_kwargs)\n    return StrictRedisWrapper(connection_pool=pool, cluster=cluster, **redis_kwargs)\n","sub_path":"opt_recommend_local/pytools/redis/redis_proxy.py","file_name":"redis_proxy.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541237227","text":"from plots import *\nimport time\n\n\nclass Solver(object):\n    \"\"\"General solver\"\"\"\n\n    def __init__(self, samples, model_size, model_type, interval_length=1):\n        self.samples = samples\n        self.number_samples = len(samples)\n        self.model_size = model_size\n        self.model_type = model_type\n        self.interval_length = interval_length\n        self.position_estimate = np.linspace(0, self.interval_length, self.number_samples)\n        self.parameter_estimate = np.zeros(1)\n\n    def solve(self):\n        \"\"\"\n        Finds parameters (different for different solvers),\n        and stores results in the fields of the solver.\n\n        \"\"\"\n        raise NotImplementedError\n\n    def test_error(self, signal):\n        \"\"\"\n        Args:\n            signal (SignalModel): signal to compare with\n\n        Returns:\n            float: squared distance between stored signal and the given one\n        \"\"\"\n        return signal.square_error(self.parameter_estimate)\n\n    def get_position_estimates(self):\n        return self.position_estimate\n\n    def get_samples(self):\n        return self.samples\n\n\nclass OrdinaryLS(Solver):\n    \"\"\"Ordinary Least Squares:\n    assume that samples are uniformly spaced on [0,1]\"\"\"\n\n    def __init__(self, samples, model_size, model_type, interval_length=1):\n        super(OrdinaryLS, self).__init__(samples, model_size, model_type, interval_length)\n        self.train_error = 0\n        self.parameter_estimate = np.zeros(self.model_size)\n\n    def solve(self):\n        x = self.model_type.create_ls_matrix(self.position_estimate, self.model_size)\n        estimated = np.linalg.solve(np.dot(x.T, x), np.dot(x.T, self.samples))\n        error = np.linalg.norm(np.dot(x, estimated) - self.samples) / self.number_samples\n        (self.train_error, self.parameter_estimate) = (error, estimated)\n\n\nclass AlternatingLS(Solver):\n    \"\"\"Alternating least squares\n    i. e. 
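# Hedged usage sketch for the redis_proxy.py helpers above; the host:port pairs
# and cluster name are placeholders, and the call mirrors the module's own USAGE
# note rather than documenting a wider API (kept as comments because it needs a
# live twemproxy/redis deployment):
#
#   cli = make_redis_proxy_cli2(
#       ["10.4.16.28:6379", "10.4.16.29:6379"],
#       connection_kwargs={"socket_timeout": 0.5},
#       cluster="my-cluster")
#   cli.set("key", "value")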
alternating least squares with gradient descent\"\"\"\n\n def __init__(self, samples, model_size, model_type,\n show_plots=False, hold_edges=True, stopping_error=1.0e-6, beta=0.01, interval_length=1):\n super(AlternatingLS, self).__init__(samples, model_size, model_type, interval_length)\n self.beta = beta\n self.stopping_error = stopping_error\n self.show_plots = show_plots\n self.hold_edges = hold_edges\n self.max_iterations = 10000\n self.illustration = []\n self.train_error = 0.0\n self.parameter_estimate = np.zeros(self.model_size)\n\n def solve(self):\n\n for k in range(0, self.max_iterations):\n # if k%100==0 and k<5000: #brzydkie\n if k < 5:\n self.illustration.append(self.position_estimate)\n\n x = self.model_type.create_ls_matrix(self.position_estimate, self.model_size)\n self.parameter_estimate = np.linalg.solve(np.dot(x.T, x), np.dot(x.T, self.samples))\n g = self.model_type.compute_ls_gradient(self.position_estimate, self.parameter_estimate, self.samples)\n if self.hold_edges:\n self.position_estimate[1:self.number_samples - 1] -= self.beta * g[1:self.number_samples - 1]\n else:\n self.position_estimate -= self.beta * g\n error = np.linalg.norm(np.dot(x, self.parameter_estimate) - self.samples) / self.number_samples\n\n if self.show_plots & (k % 10 == 0):\n print(error)\n pylab.stem(self.position_estimate, self.samples)\n time.sleep(0.001)\n pylab.pause(0.001)\n if error < self.stopping_error:\n print(\"error small enough\")\n break\n\n\nclass ConstrainedALS(AlternatingLS):\n \"\"\"Alternating least squares for constrained case\n i. e. alternating least squares with gradient descent,\n where parameters of matrix X depend on parameters (tr_param)\"\"\"\n\n def __init__(self, samples, model_size, model_type, start_pos,\n show_plots=False, hold_edges=True, stopping_error=1.0e-9, beta=0.01, interval_length=1, max_iter=10000,\n fl=1.0, angle=0, verbose=True, early_stopping=1.0e-16):\n super(ConstrainedALS, self).__init__(samples, model_size, model_type, show_plots,\n hold_edges, stopping_error, beta, interval_length)\n self.figure, self.axis = pylab.subplots(1, 3)\n assert len(samples) == len(start_pos)\n self.position_estimate = start_pos\n self.start_positions = start_pos\n self.illustration_param = []\n self.tr_param = self.model_type.zero_transformation()\n if self.model_type == SecondSurfacePolynomial:\n self.tr_param[2] = fl\n self.tr_param[0] = angle\n self.max_iterations = max_iter\n self.verb = verbose\n self.tr_params_over_time = []\n self.beta_over_time = []\n self.error_over_time = []\n self.error = np.infty\n self.early_stopping = early_stopping\n\n def solve(self):\n if self.show_plots:\n self.axis[0].set_title(\"beta\")\n self.axis[1].set_title(\"error\")\n self.axis[2].set_title(\"gradient\")\n\n blocked = False\n for k in range(0, self.max_iterations):\n\n # solver is blocked if gradient step would take parameters outside safe intervals\n if not blocked:\n x = self.model_type.create_ls_matrix(self.start_positions, self.model_size, self.tr_param)\n try:\n self.parameter_estimate = np.linalg.solve(np.dot(x.T, x), np.dot(x.T, self.samples))\n except np.linalg.linalg.LinAlgError as lin_err:\n print(lin_err)\n print(\"angle:\", self.tr_param[0])\n break\n\n error = np.linalg.norm(np.dot(x, self.parameter_estimate) - self.samples) / self.number_samples\n\n if error < self.stopping_error:\n if self.verb:\n print(\"error small enough after fitting parameters\")\n break\n\n g = self.model_type.compute_ls_gradient(self.start_positions, self.parameter_estimate, 
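# The normal-equations step used by OrdinaryLS.solve() and AlternatingLS.solve()
# above, shown on a toy quadratic with a Vandermonde design matrix (numpy only;
# model_size=3 here mirrors a degree-2 polynomial fit):
import numpy as np

t = np.linspace(0.0, 1.0, 20)
y = 1.0 + 2.0 * t + 3.0 * t ** 2
X = np.vander(t, 3, increasing=True)  # columns: 1, t, t**2
params = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
assert np.allclose(params, [1.0, 2.0, 3.0])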
self.samples,\n self.tr_param)\n\n if np.max(np.abs(error - self.error)) < self.early_stopping:\n if self.verb:\n print(\"error stopped changing after\", k, \"steps\")\n break\n\n # instead of normalizing the gradient, reduce beta, to prevent parameters outside stable region\n # WARNING this is not a nice hack below:\n\n if self.tr_param[2] > np.abs(np.tan(self.tr_param[0] - self.beta * g[0])) and np.abs(\n self.tr_param[0] - self.beta * g[0]) < (np.pi / 2.0):\n self.tr_param -= self.beta * g\n blocked = False\n else:\n self.beta *= 0.9\n blocked = True\n\n if self.beta < 1e-5:\n if self.verb:\n print(\"beta became to small\")\n break\n\n self.position_estimate = self.model_type.shifted_positions(self.start_positions, self.tr_param)\n\n error = np.linalg.norm(np.dot(x, self.parameter_estimate) - self.samples) / self.number_samples\n\n if error < self.stopping_error:\n if self.verb:\n print(\"error small enough after fitting positions\")\n break\n\n if self.error < error:\n if self.beta > 10 * np.finfo(float).eps:\n self.beta *= 0.9\n\n self.error = error\n\n\n if self.show_plots:\n self.axis[0].plot(k, self.beta, 'go')\n self.axis[1].semilogy(k, self.error, 'ro')\n self.axis[2].semilogy(k, np.abs(g[0]), 'bo')\n pylab.pause(0.1)\n\n self.beta_over_time.append(self.beta)\n self.error_over_time.append(self.error)\n self.tr_params_over_time.append(self.tr_param[0])\n\n if k == self.max_iterations - 1:\n if self.verb:\n print('force stop after', self.max_iterations, 'steps')\n","sub_path":"solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":8219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"40492572","text":"\nimport ismrmrd\nimport os\nimport itertools\nimport logging\nimport numpy as np\nimport numpy.fft as fft\nimport base64\n\n# Folder for debug output files\ndebugFolder = \"/tmp/share/debug\"\n\ndef process(connection, config, metadata):\n logging.info(\"Config: \\n%s\", config)\n\n # Metadata should be MRD formatted header, but may be a string\n # if it failed conversion earlier\n try:\n logging.info(\"Metadata: \\n%s\", metadata.toxml('utf-8'))\n\n logging.info(\"Incoming dataset contains %d encodings\", len(metadata.encoding))\n logging.info(\"First encoding is of type '%s', with a field of view of (%s x %s x %s)mm^3 and a matrix size of (%s x %s x %s)\", \n metadata.encoding[0].trajectory, \n metadata.encoding[0].encodedSpace.matrixSize.x, \n metadata.encoding[0].encodedSpace.matrixSize.y, \n metadata.encoding[0].encodedSpace.matrixSize.z, \n metadata.encoding[0].encodedSpace.fieldOfView_mm.x, \n metadata.encoding[0].encodedSpace.fieldOfView_mm.y, \n metadata.encoding[0].encodedSpace.fieldOfView_mm.z)\n\n except:\n logging.info(\"Improperly formatted metadata: \\n%s\", metadata)\n\n # Continuously parse incoming data parsed from MRD messages\n acqGroup = []\n imgGroup = []\n waveformGroup = []\n try:\n for item in connection:\n # ----------------------------------------------------------\n # Raw k-space data messages\n # ----------------------------------------------------------\n if isinstance(item, ismrmrd.Acquisition):\n # Accumulate all imaging readouts in a group\n if (not item.is_flag_set(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) and\n not item.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA)):\n acqGroup.append(item)\n\n # When this criteria is met, run process_raw() on the accumulated\n # data, which returns images that are sent back to the client.\n if item.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE):\n 
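# The step-acceptance test used by ConstrainedALS.solve() above, extracted into
# a hedged helper (the function name is hypothetical): a gradient step on the
# transformation parameters is only taken while fl > |tan(angle)| and
# |angle| < pi/2 hold after the update, which keeps the solver in its stable region.
import numpy as np

def step_is_safe(angle, fl, beta, grad_angle):
    new_angle = angle - beta * grad_angle
    return fl > np.abs(np.tan(new_angle)) and np.abs(new_angle) < np.pi / 2.0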
logging.info(\"Processing a group of k-space data\")\n image = process_raw(acqGroup, config, metadata)\n logging.debug(\"Sending image to client:\\n%s\", image)\n connection.send_image(image)\n acqGroup = []\n\n # ----------------------------------------------------------\n # Image data messages\n # ----------------------------------------------------------\n elif isinstance(item, ismrmrd.Image):\n # Only process magnitude images -- send phase images back without modification\n if item.image_type is ismrmrd.IMTYPE_MAGNITUDE:\n imgGroup.append(item)\n else:\n connection.send_image(item)\n continue\n\n # When this criteria is met, run process_group() on the accumulated\n # data, which returns images that are sent back to the client.\n # TODO: logic for grouping images\n if False:\n logging.info(\"Processing a group of images\")\n image = process_image(imgGroup, config, metadata)\n logging.debug(\"Sending image to client:\\n%s\", image)\n connection.send_image(image)\n imgGroup = []\n\n # ----------------------------------------------------------\n # Waveform data messages\n # ----------------------------------------------------------\n elif isinstance(item, ismrmrd.Waveform):\n waveformGroup.append(item)\n\n elif item is None:\n break\n\n else:\n logging.error(\"Unsupported data type %s\", type(item).__name__)\n\n # Extract raw ECG waveform data. Basic sorting to make sure that data \n # is time-ordered, but no additional checking for missing data.\n # ecgData has shape (5 x timepoints)\n if len(waveformGroup) > 0:\n waveformGroup.sort(key = lambda item: item.time_stamp)\n ecgData = [item.data for item in waveformGroup if item.waveform_id == 0]\n ecgData = np.concatenate(ecgData,1)\n\n # Process any remaining groups of raw or image data. This can \n # happen if the trigger condition for these groups are not met.\n # This is also a fallback for handling image data, as the last\n # image in a series is typically not separately flagged.\n if len(acqGroup) > 0:\n logging.info(\"Processing a group of k-space data (untriggered)\")\n image = process_raw(acqGroup, config, metadata)\n logging.debug(\"Sending image to client:\\n%s\", image)\n connection.send_image(image)\n acqGroup = []\n\n if len(imgGroup) > 0:\n logging.info(\"Processing a group of images (untriggered)\")\n image = process_image(imgGroup, config, metadata)\n logging.debug(\"Sending image to client:\\n%s\", image)\n connection.send_image(image)\n imgGroup = []\n\n finally:\n connection.send_close()\n\n\ndef process_raw(group, config, metadata):\n # Create folder, if necessary\n if not os.path.exists(debugFolder):\n os.makedirs(debugFolder)\n logging.debug(\"Created folder \" + debugFolder + \" for debug output files\")\n\n # Sort by line number (incoming data may be interleaved)\n lin = [acquisition.idx.kspace_encode_step_1 for acquisition in group]\n logging.debug(\"Incoming lin ordering: \" + str(lin))\n\n group.sort(key = lambda acq: acq.idx.kspace_encode_step_1)\n sortedLin = [acquisition.idx.kspace_encode_step_1 for acquisition in group]\n logging.debug(\"Sorted lin ordering: \" + str(sortedLin))\n\n # Format data into single [cha RO PE] array\n data = [acquisition.data for acquisition in group]\n data = np.stack(data, axis=-1)\n\n logging.debug(\"Raw data is size %s\" % (data.shape,))\n np.save(debugFolder + \"/\" + \"raw.npy\", data)\n\n # Fourier Transform\n data = fft.fftshift(data, axes=(1, 2))\n data = fft.ifft2(data)\n data = fft.ifftshift(data, axes=(1, 2))\n\n # Sum of squares coil combination\n data = np.abs(data)\n 
data = np.square(data)\n data = np.sum(data, axis=0)\n data = np.sqrt(data)\n\n logging.debug(\"Image data is size %s\" % (data.shape,))\n np.save(debugFolder + \"/\" + \"img.npy\", data)\n\n # Normalize and convert to int16\n data *= 32767/data.max()\n data = np.around(data)\n data = data.astype(np.int16)\n\n # Remove phase oversampling\n nRO = np.size(data,0)\n data = data[int(nRO/4):int(nRO*3/4),:]\n logging.debug(\"Image without oversampling is size %s\" % (data.shape,))\n np.save(debugFolder + \"/\" + \"imgCrop.npy\", data)\n\n # Format as ISMRMRD image data\n image = ismrmrd.Image.from_array(data, acquisition=group[0])\n image.image_index = 1\n\n # Set ISMRMRD Meta Attributes\n meta = ismrmrd.Meta({'DataRole': 'Image',\n 'ImageProcessingHistory': ['FIRE', 'PYTHON'],\n 'WindowCenter': '16384',\n 'WindowWidth': '32768'})\n xml = meta.serialize()\n logging.debug(\"Image MetaAttributes: %s\", xml)\n logging.debug(\"Image data has %d elements\", image.data.size)\n\n image.attribute_string = xml\n\n # Call process_image() to invert image contrast\n image = process_image([image], config, metadata)\n\n return image\n\n\ndef process_image(images, config, metadata):\n # Create folder, if necessary\n if not os.path.exists(debugFolder):\n os.makedirs(debugFolder)\n logging.debug(\"Created folder \" + debugFolder + \" for debug output files\")\n\n logging.debug(\"Incoming image data of type %s\", ismrmrd.get_dtype_from_data_type(images[0].data_type))\n\n # Display MetaAttributes for first image\n meta = ismrmrd.Meta.deserialize(images[0].attribute_string)\n logging.debug(\"MetaAttributes: %s\", ismrmrd.Meta.serialize(meta))\n\n # Optional serialization of ICE MiniHeader\n if 'IceMiniHead' in meta:\n logging.debug(\"IceMiniHead: %s\", base64.b64decode(meta['IceMiniHead']).decode('utf-8'))\n\n # Extract image data into a 5D array of size [img cha z y x]\n data = np.stack([img.data for img in images])\n head = [img.getHead() for img in images]\n\n logging.debug(\"Original image data is size %s\" % (data.shape,))\n np.save(debugFolder + \"/\" + \"imgOrig.npy\", data)\n\n # Normalize and convert to int16\n data = data.astype(np.float64)\n data *= 32767/data.max()\n data = np.around(data)\n data = data.astype(np.int16)\n\n # Invert image contrast\n data = 32767-data\n data = np.abs(data)\n data = data.astype(np.int16)\n np.save(debugFolder + \"/\" + \"imgInverted.npy\", data)\n\n # Re-slice back into 2D images\n imagesOut = [None] * data.shape[0]\n for iImg in range(data.shape[0]):\n # Create new MRD instance for the inverted image\n imagesOut[iImg] = ismrmrd.Image.from_array(data[iImg,...].transpose())\n data_type = imagesOut[iImg].data_type\n\n # Copy the fixed header information\n oldHeader = head[iImg]\n oldHeader.data_type = data_type\n\n imagesOut[iImg].setHead(oldHeader)\n\n # Set ISMRMRD Meta Attributes\n meta = ismrmrd.Meta({'DataRole': 'Image',\n 'ImageProcessingHistory': ['FIRE', 'PYTHON'],\n 'WindowCenter': '16384',\n 'WindowWidth': '32768'})\n xml = meta.serialize()\n logging.debug(\"Image MetaAttributes: %s\", xml)\n logging.debug(\"Image data has %d elements\", imagesOut[iImg].data.size)\n\n imagesOut[iImg].attribute_string = xml\n\n return imagesOut\n","sub_path":"invertcontrast.py","file_name":"invertcontrast.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"624302319","text":"import unittest\nimport os\nimport sys \nunitTestPath = os.path.dirname(__file__) + 
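# The int16 windowing and contrast inversion performed in process_image() of
# invertcontrast.py above, demonstrated on a toy array (values illustrative):
import numpy as np

data = np.array([[0.0, 0.5], [1.0, 2.0]])
data *= 32767 / data.max()                    # scale into the 0..32767 window
data = np.around(data).astype(np.int16)
inverted = (32767 - data).astype(np.int16)    # invert contrast
assert inverted.max() == 32767 and inverted.min() == 0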
\"/../Source\"\nsys.path.append(unitTestPath)\n\nfrom Heuristics import *\n\nclass TestTilesClass(unittest.TestCase) :\n\n def test_HeuristicScoreWord_AnyWord_ReturnsZero(self) :\n # Arrange\n expected = 0\n hand = []\n word = \"TEST\"\n heuristic = Heuristic()\n\n # Act\n result = heuristic.ScoreWord(word, hand)\n\n # Assert\n self.assertEqual(result, expected)\n\n def test_LongestWordHeuristicScoreWord_FourLetterWord_ReturnsFour(self) :\n # Arrange\n expected = 4\n hand = []\n word = \"FOUR\"\n heuristic = LongestWordHeuristic()\n\n # Act\n result = heuristic.ScoreWord(word, hand)\n\n # Assert\n self.assertEqual(result, expected)\n\n def test_LetterScoringHeuristicScoreWord_A_ReturnsOne(self) :\n # Arrange\n expected = 1\n hand = []\n word = \"A\"\n heuristic = LetterScoringHeuristic()\n\n # Act\n result = heuristic.ScoreWord(word, hand)\n\n # Assert\n self.assertEqual(result, expected)\n\n def test_LetterScoringHeuristicScoreWord_QUEEN_ReturnsFourteen(self) :\n # Arrange\n expected = 14\n hand = []\n word = \"QUEEN\"\n heuristic = LetterScoringHeuristic()\n\n # Act\n result = heuristic.ScoreWord(word, hand)\n\n # Assert\n self.assertEqual(result, expected)\n\nif __name__ == '__main__' :\n unittest.main()\n","sub_path":"UnitTests/HeuristicsTest.py","file_name":"HeuristicsTest.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631122306","text":"#!/usr/bin/python2\n\nfrom readCSV import *\nimport matplotlib.pyplot as plt\nimport sys\n\nyear = int(sys.argv[1])\nmonth = int(sys.argv[2])\nday = int(sys.argv[3])\nhour1 = int(sys.argv[4])\nminute1 = int(sys.argv[5])\nhour2 = int(sys.argv[6])\nminute2 = int(sys.argv[7])\ntitle = str(sys.argv[8])\n\ncsv2 = readCSV(\"Files/TRN_2010_2011_50m_18_C.hour.CO2\", \"\\t\")\ncsv3 = readCSV(\"Files/TRN_2010_2011_100m_18_C.hour.CO2\", \" \")\ncsv4 = readCSV(\"Files/TRN_2010_2011_180m_18_C.hour.CO2\", \" \")\ncsv5 = readCSV(\"Files/FTIR/or_sl_scd-0_2011.vav.ada.aia.oof\", \" \")\n\nlistColumns2 = []#Tower50\nlistColumns3 = []#Tower100\nlistColumns4 = []#Tower180\nlistColumns5 = []#FTIR\n\nlistColumns2.append(csv2.findColumn(\"co2\", 0))\nlistColumns2.append(csv2.findColumn(\"Year\", 0))\nlistColumns2.append(csv2.findColumn(\"Month\", 0))\nlistColumns2.append(csv2.findColumn(\"Day\", 0))\nlistColumns2.append(csv2.findColumn(\"Hour\", 0))\ncsv2.getColumns(listColumns2, 1)\ncsv2.delBorder(listColumns2[0], [0, 800])\ncsv2.columnToDatetime([listColumns2[1], listColumns2[2], listColumns2[3], listColumns2[4]], \"%Y %m %d %H \")\n#csv2.delBorder(-1, [dt.datetime(year, month, day, hour1, minute1), dt.datetime(year, month, day, hour2, minute2)])\n\n\nlistColumns3.append(csv3.findColumn(\"co2\", 0))\nlistColumns3.append(csv3.findColumn(\"Year\", 0))\nlistColumns3.append(csv3.findColumn(\"Month\", 0))\nlistColumns3.append(csv3.findColumn(\"Day\", 0))\nlistColumns3.append(csv3.findColumn(\"Hour\", 0))\ncsv3.getColumns(listColumns3, 1)\ncsv3.delBorder(listColumns3[0], [0, 800])\ncsv3.columnToDatetime([listColumns3[1], listColumns3[2], listColumns3[3], listColumns3[4]], \"%Y %m %d %H \")\n#csv3.delBorder(-1, [dt.datetime(year, month, day, hour1, minute1), dt.datetime(year, month, day, hour2, minute2)])\n\n\nlistColumns4.append(csv4.findColumn(\"co2\", 0))\nlistColumns4.append(csv4.findColumn(\"Year\", 0))\nlistColumns4.append(csv4.findColumn(\"Month\", 0))\nlistColumns4.append(csv4.findColumn(\"Day\", 0))\nlistColumns4.append(csv4.findColumn(\"Hour\", 
0))\ncsv4.getColumns(listColumns4, 1)\ncsv4.delBorder(listColumns4[0], [0, 800])\ncsv4.columnToDatetime([listColumns4[1], listColumns4[2], listColumns4[3], listColumns4[4]], \"%Y %m %d %H \")\n#csv4.delBorder(-1, [dt.datetime(year, month, day, hour1, minute1), dt.datetime(year, month, day, hour2, minute2)])\n\n\nlistColumns5.append(csv5.findColumn(\"xco2(ppm)\", 277))\nlistColumns5.append(csv5.findColumn(\"year\", 277))\nlistColumns5.append(csv5.findColumn(\"day\", 277))\nlistColumns5.append(csv5.findColumn(\"hour\", 277))\ncsv5.getColumns(listColumns5, 278)\ncsv5.delBorder(listColumns5[0], [0, 800])\ncsv5.columnToDatetime([listColumns5[1], listColumns5[2], listColumns5[3], ], \"%Y %j %H %M \", realHour=listColumns5[3])\n#csv5.delBorder(-1, [dt.datetime(year, month, day, hour1, minute1), dt.datetime(year, month, day, hour2, minute2)])\n\n\n#print(csv5.returnColumn(-1))\nplt.plot(csv2.returnColumnByIndex(-1), csv2.returnColumn(csv2.findColumn(\"co2\", 0)), marker=\"+\", color=\"blue\", linestyle=\" \", label=\"TRN_2010_2011_50m_18_C.hour.CO2\")\nplt.plot(csv3.returnColumnByIndex(-1), csv3.returnColumn(csv3.findColumn(\"co2\", 0)), marker=\"+\", color=\"red\", linestyle=\" \", label=\"TRN_2010_2011_100m_18_C.hour.CO2\")\nplt.plot(csv4.returnColumnByIndex(-1), csv4.returnColumn(csv4.findColumn(\"co2\", 0)), marker=\"+\", color=\"green\", linestyle=\" \", label=\"TRN_2010_2011_180m_18_C.hour.CO2\")\nplt.plot(csv5.returnColumnByIndex(-1), csv5.returnColumn(csv5.findColumn(\"xco2(ppm)\", 277)), marker=\"o\", color=\"black\", linestyle=\" \", label=\"or_sl_scd-0_2011.vav.ada.aia.oof\") \nplt.legend()\nplt.xlim([dt.datetime(2011,1,1),dt.datetime(2011,12,31)])\nplt.ylim(375, 475)\nplt.title(title)\nplt.xlabel(\"Date\")\nplt.ylabel(\"co2 (ppm)\")\nplt.show()\n","sub_path":"Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"178484625","text":"from sys import argv\n\nkeyword = \"\"\nfor x in argv[1]:\n\tkeyword += x + x\nkeyword = keyword.upper()\n\nstring = \"\"\n\nfor i in range(2, len(argv)):\n\tstring += argv[i].upper()\n\n# This is specifically for the keyword\ndef letter_to_nums(string):\n\tarr = []\n\tfor i in string:\n\t\t# 65 is the ASCII value of 'A'\n\t\tarr.append(ord(i) - 65)\n\treturn arr\n\ndef decrypt(keyarr, string):\n\tx = 0\n\tend_string = \"\"\n\n\tfor i in string:\n\t\tval = ord(i)\n\t\tnew_val = val + keyarr[x] + 1\n\n\t\tif new_val > 90:\n\t\t\tnew_val += 6\n\n\n\t\tif x == len(keyarr) - 1:\n\t\t\tx = 0\n\t\telse:\n\t\t\tx += 1\n\n\t\tend_string += chr(new_val)\n\n\treturn end_string.lower()\n\nprint(keyword, string)\nkeyarr = letter_to_nums(keyword)\nprint(keyarr)\nprint(decrypt(keyarr, string))\n","sub_path":"encryption/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"162187943","text":"#!/usr/bin/env python3\n\ninitial_state = input().split(\": \")[1]\ninput()\n\nstate = \".\" * 100 + initial_state + \".\" * 100\n\npatterns = []\nwhile True:\n try:\n line = input()\n patterns.append((line[0:5], line[9]))\n except:\n break\n\nfor _ in range(20):\n new_state = [\".\"] * len(state)\n for i in range(len(state)):\n for p in patterns:\n if state[i-2:i+3] == p[0]:\n new_state[i] = p[1]\n state = \"\".join(new_state)\n\nres = 0\nfor i in range(len(state)):\n if state[i] == \"#\":\n res += i - 
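# The function named `decrypt` in vigenere.py above actually shifts characters
# *forward* by the doubled keyword (i.e. it enciphers), and its `+ 6` hop skips
# the six ASCII codes between 'Z' (90) and 'a' (97). For contrast, a standard
# uppercase Vigenere encipher works mod 26 (hedged sketch, not the author's scheme):
def vigenere_encrypt(plaintext, keyword):
	out = ""
	for i, ch in enumerate(plaintext.upper()):
		k = ord(keyword.upper()[i % len(keyword)]) - 65  # key letter as 0..25
		out += chr((ord(ch) - 65 + k) % 26 + 65)
	return out

assert vigenere_encrypt("HELLO", "KEY") == "RIJVS"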
100\nprint(res)","sub_path":"12/solution-A.py","file_name":"solution-A.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"79216449","text":"\"\"\"\n    Phillip Lane\n    Program Assignment #1 for UAH CS 317\n\n    This program reads in a text file, alphabetizes the words in said file, then writes back out in a formatted manner.\n    The formatting and the files are both user-inputted.\n\"\"\"\n\nimport random\nimport datetime\nimport re\nimport functools\n\n# TO DO: Write a comprehensive paper on all of my grievances with this language, and why it should never be used.\n# WILL DO: Write a paper on the execution model of this language and why it's one of the worst of any modern language.\n\nclass Sorter:\n    copyList = [] #define a list used for copying each iteration\n\n    def __init__(self): #default constructor, not necessary because this is effectively a static class\n        pass\n\n    @staticmethod\n    def __Quicksort(inList, start, end): #internal quicksort\n        if end - start < 2: #if length is 0 or 1, already sorted\n            return\n\n        pivot = random.randint(0, end - start - 1) + start #decide a pivot\n        pivotValue = inList[pivot] #get the value at that pivot\n\n        insert = start #where to start inserting into copyList\n        for i in range(start, end): #iterate through the list\n            if i == pivot: #don't copy the pivot\n                continue\n\n            if inList[i] < pivotValue: #copy everything less than the pivot\n                Sorter.copyList[insert] = inList[i]\n                insert += 1\n\n        Sorter.copyList[insert] = pivotValue #copy in the pivot\n        newSplit = insert #define where to recursively sort the list\n        insert += 1\n\n        for i in range(start, end): #iterate through the list again\n            if i == pivot: #don't copy the pivot\n                continue\n\n            if inList[i] >= pivotValue: #copy everything greater than or equal to the pivot\n                Sorter.copyList[insert] = inList[i]\n                insert += 1\n\n        for i in range(start, end): #copy the copyList back into the original list\n            inList[i] = Sorter.copyList[i]\n\n        Sorter.__Quicksort(inList, start, newSplit) #recursively sort bottom half\n        Sorter.__Quicksort(inList, newSplit + 1, end) #recursively sort top half\n\n    @staticmethod\n    def Quicksort(inList): #public quicksort\n        Sorter.copyList = [0] * len(inList) #create the copy list with len(inList) elements (fun Python syntax that I actually do enjoy, which is few and far between in this language)\n        Sorter.__Quicksort(inList, 0, len(inList)) #sort the list, passing in the upper and lower bounds\n\ndef main():\n    infile = input(\"Input the file to read from:\\n\")\n    inf = open(infile, \"r\", encoding=\"utf-8\") #read a UTF-8 input\n    myList = [i for i in re.split(\" |\\r\\n|\\n\", str(inf.read())) if i != \"\"] #separate into individual words\n\n    t0 = datetime.datetime.now() #start timing\n    Sorter.Quicksort(myList) #sort list\n    elapsed = datetime.datetime.now() - t0 #get elapsed time\n\n    outfile = input(\"Input the file to write to:\\n\")\n    outf = open(outfile, \"w\", encoding=\"utf-8\")\n\n    wordsPL = int(input(\"Input how many words you would like printed per line:\\n\")) #prompt user for input\n    longestWord = functools.reduce(lambda x, y: max(x, y), [len(i) for i in myList]) #calculate the longest word for use later\n\n    for i in range(0, len(myList)):\n        for j in range(0, wordsPL):\n            if i * wordsPL + j >= len(myList): #break if finished printing list\n                break\n\n            outf.write(\"'{}'\".format(myList[i * wordsPL + j]) + \" \" * (longestWord + 1 - len(myList[i * wordsPL + j]))) #output the list\n        \n        if i * wordsPL + wordsPL >= 
len(myList): #break if finished printing list\n break\n outf.write(\"\\n\")\n\n #print(myList)\n print(\"Elapsed time: {} ms\".format(elapsed.total_seconds() * 1000)) #print elapsed time to sort list\n\n inf.close()\n outf.close() #close the files\n\n input()\n\nmain()\n","sub_path":"CS 317 Homework 1 Python/Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357572875","text":"from django import template\nfrom ..models import Category, Article\nfrom datetime import datetime, timedelta\nfrom django.db.models import Count, Q\nfrom django.contrib.contenttypes.models import ContentType\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef title():\n\treturn \"وبلاگ جنگویی\"\n\n\n@register.inclusion_tag(\"blog/partials/category_navbar.html\")\ndef category_navbar():\n\treturn {\n\t\t\"categories\": Category.objects.filter(status=True),\n\t}\n\n@register.inclusion_tag(\"blog/partials/sidebar.html\")\ndef popular_articles():\n\tlast_month = datetime.today() - timedelta(days=30)\n\treturn {\n\t\t\"articles\": Article.objects.published().annotate(count=Count('hits', filter=Q(articlehits__created__gte=last_month))).order_by('-count', '-publish')[:5],\n\t\t\"title\": \"مقالالت پر بازدید ماه\"\n\t}\n\n@register.inclusion_tag(\"blog/partials/sidebar.html\")\ndef hot_articles():\n\tlast_month = datetime.today() - timedelta(days=30)\n\tcontent_type_id = ContentType.objects.get(app_label='blog', model='article').id\n\treturn {\n\t\t\"articles\": Article.objects.published().annotate(count=Count('comments', filter=Q(comments__posted__gte=last_month) and Q(comments__content_type_id=content_type_id))).order_by('-count', '-publish')[:5],\n\t\t\"title\": \"مقالات داغ ماه\"\n\t}\n\n\n@register.inclusion_tag(\"registration/partials/link.html\")\ndef link(request, link_name, content, classes):\n\treturn {\n\t\t\"request\": request,\n\t\t\"link_name\": link_name,\n\t\t\"link\": \"account:{}\".format(link_name),\n\t\t\"content\": content,\n\t\t\"classes\": classes,\n\t}","sub_path":"blog/templatetags/base_tags.py","file_name":"base_tags.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"95980618","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @param sum, an integer\n # @return a list of lists of integers\n def pathSum(self, root, sum):\n res = []\n cur_path = []\n cur_sum = 0\n self.path_sum(root, cur_sum, sum, cur_path, res)\n return res\n \n def path_sum(self, node, cur_sum, sum, cur_path, res):\n if node is None:\n return\n if node.left is None and node.right is None:\n if cur_sum + node.val == sum:\n new_res = list(cur_path)\n new_res.append(node.val)\n res.append(new_res)\n else:\n cur_path.append(node.val)\n self.path_sum(node.left, cur_sum + node.val, sum, cur_path, res)\n self.path_sum(node.right, cur_sum + node.val, sum, cur_path, res)\n del cur_path[-1] \n","sub_path":"src/main/python/lc/path_sum_2.py","file_name":"path_sum_2.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"488056241","text":"from django.shortcuts import render\nfrom django.shortcuts import HttpResponse,HttpResponseRedirect\nfrom django.shortcuts import 
render\nfrom django.db import connection\nfrom django.views.generic import View\nfrom django_redis import get_redis_connection\nimport redis\n# login view\nclass Login(View):\n\n    def get(self,request):\n        return render(request, 'login.html')\n\n    def post(self,request):\n        user = request.POST.get('username', None)\n        pwd = request.POST.get('password', None)\n        if user == 'admin' and pwd == '1111':\n            return HttpResponse('login successful')\n        else:\n            msg = 'wrong username or password'\n            return render(request, 'login.html', {'msg': msg})\n\n\n# index view\nclass Index(View):\n\n    def get(self,request):\n        # connect to redis\n        # conn = redis.Redis(host='127.0.0.1',port=6379,password='',decode_responses=True,db=1)\n        conn = get_redis_connection('default')\n        # connect to mysql\n        cur = connection.cursor()\n        sql1 = (\n            '''\n            select * from school_detail where school_id >50000 ORDER BY class_count desc\n            '''\n        )\n        cur.execute(sql1)\n        row1 = cur.fetchall()\n        cur.close()\n\n        # distribution of school types\n        school_data1 = []\n        school_data2 = []\n        school_data3 = []\n        school_data4 = []\n        school_data5 = []\n\n        for i in row1:\n            if i[4] == 1:\n                school_data1.append(i[4])\n            elif i[4] == 2:\n                school_data2.append(i[4])\n            elif i[4] == 3:\n                school_data3.append(i[4])\n            elif i[4] == 4:\n                school_data4.append(i[4])\n            else:\n                school_data5.append(i[4])\n\n        if not conn.exists(\"xdata\", \"ydata\"):\n            cur = connection.cursor()\n            sql2 = (\n                '''\n                SELECT DATE_FORMAT(c_time,'%Y-%m'),count(school_id) from school_detail where school_id >50000 \n                and school_type in (3,4) GROUP BY DATE_FORMAT(c_time,'%Y-%m')\n                '''\n            )\n            cur.execute(sql2)\n            row2 = cur.fetchall()\n            cur.close()\n            # trend of newly opened schools over time\n            xdata = []\n            ydata = []\n            for i in row2:\n                xdata.append(i[0])\n                ydata.append(i[1])\n\n            for x in xdata:\n                conn.rpush(\"xdata\",x)\n                # set cache expiry\n                conn.expire(\"xdata\", 60*60*24)\n\n            for y in ydata:\n                conn.rpush(\"ydata\",y)\n                # set cache expiry\n                conn.expire(\"ydata\", 60*60*24)\n        x_data=conn.lrange(\"xdata\",0,-1)\n        y_data=conn.lrange(\"ydata\",0,-1)\n\n        context = {\n            'row1': row1,\n            'school_data1':len(school_data1),\n            'school_data2':len(school_data2),\n            'school_data3':len(school_data3),\n            'school_data4':len(school_data4),\n            'school_data5':len(school_data5),\n            'xdata':x_data,\n            'ydata':y_data\n        }\n        return render(request, 'index.html',context)\n","sub_path":"django_object/cmdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574684010","text":"# TOSHIBA - TSDV\n# Team: PHOcr\n# Author: Phung Dinh Tai\n# Email: tai.phungdinh@toshiba-tsdv.com\n# Date create: 16/09/2019\n# Description: This script implements a class which helps parse execution time data\n# profiled from the automation test system\nfrom baseapi.file_access import read_json\nfrom baseapi.log_manager import Logger\nfrom manager.lib_manager.nodes_manager_updater import NodesManagerUpdater\nfrom manager.lib_manager.workers_manager_updater import WorkersManagerUpdater\n\n\nclass ExecutionTimeDataParser(object):\n\n    # Key to extract total time of all processes\n    TOTAL_TIME_KEY = \"Total time\"\n\n    # Key to extract time of processes on main thread\n    MAIN_PROCESSES_TIME_KEY = [\n        NodesManagerUpdater.EXECUTION_TIME_MANAGEMENT_KEY,\n        WorkersManagerUpdater.EXECUTION_TIME_MANAGEMENT_KEY\n    ]\n\n    # Key to extract execution time of processes on threads which execute ssh connection to\n    # remote workers\n    PROCESSES_ON_THREAD_TIME_KEY = [\n        NodesManagerUpdater.EXECUTION_TIME_NODE_THREADS_KEY,\n        WorkersManagerUpdater.EXECUTION_TIME_WORKERS_KEY\n    ]\n\n    # Key to extract 
execution time of build stage will include this sub-string\n BUILD_STAGE_KEY_INCLUDE = \"BUILD \"\n\n # Key to extract execution time of test stage will include this sub-string\n TEST_STAGE_KEY_INCLUDE = \"TEST \"\n\n def __init__(self, input_file=None, name=None):\n \"\"\"\n Constructor for the class.\n\n Parameters\n ----------\n input_file: str\n Path to json data file which need to parse.\n\n \"\"\"\n # Some data objects which are useful to be used\n self._data = None\n self._main_processes_time_data = None\n self._processes_on_threads_data = None\n # Set the name to identify a parser in a list\n self.name = name\n # Set up path to input json file and extract data objects\n self.input_file = input_file\n\n @property\n def name(self):\n \"\"\"\n Get name of the data parser object\n\n Returns\n -------\n str\n Name of the parser\n\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Set a name for the parser for identification\n\n Parameters\n ----------\n name: str\n Name of the data parser\n\n Returns\n -------\n None\n\n \"\"\"\n self._name = name\n\n @property\n def input_file(self):\n \"\"\"\n Getter for path to json data file\n\n Returns\n -------\n str\n Path to json data file\n\n \"\"\"\n return self._input_file\n\n @input_file.setter\n def input_file(self, input_file):\n \"\"\"\n Setter for path to json data file\n\n Parameters\n ----------\n input_file: str\n Path to json data file\n\n Returns\n -------\n None\n\n \"\"\"\n self._input_file = input_file\n # Try to extract data from json data file into data object\n if self._input_file:\n # Read data into json object\n self.data = read_json(self._input_file)\n\n @property\n def data(self):\n \"\"\"\n Getter for json data object\n\n Returns\n -------\n dict/list\n Json data object\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, json_object):\n \"\"\"\n Setter for json object file. In this setter, we also extract some data object which will\n be used later.\n\n Parameters\n ----------\n json_object: dict/list\n Data object which need to be processed\n\n Returns\n -------\n None\n\n \"\"\"\n self._data = json_object\n # Extract execution time data for processes on the main thread\n main_processes_time_key = list(key for key in self.MAIN_PROCESSES_TIME_KEY if key in\n self._data)[0]\n self.main_processes_time_data = self._data[main_processes_time_key]\n # Extract execution time data for processes on a thread\n processes_on_thread_key = list(key for key in self.PROCESSES_ON_THREAD_TIME_KEY if key in\n self._data)[0]\n self.processes_on_thread_time_data = self._data[processes_on_thread_key]\n\n def total_time(self):\n \"\"\"\n Get execution time of total process\n\n Returns\n -------\n float\n Total time of all processes\n\n \"\"\"\n return self.data[self.TOTAL_TIME_KEY]\n\n @property\n def main_processes_time_data(self):\n \"\"\"\n Getter for execution time data of processes on the main thread\n\n Returns\n -------\n list\n List of data object. Currently, one dictionary for execution time data for processes\n and one for total time of processes.\n\n \"\"\"\n return self._main_processes_time_data\n\n @main_processes_time_data.setter\n def main_processes_time_data(self, execution_time_data):\n \"\"\"\n Setter for execution time data of processes on the main thread.\n\n Parameters\n ----------\n execution_time_data: list\n Execution time data of processes on the main thread. 
This should include two objects,\n one for execution time data of processes and one for total time of all processes.\n\n Returns\n -------\n None\n\n \"\"\"\n self._main_processes_time_data = execution_time_data\n\n @property\n def processes_on_thread_time_data(self):\n \"\"\"\n Getter for execution time data of processes which are executed on threads.\n\n Returns\n -------\n dict\n Key in this case is name of the worker and value is detail execution on thread which\n control worker over ssh connection.\n\n \"\"\"\n return self._processes_on_threads_data\n\n @processes_on_thread_time_data.setter\n def processes_on_thread_time_data(self, execution_time_data):\n \"\"\"\n Setter for execution time data on the threads which control workers over ssh connection\n\n Parameters\n ----------\n execution_time_data: dict\n Key is the name of worker. Value is a data object includes execution time of\n processes on the thread.\n\n Returns\n -------\n None\n\n \"\"\"\n self._processes_on_threads_data = execution_time_data\n\n def get_stages_list(self):\n \"\"\"\n Get list name of stages on the main thread. Currently, execution time data for the main\n processes is constructed like this:\n [\n {\n \"stage_name\": {\n \"Steps\": [\n {\n \"step_name_1\": \n },\n {\n \"step_name_2\": \n }\n ],\n \"Total time\": \n },\n \"stage_name_2\": {...},\n \"step_name\": ,\n ...\n },\n {\n \"Total time\": \n }\n ]\n Then a stage block data has value type is a dictionary. For this case, list of stages\n should be [\"stage_name\", \"stage_name_2\"]\n\n Returns\n -------\n list\n List name of stages on the main thread\n\n \"\"\"\n stages = list()\n for obj in self._main_processes_time_data:\n for key in obj:\n if type(obj[key]) is dict:\n # Execution data for a stage is a dictionary, not just a value\n stages.append(key)\n return stages\n\n def get_stage_time_data(self, stage_name):\n \"\"\"\n Get data object of execution time of a processes on a stage\n\n Parameters\n ----------\n stage_name: str\n Name of stage to extract data\n\n Returns\n -------\n dict\n Data object of execution time of processes on a stage. None is return if stage can\n not be found.\n\n \"\"\"\n for obj in self._main_processes_time_data:\n if stage_name in obj:\n return obj[stage_name]\n\n def get_stage_total_time(self, stage_name):\n \"\"\"\n Get total execution time on a stage\n\n Parameters\n ----------\n stage_name: str\n Name of stage to extract data\n\n Returns\n -------\n float\n Total execution time of stage\n\n \"\"\"\n return self.get_stage_time_data(stage_name)[self.TOTAL_TIME_KEY]\n\n def get_steps_list(self):\n \"\"\"\n Get list name of steps on the main thread. Currently, on the main thread, maybe there are\n some steps which has the same level with stages such like this:\n [\n {\n \"stage_name\": {\n \n }\n },\n {\n \"step_name_1\": \n },\n {\n \"step_name_2\": \n },\n {\n \"Total time\": \n }\n ]\n Then a step data example has key different from \"Total time\" and has type float to\n distinguish with stage data. 
For the example, list name of steps should be [\n \"step_name_1\", \"step_name_2\"]\n\n Returns\n -------\n list\n List name of steps on the main thread\n\n \"\"\"\n main_steps = list()\n for obj in self._main_processes_time_data:\n for key in obj:\n if key != self.TOTAL_TIME_KEY and self.is_number(obj[key]):\n # A step execution data has key not total time and has type float, not object\n main_steps.append(key)\n return main_steps\n\n def get_step_execution_time(self, step_name):\n \"\"\"\n Get execution time of a step on the main thread\n\n Parameters\n ----------\n step_name: str\n Name of step to extract execution time data\n\n Returns\n -------\n float\n Execution time of a step on the main thread. Return 0 if step not found.\n\n \"\"\"\n for obj in self._main_processes_time_data:\n if step_name in obj:\n return obj[step_name]\n # Return 0 if step doesn't exist on the main thread data\n return 0\n\n def get_steps_list_on_stage(self, stage_name):\n \"\"\"\n Get list name of steps on a stage on main thread. Currently, the following is structure\n of data for a stage:\n {\n \"Steps\": [\n {\n \"step_name_1\": \n },\n {\n \"step_name_2\": \n },\n ...\n ],\n \"Total time\": \n }\n Then for the above structure, list name of steps on the stage is [\"step_name_1\",\n \"step_name_2\"]\n\n Parameters\n ----------\n stage_name: str\n Name of stage to extract data\n\n Returns\n -------\n list\n List name of steps on stage processes on main thread\n\n \"\"\"\n # Get execution time data for steps of the stage\n steps_on_stage_time_data = self.get_stage_time_data(stage_name)[Logger.STEP_DATA_KEY]\n # Collect name of steps on the stage\n steps = list()\n for obj in steps_on_stage_time_data:\n for key in obj:\n if key != self.TOTAL_TIME_KEY and self.is_number(obj[key]):\n # Data value of a step should be float type\n steps.append(key)\n return steps\n\n def get_step_execution_time_on_stage(self, stage_name, step_name):\n \"\"\"\n Get execution time of a step on a stage on the main thread\n\n Parameters\n ----------\n stage_name: str\n Name of stage to extract data\n step_name: str\n Name of step to extract data\n\n Returns\n -------\n float\n Execution time of step on the stage. Return 0 if step can be found.\n\n \"\"\"\n # Get execution time data for steps on the stage\n steps_on_stage_time_data = self.get_stage_time_data(stage_name)[Logger.STEP_DATA_KEY]\n for obj in steps_on_stage_time_data:\n if step_name in obj:\n # Step found\n return obj[step_name]\n # Return 0 if step can not be found\n return 0\n\n def get_threads_list(self):\n \"\"\"\n Get list name of threads/workers. Currently, structure of execution time data for threads\n look like this:\n {\n \"thread_1\": {...},\n \"thread_2\": {...},\n ...\n }\n For this example, list name of threads is [\"thread_1\", \"thread_2\"]\n\n Returns\n -------\n list\n List name threads/workers\n\n \"\"\"\n return sorted(self.processes_on_thread_time_data.keys())\n\n def get_total_execution_time_on_thread(self, thread_name):\n \"\"\"\n Get total execution time on a thread\n\n Parameters\n ----------\n thread_name: str\n Name of thread to extract data\n\n Returns\n -------\n float\n Total execution time on a thread. If total time key can not be found, then return 0\n\n \"\"\"\n for obj in self._processes_on_threads_data[thread_name]:\n for key in obj:\n if key == self.TOTAL_TIME_KEY:\n return obj[key]\n return 0\n\n def get_thread_execution_time_data(self, thread_name):\n \"\"\"\n Get execution time data on a thread control. 
Thread is using to control a worker over ssh\n connection. Currently, processes on a thread are separated into steps only. There is no\n stage to be presented.\n\n Parameters\n ----------\n thread_name: str\n Name of thread/worker\n\n Returns\n -------\n list\n List of execution time of steps on the thread\n\n \"\"\"\n return self._processes_on_threads_data[thread_name]\n\n def get_steps_list_on_thread(self, thread_name):\n \"\"\"\n Get list name of steps of processes on a thread. Currently, this is the structure of data\n for threads:\n {\n \"thread_name_1\": [\n {\n \"step_name_1\": \n },\n {\n \"step_name_2\": \n },\n ...\n ],\n ...\n }\n So, for this case, list name of steps of thread_name_1 is [\"step_name_1\", \"step_name_2\"]\n\n Parameters\n ----------\n thread_name: str\n Name of the thread/worker to extract data\n\n Returns\n -------\n list\n List name of steps on the thread\n\n \"\"\"\n # Execution time data of processes on thread which control worker over ssh connection\n execution_time_data_on_thread = self._processes_on_threads_data[thread_name]\n # Collect list name of steps\n steps = list()\n for obj in execution_time_data_on_thread:\n for key in obj:\n if key != self.TOTAL_TIME_KEY and self.is_number(obj[key]):\n # Step data value should be a float value\n steps.append(key)\n return steps\n\n def get_step_execution_time_on_thread(self, thread_name, step_name):\n \"\"\"\n Get execution time of a step on a thread.\n\n Parameters\n ----------\n thread_name: str\n Name of the thread/worker which need to be extracted data\n step_name: str\n Name of step to extract data\n\n Returns\n -------\n float\n Execution data of the step on thread\n\n \"\"\"\n # Get execution time of processes on the thread\n execution_time_data_on_thread = self._processes_on_threads_data[thread_name]\n # Extract execution time data of the step\n for obj in execution_time_data_on_thread:\n if step_name in obj:\n return obj[step_name]\n # Return 0 if step can not be found\n return 0\n\n @staticmethod\n def is_number(input_value):\n \"\"\"\n Check if an input value is number or not. A number should has type int or float\n\n Parameters\n ----------\n input_value: int/float\n Value need to be checked\n\n Returns\n -------\n bool\n True if input value is number which has type int or float. Otherwise, return False.\n\n \"\"\"\n if isinstance(input_value, int) or isinstance(input_value, float):\n return True\n return False\n","sub_path":"Run_PHocr_test/Mekong/utilities/report/lib_system_performance/execution_time_data_parser.py","file_name":"execution_time_data_parser.py","file_ext":"py","file_size_in_byte":16998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562525895","text":"# -*- coding: utf8 -*-\nimport MySQLdb\nimport json\nimport collections\nfrom telnetlib import theNULL\nimport csv\n# MySQL database username\nDB_USER='yhuang';\n\n# MySQL database password\nDB_PASSWORD='perlin1997';\n\n# MySQL hostname\nDB_HOST='twmodedb.cw7dmbpk8io1.us-east-1.rds.amazonaws.com';\nDB_dbname='marketdata'\nmydb = MySQLdb.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB_dbname,charset='utf8')\ncursor = mydb.cursor()\n\n## {{{ http://code.activestate.com/recipes/410692/ (r8)\n# This class provides the functionality we want. You only need to look at\n# this if you want to know how this works. 
It only needs to be defined\n# once, no need to muck around with its internals.\nclass switch(object):\n def __init__(self, value):\n self.value = value\n self.fall = False\n \n def __iter__(self):\n \"\"\"Return the match method once, then stop\"\"\"\n yield self.match\n raise StopIteration\n \n def match(self, *args):\n \"\"\"Indicate whether or not to enter a case suite\"\"\"\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False\n\ndef gentaifex(querydate=None):\n if querydate is None:\n sqlquery=\"call genTaifex(null);\"\n else :\n sqlquery=\"call genTaifex(\\'%s\\');\" % (querydate) \n cursor.execute(sqlquery.decode(\"utf-8\"))\n rowarrays_file = 'taifex.tw.csv'\n rows = cursor.fetchall()\n # Convert query to row arrays\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(str(row[0]))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(str(row[2]))\n rowarray_list.append(float(row[3]))\n rowarray_list.append(float(row[4]))\n rowarray_list.append(float(row[5]))\n rowarray_list.append(float(row[6]))\n wr.writerow(rowarray_list)\n #j = json.dumps(rowarray_list)\n \n f.close()\n cursor.nextset()\n\n\ndef genUnSettle(Institute='外資及陸資',querydate=None):\n#外資及陸資 future_unsettle_foreign\n#自營商 future_unsettle_brokerage\n#SKType='0' '交易人' future_unsettle_0\n#SKType='1' '特定法人' future_unsettle_1\n#SKType='2' '非特定法人' future_unsettle_2\n#future_unsettle\n if querydate is None:\n sqlquery=\"call genUnSettle (\\'%s\\');\" % (Institute)\n else :\n sqlquery=\"call genUnSettle(\\'%s\\',\\'%s\\');\" % (Institute,querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery.decode(\"utf-8\"))\n for case in switch(Institute):\n if case('外資及陸資'):\n rowarrays_file = 'future_unsettle_foreign.tw.csv'\n break\n if case('自營商'):\n rowarrays_file = 'future_unsettle_brokerage.tw.csv'\n break\n if case('交易人'):\n rowarrays_file = 'future_unsettle_0.tw.csv'\n break\n if case('特定法人'):\n rowarrays_file = 'future_unsettle_1.tw.csv'\n break\n if case('非特定法人'): \n rowarrays_file = 'future_unsettle_2.tw.csv'\n break\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(float(row[2]))\n rowarray_list.append(float(row[3]))\n wr.writerow(rowarray_list)\n #j = json.dumps(rowarray_list)\n cursor.nextset()\n\ndef genbuysell(SKName='臺股期貨',Institute='外資及陸資',querydate=None):\n#外資及陸資 future_SKID_foreign\n#自營商 future_SKID_brokerage\n#SKType='0' '交易人' future_SKID_0\n#SKType='1' '特定法人' future_SKID_1\n#SKType='2' '非特定法人' future_SKID_2\n if (SKName=='臺股期貨') :\n for case in switch(Institute):\n if case('外資及陸資'):\n rowarrays_file = 'future_tx_foreign.tw.csv'\n break\n if case('自營商'):\n rowarrays_file = 'future_tx_brokerage.tw.csv'\n break\n if case('交易人'):\n rowarrays_file = 'future_tx_0.tw.csv'\n break\n if case('特定法人'):\n rowarrays_file = 'future_tx_1.tw.csv'\n break\n if case('非特定法人'): \n rowarrays_file = 'future_tx_2.tw.csv'\n break\n else :\n for case in switch(Institute):\n if case('外資及陸資'):\n rowarrays_file = 'future_%s_foreign.tw.csv' % SKName[5:]\n break\n if 
case('自營商'):\n rowarrays_file = 'future_%s_brokerage.tw.csv' % SKName[5:]\n break \n if case('投信'):\n rowarrays_file = 'future_%s_fund.tw.csv' % SKName[5:]\n break\n if case('交易人'):\n rowarrays_file = 'future_%s_0.tw.csv' % SKName[5:]\n break\n if case('特定法人'):\n rowarrays_file = 'future_%s_1.tw.csv' % SKName[5:]\n break\n if case('非特定法人'): \n rowarrays_file = 'future_%s_2.tw.csv' % SKName[5:]\n break\n\n if querydate is None:\n sqlquery=\"call genbuysell (\\'%s\\',\\'%s\\');\" % (SKName,Institute)\n else :\n sqlquery=\"call genbuysell (\\'%s\\',\\'%s\\',\\'%s\\');\" % (SKName,Institute,querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery)\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(float(row[2]))\n rowarray_list.append(float(row[3]))\n wr.writerow(rowarray_list)\n\n cursor.nextset()\n \ndef genTSEbuysell(Institute='外資及陸資',querydate=None):\n#外資及陸資 future_SKID_foreign\n#自營商 future_SKID_brokerage\n#SKType='0' '交易人' future_SKID_0\n#SKType='1' '特定法人' future_SKID_1\n#SKType='2' '非特定法人' future_SKID_2\n for case in switch(Institute):\n if case('外資及陸資'):\n rowarrays_file = 'future_foreign.tw.csv' \n break\n if case('自營商'):\n rowarrays_file = 'future_brokerage.tw.csv'\n break\n if case('投信'):\n rowarrays_file = 'future_fund.tw.csv'\n break\n if case('合計'):\n rowarrays_file = 'future_total.tw.csv' \n break\n if case('融資'): # default \n rowarrays_file = 'future_margin_buy.tw.csv' \n break\n if case('融券'): # default \n rowarrays_file = 'future_margin_sell.tw.csv' \n break\n if querydate is None:\n sqlquery=\"call genTSEbuysell (\\'%s\\');\" % (Institute)\n else :\n sqlquery=\"call genfuture_buysell(\\'%s\\',\\'%s\\');\" % (Institute,querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery)\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(float(row[2]))\n rowarray_list.append(float(row[3]))\n wr.writerow(rowarray_list)\n\n cursor.nextset()\n \ndef genfuture_buysell(querydate=None):\n rowarrays_file = 'future_buysell.tw.csv' \n if querydate is None:\n sqlquery=\"call genfuture_buysell(null);\"\n else :\n sqlquery=\"call genfuture_buysell(\\'%s\\');\" % (querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery)\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(row[2].encode('utf8'))\n rowarray_list.append(row[3].encode('utf8'))\n rowarray_list.append(float(row[4]))\n wr.writerow(rowarray_list)\n cursor.nextset()\n \ndef genfuture_UnSettle(querydate=None):\n rowarrays_file = 'future_UnSettle.tw.csv'\n if querydate is None:\n sqlquery=\"call genfuture_UnSettle(null);\"\n else :\n sqlquery=\"call genfuture_UnSettle(\\'%s\\');\" % (querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery)\n 
rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(row[2].encode('utf8'))\n rowarray_list.append(float(row[3]))\n rowarray_list.append(float(row[4]))\n wr.writerow(rowarray_list)\n cursor.nextset()\n\ndef genTSE_buysell(querydate=None):\n rowarrays_file = 'TSE_buysell.tw.csv' \n if querydate is None:\n sqlquery=\"call genTSE_buysell(null);\"\n else :\n sqlquery=\"call genTSE_buysell(\\'%s\\');\" % (querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery)\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(row[2].encode('utf8'))\n rowarray_list.append(float(row[3]))\n wr.writerow(rowarray_list)\n cursor.nextset()\n \ndef genALL_buysell(querydate=None):\n rowarrays_file = 'ALL_buysell.tw.csv' \n if querydate is None:\n sqlquery=\"call genALL_buysell(null);\"\n else :\n sqlquery=\"call genALL_buysell(\\'%s\\');\" % (querydate)\n cursor.execute(\"SET NAMES utf8;\")\n cursor.execute(sqlquery)\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(row[0].encode('utf8'))\n rowarray_list.append(str(row[1]))\n rowarray_list.append(row[2].encode('utf8'))\n rowarray_list.append(row[3].encode('utf8'))\n rowarray_list.append(float(row[4]))\n wr.writerow(rowarray_list)\n cursor.nextset()\ndef genTSE_Lending(querydate=None):\n rowarrays_file = 'tse_lending.tw.csv' \n if querydate is None:\n sqlquery=\"call genTSE_Lending(null);\"\n else :\n sqlquery=\"call genTSE_Lending(\\'%s\\');\" % (querydate)\n cursor.execute(\"SET NAMES utf8;\") \n cursor.execute(sqlquery)\n rows = cursor.fetchall()\n rowarray_list = []\n f = open(data_directory+rowarrays_file,'wb')\n f.truncate()\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow([i[0] for i in cursor.description])\n for row in rows:\n rowarray_list = []\n rowarray_list.append(str(row[0]))\n rowarray_list.append(float(row[1]))\n rowarray_list.append(float(row[2]))\n rowarray_list.append(float(row[3]))\n rowarray_list.append(float(row[4]))\n wr.writerow(rowarray_list)\n cursor.nextset()\ndata_directory='c:/develop/MoneyDJTV/Data/'\ngentaifex()\n# genUnSettle(Institute='外資及陸資')\n# genUnSettle(Institute='自營商')\n# genUnSettle(Institute='交易人')\n# genUnSettle(Institute='特定法人')\n# genUnSettle(Institute='非特定法人')\n# genbuysell(SKName='臺股期貨',Institute='外資及陸資')\n# genbuysell(SKName='臺股期貨',Institute='自營商')\n# genbuysell(SKName='臺股期貨',Institute='交易人')\n# genbuysell(SKName='臺股期貨',Institute='特定法人')\n# genbuysell(SKName='臺股期貨',Institute='非特定法人')\n# genTSEbuysell(Institute='外資及陸資')\n# genTSEbuysell(Institute='自營商')\n# genTSEbuysell(Institute='合計')\n# genTSEbuysell(Institute='融資')\n# 
genTSEbuysell(Institute='融券')\ngenfuture_buysell(None)\ngenfuture_UnSettle(None)\ngenTSE_buysell(None)\ngenALL_buysell(None)\ngenTSE_Lending(None)\ncursor.close()","sub_path":"MoneyDJTV/genFuture_csv.py","file_name":"genFuture_csv.py","file_ext":"py","file_size_in_byte":13206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"353441877","text":"list = [12,53,75,24,87,97,34,75,23,54,3,51,36,27]\n# max = list[0]\n#\n# for i in range(1,len(list)):\n# if list[i] > max:\n# max = list[i]\n# print(max)\n\nmin = list[0]\n\nfor i in range(1,len(list)):\n if list[i] < min:\n min = list[i]\nprint(min)\n","sub_path":"al/2013.py","file_name":"2013.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"197377557","text":"import os\nfrom shutil import rmtree\nfrom subprocess import call\nfrom elftools.elf.elffile import ELFFile as elf\n\n# Locate various things\nasm = 'powerpc-eabi-as'\ngcc = 'powerpc-eabi-gcc'\nobjcopy = 'powerpc-eabi-objcopy'\n\n# Initialize variables\nstartHook = 0x8000629C\ndebug = False\nregionlist = ['P', 'E', 'J', 'K']\n\ndef build(isBootStrap):\n # Initialize lists\n asmlist = []\n cpplist = []\n\n if isBootStrap:\n mainpath = 'bootstrap'\n outname = 'Loader'\n print('Building bootstrap...')\n else:\n mainpath = 'src'\n outname = 'HideNSeek'\n print('Building payload...')\n\n # Get all files in the source folder\n for root, subfolder, files in os.walk(mainpath):\n for item in files:\n if item.lower().endswith('.s'):\n filename = os.path.join(root, item)\n asmlist.append(filename)\n elif item.lower().endswith('.c'):\n filename = os.path.join(root, item)\n cpplist.append(filename)\n\n for region in regionlist:\n # Make a clean build folder\n if os.path.isdir('build'):\n rmtree('build')\n os.mkdir('build')\n\n # Initialize GCC command\n cc_command = [gcc, '-Iinclude', '-nostartfiles', '-nostdinc', '-D', 'REGION_{}'.format(region), '-D', 'REGION=\\'{}\\''.format(region), '-Os', '-Wl,-T,{}/mem.ld,-T,rmc.ld,-T,rmc{}.ld'.format(mainpath, region.lower()), '-ffunction-sections', '-fdata-sections', '-fcommon', '-mcpu=750', '-meabi', '-mhard-float']\n\n # Add debug macro if debug is on\n if debug:\n cc_command += ['-D', 'DEBUG']\n\n # Add all cpp files and the destination\n cc_command += cpplist\n cc_command += asmlist\n cc_command += ['-o', 'build/{}{}.o'.format(outname, region)]\n\n # Debug output for testing:\n # print(*cc_command)\n\n # Call GCC to compile everything\n c = call(cc_command)\n if c != 0:\n print('Build failed!')\n return\n\n # Get offset to start function\n if isBootStrap:\n with open('build/{}{}.o'.format(outname,region), 'rb') as f:\n elfData = elf(f)\n symtab = elfData.get_section_by_name('.symtab')\n startFunc = symtab.get_symbol_by_name('start')[0].entry['st_value']\n instruction = (((startFunc-startHook) & 0x3FFFFFF ) | 0x48000000)\n print('New instruction is', hex(instruction))\n\n c = call([objcopy, '-O', 'binary', '-R', '.eh_frame', '-R', '.eh_frame_hdr', 'build/{}{}.o'.format(outname, region), 'bin/{}{}.bin'.format(outname, region)])\n if c != 0:\n print('Build failed!')\n return\n else:\n print('Built', region + '!')\n\n # We're done!\n rmtree('build')\n print('All built!')\n\ndef main():\n # Debug prompt\n global debug\n debug = input('Enable debug mode? 
(Y/N): ').lower() == 'y'\n\n # Make a clean bin folder\n if os.path.isdir('bin'):\n rmtree('bin')\n os.mkdir('bin')\n\n # Build it!\n build(False)\n build(True)\n\nmain()\n","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"36387650","text":"import re\nfrom malaya.num2word import to_cardinal, to_ordinal\nfrom malaya.word2num import word2num\nfrom malaya.text.tatabahasa import (\n consonants,\n vowels,\n sounds,\n hujung_malaysian,\n calon_dictionary,\n)\nfrom malaya.text.rules import rules_normalizer, rules_compound_normalizer\nfrom malaya.text.function import ENGLISH_WORDS, MALAY_WORDS\n\nignore_words = ['ringgit', 'sen']\nignore_postfix = ['adalah']\nunit_mapping = {\n 'kg': 'kilogram',\n 'g': 'gram',\n 'l': 'liter',\n 'ml': 'milliliter',\n 'c': 'celsius',\n 'km': 'kilometer',\n 'm': 'meter',\n 'cm': 'sentimeter',\n 'kilo': 'kilogram',\n}\n\nrules_compound_normalizer_regex = (\n '(?:' + '|'.join(list(rules_compound_normalizer.keys())) + ')'\n)\n\n\ndef _replace_compoud(string):\n results = re.findall(\n rules_compound_normalizer_regex, string, flags = re.IGNORECASE\n )\n for r in results:\n string = string.replace(r, rules_compound_normalizer[r.lower()])\n return string\n\n\ndef _remove_postfix(word):\n if word in MALAY_WORDS or word in ENGLISH_WORDS or word in rules_normalizer:\n return word, ''\n if word in ignore_postfix:\n return word, ''\n for p in hujung_malaysian:\n if word.endswith(p):\n return word[: -len(p)], ' lah'\n return word, ''\n\n\ndef _normalize_title(word):\n if word[0].isupper():\n return calon_dictionary.get(word.lower(), word)\n return word\n\n\ndef _normalize_money(\n word, currency = {'dollar': '$', 'ringgit': 'RM', 'pound': '£', 'euro': '€'}\n):\n splitted = word.split()\n if splitted[-1] in ['dollar', 'ringgit', 'pound', 'euro']:\n v = word2num(' '.join(splitted[:-1]))\n return currency[splitted[-1]] + str(v)\n else:\n return word\n\n\ndef _is_number_regex(s):\n if re.match('^\\d+?\\.\\d+?$', s) is None:\n return s.isdigit()\n return True\n\n\ndef _string_to_num(word):\n if '.' in word:\n return float(word)\n else:\n return int(word)\n\n\ndef cardinal(x):\n cp_x = x[:]\n try:\n if re.match('.*[A-Za-z]+.*', x):\n return x\n x = re.sub(',', '', x, count = 10)\n\n if re.match('.+\\..*', x):\n x = to_cardinal(float(x))\n elif re.match('\\..*', x):\n x = to_cardinal(float(x))\n else:\n x = to_cardinal(int(x))\n x = re.sub('-', ' ', x, count = 10)\n return x\n except:\n return cp_x\n\n\ndef split_currency(x):\n results = []\n for no, u in enumerate(x.split('.')):\n if no and len(u) == 1:\n u = u + '0'\n results.append(cardinal(u))\n return results\n\n\ndef digit(x):\n cp_x = x[:]\n try:\n x = re.sub('[^0-9]', '', x)\n result_string = ''\n for i in x:\n result_string = result_string + cardinal(i) + ' '\n result_string = result_string.strip()\n return result_string\n except:\n return cp_x\n\n\ndef digit_unit(x):\n cp_x = x[:]\n try:\n n = re.sub('[^0-9.]', '', x)\n u = re.sub('[0-9. ]', '', x)\n u = unit_mapping.get(u.lower(), u)\n if '.' 
in n:\n n = float(n)\n else:\n n = int(n)\n n = to_cardinal(n)\n return f'{n} {u}'\n except Exception as e:\n return cp_x\n\n\ndef letters(x):\n cp_x = x[:]\n try:\n x = re.sub('[^a-zA-Z]', '', x)\n x = x.lower()\n result_string = ''\n for i in range(len(x)):\n result_string = result_string + x[i] + ' '\n return result_string.strip()\n except:\n return cp_x\n\n\ndef rom_to_int(string):\n\n table = [\n ['M', 1000],\n ['CM', 900],\n ['D', 500],\n ['CD', 400],\n ['C', 100],\n ['XC', 90],\n ['L', 50],\n ['XL', 40],\n ['X', 10],\n ['IX', 9],\n ['V', 5],\n ['IV', 4],\n ['I', 1],\n ]\n returnint = 0\n for pair in table:\n\n continueyes = True\n\n while continueyes:\n if len(string) >= len(pair[0]):\n\n if string[0 : len(pair[0])] == pair[0]:\n returnint += pair[1]\n string = string[len(pair[0]) :]\n\n else:\n continueyes = False\n else:\n continueyes = False\n\n return returnint\n\n\ndef ordinal(x):\n cp_x = x[:]\n try:\n result_string = ''\n x = x.replace(',', '')\n x = x.replace('[\\.]$', '')\n if re.match('^[0-9]+$', x):\n x = to_ordinal(int(x))\n return x\n if re.match('.*(V|X|I|L|D)', x):\n x = x.replace('-', '')\n if re.match('^ke.*', x):\n x = x[2:]\n x = rom_to_int(x)\n result_string = to_ordinal(x)\n else:\n x = rom_to_int(x)\n result_string = to_ordinal(x)\n result_string = 'yang ' + result_string\n elif re.match('^ke.*', x):\n x = x.replace('-', '')\n x = x[2:]\n result_string = to_ordinal(int(x))\n else:\n result_string = to_ordinal(int(x))\n return result_string\n except Exception as e:\n return cp_x\n\n\ndef telephone(x):\n try:\n result_string = ''\n for i in range(0, len(x)):\n if re.match('[0-9]+', x[i]):\n result_string = result_string + cardinal(x[i]) + ' '\n else:\n result_string = result_string + 'sil '\n return result_string.strip()\n except:\n return x\n\n\ndef electronic(x):\n try:\n replacement = {\n '.': 'dot',\n ':': 'colon',\n '/': 'slash',\n '-': 'dash',\n '#': 'hash tag',\n }\n result_string = ''\n if re.match('.*[A-Za-z].*', x):\n for char in x:\n if re.match('[A-Za-z]', char):\n result_string = result_string + letters(char) + ' '\n elif char in replacement:\n result_string = result_string + replacement[char] + ' '\n elif re.match('[0-9]', char):\n if char == 0:\n result_string = result_string + 'o '\n else:\n number = cardinal(char)\n for n in number:\n result_string = result_string + n + ' '\n return result_string.strip()\n else:\n return x\n except:\n return x\n\n\ndef fraction(x):\n try:\n y = x.split('/')\n result_string = ''\n y[0] = cardinal(y[0])\n y[1] = cardinal(y[1])\n return '%s per %s' % (y[0], y[1])\n except:\n return x\n\n\ndef combine_with_cent(\n x, currency = 'RM', currency_end = 'ringgit', cent = 'sen'\n):\n text = split_currency(str(x))\n c = '%s%s' % (currency, str(x))\n if text[0] != 'kosong':\n x = '%s %s' % (text[0], currency_end)\n else:\n x = ''\n if len(text) == 2:\n if text[1] != 'kosong':\n x = '%s %s %s' % (x, text[1], cent)\n return x, c\n\n\ndef money(x):\n try:\n if (\n re.match('^\\$', x)\n or x.lower().endswith('dollar')\n or x.lower().endswith('cent')\n ):\n x = x.lower()\n if not x.startswith('$') and x.endswith('cent'):\n cent = True\n else:\n cent = False\n x = x.replace('$', '').replace('dollar', '').replace('cent', '')\n x = re.sub(r'[ ]+', ' ', x).strip()\n x, n = re.split(\"(\\d+(?:[\\.,']\\d+)?)\", x)[1:]\n x = re.sub(',', '', x, count = 10)\n labels = []\n for c in n:\n if re.match('.*(M|m)$', c):\n labels.append(1e6)\n elif re.match('.*(b|B)$', c):\n labels.append(1e9)\n elif re.match('.*(k|K)$', c):\n 
labels.append(1e3)\n\n if cent:\n x = float(x)\n x = x / 100\n for l in labels:\n x = float(x)\n x = x * l\n\n x, c = combine_with_cent(\n x, currency = '$', currency_end = 'dollar', cent = 'cent'\n )\n\n return re.sub(r'[ ]+', ' ', x.lower()).strip(), c\n\n elif (\n re.match('^US', x)\n or x.lower().endswith('dollar')\n or x.lower().endswith('cent')\n or x.lower().endswith('usd')\n ):\n x = x.lower()\n if not x.startswith('US') and x.endswith('cent'):\n cent = True\n else:\n cent = False\n x = (\n x.replace('US', '')\n .replace('usd', '')\n .replace('dollar', '')\n .replace('cent', '')\n )\n x = re.sub(r'[ ]+', ' ', x).strip()\n x, n = re.split(\"(\\d+(?:[\\.,']\\d+)?)\", x)[1:]\n x = re.sub(',', '', x, count = 10)\n labels = []\n for c in n:\n if re.match('.*(M|m)$', c):\n labels.append(1e6)\n elif re.match('.*(b|B)$', c):\n labels.append(1e9)\n elif re.match('.*(k|K)$', c):\n labels.append(1e3)\n\n if cent:\n x = float(x)\n x = x / 100\n for l in labels:\n x = float(x)\n x = x * l\n\n x, c = combine_with_cent(\n x, currency = '$', currency_end = 'dollar', cent = 'cent'\n )\n\n return re.sub(r'[ ]+', ' ', x.lower()).strip(), c\n\n elif (\n re.match('^\\£', x)\n or x.lower().endswith('pound')\n or x.lower().endswith('penny')\n ):\n x = x.lower()\n if not x.startswith('£') and x.endswith('penny'):\n cent = True\n else:\n cent = False\n x = x.replace('£', '').replace('pound', '').replace('penny', '')\n x = re.sub(r'[ ]+', ' ', x).strip()\n x, n = re.split(\"(\\d+(?:[\\.,']\\d+)?)\", x)[1:]\n x = re.sub(',', '', x, count = 10)\n labels = []\n for c in n:\n if re.match('.*(M|m)$', c):\n labels.append(1e6)\n elif re.match('.*(b|B)$', c):\n labels.append(1e9)\n elif re.match('.*(k|K)$', c):\n labels.append(1e3)\n\n if cent:\n x = float(x)\n x = x / 100\n for l in labels:\n x = float(x)\n x = x * l\n\n x, c = combine_with_cent(\n x, currency = '£', currency_end = 'pound', cent = 'cent'\n )\n return re.sub(r'[ ]+', ' ', x.lower()).strip(), c\n\n elif (\n re.match('^\\€', x)\n or x.lower().endswith('euro')\n or x.lower().endswith('cent')\n ):\n x = x.lower()\n if not x.startswith('€') and x.endswith('cent'):\n cent = True\n else:\n cent = False\n x = x.replace('€', '').replace('euro', '').replace('cent', '')\n x = re.sub(r'[ ]+', ' ', x).strip()\n x, n = re.split(\"(\\d+(?:[\\.,']\\d+)?)\", x)[1:]\n x = re.sub(',', '', x, count = 10)\n labels = []\n for c in n:\n if re.match('.*(M|m)$', c):\n labels.append(1e6)\n elif re.match('.*(b|B)$', c):\n labels.append(1e9)\n elif re.match('.*(k|K)$', c):\n labels.append(1e3)\n\n x = float(x)\n if cent:\n x = x / 100\n for l in labels:\n x = x * l\n\n x, c = combine_with_cent(\n x, currency = '€', currency_end = 'euro', cent = 'cent'\n )\n return re.sub(r'[ ]+', ' ', x.lower()).strip(), c\n\n elif (\n re.match('^RM', x)\n or re.match('^rm', x)\n or x.lower().endswith('ringgit')\n or x.lower().endswith('sen')\n ):\n x = x.lower()\n if not x.startswith('rm') and x.endswith('sen'):\n cent = True\n else:\n cent = False\n\n x = x.replace('rm', '').replace('ringgit', '').replace('sen', '')\n x = re.sub(r'[ ]+', ' ', x).strip()\n x, n = re.split(\"(\\d+(?:[\\.,']\\d+)?)\", x)[1:]\n x = re.sub(',', '', x, count = 10)\n labels = []\n for c in n:\n if re.match('.*(M|m)$', c):\n labels.append(1e6)\n elif re.match('.*(b|B)$', c):\n labels.append(1e9)\n elif re.match('.*(k|K)$', c):\n labels.append(1e3)\n\n if cent:\n x = float(x)\n x = x / 100\n for l in labels:\n x = float(x)\n x = x * l\n\n x, c = combine_with_cent(x)\n return re.sub(r'[ ]+', ' ', x.lower()).strip(), 
c\n return x, None\n\n except Exception as e:\n return x, None\n","sub_path":"malaya/text/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":13132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"605611181","text":"import re\n\ndefault_nonanon_regex = r\"\\bgmbh\\b|\\bklinik| co kg\\b| co\\.|[a-z]+ [a-z]+ ag| cokg\\b|krankenhaus|sagt danke|\\bstadt [a-z]+\"\n\n\nclass AnonFirstLastName:\n def __init__(\n self,\n blacklist,\n firstnames,\n lastnames,\n firstlastnames,\n nonanon_regex=default_nonanon_regex,\n ):\n self.blacklist = blacklist\n self.firstnames = firstnames\n self.lastnames = lastnames\n self.firstlastnames = firstlastnames\n self.nonanon_regex = re.compile(nonanon_regex, flags=re.I)\n self.word_regex = re.compile(\n r\"\\b[^\\W0-9_]{2,}\\b\"\n ) # [^\\W0-9_] effectively means \"any letter\", also Umlaut etc.\n self.replace_dict = {} # maybe speedup with lookup\n\n def __call__(self, text):\n if self.nonanon_regex.search(text):\n return text\n\n names = set(self.word_regex.findall(text.lower()))\n\n if not (\n names & self.firstnames.value\n and names & self.lastnames.value\n and len(names & self.firstlastnames.value) > 1\n ):\n # latter condition is to ensure that not a single word found in both firstnames and lastnames triggers\n # anonymization (e.g. a word that can be both first and last name)\n return text\n\n to_replace = names & self.blacklist.value\n for replace_x in to_replace:\n replace_len = len(replace_x)\n if replace_len not in self.replace_dict:\n self.replace_dict[replace_len] = \"X\" * replace_len\n replace_text = self.replace_dict[replace_len]\n\n text = re.sub(r\"\\b\" + replace_x + r\"\\b\", replace_text, text, flags=re.I)\n return text\n\n\ndef anon_firstlastname_spark(df, blacklist, firstnames, lastnames, *columns_to_anon):\n from pyspark.sql.functions import udf, StringType, col\n\n sc = df._sc\n\n blacklist = set(map(str.lower, blacklist))\n firstnames = set(map(str.lower, firstnames))\n lastnames = set(map(str.lower, lastnames))\n\n blacklist_bc = sc.broadcast(blacklist)\n firstnames_bc = sc.broadcast(firstnames)\n lastnames_bc = sc.broadcast(lastnames)\n firstlastnames_bc = sc.broadcast(firstnames | lastnames)\n\n anonymizer = AnonFirstLastName(\n blacklist_bc, firstnames_bc, lastnames_bc, firstlastnames_bc\n )\n anonymizer_udf = udf(anonymizer.__call__, StringType())\n\n result = df\n for colname in columns_to_anon:\n print(\"Anonymizing column {}\".format(colname))\n result = result.withColumn(\n colname + \"_anonymized\", anonymizer_udf(col(colname))\n ).drop(colname)\n\n return result\n","sub_path":"dstkdev/anonymize_firstlastname.py","file_name":"anonymize_firstlastname.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149823462","text":"from FindADownload.engine.core import WebSpider, File, GLOBAL_TIMEOUT\nfrom lxml import etree\nimport urlparse\nimport re\n\nHTMLParser = etree.HTMLParser()\n\nclass ZippyShare(WebSpider):\n name = \"ZippyShare\"\n\n def __init__(self, host=\"zippyshare\", search_query=None, filter=\"Everything\", results=None, output_links=False, read_html=True, *args, **kwargs):\n super(ZippyShare, self).__init__(host, search_query, filter, results, output_links, read_html, *args, **kwargs)\n\n def mine_data(self, response):\n \"\"\"\n Generates download by scraping download link expression from javascript,\n then creates a File() 
object-instance and appends it to self.files.\n :param response: supplied by self.process_data.\n :return: File() instance or None.\n \"\"\"\n tree = etree.parse(response, HTMLParser)\n text_elements = tree.xpath(\"//div[@id='lrbox']//font/text()\")[1:5]\n javascript = tree.xpath('//script[@type=\"text/javascript\"]')\n if len(javascript) >= 5:\n javascript = javascript[5].text\n if javascript:\n download_expression = re.findall('href = (.*?);\\s*$', javascript, re.M)\n if download_expression:\n download_expression = download_expression[0]\n download_expression = download_expression.split(\" + \")\n if len(download_expression) > 1:\n download_expression[1] = \"str\" + download_expression[1]\n download_expression = \" + \".join(download_expression)\n try:\n download_link = urlparse.urljoin(response.geturl(), '/')[:-1] + eval(download_expression)\n except TypeError:\n download_link = None\n if download_link:\n temporary_file = File()\n temporary_file['name'] = text_elements[1]\n temporary_file['size'] = text_elements[3]\n temporary_file['download_link'] = download_link\n temporary_file['origin_link'] = response.geturl()\n return temporary_file\n return None\n","sub_path":"FindADownload/engine/zippyshare.py","file_name":"zippyshare.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648958770","text":"#\n# Copyright (c) The acados authors.\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\n# author: Daniel Kloeser\n\nfrom acados_template import AcadosModel, AcadosOcp, AcadosOcpSolver\nfrom bicycle_model import bicycle_model\nimport scipy.linalg\nimport numpy as np\n\n\ndef acados_settings(Tf, N, track_file):\n    # create render arguments\n    ocp = AcadosOcp()\n\n    # export model\n    model, constraint = bicycle_model(track_file)\n\n    # define acados ODE\n    model_ac = AcadosModel()\n    model_ac.f_impl_expr = model.f_impl_expr\n    model_ac.f_expl_expr = model.f_expl_expr\n    model_ac.x = model.x\n    model_ac.xdot = model.xdot\n    model_ac.u = model.u\n    model_ac.z = model.z\n    model_ac.p = model.p\n    model_ac.name = model.name\n    ocp.model = model_ac\n\n    # define constraint\n    model_ac.con_h_expr = constraint.expr\n\n    # set dimensions\n    nx = model.x.size()[0]\n    nu = model.u.size()[0]\n    ny = nx + nu\n    ny_e = nx\n\n    ocp.dims.N = N\n    ns = 2\n    nsh = 2\n\n    # set cost\n    Q = np.diag([ 1e-1, 1e-8, 1e-8, 1e-8, 1e-3, 5e-3 ])\n\n    R = np.eye(nu)\n    R[0, 0] = 1e-3\n    R[1, 1] = 5e-3\n\n    Qe = np.diag([ 5e0, 1e1, 1e-8, 1e-8, 5e-3, 2e-3 ])\n\n    ocp.cost.cost_type = \"LINEAR_LS\"\n    ocp.cost.cost_type_e = \"LINEAR_LS\"\n    unscale = N / Tf\n\n    ocp.cost.W = unscale * scipy.linalg.block_diag(Q, R)\n    ocp.cost.W_e = Qe / unscale\n\n    Vx = np.zeros((ny, nx))\n    Vx[:nx, :nx] = np.eye(nx)\n    ocp.cost.Vx = Vx\n\n    Vu = np.zeros((ny, nu))\n    Vu[6, 0] = 1.0\n    Vu[7, 1] = 1.0\n    ocp.cost.Vu = Vu\n\n    Vx_e = np.zeros((ny_e, nx))\n    Vx_e[:nx, :nx] = np.eye(nx)\n    ocp.cost.Vx_e = Vx_e\n\n    ocp.cost.zl = 100 * np.ones((ns,))\n    ocp.cost.Zl = 0 * np.ones((ns,))\n    ocp.cost.zu = 100 * np.ones((ns,))\n    ocp.cost.Zu = 0 * np.ones((ns,))\n\n    # set initial references\n    ocp.cost.yref = np.array([1, 0, 0, 0, 0, 0, 0, 0])\n    ocp.cost.yref_e = np.array([0, 0, 0, 0, 0, 0])\n\n    # setting constraints\n    ocp.constraints.lbx = np.array([-12])\n    ocp.constraints.ubx = np.array([12])\n    ocp.constraints.idxbx = np.array([1])\n    ocp.constraints.lbu = np.array([model.dthrottle_min, model.ddelta_min])\n    ocp.constraints.ubu = np.array([model.dthrottle_max, model.ddelta_max])\n    ocp.constraints.idxbu = np.array([0, 1])\n    # ocp.constraints.lsbx=np.zeros([1])\n    # ocp.constraints.usbx=np.zeros([1])\n    # ocp.constraints.idxsbx=np.array([1])\n    ocp.constraints.lh = np.array(\n        [\n            constraint.along_min,\n            constraint.alat_min,\n            model.n_min,\n            model.throttle_min,\n            model.delta_min,\n        ]\n    )\n    ocp.constraints.uh = np.array(\n        [\n            constraint.along_max,\n            constraint.alat_max,\n            model.n_max,\n            model.throttle_max,\n            model.delta_max,\n        ]\n    )\n    ocp.constraints.lsh = np.zeros(nsh)\n    ocp.constraints.ush = np.zeros(nsh)\n    ocp.constraints.idxsh = np.array([0, 2])\n\n    # set initial condition\n    ocp.constraints.x0 = model.x0\n\n    # set QP solver and integration\n    ocp.solver_options.tf = Tf\n    # ocp.solver_options.qp_solver = 'FULL_CONDENSING_QPOASES'\n    ocp.solver_options.qp_solver = \"PARTIAL_CONDENSING_HPIPM\"\n    ocp.solver_options.nlp_solver_type = \"SQP_RTI\"\n    ocp.solver_options.hessian_approx = \"GAUSS_NEWTON\"\n    ocp.solver_options.integrator_type = \"ERK\"\n    
    ocp.solver_options.sim_method_num_stages = 4\n    ocp.solver_options.sim_method_num_steps = 3\n\n    # ocp.solver_options.qp_solver_tol_stat = 1e-2\n    # ocp.solver_options.qp_solver_tol_eq = 1e-2\n    # ocp.solver_options.qp_solver_tol_ineq = 1e-2\n    # ocp.solver_options.qp_solver_tol_comp = 1e-2\n\n    # create solver\n    acados_solver = AcadosOcpSolver(ocp, json_file=\"acados_ocp.json\")\n\n    return constraint, model, acados_solver\n","sub_path":"examples/acados_python/race_cars/acados_settings.py","file_name":"acados_settings.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"578617577","text":"import operations\nimport random\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\ndef test_max(capsys):\n    input = { 'Items': [\n        {'gramm': {'N': '95'}, '__typename': {'S': 'Weigh'}, 'date': {'S': '2021-03-06'}, 'userID': {'S': '1'}, 'updatedAt': {'S': '2021-03-06T18:51:23.540Z'}, 'createdAt': {'S': '2021-03-06T18:51:23.540Z'}, 'id': {'S': 'af594cf0-1a6c-4b9a-af13-66fe41ad2b42'}},\n        {'gramm': {'N': '95'}, '__typename': {'S': 'Weigh'}, 'date': {'S': '2021-03-06'}, 'userID': {'S': '1'}, 'updatedAt': {'S': '2021-03-06T18:51:21.816Z'}, 'createdAt': {'S': '2021-03-06T18:51:21.816Z'}, 'id': {'S': '65efff19-1d6b-4142-8ff1-184056d46ace'}},\n        {'gramm': {'N': '98'}, '__typename': {'S': 'Weigh'}, '_lastChangedAt': {'N': '1615052540582'}, 'date': {'S': '2021-03-06'}, '_version': {'N': '1'}, 'userID': {'S': '1'}, 'updatedAt': {'S': '2021-03-06T17:42:20.561Z'}, 'createdAt': {'S': '2021-03-06T17:42:20.561Z'}, 'id': {'S': 'c502db88-5ab6-430b-8233-3744620d7764'}}\n    ] }\n    max_weight = operations.max(input)\n    # weight = max_weight['gramm']\n    weight = max_weight\n    assert weight['gramm'] == 98\n\ndef _create_dynamodb_client(region=\"eu-central-1\"):\n    return boto3.client(\"dynamodb\", region_name=region)\n\ndef _create_get_item_input_test_put_weight():\n    return {\n        \"TableName\": \"daily-weight\",\n        \"Key\": {\n            \"id\": {\"S\":\"2#2021-03-14\"}\n        },\n        \"ProjectionExpression\": \"#05560\",\n        \"ExpressionAttributeNames\": {\"#05560\":\"hectogramm\"}\n    }\n\ndef _execute_get_item(dynamodb_client, input):\n    try:\n        response = dynamodb_client.get_item(**input)\n        return response\n    # Handle errors raised by the DynamoDB API\n    except ClientError as error:\n        print(\"Error while getting item: \" + error.response['Error']['Message'])\n    except BaseException as error:\n        print(\"Unknown error while getting item: \" + str(error))\n\n\ndef test_put_weight():\n    # the uid \"2\" is reserved for testing\n    random_weight=random.randint(1, 10000)\n    operations.put_weight(\"2\",\"2021-03-14\", random_weight)\n    dynamodb_client_test = _create_dynamodb_client(region=\"eu-central-1\")\n    get_item_input = _create_get_item_input_test_put_weight()\n    response = _execute_get_item(dynamodb_client_test, get_item_input)\n    print(response)\n    assert response['Item']['hectogramm']['N'] == str(random_weight)\n\n\n\n\n\n","sub_path":"pythonsls/Lektion_07/k96/test_operations.py","file_name":"test_operations.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"382930567","text":"import colorhelp as ch\nfrom matplotlib.colors import ColorConverter\n\n\ndef test():\n    n = 1\n    colors = ch.get_distinct_colors(n)\n    assert len(colors) == 1\n    n = 10\n    colors = ch.get_distinct_colors(n)\n    assert len(colors) == 10
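    # past the 10-colour palette the helper is expected to fall back to an\n    # arbitrarily generated set (assumption based on the function name):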
    colors = ch.get_arbitrary_n_of_distinct_colors(11)\n    assert len(colors) == 11\n    colors = ch.get_qualitative_brewer_colors(12)\n    assert len(colors) == 12\n    colors2 = ch.get_qualitative_brewer_colors(10)\n    # same first colors should be returned with 10-12 colors used:\n    assert (colors[9] == colors2[-1]).all()\n\n\n    cc = ColorConverter()\n    for color in colors:\n        assert len(cc.to_rgba(color)) == 4\n","sub_path":"plots/test_colorhelp.py","file_name":"test_colorhelp.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"82166482","text":"import json\nimport requests\nimport numpy as np\nfrom matplotlib import pyplot as plt\napi = ''\n\nclass GuialaticoData:\n    # returns a lerp of azimuth and elevation in the interval [t+1, t+11]\n    def getnext10seconds(self, lat, lon, alt):\n        url = 'https://www.n2yo.com/rest/v1/satellite/positions/25544/{}/{}/{}/10/&apiKey={}'.format(lat,lon,alt,'SBRY9Q-LVVSFA-L6KE5G-3ZSV')\n\n\n        resp = requests.get(url)\n        data = resp.json()\n        azimuths = [data['positions'][t]['azimuth'] for t in range(10)]\n        elevations = [data['positions'][t]['elevation'] for t in range(10)]\n        azimuth = lambda a: np.interp(a, range(10), azimuths)\n        elevation = lambda a: np.interp(a, range(10), elevations)\n        return (azimuth, elevation)\n\n    def show(self):\n        x = np.linspace(0, 10, 50)\n        data = self.getnext10seconds(0,0,0)\n        fig = plt.figure()\n        ax1 = fig.add_subplot(211, projection='polar')\n        ax2 = fig.add_subplot(212, projection='polar')\n        # plt.ylim((0,90))\n        plt.subplot(2,1,1)\n        plt.title('Uphead')\n        plt.ylim(-90.0,0)\n        plt.plot(data[0](x), -data[1](x))\n        plt.subplot(2,1,2)\n        plt.title('Downhead')\n        plt.ylim(-90.0,0)\n        plt.plot(data[0](x), data[1](x))\n\n        plt.show()\n","sub_path":"request-json.py","file_name":"request-json.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"152737997","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\n\ntrain_filename = os.path.join(\"DATA\", \"challenge_axa_train.csv\")\ntest_filename = os.path.join(\"DATA\", \"challenge_axa_test.csv\")\nall_X_filename = os.path.join(\"DATA\", \"challenge_axa_all_ones_X.pck\")\nall_y_filename = os.path.join(\"DATA\", \"challenge_axa_all_ones_y.pck\")\nall_id_filename = os.path.join(\"DATA\", \"challenge_axa_all_ones_id.pck\")\ndepartement_filename = os.path.join(\"DATA\", \"departement_data.csv\")\n\ntrain_df = pd.read_csv(\n\t\ttrain_filename,\n\t\tsep = \";\",\n\t\tindex_col = 34,\n\t\tna_values = [\"\"]\n\t\t)\n\ntrain_df.index = np.arange(26000, 26000+88225)\n\ntest_df = pd.read_csv(\n\t\ttest_filename,\n\t\tsep = \";\",\n\t\tindex_col = 33,\n\t\tna_values = [\"\"]\n\t\t)\n\n# Concat data\nall_df = pd.concat([train_df, test_df])\n# End concat data\n\n\n# Init\ntmp = []\n\n# age_prospect\n# tmp.append(pd.DataFrame(data=all_df.age_prospect, index=all_df.index))\ntmp.append(pd.get_dummies(all_df.age_prospect, prefix=\"age_prospect\", dummy_na = True))\n\n# departement\n# tmp.append(pd.DataFrame(data=all_df.age_prospect, index=all_df.index))\ntmp.append(pd.get_dummies(all_df.departement, prefix=\"departement\", dummy_na = True))\n\n# sexe\ntmp.append(pd.get_dummies(all_df.sexe, prefix=\"sexe\", dummy_na = False))\n\n# var_4 to var_22
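# One-hot encode each raw column below; dummy_na=True adds an explicit indicator\n# column for missing values, so NaNs stay visible to the downstream model.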
for column_name in [\"var_%d\" % x for x in range(4, 23)]:\n\t# tmp.append(pd.DataFrame(data=all_df[column_name], index=all_df.index))\n\ttmp.append(pd.get_dummies(all_df[column_name], prefix=column_name, dummy_na = True))\n\n# var_23 to var_33\nfor column_name in [\"var_%d\" % x for x in range(23, 34)]:\n\ttmp.append(pd.get_dummies(all_df[column_name], prefix=column_name, dummy_na = True))\n\n# Big agg\nagg = pd.concat(tmp, axis=1)\n# End big agg\n\ny = all_df.loc[:, \"target\"]  # .loc replaces the deprecated .ix indexer\ny = y.reindex(agg.index)\n\npd.to_pickle(agg, all_X_filename)\npd.to_pickle(y, all_y_filename)\npd.to_pickle(y.index, all_id_filename)\n\n","sub_path":"sklearn_archive/panda_read_all_ones.py","file_name":"panda_read_all_ones.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"157436626","text":"import controller\r\nimport model  # Calls to update in update_all are passed a reference to model\r\n\r\n# Use the reference to this module to pass it to update methods\r\n\r\nfrom ball import Ball\r\nfrom floater import Floater\r\nfrom blackhole import Black_Hole\r\nfrom pulsator import Pulsator\r\nfrom hunter import Hunter\r\nfrom special import Special\r\n\r\n# Global variables: declare them global in functions that assign to them: e.g., ... = or +=\r\nrunning = False\r\ncycle_count = 0\r\nsimultons = set()\r\nselected = None  # text naming the kind of simulton to add next (or 'Remove')\r\n\r\n# return a 2-tuple of the width and height of the canvas (defined in the controller)\r\ndef world():\r\n    return (controller.the_canvas.winfo_width(), controller.the_canvas.winfo_height())\r\n\r\n# reset all module variables to represent an empty/stopped simulation\r\ndef reset():\r\n    global running, cycle_count, simultons\r\n    running = False\r\n    cycle_count = 0\r\n    simultons = set()\r\n\r\n\r\n# start running the simulation\r\ndef start():\r\n    global running\r\n    running = True\r\n\r\n\r\n# stop running the simulation (freezing it)\r\ndef stop():\r\n    global running\r\n    running = False\r\n\r\n# step just one update in the simulation\r\ndef step():\r\n    global running\r\n    if not running:\r\n        start()\r\n    update_all()\r\n    stop()\r\n\r\n\r\n# remember the kind of object to add to the simulation when an (x,y) coordinate in the canvas\r\n# is clicked next (or remember to remove an object by such a click)\r\ndef select_object(kind):\r\n    global selected\r\n    selected = kind\r\n\r\n\r\n# add the kind of remembered object to the simulation (or remove all objects that contain the\r\n# clicked (x,y) coordinate)\r\ndef mouse_click(x, y):\r\n    global selected\r\n    print(f\"{selected}({x},{y})\")\r\n    if selected == 'Remove':\r\n        for sim in find(p = lambda s: s.distance((x,y)) < 5):\r\n            remove(sim)\r\n\r\n    elif selected is not None:\r\n        try:\r\n            add(eval(f\"{selected}({x},{y})\"))\r\n        except NameError:\r\n            pass\r\n\r\n\r\n# add simulton s to the simulation\r\ndef add(s):\r\n    global simultons\r\n    simultons.add(s)\r\n\r\n# remove simulton s from the simulation\r\ndef remove(s):\r\n    global simultons\r\n    simultons.remove(s)\r\n\r\n\r\n# find/return a set of simultons that each satisfy predicate p\r\ndef find(p):\r\n    global simultons\r\n    return {i for i in simultons if p(i)}\r\n\r\n\r\n# for each simulton in this simulation, call update (passing model to it)\r\n# this function should loop over one set containing all the simultons\r\n# and should not call type or isinstance: let each simulton do the\r\n# right thing for itself, without this function knowing what kinds of\r\n# simultons are in the simulation\r\ndef update_all():\r\n    global simultons, cycle_count\r\n    if running:\r\n        cycle_count += 1\r\n        for sim in 
list(simultons):\r\n sim.update(model) \r\n \r\n#How to animate: 1st: delete all simultons on the canvas; 2nd: call display on\r\n# all simulton being simulated, adding each back to the canvas, maybe in a\r\n# new location; 3rd: update the label defined in the controller for progress \r\n#this function should loop over one set containing all the simultons\r\n# and should not call type or isinstance: let each simulton do the\r\n# right thing for itself, without this function knowing what kinds of\r\n# simultons are in the simulation\r\ndef display_all():\r\n global simultons\r\n # Easier to delete all and display all; could use move with more thought\r\n for o in controller.the_canvas.find_all():\r\n controller.the_canvas.delete(o)\r\n \r\n for b in simultons:\r\n b.display(controller.the_canvas)\r\n \r\n controller.the_progress.config(text=str(len(simultons))+\" simultons/\"+str(cycle_count)+\" cycles\")\r\n\r\n","sub_path":"Simulator/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"276810647","text":"add_library('minim')\nimport os, random\npath = os.getcwd() + \"/\"\nplayer = Minim(this)\n\nclass Creature:\n def __init__(self, x, y, r, g, img, w, h, F):\n self.x = x\n self.y = y\n self.r = r\n self.g = g\n self.img = loadImage(path + \"images/\"+img)\n self.w = w\n self.h = h\n self.f = 0\n self.F = F\n self.direction = 1\n self.vx = 0\n self.vy = 0\n \n def gravity(self):\n if self.y+self.r >= self.g:\n self.vy = 0\n else:\n self.vy += 0.4\n if self.y + self.r + self.vy > self.g:\n self.vy = self.g - (self.y+self.r)\n \n for p in g.platforms:\n if self.y + self.r <= p.y and self.x+self.r >= p.x and self.x-self.r <= p.x+p.w:\n self.g = p.y\n break\n self.g = g.g\n \n def update(self):\n self.gravity()\n \n self.x += self.vx\n self.y += self.vy\n \n def display(self):\n self.update()\n stroke(255,0,0)\n \n if self.direction == 1:\n image(self.img,self.x-self.w//2 - g.x , self.y -self.h//2, self.w, self.h, int(self.f) * self.w, 0, (int(self.f) +1)* self.w, self.h )\n elif self.direction == -1:\n image(self.img,self.x-self.w//2 - g.x, self.y -self.h//2, self.w, self.h, (int(self.f) +1) * self.w, 0, int(self.f) * self.w, self.h )\n \n if self.vx != 0:\n self.f = (self.f + .2) % self.F\n \nclass Mario(Creature):\n def __init__(self, x, y, r, g, img, w, h, F):\n Creature.__init__(self,x, y, r, g, img, w, h, F)\n self.keyHandler={LEFT:False, RIGHT:False, UP:False}\n self.jumpSound = player.loadFile(path + \"sounds/jump.mp3\")\n self.killSound = player.loadFile(path + \"sounds/kill.mp3\")\n self.starSound = player.loadFile(path + \"sounds/coin.mp3\")\n self.starCnt = 0\n \n def update(self):\n self.gravity()\n \n if self.keyHandler[LEFT]:\n self.vx = -5\n self.direction = -1\n elif self.keyHandler[RIGHT]:\n self.vx = 8\n self.direction = 1\n else:\n self.vx = 0\n \n if self.keyHandler[UP] and self.y + self.r == self.g:\n self.jumpSound.rewind()\n self.jumpSound.play()\n self.vy = -15\n \n if self.x - self.r < 0:\n self.x = self.r \n \n self.x += self.vx\n self.y += self.vy\n \n if self.x >= g.w//2:\n g.x += self.vx\n\n for s in g.stars:\n if self.distance(s) <= self.r + s.r:\n g.stars.remove(s)\n self.starSound.rewind()\n self.starSound.play()\n self.starCnt += 1\n \n\n for e in g.enemies:\n if self.distance(e) <= self.r + e.r:\n if self.vy > 0:\n g.enemies.remove(e)\n del e\n self.killSound.rewind()\n self.killSound.play()\n self.vy = -8\n else:\n g.bgSound.pause()\n 
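                # assumption: re-running Game.__init__ on the live instance is the\r\n                # intended cheap \"game over\" reset of the whole level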
                g.__init__(1280,720,585)\r\n\r\n    def distance(self, target):\r\n        return ((self.x - target.x)**2 + (self.y - target.y)**2)**0.5\r\n\r\n\r\nclass Gomba(Creature):\r\n    def __init__(self, x, y, r, g, img, w, h, F, xL, xR):\r\n        Creature.__init__(self,x, y, r, g, img, w, h, F)\r\n        self.xL = xL\r\n        self.xR = xR\r\n        self.vx = random.randint(1,5)\r\n\r\n    def update(self):\r\n        self.gravity()\r\n\r\n        if self.x > self.xR:\r\n            self.vx *= -1\r\n            self.direction = -1\r\n        elif self.x < self.xL:\r\n            self.vx *= -1\r\n            self.direction = 1\r\n\r\n        self.x += self.vx\r\n        self.y += self.vy\r\n\r\nclass Platform:\r\n    def __init__(self,x,y, w, h, img):\r\n        self.x = x\r\n        self.y = y\r\n        self.w = w\r\n        self.h = h\r\n        self.img = loadImage(path+\"images/\"+img)\r\n\r\n    def display(self):\r\n        image(self.img, self.x - g.x, self.y, self.w, self.h)\r\n\r\nclass Star(Creature):\r\n    def __init__(self, x, y, r, g, img, w, h, F, theta, r1):\r\n        Creature.__init__(self,x, y, r, g, img, w, h, F)\r\n        self.theta = theta\r\n        self.cx = x\r\n        self.cy = y\r\n        self.r1 = r1\r\n\r\n    def update(self):\r\n        self.f = (self.f + .2) % self.F\r\n\r\n        self.theta += 0.01\r\n        self.x = self.cx + self.r1*cos(self.theta)\r\n        self.y = self.cy + self.r1*sin(self.theta)\r\n\r\n\r\nclass Game:\r\n    def __init__(self, w, h, g):\r\n        self.x = 0\r\n        self.w = w\r\n        self.h = h\r\n        self.g = g\r\n        self.time = 0\r\n        self.pause = False\r\n        self.pauseSound = player.loadFile(path + \"sounds/pause.mp3\")\r\n        self.bgSound = player.loadFile(path + \"sounds/background.mp3\")\r\n        self.bgSound.play()\r\n        self.bgSound.loop()\r\n\r\n        self.enemies = []\r\n        self.platforms = []\r\n        inputFile = open(path+\"level1.csv\",\"r\")\r\n\r\n        # self.mario = Mario(50,50, 35, self.g, \"mario.png\", 100, 70, 11)\r\n\r\n        self.bgImgs = []\r\n        for i in range(5,0,-1):\r\n            self.bgImgs.append(loadImage(path+\"images/layer_0\" + str(i) + \".png\"))\r\n\r\n        # self.enemies = []\r\n        # for i in range(5):\r\n        #     self.enemies.append(Gomba(random.randint(200, 500), 0, 35, self.g, \"gomba.png\", 70, 70, 5, 200, 800))\r\n\r\n        # self.platforms = []\r\n        # for i in range(3):\r\n        #     self.platforms.append(Platform(250+i*300, 500-i*150, 200, 50, \"platform.png\"))\r\n\r\n        # for i in range(3):\r\n        #     self.platforms.append(Platform(1500+i*300, 500-i*150, 200, 50, \"platform.png\"))\r\n\r\n\r\n        for line in inputFile:\r\n            line = line.strip().split(\",\")\r\n            if line[0] == \"mario\":\r\n                self.mario = Mario(int(line[1]),int(line[2]), int(line[3]), int(line[4]), line[5], int(line[6]), int(line[7]), int(line[8]))\r\n            elif line[0] == \"platform\":\r\n                self.platforms.append(Platform(int(line[1]),int(line[2]), int(line[3]), int(line[4]), line[5]))\r\n            elif line[0] == \"gomba\":\r\n                self.enemies.append(Gomba(int(line[1]),int(line[2]), int(line[3]), int(line[4]), line[5], int(line[6]), int(line[7]), int(line[8]), int(line[9]), int(line[10])))\r\n            elif line[0] == \"ground\":\r\n                self.g = int(line[2])\r\n\r\n\r\n        self.stars = []\r\n        for i in range(7):\r\n            self.stars.append(Star(300,300,20, self.g, \"star.png\", 40,40,6, i*0.9, 100))\r\n\r\n        self.stars.append(Star(600,300,20, self.g, \"star.png\", 40,40,6, 0, 0))\r\n\r\n    def display(self):\r\n        self.time += 1\r\n        cnt = 0\r\n        x = 0\r\n        for b in self.bgImgs:\r\n            if cnt == 1:\r\n                x = self.x//4\r\n            if cnt == 2:\r\n                x = self.x//3\r\n            if cnt == 3:\r\n                x = self.x//2\r\n            if cnt == 4 or cnt == 5:  # nearest layers scroll at full speed\r\n                x = self.x\r\n            cnt += 1\r\n\r\n            image(b,0,0, self.w - x%self.w, self.h, x%self.w, 0, self.w, self.h)\r\n            image(b,self.w -x%self.w, 0, x%self.w, self.h, 0, 0, x%self.w, self.h)\r\n\r\n        for p in self.platforms:\r\n            p.display()\r\n\r\n        for e in self.enemies:\r\n            e.display()\r\n\r\n        for s in self.stars:\r\n            s.display()\r\n\r\n        self.mario.display()\r\n\r\n\r\n        textSize(30)\r\n        text(self.mario.starCnt, self.w - 50, 50)
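        # draw() runs at Processing's default ~60 fps, so time//60 approximates\r\n        # elapsed seconds (assumes the frame rate was left at its default)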
        text(self.time//60, self.w - 50, 100)\r\n\r\n\r\ng = Game(1280,720,585)\r\n\r\ndef setup():\r\n    size(g.w, g.h)\r\n    background(255)\r\n\r\ndef draw():\r\n    if not g.pause:\r\n        background(255)\r\n        g.display()\r\n\r\ndef keyPressed():\r\n    if keyCode == LEFT:\r\n        g.mario.keyHandler[LEFT] = True\r\n    elif keyCode == RIGHT:\r\n        g.mario.keyHandler[RIGHT] = True\r\n    elif keyCode == UP:\r\n        g.mario.keyHandler[UP] = True\r\n    elif keyCode == 80:  # 'P' toggles pause\r\n        if g.pause:\r\n            g.pause = False\r\n            g.bgSound.play()\r\n        else:\r\n            g.pause = True\r\n            g.bgSound.pause()\r\n            g.pauseSound.rewind()\r\n            g.pauseSound.play()\r\n\r\ndef keyReleased():\r\n    if keyCode == LEFT:\r\n        g.mario.keyHandler[LEFT] = False\r\n    elif keyCode == RIGHT:\r\n        g.mario.keyHandler[RIGHT] = False\r\n    elif keyCode == UP:\r\n        g.mario.keyHandler[UP] = False\r\n","sub_path":"mario002/mario002.pyde","file_name":"mario002.pyde","file_ext":"pyde","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"558147755","text":"# The import statement loads a module and its functions into your program (press Ctrl and click on the module name to open its source and docstrings)\nimport random\n\nrandom_integer = random.randint(1, 4)  # This generates a random integer 1 <= random no. <= 4\nprint(random_integer)\n\nrand = random.random()  # This generates a random floating point no. between 0 & 1\nprint(rand)\n\nrand = random.random() * 100  # This generates a random floating point no. between 0 & 100\nprint(rand)\n\nl = [\"Rock\", \"Blues\", \"Jazz\", \"Country\", \"Metal\"]\nchoice = random.choice(l)\nprint(choice)\n\n","sub_path":"Random Module in Python.py","file_name":"Random Module in Python.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"322571332","text":"\"\"\"{\n    \"wall-time\": 5.825495004653931,\n    \"dataset\": \"dstc2_dev\",\n    \"sessions\": [\n        {\n            \"session-id\": \"voip-f246dfe0f2-20130328_161556\",\n            \"turns\": [\n                {\n                    \"goal-labels\": {\n                        \"pricerange\": {\n                            \"expensive\": 0.9883454175454712\n                        },\n                        \"area\": {\n                            \"south\": 0.9673269337257503\n                        }\n                    },\n                    \"goal-labels-joint\": [\n                        {\n                            \"slots\": {\n                                \"pricerange\": \"expensive\",\n                                \"area\": \"south\"\n                            },\n                            \"score\": 0.9777797002475338\n                        }\n                    ],\n                    \"method-label\": {\n                        \"byconstraints\": 0.9999999999999999\n                    },\n                    \"requested-slots\": {}\n                }\n            ]\n        }\n    ]\n}\n\"\"\"\n\nimport collections\nimport time\nimport json\nimport logging\nimport numpy as np\nimport argparse\n\nfrom data import Data, Tagger\nfrom dstc5_scripts import ontology_reader\nfrom utils import pdb_on_error\nfrom model import Model\nfrom model_baseline import BaselineModel\n\nfrom dstc5_scripts.stat_classes import (\n    Stat_Accuracy, Stat_Frame_Precision_Recall\n)\n\ndef init_logging():\n    # Setup logging.\n    logger = logging.getLogger('XTrack')\n    logger.setLevel(logging.DEBUG)\n\n    logging_format = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n    formatter = logging.Formatter(logging_format)\n\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n\n    logging.root = logger\n\n\nclass XTrack2DSTCTracker(object):\n    def __init__(self, data, models, ontology):\n        assert len(models), 'You need to specify some models.'\n\n        self.data = data\n        self.models = models\n        self.main_model = models[0]\n        self.ontology = ontology\n\n        self.classes_rev = {}\n        for slot in self.data.slots:\n            self.classes_rev[slot] = {\n                val: key\n                for key, val in self.data.classes[slot].iteritems()\n            }\n\n        self.slot_groups = data.slot_groups
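        # the tagger is only needed later, to denormalize predicted tag values\n        # (see _replace_tags_for_slot)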
self.tagger = Tagger()\n\n def _label_id_to_str(self, label):\n res = {}\n for slot in self.data.slots:\n res[slot] = self.classes_rev[slot][label[slot]]\n return res\n\n def build_output(\n self,\n pred,\n label,\n segment_id,\n in_accuracy_stat,\n in_frame_precision_recall_stat\n ):\n raw_labels = {}\n raw_label_probs = {}\n for i, slot in enumerate(self.data.slots):\n val = np.argmax(pred[i])\n val_prob = pred[i][val]\n if pred[i][val] == 0.0:\n val = 0\n raw_labels[slot] = val\n raw_label_probs[slot] = val_prob\n\n lbl = self._label_id_to_str(label)\n pred = self._label_id_to_str(raw_labels)\n for slot in self.data.slots:\n self.track_log.write(\n \" %s lbl(%s) pred(%s)\\n\" % (\n slot,\n lbl[slot],\n pred[slot]\n )\n )\n goals_correct = {}\n for group, slots in self.slot_groups.iteritems():\n goals_correct[group] = True\n for i, slot in enumerate(slots):\n goals_correct[group] &= raw_labels[slot] == label[slot]\n\n label_list = {key: [value] for key, value in label.items()}\n raw_labels_list = {key: [value] for key, value in raw_labels.items()}\n in_accuracy_stat.add(label_list, raw_labels_list)\n in_frame_precision_recall_stat.add(label_list, raw_labels_list)\n\n goal_labels = {\n slot: pred[slot]\n for slot in self.data.slots\n if pred[slot] != self.data.null_class \\\n # and slot in self.ontology.tagsets.get(segment_id, {}).keys() \\\n # and pred[slot] in self.ontology.tagsets.get(segment_id, {}).get(slot, [])\n }\n\n tracker_output = {\n 'frame_label': goal_labels\n }\n return tracker_output, goals_correct\n\n def _label_empty(self, lbl):\n res = True\n for val in lbl.values():\n res &= val == 0\n return res\n\n def _make_model_predictions(self, data):\n preds = []\n for model in self.models:\n pred = model._predict(*data)\n preds.append(pred)\n return preds\n\n def track(self, tracking_log_file_name=None, output_len_accuracy=False):\n accuracy_stat = Stat_Accuracy()\n frame_precision_recall_stat = Stat_Frame_Precision_Recall()\n data = self.main_model.prepare_data_predict(\n self.data.sequences,\n self.data.slots\n )\n\n preds = self._make_model_predictions(data)\n\n pred = []\n for slot_preds in zip(*preds):\n slot_res = np.array(slot_preds[0])\n for slot_pred in slot_preds[1:]:\n slot_res += slot_pred\n pred.append(slot_res / len(slot_preds))\n\n pred_ptr = 0\n\n result = []\n if tracking_log_file_name:\n self.track_log = open(tracking_log_file_name, 'w')\n else:\n self.track_log = open('/dev/null', 'w')\n\n for dialog in self.data.sequences:\n self.track_log.write(\">> Dialog: %s\\n\" % dialog['id'])\n self.track_log.write(\"\\n\")\n turns = []\n last_pos = 0\n state_component_mentioned = False\n for utter_index, lbl in enumerate(dialog['labels']):\n last_pos = lbl['time'] + 1\n segment_id, segment_bio = lbl['segment_id'], lbl['segment_bio']\n\n out, goals_correct = self.build_output(\n [\n pred[i][pred_ptr]\n for i, _ in enumerate(self.data.slots)\n ],\n lbl['slots'],\n segment_id,\n accuracy_stat,\n frame_precision_recall_stat\n )\n if dialog['tags']:\n dialog['tags'] = self._replace_tags(out, dialog['tags'])\n #self.track_log.write(json.dumps(out))\n #self.track_log.write(\"\\n\")\n if segment_bio == 'O':\n del out['frame_label']\n else:\n out['frame_label'] = \\\n self._denormalize_frame_label(out['frame_label'])\n out['utter_index'] = utter_index\n turns.append(out)\n pred_ptr += 1\n\n if not self._label_empty(lbl['slots']) or state_component_mentioned:\n state_component_mentioned = True\n\n result.append({\n 'session_id': dialog['id'],\n 'utterances': turns\n })\n\n if 
len(pred[0]) != pred_ptr:\n raise Exception('Data mismatch.')\n\n stats = {\n 'accuracy': accuracy_stat.results()[0][2],\n 'frame_precision': frame_precision_recall_stat.results()[0][2],\n 'frame_recall': frame_precision_recall_stat.results()[1][2],\n 'frame_f1': frame_precision_recall_stat.results()[2][2]\n }\n res = [result, stats]\n return tuple(res)\n\n def _denormalize_frame_label(self, in_frame_label):\n new_frame_label = {}\n for slot, value in in_frame_label.items():\n denormalized_value = value.split('___')\n denormalized_value = [\n atomic_value.replace('_', ' ')\n for atomic_value in denormalized_value\n ]\n new_frame_label[slot] = denormalized_value\n return new_frame_label\n\n def _replace_tags(self, out, tags):\n new_tags = {}\n for slot, values in out['frame_label'].iteritems():\n new_tags[slot] = self._replace_tags_for_slot(slot, tags, values)\n return new_tags\n\n def _replace_tags_for_slot(self, slot, tags, values):\n new_res = []\n for slot_val in values:\n if slot_val.startswith('#%s' % slot):\n tag_id = int(slot_val.replace('#%s' % slot, ''))\n try:\n tag_list = tags.get(slot, [])\n tag_val = tag_list[tag_id]\n tag_val = self.tagger.denormalize_slot_value(tag_val)\n new_res.append(tag_val)\n except IndexError:\n # This happens when the we predict a tag that\n # does not exist.\n new_res.append('_null_')\n else:\n new_res.append(slot_val)\n return new_res\n\n\ndef main(dataset, data_file, output_file, params_file, model_type, ontology):\n models = []\n for pf in params_file:\n logging.info('Loading model from: %s' % pf)\n if model_type == 'lstm':\n model_cls = Model\n elif model_type == 'baseline':\n model_cls = BaselineModel\n models.append(model_cls.load(pf, build_train=False))\n\n logging.info('Loading data: %s' % data_file)\n data = Data.load(data_file)\n\n logging.info('Starting tracking.')\n tracker = XTrack2DSTCTracker(\n data,\n models,\n ontology_reader.OntologyReader(ontology)\n )\n\n t = time.time()\n result, stats = tracker.track(output_len_accuracy=True)\n t = time.time() - t\n logging.info('Tracking took: %.1fs' % t)\n logging.info('Tracking stats: ')\n for metric, value in stats.items():\n logging.info('%s: %.5f %%' % (metric, value * 100))\n\n tracker_output = {\n 'wall_time': float(t),\n 'dataset': dataset,\n 'sessions': result\n }\n\n logging.info('Writing to: %s' % output_file)\n with open(output_file, 'w') as f_out:\n json.dump(tracker_output, f_out, indent=4)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', required=True)\n parser.add_argument('--data_file', required=True)\n parser.add_argument('--output_file', required=True)\n parser.add_argument('--params_file', action='append', required=True)\n parser.add_argument('--model_type', default='lstm'),\n parser.add_argument('--ontology', required=True)\n\n pdb_on_error()\n init_logging()\n main(**vars(parser.parse_args()))\n","sub_path":"dstc_tracker.py","file_name":"dstc_tracker.py","file_ext":"py","file_size_in_byte":10521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367460747","text":"import os\nimport pytest\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ[\"MOLECULE_INVENTORY_FILE\"]\n).get_hosts(\"all\")\n\n\n@pytest.mark.parametrize(\n \"name\", [\"python3-certbot\", \"python3-certbot-dns-route53\", \"ssl-cert\"]\n)\ndef test_packages(host, name):\n p = host.package(name)\n assert 
p.is_installed\n\n\n@pytest.mark.parametrize(\n \"path\",\n [\n \"/etc/letsencrypt/cli.ini\",\n \"/etc/letsencrypt/hooks/deploy\",\n \"/etc/letsencrypt/hooks/post\",\n \"/etc/letsencrypt/hooks/pre\",\n \"/etc/ssl/certs/ssl-cert-snakeoil.pem\",\n \"/etc/ssl/private/ssl-cert-snakeoil-combined.pem\",\n \"/etc/ssl/private/ssl-cert-snakeoil.key\",\n \"/usr/bin/certbot\",\n ],\n)\ndef test_files(host, path):\n f = host.file(path)\n assert f.exists\n\n\ndef test_command(host):\n with host.sudo():\n cmd = host.check_output(\"certbot --version\")\n assert \"certbot \" in cmd, cmd\n","sub_path":"molecule/default/tests/test_certbot.py","file_name":"test_certbot.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"549016103","text":"# Copyright (c) 2017 LINE Corporation\n# These sources are released under the terms of the MIT license: see LICENSE\n\nfrom unittest import mock\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom promgen import models, prometheus\nfrom promgen.tests import TEST_RULE\n\n\n_RULES = '''\n# Service: Service 1\n# Service URL: /service/1/\nALERT RuleName\n IF up==0\n FOR 1s\n LABELS {severity=\"severe\"}\n ANNOTATIONS {service=\"http://example.com/service/1/\", summary=\"Test case\"}\n\n\n'''.lstrip()\n\n\nclass RuleTest(TestCase):\n @mock.patch('django.db.models.signals.post_save', mock.Mock())\n @mock.patch('django.db.models.signals.pre_save', mock.Mock())\n def setUp(self):\n self.shard = models.Shard.objects.create(name='Shard 1')\n self.service = models.Service.objects.create(id=1, name='Service 1', shard=self.shard)\n self.rule = models.Rule.objects.create(\n name='RuleName',\n clause='up==0',\n duration='1s',\n service=self.service\n )\n models.RuleLabel.objects.create(name='severity', value='severe', rule=self.rule)\n models.RuleAnnotation.objects.create(name='summary', value='Test case', rule=self.rule)\n\n @mock.patch('django.db.models.signals.post_save')\n def test_write(self, mock_render):\n result = prometheus.render_rules()\n self.assertEqual(result, _RULES)\n\n @mock.patch('django.db.models.signals.post_save')\n def test_copy(self, mock_render):\n service = models.Service.objects.create(name='Service 2', shard=self.shard)\n copy = self.rule.copy_to(service)\n # Test that our copy has the same labels and annotations\n self.assertIn('severity', copy.labels)\n self.assertIn('summary', copy.annotations)\n # and test that we actually duplicated them and not moved them\n self.assertEqual(models.RuleLabel.objects.count(), 3, 'Copied rule has exiting labels + service label')\n self.assertEqual(models.RuleAnnotation.objects.count(), 2)\n\n @mock.patch('django.db.models.signals.post_save')\n def test_import(self, mock_render):\n self.client.post(reverse('import'), {\n 'rules': TEST_RULE\n })\n\n # Includes count of our setUp rule + imported rules\n self.assertEqual(models.Rule.objects.count(), 3, 'Missing Rule')\n self.assertEqual(models.RuleLabel.objects.count(), 4, 'Missing labels')\n self.assertEqual(models.RuleAnnotation.objects.count(), 7, 'Missing annotations')\n","sub_path":"promgen/tests/test_rules.py","file_name":"test_rules.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"628868242","text":"import requests\nimport time\nimport os\nimport sys \ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\ndm_dir = 
os.path.dirname(current_dir)\npkg_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nsys.path.append(pkg_dir)\nfrom influxdb_client.influxdb_client_host_1 import InfluxClientHost1\nfrom influxdb_client.influxdb_client_host_2 import InfluxClientHost2\nfrom influxdb_client.influxdb_client_qa_host_1 import InfluxClientHostQA1\n\nhost_1 = InfluxClientHostQA1()\nhost_2 = InfluxClientHost2()\n\n\nmeasurement = \"exchange_open_interest\"\n\n\ndef get_swap_tickers(base_token):\n base = \"https://www.okex.com/v2/\"\n market_overview_endpoint = \"perpetual/pc/public/contracts/tickers?type={}\".format(base_token)\n market_overview = base + market_overview_endpoint\n response = requests.get(market_overview)\n resp = response.json()\n data = resp['data']\n return data\n\ndef swap_usd_update(swap_data,measurement):\n for sd in swap_data:\n fields = {}\n usd_oi = float(sd['holdAmount']) * float(sd['unitAmount'])\n coin_oi = float(sd['holdAmount'])/(float(sd['volume'])/float(sd['coinVolume']))\n fields.update({\"coin_denominated_open_interest\":float(coin_oi)})\n fields.update({\"coin_denominated_symbol\":sd['coinName']})\n fields.update({\"usd_denominated_open_interest\":float(usd_oi)})\n fields.update({\"is_api_return_timestamp\": False})\n tags = {}\n tags.update({\"contract_symbol\":sd['contract']})\n tags.update({\"contract_exchange\":\"Okex\"})\n dbtime = False\n host_1.write_points_to_measurement(measurement,dbtime,tags,fields)\n\ndef swap_usdt_update(swap_data,measurement):\n for sd in swap_data:\n fields = {}\n coin_oi = float(sd['holdAmount']) * float(sd['unitAmount'])\n usd_oi = coin_oi * float(sd['close'])\n fields.update({\"coin_denominated_open_interest\":float(coin_oi)})\n fields.update({\"coin_denominated_symbol\":sd['coinName']})\n fields.update({\"usd_denominated_open_interest\":float(usd_oi)})\n fields.update({\"is_api_return_timestamp\": False})\n tags = {}\n tags.update({\"contract_symbol\":sd['contract']})\n tags.update({\"contract_exchange\":\"Okex\"})\n dbtime = False\n host_1.write_points_to_measurement(measurement,dbtime,tags,fields)\n\ndef subscribe_swap_ticker(measurement):\n usd_swap = get_swap_tickers(\"USD\")\n swap_usd_update(usd_swap,measurement)\n usdt_swap = get_swap_tickers(\"USDT\") \n swap_usdt_update(usdt_swap,measurement)\n\n\n\nif __name__ == '__main__':\n subscribe_swap_ticker(measurement)\n while True:\n time.sleep(55)\n subscribe_swap_ticker(measurement)\n","sub_path":"data_open_interest/exchanges_open_interest_host_1_qa/okex/open_interest_okex_swaps.py","file_name":"open_interest_okex_swaps.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393954892","text":"# -*- encoding: utf-8\n\"\"\"\n线性查找\n\"\"\"\nfrom __future__ import print_function\n\n\ndef linear_search(seq, target):\n for index, item in enumerate(seq):\n if item == target:\n return index\n return None\n\n\nif __name__ == '__main__':\n sequence = [1, 5, 6, 89, 123, 4345, 66]\n print(linear_search(sequence, 5))\n","sub_path":"searchs/linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613535039","text":"#!/usr/bin/env python\n\nimport datetime\nimport json\nimport operator\nimport os\nimport re\nimport requests\nimport sys\nimport subprocess\nimport yaml\n\n\nBOOTSTRAP_TABLE_HEADER = '''\n\n\n\n \n \n \n 
<html>\n<head></head>\n<body>\n'''\n\nBOOTSTRAP_TABLE_FOOTER = '''\n</body>\n</html>\n'''\n\n\ndef report_galaxy_modules_tally(rdict, destfile=None, config=None):\n\n    if not destfile:\n        assert 'html_directory' in config, \"Configuration must include html_directory\"\n        hd = config.get('html_directory', None)\n        assert hd, \"Configuration's html_directory must not be null\"\n        rpath = os.path.join(hd, 'galaxy', 'galaxy_modules_reference_tally.html')\n    else:\n        rpath = destfile\n\n    m_tuples = []\n    for k,v in rdict.items():\n        #mtup = (v, k)\n        mtup = (\n            len(sorted(set(v['files']))),\n            len(sorted(set(v['checkouts']))),\n            len(sorted(set([x for x in v['checkouts'] if 'galaxy/roles/' in x]))),\n            k\n        )\n        m_tuples.append(mtup)\n        #import epdb; epdb.st()\n\n    # sort by count\n    m_tuples.sort(key=lambda tup: tup[0], reverse=True)\n\n    today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n\n    with open(rpath, 'w') as f:\n        f.write(BOOTSTRAP_TABLE_HEADER)\n        f.write('<div class=\"container\">\\n')\n        f.write('<div class=\"row\">\\n<h2>Module References in Github Repos (%s)</h2>\\n' % today)\n        f.write('<table class=\"table table-striped\">\\n')\n        f.write('<thead>\\n<tr>\\n')\n        f.write('<th></th>\\n')\n        f.write('<th>module</th>\\n')\n        f.write('<th>total references</th>\\n')\n        f.write('<th>total unique github repos</th>\\n')\n        f.write('<th>total unique galaxy roles</th>\\n')\n        f.write('</tr>\\n</thead>\\n')\n\n        f.write('<tbody>\\n')\n        for tidx,tup in enumerate(m_tuples):\n            f.write('<tr>\\n')\n            f.write('<td>%s</td>\\n' % (tidx + 1))\n            f.write('<td>%s</td>\\n' % tup[-1])\n            f.write('<td>%s</td>\\n' % tup[0])\n            f.write('<td>%s</td>\\n' % tup[1])\n            f.write('<td>%s</td>\\n' % tup[2])\n            f.write('</tr>\\n')\n        f.write('</tbody>\\n')\n\n        f.write('</table>\\n')\n        f.write('</div>\\n')\n        f.write('</div>
\\n')\n f.write(BOOTSTRAP_TABLE_FOOTER)\n\n","sub_path":"gdash/galaxy/texttools.py","file_name":"texttools.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"167356251","text":"age = 311\n\nname = \"Bek\"\n\ntodayIsCold = False\n\nclass Car:\n def _init_(self, year, make, model):\n self.year = year\n self.make = make\n self.model = model\n\n\n def age(self):\n return 2020 - self.year\n","sub_path":"python_refresher.py","file_name":"python_refresher.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269695955","text":"# -*- coding: utf-8 -*-\n# -*- coding: utf-8 -*-\na = {'kolia': 'c',\n 'vasia': 'c',\n 'dima': 'python',}\n# set() это множество в котором в котороом все значения уникальны\nfor n in set(sorted(a.values())): # вывести только уникальные значения и попорядку\n print(n.title()) # вывод с большой буквы\n\nb = {'nile':'egipet',\n 'gorin':'stolin',\n 'kopanec':'mankovichi',}\nfor key, values in b.items():\n print(key, ' протекает в ', values)\n print(key)\n\n\n","sub_path":"knigapython/113_set().py","file_name":"113_set().py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641864561","text":"'''\r\nCreated on 13.01.2017\r\n\r\n@author: Thomas Ziegs\r\n'''\r\nimport threading\r\n\r\nclass RWSyncNoWait(object):\r\n def __init__(self):\r\n self.__methodLock = threading.Lock()\r\n self.__atLeastOneWriterTry = False\r\n self.__readerCount = 0\r\n self.__writerActive = False\r\n \r\n def acquireRead(self):\r\n success = False\r\n if self.__methodLock.acquire(False):\r\n if not self.__atLeastOneWriterTry and not self.__writerActive:\r\n self.__readerCount += 1\r\n success = True\r\n self.__methodLock.release()\r\n return success\r\n \r\n def releaseRead(self):\r\n if self.__methodLock.acquire(False):\r\n self.__readerCount -= 1\r\n self.__methodLock.release()\r\n \r\n def acquireWrite(self):\r\n success = False\r\n self.__atLeastOneWriterTry = True\r\n if self.__methodLock.acquire(False):\r\n if self.__readerCount == 0 and not self.__writerActive:\r\n self.__writerActive = True\r\n success = True\r\n self.__methodLock.release()\r\n return success \r\n \r\n def releaseWrite(self):\r\n if self.__methodLock.acquire(False):\r\n self.__atLeastOneWriterTry = False\r\n self.__writerActive = False\r\n self.__methodLock.release()","sub_path":"RWSyncNoWait.py","file_name":"RWSyncNoWait.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"170082405","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\n\nlong_description = ''\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError):\n with open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\n__title__ = ''\n__version__ = ''\n__summary__ = ''\n__uri__ = ''\n__author__ = ''\n__author_email__ = ''\nexec(open('./qface/__about__.py').read())\n\nsetup(\n name=__title__,\n version=__version__,\n description=__summary__,\n long_description=long_description,\n url=__uri__,\n author=__author__,\n author_email=__author_email__,\n 
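    # name, version, author and friends above come from qface/__about__.py,\n    # pulled in by the exec() call near the top of this file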
    license='GPLV3',\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Intended Audience :: Developers',\n        'Topic :: Software Development :: Code Generators',\n        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.3',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n    ],\n    keywords='qt code generator framework',\n    packages=find_packages(),\n    include_package_data=True,\n    install_requires=[\n        'jinja2',\n        'path.py',\n        'pyyaml',\n        'antlr4-python3-runtime>=4.6',\n        'click',\n        'watchdog',\n    ],\n    extras_require={\n        'dev': [\n            'watchdog',\n            'pypandoc',\n        ],\n        'test': [\n            'pytest',\n            'watchdog',\n            'ipdb',\n        ],\n    },\n    entry_points={\n        'console_scripts': [\n            'qface-qtcpp = qface.builtin.qtcpp.qtcpp:app',\n            'qface-qtqml = qface.builtin.qtqml.qtqml:app',\n        ],\n    },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"318055957","text":"from typing import Optional\n\n\n__all__ = [\n    'SingletonMeta'\n]\n\n\nclass SingletonMeta(type):\n    \"\"\"\n    Singleton metaclass\n    \"\"\"\n\n    _instance: Optional[type] = None\n\n    def __call__(cls, *args, **kwargs) -> type:\n        if cls._instance is None:\n            cls._instance = super().__call__(*args, **kwargs)\n        return cls._instance\n","sub_path":"utils/metaclass.py","file_name":"metaclass.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"197449197","text":"# -*- coding: utf-8 -*-\r\nfrom perform.defines import *\r\nfrom perform.object import PhyAttackPerform as CustomPerform\r\n\r\n# table export begin\nclass Perform(CustomPerform):\n\tid = 1121\n\tname = \"神通符法\"\n\ttargetType = PERFORM_TARGET_ENEMY\n\ttargetCount = 1\n\tdamage = lambda self,SLV:SLV*2+20\n\tpower = 120\n\tconsumeList = {\n\t\t\"真气\": lambda SLV:SLV*1.2+20,\n\t\t\"符能\": 10,\n\t}\n\tspeRatio = 100\n\tconfigInfo = {\n\t\t\"概率\":50,\n\t}\n# table export end\r\n\r\n\tdef afterPerform(self, att, vicCast):\r\n\t\tCustomPerform.afterPerform(self, att, vicCast)\r\n\t\tif hasattr(self, \"moveDone\"):\r\n\t\t\tdel self.moveDone\r\n\r\n\tdef afterAttack(self, att, vic, vicCast, dp, targetCount):\r\n\t\tCustomPerform.afterAttack(self, att, vic, vicCast, dp, targetCount)\r\n\r\n\t\tif hasattr(self, \"moveDone\"):\r\n\t\t\treturn\r\n\t\tif dp <= 0:\r\n\t\t\treturn\r\n\t\tif not att.inWar() or not vic.inWar():\r\n\t\t\treturn\r\n\t\tif rand(100) >= self.configInfo[\"概率\"]:\r\n\t\t\treturn\r\n\r\n\t\tif not self.tryGetBuff(att, vic):\r\n\t\t\tself.tryRemoveBuff(att, vic)\r\n\t\telif not self.tryRemoveBuff(att, vic):\r\n\t\t\tself.tryGetBuff(att, vic)\r\n\r\n\t\tif hasattr(self, \"moveDone\"):\r\n\t\t\tfor func in att.getFuncList(\"onMoveDone\"):\r\n\t\t\t\tfunc(att, vic, vicCast, dp, self.getAttackType())\r\n\r\n\tdef tryGetBuff(self, att, vic):\r\n\t\t'''Transfer one of the target's positive buffs to the attacker.\r\n\t\t'''\r\n\t\tfor lst in vic.buffList.values():\r\n\t\t\tfor bfObj in shuffleList(lst):\r\n\t\t\t\tif not bfObj:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif bfObj.type not in (BUFF_TYPE_BUFF,):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbuff.remove(vic, bfObj.id)\r\n\t\t\t\tbuff.fork(att, bfObj)\r\n\t\t\t\tself.moveDone = True\r\n\t\t\t\treturn 1\r\n\r\n\t\treturn 0\r\n\r\n\tdef tryRemoveBuff(self, att, vic):\r\n\t\t'''Transfer one of the attacker's debuffs to the target.\r\n\t\t'''
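\t\t# shuffleList is assumed to return the buffs in random order, so the\r\n\t\t# transferred debuff is picked uniformly at random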
\t\tfor lst in att.buffList.values():\r\n\t\t\tfor bfObj in shuffleList(lst):\r\n\t\t\t\tif not bfObj:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif bfObj.type not in (BUFF_TYPE_DEBUFF, BUFF_TYPE_SEAL,):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbuff.remove(att, bfObj.id)\r\n\t\t\t\tbuff.fork(vic, bfObj)\r\n\t\t\t\tself.moveDone = True\r\n\t\t\t\treturn 1\r\n\r\n\t\treturn 0\r\n\r\n\r\nfrom common import *\r\nfrom buff.defines import *\r\nimport buff\r\n\r\n","sub_path":"logic/perform/school/pf1121.py","file_name":"pf1121.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"166211546","text":"### Read the four values for the x and y axes of two arbitrary points in the plane, p1(x1,y1) and p2(x2,y2), and compute the\n### distance between them, showing 4 digits after the decimal point, using the formula: Distancia = ((x2 - x1)² + (y2 - y1)²)^(1/2). The\n### input file contains two lines of data: the first line holds two floating-point values x1 y1 and the second line holds two\n### floating-point values x2 y2. Compute and print the distance with 4 digits after the decimal point. Q:1015\n\nx1, y1 = input().split()\nx2, y2 = input().split()\n\nx1 = float(x1)\nx2 = float(x2)\ny1 = float(y1)\ny2 = float(y2)\n\ndis = ((x2 - x1)**2 + (y2 - y1)**2)**(1/2)\n\nprint(\"%.4f\" % dis)\n","sub_path":"1015.py","file_name":"1015.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"602435481","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[13]:\n\n\ndef reverse(S):\n    new_list = []\n    for i in range(len(S)-1, -1, -1):\n        new_list.append(S[i])\n    s = \"\".join(new_list)\n    return s\nreverse(\"junyiacademy\")\n\n\n# In[12]:\n\n\ndef reverse_sentense(S):\n    new_list = []\n    flag = 0\n    for i in range(len(S)):\n        if S[i] == \" \":\n            s = reverse(S[flag:i+1])\n            new_list.append(s)\n            flag = i+1\n        elif i == len(S)-1:\n            s = reverse(S[flag:])\n            new_list.append(\" \"+ s)\n        else:\n            continue\n    a = \"\".join(new_list)\n    return a\nreverse_sentense('flipped class room is important')\n\n\n# In[15]:\n\n\ndef count_num(num):\n    list = []\n    for i in range(1, num+1):\n        if i % 15 == 0:\n            list.append(i)\n        else:\n            if i % 3 == 0 or i % 5 == 0:\n                continue\n            else:\n                list.append(i)\n    return len(list)\ncount_num(15)\n\n\n","sub_path":"均一題目1、2.py","file_name":"均一題目1、2.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"106969764","text":"# coding=utf-8\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nimport config.global_configs as global_configs\n\n\nclass BismarkAligner(object):\n    def __init__(self, first_mate, second_mate, output_dir, temp_dir, log_file = None):\n        self.first_mate = first_mate\n        self.second_mate = second_mate\n        self.output_dir = output_dir\n        self.temp_dir = temp_dir\n        self.log_file = log_file if log_file else os.path.join(self.output_dir, \"log.txt\")\n\n    def generate_command(self):\n        cfg = global_configs.project_config\n        command = cfg.bismark_path\n        command += \" --bowtie2 \" if cfg.use_bowtie_2 else \" \"\n        command += \" --path_to_bowtie \" + cfg.bowtie_path\n        command += \" -n 1 \"\n        command += \" -p 4 \"\n        command += cfg.converted_genome_path\n        command += \" -1 \" + self.first_mate + \" -2 \" + self.second_mate\n        command += \" -o \" + self.output_dir\n        command += \" --temp_dir \" + self.temp_dir\n        command += \" --bam \"\n        command += \" --samtools_path \" + cfg.samtools_path\n        command += \" > \" + self.log_file\n        command += \" 2>/dev/null \"\n        return command\n","sub_path":"tools/bismark/bismark_aligner.py","file_name":"bismark_aligner.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"329058879","text":"\nimport numpy as np\nimport ffmpeg\nfrom slicerator import Slicerator\n\n__all__ = ['ReadVideoFFMPEG', 'WriteVideoFFMPEG']\n\n\n@Slicerator.from_class\nclass ReadVideoFFMPEG:\n    \"\"\"\n    ReadVideoFFMPEG reads images from video using FFMPEG\n\n    Attributes\n    ----------\n    filename : str\n        full path and filename for video to be read\n    width : int\n        width of frame being read\n    height : int\n        height of frame being read\n    num_frames : int\n        total number of frames in video\n\n    Examples\n    --------\n\n    | readVid = ReadVideoFFMPEG(filename)\n    | img = readVid.read_frame()\n    \"\"\"\n\n    def __init__(self, filename):\n        self.filename = filename\n        self._get_info()\n        self._setup_process()\n\n    def _get_info(self):\n        probe = ffmpeg.probe(self.filename)\n        video_info = next(\n            s for s in probe['streams'] if s['codec_type'] == 'video')\n        self.width = int(video_info['width'])\n        self.height = int(video_info['height'])\n        self.num_frames = int(video_info['nb_frames'])\n\n    def _setup_process(self):\n        self.process = (\n            ffmpeg\n            .input(self.filename)\n            .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n            .run_async(pipe_stdout=True, quiet=False)\n        )\n\n    def read_frame(self):\n        \"\"\"\n        read_frame reads the next image from the video\n\n        :return: np.ndarray\n        \"\"\"\n        frame_bytes = self.process.stdout.read(self.width * self.height * 3)\n        frame = (\n            np.frombuffer(frame_bytes, np.uint8)\n            .reshape([self.height, self.width, 3]))\n        return frame\n\n    def read_frame_bytes(self):\n        frame_bytes = self.process.stdout.read(self.width * self.height * 3)\n        return frame_bytes\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        try:\n            return self.read_frame()\n        except ValueError:  # an empty read cannot be reshaped: end of stream\n            raise StopIteration\n\n    def __getitem__(self, key):\n        # frames are decoded sequentially; the index is not used for random access\n        return self.read_frame()\n\n    def __len__(self):\n        return self.num_frames\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.close()\n\n    def close(self):
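        # only stdout is piped for the read process (run_async(pipe_stdout=True)),\n        # so that is the stream to close before waiting on ffmpeg to exit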
        self.process.stdout.close()\n        self.process.wait()\n\n\nclass WriteVideoFFMPEG:\n    \"\"\"WriteVideoFFMPEG writes images to file with FFMPEG\n\n    Attributes\n    ----------\n    filename : str\n        Full path and filename to output file\n    bitrate : str\n        bitrate affects video quality and file size\n    framerate : int\n        frames per second playback of video\n    speed : str\n        libx264 encoder preset; faster presets trade file size for encode speed\n\n\n    Examples\n    --------\n    | with WriteVideo(filename) as writevid:\n    |     writevid.add_frame(img)\n    | writevid.close()\n\n    \"\"\"\n\n    def __init__(self, filename, speed='superfast', bitrate='LOW4K', framerate=50.0):\n        self.filename = filename\n        self.frame_no = 0\n        bitrates = {\n            'LOW4K': '20000k',\n            'MEDIUM4K': '50000k',\n            'HIGH4K': '100000k',\n            'LOW1080': '5000k',\n            'MEDIUM1080': '10000k',\n            'HIGH1080': '20000k'}\n        self.video_bitrate = bitrates[bitrate]\n        self.framerate = framerate\n        self.preset = speed\n\n    def add_frame(self, frame):\n        \"\"\"\n        add next frame to the video being written\n\n        :param frame: np.ndarray\n            frame to be added to the video\n        :return: None\n        \"\"\"\n\n        if self.frame_no == 0:\n            width = np.shape(frame)[1]\n            height = np.shape(frame)[0]\n            self._setup_process(width, height)\n        self.process.stdin.write(frame.astype(np.uint8).tobytes())\n        self.frame_no += 1\n\n    def add_frame_bytes(self, frame, width, height):\n        if self.frame_no == 0:\n            self._setup_process(width, height)\n        self.process.stdin.write(frame)\n        self.frame_no += 1\n\n    def _setup_process(self, width, height):\n        self.process = (\n            ffmpeg\n            .input(\n                'pipe:',\n                format='rawvideo',\n                pix_fmt='rgb24',\n                s='{}x{}'.format(width, height),\n                r=self.framerate  # rate of the incoming raw frames; keep in sync with the output rate\n            )\n            .output(\n                self.filename,\n                pix_fmt='yuv420p',\n                vcodec='libx264',\n                preset=self.preset,\n                video_bitrate=self.video_bitrate,\n                r=self.framerate\n            )\n            .overwrite_output()\n            .run_async(\n                pipe_stdin=True,\n                quiet=False\n            )\n        )\n\n    def close(self):\n        \"\"\"\n        Release video object\n        :return: None\n        \"\"\"\n\n        self.process.stdin.close()\n        self.process.wait()\n","sub_path":"labvision/video/ffmpeg_io.py","file_name":"ffmpeg_io.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"620125942","text":"from django.urls import path\r\nfrom .views import rentacars, searched_rentacars, search_vehicles, rentacar_detail, quick_reserve, rentacar_service_reports, rate_vehicle, find_vehicles, reserve_vehicle, search_vehicles_after_reservation\r\nurlpatterns = [\r\n    path('', rentacars, name='rentacars'),\r\n    path('search', searched_rentacars, name='search_rentacars'),\r\n    path('search_vehicles', search_vehicles, name='search_vehicles'),\r\n    path('rentacar/', rentacar_detail, name='rentacar_with_id'),\r\n    path('quick_rentacar_reservation/', quick_reserve, name='quick_reserve'),\r\n    path('find_vehicles/', find_vehicles, name='find_vehicles'),\r\n    path('reserve_vehicle', reserve_vehicle, name='reserve_vehicle'),\r\n    path('search_vehicles_after_reservation', search_vehicles_after_reservation, name='search_vehicles_after_reservation'),\r\n    path('get_rentacar_service_reports', rentacar_service_reports, name='rentacar_service_reports'),\r\n    path('rate_vehicle', rate_vehicle, name='rate_vehicle'),\r\n]","sub_path":"Tim11/RentACarService/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"115850767","text":"# encoding: utf-8\n\"\"\"\ntest_adorn_uniterm.py\n\nCreated by Graham Higgins on 2021-11-14.\nCopyright (c) 2021
Graham Higgins. All rights reserved.\n\"\"\"\n\nfrom functools import reduce\n\nimport rdflib\nfrom rdflib import Namespace, URIRef, Variable, logger\n\nfrom FuXi.Horn.PositiveConditions import Uniterm\nfrom FuXi.Rete.Magic import NON_LINEAR_MS_QUERY, AdornLiteral\nfrom FuXi.sparqlp.algebra import RenderSPARQLAlgebra\nfrom FuXi.sparqlp.parser import parse\n\n\ndef test_herbrand_hash():\n \"\"\"\n Test of correct (modern, list-comprehension-idiom) implementation of\n herbrand_hash.\n\n Original code:\n\n self.herbrand_hash = hash(\n reduce(\n lambda x, y: str(x) + str(y),\n filter(\n lambda i: not isinstance(i, Variable),\n self.toRDFTuple() if len(self.arg) == 2 else [self.op] + self.arg,\n ),\n None,\n )\n ) ^ hash(reduce(lambda x, y: x + y, self.adornment))\n\n Re-implemented as:\n\n self.herbrand_hash = hash(\n \"\".join(\n [\n term\n for term in (\n self.toRDFTuple()\n if len(self.arg) == 2\n else [self.op] + self.arg\n )\n if not isinstance(term, Variable)\n ]\n )\n ) ^ hash(\"\".join([i for i in self.adornment]))\n\n\n \"\"\"\n\n self_isMagic = False\n self_arg = [URIRef(\"http://doi.acm.org/10.1145/28659.28689#john\"), Variable(\"X\")]\n self_op = URIRef(\"http://doi.acm.org/10.1145/28659.28689#sg\")\n self_adornment = [\"b\", \"f\"]\n\n def self_toRDFTuple():\n if self_isMagic:\n return (self_arg[0], self_op, self_arg[-1])\n else:\n subject, _object = self_arg\n return (subject, self_op, _object)\n\n def doreduce():\n return \"\".join(\n [\n term\n for term in (\n self_toRDFTuple() if len(self_arg) == 2 else [self_op] + self_arg\n )\n if not isinstance(term, Variable)\n ]\n )\n\n reduced = doreduce()\n\n assert (\n reduced\n == \"http://doi.acm.org/10.1145/28659.28689#johnhttp://doi.acm.org/10.1145/28659.28689#sg\"\n )\n\n herbrand_hashx = hash(reduced) ^ hash(\"\".join([i for i in self_adornment]))\n\n self_arg = [URIRef(\"http://doi.acm.org/10.1145/28659.28689#john\"), Variable(\"Y\")]\n\n reduced = doreduce()\n\n assert (\n reduced\n == \"http://doi.acm.org/10.1145/28659.28689#johnhttp://doi.acm.org/10.1145/28659.28689#sg\"\n )\n\n herbrand_hashy = hash(reduced) ^ hash(reduce(lambda x, y: x + y, self_adornment))\n\n assert herbrand_hashx == herbrand_hashy\n\n\ndef test_adorn_uniterm():\n EX = Namespace(\"http://doi.acm.org/10.1145/6012.15399#\")\n\n query = RenderSPARQLAlgebra(parse(NON_LINEAR_MS_QUERY))\n\n logger.info(f\"query.patterns {query.patterns}\")\n\n literal = query.patterns[0][:3]\n\n assert literal == (\n rdflib.term.URIRef(\"http://doi.acm.org/10.1145/28659.28689#john\"),\n rdflib.term.URIRef(\"http://doi.acm.org/10.1145/28659.28689#sg\"),\n rdflib.term.Variable(\"X\"),\n )\n\n aLit = AdornLiteral(literal, query.prolog.prefixBindings)\n\n assert str(aLit) == \"ex:sg_bf(ex:john ?X)\"\n\n assert aLit.adornment == [\"b\", \"f\"]\n\n res = aLit.getBindings(Uniterm(EX.sg, [Variable(\"X\"), EX.jill]))\n\n assert res == {\n rdflib.term.Variable(\"X\"): rdflib.term.URIRef(\n \"http://doi.acm.org/10.1145/28659.28689#john\"\n )\n }\n\n\n# class AdornedUniTerm(Uniterm):\n# def __init__(self, uterm, adornment=None, naf=False):\n\n\n'''\n\nadornment = [\n \"f\" if isinstance(v, Variable) else \"b\"\n for v in GetArgs(self.goal, secondOrder=True)\n]\nadornment = reduce(lambda x, y: x + y, adornment)\nadornedQuery = AdornedUniTerm(self.goal, adornment)\n\n\n\n\nclass AdornedUniTerm(Uniterm):\n def __init__(self, uterm, adornment=None, naf=False):\n self.marked = False\n self.adornment = adornment\n # @@DEVNOTE uncommented in 1.3/4\n # self.nsMgr = GetUterm(uterm).nsMgr\n newArgs 
= copy.deepcopy(GetUterm(uterm).arg)\n # @@TODO FIXME: verify\n super(AdornedUniTerm, self).__init__(\n GetUterm(uterm).op, newArgs, newNss=GetUterm(uterm).nsMgr, naf=naf\n )\n self.isMagic = False\n # @@DEVNOTE TODO resolve\n\n self.herbrand_hash = hash(\n reduce(\n lambda x, y: str(x) + str(y),\n [i for i in self.toRDFTuple() if len(self.arg) == 2\n else [self.op] + self.arg if not isinstance(i, Variable)], None)\n ) ^ hash(reduce(lambda x, y: x + y, self.adornment)\n )\n\n self.herbrand_hash = hash(\n reduce(\n lambda x, y: str(x) + str(y),\n filter(\n lambda i: not isinstance(i, Variable),\n self.toRDFTuple() if len(self.arg) == 2 else [self.op] + self.arg,\n ),\n None,\n )\n ) ^ hash(reduce(lambda x, y: x + y, self.adornment))\n\n def clone(self):\n return AdornedUniTerm(self, self.adornment, self.naf)\n\n def convert2NormalUterm(self):\n return buildUniTerm(self.toRDFTuple())\n\n def makeMagicPred(self):\n \"\"\"\n Make a (cloned) magic predicate\n\n The arity of the new predicate is the number of occurrences of b in the\n adornment a, and its arguments correspond to the bound arguments of p a\n \"\"\"\n newAdornedPred = AdornedUniTerm(self, self.adornment, self.naf)\n if self.op == RDF.type:\n newAdornedPred.arg[-1] = URIRef(self.arg[-1] + \"_magic\")\n elif len([i for i in self.adornment if i == \"b\"]) == 1:\n # adorned predicate occurrence with one out of two arguments bound\n # converted into a magic predicate: It becomes a unary predicate\n # (an rdf:type assertion)\n newAdornedPred.arg[-1] = URIRef(self.op + \"_magic\")\n newAdornedPred.arg[0] = [\n self.arg[idx] for idx, i in enumerate(self.adornment) if i == \"b\"\n ][0]\n newAdornedPred.op = RDF.type\n else:\n newAdornedPred.op = URIRef(self.op + \"_magic\")\n newAdornedPred.isMagic = True\n return newAdornedPred\n\n def __hash__(self):\n return self._hash ^ hash(reduce(lambda x, y: x + y, self.adornment))\n\n # def __eq__(self,other):\n # return self.adornment == other.adornment and\\\n # self.op == other.op and\\\n # self.arg == other.arg\n\n def hasBindings(self, varsOnly=False):\n for idx, term in enumerate(GetArgs(self)):\n if self.adornment[idx] == \"b\":\n if not varsOnly or isinstance(term, Variable):\n return True\n return False\n\n def getDistinguishedVariables(self, varsOnly=False, bound=True):\n adornment2Compare = \"b\" if bound else \"f\"\n if self.op == RDF.type:\n for idx, term in enumerate(GetArgs(self)):\n if self.adornment[idx] == adornment2Compare:\n if not varsOnly or isinstance(term, Variable):\n yield term\n else:\n for idx, term in enumerate(self.arg):\n try:\n if self.adornment[idx] == adornment2Compare:\n if not varsOnly or isinstance(term, Variable):\n yield term\n except IndexError:\n pass\n\n def getBindings(self, uniterm):\n rt = {}\n for idx, term in enumerate(self.arg):\n goalArg = self.arg[idx]\n candidateArg = uniterm.arg[idx]\n if self.adornment[idx] == \"b\" and isinstance(candidateArg, Variable):\n # binding\n rt[candidateArg] = goalArg\n return rt\n\n def toRDFTuple(self):\n if hasattr(self, \"isMagic\") and self.isMagic:\n return (self.arg[0], self.op, self.arg[-1])\n else:\n subject, _object = self.arg\n return (subject, self.op, _object)\n\n def __repr__(self):\n pred = self.normalizeTerm(self.op)\n negPrefix = self.naf and \"not \" or \"\"\n if self.op == RDF.type:\n adornSuffix = \"_\" + self.adornment[0]\n else:\n adornSuffix = \"_\" + \"\".join(self.adornment)\n if self.isMagic:\n if self.op == RDF.type:\n return \"%s%s(%s)\" % (\n negPrefix,\n self.normalizeTerm(self.arg[-1]),\n 
self.normalizeTerm(self.arg[0]),\n )\n else:\n return \"%s%s(%s)\" % (\n negPrefix,\n pred,\n \" \".join(\n [\n self.normalizeTerm(i)\n for idx, i in enumerate(self.arg)\n if self.adornment[idx] == \"b\"\n ]\n ),\n )\n elif self.op == RDF.type:\n return \"%s%s%s(%s)\" % (\n negPrefix,\n self.normalizeTerm(self.arg[-1]),\n adornSuffix,\n self.normalizeTerm(self.arg[0]),\n )\n else:\n return \"%s%s%s(%s)\" % (\n negPrefix,\n pred,\n adornSuffix,\n \" \".join([self.normalizeTerm(i) for i in self.arg]),\n )\n'''\n","sub_path":"test/test_rete/test_adorn_uniterm.py","file_name":"test_adorn_uniterm.py","file_ext":"py","file_size_in_byte":9579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602435481","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[13]:\n\n\ndef reverse(S):\n new_list = []\n for i in range(len(S)-1, -1, -1):\n new_list.append(S[i])\n s = \"\".join(new_list)\n return s\nreverse(\"junyiacademy\")\n\n\n# In[12]:\n\n\ndef reverse_sentense(S):\n new_list = []\n flag = 0\n for i in range(len(S)):\n if S[i] == \" \":\n s = reverse(S[flag:i+1])\n new_list.append(s)\n flag = i+1\n elif i == len(S)-1:\n s = reverse(S[flag:])\n new_list.append(\" \"+ s)\n else:\n continue\n a = \"\".join(new_list)\n return a\nreverse_sentense('flipped class room is important')\n\n\n# In[15]:\n\n\ndef count_num(num):\n list = []\n for i in range(1, num+1):\n if i % 15 == 0:\n list.append(i)\n else:\n if i % 3 == 0 or i % 5 == 0:\n continue\n else:\n list.append(i)\n return len(list)\ncount_num(15)\n\n\n\n","sub_path":"均一題目1、2.py","file_name":"均一題目1、2.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"106969764","text":"# coding=utf-8\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nimport config.global_configs as global_configs\n\n\nclass BismarkAligner(object):\n def __init__(self, first_mate, second_mate, output_dir, temp_dir, log_file = None):\n self.first_mate = first_mate\n self.second_mate = second_mate\n self.output_dir = output_dir\n self.temp_dir = temp_dir\n self.log_file = log_file if log_file else os.path.join(self.output_dir, \"log.txt\")\n\n def generate_command(self):\n cfg = global_configs.project_config\n command = cfg.bismark_path\n command += \" --bowtie2 \" if cfg.use_bowtie_2 else \" \"\n command += \" --path_to_bowtie \" + cfg.bowtie_path\n command += \" -n 1 \"\n command += \" -p 4 \"\n command += cfg.converted_genome_path\n command += \" -1 \" + self.first_mate + \" -2 \" + self.second_mate\n command += \" -o \" + self.output_dir\n command += \" --temp_dir \" + self.temp_dir\n command += \" --bam \"\n command += \" --samtools_path \" + cfg.samtools_path\n command += \" > \" + self.log_file\n command += \" 2>/dev/null \"\n return command\n","sub_path":"tools/bismark/bismark_aligner.py","file_name":"bismark_aligner.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"329058879","text":"#!/usr/bin/env python3\n\"\"\"The app: very basic.\"\"\"\n\n# pylint: disable=cyclic-import, no-self-use, no-member, invalid-name, no-init\n# pylint: disable=too-few-public-methods, no-name-in-module, import-error\n\nimport re\n\nfrom functools import lru_cache, wraps\n\nimport arrow\n\nfrom flask import request\nfrom flask import current_app as app\nfrom flask.ext.restful import Resource\nfrom 
flask.ext.restful import reqparse\n\nfrom sqlalchemy.orm.relationships import RelationshipProperty\n\nfrom ersa_storage_hcp import db, Allocation, Tenant\nfrom ersa_storage_hcp import Namespace, Snapshot, Usage\n\nUSAGE_QUERY_PARSER = reqparse.RequestParser()\nUSAGE_QUERY_PARSER.add_argument(\"filter\", action=\"append\", help=\"Filter\")\nUSAGE_QUERY_PARSER.add_argument(\"page\", type=int, default=1, help=\"Page #\")\n\nALPHA_PREFIX = re.compile(\"^[A-Za-z]+\")\n\nITEMS_PER_PAGE = 1000\n\n\ndef get_or_create(session, model, **kwargs):\n \"\"\"Fetch object if returned by filter query, else create new.\"\"\"\n item = session.query(model).filter_by(**kwargs).first()\n if not item:\n item = model(**kwargs)\n session.add(item)\n session.flush()\n return item\n\n\ndef extract_allocation(name):\n \"\"\"Check for an allocation suffix.\"\"\"\n if not name.islower():\n return None\n if \"-\" not in name:\n return None\n name = name.split(\"-\")\n try:\n return int(ALPHA_PREFIX.sub(\"\", name[-1]))\n except ValueError:\n return None\n\n\ndef dynamic_query(model, query, expression):\n \"\"\"\n Construct query based on:\n attribute.operation.expression\n For example:\n foo.eq.42\n \"\"\"\n key, op, value = expression.split(\".\", 3)\n column = getattr(model, key, None)\n if isinstance(column.property, RelationshipProperty):\n column = getattr(model, key + \"_id\", None)\n if op == \"in\":\n query_filter = column.in_(value.split(\",\"))\n else:\n attr = None\n for candidate in [\"%s\", \"%s_\", \"__%s__\"]:\n if hasattr(column, candidate % op):\n attr = candidate % op\n break\n if value == \"null\":\n value = None\n query_filter = getattr(column, attr)(value)\n return query.filter(query_filter)\n\n\ndef require_auth(func):\n \"\"\"Very simple authentication via HTTP header.\"\"\"\n\n @wraps(func)\n def decorated(*args, **kwargs):\n \"\"\"Check the header.\"\"\"\n token = request.headers.get(\"x-ersa-storage-hcp-token\", \"\")\n if token == app.config[\"ERSA_STORAGE_HCP_TOKEN\"]:\n return func(*args, **kwargs)\n else:\n return \"\", 401\n\n return decorated\n\n\nclass PingResource(Resource):\n \"\"\"Basic liveness test.\"\"\"\n\n def get(self):\n \"\"\"Hello?\"\"\"\n return \"pong\"\n\n\nclass StorageResource(Resource):\n \"\"\"HCP Tenants and Namespaces\"\"\"\n\n @require_auth\n def get(self):\n \"\"\"HCP Tenants and Namespaces\"\"\"\n return [tenant.json() for tenant in Tenant.query.all()]\n\n\nclass SnapshotResource(Resource):\n \"\"\"Snapshot\"\"\"\n\n @require_auth\n def get(self):\n \"\"\"Snapshot\"\"\"\n return [snapshot.json() for snapshot in Snapshot.query.all()]\n\n\nclass AllocationResource(Resource):\n \"\"\"Allocation\"\"\"\n\n @require_auth\n def get(self):\n \"\"\"Allocation\"\"\"\n return [allocation.json() for allocation in Allocation.query.all()]\n\n\nclass UsageResource(Resource):\n \"\"\"Usage\"\"\"\n\n @require_auth\n def get(self):\n \"\"\"Fetch usage.\"\"\"\n args = USAGE_QUERY_PARSER.parse_args()\n query = Usage.query\n if args[\"filter\"]:\n for query_filter in args[\"filter\"]:\n query = dynamic_query(Usage, query, query_filter)\n return [usage.json()\n for usage in query.paginate(args[\"page\"],\n per_page=ITEMS_PER_PAGE).items]\n\n @require_auth\n def post(self):\n \"\"\"Fetch usage.\"\"\"\n return self.get()\n\n @require_auth\n def put(self):\n \"\"\"Ingest usage.\"\"\"\n\n timestamps = set()\n\n @lru_cache(maxsize=10000)\n def cache(model, **kwargs):\n \"\"\"Intra-request caching\"\"\"\n return get_or_create(db.session, model, **kwargs)\n\n inserts = []\n\n for message in 
request.json:\n data = message[\"data\"]\n\n timestamp = data[\"timestamp\"]\n if timestamp in timestamps:\n continue\n else:\n timestamps.add(timestamp)\n\n print(timestamp)\n\n snapshot = cache(Snapshot, ts=timestamp)\n\n for tenant_name, namespaces in data.items():\n if not isinstance(namespaces, list):\n continue\n\n allocation = None\n allocation_id = extract_allocation(tenant_name)\n if allocation_id:\n allocation = cache(Allocation, allocation=allocation_id)\n\n tenant = cache(Tenant, name=tenant_name, allocation=allocation)\n\n for details in namespaces:\n if \"namespaceName\" in details:\n namespace_name = details[\"namespaceName\"]\n else:\n namespace_name = \"__total__\"\n\n allocation = None\n allocation_id = extract_allocation(namespace_name)\n if allocation_id:\n allocation = cache(Allocation,\n allocation=allocation_id)\n\n namespace = cache(Namespace,\n name=namespace_name,\n tenant=tenant,\n allocation=allocation)\n\n start_time = arrow.get(details[\"startTime\"]).timestamp\n end_time = arrow.get(details[\"endTime\"]).timestamp\n\n usage = {\n \"snapshot\": snapshot,\n \"namespace\": namespace,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"ingested_bytes\": details[\"ingestedVolume\"],\n \"raw_bytes\": details[\"storageCapacityUsed\"],\n \"reads\": details[\"reads\"],\n \"writes\": details[\"writes\"],\n \"deletes\": details[\"deletes\"],\n \"objects\": details[\"objectCount\"],\n \"bytes_in\": details[\"bytesIn\"],\n \"bytes_out\": details[\"bytesOut\"],\n \"metadata_only_objects\":\n details[\"metadataOnlyObjects\"],\n \"metadata_only_bytes\": details[\"metadataOnlyBytes\"],\n \"tiered_objects\": details[\"tieredObjects\"],\n \"tiered_bytes\": details[\"tieredBytes\"]\n }\n\n inserts.append(Usage(**usage))\n\n # snapshot, tenant, namespace, allocation\n db.session.commit()\n\n # usage\n db.session.bulk_save_objects(inserts)\n db.session.commit()\n\n return \"\", 204\n","sub_path":"ersa_storage_hcp/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367813603","text":"import pygame, math, time, random\nfrom pygame.locals import *\n\npygame.init()\nwidth, height = 640, 480\nscreen = pygame.display.set_mode((width, height))\nkeys = [False, False, False, False]\nplayerpos = [300, 240]\npi = math.pi\nacc = [0, 0]\narrows = []\nbadtimer = 100\nbadtimer1 = 0\nbadguys = [[-35, 100]]\nhealthvalue = 194\ndead = False\ngamestatus = 0\naccuracy = 0\n\nplayer = pygame.image.load('resources/PNG/Soldier1/soldier1_gun.png')\ngrass = pygame.image.load('resources/PNG/Tiles/tile_01.png')\ncastle1 = pygame.image.load('resources/PNG/towerDefense_tile205.png')\ncastle = pygame.transform.rotate(castle1, -90)\narrow1 = pygame.image.load('resources/PNG/Tiles/tile_533.png')\narrow = pygame.transform.scale(arrow1, (32, 32))\nbadguyimg1 = pygame.image.load('resources/PNG/Robot1/robot1_hold.png')\nbadguyimg = badguyimg1\nhealthbar = pygame.image.load(\"resources/images/healthbar.png\")\nhealth = pygame.image.load(\"resources/images/health.png\")\ngameover = pygame.image.load(\"resources/images/gameover.png\")\nyouwin = pygame.image.load(\"resources/images/youwin.png\")\n\nclass EndingScreen():\n screen.fill((255, 0, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n if event.type == pygame.KEYDOWN:\n if event.key == K_q:\n pygame.quit()\n exit(0)\n #screen.fill(0)\n #pygame.font.init()\n #font = pygame.font.Font(None, 
24)\n #text = font.render(\"Accuracy: \" + str(accuracy) + \"%\", True, (255, 0, 0))\n #textRect = text.get_rect()\n #textRect.centerx = screen.get_rect().centerx\n #textRect.centery = screen.get_rect().centery + 24\n #screen.blit(gameover, (0, 0))\n #screen.blit(text, textRect)\n\nclass GamePlay():\n while 1:\n #exitcode = 1\n badtimer -= 1\n\n screen.fill(0)\n for x in range(width//grass.get_width() + 1):\n for y in range(height//grass.get_height() + 1):\n screen.blit(grass,(x * 60, y * 60))\n screen.blit(pygame.transform.flip(castle, True, False), (531, 30))\n screen.blit(pygame.transform.flip(castle, True, False), (531, 135))\n screen.blit(pygame.transform.flip(castle, True, False), (531, 240))\n screen.blit(pygame.transform.flip(castle, True, False), (531, 345))\n\n position = pygame.mouse.get_pos()\n angle = math.atan2(position[1] - (playerpos[1] + 32), position[0] - (playerpos[0] + 26))\n playerrot = pygame.transform.rotate(player, 360-angle * (360 / (2 * pi)))\n playerpos1 = (playerpos[0] - playerrot.get_rect().width / 2, playerpos[1] - playerrot.get_rect().height / 2)\n screen.blit(playerrot, playerpos1)\n\n for bullet in arrows:\n index = 0\n velx = math.cos(bullet[0]) * 10\n vely = math.sin(bullet[0]) * 10\n bullet[1] += velx\n bullet[2] += vely\n if bullet[1] < -64 or bullet[1] > 640 or bullet[2] < -64 or bullet[2] > 480:\n arrows.pop(index)\n index += 1\n for projectile in arrows:\n arrow1 = pygame.transform.rotate(arrow, 360 - projectile[0] * 57.29)\n screen.blit(arrow1, (projectile[1], projectile[2]))\n\n if badtimer == 0:\n badguys.append([-35, random.randint(50, 430)])\n badtimer = 100 - (badtimer1 * 2)\n if badtimer1 >= 35:\n badtimer1 = 35\n else:\n badtimer1 += 5\n index = 0\n for badguy in badguys:\n if badguy[0] > 640:\n badguys.pop(index)\n badguy[0] += 7\n badrect = pygame.Rect(badguyimg.get_rect())\n badrect.top = badguy[1]\n badrect.right = badguy[0]\n if badrect.right > 531:\n healthvalue -= random.randint(5, 20)\n print(healthvalue)\n badguys.pop(index)\n index1 = 0\n for bullet in arrows:\n bullrect = pygame.Rect(arrow.get_rect())\n bullrect.left = bullet[1]\n bullrect.top = bullet[2]\n if badrect.colliderect(bullrect):\n acc[0] += 1\n badguys.pop(index)\n arrows.pop(index1)\n index1 += 1\n index += 1\n for badguy in badguys:\n screen.blit(badguyimg, badguy)\n\n #font = pygame.font.Font(None, 24)\n #survivedtext = font.render(str((90000 - pygame.time.get_ticks())/60000) + ':' + str((90000 - pygame.time.get_ticks())/1000%60).zfill(2), True, (0, 0, 0))\n #textRect = survivedtext.get_rect()\n #textRect.topright = [635, 5]\n #screen.blit(survivedtext, textRect)\n\n screen.blit(healthbar, (5, 5))\n for health1 in range(healthvalue):\n screen.blit(health, (health1 + 8, 8))\n\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n\n if event.type == pygame.KEYDOWN:\n if event.key == K_q:\n pygame.quit()\n exit(0)\n\n if event.key == K_w:\n keys[0] = True\n if event.key == K_a:\n keys[1] = True\n if event.key == K_s:\n keys[2] = True\n if event.key == K_d:\n keys[3] = True\n\n if event.key == K_SPACE:\n position = pygame.mouse.get_pos()\n acc[1] += 1\n arrows.append([math.atan2(position[1] - (playerpos1[1] + 32), position[0] - (playerpos1[0] + 26)), playerpos1[0] + 32, playerpos1[1] + 32])\n\n if event.type == pygame.KEYUP:\n if event.key == K_w:\n keys[0] = False\n if event.key == K_a:\n keys[1] = False\n if event.key == K_s:\n keys[2] = False\n if event.key == K_d:\n keys[3] = False\n\n if keys[0]:\n 
playerpos[1] -= 5\n elif keys[2]:\n playerpos[1] += 5\n if keys[1]:\n playerpos[0] -= 5\n elif keys[3]:\n playerpos[0] += 5\n\n if healthvalue < 0:\n #exitcode = 0\n dead = True\n print(\"hello ladies\")\n gamestatus = 1\n EndingScreen()\n break\n if acc[1] != 0:\n accuracy = acc[0] * 1.0 / acc[1] * 100\n else:\n accuracy = 0\n\n #if dead == True:\n # gamestatus = 1\n\nwhile True:\n if gamestatus == 1:\n EndingScreen()\n print(\"we made it\")\n elif gamestatus == 0:\n GamePlay()\n else:\n print(\"Somebody decided to fuck with the gamestatus, therefore also fucking your game experience. We apologize.\")\n #elif gamestatus == 0:\n #GamePlay()\n\n #while 1:\n # for event in pygame.event.get():\n # if event.type == pygame.QUIT:\n # pygame.quit()\n # exit(0)\n # pygame.display.flip()\n","sub_path":"alltests/testingarea2.py","file_name":"testingarea2.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496657258","text":"#-*- coding:utf_8 -*-\n'''\nCreated on 2010-8-2\n\n@author: aleac\n'''\nimport sendmail\nimport amail\nimport wx\nimport email\nimport email.Message\nimport smtplib\nimport time\nimport email.MIMEText\nclass sendDlg(wx.Dialog):\n def __init__(self,parent,mail):\n self.mail=mail\n wx.Dialog.__init__(self,parent,-1,\"Send email\",size=(500,500))\n self.filename=\"\"\n self.label1=wx.StaticText(self,-1,\"To:\",pos=(20,50))\n self.label2=wx.StaticText(self,-1,\"Title:\",pos=(20,80))\n self.label3=wx.StaticText(self,-1,\"Add File\",pos=(20,110))\n self.label4=wx.StaticText(self,-1,\"Object:\",pos=(20,150))\n \n self.sendBt=wx.Button(self,-1,\"Send\",pos=(410,50),size=(50,20))\n self.addBt=wx.Button(self,-1,\"AddFile\",pos=(410,110),size=(50,20))\n self.toText=wx.TextCtrl(self,-1,pos=(100,50),size=(300,20))\n self.subjectText=wx.TextCtrl(self,-1,pos=(100,80),size=(300,20))\n self.addFileText=wx.TextCtrl(self,-1,pos=(100,110),size=(300,20))\n self.objectText=wx.TextCtrl(self,-1,pos=(100,150),size=(300,300),style=wx.TE_MULTILINE)\n self.sendBt.Bind(wx.EVT_LEFT_DOWN, self.sendMail, self.sendBt)\n self.addBt.Bind(wx.EVT_LEFT_DOWN, self.addFile, self.addBt)\n def sendMail(self,event):\n mailer = sendmail.Mailer(self.mail.stmpaddr,self.mail.usr,self.mail.pass_) \n mailer.mailfrom(self.mail.address)\n \n mailer.mailto(self.toText.GetValue().encode(\"UTF-8\") ) \n mailer.mailsubject(self.subjectText.GetValue().encode(\"UTF-8\") ) \n mailer.html_body(self.objectText.GetValue().encode(\"UTF-8\") ) \n if self.filename!=\"\":\n mailer.addattach(self.filename)\n \n if mailer.send():\n wx.MessageBox(\"send success\") \n else:\n wx.MessageBox(\"send failed\") \n \n def addFile(self,event):\n dlg=wx.FileDialog(self,\"Open a file\",\"\",style=wx.OPEN)\n if dlg.ShowModal()==wx.ID_OK:\n self.filename=dlg.GetPath()\n self.addFileText.SetValue(self.filename)\n dlg.Destroy()\n \nif __name__==\"__main__\":\n app=wx.PySimpleApp()\n mail=amail.MyMail()\n dlg=sendDlg(None,mail)\n dlg.ShowModal()\n app.MainLoop()","sub_path":"PyEmailClient/src/sendDlg.py","file_name":"sendDlg.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558834642","text":"from bottle import route, get, post, mako_view as view, url, request, response, redirect, abort\r\nfrom overlord import ServiceRegistry\r\nimport globals\r\n\r\nfrom onyx import account\r\nfrom onyx import game\r\n\r\nfrom onyx import logging, api\r\nfrom ml import 
log\r\n\r\nimport json \r\n\r\n@get('/start_game/:game', name='game.game')\r\n@logging.trap\r\ndef start_game(game):\r\n ses, user = account.authenticate(['can_play_games'])\r\n \r\n db = globals.db.connect()\r\n url = api.start_user_session(db, game, user.id)\r\n db.commit()\r\n redirect(url)\r\n \r\n #try:\r\n \r\n #conn = httplib.HTTPConnection(host=globals.config.ml.api.domain, port=globals.config.ml.api.port)\r\n #conn.request(\"POST\", \"/api/%s/start_user_session\" % globals.config.ml.key, json.dumps({'game_id':game, 'user_id':user.username, 'group_id':''}))\r\n \r\n #response = conn.getresponse()\r\n #\r\n #if response.status == 200:\r\n # data = response.read()\r\n # conn.close()\r\n # return dict(location=data)\r\n #else:\r\n # data = response.read()\r\n # abort(503, data)\r\n \r\n #except socket.error:\r\n # abort(500)\r\n\r\n@post('/game/get_sp_data/:token')\r\n@logging.trap\r\ndef a_get_sp_data(token):\r\n ses, user = account.authenticate(['can_play_games'])\r\n \r\n db = globals.db.connect()\r\n \r\n position = game.SlicePosition.get(db, token)\r\n destinations = game.PositionDestination.get(db, position.id)\r\n playable = game.Playable.get_by_id(db, position.playable_id)\r\n \r\n sud_urls = []\r\n for d in destinations:\r\n arg = dict(playable=dict(playable=d.playable, resizable=d.resizable,\r\n height=d.height, width=d.width),\r\n url=game.get_sud_url(d.pointer))\r\n sud_urls.append(arg)\r\n \r\n response.header['CACHE-CONTROL'] = 'NO-CACHE'\r\n \r\n # Don't log access - gets called every second by an AJAX call\r\n return json.dumps(dict(sud_urls=sud_urls, playable=dict(token=playable.token,\r\n resizable=playable.resizable, height=playable.height,\r\n width=playable.width), finished=position.finished,\r\n custom_transition_token=position.custom_transition_token,\r\n epilogue_token=position.epilogue_token))\r\n\r\n@post('/game/get_playable/:playable_token')\r\n@logging.trap\r\ndef a_get_playable(playable_token):\r\n ses, user = account.authenticate(['can_play_games'])\r\n \r\n db = globals.db.connect()\r\n \r\n playable = game.Playable.get(db, playable_token)\r\n \r\n if playable:\r\n log.access([request.urlparts.path, playable_token])\r\n return game.get_sud_url(playable.pointer)\r\n else:\r\n log.error([\"Couldn't generate sud file url. 
There was no playable.\", \"a_get_playable\"])\r\n abort(500)\r\n\r\n@get('/play', name=\"game.index\")\r\n@view('game/game')\r\n@logging.trap\r\ndef index():\r\n db = globals.db.connect()\r\n # ses, user = account.authenticate(['can_play_games'])\r\n \r\n # session = game.SlicePosition.get(db, token)\r\n # slice = game.Slice.get(db, session.slice)\r\n sandstone_version = game.get_sandstone_version(db)\r\n \r\n # if session.user_id != user.id:\r\n # abort(403)\r\n \r\n playable = game.Playable.get(db, 'tubman')\r\n \r\n # play_prologue = session.play_prologue\r\n # if play_prologue:\r\n # game.SlicePosition.set_prologue_to_played(db, session.id)\r\n\r\n sandstone_js = '%s:%s/sandstone.js' % (globals.config.ml.api_service_url, globals.config.ml.api_service_port )\r\n sud_url = game.get_sud_url('tubman-full')\r\n spd_url = \"%s:%s%s\" % (globals.config.ml.http, globals.config.ml.port, url('game.tubman_spd'))\r\n \r\n http = globals.config.ml.http\r\n # log.access([request.urlparts.path, token])\r\n #return dict(js_url=js_url, sud_url=sud_url, spd_url=spd_url, token=token, playable=session.playable)\r\n \r\n sandstone_url = game.get_sandstone_installer_url(sandstone_version)\r\n \r\n db.commit()\r\n return dict(sandstone_js=sandstone_js, sud_url=sud_url, spd_url=spd_url,\r\n playable=playable, http=http, sandstone_url=sandstone_url,\r\n min_sandstone_version=sandstone_version.min_version)\r\n \r\n \r\n\r\n@route('/spd', name=\"game.tubman_spd\")\r\n@view('game/spd')\r\n@logging.trap\r\ndef spd():\r\n db = globals.db.connect()\r\n\r\n response.content_type = \"application/x-sandstone-game\"\r\n response.headers['CACHE-CONTROL'] = 'NO-CACHE'\r\n \r\n return dict()","sub_path":"web/onyx/game/controllers/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548475943","text":"import numpy as np\nimport os\nimport pandas as pd\nimport time\n\n\ndef adjust_data_type(df, data_type=\"float\"):\n \"\"\"Adjust data type of input data frame to required data type.\n\n Parameters\n ----------\n df : pandas dataframe\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n \"\"\"\n\n if not np.all(df.dtypes.unique() == data_type):\n raise TypeError(f\"Input data adjusted to {data_type}.\")\n\n df_new = df.astype(data_type)\n return df_new\n\ndef createdir(path):\n \"\"\"Checks existance of a directory and creates it, if it does not exit.\n :param path: str, path to check and create\n :return: str, path created or not.\n \"\"\"\n if path[-1] != '/':\n path = path + '/'\n if not os.path.exists(path):\n new_path = path\n os.makedirs(new_path)\n else:\n new_path = path\n return new_path\n\ndef timer(func_to_time):\n \"\"\"This is a decorator for timing execution of any input function. 
To use it, the decorator must be written\n together with the definition of the function like the example below:\n\n @timer\n def my_function(*args,**kwargs):\n do something...\n return whatever\n\n :param func_to_time: function (object)\n :return: returns the same function, decorated with the timer function\n \"\"\"\n\n def decorator(*args, **kwargs):\n print(f\"Running {func_to_time.__name__}():\", end=\" \")\n t0 = time.time()\n results = func_to_time(*args, **kwargs)\n run_time_secs = time.time() - t0\n print(f\"\\t{get_time_in_nice_format(run_time_secs)}\")\n # display_time_in_nice_format(run_time_secs, message='{}()'.format(func_to_time.__name__), jumpline=False)\n return results\n\n return decorator\n\ndef get_time_in_nice_format(time_in_secs):\n if time_in_secs < 60:\n display_time = '{} sec'.format(round(time_in_secs, 2))\n elif (time_in_secs >= 60) and (time_in_secs < 3600):\n mins = int(time_in_secs / 60)\n secs = int(time_in_secs % 60)\n display_time = '{} min {} sec'.format(mins, secs)\n elif (time_in_secs >= 3600) and (time_in_secs < 24 * 3600):\n hours = int(time_in_secs / 3600)\n mins = int((time_in_secs % 3600) / 60)\n display_time = '{} hour {} min'.format(hours, mins)\n else:\n days = int(time_in_secs / (24 * 3600))\n hours = int((time_in_secs % (24 * 3600)) / 3600)\n mins = int((time_in_secs - 24 * 3600 * days - 3600 * hours) / 60)\n display_time = '{} day {} hour {} mins'.format(days, hours, mins)\n return display_time\n\ndef check_for_any_nans(x):\n \"\"\" Check for any nans in numpy array \"\"\"\n return np.isnan(x).any()","sub_path":"my_functions.py","file_name":"my_functions.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"573439285","text":"# Copyright (c) 2021 Food-X Technologies\n#\n# This file is part of foodx_devops_tools.\n#\n# You should have received a copy of the MIT License along with foodx_devops_tools.\n# If not, see .\n\nimport pytest\n\nfrom foodx_devops_tools.pipeline_config import (\n DeploymentsDefinitionError,\n load_deployments,\n)\n\n\n@pytest.fixture\ndef apply_deployments_test(apply_pipeline_config_test):\n def _apply(mock_content: str):\n result = apply_pipeline_config_test(mock_content, load_deployments)\n return result\n\n return _apply\n\n\ndef test_single(apply_deployments_test):\n file_text = \"\"\"\n---\ndeployments:\n name:\n locations:\n - loc1\n - loc2\n subscription: some-name\n\"\"\"\n\n result = apply_deployments_test(file_text)\n\n assert len(result.deployments) == 1\n assert \"name\" in result.deployments\n assert result.deployments[\"name\"].locations == [\"loc1\", \"loc2\"]\n assert result.deployments[\"name\"].subscription == \"some-name\"\n\n\ndef test_multiple(apply_deployments_test):\n file_text = \"\"\"\n---\ndeployments:\n name1:\n locations:\n - loc1\n - loc2\n subscription: some-name\n name2:\n locations:\n - loc1\n - loc3\n subscription: some-name\n name3:\n locations:\n - loc1\n - loc3\n - loc4\n subscription: other-name\n\"\"\"\n\n result = apply_deployments_test(file_text)\n\n assert len(result.deployments) == 3\n assert \"name1\" in result.deployments\n assert \"name2\" in result.deployments\n assert \"name3\" in result.deployments\n # Assume that name2 data is correct if name1, name3 are correct.\n assert result.deployments[\"name1\"].locations == [\"loc1\", \"loc2\"]\n assert result.deployments[\"name1\"].subscription == \"some-name\"\n assert result.deployments[\"name3\"].locations == [\"loc1\", \"loc3\", 
\"loc4\"]\n assert result.deployments[\"name3\"].subscription == \"other-name\"\n\n\ndef test_bad_field_raises(apply_deployments_test):\n file_text = \"\"\"\n---\ndeployments:\n name:\n bad_field:\n - loc1\n - loc2\n subscription: some-name\n\"\"\"\n\n with pytest.raises(\n DeploymentsDefinitionError,\n match=r\"Error validating deployments definition\",\n ):\n apply_deployments_test(file_text)\n\n\ndef test_none_raises(apply_deployments_test):\n file_text = \"\"\"\n---\n\"\"\"\n\n with pytest.raises(\n DeploymentsDefinitionError,\n match=r\"Error validating deployments definition\",\n ):\n apply_deployments_test(file_text)\n\n\ndef test_empty_list_raises(apply_deployments_test):\n file_text = \"\"\"\n---\ndeployments:\n\"\"\"\n\n with pytest.raises(\n DeploymentsDefinitionError,\n match=r\"Error validating deployments definition\",\n ):\n apply_deployments_test(file_text)\n","sub_path":"tests/ci/unit_tests/pipeline_config/test_deployments.py","file_name":"test_deployments.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612774657","text":"from fourPointTransformUtility import four_point_tranform\nfrom skimage.filters import threshold_local\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport imutils\nimport argparse\n#construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required =True, help = \"Path to input image to be scanned\")\nargs = vars(ap.parse_args())\n\n\n### EDGE DETECTION ###\n\n#load the image and compute the ratio of the old height and the new height, clone it, and resize it\nimage = cv2.imread(args[\"image\"])\nratio = image.shape[0]/ 500.0\norig = image.copy()\nimage = imutils.resize(image, height =500)\n\n#convert the image to grayscale. 
blur it, and find edges\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (5,5), 0)\nedged = cv2.Canny(gray, 75, 200)\n\n#show the original image and the edge detected image\ncv2.imshow(\"Image\", image)\ncv2.imshow(\"Edged\", edged)\n#cv2.waitKey(0)\n\n\n### FINDING CONTOURS ###\n\ncnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\ncnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n\n# loop over the contours\n\nfor c in cnts:\n\t#approx the contour\n\tperi = cv2.arcLength(c, True)\n\tapprox = cv2.approxPolyDP(c, 0.02*peri, True)\n\n\t# if our approximated contour has 4 points, then we can assume that we have found our screen\n\n\tif len(approx) == 4:\n\t\tscreenCnt = approx\n\t\tbreak\n\n#show the contour(outline) of the piece of paper\ncv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\ncv2.imshow(\"outline\", image)\n#cv2.waitKey()\n#cv2.destroyAllWindows()\n\n\n### APPLY A PERSPECTIVE TRANSFORM AND THRESHOLD ###\n\n#apply the four point transform to obtain a top-down view of the original image\n\nwarped = four_point_tranform(orig, screenCnt.reshape(4,2)*ratio)\n\n#convert the warped image to grayscale, then threshold it to give that 'black and white' paper effect\nwarped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\nT = threshold_local(warped, 11, offset = 10, method = \"gaussian\")\nwarped = (warped > T).astype(\"uint8\")*255\n\n#show the original and scanned images\ncv2.imshow(\"original\", imutils.resize(orig, height = 650))\ncv2.imshow(\"Scanned\", imutils.resize(warped, height = 650))\ncv2.waitKey(0)\n\n","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"150956397","text":"import cv2\n\nprint(cv2.__version__)\n\n#LOAD THE PROJECT IMAGE\nimg = cv2.imread(\"landscape.jpg\", 1)\n\n# IMAGE SIZE MANAGEMENT PARAMETERS\nscale_percent = 50 # percent of original size\nwidth = int(img.shape[1] * scale_percent / 200)\nheight = int(img.shape[0] * scale_percent / 200)\ndim = (width, height)\nresized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n\n#OPEN A WINDOW TO DISPLAY THE IMAGE\ncv2.imshow(\"OpenImage\", resized)\n\n#CLOSE THE APPLICATION WITH THE Q KEY\nwhile True:\n if cv2.waitKey(0) & 0xFF == ord('q'):\n break\n\n#RELEASE THE MEMORY USED BY THE IMAGE (a NumPy image needs no explicit release)\ndel img\ncv2.destroyAllWindows()","sub_path":"PythonFile/2_ResizeImage.py","file_name":"2_ResizeImage.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3379656","text":"from collections import defaultdict, Counter\nclass Solution(object):\n def removeDuplicateLetters(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n n = len(s)\n if n <= 1: return s\n counter = Counter(s)\n visited = defaultdict(lambda: False)\n result = \"\"\n for c in s:\n counter[c] -= 1\n if visited[c]: continue\n while len(result) > 0 and counter[result[-1]] > 0 and result[-1] > c:\n visited[result[-1]] = False\n result = result[:-1]\n visited[c] = True\n result += c\n return result\n\n # The basic idea is to find out the smallest result letter by letter (one letter at a time). 
Here is the thinking process for input \"cbacdcbc\":\n\n # find out the last appeared position for each letter;\n # c - 7\n # b - 6\n # a - 2\n # d - 4\n # find out the smallest index from the map in step 1 (a - 2);\n # the first letter in the final result must be the smallest letter from index 0 to index 2;\n # repeat step 2 to 3 to find out remaining letters.\n # the smallest letter from index 0 to index 2: a\n # the smallest letter from index 3 to index 4: c\n # the smallest letter from index 4 to index 4: d\n # the smallest letter from index 5 to index 6: b\n # so the result is \"acdb\"\n\n # Notes:\n\n # after one letter is determined in step 3, it need to be removed from the \"last appeared position map\", and the same letter should be ignored in the following steps\n # in step 3, the beginning index of the search range should be the index of previous determined letter plus one \n def removeDuplicateLetters(self, s):\n result = ''\n while s:\n i = min(map(s.rindex, set(s)))\n c = min(s[:i+1])\n result += c\n s = s[s.index(c):].replace(c, '')\n return result\n\nif __name__ == '__main__':\n from minitest import *\n\n with test(Solution):\n Solution().removeDuplicateLetters(\"bcabc\").must_equal(\"abc\")\n Solution().removeDuplicateLetters(\"bceabc\").must_equal(\"bcea\")\n Solution().removeDuplicateLetters(\"ebceabc\").must_equal(\"bcea\")\n Solution().removeDuplicateLetters(\"cbacdcbc\").must_equal(\"acdb\")\n Solution().removeDuplicateLetters(\"ccacbaba\").must_equal(\"acb\")\n Solution().removeDuplicateLetters(\"abacb\").must_equal(\"abc\")\n Solution().removeDuplicateLetters(\"abacbijhij\").must_equal(\"abchij\")","sub_path":"python/leetcode/greedy/316_Remove_Duplicate_Letters.py","file_name":"316_Remove_Duplicate_Letters.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"528104717","text":"##This script should help you solve Bottleneck, nothing else to tell, no spoilers\n##image_gallery.php?t=1578829354&f=Ym90dGxlbmVja19kb250YmUucG5n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport base64\n\n\nfajl = input(\"File: \")\nencodirano = base64.b64encode(fajl.encode(\"utf-8\"))\nencStr = str(encodirano, \"utf-8\")\n\nurl = \"http://192.168.1.102/image_gallery.php\"\nizvor = requests.get(url).text\nsoup=BeautifulSoup(izvor, \"lxml\")\n\nfor link in soup.find_all(\"img\"):\n\tvrednosti = link.get(\"src\")\n\t#new_req = requests.get(\"http://192.168.1.102/\"+vrednosti[0:33]+encStr)\n\tvreme = \"/var/log/soc/intrusion_\"+vrednosti[20:30]\n\tenc1 =base64.b64encode(vreme.encode(\"utf-8\"))\n\ten1 = str(enc1,\"utf-8\")\n\tnew_req = requests.get(\"http://192.168.1.102/\"+vrednosti[0:33]+en1)\n\tprint(new_req.text)\n\n","sub_path":"Bottleneck/BottleneckHelper.py","file_name":"BottleneckHelper.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"112523768","text":"# encoding: utf-8\n\"\"\"\n@author: liuwz\n@time: 2021/4/1 10:39 上午\n@file: 429. 
N 叉树的层序遍历.py\n@desc: \n\"\"\"\nfrom collections import deque\nfrom typing import List\n\n\"\"\"\nGiven an N-ary tree, return the level order traversal of its nodes' values (i.e. from left to right, level by level).\nThe serialized input of the tree is its level order traversal, where each group of children is separated by the null value (see the examples).\n\nExample 1:\n\nInput: root = [1,null,3,2,4,null,5,6]\nOutput: [[1],[3,2,4],[5,6]]\nExample 2:\n\n\n\nInput: root = [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14]\nOutput: [[1],[2,3,4,5],[6,7,8,9,10],[11,12,13],[14]]\n \n\nConstraints:\n\nThe height of the tree does not exceed 1000.\nThe total number of nodes is in the range [0, 10^4].\n\"\"\"\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root:\n return []\n ans = []\n queue = deque()\n queue.append(root)\n while queue:\n qsize = len(queue)\n current_layer_list = []\n while qsize:\n node = queue.popleft()\n current_layer_list.append(node.val)\n if node.children:\n for temp in node.children:\n queue.append(temp)\n qsize -= 1\n ans.append(current_layer_list)\n return ans\n","sub_path":"Queue_/429. N 叉树的层序遍历.py","file_name":"429. N 叉树的层序遍历.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"104366745","text":"import time\nimport RPi.GPIO as GPIO\n\n# How to call this part?\n# V = Vehicle()\n# t = template(p1)\n# V.add(t, input=['in1', 'in2'], output=['tdata1','tdata2'], threaded=True)\n# V.start()\n\n\n\nclass buzzer:\n def __init__(self, poll_delay=0.01):\n self.on = True\n self.poll_delay = poll_delay\n # initiate your data\n\n self.BUZZERPIN = 4\n\n self.SPEED = 1\n\n # List of tone-names with frequency\n self.TONES = {\"c6\": 1047,\n \"b5\": 988,\n \"a5\": 880,\n \"g5\": 784,\n \"f5\": 698,\n \"e5\": 659,\n \"eb5\": 622,\n \"d5\": 587,\n \"c5\": 523,\n \"b4\": 494,\n \"a4\": 440,\n \"ab4\": 415,\n \"g4\": 392,\n \"f4\": 349,\n \"e4\": 330,\n \"d4\": 294,\n \"c4\": 262}\n\n self.SONG = [\n [\"e5\", 16], [\"eb5\", 16],\n [\"e5\", 16], [\"eb5\", 16], [\"e5\", 16], [\"b4\", 16], [\"d5\", 16], [\"c5\", 16],\n [\"a4\", 8], [\"p\", 16], [\"c4\", 16], [\"e4\", 16], [\"a4\", 16],\n [\"b4\", 8], [\"p\", 16], [\"e4\", 16], [\"ab4\", 16], [\"b4\", 16],\n [\"c5\", 8], [\"p\", 16], [\"e4\", 16], [\"e5\", 16], [\"eb5\", 16],\n [\"e5\", 16], [\"eb5\", 16], [\"e5\", 16], [\"b4\", 16], [\"d5\", 16], [\"c5\", 16],\n [\"a4\", 8], [\"p\", 16], [\"c4\", 16], [\"e4\", 16], [\"a4\", 16],\n [\"b4\", 8], [\"p\", 16], [\"e4\", 16], [\"c5\", 16], [\"b4\", 16], [\"a4\", 4]\n]\n\n # Initiate your part here\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.BUZZERPIN, GPIO.OUT)\n\n def run(self):\n self.poll()\n # Call in the control loop\n # Works when threaded=False\n # Input is parameters, Return your output\n\n def shutdown(self):\n self.on = False\n time.sleep(0.2)\n GPIO.output(self.BUZZERPIN, GPIO.HIGH)\n GPIO.cleanup() # Release resource\n\n def update(self):\n while self.on:\n self.poll()\n time.sleep(self.poll_delay)\n # your thread\n # Works when threaded=True\n\n def run_threaded(self):\n pass\n\n def poll(self):\n p = GPIO.PWM(self.BUZZERPIN, 440)\n p.start(0.5)\n for t in self.SONG:\n duration = (1. 
/ (t[1] * 0.25 * self.SPEED))\n if t[0] == \"p\":\n time.sleep(duration)\n else:\n frequency = self.TONES[t[0]]\n p.ChangeFrequency(frequency)\n p.start(0.5)\n time.sleep(duration)\n p.stop()\n\n\n# test\nif __name__ == \"__main__\":\n\n B = buzzer()\n\n while True:\n B.run()\n","sub_path":"mycar/custom_parts/buzzer.py","file_name":"buzzer.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"449867394","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split # load the data-splitting function train_test_split\nimport matplotlib.pyplot as plt\nimport os\nisTrain=True\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\ndef show_result(image, points):\n plt.imshow(image, cmap='gray')\n for i in range(15):\n plt.plot(points[2*i], points[2*i + 1], 'ro')\n plt.show()\n\ndef export(pred_points, filename):\n submission_data = pd.DataFrame(pred_points)\n submission_data.to_csv(filename, index=False)\n\nTrain_Dir = 'D:/大数据资料/大三上课件/机器学习/期末大实验/program/base/data/train.csv'\nTest_Dir = 'D:/大数据资料/大三上课件/机器学习/期末大实验/program/base/data/test.csv'\ntrain_data = pd.read_csv(Train_Dir)\ntest_data = pd.read_csv(Test_Dir)\n\n# use the previous value to fill the missing value\ntrain_data.fillna(method='ffill', inplace=True)\n\n# preparing training data\nimga = []\nfor i in range(len(train_data)):\n img = train_data['Image'][i].split(' ')\n img = ['0' if x == '' else x for x in img]\n imga.append(img)\n\nimage_list = np.array(imga, dtype='float')\nX_train = image_list.reshape(-1, 96, 96, 1)\n\n# preparing training label\ntraining = train_data.drop('Image', axis=1)\ny_train = []\nfor i in range(len(train_data)):\n y = training.iloc[i, 1:]\n y_train.append(y)\ny_train = np.array(y_train, dtype='float')\n\n# preparing test data\ntimga = []\nfor i in range(len(test_data)):\n timg = test_data['Image'][i].split(' ')\n timg = ['0' if x == '' else x for x in timg]\n timga.append(timg)\ntimage_list = np.array(timga, dtype='float')\nX_test = timage_list.reshape(-1, 96, 96, 1)\nX_5=X_train.reshape(-1,96*96)\nX_6=X_test.reshape(-1,96*96)\n# split into training and validation sets\nX_t,X_v,y_t,y_v=train_test_split(X_5,y_train)\n\n# model training and saving\nimport tensorflow.compat.v1 as tf \ntf.disable_v2_behavior() \nimport matplotlib.pyplot as plt\n\n\nsess = tf.compat.v1.InteractiveSession()\n# build a convolutional neural network\n# The placeholder() function reserves a spot in the model while the computation graph is being built; no input data is passed to\n# the model at this point, it only allocates the necessary memory. Once a session is created, data is fed to the placeholders via the feed_dict() argument when the model is run.\nx = tf.placeholder(\"float\", shape=[None, 9216],name=\"x\")\ny_ = tf.placeholder(\"float\", shape=[None, 30],name=\"y_\")\n\ndef calculate_mse(predict, label):\n #mse_array = tf.reduce_mean((predict - label)**2, 0)\n mse_array = tf.reduce_mean((predict - label)**2)\n return tf.sqrt(mse_array)\n\n# define variable initialization\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape,stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1,shape=shape)\n return tf.Variable(initial)\n# define the convolution and pooling operations\ndef conv2d(x,W):\n return tf.nn.conv2d(x,W,strides=[1,1,1,1], padding='SAME')\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n# First convolutional layer\nW_conv1 = weight_variable([3,3,1,32])\nb_conv1 = bias_variable([32])\nx_images = tf.reshape(x,[-1,96,96,1])# -1 is inferred from the actual input, e.g. 50 images means 50*784 => 50,28,28,1\n\nh_conv1 = tf.nn.relu(conv2d(x_images,W_conv1) + b_conv1)\nh_pool1 = 
max_pool_2x2(h_conv1)\n\n# Second convolutional layer\nW_conv2 = weight_variable([2,2,32,64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n#print(h_pool2.shape)\n# Third convolutional layer\nW_conv3 = weight_variable([2,2,64,128])\nb_conv3 = bias_variable([128])\n\nh_conv3 = tf.nn.relu(conv2d(h_pool2,W_conv3) + b_conv3)\nh_pool3 = max_pool_2x2(h_conv3)\n# Fully connected layer\nW_fc1 = weight_variable([128*12*12,500])\nb_fc1 = bias_variable([500])\n\nh_pool3_flat = tf.reshape(h_pool3,[-1,12*12*128])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat,W_fc1) + b_fc1)\n\n# dropout\nkeep_prob = tf.placeholder(\"float\",name=\"keep_prob\")\nh_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)\n# Fully connected layer (output layer)\nW_fc2 = weight_variable([500,30])\nb_fc2 = bias_variable([30])\n\n# to save the model, initialize with a name\n#y_conv = tf.matmul(h_fc1_drop,W_fc2) + b_fc2\n#y_conv = tf.convert_to_tensor(tf.matmul(h_fc1_drop,W_fc2) + b_fc2,name=\"y_conv\")\ny_conv = tf.matmul(h_fc1_drop,W_fc2) + b_fc2\n\n# train and evaluate the model\ncross_entropy = calculate_mse(y_conv,y_)\n# cross-entropy cost function\n# The advantage of Adam is that, after bias correction, the learning rate of each iteration stays within a definite range, which keeps the parameters stable.\n# Compared with basic SGD: 1. it is less likely to get stuck in local optima; 2. it is faster.\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n#correct_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(y_,1))\n#accuracy = tf.reduce_mean(tf.cast(correct_prediction,\"float\"))\nmse=calculate_mse(y_conv,y_)\n\n\n\ndef next_batch(train,target,batch_size):\n length=len(train)\n index=[i for i in range(length)]\n np.random.shuffle(index)\n cnt=length/batch_size+1\n while cnt>0:\n batch_x=[]\n batch_y=[]\n try:\n for i in range(batch_size):\n batch_x.append(train[index[i]])\n batch_y.append(target[index[i]])\n index.remove(index[i])\n except:\n index=[i for i in range(length)]\n continue\n \n yield (batch_x,batch_y)\na=next_batch(X_t,y_t,50)\n\"\"\"saver = tf.train.Saver(max_to_keep=4)\nckpt_file_path = \"D:/models12/mnist\"\npath = os.path.dirname(os.path.abspath(ckpt_file_path))\nif os.path.isdir(path) is False:\n os.makedirs(path)\"\"\"\nsess.run(tf.global_variables_initializer())\nfor i in range(10001):\n #batch[0] = X_t.next_batch(5)\n #batch[1] = y_t.next_batch(5)\n batch = next(a)\n if i%100 == 0:\n train_accuracy = mse.eval(feed_dict={x:batch[0],y_:batch[1],keep_prob:1.0})\n #train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})\n print(\"step %d, test MSE %g\"%(i,train_accuracy))\n \"\"\"if i%1000==0:\n tf.train.Saver().save(sess,ckpt_file_path,write_meta_graph=True)\"\"\"\n # save the model\n train_step.run(feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5})\nprint(\"Validation set MSE: %g\"%mse.eval(feed_dict={x:X_v,y_:y_v,keep_prob:1.0}))\n\ny_result = y_conv.eval(feed_dict={x:X_v,y_:y_v,keep_prob:1.0})\n\nshow_result(X_v[0].reshape(96,96), y_result[0])\nshow_result(X_v[0].reshape(96,96), y_v[0])\n\ny_test = y_conv.eval(feed_dict={x:X_6,keep_prob:1.0})\n# save the prediction results\nexport(y_test,'result_13.csv')\n\n","sub_path":"人脸关键点检测/base/模型训练.py","file_name":"模型训练.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318229669","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWritten by Daniel M. 
Aukes\nEmail: danaukes@gmail.com\nPlease see LICENSE for full license.\n\"\"\"\n\nimport pynamics\nfrom pynamics.name_generator import NameGenerator\nfrom pynamics.force import Force\n\nclass PseudoParticle(NameGenerator):\n def __init__(self,pCM,mass,name = None,system = None,vCM = None,aCM=None):\n system = system or pynamics.get_system()\n\n name = name or self.generate_name()\n self.name = name\n\n self.pCM = pCM\n self.mass = mass\n self.system = system\n\n self.vCM= vCM or self.pCM.time_derivative(self.system.newtonian,self.system)\n self.aCM= aCM or self.vCM.time_derivative(self.system.newtonian,self.system)\n \n self.gravityvector = None\n self.forcegravity = None \n \n self.system.particles.append(self)\n\n self.effectiveforces = []\n\n def adddynamics(self):\n effectiveforce = self.mass*self.aCM\n self.KE = .5*self.mass*self.vCM.dot(self.vCM)\n\n self.effectiveforces = []\n self.effectiveforces.append(Force(effectiveforce,self.vCM))\n\n return self.effectiveforces\n \n def addforcegravity(self,gravityvector):\n pass\n\nclass Particle(PseudoParticle):\n def addforcegravity(self,gravityvector):\n self.gravityvector = gravityvector\n self.forcegravity = self.mass*gravityvector\n self.system.addforce(self.forcegravity,self.vCM)\n \nif __name__=='__main__':\n from pynamics.system import System\n from pynamics.frame import Frame\n sys = System()\n N = Frame(name = 'N')\n \n sys.set_newtonian(N)\n Particle(0*N.x,1)","sub_path":"python/pynamics/particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"12561551","text":"'''\nProgram to make:\n - dummy xyz map data\n - dummy xyz path (the leading diagonal of the map matrix)\n\nPrints output with 3 columns: x y z.\nData is uniformly spaced by 1 in XY and Z is a random number.\n\nTo run: python make_dummy.py\n'''\n\nimport numpy as np\n\nx_origin = 5002200\ny_origin = 2090949\n\n#Make 100x100 array of elevation data\nz = np.random.rand(100,100)*10 + 16\n\n#Save dummy map data to file: 'dummy_map.txt'\nwith open('dummy_map.txt', 'w') as f:\n for i in range(len(z)):\n for j in range(len(z[i])):\n f.write(\"{:.3f} {:.3f} {:.3f}\\n\".format(i+x_origin, j+y_origin, z[i,j]))\n\n#Save dummy path data to file: 'dummy_path.txt'\nwith open('dummy_path.txt', 'w') as f2:\n for i in range(len(z)):\n f2.write(\"{:.3f} {:.3f} {:.3f}\\n\".format(i+x_origin, i+y_origin, z[i,i]))\n","sub_path":"make_dummy.py","file_name":"make_dummy.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"385528137","text":"from collections import defaultdict\nfrom itertools import product\n \nH, W, K = map(int, input().split())\nS = [input() for _ in range(H)]\n \nC = [[int(s[i]) for s in S] for i in range(W)]# group each column together\n \ntotal = sum(sum(c) for c in C)# compute the sum of all values\n \nif total <= K:\n answer = 0\nelse:\n answer = H * W\n for X in product([False, True], repeat=H-1):# H is the number of rows\n # the first for loop runs once per candidate X\n ans = sum(X)# count the number of True values\n if ans > answer:\n continue\n M = [[0]]\n for i, x in enumerate(X):\n if x:# execute if True\n M.append([])# one group is created per True\n M[-1].append(i+1)# append that row index to the last group\n D = [0] * len(M)# set the initial values\n for c in C:\n for k, m in enumerate(M):\n D[k] += sum(c[i] for i in m)# add to the k-th element\n # by accumulating into D we can work out how far the columns can be summed\n if 
any(d > K for d in D):# if even one exceeds K\n ans += 1\n if ans > answer:\n break\n D = [sum(c[i] for i in m) for m in M]# reset D (back to the very first D)\n if any(d > K for d in D):# if even one exceeds K\n ans = answer + 1# update ans\n break\n answer = min(answer, ans)\n\nprint(answer)","sub_path":"Python_codes/p02733/s018874505.py","file_name":"s018874505.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552290196","text":"\"\"\"\n\nSo let us load the breast cancer dataset:\n\n1. Read dataset directly from the UCI website using pandas.\n2. We assign the 30 features to a NumPy array X. Using LabelEncoder\n transform the class labels from their original string representation\n into integers.\n3. Split the dataset into separate training and test datasets.\n\n\"\"\"\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\ndef load_data(): \n\tdf = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data\",\n\t\t\t\t\theader=None)\n\n\tX = df.loc[:, 2:].values\n\ty = df.loc[:, 1].values\n\tle = LabelEncoder()\n\ty = le.fit_transform(y)\n\n\tX_train, X_test, y_train, y_test = \\\n\t\t\ttrain_test_split(X, y, test_size=0.2, random_state=1)\n\n\treturn X_train, X_test, y_train, y_test\n","sub_path":"ML/model_eval_tuning/load_breast_cancer_data.py","file_name":"load_breast_cancer_data.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387547336","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nFull Sieve Command Line Client\n\"\"\"\n\nfrom __future__ import absolute_import\nimport pickle as pickler\nfrom collections import OrderedDict\n\nfrom g6k.algorithms.workout import workout\nfrom g6k.siever import Siever\nfrom g6k.utils.cli import parse_args, run_all, pop_prefixed_params\nfrom g6k.utils.stats import SieveTreeTracer\nfrom g6k.utils.util import load_svpchallenge_and_randomize, db_stats\nfrom g6k.utils.util import sanitize_params_names, print_stats, output_profiles\nimport six\n\n\ndef full_sieve_kernel(arg0, params=None, seed=None):\n # Pool.map only supports a single parameter\n if params is None and seed is None:\n n, params, seed = arg0\n else:\n n = arg0\n\n pump_params = pop_prefixed_params(\"pump\", params)\n verbose = params.pop(\"verbose\")\n\n reserved_n = n\n params = params.new(reserved_n=reserved_n, otf_lift=False)\n\n challenge_seed = params.pop(\"challenge_seed\")\n A, _ = load_svpchallenge_and_randomize(n, s=challenge_seed, seed=seed)\n\n g6k = Siever(A, params, seed=seed)\n tracer = SieveTreeTracer(g6k, root_label=(\"full-sieve\", n), start_clocks=True)\n\n # Actually runs a workout with very large decrements, so that the basis is kind-of reduced\n # for the final full-sieve\n workout(\n g6k,\n tracer,\n 0,\n n,\n dim4free_min=0,\n dim4free_dec=15,\n pump_params=pump_params,\n verbose=verbose,\n )\n\n return tracer.exit()\n\n\ndef full_sieve():\n \"\"\"\n Run a full sieve (with some partial sieve as precomputation).\n \"\"\"\n description = full_sieve.__doc__\n\n args, all_params = parse_args(description, challenge_seed=0)\n\n stats = run_all(\n full_sieve_kernel,\n list(all_params.values()),\n lower_bound=args.lower_bound,\n upper_bound=args.upper_bound,\n step_size=args.step_size,\n trials=args.trials,\n workers=args.workers,\n seed=args.seed,\n )\n\n inverse_all_params = OrderedDict([(v, k) for (k, v) in six.iteritems(all_params)])\n stats = sanitize_params_names(stats, inverse_all_params)\n\n fmt = "{name:50s} :: n: 
{n:2d}, cputime {cputime:7.4f}s, walltime: {walltime:7.4f}s, |db|: 2^{avg_max:.2f}\"\n profiles = print_stats(\n fmt,\n stats,\n (\"cputime\", \"walltime\", \"avg_max\"),\n extractf={\"avg_max\": lambda n, params, stat: db_stats(stat)[0]},\n )\n\n output_profiles(args.profile, profiles)\n\n if args.pickle:\n pickler.dump(\n stats,\n open(\n \"full-sieve-%d-%d-%d-%d.sobj\"\n % (args.lower_bound, args.upper_bound, args.step_size, args.trials),\n \"wb\",\n ),\n )\n\n\nif __name__ == \"__main__\":\n full_sieve()\n","sub_path":"full_sieve.py","file_name":"full_sieve.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"379894339","text":"#!/usr/bin/python3\n\nimport re\nimport os\nimport ast\nimport pdb\nimport math\nimport time\nimport sqlite3\nimport unittest\nfrom sys import *\nfrom random import *\nfrom TestPerson import *\nfrom printName import *\n\nclass ThisPerson:\n def __init__(self, fname=\"\", lname=\"\", pword=\"\", gender='', id=0, salary=0):\n if id < 0:\n id = 0\n if salary < 0:\n salary = 0\n self.fname = fname\n self.lname = lname\n self.pword = pword\n self.gender = gender\n self.id = id\n self.salary = salary\n self.mytuple = (fname,lname,pword,gender,id,salary)\n self.myperson = [fname,lname,pword,gender,id,salary]\n self.mykvpair = {id: [fname,lname,pword,gender,id,salary]}\n\n def copyPerson(self, person):\n self.fname = person.fname\n self.lname = person.lname\n self.pword = person.pword\n self.gender = person.gender\n self.id = person.id\n self.salary = person.salary\n self.mytuple = (self.fname,self.lname,self.pword,self.gender,self.id,self.salary)\n self.myperson = [self.fname,self.lname,self.pword,self.gender,self.id,self.salary]\n self.mykvpair = {self.id: [self.fname,self.lname,self.pword,self.gender,self.id,self.salary]}\n\n def setThisPersonData(self,fn,ln,pw,gr,id,sal):\n self.fname = fn\n self.lname = ln\n self.pword = pw\n self.gender = gr\n self.id = id\n self.salary = sal\n self.mytuple = (fn,ln,pw,gr,id,sal)\n self.myperson = [fn,ln,pw,gr,id,sal]\n\n def getThisPersonData(self):\n print(\"{0}\".format(self.mytuple))\n # print(\"{0}\".format(self.myperson))\n # print(\"fname:\\t {0}\".format(self.fname))\n # print(\"lname:\\t {0}\".format(self.lname))\n # print(\"pword:\\t {0}\".format(self.pword))\n # print(\"gender:\\t {0}\".format(self.gender))\n # print(\"id:\\t {0}\".format(self.id))\n # print(\"salary:\\t {0}\\n\".format(self.salary))\n\n def createPerson(self,fn,ln,pw,gr,id,sal):\n p1 = ThisPerson(fn, ln, pw, gr, id, sal)\n print(\"{0}\".format(p1.mytuple)) ## DBPRINT\n return p1\n\n def writePersonToFile(self,person):\n localtime = time.asctime( time.localtime(time.time()) )\n file0 = open(\"personlist.txt\",\"a\")\n print(\"{0}\".format(localtime))\n file0.write(\"{0}\\n\".format(person.mytuple))\n file0.close()\n\n def convertPersonToString(self,person):\n # copyPerson(person)\n strPerson = \"(\\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\'%c\\', %i, %i)\" % (person.fname,person.lname,person.pword,\n person.gender,person.id,person.salary)\n# \"\"\"INSERT INTO ThisGroup VALUES (\"Yertle\", \"Turtle\", \"vodka\", 65506, 'F', 61);\"\"\"\n print(\"strPerson: {0}\".format(strPerson))\n return strPerson\n\n# Driver program\nif __name__ == \"__main__\":\n p0 = ThisPerson(\"Human\",\"George\",\"yar\",'M',2001,40)\n p1 = ThisPerson(\"H\",\"G\",\"y\",'F',10101,140)\n # p0.copyPerson(p1)\n p1.convertPersonToString(p1)\n p0.getThisPersonData()\n # 
p0.writePersonToFile(ThisPerson(\"H\",\"G\",\"y\",'A',2001,40))\n # p0.writePersonToFile(p0)\n p0.setThisPersonData(\"TestHuman\",\"Gorgon\",\"meepzor\",'U',2041,80)\n p0.getThisPersonData()# printPersonName(p0.fname)\n #\n # ptest2 = p0.createPerson(\"Hun\",\"Guizang\",\"wong\",'M',p0.id,p0.salary)\n # printgender(p0.gender)\n # convID2bytearr(p0.id)\n # convSal2bytearr(p0.salary)\n print(\"\\n\")\n # printPerson()\n#\n","sub_path":"TestPerson/my_tmp/ThisPerson.py","file_name":"ThisPerson.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"497375565","text":"# 练习:\n# 1. 任意输入一段字符串\n# 1) 计算出这段字符串中空格的个数,并打印这个数\n# 2) 计算出字符串的全部ascii字符的个数并打印\n# 注: ascii 字符的编码在0~127范围内\n# 思考:\n# 用while语句能实现上述功能?\n\ns = input(\"请输入一段文字: \")\n\nblanks_count = 0 # 用来记录空格的个数\n\n# for c in s:\n# if c == ' ': # if ord(c) == 32:\n# blanks_count += 1\n\ni = 0 # i 代表索引值\nwhile i < len(s):\n c = s[i]\n if c == ' ':\n blanks_count += 1\n i += 1\n\nprint(\"空格的个数是:\", blanks_count)\n\nascii_count = 0\nfor c in s:\n if ord(c) <= 127:\n ascii_count += 1\nprint(\"ascii字符的个数是:\", ascii_count)\n\n\n\n","sub_path":"NOTE/02_PythonBase/day05/exercise/calc_char_count2.py","file_name":"calc_char_count2.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5827986","text":"from tkinter import ttk, Tk, messagebox\n\n\nclass Output:\n def __init__(self, analysis_results):\n \"\"\"Constructor\n\n Args:\n analysis_results: A list of output elements\n \"\"\"\n self.frame = None\n self.analysis_results = analysis_results\n\n if self.analysis_results is not None:\n self.initialize()\n\n def initialize(self):\n \"\"\"Create the output window and display output elements\n \"\"\"\n\n window = Tk()\n window.title(\"Analysis output\")\n\n frame = ttk.Frame(master=window)\n\n for idx, element in enumerate(self.analysis_results):\n\n render = self.confirm_render(element)\n\n if render is False:\n window.destroy()\n return\n\n element.create_output(frame, idx)\n\n self.frame = frame\n\n def destroy(self):\n self.frame.destroy()\n\n def pack(self):\n if self.frame is not None:\n self.frame.pack()\n\n def confirm_render(self,element):\n \"\"\"Confirm rendering of output element from user\n\n Args:\n element: Output element\n\n Returns:\n True/False based on response\n \"\"\"\n\n elements = element.number_of_elements_to_render()\n\n if elements > 100:\n\n msg = \"The analysis has {} elements to render.\".format(elements)\n msg = msg+\"Are you sure you wish to render it?\"\n\n return messagebox.askokcancel(title=\"Are you sure\",message=msg)\n\n return True\n","sub_path":"src/gui/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"106104364","text":"import subprocess\nimport json\nfrom hashlib import md5\n\nOPENEO_API_VERSION = \"0.0.2\"\n\nPROCESS_GRAPH = {\n \"process_graph\":{\n \"process_id\":\"min_time\",\n \"args\":{\n \"imagery\":{\n \"process_id\":\"NDVI\",\n \"args\":{\n \"imagery\":{\n \"process_id\":\"filter_daterange\",\n \"args\":{\n \"imagery\":{\n \"process_id\":\"filter_bbox\",\n \"args\":{\n \"imagery\":{\n \"product_id\":\"s2a_prd_msil1c\"\n },\n \"left\":652000,\n \"right\":672000,\n \"top\":5161000,\n \"bottom\":5181000,\n \"srs\":\"EPSG:32632\"\n }\n },\n \"from\":\"2017 -01 -01\",\n \"to\":\"2017 -01 -08\"\n }\n },\n 
\"red\":\"B04\",\n \"nir\":\"B08\"\n }\n }\n }\n }\n}\n\nSERVER_VERSION = 1\n\nJOB_ID = 1\n\nOS_ENV_CMD = \"dpkg -l\"\nHW_ENV_CMD = \"lspci -nnk\"\n\ndef create_context_model(job_id):\n\n CODE_ENV_CMD = 'now show --dir={}'.format(str(job_id))\n\n context_model = {}\n\n # Retrieving data\n\n process = subprocess.Popen(OS_ENV_CMD.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n os_hash = md5(output).hexdigest()\n\n process = subprocess.Popen(HW_ENV_CMD.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n hw_hash = md5(output).hexdigest()\n\n process = subprocess.Popen(CODE_ENV_CMD.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n output = str(output).split('\\\\n')\n\n trial = output[1].split(':')[1].strip()\n code_hash = output[5].split(':')[1].strip()\n\n # save to json\n\n context_model['backend_version'] = SERVER_VERSION\n context_model['openeo_api'] = OPENEO_API_VERSION\n context_model['process_graph'] = PROCESS_GRAPH\n context_model['job_id'] = JOB_ID\n context_model['os'] = os_hash\n context_model['hw'] = hw_hash\n context_model['code'] = {trial: code_hash}\n\n # context_model = json.loads(str(context_model))\n\n with open('context_model.json', 'w') as outfile:\n json.dump(context_model, outfile)\n\n print(context_model)\n return context_model\n","sub_path":"server_now/env_cap.py","file_name":"env_cap.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"512902206","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Console script for hookman.\"\"\"\nimport sys\n\nimport click\n\n\n@click.command()\n@click.argument('specs_path', type=click.Path(exists=True))\n@click.option('--dst-path', default='./', help='Path to where the files will be destined')\ndef main(specs_path, dst_path):\n \"\"\"\n This task will invoke a code generation to produce the following files:\n - hook_specs.h - `File to be used by the plugin implementation`\n - HookCaller.hpp - `File to be passed to the application`\n - HookCallerPython.cpp - `Bindings for the function available on the plugin implementation`\n\n In order to call this command is necessary to inform the hook_specs.py file that has the\n specifications of the hooks available, and the destination path, (where the files will be created).\n\n Per default dst-path is the same directory that the command is called.\n\n Example:\n > hookman //hook_specs.py --dst-path=/home/\n \"\"\"\n\n from pathlib import Path\n from hookman.hookman_generator import HookManGenerator\n\n hook_specs_path = Path(specs_path)\n hm_generator = HookManGenerator(hook_spec_file_path=hook_specs_path)\n hm_generator.generate_project_files(Path(dst_path))\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main()) # pragma: no cover\n","sub_path":"hookman/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176498066","text":"import collections\nimport datetime\nimport io\n\nfrom weatherapp.core import constants\n\nfrom django.core.cache import cache\n\nimport requests\nimport pandas\n\n\ndef get_outside_temp_context():\n \"\"\" Gets and returns the context for the Outside Temperatures View\n \"\"\"\n highs = _get_highest_temps()\n times = []\n temps = []\n for value in highs.values():\n times.append(datetime.datetime.strptime(value['time'], '%H:%M'))\n\n for k, v in highs.items():\n 
temps.append({'date': k, 'temp': v['temp']})\n\n avg_time = _get_average_time(times)\n most_common = _get_most_common_time(times)\n top_temps = _get_top_temps(temps)\n\n return {\n 'time': avg_time,\n 'most_common_time': most_common,\n 'top_temps': top_temps,\n }\n\n\ndef get_hi_temp_context():\n \"\"\" Gets and returns the context for the Hi Temp View\n \"\"\"\n valid_entries = _get_hi_temp_range()\n return valid_entries\n\n\ndef get_forecast_context():\n \"\"\" Gets and returns the context for the Forecast View\n \"\"\"\n forecast = _calculate_forecast()\n return forecast\n\n\ndef _get_weather_data():\n \"\"\" Fetches the weather data CSV from the given URL\n \"\"\"\n try:\n response = requests.get(constants.WEATHER_CSV_URL)\n except requests.exceptions.RequestException as e:\n raise e\n\n data = response.content.decode('utf-8')\n\n df = pandas.read_csv(\n io.StringIO(data), usecols=constants.WEATHER_CSV_FIELDNAMES)\n\n cache.set('weather_df', df, 86400)\n\n return df\n\n\ndef _get_weather_df():\n \"\"\" Fetches the dataframe from the cache. If none are available,\n it fetches a new one.\n \"\"\"\n if cache.get('weather_df') is None:\n return _get_weather_data()\n\n return cache.get('weather_df')\n\n\ndef _create_date(date_str):\n \"\"\" Creates a datetime from the str given in the csv.\n \"\"\"\n return datetime.datetime.strptime(date_str, '%d/%m/%Y')\n\n\ndef _calculate_average(arr):\n total = sum(item for item in arr)\n avg = total / len(arr)\n return avg\n\n\ndef _get_highest_temps():\n \"\"\" Creates a dictionary of the highest temperatures\n \"\"\"\n df = _get_weather_df()\n unique_dates = df.Date.unique()\n highest_values = {}\n\n for date in unique_dates:\n # Initialise the highest value to be overwritten\n highest_values.update({date: {'time': 0, 'temp': 0}})\n rows = df.loc[df['Date'] == date]\n\n for index, row in rows.iterrows():\n if row['Outside Temperature'] > highest_values[row['Date']]['temp']:\n highest_values.update({\n date: {\n 'time': row['Time'],\n 'temp': row['Outside Temperature']\n }\n })\n\n return highest_values\n\n\ndef _get_average_time(times):\n \"\"\" Calculates and returns the average time in a list of times\n \"\"\"\n total = sum(dt.hour * 3600 + dt.minute * 60 + dt.second for dt in times)\n avg = total / len(times)\n return datetime.datetime.fromtimestamp(avg).strftime('%H:%M')\n\n\ndef _get_most_common_time(times):\n \"\"\" Gets and returns the most commonly occuring time in a list of times\n \"\"\"\n most_common = collections.Counter(times).most_common(1)[0]\n return most_common[0].strftime('%H:%M')\n\n\ndef _get_top_temps(temps):\n \"\"\" Sorts a list of dictionaries containing temperatures and dates.\n Returns the top 10 temperatures, sorted by date.\n \"\"\"\n top_temps = sorted(temps, key=lambda k: k['temp'], reverse=True)[:10]\n return sorted(top_temps, key=lambda k: _create_date(k['date']))\n\n\ndef _create_june_dates_list(unique_dates):\n june_dates = []\n\n for date in unique_dates:\n if _create_date(date).month == 6:\n june_dates.append(date)\n\n return sorted(june_dates)[:9]\n\n\ndef _is_valid_temps(hi_temp, low_temp):\n hi_temp_range_upper = constants.HI_TEMP + constants.HI_TEMP_RANGE\n hi_temp_range_lower = constants.HI_TEMP - constants.HI_TEMP_RANGE\n low_temp_range_upper = constants.LOW_TEMP + constants.LOW_TEMP_RANGE\n low_temp_range_lower = constants.LOW_TEMP - constants.LOW_TEMP_RANGE\n\n is_hi_temp = False\n is_low_temp = False\n\n if hi_temp_range_lower <= hi_temp <= hi_temp_range_upper:\n is_hi_temp = True\n\n if 
low_temp_range_lower <= low_temp <= low_temp_range_upper:\n is_low_temp = True\n\n if is_hi_temp or is_low_temp:\n return True\n\n return False\n\n\ndef _get_hi_temp_range():\n df = _get_weather_df()\n\n unique_dates = df.Date.unique()\n dates = _create_june_dates_list(unique_dates)\n\n valid_dates = []\n\n for date in dates:\n rows = df.loc[df['Date'] == date]\n\n for index, row in rows.iterrows():\n if _is_valid_temps(row['Hi Temperature'], row['Low Temperature']):\n valid_dates.append({\n 'date': date,\n 'hi_temp': row['Hi Temperature'],\n 'low_temp': row['Low Temperature'],\n 'time': row['Time'],\n })\n\n return valid_dates\n\n\ndef _get_average_june_temp(dates):\n \"\"\" Gets and returns the average temperature for the first\n 9 days in June\n \"\"\"\n df = _get_weather_df()\n temps = {date: [] for date in dates}\n\n for date in dates:\n rows = df.loc[df['Date'] == date]\n for index, row in rows.iterrows():\n temps[date].append(row['Outside Temperature'])\n\n avg_temps = {}\n for k, v in temps.items():\n avg_temps[k] = _calculate_average(v)\n\n return avg_temps\n\n\ndef _get_june_temperature_diffs():\n \"\"\" Calculates and returns the difference between the average and time's temps\n \"\"\"\n df = _get_weather_df()\n\n unique_dates = df.Date.unique()\n dates = _create_june_dates_list(unique_dates)\n avg_temps = _get_average_june_temp(dates)\n\n diffs = {date: [] for date in dates}\n\n for date in dates:\n rows = df.loc[df['Date'] == date]\n for index, row in rows.iterrows():\n diffs[date].append({\n 'diff': row['Outside Temperature'] - avg_temps[date],\n 'time': row['Time'],\n })\n\n return diffs\n\n\ndef _calculate_forecast():\n diffs = _get_june_temperature_diffs()\n forecast_dates = ['0{}/07/2006'.format(i) for i in range(9)]\n\n forecast = {date: [] for date in forecast_dates}\n\n for index, (k, v) in enumerate(diffs.items()):\n for value in v:\n forecast[forecast_dates[index]].append({\n value['time']:\n '{0:.2f}'.format(constants.JULY_AVG_TEMP + value['diff'])\n })\n\n return forecast\n","sub_path":"weatherapp/core/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577200160","text":"#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Logica command-line tool.\n\nExample usage:\n\npython3 logica.py - run Grandparent <<<'\nParent(parent: \"Shmi Skywalker\", child: \"Anakin Skywalker\");\nParent(parent: \"Anakin Skywalker\", child: \"Luke Skywalker\");\nGrandparent(grandparent:, grandchild:) :-\n Parent(parent: grandparent, child: x),\n Parent(parent: x, child: grandchild);\n'\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport getopt\nimport json\nimport os\nimport subprocess\nimport sys\n\n# We are doing this 'if' to allow usage of the code as package and as a\n# script.\nif __name__ == '__main__' and not 
__package__:\n from common import color\n from common import sqlite3_logica\n from compiler import functors\n from compiler import rule_translate\n from compiler import universe\n from parser_py import parse\nelse:\n from .common import color\n from .common import sqlite3_logica\n from .compiler import functors\n from .compiler import rule_translate\n from .compiler import universe\n from .parser_py import parse\n\n\ndef ReadUserFlags(rules, argv):\n \"\"\"Reading logic program flags provided by the user.\"\"\"\n def Error(msg):\n print(color.Format('[ {error}Error{end} ] {msg}', {'msg': msg}))\n sys.exit(1)\n\n annotations = universe.Annotations.ExtractAnnotations(\n rules, restrict_to=['@DefineFlag'])\n defined_flags = annotations['@DefineFlag'].keys()\n try:\n p = getopt.getopt(argv, '', ['%s=' % f for f in defined_flags])\n except getopt.GetoptError as e:\n Error(str(e))\n\n if p[1]:\n Error('Undefined command arguments: %s' % p[1])\n\n user_flags = {k[2:]: v for k, v in p[0]}\n return user_flags\n\n\ndef GetImportRoot():\n \"\"\"Parses the LOGICAPATH environment variable.\"\"\"\n import_root_env = os.environ.get('LOGICAPATH')\n if not import_root_env:\n return None\n roots = import_root_env.split(':')\n if len(roots) > 1:\n return roots\n else:\n return import_root_env\n\n\ndef main(argv):\n if len(argv) <= 1 or argv[1] == 'help':\n print('Usage:')\n print(' logica <l file> <command> <predicate name> [flags]')\n print(' Commands are:')\n print(' print: prints the StandardSQL query for the predicate.')\n print(' run: runs the StandardSQL query on BigQuery with pretty output.')\n print(' run_to_csv: runs the query on BigQuery with csv output.')\n\n print('')\n print('')\n print('Example:')\n print(' python3 logica.py - run GoodIdea <<<\\' '\n 'GoodIdea(snack: \"carrots\")\\'')\n return 1\n\n if len(argv) == 3 and argv[2] == 'parse':\n pass # compile needs just 2 actual arguments.\n else:\n if len(argv) < 4:\n print('Not enough arguments. Run \\'logica help\\' for help.',\n file=sys.stderr)\n return 1\n\n if argv[1] == '-':\n filename = '/dev/stdin'\n else:\n filename = argv[1]\n\n command = argv[2]\n\n commands = ['parse', 'print', 'run', 'run_to_csv']\n\n if command not in commands:\n print(color.Format('Unknown command {warning}{command}{end}. 
'\n 'Available commands: {commands}.',\n dict(command=command, commands=', '.join(commands))))\n return 1\n if not os.path.exists(filename):\n print('File not found: %s' % filename, file=sys.stderr)\n return 1\n program_text = open(filename).read()\n\n try:\n parsed_rules = parse.ParseFile(program_text,\n import_root=GetImportRoot())['rule']\n except parse.ParsingException as parsing_exception:\n parsing_exception.ShowMessage()\n sys.exit(1)\n\n if command == 'parse':\n # No indentation to avoid file size inflation.\n print(json.dumps(parsed_rules, sort_keys=True, indent=''))\n return 0\n\n predicates = argv[3]\n\n user_flags = ReadUserFlags(parsed_rules, argv[4:])\n\n predicates_list = predicates.split(',')\n for predicate in predicates_list:\n try:\n p = universe.LogicaProgram(parsed_rules, user_flags=user_flags)\n formatted_sql = p.FormattedPredicateSql(predicate)\n preamble = p.execution.preamble\n defines_and_exports = p.execution.defines_and_exports\n main_predicate_sql = p.execution.main_predicate_sql\n except rule_translate.RuleCompileException as rule_compilation_exception:\n rule_compilation_exception.ShowMessage()\n sys.exit(1)\n except functors.FunctorError as functor_exception:\n functor_exception.ShowMessage()\n sys.exit(1)\n\n if command == 'print':\n print(formatted_sql)\n\n engine = p.annotations.Engine()\n\n if command == 'run' or command == 'run_to_csv':\n # We should split and move this logic to dialects.\n if engine == 'bigquery':\n output_format = 'csv' if command == 'run_to_csv' else 'pretty'\n p = subprocess.Popen(['bq', 'query',\n '--use_legacy_sql=false',\n '--format=%s' % output_format],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n o, _ = p.communicate(formatted_sql.encode())\n elif engine == 'sqlite':\n # TODO: Make multi-statement scripts work.\n format = ('artistictable' if command == 'run' else 'csv')\n o = sqlite3_logica.RunSqlScript(\n [preamble] + defines_and_exports + [main_predicate_sql],\n format).encode()\n elif engine == 'psql':\n p = subprocess.Popen(['psql', '--quiet'] +\n (['--csv'] if command == 'run_to_csv' else []),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n commands = []\n o, _ = p.communicate(\n '\\n'.join(commands + [formatted_sql]).encode())\n elif engine == 'trino':\n a = p.annotations.annotations['@Engine']['trino']\n catalog = a.get('catalog', 'memory')\n\n p = subprocess.Popen(['trino', '--catalog=%s' % catalog] +\n (['--output-format=CSV_HEADER_UNQUOTED']\n if command == 'run_to_csv' else\n ['--output-format=ALIGNED']),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n o, _ = p.communicate(formatted_sql.encode())\n elif engine == 'presto':\n a = p.annotations.annotations['@Engine']['presto']\n catalog = a.get('catalog', 'memory')\n server = a.get('server', 'localhost:8080')\n p = subprocess.Popen(['presto',\n '--catalog=%s' % catalog,\n '--server=%s' % server,\n '--file=/dev/stdin'] +\n (['--output-format=CSV_HEADER_UNQUOTED']\n if command == 'run_to_csv' else\n ['--output-format=ALIGNED']),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n o, _ = p.communicate(formatted_sql.encode())\n else:\n assert False, 'Unknown engine: %s' % engine\n print(o.decode())\n\n\ndef run_main():\n \"\"\"Run main function with system arguments.\"\"\"\n main(sys.argv)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"logica.py","file_name":"logica.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364677396","text":"import numpy 
as np\nimport matplotlib.pyplot as plt\n\ndef fcn(u,t):\n return -3*u + np.sin(2*t)\n\ndef exact(t):\n return (2*np.exp(3*(np.pi-t)) + \\\n 3*np.sin(2*t) - 2*np.cos(2*t))/13\n \nt0 = np.pi; te = 3*np.pi\nNt = 20\nI = 0.\n\nu = np.zeros(Nt+1) \nt = np.linspace(t0, te, Nt+1)\ndt = t[1] - t[0]\n\nu[0] = I\nfor n in range(Nt):\n u[n+1] = u[n] + dt*fcn( u[n], t[n] )\n\ntt = np.linspace(t0, te, 101)\nex = exact(tt)\n\n#plt.plot(t, u,'o-', tt, ex,'r-')\nplt.plot(t, u,'o-')\nplt.plot(tt, ex,'r-')\nplt.xlabel('t'); plt.ylabel('u')\nplt.title('Euler method, numerical solution')\nplt.grid(True)\nplt.savefig('Euler0.pdf',bbox_inches='tight')\nplt.show()\n","sub_path":"Euler.py","file_name":"Euler.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"101432161","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom Book.models import BookInfo, PeopleInfo\n\n\ndef peoplelist(request,book_id):\n \"\"\"Serve the people info page for a book\"\"\"\n\n # book_id is captured by the URL regex group and passed in automatically\n\n book = BookInfo.objects.get(id=book_id)\n # query the people that belong to this book\n people_list=book.peopleinfo_set.all()\n context = {\n 'people_list': people_list\n }\n return render(request,\"Book/peoplelist.html\",context)\n\n\ndef booklist(request):\n \"\"\"Serve the book info page\"\"\"\n\n # query all books; book_list is a queryset of BookInfo objects\n book_list=BookInfo.objects.all()\n context={\n 'book_list':book_list\n }\n\n return render(request,'Book/booklist.html',context)\n\n\ndef test(request):\n # test: the request-to-view logic\n # no template used here\n # return HttpResponse('测试123!')\n context={\n 'test':'测试'\n }\n # render the template with the context and respond\n return render(request,'Book/test.html',context)","sub_path":"Book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154316201","text":"from __future__ import print_function\n\nfrom train_e_d import train_e_d\nfrom utils import list_images\nfrom infer import stylize\n#import tensorflow.contrib.eager as tfe\n\n#tfe.enable_eager_execution()\nis_training = True\n\n# for training\nTRAINING_CONTENT_DIR = 'data'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nLOGGING_PERIOD = 20\nfeature_weight = 50\nMODEL_SAVE_PATHS = [\n 'Aug_L1_concat_models/encoder_decoder_concat_model.ckpt',\n]\n#checkpoint = 'Aug_L1_concat_models'\n# for inferring (stylize)\nINFERRING_CONTENT_DIR = 'data2'\nOUTPUTS_DIR = 'outputs'\n\n\ndef main():\n\n if is_training:\n\n content_imgs_path = list_images(TRAINING_CONTENT_DIR)\n\n train_e_d(content_imgs_path, feature_weight, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATHS, checkpoint=None, logging_period=LOGGING_PERIOD, debug=False)\n\n print('\\n>>> Successfully! Done all training...\\n')\n\n else:\n\n content_imgs_path = list_images(INFERRING_CONTENT_DIR)\n\n stylize(content_imgs_path, OUTPUTS_DIR, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATHS, suffix=None)\n\n print('\\n>>> Successfully! Done all stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293557268","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\"\"\"\nDueling QNetwork for function approximation. Splits the network prior to the end into two streams V and Q. \nV is the estimate of the value of the state. 
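# A minimal check of the mean-subtracted dueling aggregation described here,
# on dummy tensors (illustrative only, not part of the module):
import torch
v = torch.randn(4, 1)   # state values for a batch of 4
a = torch.randn(4, 6)   # advantages for 6 actions
q = v.expand_as(a) + a - a.mean(1, keepdim=True).expand_as(a)
print(q.shape)          # torch.Size([4, 6])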
Q is the advantage of each action given the state.\nTwo formulations for subtracting Q from V:\nV - max(Q)\nThis verision makes more sense theoretically as the value of V* should equal the max(Q*(s,A)). \nBut in practice mean allows for better performance.\nV - mean(Q)\nSame as max except now they are separated by a constant. \nAnd not as susceptable to over optimism due to randomness of Q values.\n\"\"\"\n\n# class Dueling_QNetwork(nn.Module):\n# def __init__(self,state_space,action_space,seed):\n# super(Dueling_QNetwork,self).__init__()\n# \"\"\"\n# Rescale the last layer gradients by 1/sqrt(2)\n# clip gradients whose norms are <= 10\n# \"\"\"\n# self.action_space = action_space\n# self.state_space = state_space\n# self.seed = torch.manual_seed(seed)\n\n# self.fc1 = nn.Linear(state_space,64)\n# self.fc2 = nn.Linear(64,32)\n# self.Qfc1 = nn.Linear(32,action_space)\n# self.Vfc1 = nn.Linear(32,1)\n\n# def forward(self,state):\n# x = state\n# if not isinstance(state,torch.Tensor):\n# x = torch.tensor(x,dtype=torch.float32) #device = self.device,\n# x = x.unsqueeze(0)\n# x = F.relu(self.fc1(x))\n# x = F.relu(self.fc2(x))\n# a = F.relu(self.Qfc1(x))\n# v = F.relu(self.Vfc1(x))\n# # Max formulation\n# # q_max = torch.max(q)\n# # q.sub_(max)\n# # mean = torch.mean(a)\n# # q.sub_(mean)\n# # torch.add(q,v)\n# v = v.expand_as(a)\n# q = v + a - a.mean(1,keepdim=True).expand_as(a)\n# return q\n \nclass Dueling_QNetwork(nn.Module):\n def __init__(self,state_space,action_space,seed,hidden_dims=(32,32),activation_fc=F.relu):\n super(Dueling_QNetwork,self).__init__()\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.activation_fc = activation_fc\n self.seed = torch.manual_seed(seed)\n print('hidden_dims',hidden_dims)\n self.input_layer = nn.Linear(state_space,hidden_dims[0])\n self.hidden_layers = nn.ModuleList()\n for i in range(len(hidden_dims)-1):\n hidden_layer = nn.Linear(hidden_dims[i],hidden_dims[i+1])\n self.hidden_layers.append(hidden_layer)\n self.value_output = nn.Linear(hidden_dims[-1],1)\n self.advantage_output = nn.Linear(hidden_dims[-1],action_space)\n \n def forward(self,state):\n x = state\n if not isinstance(state,torch.Tensor):\n x = torch.tensor(x,dtype=torch.float32,device = self.device,)\n x = x.unsqueeze(0)\n x = self.activation_fc(self.input_layer(x))\n for hidden_layer in self.hidden_layers:\n x = self.activation_fc(hidden_layer(x))\n a = self.advantage_output(x)\n v = self.value_output(x)\n v = v.expand_as(a)\n q = v + a - a.mean(1,keepdim=True).expand_as(a)\n return q\n \nclass Visual_Dueling_QNetwork(nn.Module):\n def __init__(self,state_space,action_space,seed,hidden_dims=(18432,512),activation_fc=F.relu):\n super(Visual_Dueling_QNetwork,self).__init__()\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.activation_fc = activation_fc\n self.seed = torch.manual_seed(seed)\n print('hidden_dims',hidden_dims)\n\n # Input is (1,84,84,3) -> (1,3,1,84,84)\n self.conv1 = nn.Conv3d(3, 64, kernel_size=(1, 3, 3), stride=(1,3,3))\n self.bn1 = nn.BatchNorm3d(64)\n # Output shape is (1,64,1,28,28)\n self.conv2 = nn.Conv3d(64, 128, kernel_size=(1, 3, 3), stride=(1,3,3),padding=2)\n self.bn2 = nn.BatchNorm3d(128)\n # Output shape is (1,128,5,10,10)\n self.conv3 = nn.Conv3d(128, 128, kernel_size=(1, 3, 3), stride=(1,3,3),padding=2)\n self.bn3 = nn.BatchNorm3d(128)\n # Output shape is (1,64,9,4,4)\n\n self.hidden_layers = nn.ModuleList()\n for i in range(len(hidden_dims)-1):\n hidden_layer = 
nn.Linear(hidden_dims[i],hidden_dims[i+1])\n self.hidden_layers.append(hidden_layer)\n self.value_output = nn.Linear(hidden_dims[-1],1)\n self.advantage_output = nn.Linear(hidden_dims[-1],action_space)\n \n def forward(self,state):\n x = state\n if not isinstance(state,torch.Tensor):\n x = torch.tensor(x,dtype=torch.float32,device = self.device)\n x = x.unsqueeze(0)\n x = self.activation_fc(self.bn1(self.conv1(x)))\n x = self.activation_fc(self.bn2(self.conv2(x)))\n x = self.activation_fc(self.bn3(self.conv3(x)))\n # Flatten layer but retain number of samples\n x = x.view(x.shape[0],x.shape[1] * x.shape[2] * x.shape[3] * x.shape[4])\n for hidden_layer in self.hidden_layers:\n x = self.activation_fc(hidden_layer(x))\n a = self.advantage_output(x)\n v = self.value_output(x)\n v = v.expand_as(a)\n q = v + a - a.mean(1,keepdim=True).expand_as(a)\n return q","sub_path":"Networks/dueling_qnetwork.py","file_name":"dueling_qnetwork.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577356154","text":"#!/usr/local/bin/python\n# encoding: utf-8\n# File Name: optical_flow.py\n# Author: Shaoxiong (Shawn) Wang\n# Create Time: 2017/11/15 16:24\n# TODO:\n# Add annotation\n\nimport cv2\nimport numpy as np\nimport numpy.matlib\nimport time\n\n# cap = cv2.VideoCapture(\"../data/CD0011-a3.mp4\") # choose to read from video or camera\ncap = cv2.VideoCapture(0)\nfourcc = cv2.VideoWriter_fourcc('M','J','P','G')\ncol = 320\nrow = 240\nout = cv2.VideoWriter('flow.avi',fourcc, 8.0, (col*1,row*2)) # The fps depends on CPU\n\n\nx0 = np.matlib.repmat(np.arange(row), col, 1).T\ny0 = np.matlib.repmat(np.arange(col), row, 1)\n\nx = np.zeros_like(x0).astype(int)\ny = np.zeros_like(y0).astype(int)\n\ndef add_flow(x, y, flow):\n dx = np.round_(x + x0).astype(int)\n dy = np.round_(y + y0).astype(int)\n dx[dx>=row] = row - 1\n dx[dx<0] = 0\n dy[dy>=col] = col - 1\n dy[dy<0] = 0\n ds = np.reshape(flow[np.reshape(dx, -1), np.reshape(dy, -1)], (row, col, -1))\n nx = x + ds[:, :, 0]\n ny = y + ds[:, :, 1]\n return nx, ny\n\ndef flow2color(flow, K=15):\n mag, ang = cv2.cartToPolar(-flow[...,1], flow[...,0])\n hsv[...,0] = ang*180/np.pi/2\n mag = mag.astype(float) * K * 960 / col\n \n mag[mag>255] = 255\n hsv[...,0] = ang*180/np.pi/2\n hsv[...,2] = mag\n bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n\n return bgr\n\ntime.sleep(1)\nret, frame1 = cap.read()\nframe1 = cv2.resize(frame1, (col, row))\nprvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)\nf0 = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)\nhsv = np.zeros_like(frame1)\nhsv[...,1] = 255\nflow_sum = np.zeros((row, col, 2))\ncount = 0\n\nreset_threshold_error = 0.3\nreset_threshold_mean = 2\n\n\n\ndef draw(img, flow, scale=5.0):\n for i in range(20, row, 15):\n for j in range(20, col, 15):\n d = (flow[i, j] * scale).astype(int)\n cv2.arrowedLine(img, (j, i), (j+d[0], i+d[1]), (0, 255, 255))\n\n\nwhile(1):\n count += 1\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (col, row))\n next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)\n except:\n break\n\n flow = cv2.calcOpticalFlowFarneback(f0,next, None, 0.5, 3, int(180 * col / 960), 5, 5, 1.2, 0)\n bgr0 = flow2color(flow)\n\n flow_2 = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, int(180 * col / 960), 5, 5, 1.2, 0)\n bgr2 = flow2color(flow_2, K=100)\n\n nx, ny = add_flow(x, y, flow_2)\n nx = nx\n ny = ny\n error = (np.mean(np.abs(nx - flow[:,:,0])) + np.mean(np.abs(ny - flow[:,:,1]))) / 2.0\n mean = 
(np.mean(np.abs(flow[:,:,0])) + np.mean(np.abs(flow[:,:,1]))) / 2.0\n\n if error < reset_threshold_error or mean < reset_threshold_mean:\n x = flow[:,:,0]\n y = flow[:,:,1]\n else:\n x, y = nx, ny\n\n # x, y = add_flow(x, y, flow_2)\n flow_sum[:,:,0] = x\n flow_sum[:,:,1] = y\n bgr = flow2color(flow_sum)\n\n\n frame3 = np.copy(frame2)\n frame4 = np.copy(frame2)\n draw(frame2, flow_2, 10)\n draw(frame3, flow_sum, 5)\n draw(frame4, flow, 5)\n\n # bgr = np.vstack([np.hstack([frame2, frame4, frame3]), np.hstack([bgr2, bgr0, bgr])])\n bgr = np.vstack([np.hstack([frame3]), np.hstack([bgr])])\n cv2.imshow('frame2',bgr)\n \n k = cv2.waitKey(30) & 0xff\n out.write(bgr)\n if k == 27:\n break\n elif k == ord('s'):\n cv2.imwrite('opticalfb.png',frame2)\n cv2.imwrite('opticalhsv.png',bgr)\n prvs = next\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Bnz/python/optical_flow/optical_flow.py","file_name":"optical_flow.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"183267345","text":"import os\nimport xml.etree.ElementTree as ET\n\n\nxmlfilepath = 'Annotations'\ntotal_xml = os.listdir(xmlfilepath)\n\nnum = len(total_xml)\nlist = range(num)\n\nfor i in range(1,num):\n name = total_xml[i]\n print(name)\n path_root = '/Users/phoebechen/Desktop/keras-frcnn-web/VOC2007/Annotations/'+name\n tree = ET.parse(path_root)\n root = tree.getroot()\n path = root[2].text\n root[2].text = path_root\n print(root[2].text)\n tree.write(path_root)","sub_path":"VOC2007/pathRepalce.py","file_name":"pathRepalce.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280340504","text":"#!/usr/bin/python\n# coding: utf-8\n\n\"\"\"\ntcp服务器:\n 创建套接字\n bind ip和port\n listen使得套接字变为被动链接\n accept等待客户端链接\n recv/send 接收发送数据\n\"\"\"\n\nimport socket\n\n# 创建套接字\ntcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# 绑定ip和port\ntcp_socket.bind(('', 1997))\n\n# 变为被动监听套接字\ntcp_socket.listen(128)\n\nprint('服务器启动')\n\nwhile True: # 循环处理客户端\n # 接收客户端链接请求, accept默认阻塞\n # 返回的是一个元组, 第0项生成一个新的socket来链接客户端,第一项是客户端的ip及port\n # 这样监听套接字还可以为下一个客户端继续进行服务\n new_socket, new_addr = tcp_socket.accept()\n print(new_socket, new_addr)\n \n while True: # 循环处理数据\n # 接收客户端发送数据, recv默认阻塞\n recv_data = new_socket.recv(1024) # 指定最大1024字节\n print('接收到的数据为: %s ' % recv_data.decode('utf-8'))\n \n # 当客户端断开链接时, recv将会解堵塞, 并且返回None \n if recv_data:\n # 发送数据给客户端\n new_socket.send('发送数据成功'.encode('utf-8'))\n else:\n break\n \n # 关闭为客户端创建的套接字\n new_socket.close()\n\n# 关闭监听套接字\ntcp_socket.close()\n\n","sub_path":"网络编程/TCP通信/tcp服务器.py","file_name":"tcp服务器.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"103276155","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 5 16:22:30 2016\n\n@author: p000495138\n\"\"\"\n\n\"\"\"\n機能 : NCセンサの検知・補償温度を計算する\n入力 : センサ電圧、電圧テーブル\n出力 : 温度\n\"\"\"\n\nimport numpy as np #行列計算\nfrom scipy import interpolate\n\n\"\"\"関数\"\"\"\n\n#マッピング関数\ndef mapping(data1,data2):\n temp = interpolate.interp1d(data1,data2)\n return temp\n\n#検知温度計算\ndef calc_tdet(vdet,vcomp,Vdet_table,Vcomp_table,Tdet_table):\n\n #初期化\n Vdet_cand = []\n tdet = []\n row_size,col_size = Vdet_table.shape\n \n #step1 1列毎に検知電圧テーブルの候補を出す\n for i in range(col_size):\n fc = mapping(Vcomp_table,Vdet_table[:,i])\n temp = fc(vcomp) \n Vdet_cand.append(temp)\n\n #step2 
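# The TCP server above (comments in Chinese: create a socket, bind port 1997,
# listen, then loop over accept/recv/send) can be exercised with a minimal
# client such as this sketch, assuming the server is running locally:
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 1997))       # port used by the server above
client.send('hello'.encode('utf-8'))
print(client.recv(1024).decode('utf-8'))
client.close()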
検知電圧候補 vs 検知温度のマッピング\n ft = mapping(Vdet_cand,Tdet_table) # \n \n #step3 検知温度の計算\n# tdet.append( ft(vdet) ) \n tdet = ft(vdet)\n return tdet\n\n#補償温度計算\ndef calc_tcomp(vcomp,Vcomp_table,Tcomp_table):\n #step1 補償→補償温度のマッピング\n fc = mapping(Vcomp_table,Tcomp_table)\n #step2 温度計算\n Tcomp = fc(vcomp)\n return Tcomp\n\n\n\"\"\"クラス\"\"\"\nclass Calc_Temp():\n def __init__(self,Vdet,Vcomp,Vdet_table,Vcomp_table,Tdet_table,Tcomp_table):\n self.Vdet = Vdet\n self.Vcomp =Vcomp\n self.Vdet_table = Vdet_table\n self.Vcomp_table = Vcomp_table\n self.Tdet_table = Tdet_table\n self.Tcomp_table = Tcomp_table \n \n def calc_Tdet(self): #検知電圧による方法\n size = len(self.Vdet)\n temp = []\n for i in range(size):\n temp.append( calc_tdet(self.Vdet[i],self.Vcomp[i],self.Vdet_table,self.Vcomp_table,self.Tdet_table) )\n self.Tdet = np.array(temp)\n return self.Tdet\n\n def calc_Tdet_byTcomp(self): #検知電圧による方法(補償温度を使う方法)\n size = len(self.Vdet)\n temp = []\n for i in range(size):\n temp.append( calc_tdet(self.Vdet[i],self.Tcomp[i],self.Vdet_table,self.Tcomp_table,self.Tdet_table) )\n self.Tdet = np.array(temp)\n return self.Tdet\n\n def calc_Tcomp(self): #検知電圧による方法\n size = len(self.Vcomp)\n temp = []\n for i in range(size):\n temp.append( calc_tcomp(self.Vcomp[i],self.Vcomp_table,self.Tcomp_table) )\n self.Tcomp = np.array(temp)\n return self.Tcomp\n\n","sub_path":"02_センサ評価/d02_単体機/NC/Parts_CalcTemp.py","file_name":"Parts_CalcTemp.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"183778270","text":"'''\nCreated on Jan 4, 2012\n\n@package: ally core sql alchemy\n@copyright: 2012 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides support for SQL alchemy mapper that is able to link the alchemy with REST models.\n'''\n\nfrom abc import ABCMeta\nfrom inspect import isclass\nimport logging\nfrom sqlalchemy import event\nfrom sqlalchemy.ext.compiler import compiles\nfrom sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base\nfrom sqlalchemy.orm.attributes import InstrumentedAttribute\nfrom sqlalchemy.orm.mapper import Mapper\nfrom sqlalchemy.schema import Table, MetaData\nfrom sqlalchemy.sql.expression import Executable, ClauseElement, Join\n\nfrom ally.api.operator.descriptor import Reference\nfrom ally.api.operator.type import TypeModel\nfrom ally.api.type import typeFor\n\n\n# --------------------------------------------------------------------\nlog = logging.getLogger(__name__)\n\n# --------------------------------------------------------------------\n\nclass MappingError(Exception):\n '''\n Provides the exception used whenever a mapping issue occurs.\n '''\n\n# --------------------------------------------------------------------\n\ndef mapperSimple(clazz, sql, **keyargs):\n '''\n Maps a table to a ally REST model. 
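# The Parts_CalcTemp module above (comments in Japanese) builds its
# voltage-to-temperature conversions from scipy interp1d lookups. A
# self-contained check of that mapping idea, with made-up table values:
import numpy as np
from scipy import interpolate

Vcomp_table = np.array([0.0, 1.0, 2.0, 3.0])      # sensor voltages (invented)
Tcomp_table = np.array([10.0, 20.0, 30.0, 40.0])  # matching temperatures
fc = interpolate.interp1d(Vcomp_table, Tcomp_table)
print(fc(1.5))   # 25.0 -- linear interpolation between table rows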
Use this instead of the classical SQL alchemy mapper since this will\n also provide to the model additional information extracted from the SQL alchemy configurations.\n \n @param clazz: class\n The model class to be mapped with the provided sql.\n @param sql: Table|Join|Select\n The table or join to map the model with.\n @param keyargs: key arguments\n This key arguments are directly delivered to the SQL alchemy @see mapper.\n @return: class\n The obtained mapped class.\n '''\n assert isclass(clazz), 'Invalid class %s' % clazz\n if isinstance(sql, Table):\n metadata = sql.metadata\n elif isinstance(sql, Join):\n metadata = keyargs.pop('metadata', None)\n assert metadata is not None, \\\n 'For a join mapping you need to specify the metadata in the key words arguments \\'metadata=?\\''\n else:\n raise MappingError('Invalid sql source %s' % sql)\n assert isinstance(metadata, MetaData), 'Invalid metadata %s' % metadata\n\n inherits = keyargs.pop('inherits', None)\n\n attributes = {'__module__': clazz.__module__}\n attributes['__table__'] = sql\n attributes['metadata'] = metadata\n if keyargs: attributes['__mapper_args__'] = keyargs\n\n # We need to treat the case when a model inherits another, since the provided inherited model class is actually the \n # mapped class the provided model class will not be seen as inheriting the provided mapped class\n if inherits is not None:\n assert isclass(inherits), 'Invalid class %s' % inherits\n assert isinstance(inherits, MappedSupport), 'Invalid inherit class %s, is not mapped' % inherits\n bases = (inherits, clazz)\n else:\n try: Base = metadata._ally_mapper_base\n except AttributeError:\n Base = metadata._ally_mapper_base = declarative_base(metadata=metadata, metaclass=DeclarativeMetaModel)\n bases = (Base, clazz)\n\n return type(clazz.__name__ + '$Mapped', bases, attributes)\n\ndef mapperModel(clazz, sql, **keyargs):\n '''\n Maps a table to a ally REST model. Use this instead of the classical SQL alchemy mapper since this will\n also provide to the model additional information extracted from the SQL alchemy configurations. Use\n this mapper to also add validations for updating and inserting on the model.\n \n @param clazz: class\n The model class to be mapped with the provided sql table.\n @param sql: Table|Join|Select\n The table or join to map the model with.\n @param keyargs: key arguments\n This key arguments are directly delivered to the SQL alchemy @see mapper. 
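# For reference, the plain SQLAlchemy declarative pattern this helper wraps
# (standard SQLAlchemy 0.x/1.x API, matching this module's own imports; a
# sketch only, independent of the ally framework):
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine('sqlite://')   # throwaway in-memory database
Base.metadata.create_all(engine)
print(User.__mapper__)                # the Mapper object this module inspects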
\n @return: class\n The mapped class, basically a model derived class that also contains the mapping data.\n '''\n mapped = mapperSimple(clazz, sql, **keyargs)\n\n return mapped\n\n# --------------------------------------------------------------------\n\nclass DeclarativeMetaModel(DeclarativeMeta):\n '''\n Extension for @see: DeclarativeMeta class that provides also the merging with the API model.\n '''\n\n def __init__(self, name, bases, namespace):\n assert isinstance(namespace, dict), 'Invalid namespace %s' % namespace\n\n mappings, models = [], []\n for cls in bases:\n model = typeFor(cls)\n if isinstance(model, TypeModel):\n if isinstance(cls, MappedSupport):\n if models:\n raise MappingError('The mapped class %s needs to be placed before %s' % (cls, ','.join(mappings)))\n mappings.append(model)\n else: models.append(model)\n\n if not models:\n assert log.debug('Cannot find any API model class for \\'%s\\', no merging required', name) or True\n DeclarativeMeta.__init__(self, name, bases, namespace)\n return\n\n if len(mappings) > 1:\n raise MappingError('Cannot inherit more then one mapped class, got %s' % ','.join(str(typ) for typ in mappings))\n if len(models) > 1:\n raise MappingError('Cannot merge with more then one API model class, got %s' % ','.join(str(typ) for typ in models))\n\n model = models[0]\n assert isinstance(model, TypeModel)\n self._ally_type = model # Provides the TypeSupport\n self._ally_reference = {name: Reference(prop) for name, prop in model.properties.items()}\n self._ally_listeners = {} # Provides the BindableSupport\n\n DeclarativeMeta.__init__(self, name, bases, namespace)\n\n try: mappings = self.metadata._ally_mappers\n except AttributeError: mappings = self.metadata._ally_mappers = []\n mappings.append(self)\n\n# --------------------------------------------------------------------\n\ndef mappingFor(mapped):\n '''\n Provides the mapper of the provided mapped class.\n \n @param mapped: class\n The mapped class.\n @return: Mapper\n The associated mapper.\n '''\n assert isinstance(mapped, DeclarativeMetaModel), 'Invalid mapped class %s' % mapped\n\n return mapped.__mapper__\n\ndef mappingsOf(metadata):\n '''\n Provides the mapping dictionary of the provided meta.\n \n @param metadata: MetaData\n The meta to get the mappings for.\n @return: dictionary{class: class}\n A dictionary containing as a key the model API class and as a value the mapping class for the model.\n '''\n assert isinstance(metadata, MetaData), 'Invalid meta data %s' % metadata\n\n try: mappings = metadata._ally_mappers\n except AttributeError: return {}\n\n return {typeFor(mapped).clazz: mapped for mapped in mappings}\n\ndef tableFor(mapped):\n '''\n Provides the table of the provided mapped class.\n \n @param mapped: object\n The mapped object.\n @return: Table\n The associated table.\n '''\n if isinstance(mapped, InstrumentedAttribute):\n assert isinstance(mapped, InstrumentedAttribute)\n assert len(mapped.property.columns) == 1, 'To many columns found for %s' % mapped\n return mapped.property.columns[0].table\n assert isinstance(mapped, DeclarativeMetaModel), 'Invalid mapped object %s' % mapped\n return mapped.__table__\n\ndef columnFor(attribute):\n '''\n Provides the column of the provided instrumented attribute.\n \n @param attribute: InstrumentedAttribute\n The instrument attribute object.\n @return: Column\n The associated column.\n '''\n assert isinstance(attribute, InstrumentedAttribute), 'Invalid attribute %s' % attribute\n assert len(attribute.property.columns) == 1, 'To many 
columns found for %s' % attribute\n return attribute.property.columns[0]\n\n# --------------------------------------------------------------------\n\ndef addLoadListener(mapped, listener):\n '''\n Adds a load listener that will get notified every time the mapped class entity is loaded.\n \n @param mapped: class\n The model mapped class to add the listener to.\n @param listener: callable(object)\n A function that has to take as parameter the model instance that has been loaded.\n '''\n assert isclass(mapped), 'Invalid class %s' % mapped\n assert callable(listener), 'Invalid listener %s' % listener\n def onLoad(target, *args): listener(target)\n event.listen(mapped, 'load', onLoad)\n\ndef addInsertListener(mapped, listener, before=True):\n '''\n Adds an insert listener that will get notified every time the mapped class entity is inserted.\n \n @param mapped: class\n The model mapped class to add the listener to.\n @param listener: callable(object)\n A function that has to take as parameter the model instance that will be or has been inserted.\n @param before: boolean\n If True the listener will be notified before the insert occurs, if False will be notified after.\n '''\n assert isclass(mapped), 'Invalid class %s' % mapped\n assert isinstance(mapped, MappedSupport), 'Invalid mapped class %s' % mapped\n assert callable(listener), 'Invalid listener %s' % listener\n assert isinstance(before, bool), 'Invalid before flag %s' % before\n def onInsert(mapper, conn, target): listener(target)\n if before: event.listen(mapped.__mapper__, 'before_insert', onInsert)\n else: event.listen(mapped.__mapper__, 'after_insert', onInsert)\n\ndef addUpdateListener(mapped, listener, before=True):\n '''\n Adds an update listener that will get notified every time the mapped class entity is update.\n \n @param mapped: class\n The model mapped class to add the listener to.\n @param listener: callable(object)\n A function that has to take as parameter the model instance that will be or has been update.\n @param before: boolean\n If True the listener will be notified before the update occurs, if False will be notified after.\n '''\n assert isclass(mapped), 'Invalid class %s' % mapped\n assert isinstance(mapped, MappedSupport), 'Invalid mapped class %s' % mapped\n assert callable(listener), 'Invalid listener %s' % listener\n assert isinstance(before, bool), 'Invalid before flag %s' % before\n def onUpdate(mapper, conn, target): listener(target)\n if before: event.listen(mapped.__mapper__, 'before_update', onUpdate)\n else: event.listen(mapped.__mapper__, 'after_update', onUpdate)\n\n# --------------------------------------------------------------------\n\nclass MappedSupportMeta(ABCMeta):\n '''\n Meta class for mapping support that allows for instance check base on the '__mapper__' attribute.\n '''\n\n def __instancecheck__(self, instance):\n '''\n @see: ABCMeta.__instancecheck__\n '''\n if ABCMeta.__instancecheck__(self, instance): return True\n if self is not MappedSupport: return False\n return isinstance(getattr(instance, '__mapper__', None), Mapper)\n\nclass MappedSupport(metaclass=MappedSupportMeta):\n '''\n Support class for mapped classes.\n '''\n __mapper__ = Mapper # Contains the mapper that represents the model\n\n# --------------------------------------------------------------------\n# TODO: SQL alchemy check if is still a problem in the new SQL alchemy version\n# This is a fix for the aliased models.\ndef adapted(self, adapter):\n '''\n @see: InstrumentedAttribute.adapted\n We need to adjust this in order to 
be able to alias.\n '''\n adapted = InstrumentedAttribute(self.prop, self.mapper, adapter)\n adapted.comparator = self.comparator.adapted(adapter)\n adapted.class_ = self.class_\n return adapted\nInstrumentedAttribute.adapted = adapted\n\n# TODO: SQL alchemy check if is still a problem in the new SQL alchemy version\n# patch from http://docs.sqlalchemy.org/en/rel_0_8/core/compiler.html#compiling-sub-elements-of-a-custom-expression-construct\n# in order to support INSERT INTO t1 (SELECT * FROM t2)\n\nclass InsertFromSelect(Executable, ClauseElement):\n def __init__(self, table, columns, select):\n self.table = table\n self.columns = columns\n self.select = select\n\n@compiles(InsertFromSelect)\ndef visit_insert_from_select(element, compiler, **kw):\n return 'INSERT INTO %s (%s) %s' % (\n compiler.process(element.table, asfrom=True),\n element.columns,\n compiler.process(element.select)\n )\n","sub_path":"plugins/support-sqlalchemy/sql_alchemy/support/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":12497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"239385520","text":"import requests\n\n'''配置文件中读取参数'''\nlogin_msg = read_conf.readConf(\"INTERFACEMSG\",\"login_msg\")\nlogin_url = read_conf.readConf(\"INTERFACEMSG\",\"login_url\")\ninsert_user_url = read_conf.readConf(\"INTERFACEMSG\",\"insert_user_url\")\nemails_url =read_conf.readConf(\"INTERFACEMSG\",\"emails_url\")\n\n'''添加用户user_id'''\n#仅需要告知用户的user_id,即可进行批量添加用户进行课程的学习\n\nlist_params = ['10221']\n\n\n\n\n'''声明全局变量'''\nglobal null\nnull = ''\n\n'''登录'''\nresponse_login = requests.post(login_url,json=eval(login_msg))\nprint(response_login.status_code)\n\n'''提取authentication'''\njwt_token = response_login\njwt_token1 = jwt_token.json()['jwt']\ntoken = jwt_token.json()['api_path']\ntoken = token.replace(\"\\\\\", \"\")\ntokens = (eval(token)['path'])\n\n'''提取api_token'''\napi_token = tokens['POSTinno-course/elearn/enroll_users/{packageId}']['token']\nheaders = {'Authorization':jwt_token1,'X-API-Token':api_token,'Content-Type':'application/json'}\nresponse_insert_users = requests.post(insert_user_url,json = list_params, headers=headers)\nprint(response_insert_users.status_code)\n\n'''是否触发邮件通知'''\napi_token1 = tokens['PUTinno-course/elearn/enroll_users/{packageId}/sendemail_status']['token']\nheaders1 = {'Authorization':jwt_token1,'X-API-Token':api_token1}\nresponse_emails = requests.put(emails_url,headers=headers1)\nprint(response_emails.text)\n\n","sub_path":"elearning_insert-users/insertUsers/insert_users.py","file_name":"insert_users.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80101786","text":"# -*- coding: utf-8 -*-\n#@Time : 2020/3/29 21:27\n#@Author : jamerri\n\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/26 13:54\n# @Author : jamerri\n\nimport numpy as np\nimport function as fuc\nimport random\nfrom sklearn.model_selection import KFold\nfrom conf import conf\nimport math\nimport time\nfrom datetime import datetime\n\nstart = time.time()\n\n'定义为全局变量'\nconf = conf.KernelDMVW()\n\nmap = []\nD_total = []\nD_train = []\nK_NLPD_value = []\nparameter_kernel_size = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5]\nparameter_wind_scale = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]\nparameter_wind_speed_factor = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 
1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]\nnp.set_printoptions(suppress=True)\n\n'''定义坐标点信息类'''\n\n\nclass Point:\n def __init__(self, x, y, c, wind_x, wind_y, wind_z):\n self.x = x\n self.y = y\n self.c = c\n self.wind_x = wind_x\n self.wind_y = wind_y\n self.wind_z = wind_z\n\n\n'''定义大网格类'''\n\n\nclass Block:\n def __init__(self, sets):\n self.point_set = sets\n\n\n'''从txt读取900个点'''\nfile_open_path = 'C:/Users/jamerri/Desktop/实验数据/1.5_900_interpolation_data.txt'\n'''处理原始数据'''\nraw_data = np.loadtxt(file_open_path)\nfor i in range(900):\n D_total.append(\n Point(raw_data[i][0], raw_data[i][1], raw_data[i][2], raw_data[i][3], raw_data[i][4], raw_data[i][5]))\n# print(raw_data)\n\n'''设置训练集'''\n\n\ndef get_train_set(blc, b_len, b_bre, c_num):\n \"\"\"\n 设置训练集\n blc:大网格的数量\n b_len:一个大网格的长度\n b_bre:一个大网格的宽度\n c_num:每个网格采样的数量\n \"\"\"\n tmp = []\n index_dict = {-1, }\n b_total = len(D_total) / blc # 每个网格点的数量\n blc = int(np.math.sqrt(blc))\n for i in range(blc):\n for j in range(blc): # 第i行第j列的网格\n for n in range(c_num):\n index = -1\n while index in index_dict:\n r = np.random.randint(0, b_total)\n ln = r // b_bre\n col = r - ln * b_bre\n index = (i * b_len + ln) * (blc * b_bre) + (j * b_bre + col) # 计算采样点的索引\n index_dict.add(index)\n tmp.append(D_total[index])\n\n return tmp\n\n\nD_train = get_train_set(25, 6, 6, 16)\n\n'''设置测试集'''\nD_test = [i for i in D_total if i not in D_train]\n\nprint(len(D_train))\nprint(len(D_test))\n\n# '''写D_train训练集到txt'''\n# raw_D_train_c_filed = []\n# for i in range(len(D_train)):\n# raw_D_train_c_filed.append(\n# [D_train[i].x, D_train[i].y, D_train[i].c, D_train[i].wind_x, D_train[i].wind_y, D_train[i].wind_z])\n#\n# fuc.write_D_train_data(raw_D_train_c_filed)\n# '''写D_test训练集到txt'''\n# raw_D_test_c_filed = []\n# for i in range(len(D_test)):\n# raw_D_test_c_filed.append(\n# [D_test[i].x, D_test[i].y, D_test[i].c, D_test[i].wind_x, D_test[i].wind_y, D_test[i].wind_z])\n#\n# fuc.write_D_test_data(raw_D_test_c_filed)\n\n'''10折交叉验证'''\nD_train = random.sample(D_train, len(D_train))\nif len(D_train) == 25:\n del D_train[4], D_train[8], D_train[12], D_train[16], D_train[20]\nprint(len(D_train))\n# print(D_train)\nfor y in range(len(parameter_wind_speed_factor)):\n for u in range(len(parameter_wind_scale)):\n for m in range(len(parameter_kernel_size)):\n ws = parameter_wind_scale[u]\n kz = parameter_kernel_size[m]\n bt = parameter_wind_speed_factor[y]\n kf = KFold(n_splits=10)\n NLPD_value = []\n for train, test in kf.split(D_train):\n train_position_x = []\n train_position_y = []\n train_wind_x = []\n train_wind_y = []\n train_c = []\n for i in range(len(train)):\n cnt_train = train[i]\n # print(cnt_train)\n train_position_x.append(D_train[cnt_train].x)\n train_position_y.append(D_train[cnt_train].y)\n train_c.append(D_train[cnt_train].c)\n train_wind_x.append(D_train[cnt_train].wind_x)\n train_wind_y.append(D_train[cnt_train].wind_y)\n calculate_speed = fuc.calculateSpeed_direction(train_wind_x, train_wind_y)[0]\n calculate_direction = fuc.calculateSpeed_direction(train_wind_x, train_wind_y)[1]\n mean_value = fuc.mean_value_and_variance_value_KernelDMVW_pro(train_position_x, train_position_y, train_c, calculate_speed, calculate_direction, kz, ws, bt)[0]\n variance_value = fuc.mean_value_and_variance_value_KernelDMVW_pro(train_position_x, train_position_y, train_c, calculate_speed, calculate_direction, kz, ws, bt)[1]\n # print(mean_value)\n # print(variance_value)\n sub_n = []\n for j in range(len(test)):\n cnt_test = test[j]\n # print(cnt_test)\n test_position_x = 
D_train[cnt_test].x\n test_position_y = D_train[cnt_test].y\n test_c = D_train[cnt_test].c\n number = fuc.find_number(test_position_x, test_position_y)\n sub_n.append(math.log(variance_value[number]) + pow((mean_value[number] - test_c), 2) / variance_value[\n number])\n # print(len(sub_n))\n NLPD_value_total = np.array(sub_n).sum() / (2 * len(test)) + 0.5 * math.log(2 * math.pi)\n NLPD_value.append(NLPD_value_total)\n mean_NLPD_value = np.mean(np.array(NLPD_value))\n K_NLPD_value.append(mean_NLPD_value)\nprint(K_NLPD_value)\nprint(len(K_NLPD_value))\nmin_NLPD_value = np.min(np.array(K_NLPD_value))\nNum = np.argmin(K_NLPD_value)\nempty = []\nfor k in parameter_wind_speed_factor:\n for i in parameter_wind_scale:\n for j in parameter_kernel_size:\n empty.append([k, i, j])\nprint(empty[Num])\nprint('min_NLPD_value = ', min_NLPD_value)\n# kernel_size_num = (Num % (len(parameter_kernel_size) * len(parameter_wind_scale))) % len(parameter_kernel_size)\n# wind_scale_num = int((Num % (len(parameter_kernel_size) * len(parameter_wind_scale))) / len(parameter_kernel_size))\n# wind_speed_factor_num = int(Num / (len(parameter_kernel_size) * len(parameter_wind_scale)))\n# print('min_NLPD_value = ', min_NLPD_value)\n# print('the best kernel size = ', parameter_kernel_size[kernel_size_num - 1])\n# print('the best wind scale = ', parameter_wind_scale[wind_scale_num])\n# print('the best wind speed factor = ', parameter_wind_speed_factor[wind_speed_factor_num])\n\nend = time.time()\nprint('runtime:%s second' % (end - start))\n","sub_path":"test/GDM_parameter_wind_pro.py","file_name":"GDM_parameter_wind_pro.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"419127953","text":"from typing import List\n\nclass Solution:\n def sumZero(self, n: int) -> List[int]:\n ans = []\n if n % 2 == 1:\n ans.append(0)\n\n for i in range(1, n // 2 + 1):\n ans.extend([i, -i])\n\n return ans\n","sub_path":"LeetCode_1304_1.py","file_name":"LeetCode_1304_1.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"51222476","text":"# -*- coding: utf-8 -*-\n\"\"\"wordのランキングを作成するclass\"\"\"\nimport MeCab\nimport pandas as pd\nimport unicodedata\nimport collections\nimport datetime\nimport pandas.io.sql as psql\nimport numpy as np\nfrom config import db_info\nimport mysql.connector\nimport MySQLdb\nimport sys\nfrom get_stock_data import get_stock_price\n\n\nclass WordTrend:\n \"\"\"\n 各銘柄についてのwordランキングを出すクラス\n :param today:date型で、分析したい日にちを指定する\n \"\"\"\n\n def __init__(self, today):\n\n self.MECAB_MODE = 'mecabrc'\n # 検索範囲\n self.today = today\n self.today_str = today.strftime('%Y-%m-%d')\n self.yesterday = (today - datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n self.tomorrow = (today + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n self.target_day = self.today.strftime(\"%Y-%m-%d\")\n con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n host=db_info.host, database=db_info.db, charset='utf8')\n sql = \"SELECT `code`, `name`, `name2` from `stock_info`\"\n self.stock_df = psql.read_sql(sql, con)\n con.close()\n # self.stock_df = pd.read_csv(\"~/investment/twitter_investment/trend/stock_list.csv\")\n\n self.stock_code = np.array(self.stock_df['code']).astype(str)\n self.stock_name = np.array(self.stock_df['name'])\n self.stock_name1 = np.array(self.stock_df['name2'])\n\n def get_tweet_dateframe(self):\n 
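# The sumZero solution above pairs each i with -i and prepends 0 for odd n.
# A quick self-check of that construction (illustrative):
def sum_zero(n):
    ans = [0] * (n % 2)
    for i in range(1, n // 2 + 1):
        ans.extend([i, -i])
    return ans

print(sum_zero(5), sum(sum_zero(5)))   # [0, 1, -1, 2, -2] 0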
\"\"\"データベースから指定の日付のツイートを取得\"\"\"\n # データベースに接続\n \"\"\"\n con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n host=db_info.host, database=db_info.db, charset='utf8')\n \"\"\"\n con = MySQLdb.connect(\n user=db_info.user,\n passwd=db_info.passwd,\n host=db_info.host,\n db=db_info.db)\n\n chunk_size = 2000\n offset = 0\n readers = []\n while True:\n sql = \"select `id_user`,`text`,`in_degree` from \" \\\n \"(select * from tweets where created_time > '%s' and created_time < '%s') as tweets_tmp \" \\\n \"inner join `centralities` on tweets_tmp.id_user = centralities.index \" \\\n \"limit %d offset %d\" % (self.today_str, self.tomorrow, chunk_size, offset)\n # 1000行ずつ読み込むことでmemoryを抑える\n # reader = psql.read_sql(sql, con, chunksize=100)\n reader = psql.read_sql(sql, con)\n readers.append(reader)\n offset += chunk_size\n if len(readers[-1]) < chunk_size:\n break\n\n # df = pd.concat((r for r in reader))\n df = pd.concat(readers)\n con.close()\n\n return df\n\n def connect_usertext(self, df, id_user):\n \"\"\"特定のアカウントのTweet全て連結し、textを返す関数\"\"\"\n df_personal = df[df['id_user'] == id_user]\n usertext = ''\n for text in df_personal['text']:\n usertext = usertext + text\n\n return usertext\n\n def parse(self, text):\n \"\"\"文字列を名詞ごとに分ける\"\"\"\n tagger = MeCab.Tagger(self.MECAB_MODE)\n # ここで空文字をpurseしておくとerrorをはかない\n tagger.parse(\"\")\n\n node = tagger.parseToNode(text)\n\n nouns = []\n wcosts = []\n\n while node:\n pos = node.feature.split(\",\")[0]\n word = node.surface\n cost = node.wcost\n\n if pos == \"名詞\" and self.status(word):\n nouns.append(self.unite_word(word))\n wcosts.append(cost)\n\n node = node.next\n\n return nouns, wcosts\n\n def status(self, word):\n \"\"\"wordでカウントする条件をTrue,Falseで返す\"\"\"\n # 3文字以上\n if len(word) < 3:\n return False\n\n # ひらがな、カタカナ、漢字、数字、英字だけで構成されていることを確認\n for c in word:\n if not (unicodedata.name(c)[0:8] == \"HIRAGANA\" or\n unicodedata.name(c)[0:8] == \"KATAKANA\" or\n unicodedata.name(c)[0:3] == \"CJK\" or\n unicodedata.name(c)[0:5] == \"DIGIT\" or\n unicodedata.name(c)[0:5] == \"LATIN\"):\n return False\n\n # 全て英字の単語はNG\n \"\"\"\n flag = True\n for c in word:\n if not (unicodedata.name(c)[0:5] == \"LATIN\"):\n flag = False\n if flag:\n return False\n \"\"\"\n # 数字は4桁のみ\n flag = True\n for c in word:\n if not (unicodedata.name(c)[0:5] == \"DIGIT\"):\n flag = False\n if flag:\n if int(word) < 1000 or int(word) > 10000:\n return False\n\n # 指定ワードならTrue\n if word in self.stock_code or word in self.stock_name or word in self.stock_name1:\n return True\n else:\n return False\n\n def unite_word(self, word):\n \"\"\"銘柄を正式名称で返す\"\"\"\n # 何番目の銘柄かを判���\n for i in range(0, len(self.stock_code)):\n if word == self.stock_code[i] or word == self.stock_name[i] or word == self.stock_name1[i]:\n word = self.stock_name[i]\n return word\n\n def get_code(self, word):\n \"\"\"正式名称に対してcodeを返す\"\"\"\n for i in range(0, len(self.stock_code)):\n if word == self.stock_code[i] or word == self.stock_name[i] or word == self.stock_name1[i]:\n code = self.stock_code[i]\n return code\n\n def create_word_rank(self):\n \"\"\"wordランキングを計算するメソッド\"\"\"\n # tweetsデータ選択\n df = self.get_tweet_dateframe()\n print(\"データ取得\")\n # アカウントのリストを作成\n user_list = list(df['id_user'].unique())\n\n # ネットワーク全体の総計のword登場回数(初期化)\n counter_sum = collections.Counter('')\n\n # 各アカウントのword登場回数(重み付け)\n for id_user in user_list:\n # print(id_user)\n counter_person = collections.Counter('')\n text = self.connect_usertext(df, id_user)\n\n terms, costs = self.parse(text)\n\n # 回数をdictionaryで取得\n 
\n            # count the occurrences as a dictionary\n            count_dict = collections.Counter(terms)\n\n            # weight by the user's in-degree\n            in_degree = df[df['id_user'] == id_user]\n            in_degree = in_degree.reset_index()\n            del in_degree['index']\n            for i in range(0, in_degree['in_degree'][0]):\n                counter_person += count_dict\n\n            counter_sum += counter_person\n\n        # convert the top 100 words to a data frame\n\n        df_output = pd.DataFrame(counter_sum.most_common(100))\n        df_output['code'] = 0\n\n        # shape the df to return\n        if df_output.shape[0] > 0:\n            # set and tidy the column names\n            word_name = \"word\"\n            in_degree_name = \"in_degree\"\n            df_output.columns = [word_name, in_degree_name, 'code']\n            df_output = df_output.loc[:, ['code', word_name, in_degree_name]]\n            # fill in the stock codes\n            for i in range(0, df_output.shape[0]):\n                df_output.loc[i, 'code'] = self.get_code(df_output[word_name][i])\n            print(\"done\")\n            return df_output\n\n        else:\n            print(\"None\")\n            return None\n\n    def insert_data_to_primary_report(self):\n        \"\"\"Insert the ranking into the primary_report table\"\"\"\n\n        rank_day = self.create_word_rank()\n\n        # exit if there is no data\n        if rank_day is None:\n            print(\"finished\")\n            sys.exit()\n\n        # insert the data\n\n        # add the rank column\n        rank_day['rank'] = 0\n        print(rank_day.shape)\n        for i in range(0, rank_day.shape[0]):\n            rank_day.loc[i, 'rank'] = i + 1\n\n        # add the insertion timestamp\n        now = datetime.datetime.now()\n        rank_day['date'] = now.strftime('%Y-%m-%d %H:%M:%S')\n\n        # add current price, previous close, rise value and rise rate columns\n        rank_day['price_now'] = -1\n        rank_day['price_yes_finish'] = -1\n        rank_day['rising_value'] = 0\n        rank_day['increase_rate'] = 0\n\n        for i in range(0, rank_day.shape[0]):\n            code = rank_day[\"code\"][i]\n            stock = get_stock_price.GetStockPrice(code)\n            price_now = stock.get_price_now()\n            rank_day.loc[i, \"price_now\"] = price_now\n            price_yes_finish = stock.get_price_yesterday_finish()\n            rank_day.loc[i, \"price_yes_finish\"] = price_yes_finish\n            rising_value, increase_rate = stock.cal_the_day_before_ratio(price_now, price_yes_finish)\n            rank_day.loc[i, 'rising_value'] = rising_value\n            rank_day.loc[i, 'increase_rate'] = increase_rate\n\n        # connect to the database\n        con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n                                      host=db_info.host, database=db_info.db, charset='utf8')\n        # clear the table\n        try:\n            delete_statement = \"delete from twitter_trend_primary_report\"\n            cursor = con.cursor()\n            cursor.execute(delete_statement)\n            con.commit()\n            cursor.close()\n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n\n        # insert word, in_degree, rank and date into the database\n        try:\n            \"\"\"\n            rank_day = rank_day.ix[:, ['rank', 'code', 'word', 'in_degree', 'price_now', 'price_yes_finish',\n                                       'rising_value', 'increase_rate', 'date']]\n            \"\"\"\n            for i in range(0, rank_day.shape[0]):\n                insert_stmt = \"insert into `twitter_trend_primary_report` \" \\\n                              \"(`rank`,`code`,`word`,`in_degree`,`price_now`,`price_yes_finish`,\" \\\n                              \"`rising_value`,`increase_rate`,`date`) \" \\\n                              \"values (%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\n                insert_data = (str(rank_day['rank'][i]), str(rank_day['code'][i]), str(rank_day['word'][i]),\n                               str(rank_day['in_degree'][i]), str(rank_day['price_now'][i]),\n                               str(rank_day['price_yes_finish'][i]), str(rank_day['rising_value'][i]),\n                               str(rank_day['increase_rate'][i]), str(rank_day['date'][i]))\n                cursor = con.cursor()\n                cursor.execute(insert_stmt, insert_data)\n                con.commit()\n                cursor.close()\n            # rank_day.to_sql(Table_name, con, flavor='mysql', if_exists='append', index=False)\n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n\n        con.close()\n
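create_word_rank above weights each account by adding its Counter to counter_person in_degree times, which costs O(words * in_degree). Multiplying the counts once is equivalent; a small sketch (the helper name is illustrative):

    from collections import Counter

    def weighted_counts(word_counts, weight):
        # equivalent to adding word_counts to an empty Counter `weight` times,
        # but in a single pass
        return Counter({word: count * weight for word, count in word_counts.items()})

    assert weighted_counts(Counter('aab'), 3) == Counter({'a': 6, 'b': 3})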
\"\"\"trend_yesterdayテーブル,logテーブルにランキングを挿入するメソッド\"\"\"\n\n rank_day = self.create_word_rank()\n\n # もしデータがなかったらプログラムを終了させる\n if rank_day is None:\n print(\"finished\")\n sys.exit()\n\n # データを挿入\n # data_frameにランキングを挿入\n rank_day['rank'] = 0\n print(rank_day.shape)\n for i in range(0, rank_day.shape[0]):\n rank_day.loc[i, 'rank'] = i + 1\n\n # date_frameに昨日のdateを追加\n rank_day['date'] = self.target_day\n\n # data_frameに現在値、前日終値、上昇値、上昇率をを設定\n rank_day['price_now'] = -1\n rank_day['price_yes_finish'] = -1\n rank_day['rising_vale'] = 0\n rank_day['increase_rate'] = 0\n\n for i in range(0, rank_day.shape[0]):\n code = rank_day[\"code\"][i]\n stock = get_stock_price.GetStockPrice(code)\n price_now = stock.get_price_now()\n rank_day.loc[i, \"price_now\"] = price_now\n price_yes_finish = stock.get_price_yesterday_finish()\n rank_day.loc[i, \"price_yes_finish\"] = price_yes_finish\n rising_value, increase_rate = stock.cal_the_day_before_ratio(price_now, price_yes_finish)\n rank_day.loc[i, 'rising_value'] = rising_value\n rank_day.loc[i, 'increase_rate'] = increase_rate\n\n # データベースに接続\n con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n host=db_info.host, database=db_info.db, charset='utf8')\n # twitter_trend_yesterdayテーブルのデータを削除\n try:\n delete_statement = \"delete from twitter_trend_yesterday\"\n cursor = con.cursor()\n cursor.execute(delete_statement)\n con.commit()\n cursor.close()\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n # word, in_degree, rank, date をデータベースに挿入\n # twitter_trend_yesterday\n try:\n\n for i in range(0, rank_day.shape[0]):\n insert_stmt = \"insert into `twitter_trend_yesterday` \" \\\n \"(`rank`,`code`,`word`,`in_degree`,`price_now`,`price_yes_finish`,\" \\\n \"`rising_value`,`increase_rate`,`date`) \" \\\n \"values (%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\n insert_data = (str(rank_day['rank'][i]), str(rank_day['code'][i]), str(rank_day['word'][i]),\n str(rank_day['in_degree'][i]), str(rank_day['price_now'][i]),\n str(rank_day['price_yes_finish'][i]), str(rank_day['rising_value'][i]),\n str(rank_day['increase_rate'][i]), str(rank_day['date'][i]))\n cursor = con.cursor()\n cursor.execute(insert_stmt, insert_data)\n con.commit()\n cursor.close()\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n # log\n try:\n\n for i in range(0, rank_day.shape[0]):\n insert_stmt = \"insert into `twitter_trend_log` \" \\\n \"(`rank`,`code`,`word`,`in_degree`,`price_now`,`price_yes_finish`,\" \\\n \"`rising_value`,`increase_rate`,`date`) \" \\\n \"values (%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\n insert_data = (str(rank_day['rank'][i]), str(rank_day['code'][i]), str(rank_day['word'][i]),\n str(rank_day['in_degree'][i]), str(rank_day['price_now'][i]),\n str(rank_day['price_yes_finish'][i]), str(rank_day['rising_value'][i]),\n str(rank_day['increase_rate'][i]), str(rank_day['date'][i]))\n cursor = con.cursor()\n cursor.execute(insert_stmt, insert_data)\n con.commit()\n cursor.close()\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n con.close()\n\n\nclass WordTrendDayReport(WordTrend):\n \"\"\"\n 各銘柄についてのwordランキングを出すクラス\n 受け取ったspan(minutes)の範囲でのトレンドを出す\n \"\"\"\n # ここで指定の範囲を、span[minute]で受け取る\n def __init__(self, span):\n\n # 検索範囲の設定\n self.span = span\n self.MECAB_MODE = 'mecabrc'\n\n # 昨日の日付を求める\n self.today = datetime.date.today()\n self.yesterday = (self.today - datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n\n self.now_datetime = 
\n        self.now_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n        self.since_datetime = (datetime.datetime.now() - datetime.timedelta(minutes=self.span)).strftime(\n            \"%Y-%m-%d %H:%M:%S\")\n\n        con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n                                      host=db_info.host, database=db_info.db, charset='utf8')\n        sql = \"SELECT `code`, `name`, `name2` from `stock_info`\"\n        self.stock_df = psql.read_sql(sql, con)\n        con.close()\n        # self.stock_df = pd.read_csv(\"~/investment/twitter_investment/trend/stock_list.csv\")\n\n        self.stock_code = np.array(self.stock_df['code']).astype(str)\n        self.stock_name = np.array(self.stock_df['name'])\n        self.stock_name1 = np.array(self.stock_df['name2'])\n\n    # overrides WordTrend.get_tweet_dateframe\n    def get_tweet_dateframe(self):\n        \"\"\"Fetch the tweets of the given span from the database\"\"\"\n        # connect to the database\n        con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n                                      host=db_info.host, database=db_info.db, charset='utf8')\n        sql = \"select `id_user`,`text`,`in_degree` from `tweets` join `centralities` \" \\\n              \"on tweets.id_user = centralities.index \" \\\n              \"where created_time >= '%s'\" % self.since_datetime\n        # read 1000 rows at a time to keep memory bounded\n        reader = psql.read_sql(sql, con, chunksize=1000)\n        df = pd.concat((r for r in reader))\n        con.close()\n\n        return df\n\n    def insert_data_to_instance_span_report(self):\n        \"\"\"Insert the ranking into the instance_trend_span_report table\"\"\"\n\n        rank_span = self.create_word_rank()\n\n        # exit if there is no data\n        if rank_span is None:\n            print(\"finished\")\n            sys.exit()\n\n        # insert the data\n\n        # add the rank column\n        rank_span['rank'] = 0\n        print(rank_span.shape)\n        for i in range(0, rank_span.shape[0]):\n            rank_span.loc[i, 'rank'] = i + 1\n\n        # add the insertion date\n        rank_span['date'] = self.target_day\n\n        # add current price, previous close, rise value and rise rate columns\n        rank_span['price_now'] = -1\n        rank_span['price_yes_finish'] = -1\n        rank_span['rising_value'] = 0\n        rank_span['increase_rate'] = 0\n\n        for i in range(0, rank_span.shape[0]):\n            code = rank_span[\"code\"][i]\n            stock = get_stock_price.GetStockPrice(code)\n            price_now = stock.get_price_now()\n            rank_span.loc[i, \"price_now\"] = price_now\n            price_yes_finish = stock.get_price_yesterday_finish()\n            rank_span.loc[i, \"price_yes_finish\"] = price_yes_finish\n            rising_value, increase_rate = stock.cal_the_day_before_ratio(price_now, price_yes_finish)\n            rank_span.loc[i, 'rising_value'] = rising_value\n            rank_span.loc[i, 'increase_rate'] = increase_rate\n\n        # connect to the database\n        con = mysql.connector.connect(user=db_info.user, password=db_info.passwd,\n                                      host=db_info.host, database=db_info.db, charset='utf8')\n        # clear the table\n        try:\n            delete_statement = \"delete from instance_trend_span_report\"\n            cursor = con.cursor()\n            cursor.execute(delete_statement)\n            con.commit()\n            cursor.close()\n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n\n        # insert word, in_degree, rank and date into the database\n        try:\n            \"\"\"\n            rank_day = rank_day.ix[:, ['rank', 'code', 'word', 'in_degree', 'price_now', 'price_yes_finish',\n                                       'rising_value', 'increase_rate', 'date']]\n            \"\"\"\n            for i in range(0, rank_span.shape[0]):\n                insert_stmt = \"insert into `instance_trend_span_report` \" \\\n                              \"(`rank`,`code`,`word`,`in_degree`,`price_now`,`price_yes_finish`,\" \\\n                              \"`rising_value`,`increase_rate`,`date`) \" \\\n                              \"values (%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\n                insert_data = (str(rank_span['rank'][i]), str(rank_span['code'][i]), str(rank_span['word'][i]),\n                               str(rank_span['in_degree'][i]), str(rank_span['price_now'][i]),\n                               str(rank_span['price_yes_finish'][i]), 
str(rank_span['rising_value'][i]),\n str(rank_span['increase_rate'][i]), str(rank_span['date'][i]))\n cursor = con.cursor()\n cursor.execute(insert_stmt, insert_data)\n con.commit()\n cursor.close()\n # rank_day.to_sql(Table_name, con, flavor='mysql', if_exists='append', index=False)\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n\n con.close()\n\n","sub_path":"twitter_investment/trend/word_trend.py","file_name":"word_trend.py","file_ext":"py","file_size_in_byte":21211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602760687","text":"import os\n\nimport Track\n\nPROG_NAME = 'trajectoryAnalysis'\nDESCRIPTION = 'Analysis of trajectories for cell image data'\n\n \ndef main():\n \n SUFFIX = '.txt'\n \n from argparse import ArgumentParser\n \n epilog = 'For further help on running this program please email wb104@cam.ac.uk'\n \n arg_parse = ArgumentParser(prog=PROG_NAME, description=DESCRIPTION,\n epilog=epilog, prefix_chars='-', add_help=True)\n \n arg_parse.add_argument('directories', nargs='*',\n help='Directories containing *%s files to be analysed' % SUFFIX)\n\n arg_parse.add_argument('-numDimensions', default=2, type=int,\n help='Number of dimensions for the tracks (2 or 3)')\n\n arg_parse.add_argument('-maxJumpDistance', default=100, type=float,\n help='Maximum distance can jump between frame (adjusted distance, taking frame difference into account)')\n\n arg_parse.add_argument('-maxFrameGap', default=2, type=int,\n help='Maximum change in frame for two consecutive positions on track')\n\n arg_parse.add_argument('-minNumPositions', default=3, type=int,\n help='Minimum number of positions on track to be further considered')\n\n arg_parse.add_argument('-binSize', default=312, type=int,\n help='The bin size for binned calculations')\n \n arg_parse.add_argument('-plotDpi', default=600, type=int,\n help='DPI value for binned plots')\n \n arg_parse.add_argument('-savePositionsFramesIntensities', default=False, action='store_true',\n help='Save positions, frames and intensities of tracks to csv file')\n \n arg_parse.add_argument('-calcFramesPercentage', default=0, type=float,\n help='Calculate track frames length which is >= than specified percentage over all tracks')\n \n arg_parse.add_argument('-calcMaxNumTracksInBin', default=False, action='store_true',\n help='Calculate maximum number of tracks in any bin')\n \n arg_parse.add_argument('-saveNumTracksInBin', default=0, type=int,\n help='Save number of tracks in each bin with maximum number for color being specified by given value, -1 means use maximum number of tracks in any bin')\n \n arg_parse.add_argument('-calcFramesByBinPercentage', default=0, type=float,\n help='Calculate binned track (average) frames length which is >= than specified percentage over all tracks')\n \n arg_parse.add_argument('-saveTrackFramesInBin', default=0, type=int,\n help='Save track (average) frames in each bin with maximum number for color being specified by given value, -1 means use maximum number of (average) frames in any bin')\n \n arg_parse.add_argument('-saveTracksColoredByFrames', default=0, type=int,\n help='Save tracks where those with frames >= specified value are colored blue and others yellow')\n \n args = arg_parse.parse_args()\n\n assert args.numDimensions in (2, 3), 'numDimensions = %d, must be in (2, 3)' % args.numDimensions\n\n for directory in args.directories:\n print('Processing directory %s' % directory)\n relfileNames = os.listdir(directory)\n 
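trajectoryAnalysis.py discovers its inputs by filtering a directory listing against SUFFIX and then strips the suffix to build filePrefix, as in the loop here. The pattern generalises to a small helper; a sketch under the same assumptions, not part of the original script:

    import os

    def data_files(directory, suffix='.txt'):
        # yield (file_path, file_prefix) pairs for every *suffix file in directory
        for name in sorted(os.listdir(directory)):
            if name.endswith(suffix):
                path = os.path.join(directory, name)
                yield path, path[:-len(suffix)]

Sorting the listing makes the processing order deterministic across platforms, which os.listdir alone does not guarantee.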
relfileNames = [relfileName for relfileName in relfileNames if relfileName.endswith(SUFFIX)]\n for relfileName in relfileNames:\n filePrefix = os.path.join(directory, relfileName[:-len(SUFFIX)])\n fileName = os.path.join(directory, relfileName)\n print('Determining tracks for %s' % fileName)\n tracks = Track.determineTracks(fileName, args.numDimensions, args.maxJumpDistance, args.maxFrameGap, args.minNumPositions)\n \n if args.savePositionsFramesIntensities:\n Track.savePositionsFramesIntensities(tracks, filePrefix)\n\n if args.calcFramesPercentage > 0:\n Track.calcFramesPercentage(tracks, args.calcFramesPercentage)\n \n if args.calcMaxNumTracksInBin:\n Track.calcMaxNumTracksInBin(tracks, args.binSize)\n \n if args.saveNumTracksInBin:\n if args.saveNumTracksInBin == -1:\n value = Track.calcMaxNumTracksInBin(tracks, args.binSize)\n else:\n value = args.saveNumTracksInBin\n Track.saveNumTracksInBin(tracks, filePrefix, args.binSize, value, args.plotDpi)\n \n if args.calcFramesByBinPercentage > 0:\n Track.calcFramesByBinPercentage(tracks, args.binSize, args.calcFramesByBinPercentage)\n \n if args.saveTrackFramesInBin:\n if args.saveTrackFramesInBin == -1:\n value = Track.calcFramesByBinPercentage(tracks, args.binSize, 100.0)\n else:\n value = args.saveTrackFramesInBin\n Track.saveTrackFramesInBin(tracks, filePrefix, args.binSize, value, args.plotDpi)\n \n if args.saveTracksColoredByFrames:\n Track.saveTracksColoredByFrames(tracks, filePrefix, args.saveTracksColoredByFrames, args.plotDpi)\n\nif __name__ == '__main__':\n \n main()\n \n \n ","sub_path":"trajectoryAnalysis.py","file_name":"trajectoryAnalysis.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"346225515","text":"# 1220. 
Count Vowels Permutation\n# Hard\n\n# Given an integer n, your task is to count how many strings of length n can be formed under the following rules:\n\n# Each character is a lower case vowel ('a', 'e', 'i', 'o', 'u')\n# Each vowel 'a' may only be followed by an 'e'.\n# Each vowel 'e' may only be followed by an 'a' or an 'i'.\n# Each vowel 'i' may not be followed by another 'i'.\n# Each vowel 'o' may only be followed by an 'i' or a 'u'.\n# Each vowel 'u' may only be followed by an 'a'.\n# Since the answer may be too large, return it modulo 10^9 + 7.\n\n \n\n# Example 1:\n\n# Input: n = 1\n# Output: 5\n# Explanation: All possible strings are: \"a\", \"e\", \"i\" , \"o\" and \"u\".\n# Example 2:\n\n# Input: n = 2\n# Output: 10\n# Explanation: All possible strings are: \"ae\", \"ea\", \"ei\", \"ia\", \"ie\", \"io\", \"iu\", \"oi\", \"ou\" and \"ua\".\n# Example 3: \n\n# Input: n = 5\n# Output: 68\n \n\n# Constraints:\n\n# 1 <= n <= 2 * 10^4\n\n# This solution works:\n\nfrom functools import lru_cache\n\nclass Solution:\n    MOD = 10 ** 9 + 7\n    def countVowelPermutation(self, n: int) -> int:\n        rules = {\n            'a' : ['e'],# Each vowel 'a' may only be followed by an 'e'.\n            'e' : ['a', 'i'], # Each vowel 'e' may only be followed by an 'a' or an 'i'.\n            'i' : ['a','e','o','u'], # Each vowel 'i' may not be followed by another 'i'.\n            'o' : ['i', 'u'], # Each vowel 'o' may only be followed by an 'i' or a 'u'.\n            'u' : ['a'], # Each vowel 'u' may only be followed by an 'a'.\n            '#' : ['a','e','i','o','u']\n        }\n        \n        @lru_cache(None)\n        def helper(n, prev):\n            if n == 0:\n                return 1\n            ans = 0\n            for next_char in rules[prev]:\n                ans += helper(n-1, next_char)\n            return ans\n        return helper(n, '#') % Solution.MOD","sub_path":"lc/1220.CountVowelsPermutation.py","file_name":"1220.CountVowelsPermutation.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203115065","text":"import scipy.ndimage as ndi\nimport numpy as np\nfrom PIL import Image\n\ndef transform_matrix_offset_center(matrix, x, y):\n    o_x = float(x) / 2 + 0.5\n    o_y = float(y) / 2 + 0.5\n    offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n    reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n    transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n    return transform_matrix\n    \ndef apply_transform(x, transform_matrix, channel_axis=2, fill_mode='nearest', fill_value=0.):\n    x = np.rollaxis(x, channel_axis, 0)\n    x = x.astype('float32')\n    final_affine_matrix = transform_matrix[:2, :2]\n    final_offset = transform_matrix[:2, 2]\n    channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,\n        final_offset, order=0, mode=fill_mode, cval=fill_value) for x_channel in x]\n    x = np.stack(channel_images, axis=0)\n    x = np.rollaxis(x, 0, channel_axis + 1)\n    return x\n\ndef random_transform_fn(x, T):\n    \"\"\"\n    Randomly transform an image using the given parameters\n\n    Transforms:\n    - rotate\n    - shift\n    - shear\n    - zoom\n    - flip\n    \n    Arguments\n    ---------\n    x : np.ndarray\n    T : dictionary\n        holds values for the various transforms\n        example:\n        T = { \n            \"rotation_range\" : 15,\n            \"shift_range\" : [0.3,0.3],\n            \"shear_range\" : 0.1,\n            \"zoom_range\" : [1,1.4],\n            \"horizontal_flip\" : True,\n            \"vertical_flip\" : False,\n            \"x_fill_mode\" : \"constant\",\n            \"y_fill_mode\" : \"nearest\",\n            \"fill_value\" : 0\n        }\n    \"\"\"\n    x = np.asarray(x)\n\n    # only support tf ordering\n    orig_dim = x.ndim\n    if x.ndim == 2:\n        x = np.expand_dims(x,-1)\n
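The memoized recursion in the LeetCode 1220 solution above explores O(n) depths with a six-way prev state. The same count falls out of a bottom-up recurrence that tracks how many length-L strings end in each vowel; inverting the successor rules gives the predecessor sets (a can follow e, i or u; e can follow a or i; i can follow e or o; o can follow i; u can follow i or o). A compact sketch of that alternative, not the original submission:

    def count_vowel_permutation(n: int) -> int:
        MOD = 10 ** 9 + 7
        a = e = i = o = u = 1  # strings of length 1 ending in each vowel
        for _ in range(n - 1):
            a, e, i, o, u = ((e + i + u) % MOD, (a + i) % MOD,
                             (e + o) % MOD, i, (i + o) % MOD)
        return (a + e + i + o + u) % MOD

    assert count_vowel_permutation(1) == 5
    assert count_vowel_permutation(2) == 10
    assert count_vowel_permutation(5) == 68

This runs in O(n) time and O(1) space and applies the modulus as it goes, so intermediate values stay small.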
\n    img_row_axis = 0\n    img_col_axis = 1\n    channel_axis = 2\n\n    ### ROTATION\n    if T['rotation_range'] > 0:\n        theta = np.pi / 180 * np.random.uniform(-T['rotation_range'],\n            T['rotation_range'])\n    else:\n        theta = 0\n    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n                                [np.sin(theta), np.cos(theta), 0],\n                                [0, 0, 1]])\n\n    ### SHIFT HEIGHT\n    if T['shift_range'][0] > 0:\n        tx = np.random.uniform(-T['shift_range'][0], \n            T['shift_range'][0]) * x.shape[img_row_axis]\n    else:\n        tx = 0\n    ### SHIFT WIDTH\n    if T['shift_range'][1] > 0:\n        ty = np.random.uniform(-T['shift_range'][1], \n            T['shift_range'][1]) * x.shape[img_col_axis]\n    else:\n        ty = 0\n    translation_matrix = np.array([[1, 0, tx],\n                                   [0, 1, ty],\n                                   [0, 0, 1]])\n\n    ### SHEAR\n    if T['shear_range'] > 0:\n        shear = np.random.uniform(-T['shear_range'],T['shear_range'])\n    else:\n        shear = 0\n    shear_matrix = np.array([[1, -np.sin(shear), 0],\n                             [0, np.cos(shear), 0],\n                             [0, 0, 1]])\n\n    ### ZOOM\n    if T['zoom_range'][0] == 1. and T['zoom_range'][1] == 1.:\n        zx, zy = 1, 1\n    else:\n        zx, zy = np.random.uniform(T['zoom_range'][0], T['zoom_range'][1], 2)\n    zoom_matrix = np.array([[zx, 0, 0],\n                            [0, zy, 0],\n                            [0, 0, 1]])\n    ### COMBINE MATRICES INTO ONE TRANSFORM MATRIX\n    transform_matrix = np.dot(np.dot(np.dot(rotation_matrix,\n                                            translation_matrix),\n                                     shear_matrix),\n                              zoom_matrix)\n    h, w = x.shape[img_row_axis], x.shape[img_col_axis]\n    ### APPLY COMBINED TRANSFORM ON X IMAGE\n    transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n    #x = apply_transform(x, transform_matrix, channel_axis,\n    #        fill_mode=T['x_fill_mode'], fill_value=T['fill_value'])\n    xs = np.dsplit(x,3)\n    xs = [apply_transform(c, transform_matrix, channel_axis,fill_mode='constant',\n            fill_value=fv) \n            for c,fv in zip(xs,[255.0,0,255.0])]\n    x = np.concatenate(xs, axis=2)\n    \n    ### HORIZONTAL FLIP\n    if T['horizontal_flip'] == True:\n        if np.random.random() < 0.5:\n            x = np.asarray(x).swapaxes(img_col_axis, 0)\n            x = x[::-1, ...]\n            x = x.swapaxes(0, img_col_axis)\n\n    ### VERTICAL FLIP\n    if T['vertical_flip']:\n        if np.random.random() < 0.5:\n            x = np.asarray(x).swapaxes(img_row_axis, 0)\n            x = x[::-1, ...]\n            x = x.swapaxes(0, img_row_axis)\n\n\n    if orig_dim == 2:\n        x = np.squeeze(x)\n    # both branches built the same image, so a single return suffices\n    return Image.fromarray(x.astype(np.uint8))\n","sub_path":"transform_helper.py","file_name":"transform_helper.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"392861796","text":"from mesa import Agent\nimport math\nfrom vampiro_chromatium.chromatium import Chromatium\n\n\nclass Vampiro(Agent):\n    '''\n    A vampirococcus that walks around, chromatium gradient dependent\n    '''\n\n    grid = None\n    x = None\n    y = None\n    moore = True\n    energy = None\n    prey = None\n\n    def __init__(self, unique_id, pos, model, moore, energy=None, prey=None):\n        super().__init__(unique_id, model)\n        self.pos = pos\n        self.moore = moore\n        self.energy = energy\n        self.prey = prey\n    \n    def gradient_move(self):\n        '''\n        #Gradient move depending on Chromatium\n        '''\n        neigh_obj = self.model.grid.get_neighbors(self.pos, self.moore, include_center=True, radius=1)\n        food_patches = [obj for obj in neigh_obj if isinstance(obj, Chromatium)]\n        if len(food_patches) > 0:\n            next_move = self.random.choice(food_patches)\n            self.model.grid.move_agent(self, next_move.pos)\n\n        # Otherwise move random\n        else:\n            next_move = self.random.choice(neigh_obj)\n            self.model.grid.move_agent(self, next_move.pos)\n
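transform_matrix_offset_center above conjugates a 3x3 homogeneous transform with translations so that rotation, shear and zoom pivot on the image centre rather than the origin. A standalone sketch of the same construction for a pure rotation (illustrative only, not part of the module):

    import numpy as np

    def rotation_about_center(theta, h, w):
        # translate centre to origin, rotate, translate back
        o_x, o_y = h / 2.0 + 0.5, w / 2.0 + 0.5
        offset = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1.0]])
        rot = np.array([[np.cos(theta), -np.sin(theta), 0],
                        [np.sin(theta),  np.cos(theta), 0],
                        [0, 0, 1.0]])
        reset = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1.0]])
        return offset @ rot @ reset

    # the centre is a fixed point of the composed transform
    m = rotation_about_center(np.pi / 4, 100, 100)
    print(m @ np.array([50.5, 50.5, 1.0]))  # -> [50.5, 50.5, 1.0]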
    '''\n    def random_move(self):\n        \n        #Random Move\n        \n        neigh_obj = self.model.grid.get_neighbors(self.pos, self.moore, include_center=True, radius=1)\n        next_move = self.random.choice(neigh_obj)\n        self.model.grid.move_agent(self, next_move.pos)\n    '''\n    \n    \n    \n    \n    def step(self):\n\n        self.energy -= 1\n        already_eat = False\n        ### Eating\n        if self.prey != None:\n            already_eat = True\n            '''\n            if already attached, follow and suck the prey\n            '''\n            # Check if it is alive\n            if self.prey.pos != None:\n                # First, move vampiro to chromatium position\n                self.model.grid.move_agent(self, self.prey.pos)\n                \n                # Now eat it\n                # if it has enough energy\n                if self.prey.energy >= self.model.vampiro_gain_from_food:\n                    self.energy += self.model.vampiro_gain_from_food\n                    self.prey.energy -= self.model.vampiro_gain_from_food\n                # if it does not, suck it and kill it\n                else:\n                    self.energy += self.model.vampiro_gain_from_food-self.prey.energy\n                    self.prey.energy -= self.model.vampiro_gain_from_food\n                    #self.model.grid._remove_agent(self.prey.pos, self.prey)\n                    #self.model.schedule.remove(self.prey)\n                    self.prey = None\n            else:\n                already_eat = False\n\n        if not already_eat:\n            '''\n            Not attached to prey, move gradient/random and check for some prey\n            '''\n            self.gradient_move()\n            # Check for chromatium\n            this_cell = self.model.grid.get_cell_list_contents([self.pos])\n            Chrome = [obj for obj in this_cell if isinstance(obj, Chromatium)]\n            if len(Chrome) > 0:\n                self.prey = self.random.choice(Chrome)\n                # if it has enough energy attach to it\n                if self.prey.energy >= self.model.vampiro_gain_from_food:\n                    self.energy += self.model.vampiro_gain_from_food\n                    self.prey.energy -= self.model.vampiro_gain_from_food\n                # if it does not, suck it and kill it\n                else:\n                    self.energy += self.model.vampiro_gain_from_food-self.prey.energy\n                    self.prey.energy -= self.model.vampiro_gain_from_food\n                    #self.model.grid._remove_agent(self.prey.pos, self.prey)\n                    #self.model.schedule.remove(self.prey)\n                    self.prey = None                    \n\n        \n        ### Death\n        if self.energy < 0:\n            self.model.grid._remove_agent(self.pos, self)\n            self.model.schedule.remove(self)\n        \n        ### Reproduce        \n        if self.random.random() < self.model.vampiro_reproduce and self.prey != None:\n            self.energy = math.floor(self.energy/2)\n            vampirino = Vampiro(self.model.next_id(), self.pos, self.model, self.moore, self.energy)\n            self.model.grid.place_agent(vampirino, vampirino.pos)\n            self.model.schedule.add(vampirino)\n            \n            \n","sub_path":"vampiro_chromatium/vampiro.py","file_name":"vampiro.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"264030646","text":"#-*- coding:utf8-*-\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\nclass sampleA(object):\n\n    # undirected graph\n    def sampleFunc(self):\n        G = nx.Graph()\n        G.add_node(1) #add a node\n        G.add_edge(2, 3) #add an edge\n        G.add_edge(3, 2) #redundant for an undirected graph\n\n        print(\"nodes: {}\".format(G.nodes()))\n        print(\"edges: {}\".format(G.edges()))\n        print(\"number of edges: {}\".format(G.number_of_edges()))\n\n        nx.draw(G)\n        plt.savefig(\"sampleA.png\")\n        plt.show()\n\nclass sampleB(object):\n    def sampleFunc(self):\n        G = nx.DiGraph()\n        G.add_node(1)\n        G.add_node(2) #add a node\n        G.add_nodes_from([3,4,5,6]) #add a set of nodes\n        G.add_cycle([1,2,3,4]) #add a cycle\n        G.add_edge(1,3)\n        G.add_edges_from([(3,5),(3,6),(6,7)]) #add a set of edges\n        nx.draw(G)\n        plt.savefig(\"sampleB.png\")\n        plt.show()\n\n\nclass sampleC(object):\n    # directed graph\n    def sampleFunc(self):\n        G = nx.DiGraph()\n        G.add_node(1)\n        G.add_node(2)\n
G.add_nodes_from([3,4,5,6])\n G.add_cycle([1,2,3,4])\n G.add_edge(1,3)\n G.add_edges_from([(3,5),(3,6),(6,7)])\n nx.draw(G)\n plt.savefig(\"sampleC.png\")\n plt.show()\n\n\n\n\n\ndef test():\n # objA = sampleA()\n # objA.sampleFunc()\n obj = sampleC()\n obj.sampleFunc()\n\n\ntest()\n","sub_path":"其他/图算法/NetwirkX-python/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453209234","text":" \n\"\"\"\n======================COPYRIGHT/LICENSE START==========================\n\nObjectTablePopup.py: \n\nCopyright (C) 2005 Wayne Boucher, Rasmus Fogh, Tim Stevens and Wim Vranken (University of Cambridge and EBI/MSD)\n\n=======================================================================\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version.\n \nA copy of this license can be found in ../../../license/LGPL.license\n \nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n \nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\n======================COPYRIGHT/LICENSE END============================\n\nfor further information, please contact :\n\n- CCPN website (http://www.ccpn.ac.uk/)\n- PDBe website (http://www.ebi.ac.uk/pdbe/)\n\n=======================================================================\n\nIf you are using this software for academic purposes, we suggest\nquoting the following references:\n\n===========================REFERENCE START=============================\nR. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.\nHabeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,\nH. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The\nCCPN project: An interim report on a data model for the NMR community\n(Progress report). Nature Struct. Biol. 9, 416-418.\n\nWim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus\nH. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John\nIonides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:\nDevelopment of a Software Pipeline. 
Proteins 59, 687 - 696.\n\n===========================REFERENCE END===============================\n\"\"\"\nimport Tkinter\n \n \nfrom memops.general.Constants import infinity, apiSubDirs\nfrom memops.general import Implementation\n \nfrom memops.gui.ButtonList import ButtonList\nfrom memops.gui.MessageReporter import showError\n\nfrom memops.editor.BasePopup import BasePopup\nfrom memops.editor.ObjectTable import ObjectTable\nfrom memops.editor.Util import getAllObjects\n\nclass ObjectTablePopup(BasePopup):\n\n def __init__(self, parent, root, metaclass, onlyShow = False, *args, **kw):\n\n self.root = root\n self.metaclass = metaclass\n self.onlyShow = onlyShow\n\n BasePopup.__init__(self, parent, title='Browser for %s objects' % metaclass.name,\n location='+50+50', *args, **kw)\n \n def body(self, master):\n \n master.grid_rowconfigure(0, weight=1)\n master.grid_columnconfigure(0, weight=1)\n\n self.table = ObjectTable(master, self.metaclass)\n self.table.grid(row=0, column=0, sticky=Tkinter.NSEW)\n\n if (self.onlyShow):\n texts = [ 'Close' ]\n commands = [ self.close ]\n else:\n texts = [ 'Ok', 'Cancel' ]\n commands = [ self.ok, self.close ]\n\n self.buttons = ButtonList(master, texts=texts, commands=commands,\n direction=Tkinter.HORIZONTAL, expands=True)\n self.buttons.grid(row=1, column=0, sticky=Tkinter.EW)\n\n self.selected = None\n\n self.setObjects()\n\n self.doRegisters()\n\n def doRegisters(self):\n\n # do not need '' since dealing with keys\n #for func in ('', '__init__', 'delete'):\n for func in ('__init__', 'delete'):\n self.registerNotify(self.setObjects, self.metaclass.qualifiedName(), func)\n\n # unregisters dealt with by BasePopup\n\n def apply(self):\n\n selected = self.table.currentObject\n\n if (not selected):\n showError('No selection', \"No object selected\", parent=self)\n return False\n\n self.selected = selected\n\n return True\n\n def setObjects(self, *extra):\n\n objects = getAllObjects(self.root, self.metaclass)\n\n self.table.setObjects(objects)\n","sub_path":"ccpnmr2.4/python/memops/editor/ObjectTablePopup.py","file_name":"ObjectTablePopup.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317438941","text":"from __future__ import print_function\nfrom datahandling import insert_update_db, get_dtype_names, extractnames\nfrom tinydb import Query\nfrom numpy import mean\nfrom equipment import LOI, Colour, Tensile, MassFrac, ConeCal\nfrom logging import debug\n\nQ = Query()\n\ndef raw_to_db(db, equip_name, data_type):\n \"\"\" Works for LOI and Colour Data \"\"\"\n \n if equip_name == 'LOI':\n equipment = LOI()\n elif equip_name == 'colour':\n equipment = Colour()\n \n File = equipment.alldatafiles()\n f = File[0]\n sample_numbers, vals = equipment.simple_data(f)\n \n for sample_number, val in zip(sample_numbers, vals):\n \n done = db.contains((Q.equipment_name == equipment.name) & (Q.sample_number == int(sample_number)))\n \n if not done:\n entry = {'equipment_name': equipment.name,\n 'sample_number': int(sample_number),\n 'data_type': data_type,\n 'value': val\n }\n db.insert(entry)\n \ndef raw_to_db_tensile(db):\n equipment = Tensile()\n File = equipment.alldatafiles()\n f = File[0]\n \n data = equipment.simple_data(f)\n data_types = ['E_t_MPa', 'sigma_max_MPa', 'epsilon_max_%',\n 'sigma_break_MPa', 'epsilon_break_%']\n \n sample_numbers = data[0]\n data = data[1:]\n \n specimens = []\n sample_numbers_only = []\n for n in sample_numbers:\n n_split = 
n.split('_')\n sample_numbers_only.append(n_split[0])\n specimens.append(n_split[1])\n \n for d, d_n in zip(data, data_types):\n for n, s, val in zip(sample_numbers_only, specimens, d):\n done = db.contains((Q.equipment_name == equipment.name) &\n (Q.sample_number == int(n)) &\n (Q.specimen_number == int(s)) &\n (Q.data_type == d_n)\n ) \n if not done:\n entry = {'equipment_name': equipment.name,\n 'sample_number': int(n),\n 'specimen_number': int(s),\n 'data_type': d_n,\n 'value': val\n }\n db.insert(entry)\n \ndef calc_tensile_mean(sv_db):\n \"\"\" Calculates the mean values between the different tensile specimens\n and enters them into the single values database \"\"\"\n equipment = Tensile()\n data_types = get_dtype_names(sv_db, equipment.name)\n if len(data_types) == 10:\n data_types = [d for i, d in enumerate(data_types) if i in [0,4,6,8,9]]\n \n for i in range(53):\n sn = i + 1\n\n for dt in data_types:\n data = sv_db.search((Q.equipment_name == equipment.name) &\n (Q.sample_number == sn) &\n (Q.data_type == dt)\n )\n if len(data) != 0:\n done = sv_db.contains((Q.equipment_name == equipment.name) &\n (Q.sample_number == sn) &\n (Q.data_type == (dt + '_mean'))\n )\n \n if not done:\n vals = extractnames(data, 'value')\n mean_val = mean(vals)\n sv_db.insert({'equipment_name': equipment.name,\n 'sample_number': sn,\n 'data_type': (dt + '_mean'),\n 'value': mean_val})\n \ndef raw_to_db_massfrac(db):\n raw_in = MassFrac()\n File = raw_in.alldatafiles()\n f = File[0]\n \n data = raw_in.simple_data(f)\n sample_numbers = data[0]\n data = data[1:]\n \n ingredients = ['PVC', 'filler', 'FR', 'stabiliser', 'DINP', 'LDH', 'spherical_filler']\n \n for d, d_n in zip(data, ingredients):\n for n, val in zip(sample_numbers, d):\n done = db.contains((Q.sample_number == n) &\n (Q.data_type == raw_in.name) &\n (Q.ingredient == d_n)\n )\n\n if not done:\n entry = {'sample_number': int(n),\n 'data_type': raw_in.name,\n 'ingredient': d_n,\n 'value': val\n }\n\n db.insert(entry)\n \ndef raw_to_db_conecal(db):\n equipment = ConeCal()\n Files = equipment.alldatafiles()\n\n for f in Files:\n sample_no = equipment.file_parse(f)\n\n done = db.contains((Q.sample_number == int(sample_no)) &\n (Q.equipment_name == equipment.name)\n )\n\n if not done:\n params, param_vals = equipment.simple_data(f)\n\n data_types = ['peak_HRR_kWpm2',\n 't_peak_HRR_s',\n 'tot_HR_MJpm2',\n 'tot_O2cons_g',\n 'tot_masslost_g',\n 'tot_smokeprod_m2',\n 'MARHE_kW_m2',\n 'C-factor',\n 't_to_ign_s'\n ]\n\n dtype_indexes = [1, 9, 23, 24, 25, 28, 29, 83, 84]\n\n values = [param_vals[i] for i in dtype_indexes]\n\n insert_update_db(db, False, equipment.name, sample_no, data_types, values)\n else:\n debug('skipped sample %s', sample_no)\n","sub_path":"raw_to_db.py","file_name":"raw_to_db.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"471160684","text":"import asyncio\nimport json\nimport os\nimport struct\n\nimport time\n\nimport aiohttp\nimport aiopubsub\nimport bitstring\nfrom can import Message\n\nfrom boatgod.hub import hub\nfrom boatgod.nmea2000 import Iso11783Decode\n\nwith open(os.path.dirname(__file__) + \"/../pgns.json\", \"r\") as f:\n pgns = json.load(f)[\"PGNs\"]\n\nprint(\"pgn loaded\")\n\nimport sqlite3\n\nconn = sqlite3.connect(os.path.dirname(__file__) + \"/../log.sqlite\")\n\ncursor = conn.cursor()\n\ncursor.execute(\n \"\"\"\n create table if not exists \"data\"\n (\"time\" INT ,\n \"pgn\" INT,\n \"data\" BLOB\n )\n 
\"\"\")\n\n\ndef parse(pgn, data):\n result = {\n \"id\": pgn['PGN'],\n \"description\": pgn['Description']\n }\n bs = bitstring.BitArray(data)\n for field in pgn['Fields']:\n bits = bs[field[\"BitOffset\"]:field[\"BitOffset\"] + field[\"BitLength\"]]\n result[field['Name']] = bits.intle * float(field.get('Resolution', 1))\n\n return result\n\n\ndef write_to_database(time, pgn, data):\n cursor.execute(\"\"\"insert into data values(?,?,?)\"\"\", (time, pgn, data))\n conn.commit()\n\n\nasync def store_records():\n print(\"init store\")\n\n subscriber = aiopubsub.Subscriber(hub, 'store_handler')\n\n canbus_key = aiopubsub.Key('canbus', 'message', '*')\n # lora_key = aiopubsub.Key('lora', 'obj', '*')\n # subscriber.subscribe(lora_key)\n lora_key = aiopubsub.Key('lora', 'message', 'nmea2000')\n subscriber.subscribe(canbus_key)\n subscriber.subscribe(lora_key)\n\n depth_date = 0\n lat_lon_date = 0\n engine_time = 0\n voltage_time = 0\n while True:\n key, message = await subscriber.consume()\n pgn, *_ = Iso11783Decode(message.arbitration_id)\n\n if pgn == 128267: # depth # latlong\n if depth_date + 3 <= time.time():\n depth_date = time.time()\n write_to_database(time.time(), pgn, message.data)\n\n if pgn == 129025:\n if lat_lon_date + 3 <= time.time():\n lat_lon_date = time.time()\n write_to_database(time.time(), pgn, message.data)\n\n if pgn == 127488:\n _, rpm, _, _ = struct.unpack(\" 10 and engine_time + 3 <= time.time():\n engine_time = time.time()\n write_to_database(time.time(), pgn, message.data)\n\n if pgn == 127508:\n\n _, voltage, _, _, _ = struct.unpack(\" 10 and voltage_time + 60 <= time.time():\n voltage_time = time.time()\n write_to_database(time.time(), pgn, message.data)\n\n\nasync def post_data():\n SLEEP = 5\n\n BOAT_ID = \"e6acdc24-2869-4b14-8890-c24ad338c791\" #SLAVIA\n\n # HOST = \"http://127.0.0.1:8000/api/datalogs/%s/\" % BOAT_ID\n HOST = \"https://boatpilot.me/api/datalogs/%s/\" % BOAT_ID\n\n while True:\n async with aiohttp.ClientSession() as session:\n cursor.execute((\"\"\"select ROWID, \"time\", pgn, \"data\" from data order by \"time\" limit 1000 \"\"\"))\n body = \"\"\n sent = []\n for rowid, dt, pgn, data in cursor.fetchall():\n sent.append([rowid, ])\n body += \"%s:%s:%s\\n\" % (dt, pgn, data.hex())\n try:\n async with session.post(HOST, data=body) as resp:\n if resp.status == 200:\n cursor.executemany(\"\"\"delete from data where ROWID = ?\"\"\", sent)\n conn.commit()\n except aiohttp.ClientError:\n pass\n\n await asyncio.sleep(SLEEP)\n","sub_path":"boatgod/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"628489564","text":"import csv\nimport tweepy\nfrom nltk.tokenize import TweetTokenizer\nfrom tweepy import RateLimitError\n\nfrom DataExtraction.Scripts import twitter_credentials\nimport time\nimport datetime\n\ndate_now = datetime.datetime.now()\n\ntknzr = TweetTokenizer(strip_handles=True)\n\naccess_token = twitter_credentials.ACCESS_TOKEN\naccess_token_secret = twitter_credentials.ACCESS_TOKEN_SECRET\nconsumer_key = twitter_credentials.CONSUMER_KEY\nconsumer_secret = twitter_credentials.CONSUMER_SECRET\n\nauth = tweepy.auth.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\ncsvFile = open('../../../CollectedData/NDC/NDCOldTweets.csv', 'a', encoding=\"utf-16\")\ncsvWriter = csv.writer(csvFile)\ncsvWriter.writerow([\n \"Date\", 
\"Id\", \"Text\", \"Likes\", \"Re-Tweets\", \"Location\"\n])\n\nfor tweet in tweepy.Cursor(api.search, q=\"ndc\", since_id=\"2020-01-01\", until=date_now,\n tweet_mode=\"extended\").items():\n try:\n full_text = tweet.full_text\n full_text = ' '.join(tknzr.tokenize(full_text))\n csvWriter.writerow([\n tweet.created_at, tweet.id, full_text, tweet.favorite_count, tweet.retweet_count, tweet.user.location\n ])\n print(full_text)\n except RateLimitError:\n time.sleep(60 * 15)\n except UnicodeEncodeError:\n pass\n","sub_path":"DataExtraction/Scripts/NDC/generalTweets/ndcOldTweets.py","file_name":"ndcOldTweets.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176386211","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.use('macosx')\n\n\ndef plot_signal(samples, signal, label, xlim=None, ylim=None, show=False):\n fig = plt.figure(figsize=(16, 4))\n fig.tight_layout()\n plt.plot(samples, signal)\n plt.title(label)\n plt.ylabel('Amplitude')\n plt.xlabel('Time [s]')\n if ylim is not None:\n plt.ylim(ylim)\n if xlim is not None:\n plt.xlim(xlim)\n fig.savefig(\"results/\" + label + \".png\", bbox_inches='tight')\n if show:\n plt.show()\n\n\ndef plot_absolute_spectrum(fq, pw, label, xmax=None, xticks=None, ylim=None, show=False):\n fig = plt.figure(figsize=(16, 4))\n fig.tight_layout()\n plt.plot(fq, pw)\n plt.title(label)\n plt.ylabel('Power')\n plt.xlabel('Frequency [Hz]')\n if ylim is not None:\n plt.ylim(ylim)\n if xticks is not None:\n plt.xticks([0] + xticks + [int(fq[-1])])\n if xmax is not None:\n plt.xlim(0, xmax)\n else:\n plt.xlim(0, fq[-1])\n fig.savefig(\"results/\" + label + \".png\", bbox_inches='tight')\n if show:\n plt.show()\n plt.close(fig)\n\n\ndef plot_spectogram(signal, sf, window, wlen, noverlap, label, highlights=None, show=False):\n plt.clf()\n fig = plt.figure(figsize=(8, 4))\n\n s, f, t, im = plt.specgram(signal, NFFT=wlen, Fs=sf, window=window, noverlap=noverlap, mode='magnitude')\n # mode='magnitude' to match the MATLAB function which returns the STFT, otherwise it would return the PSD\n\n plt.title(label)\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [s]')\n plt.colorbar(label='Power / frequency [dB/Hz]')\n\n if highlights is not None:\n for highlight in highlights:\n plt.plot([0, 1], [highlight, highlight], '--', linewidth=0.7)\n\n fig.savefig(\"results/tukey/\" + label + \".png\", bbox_inches='tight')\n if show:\n plt.show()\n plt.close(fig)\n","sub_path":"lab04/ex4/plots_aux.py","file_name":"plots_aux.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293429368","text":"# %load q05_read_csv_data/build.py\n# Default imports\nimport numpy as np\n\n# Enter code here\ndef read_ipl_data_csv(path,dtype='|S50'):\n d=dtype\n ipl_matches_array = np.genfromtxt(path,delimiter=',',dtype=d,skip_header=1)\n return ipl_matches_array;\nread_ipl_data_csv('data/ipl_matches_small.csv')\n\n\n\n\n","sub_path":"q05_read_csv_data/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5281466","text":"suits = 'CDHS'\nranks = '23456789TJQKA'\n\nfrom abc import ABCMeta, abstractmethod\nimport sys\nfrom enum import IntEnum\nimport logging\n\nclass Ranking(IntEnum):\n HIGH_CARD = 1\n ONE_PAIR = 2\n TWO_PAIRS = 3\n THREE_OF_A_KIND = 4\n 
STRAIGHT = 5\n    FLUSH = 6\n    FULL_HOUSE = 7\n    FOUR_OF_A_KIND = 8\n    STRAIGHT_FLUSH = 9\n\nclass Card(metaclass=ABCMeta):\n    \"\"\"Abstract class for playing cards\n    \"\"\"\n    def __init__(self, rank_suit):\n        if rank_suit[0] not in ranks or rank_suit[1] not in suits:\n            raise ValueError(f'{rank_suit}: illegal card')\n        self.card = rank_suit\n    \n    def __repr__(self):\n        return self.card\n    \n    @abstractmethod\n    def value(self):\n        \"\"\"Subclasses should implement this method\n        \"\"\"\n        raise NotImplementedError(\"value method not implemented\")\n    \n    @property  # lets the method be read like an attribute\n    def rank(self):\n        return self.card[0]\n    \n    @property\n    def suit(self): \n        return self.card[1]\n\n    # card comparison operators\n    def __gt__(self, other): return self.value() > other.value()\n    def __ge__(self, other): return self.value() >= other.value()\n    def __lt__(self, other): return self.value() < other.value()\n    def __le__(self, other): return self.value() <= other.value()\n    def __eq__(self, other): return self.value() == other.value()\n    def __ne__(self, other): return self.value() != other.value()\n\n\n\n\n\nclass PKCard(Card):\n    values = dict(zip(ranks, range(2, 2+len(ranks))))\n    def __init__(self, rank_suit):\n        if rank_suit[0] not in ranks or rank_suit[1] not in suits:\n            raise ValueError(f'{rank_suit}: illegal card')\n        self.card = rank_suit\n    \n    def __repr__(self):\n        return self.card\n    \n    def value(self):\n        return PKCard.values[self.card[0]]\n    \n\nimport random\nclass Deck:\n    def __init__(self, cls):\n        self.cards = []\n        self.cards = [cls(i+j) for i in ranks for j in suits]\n    \n    \n    def __str__(self):\n        return str(self.cards)\n    \n    def shuffle(self):\n        self.cards = random.sample(self.cards, len(self.cards))\n        return self.cards\n    \n    def __len__(self):\n        return len(self.cards)\n    \n    def __getitem__(self, index):\n        return self.cards[index]\n    \n    def pop(self):\n        return self.cards.pop()\n\n\n\nclass Hands:\n    def __init__(self, cards):\n        if len(cards) != 5:\n            raise ValueError('not 5 cards')\n        self.cards = sorted(cards, reverse=True)\n    \n    def __str__(self):\n        return str(self.cards)\n\n    def __len__(self):\n        return len(self.cards)\n    \n    def __getitem__(self, index):\n        return self.cards[index]\n    \n    def pop(self):\n        return self.cards.pop()\n    \n    def value(self, key):\n        dic = dict(zip(ranks, range(2, 2+len(ranks))))\n        return dic.get(key)\n\n    def is_flush(self):\n        test_suit = str(self.cards[0])  # elements of self.cards are Card objects, so wrap them in str()\n        if(all(str(element)[1] == test_suit[1] for element in self.cards)):\n            return True\n        else:\n            return False\n    \n    def is_straight(self):\n        temp = []\n        for i in self.cards:\n            temp.append(self.value(str(i)[0]))  # value() takes the rank character as its argument\n        temp = sorted(temp, reverse=True)\n        if temp == [14, 5, 4, 3, 2]:\n            return True\n        else:\n            stop = 1\n            for i in range (0, 4):\n                if temp[i] != temp[i+1] + 1:\n                    return False\n                stop = 0\n            if stop == 1:\n                return True\n    \n    def classify_by_rank(self):\n        temp = []\n        for i in self.cards:\n            temp.append(self.value(str(i)[0])) \n        temp = sorted(temp, reverse=True)\n        r_Dic = {}\n        for i in temp:\n            if i not in r_Dic:\n                r_Dic[i] = 1\n            else:\n                r_Dic[i] += 1\n        return r_Dic\n    \n    def find_a_kind(self):\n        r_Dic2 = self.classify_by_rank()\n        val_num = list(r_Dic2.values())\n\n        if 4 in val_num:\n            return 8 #'Four of a kind'\n        elif 3 in val_num and 2 in val_num:\n            return 7 #'Full house'\n        elif val_num.count(2) == 2:\n            return 3 #'Two pair'\n        elif 3 in val_num:\n            return 4 #'Three of a kind'\n        elif 2 in val_num:\n            return 2 #'One pair'\n        else:\n            return 1 #'High card'\n    \n    def 
tell_hand_ranking(self):\n if self.is_flush() == True:\n flush = True\n else:\n flush = False\n if self.is_straight() == True:\n straight = True\n else:\n straight = False\n fak = self.find_a_kind()\n if flush == True and straight == True:\n return 9 #'Straight flush'\n elif fak == 8: #'Four of a kind'\n return 8 #'Four of a kind'\n elif fak == 7: #'Full house':\n return 7 #'Full house'\n elif flush == True:\n return 6 #'Flush'\n elif straight == True:\n return 5 #'Straight'\n elif fak == 4: #'Three of a kind':\n return 4 #'Three of a kind'\n elif fak == 3: #'Two pair':\n return 3 #'Two pair'\n elif fak == 2: #'One pair':\n return 2 #'One pair'\n else:\n return 1 #'High card'\n def play_game(self, other):\n p1 = self.tell_hand_ranking()\n p2 = other.tell_hand_ranking()\n p1_dic0 = self.classify_by_rank()\n p2_dic0 = other.classify_by_rank()\n p1_dic = {}\n p2_dic = {}\n for k, v in p1_dic0.items():\n p1_dic[v] = k\n for i, j in p2_dic0.items():\n p2_dic[j] = i\n \n temp1 = []\n for i in self.cards:\n temp1.append(self.value(i[0])) \n p1_sorted = sorted(temp1, reverse=True)\n temp2 = []\n for i in other.cards:\n temp2.append(other.value(i[0])) \n p2_sorted = sorted(temp2, reverse=True)\n\n if(p1 > p2):\n return \"p1 has won!\"\n if(p1 < p2):\n return \"p2 has won!\"\n elif(p1 == 9 or p1 == 6 or p1 == 5 or p1 == 1):\n if(p1_sorted > p2_sorted):\n return \"p1 has won!\"\n elif(p1_sorted < p2_sorted):\n return \"p2 has won!\"\n else:\n return \"draw!\"\n elif(p1 == 8):\n if(p1_dic[4] > p2_dic[4]):\n return \"p1 has won!\"\n else:\n return \"p2 has won!\"\n elif(p1 == 7 or p1 == 4):\n if(p1_dic[3] > p2_dic[3]):\n return \"p1 has won!\"\n elif(p1_dic[3] == p2_dic[3]):\n if str(p1_dic.values()) > str(p2_dic.values()):\n return \"p1 has won!\"\n elif str(p1_dic.values()) == str(p2_dic.values()):\n return \"draw!\"\n else:\n return \"p2 has won!\"\n else: \n return \"p2 has won!\"\n elif(p1 == 3):\n k1 = []\n k2 = []\n for i in p1_dic0:\n if(p1_dic0[i] == 2):\n k1.append(i)\n for i in p2_dic0:\n if(p2_dic0[i] == 2):\n k2.append(i)\n\n if(k1 > k2):\n return \"p1 has won!\"\n elif(k1 < k2):\n return \"p2 has won!\"\n else:\n if(p1_dic[1] > p2_dic[1]):\n return \"p1 has won!\"\n elif(p1_dic[1] < p2_dic[1]):\n return \"p2 has won!\"\n else:\n return \"draw!\"\n elif(p1 == 2):\n if(p1_dic[2] > p2_dic[2]):\n return \"p1 has won!\"\n elif(p1_dic[2] < p2_dic[2]):\n return \"p2 has won!\"\n else:\n k1 = []\n k2 = []\n for i in p1_dic0:\n if(p1_dic0[i] == 1):\n k1.append(i)\n for i in p2_dic0:\n if(p2_dic0[i] == 1):\n k2.append(i)\n if(k1 > k2):\n return \"p1 has won!\"\n elif(k1 < k2):\n return \"p2 has won!\"\n else:\n return \"draw!\"\n elif(p1 == 1):\n if(p1_dic[1] > p2_dic[1]):\n return \"p1 has won!\"\n elif(p1_dic[1] < p2_dic[1]):\n return \"p2 has won!\"\n else:\n return \"draw!\"\n \n\n \n \n\n# if __name__ == \"__main__\":\n# import sys\n# def test(did_pass):\n# \"\"\" Print the result of a test. 
\"\"\"\n# linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n# if did_pass:\n# msg = \"Test at line {0} ok.\".format(linenum)\n# else:\n# msg = (\"Test at line {0} FAILED.\".format(linenum))\n# print(msg)\n \n# deck = Deck(PKCard) # deck of poker cards\n# deck.shuffle()\n# c = deck[0] # __getitem__\n# print('A deck of', c.__class__.__name__)\n# #print(deck) #__str__\n# #print(deck[-5:])\n# while len(deck) >= 10: # __len__\n# my_hand = []\n# your_hand = []\n# for i in range(5):\n# for hand in (my_hand, your_hand):\n# card = deck.pop()\n# hand.append(card)\n# p1 = Hands(my_hand)\n# p2 = Hands(your_hand)\n\n# p1 = Hands(['2D', '3D', '4D', '5D', 'AD'])\n# p2 = Hands(['3H', '4H', '5H', '6H', '7H']) #9\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['3H', '4H', '5H', '6H', '7H'])\n# p2 = Hands(['2D', '3D', '4D', '5D', 'AD']) #9\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['3D', '4D', '5D', '6D', '7D'])\n# p2 = Hands(['3H', '4H', '5H', '6H', '7H']) #9\n# test(p1.play_game(p2) == \"draw!\")\n\n# p1 = Hands(['2D', '2H', '2S', '2C', 'AD'])\n# p2 = Hands(['4D', '4H', '4S', '4C', '7D']) #8\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['6D', '6H', '6S', '6C', 'AD'])\n# p2 = Hands(['4D', '4H', '4S', '4C', '7D']) #8\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['6D', '6H', '6S', '3C', '3D'])\n# p2 = Hands(['4D', '4H', '4S', '7C', '7D']) #7\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['2D', '2H', '6S', '2C', '6D'])\n# p2 = Hands(['4D', '2H', '4S', '4C', '2D']) #7\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['2D', '3D', '6D', '7D', 'KD'])\n# p2 = Hands(['4C', '2C', '5C', '8C', 'TC']) #6\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['4C', '2C', '5C', '8C', 'TC']) \n# p2 = Hands(['2D', '3D', '6D', '7D', 'KD']) #6\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['4C', '2C', '5C', '8C', 'TC']) \n# p2 = Hands(['4D', '2D', '5D', '8D', 'TD']) #6\n# test(p1.play_game(p2) == \"draw!\")\n\n# p1 = Hands(['7C', '6D', '8S', '9S', 'TH']) \n# p2 = Hands(['4D', '5H', '6C', '3D', '2S']) #5\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['4D', '5H', '6C', '3D', '2S']) \n# p2 = Hands(['7C', '6D', '8S', '9S', 'TH']) #5\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['4D', '5H', '6C', '3D', '2S']) \n# p2 = Hands(['4C', '5D', '6S', '3S', '2H']) #5\n# test(p1.play_game(p2) == \"draw!\")\n\n# p1 = Hands(['7D', '7H', '7C', '3D', '2S']) \n# p2 = Hands(['5C', '5D', '5S', '3S', '2H']) #4\n# test(p1.play_game(p2) == \"p1 has won!\")\n \n# p1 = Hands(['8C', '8D', '8S', '3S', '4H']) \n# p2 = Hands(['7D', '7H', '7C', '3D', '2S']) #4\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['9C', '9D', '4S', '4D', '2H']) \n# p2 = Hands(['7D', '7H', '3C', '3D', '2S']) #3\n# test(p1.play_game(p2) == \"p1 has won!\")\n \n# p1 = Hands(['5C', '5D', '4S', '4D', '2H']) \n# p2 = Hands(['7D', '7H', '3C', '3D', '2S']) #3\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['5C', '5D', '4S', '4D', '7H']) \n# p2 = Hands(['5S', '5H', '4C', '4H', '2S']) #3\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['5C', '5D', '4S', '4D', '2H']) \n# p2 = Hands(['5S', '5H', '4C', '4H', '7S']) #3\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['5C', '5D', '4S', '4D', '7H']) \n# p2 = Hands(['5S', '5H', '4C', '4H', '7S']) #3\n# test(p1.play_game(p2) == \"draw!\")\n\n# p1 = Hands(['5C', '5D', '4S', '3D', '7H']) \n# p2 = Hands(['2S', '5H', '4C', '4H', '7S']) 
#2\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['2C', '2D', '4S', '3D', '7H']) \n# p2 = Hands(['8S', '5H', '4C', '4H', '7S']) #2\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['5C', '5D', '4S', '2D', '7H']) \n# p2 = Hands(['5S', '5H', '4C', '2H', '7S']) #2\n# test(p1.play_game(p2) == \"draw!\")\n\n# p1 = Hands(['5C', '5D', '4S', '2D', 'KH']) \n# p2 = Hands(['5S', '5H', '4C', '2H', '7S']) #2\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['5C', '5D', '4S', '2D', 'KH']) \n# p2 = Hands(['5S', '5H', '4C', '2H', 'AS']) #2\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['5C', '2D', '4S', '7D', 'KH']) \n# p2 = Hands(['5S', '6H', '4C', '7H', '9S']) #1\n# test(p1.play_game(p2) == \"p1 has won!\")\n\n# p1 = Hands(['5C', '2D', '4S', '7D', '8H']) \n# p2 = Hands(['5S', '6H', '4C', '7H', 'KS']) #1\n# test(p1.play_game(p2) == \"p2 has won!\")\n\n# p1 = Hands(['5C', '6D', '4S', '7D', 'KH']) \n# p2 = Hands(['5S', '6H', '4C', '7H', 'KS']) #1\n# test(p1.play_game(p2) == \"draw!\")\n\n    \n    \n","sub_path":"PA_6.py","file_name":"PA_6.py","file_ext":"py","file_size_in_byte":13786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"339186881","text":"size = int(input('Enter the size of the list: '))\na = []\nfor i in range(0, size):\n    element = int(input('Enter the elements of the list: '))\n    a.append(element)\n\n#Bubble Sort Algorithm => Remember\nfor i in range(size):#Max no. of steps to sort the list is equal to the no. of elements of the list\n    for j in range(size - i - 1):\n        if a[j] > a[j + 1]:\n            t = a[j]\n            a[j] = a[j + 1]\n            a[j + 1] = t\n\nprint(a)\n","sub_path":"bubblesort.py","file_name":"bubblesort.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"106056591","text":"possible = input(\"Enter a sentence to check if it is a pangram: \")\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\ncount = 0\nwhile possible != \"\":\n    possible = possible.lower()\n    \n    if len(possible) < 26:\n        print (\"Not enough letters.\")\n    else:\n        if set(alphabet) <= set(possible):\n            print (\"True\")\n        else:\n            print (\"False\")\n    \n    possible = input(\"Enter a sentence to check if it is a pangram: \")\n    \n    ","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577577775","text":"# coding: utf-8\n\"\"\"\nShelve helpers: store the user's choices and the execution state of tasks\n\"\"\"\n\nimport sys\nimport shelve\nimport logging\nimport warnings\nfrom collections import namedtuple\n\nimport settings\n\n\n\"\"\"\ndb schema:\n'necessary_tasks': [Task(task_id, task_name)] required tasks not yet finished, stored as bytes\n'finished_tasks': set([task_id]) finished tasks, including ones completed in earlier runs\n'datanodes': ip and hostname of the datanodes\n'namenode': ip and hostname of the namenode\n'dtsearch': machine where dtsearch is deployed\n'dataloader': machine where dataloader is deployed\n'tomcat': machine where tomcat is deployed\n\"\"\"\nTask = namedtuple('Task', ['task_id', 'name'])\nHost = namedtuple('Host', ['ip', 'hostname'])\ndb = shelve.open(settings.user_data, 'c', protocol=1, writeback=True)\ndb['necessary_tasks'] = db.get('necessary_tasks', [])\ndb['finished_tasks'] = db.get('finished_tasks', set())\ndb['datanodes'] = db.get('datanodes', set())\ndb['namenode'] = db.get('namenode', set())\n
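The storage module above keeps all of its state in a shelve opened with writeback=True, which caches every accessed entry and only flushes mutations on sync()/close(). A minimal Python 3 sketch of that pattern (the path is hypothetical):

    import shelve

    # writeback=True lets in-place mutations of stored containers persist,
    # at the cost of caching every accessed entry until sync()/close()
    with shelve.open('/tmp/demo_tasks', writeback=True) as db:
        db.setdefault('finished_tasks', set())
        db['finished_tasks'].add(42)   # mutates the cached set
        db.sync()                      # flush the cache to disk

Without writeback=True the add() would only change an in-memory copy and the stored entry would remain empty.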
\n\ndef signal_handler(signal, frame):\n    db.close()\n\n\ndef close_db():\n    db.close()\n\n\ndef set_task_complete(task_id, name):\n    name = name.encode('utf-8')\n    try:\n        db['necessary_tasks'].remove(Task(task_id, name))\n    except ValueError:\n        pass\n    db['finished_tasks'].add(task_id)\n    db.sync()\n\n\ndef is_task_complete(task_id):\n    return task_id in db.get('finished_tasks')\n\n\ndef add_necessary_task(task_id, name):\n    name = name.encode('utf-8')\n    if Task(task_id, name) in db['necessary_tasks']:\n        return\n\n    import bisect\n    bisect.insort_left(db['necessary_tasks'], Task(task_id, name))\n    db.sync()\n\n\ndef get_next_prerequisite_id():\n    if len(db['necessary_tasks']) > 0:\n        return db['necessary_tasks'][0].task_id\n    else:\n        return sys.maxint\n\n\ndef get_next_prerequisite_name():\n    return db['necessary_tasks'][0].name\n\n\ndef set_master(ip, hostname):\n    db['namenode'].add(Host(ip, hostname))\n    logging.info('set namenode: ' + str(db['namenode']))\n\n\ndef set_slaves(slaves):\n    \"\"\"\n    :param slaves: a list of (ip, hostname)\n    \"\"\"\n    for ip, hostname in slaves:\n        db['datanodes'].add(Host(ip, hostname))\n\n    with open(settings.slaves, 'w') as f:\n        for host in db['namenode']:\n            f.write(host.hostname + '\\n')\n\n        datanode_lines = '\\n'.join([h.hostname for h in db['datanodes']])\n        f.write(datanode_lines)\n\n    db.sync()\n    logging.info('set datanodes: ' + str(db['datanodes']))\n\n\ndef get_hostname(ip):\n    for host in db['datanodes']:\n        if host.ip == ip:\n            return host.hostname\n    else:\n        raise Exception('Error in get_hostname, no such ip: ' + ip)\n\n\ndef set_user_choice(name, hostname):\n    if name not in ('dtsearch', 'dataloader', 'tomcat'):\n        raise Exception('set_user_choice, invalid name: ' + name)\n\n    db[name] = hostname\n    logging.info(name + ' ' + db[name])\n    db.sync()\n\n\ndef get_user_choice(name):\n    if name not in ('dtsearch', 'dataloader', 'tomcat'):\n        raise Exception('get_user_choice, invalid name: ' + name)\n\n    return db[name]\n","sub_path":"DeployGDB/src/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"302357815","text":"from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n    url(r'^$', 'mte481.website.views.root'),\n    url(r'^(?P<page_name>[^/]+)/?$', 'mte481.website.views.page'),\n    url(r'^styles/(?P