diff --git "a/3440.jsonl" "b/3440.jsonl" new file mode 100644--- /dev/null +++ "b/3440.jsonl" @@ -0,0 +1,662 @@ +{"seq_id":"468333570","text":"from tensorflow.python.keras.engine.base_layer import Layer\nimport tensorflow as tf\n\n\nfrom rational.utils.get_weights import get_parameters\n\n\nclass Rational(Layer):\n def __init__(self, approx_func=\"leaky_relu\", degrees=(5, 4), cuda=False,\n version=\"A\", trainable=True, train_numerator=True,\n train_denominator=True):\n \"\"\"\n Rational activation function inherited from tensorflow keras ``Layer``\n\n Arguments:\n approx_func (str):\n The name of the approximated function for initialisation. \\\n The different initialable functions are available in \\\n `rational.rationals_config.json`. \\n\n Default ``leaky_relu``.\n degrees (tuple of int):\n The degrees of the numerator (P) and denominator (Q).\\n\n Default ``(5, 4)``\n cuda (bool):\n Use GPU CUDA version. \\n\n If ``None``, use cuda if available on the machine\\n\n Default ``None``\n version (str):\n Version of Rational to use. Rational(x) = P(x)/Q(x)\\n\n `A`: Q(x) = 1 + \\|b_1.x\\| + \\|b_2.x\\| + ... + \\|b_n.x\\|\\n\n `B`: Q(x) = 1 + \\|b_1.x + b_2.x + ... + b_n.x\\|\\n\n `C`: Q(x) = 0.1 + \\|b_1.x + b_2.x + ... + b_n.x\\|\\n\n `D`: like `B` with noise\\n\n Default ``A``\n trainable (bool):\n If the weights are trainable, i.e, if they are updated during \\\n backward pass\\n\n Default ``True``\n Returns:\n Module: Rational module\n \"\"\"\n super(Rational, self).__init__()\n\n w_numerator, w_denominator = get_parameters(version, degrees, approx_func)\n self.numerator = tf.Variable(initial_value=w_numerator, trainable=trainable and train_numerator)\n self.denominator = tf.Variable(initial_value=w_denominator, trainable=trainable and train_denominator)\n\n if version == \"A\":\n rational_func = Rational_PYTORCH_A_F\n elif version == \"B\":\n rational_func = Rational_PYTORCH_B_F\n elif version == \"C\":\n rational_func = Rational_PYTORCH_C_F\n elif version == \"D\":\n rational_func = Rational_PYTORCH_D_F\n else:\n raise ValueError(\"version %s not implemented\" % version)\n\n self.rational_func = rational_func\n\n def build(self, input_shape):\n pass\n\n def call(self, inputs, training=True):\n return self.rational_func(inputs, self.numerator, self.denominator, training)\n","sub_path":"rational/keras/rationals.py","file_name":"rationals.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"20150799","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\n\nfrom main.models import Product\nfrom .cart import Cart\nfrom .forms import CartAddProductForm, CartAddProductQuantityForm\nfrom coupons.forms import CouponApplyForm\n# pagination\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\n#json\nfrom django.core import serializers\nimport json\n\n\n\"\"\"\nVerify if quantity is available, but we won't substract if from the stock unless the user purchases\n\"\"\"\n@require_POST\ndef cart_add(request, product_id):\n cart = Cart(request)\n # Get the product that we want to add\n product = get_object_or_404(Product, id= product_id, available=True, stock__gte=1)\n product_slug = product.slug\n # Create a form using the data entered in the form\n form = 
CartAddProductForm(request.POST)\n \n if form.is_valid():\n cleaned_data = form.cleaned_data\n desired_quantity = cleaned_data['quantity']\n if product.stock > desired_quantity:\n quantity = desired_quantity\n elif product.stock < desired_quantity & product.stock > 10:\n quantity = 5\n else:\n quantity = 1\n print(f'User can take {quantity} of product {product.name}')\n cart.add(\n product=product,\n quantity=quantity,\n )\n return redirect('cart:cart_detail')\n else:\n return redirect(f'/produits/{product_slug}')\n\n\ndef cart_add_one_product(request, product_id):\n cart = Cart(request)\n # Get the product that we want to add\n product = get_object_or_404(Product, id= product_id, available=True, stock__gte=1)\n \n if product.stock >= 1:\n quantity = 1\n cart.add_one(\n product=product,\n quantity=quantity,\n )\n return redirect('/produits')\n\n\n@require_POST\ndef cart_add_one_product_with_quantity(request, slug, product_id):\n cart = Cart(request)\n # Get the product that we want to add\n product = get_object_or_404(Product, id= product_id, available=True, stock__gte=1)\n \n form = CartAddProductQuantityForm(request.POST)\n \n if form.is_valid():\n cleaned_data = form.cleaned_data\n desired_quantity = cleaned_data['quantity']\n print(desired_quantity)\n if product.stock > desired_quantity:\n quantity = desired_quantity\n elif product.stock < desired_quantity & product.stock > 10:\n quantity = 5\n else:\n quantity = 1\n print(f'User can take {quantity} of product {product.name}')\n cart.add(\n product=product,\n quantity=quantity,\n )\n return redirect('cart:cart_detail')\n else:\n return redirect(f'/produits/{product.slug}', {'failed':True})\n\n\"\"\"\nUser can remove without problem, anyways the order is not validated yet\n\"\"\"\n\n\ndef cart_remove(request, product_id):\n cart = Cart(request)\n product = get_object_or_404(Product, id=product_id)\n cart.remove(product)\n return redirect('cart:cart_detail')\n\n\n@require_POST\ndef cart_update(request, product_id):\n cart = Cart(request)\n\n product = get_object_or_404(Product, id=product_id)\n desired_quantity = int(request.POST.get('quantity'))\n if product.stock > desired_quantity:\n quantity = desired_quantity\n elif product.stock < desired_quantity & product.stock > 10:\n quantity = 5\n else:\n quantity = 1\n\n cart.update(product=product, quantity=quantity)\n return redirect('cart:cart_detail')\n\n\n\n@require_POST\ndef cart_update_with_json(request, product_id):\n cart = Cart(request)\n product = get_object_or_404(Product, id=product_id)\n data = json.loads(request.body.decode(\"utf-8\"))\n \n desired_quantity = int(data['quantity'])\n if product.stock > desired_quantity:\n quantity = desired_quantity\n elif product.stock < desired_quantity & product.stock > 10:\n quantity = 5\n else:\n quantity = 1\n\n cart.update(product=product, quantity=quantity)\n \n print(\"CART ITEM MODIFIED\")\n res = {\n \"name\": product.name,\n \"price\": product.price,\n \"quantity\": quantity,\n \"total-product\": product.price * quantity,\n \"sub-total\": cart.get_total_price(),\n \"total\": cart.get_total_price_after_discount(),\n \"number-products\": cart.__len__()\n }\n return JsonResponse(res)\n\n@require_POST\n\ndef cart_empty(request):\n cart = Cart(request)\n cart.clear()\n return redirect('cart:cart_detail')\n \n\ndef cart_detail(request):\n \n cart = Cart(request)\n coupon_apply_form = CouponApplyForm()\n \n products = []\n items = []\n for item in cart:\n item['update_quantity_form'] = CartAddProductForm(initial={ 'quantity': 
item['quantity']})\n item['total'] = item['product'].price * item['quantity']\n items.append(item)\n products.append(item['product'])\n page = request.GET.get('page')\n paginator = Paginator(items, 1000)\n \n try:\n list_products = paginator.get_page(page)\n \n except PageNotAnInteger:\n list_products = paginator.get_page(1)\n \n except EmptyPage:\n list_products = paginator.get_page(paginator.num_pages)\n \n context = {\n 'cart': cart,\n 'listing_products': list_products,\n 'coupon_apply_form': coupon_apply_form\n }\n return render(request, 'cart/cart-page.html', context)\n\n\n\n \n \n ","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"170685738","text":"import json\nimport boto3\n\ndef lambda_handler(event, context):\n \n \n client = boto3.client('lex-runtime')\n \n print(\"event is: \", event)\n \n user_id = 'User1'\n bot_name_lex = 'SearchRestaurant'\n bot_alias = 'diningPhase'\n msg_text = event['messages'][0]['unstructured']['text']\n response = client.post_text(\n botName=bot_name_lex ,\n botAlias= bot_alias,\n userId=user_id,\n sessionAttributes={\n 'string': 'string'\n },\n requestAttributes={\n 'string': 'string'\n },\n inputText= msg_text\n)\n \n \n bot_response= {\n \"messages\": [\n {\n \"type\": \"unstructured\",\n \"unstructured\": {\n \"id\": 'User1',\n \"text\": response['message'],\n \"timestamp\": \"\"\n }\n }\n ]\n \n }\n \n \n \n \n return bot_response","sub_path":"Lambda Functions/LF0.py","file_name":"LF0.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"302310426","text":"import bpy\nfrom mathutils import Quaternion\nimport math\nimport os\nimport xml.etree.ElementTree as ET\n\n# anim file of the animation you want to replace\n#template_anim = r'C:\\Users\\kikko\\Desktop\\anim\\wadblender\\idle.anim'\n\ntemplate = \"\"\"\n\n 1\n 2\n 43\n 103\n 0\n STAND_IDLE\n 0\n 0\n 0\n 0\n \n \n \n\n\"\"\"\n\ndef export_anim(template_anim, zoffset, rig_name):\n # output file\n path = os.path.dirname(os.path.realpath(template_anim)) + '\\\\'\n name = os.path.basename(template_anim)\n\n if os.path.exists(template_anim):\n with open(template_anim) as f:\n xml_string = '\\n'.join(f.readlines())\n else:\n xml_string = template\n\n obj = bpy.data.objects[rig_name]\n\n lara_skin_names = ['HIPS', 'LEFT_THIGH', 'LEFT_SHIN', 'LEFT_FOOT',\n 'RIGHT_THIGH', 'RIGHT_SHIN', 'RIGHT_FOOT',\n 'TORSO', 'RIGHT_UPPER_ARM', 'RIGHT_FOREARM', 'RIGHT_HAND',\n 'LEFT_UPPER_ARM', 'LEFT_FOREARM', 'LEFT_HAND', 'HEAD']\n\n\n xml_string = xml_string.replace('utf-16', 'utf8')\n tree = ET.ElementTree(ET.fromstring(xml_string))\n root = tree.getroot()\n\n # remove template anim file keyframes\n keyframes_node = tree.find(\"KeyFrames\")\n for keyframe in keyframes_node.findall(\"WadKeyFrame\"):\n keyframes_node.remove(keyframe)\n\n\n fcurves = obj.animation_data.action.fcurves\n keyframes_count = len(fcurves[0].keyframe_points)\n\n keyframes_cnt_node = tree.find(\"EndFrame\")\n keyframes_cnt_node.text = str(keyframes_count)\n\n # read keyframes from anim file\n data = {}\n for fcurve in fcurves:\n if \"scale\" in fcurve.data_path or \"HIPS\" in fcurve.data_path:\n # trle does not support scale animation\n # root motion/rotation is applied to the entire rig,\n # so discard the hips bone datapath\n continue\n else:\n axis = fcurve.array_index\n data[(fcurve.data_path, axis)] = 
[]\n for i in range(keyframes_count):\n data[(fcurve.data_path, axis)].append(fcurve.evaluate(i))\n\n\n # initialize rotations and locations lists for each of the 15 Lara body parts\n # and keyframes_count keyframes\n n = len(lara_skin_names)\n rotations = [[] for _ in range(n)]\n for i in range(n):\n for j in range(keyframes_count):\n rotations[i].append([0, 0, 0, 0])\n\n locations = [[0, 0, 0] for _ in range(keyframes_count)]\n\n # For each fcurve\n for datapath, kf_points in data.items():\n if \"location\" == datapath[0]: # this is the hips location\n for i in range(keyframes_count):\n locations[i][datapath[1]] = kf_points[i] * 512 # 512 is 1m in trle\n # mixamo animations ground is at the height of the foot pivot point\n # so let's rise the z offset by the height of the foot mesh\n if datapath[1] == 2:\n locations[i][2] += zoffset\n continue\n\n if datapath[0] != 'location' and datapath[0] != 'rotation_euler':\n # location keyframes are discarded except for the hips\n bonename = datapath[0].split('\"')[1][5:][:-5]\n else:\n # the datapath for the hips rotations is rotation_euler\n bonename = 'HIPS'\n\n # save bodyparts rotations in the same order as wad tool\n axis = datapath[1]\n idx = lara_skin_names.index(bonename)\n for i in range(keyframes_count):\n rotations[idx][i][axis] = kf_points[i]\n\n\n # angles conversion\n for j, e in enumerate(rotations):\n if j == lara_skin_names.index('HIPS'):\n for i in range(keyframes_count):\n angles = [math.degrees(p) for p in e[i]]\n rotations[j][i][0] = angles[0] - 90\n rotations[j][i][1] = -angles[2] + 90\n rotations[j][i][2] = -angles[1] + 180\n\n else:\n for i in range(keyframes_count):\n q = Quaternion(e[i])\n euler = q.to_euler(\"ZXY\")\n angles = [math.degrees(e) for e in euler]\n rotations[j][i][0] = -angles[0]\n rotations[j][i][1] = angles[1]\n rotations[j][i][2] = -angles[2]\n if j == 14 or j == 3 or j == 6:\n rotations[j][i][0] -= 180\n\n\n # write output anim file\n for datapath in range(keyframes_count):\n wadkf = ET.SubElement(keyframes_node, 'WadKeyFrame')\n\n bbox = ET.SubElement(wadkf, 'BoundingBox')\n\n minimum = ET.SubElement(bbox, 'Minimum')\n ET.SubElement(minimum, 'X').text = \"0\"\n ET.SubElement(minimum, 'Y').text = \"0\"\n ET.SubElement(minimum, 'Z').text = \"0\"\n\n maximum = ET.SubElement(bbox, 'Maximum')\n ET.SubElement(maximum, 'X').text = \"0\"\n ET.SubElement(maximum, 'Y').text = \"0\"\n ET.SubElement(maximum, 'Z').text = \"0\"\n\n offset = ET.SubElement(wadkf, 'Offset')\n x = ET.SubElement(offset, 'X')\n x.text = '%f' % -locations[datapath][1]\n y = ET.SubElement(offset, 'Y')\n y.text = '%f' % locations[datapath][2]\n z = ET.SubElement(offset, 'Z')\n z.text = '%f' % locations[datapath][0]\n\n angles = ET.SubElement(wadkf, 'Angles')\n\n for i in range(n):\n rot = ET.SubElement(angles, 'WadKeyFrameRotation')\n rot = ET.SubElement(rot, 'Rotations')\n x = ET.SubElement(rot, 'X')\n x.text = '%.6f' % rotations[i][datapath][0]\n y = ET.SubElement(rot, 'Y')\n y.text = '%.6f' % rotations[i][datapath][1]\n z = ET.SubElement(rot, 'Z')\n z.text = '%.6f' % rotations[i][datapath][2]\n\n tree.write(template_anim)\n","sub_path":"anim.py","file_name":"anim.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"202070796","text":"# -*- coding:utf-8 -*-\n# vSphere Client RCE\n\nfrom .. 
import fileUtil\nfrom ServiceScanModel.models import ServiceScan\nfrom ..requestClass import Requests\n\nvuln_path = \"/ui/vropspluginui/rest/services/uploadova\"\nSM_TEMPLATE = b\"\"\"\n \n \n <_this type=\"ServiceInstance\">ServiceInstance\n \n \n \"\"\"\n\n\ndef getValue(sResponse, sTag=\"vendor\"):\n try:\n return sResponse.split(\"<\" + sTag + \">\")[1].split(\"\")[0]\n except:\n pass\n return \"\"\n\n\nclass POC:\n def __init__(self, service: ServiceScan):\n self.service = service\n self.requestUtil = Requests(service.cookies)\n self.result = False\n\n def getVersion(self, sURL):\n oResponse = self.requestUtil.post(sURL + \"/sdk\", data=SM_TEMPLATE)\n if oResponse.status_code == 200:\n sResult = oResponse.text\n if not \"VMware\" in getValue(sResult, \"vendor\"):\n return False\n else:\n sVersion = getValue(sResult, \"version\") # e.g. 7.0.0\n sBuild = getValue(sResult, \"build\") # e.g. 15934073\n return (sVersion, sBuild)\n return False\n\n def check_vul(self, url):\n resp = self.requestUtil.get(url + vuln_path)\n if resp.status_code == 405:\n (sVersion, sBuild) = self.getVersion(url)\n if (\n int(sVersion.split(\".\")[0]) == 6\n and int(sVersion.split(\".\")[1]) == 7\n and int(sBuild) >= 13010631\n ) or (\n (int(sVersion.split(\".\")[0]) == 7 and int(sVersion.split(\".\")[1]) == 0)\n ):\n return False\n else:\n return f\"VMware vCenter Server {sVersion}\"\n return False\n\n def fingerprint(self):\n try:\n if self.service.url and \"ID_VC_Welcome\" in self.service.title:\n return True\n except:\n return False\n\n def poc(self):\n try:\n version = self.check_vul(self.service.url)\n if version:\n return [\"vSphere Client RCE\", f\"{version}\"]\n else:\n return []\n except:\n return []\n","sub_path":"vulscan_Project/modules/vsphere_rce_poc.py","file_name":"vsphere_rce_poc.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"274367143","text":"import dataclasses\nimport json\nimport typing\n\nfrom dacite import Config, from_dict\n\nfrom dataclasses_avroschema import case, utils\nfrom dataclasses_avroschema.schema_definition import AvroSchemaDefinition\nfrom dataclasses_avroschema.serialization import deserialize, serialize, to_json\nfrom dataclasses_avroschema.utils import SchemaMetadata, is_custom_type\n\nfrom .fields import FieldType\n\nAVRO = \"avro\"\nAVRO_JSON = \"avro-json\"\n\nJsonDict = typing.Dict[str, typing.Any]\nCT = typing.TypeVar(\"CT\", bound=\"AvroModel\")\n\n\nclass AvroModel:\n\n schema_def: typing.Optional[AvroSchemaDefinition] = None\n klass: typing.Any = None\n metadata: typing.Optional[SchemaMetadata] = None\n user_defined_types: typing.Optional[typing.Tuple[utils.UserDefinedType]] = None\n\n @classmethod\n def generate_dataclass(cls: typing.Any) -> typing.Any:\n if dataclasses.is_dataclass(cls):\n return cls\n return dataclasses.dataclass(cls)\n\n @classmethod\n def generate_metadata(cls: typing.Any) -> SchemaMetadata:\n meta = getattr(cls.klass, \"Meta\", None)\n\n return SchemaMetadata.create(meta)\n\n @classmethod\n def generate_schema(cls: typing.Type[CT], schema_type: str = \"avro\") -> AvroSchemaDefinition:\n if cls.schema_def is None:\n # Generate metaclass and metadata\n cls.klass = cls.generate_dataclass()\n cls.metadata = cls.generate_metadata()\n cls.user_defined_types = ()\n\n # let's live open the possibility to define different\n # schema definitions like json\n if schema_type == \"avro\":\n # cache the schema\n cls.schema_def = 
cls._generate_avro_schema()\n else:\n raise ValueError(\"Invalid type. Expected avro schema type.\")\n\n return cls.schema_def\n\n @classmethod\n def _generate_avro_schema(cls: typing.Any) -> AvroSchemaDefinition:\n return AvroSchemaDefinition(\"record\", cls.klass, metadata=cls.metadata, parent=cls)\n\n @classmethod\n def avro_schema(cls: typing.Any, case_type: typing.Optional[str] = None) -> str:\n avro_schema = cls.generate_schema(schema_type=AVRO).render()\n\n # After generating the avro schema, reset the raw_fields to the init\n cls.user_defined_types = ()\n\n if case_type is not None:\n avro_schema = case.case_record(avro_schema, case_type)\n\n return json.dumps(avro_schema)\n\n @classmethod\n def avro_schema_to_python(cls: typing.Any) -> typing.Dict[str, typing.Any]:\n return json.loads(cls.avro_schema())\n\n @classmethod\n def get_fields(cls: typing.Any) -> typing.List[FieldType]:\n if cls.schema_def is None:\n return cls.generate_schema().fields\n return cls.schema_def.fields\n\n @staticmethod\n def standardize_custom_type(value: typing.Any) -> typing.Any:\n if is_custom_type(value):\n return value[\"default\"]\n return value\n\n def asdict(self) -> JsonDict:\n data = dataclasses.asdict(self)\n\n # te standardize called can be replaced if we have a custom implementation of asdict\n # for now I think is better to use the native implementation\n return {key: self.standardize_custom_type(value) for key, value in data.items()}\n\n def serialize(self, serialization_type: str = AVRO) -> bytes:\n schema = self.avro_schema_to_python()\n\n return serialize(self.asdict(), schema, serialization_type=serialization_type)\n\n @classmethod\n def deserialize(\n cls: typing.Type[CT],\n data: bytes,\n serialization_type: str = AVRO,\n create_instance: bool = True,\n writer_schema: typing.Optional[typing.Union[JsonDict, CT]] = None,\n ) -> typing.Union[JsonDict, CT]:\n\n try:\n writer_schema = writer_schema.avro_schema_to_python()\n except AttributeError:\n pass\n\n schema = cls.avro_schema_to_python()\n payload = deserialize(data, schema, serialization_type=serialization_type, writer_schema=writer_schema)\n\n if create_instance:\n return from_dict(data_class=cls, data=payload, config=Config(**cls.config()))\n return payload\n\n def to_json(self) -> JsonDict:\n # Serialize using the current AVRO schema to get proper field representations\n # and after that convert into python\n data = self.asdict()\n return to_json(data)\n\n @classmethod\n def config(cls) -> JsonDict:\n \"\"\"\n Get the default config for dacite and always include the self reference\n \"\"\"\n return {\n \"check_types\": False,\n \"forward_references\": {\n cls.klass.__name__: cls.klass,\n },\n }\n\n @classmethod\n def fake(cls: typing.Type[CT]) -> CT:\n payload = {field.name: field.fake() for field in cls.get_fields()}\n\n return from_dict(data_class=cls, data=payload, config=Config(**cls.config()))\n","sub_path":"dataclasses_avroschema/schema_generator.py","file_name":"schema_generator.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"236899665","text":"\"\"\"\"\nrobocrystallographer: Automatic generation of crystal structure descriptions.\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom os.path import join as path_join\n\n\nwith open('README.md', 'r') as file:\n long_description = file.read()\n\nsetup(\n name='robocrys',\n version=\"0.2.4\",\n description='Automatic generation of crystal structure descriptions',\n 
url='https://github.com/hackingmaterials/robocrystallographer',\n author='Alex Ganose',\n author_email='aganose@lbl.gov',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='modified BSD',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Other/Nonlisted Topic',\n 'Operating System :: OS Independent',\n ],\n keywords='crystal-structure crystallography materials-science',\n test_suite='nose.collector',\n packages=find_packages(),\n install_requires=['spglib', 'numpy', 'scipy', 'pymatgen>=2017.12.30',\n 'inflect', 'networkx', 'matminer>=0.6.3', 'monty', 'pubchempy',\n 'pybtex'],\n extras_require={'docs': ['sphinx', 'sphinx-argparse', 'sphinx_rtd_theme',\n 'sphinx-autodoc-typehints', 'm2r'],\n 'dev': ['tqdm', 'pybel', 'pebble', 'maggma'],\n 'tests': ['nose', 'coverage', 'coveralls']},\n package_data={'robocrys': [path_join('condense', 'mineral_db.json.gz'),\n path_join('condense', 'molecule_db.json.gz'),\n path_join('condense', 'formula_db.json.gz')]},\n data_files=['LICENSE', 'CONTRIBUTING.rst'],\n entry_points={'console_scripts': ['robocrys = robocrys.cli:main']}\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"471735160","text":"import copy\n\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer\nfrom reportlab.platypus import Table, TableStyle, Image\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.rl_config import defaultPageSize\nfrom reportlab.lib.units import inch\nfrom reportlab.lib import colors\n\nfrom support import *\n\nclass NoEmbargo(object):\n\n NAME = \"Immediate Access\"\n VERSION = \"1.0\"\n\n def __call__(self, filename, manuscript=\"\", journal=\"\", author=[], \n publisher=\"\"):\n \"\"\"Generate the No Embargo agreement.\"\"\"\n\n # check the parameters\n while len(author) < 4:\n author.append(\"\")\n\n doc = getDocument(filename)\n\n Story = []\n\n # Section 1\n Story.append(\n Paragraph(\n \"\"\". \n THIS ADDENDUM hereby modifies and supplements\n the attached Publication Agreement concerning the following\n Article:\"\"\", styles['outer_style'])\n )\n\n journal_info_table = Table([\n [fillInRow(manuscript, \"(manuscript title)\", width=inch*5)],\n [fillInRow(journal, \"(journal name)\", width=inch*5)],\n ],\n )\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Section 2\n Story.append(\n Paragraph(\n \"\"\". The parties to the Publication Agreement as\n modified and supplemented by this Addendum are:\"\"\", styles['outer_style'])\n )\n\n journal_info_table = Table([\n [fillInRow(author[0],\"(corresponding author)\",), \"\", \"\"],\n [fillInRow(author[1],\"\"), \"\", \"\"],\n [fillInRow(author[2],\"\"), \"\", \"\"],\n [fillInRow(author[3],\n \"\"\"(Individually or, if more than one author, collectively, Author)\"\"\"), \"\", fillInRow(publisher, \"(Publisher\")],\n ],\n colWidths=[inch*3, inch * 0.25, inch*3],\n )\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Section 3\n Story.append(\n Paragraph(\n \"\"\". 
This Addendum and the Publication Agreement,\n taken together, allocate all rights under copyright with respect\n to all versions of the Article. The parties agree that wherever\n there is any conflict between this Addendum and the Publication\n Agreement, the provisions of this Addendum are paramount and the\n Publication Agreement shall be construed accordingly.\"\"\",\n styles['outer_style'])\n )\n\n # Section 4\n Story.append(\n Paragraph(\n \"\"\". Notwithstanding any terms in the Publication\n Agreement to the contrary, AUTHOR and PUBLISHER agree as follows:\"\"\",\n styles['outer_style'])\n )\n\n Story.append(\n Paragraph(\n \"\"\". Professional Activities.\n Author retains the non-exclusive right to create derivative works\n from the Article and to reproduce, to distribute, to publicly\n perform, and to publicly display the Article in connection with\n Author's teaching, conference presentations, lectures, other\n scholarly works, and professional activities. \"\"\",\n styles['inner_style'])\n )\n\n Story.append(\n Paragraph(\n \"\"\". Distribution.\n Author has the\n non-exclusive right to distribute copies of any version of the\n Article, including but not limited to the published version, by means\n of any web server from which members of the general public can\n download copies without charge, provided that Author cites the\n journal in which the Article has been published as the source of\n first publication, when applicable. \"Published version\" means the\n version of the Article distributed by Publisher to subscribers or\n readers of the Journal.\"\"\",\n styles['inner_style'])\n )\n\n Story.append(\n Paragraph(\n \"\"\".\n Acknowledgment of Prior License Grants. Where applicable,\n Publisher acknowledges that Author's assignment of copyright or\n Author's grant of exclusive rights in the Publication Agreement is\n subject to Author's prior grant of a non-exclusive copyright license\n to Author's employing institution and/or to a funding entity that\n financially supported the research reflected in the Article as part\n of an agreement between Author or Author's employing institution\n and such funding entity, such as an agency of the United States\n government.\"\"\",\n styles['inner_style'])\n )\n\n # Section 5\n Story.append(\n Paragraph(\n \"\"\". For record keeping purposes, Author requests\n that Publisher sign a copy of this Addendum and return it to Author.\n However, if Publisher publishes the Article in the journal or in any\n other form without signing a copy of this Addendum, such publication\n manifests Publisher's assent to the terms of this Addendum.\"\"\",\n styles['outer_style'])\n )\n\n # Signature\n journal_info_table = Table([\n [\"AUTHOR\", \" \", \"PUBLISHER\"],\n [fillInRow(\"\", \"(corresponding author on behalf of all authors)\"),\n \"\", fillInRow(\"\", \"\")],\n [fillInRow(\"\", \"Date\"),\n \"\",\n fillInRow(\"\", \"Date\")]\n ],\n colWidths=[inch*3, inch*.25, inch*3],\n )\n\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Disclaimer\n Story.append(\n Paragraph(Disclaimer, styles['disclaimer'])\n )\n\n agreement = \"%s %s\" % (self.NAME, self.VERSION)\n doc.build(Story, \n onFirstPage=lambda x,y: pageInfo(agreement, x, y), \n onLaterPages=lambda x,y: pageInfo(agreement, x, y))\n\nif __name__ == '__main__':\n NoEmbargo()(\"test.pdf\", \"Extraordinary Measures\",\n \"Nature\", [\"B. 
Pants\"], \"The Publisher\")\n \n","sub_path":"scicom/scholars/agreements/noembargo.py","file_name":"noembargo.py","file_ext":"py","file_size_in_byte":6596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"386949398","text":"import pytest\nimport strawberry\nimport strawberry_django\nfrom .. import types\n\n@pytest.fixture\ndef schema():\n Query = strawberry_django.queries(types.User)\n Mutation = strawberry_django.mutations(types.User, types.Group, types.Tag, types=types.types)\n schema = strawberry.Schema(Query, mutation=Mutation)\n return schema\n\n@pytest.fixture\ndef mutation(schema, db):\n def mutation(mutation, variable_values=None):\n if not mutation.startswith('mutation'):\n mutation = 'mutation ' + mutation\n return schema.execute_sync(mutation, variable_values=variable_values)\n return mutation\n","sub_path":"tests/mutations/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"104827068","text":"from pprint import pprint\n\ndata_file = 'associations/data.txt'\nmin_support_coeff = 0.3\nmin_confidence_coeff = 0.6\n\nprint(\"Using coefficients: support\", min_support_coeff, \"confidence\", min_confidence_coeff, '\\n')\n\ntransactions = {}\n\n# read data and do equivalent class transformation\nwith open(data_file) as data:\n transactions_number = 0\n for row in data:\n elements = row.split()\n element_id = elements[0]\n for element in elements[1:]:\n if element in transactions:\n transactions[element].add(element_id)\n else:\n transactions[element] = set(element_id)\n transactions_number += 1\n\n# extract frequent items in transactions\nfrequent_threshold = 0.33 * transactions_number\nfrequent_items = [(k, len(v)/transactions_number) for k, v in transactions.items() if len(v) >= frequent_threshold]\nprint(\"Frequent items:\", frequent_items, '\\n')\n\n# find associations\nassociations = []\nfor item1 in transactions.keys():\n for item2 in transactions.keys():\n if item1 == item2:\n continue\n support = len(transactions[item1] | transactions[item2]) / transactions_number\n confidence = len(transactions[item1] & transactions[item2]) / len(transactions[item1])\n if support >= min_support_coeff and confidence >= min_confidence_coeff:\n associations.append((item1, item2, support, confidence))\n\nprint(\"Associations:\")\npprint(associations)\n","sub_path":"associations/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"467562918","text":"import asyncio\nimport asyncio\n\nasync def func(m):\n print('hello, m')\n await asyncio.sleep(1)\n print('world', m)\n\n\n\nasync def main():\n x = await func('yap')\n return x\n\nif __name__ == '__main__':\n asyncio.run(main())\n","sub_path":"asyncio-lib.py","file_name":"asyncio-lib.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"29615543","text":"items = []\nitem_count = int(input(\"How many items are we configuring? \"))\nfor _ in range(item_count):\n item = {}\n item['color'] = input(\"What color is it?\")\n if item['color'] == \"blue\":\n item['shape'] = input(\"What shape is it? \")\n item['size'] = input(\"What size is it? 
\")\n items.append(item)\n\nprint(items)\n","sub_path":"Python3/Tests/variableloop.py","file_name":"variableloop.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"4705445","text":"from lib.esp8266.wemos.d1mini import pinmap\n\nNAME = \"Solar HWS\"\n\nSOLAR_GPIO = pinmap.D5\nTANK_GPIO = pinmap.D6\nPUMP_GPIO = pinmap.D8\n\nADC = pinmap.A0\n\nPUMP_ON = 1\nPUMP_OFF = 0\n\nSOLAR_MAX_TEMP = 110\nTANK_MAX_TEMP = 80\nTANK_TARGET_TEMP = 65\n\nFREQ = 5 # seconds\nAVERAGE = 6\n\ndef pump_logic(state):\n if state.solar_temp >= SOLAR_MAX_TEMP:\n ## solar is too hot, do not want water to vaporise\n return PUMP_OFF\n \n elif state.tank_temp >= TANK_MAX_TEMP:\n ## safety cut off\n return PUMP_OFF\n \n elif state.tank_temp >= state.tank_target_temp:\n ## tank is hot enough\n return PUMP_OFF\n\n elif state.solar_temp >= 12 + state.tank_temp:\n ## solar is more than 12 deg hotter than the tank\n return PUMP_ON\n \n elif state.solar_temp <= 6 + state.tank_temp:\n ## solar is less than 6 deg hotter than the tank\n return PUMP_OFF\n\ndef pump_boost(state):\n if state.tank_temp >= TANK_MAX_TEMP:\n return PUMP_OFF\n \n elif state.tank_temp >= state.tank_target_temp + 5:\n return PUMP_OFF\n\n elif state.solar_temp <= state.tank_temp:\n return PUMP_OFF\n\n else:\n return PUMP_ON\n\ndef solar_adc_to_temp(adc):\n return (adc - 36.1997) / 6.26956 ## (adc - 288.6) / 6.38\n\ndef tank_adc_to_temp(adc):\n return (adc - 36.1997) / 6.26956 ## (adc - 285.54) / 5.269\n\nRETAIN = (\n 'tank_target_temp',\n)\n","sub_path":"build/py/solar_hot_water_controller/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"388800924","text":"from flask import Blueprint, render_template, redirect, url_for, flash\n\nfrom .github import github, require_github_login\n# from ..pagination import Paginator\n\nget_repo = Blueprint(\"get_repo\", __name__)\n\n\n@get_repo.route(\"/\", defaults={\"page\": 1})\n@get_repo.route(\"/page/\")\n@require_github_login\ndef index(page):\n resp = github.get(\"/user/repos\")\n return render_template(\"index.html\",\n repos=resp.json())\n","sub_path":"get_repo/views/get_repo.py","file_name":"get_repo.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"538059404","text":"#!/usr/bin/env python\nimport sys\n\n\nparm3='/devices/platform/soc/3f980000.usb/usb1/1-1/1-1.5/1-1.5.3/1-1.5.3:1.0/ttyUSB1/tty/ttyUSB1'\n\n# sudo udevadm control --reload-rules\n# sudo udevadm trigger\n\ndef processLine(line, fDEBUG=False):\n token = line.split('/')\n arduinoKernels = {\n # Arduino con hub usb da 9 porte\n '1-1.5.1': \"arduino10\",\n '1-1.5.2': \"arduino12\",\n '1-1.5.3': \"arduino11\",\n '1-1.5.4.1': \"rs485_01\",\n '1-1.5.4.2': \"rs485_02\",\n '1-1.5.4.3': \"rs485_03\",\n }\n\n\n for kernel, linkName in arduinoKernels.items():\n if kernel in token:\n # print (linkName)\n return linkName\n\n return None\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n line = sys.argv[1]\n retVal = processLine(line)\n\n if retVal:\n print (retVal)\n\n","sub_path":"_under_test/etc/rules.d/98-Ln-usb-parse-devpath.py","file_name":"98-Ln-usb-parse-devpath.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
+{"seq_id":"286090959","text":"from configparser import ConfigParser\nimport nibabel as nib\nimport numpy as np\nfrom json import loads\nimport sys\nimport os\n\n# Load directory and file ids\nparser = ConfigParser()\nparser.read('/home/k1201869/DOPA_symptoms/src/project_details.txt')\n#parser.read('/Users/robmcc/mnt/droplet/home/k1201869/DOPA_symptoms/src/project_details.txt')\n\nmain_directory = parser.get('project_details', 'main_directory')\nroi_directory = parser.get('project_details', 'roi_directory')\nstriatum_file = parser.get('project_details', 'striatum_file')\npet_maps_dir = ('%sdata/ki_maps' % main_directory)\nimg_dim = (loads(parser.get(\"project_details\", \"image_dimension\")))\n\nsubject_id = sys.argv[1]\n\n# set_parameters\nnumber_permutations = 1000\nnum_networks = 7\n\n# Load PET ki_map\npet_filename = os.listdir('%(1)s/%(2)s/' % {\"1\": pet_maps_dir, \"2\": subject_id})\npet_map_nii = nib.load('%(1)s/%(2)s/%(3)s' % {\"1\": pet_maps_dir, \"2\": subject_id, \"3\": pet_filename[0]})\npet_map = np.array(pet_map_nii.get_data())\n\n# Load striatal mask\nstriatal_mask_nii = (roi_directory+striatum_file)\nstriatal_mask_temp = nib.load(striatal_mask_nii, mmap=False).get_data() # disables 'memmap' special arrays\nstriatal_mask = 1 <= striatal_mask_temp\nstriatal_mask_multidim = np.stack([striatal_mask]*num_networks, axis=-1)\n\n# results array\nnetwork_kis = np.zeros([1+number_permutations, num_networks])\nnetwork_kis[0, :] = range(1, num_networks+1)\n\n# Random striatal weights\nfor i in range(number_permutations):\n random_weights = np.random.dirichlet(np.ones(num_networks), size=(img_dim))\n masked_random_weights = striatal_mask_multidim*random_weights\n for network in range(1, num_networks+1):\n # Mask pet map for each network and weight by probabilities(nb - TRUE means the value is masked)\n probability_sum = np.nansum(masked_random_weights[:, :, :, network-1])\n weighted_ki = np.multiply(masked_random_weights[:, :, :, network-1], pet_map)\n network_kis[i+1, network-1] = np.nansum(weighted_ki)/probability_sum\n\nnp.save('%(1)s/results/null_dist/data_driven/individual_results/network_kis_%(2)s.npy'\n % {\"1\": main_directory, \"2\": subject_id}, network_kis)\n","sub_path":"null_distribution/null_dist.py","file_name":"null_dist.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"505606859","text":"from math import sqrt\nfrom common import sieve_primes\n\nN = 2 ** 50\n\nres = {}\n\ndef f(n, k=-1):\n if k < 0 or k > sqrt(n):\n k = int(sqrt(n))\n if (n, k) in res:\n return res[(n, k)]\n if k < 2: return 0\n s = 0\n for p in sieve_primes(k + 1):\n m = n / (p * p)\n s += m - f(m, p - 1)\n res[(n, k)] = s\n return s\n\nprint(N - f(N))","sub_path":"150_199/src/task193/s193.py","file_name":"s193.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"373855391","text":"import time\nimport os\n\n\nclass Clock(object):\n\n # Python中的函数是没有重载的概念的\n # 因为Python中函数的参数没有类型而且支持缺省参数和可变参数\n # 用关键字参数让构造器可以传入任意多个参数来实现其他语言中的构造器重载\n def __init__(self, **kw):\n if 'hour' in kw and 'minute' in kw and 'second' in kw:\n self._hour = kw['hour']\n self._minute = kw['minute']\n self._second = kw['second']\n else:\n tm = time.localtime(time.time())\n self._hour = tm.tm_hour\n self._minute = tm.tm_min\n self._second = tm.tm_sec\n\n def run(self):\n self._second += 1\n if self._second == 60:\n self._second = 0\n 
self._minute += 1\n if self._minute == 60:\n self._minute = 0\n self._hour += 1\n if self._hour == 24:\n self._hour = 0\n\n def show(self):\n return '%02d:%02d:%02d' % (self._hour, self._minute, self._second)\n\n\nif __name__ == '__main__':\n # clock = Clock(hour=10, minute=5, second=58)\n clock = Clock()\n while True:\n os.system('clear')\n print(clock.show())\n time.sleep(1)\n clock.run()\n\n# from time import time, localtime, sleep\n\n\n# class Clock(object):\n# \"\"\"数字时钟\"\"\"\n\n# def __init__(self, hour=0, minute=0, second=0):\n# self._hour = hour\n# self._minute = minute\n# self._second = second\n\n# @classmethod\n# def now(cls):\n# ctime = localtime(time())\n# return cls(ctime.tm_hour, ctime.tm_min, ctime.tm_sec)\n\n# def run(self):\n# \"\"\"走字\"\"\"\n# self._second += 1\n# if self._second == 60:\n# self._second = 0\n# self._minute += 1\n# if self._minute == 60:\n# self._minute = 0\n# self._hour += 1\n# if self._hour == 24:\n# self._hour = 0\n\n# def show(self):\n# \"\"\"显示时间\"\"\"\n# return '%02d:%02d:%02d' % \\\n# (self._hour, self._minute, self._second)\n\n\n# def main():\n# clock = Clock.now()\n# while True:\n# print(clock.show())\n# sleep(1)\n# clock.run()\n\n\n# if __name__ == '__main__':\n# main()\n","sub_path":"python/Intro/oop/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"213688504","text":"import os\nimport tempfile\n\nAPPDIR = os.path.abspath(os.path.dirname(__file__))\nSETUPFILEDIR = os.path.abspath(os.path.join(APPDIR, \"..\"))\nTESTDIR = os.path.abspath(os.path.join(APPDIR, \"tests\"))\nMEMTEMPDIR = \"/dev/shm\"\n\nif os.path.isdir(MEMTEMPDIR):\n tempfile.tempdir = MEMTEMPDIR\n\nimport sys\nfrom setuptools import setup, find_packages\n\nVERSION = \"2020.1.1\"\nPRINT_VERBOSITY = \"high\"\nEXCLUDED_DIRS = [\".DS_Store\"]\nPROJECT_NAME = \"jumper\"\nTEMPDIR = \"/tmp\"\nTEXTTABLE_STYLE = [\"-\", \"|\", \"+\", \"-\"]\nDIRS = [f\"{TEMPDIR}/jumperworkingdirs\"]\nMINIMUM_PYTHON_VERSION = (3, 6, 0)\nCOVERAGERC_PATH = f\"{APPDIR}/.coveragerc\"\n\nassert sys.version_info >= MINIMUM_PYTHON_VERSION\n\nsetup(\n name=\"inflation-states\",\n version=VERSION,\n author=\"Terminal Labs\",\n author_email=\"solutions@terminallabs.com\",\n license=\"see LICENSE file\",\n zip_safe=False,\n include_package_data=True,\n install_requires=[\n \"setuptools\",\n \"inflation@git+https://github.com/terminal-labs/inflation.git\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n jumper=jumper.__main__:main\n \"\"\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"444455996","text":"class Solution:\n \"\"\"\n @param str: str: the given string\n @return: char: the first unique character in a given string\n \"\"\"\n\n def firstUniqChar(self, str):\n counter = {}\n\n for c in str:\n counter[c] = counter.get(c, 0) + 1\n\n for c in str:\n if counter[c] == 1:\n return c\n","sub_path":"First Unique Character in a String.py","file_name":"First Unique Character in a String.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"275177821","text":"# system libraries\nimport os\nfrom collections import deque\nfrom pathlib import Path\n\n# pygame and tile map\nimport pygame\nimport pyscroll\nimport pyscroll.data\nfrom pygame.locals import 
VIDEORESIZE\n\n# https://github.com/bitcraft/pytmx\nfrom pytmx import pytmx\nfrom pytmx.util_pygame import load_pygame\n\n# user defined ones\nfrom gameplay import GamePlay\nfrom gamestate import GameStateID\nfrom gamemenu import GameMenu\nfrom gamedata import GameData\nfrom gamewon import GameWon\nfrom gameover import GameOver\n\n# define configuration variables here\nCURRENT_DIR = Path(__file__).parent\nRESOURCES_DIR = CURRENT_DIR / \"data\"\n\n\n# simple wrapper to keep the screen resizeable\ndef initScreen(width: int, height: int) -> pygame.Surface:\n screen = pygame.display.set_mode((width, height), pygame.RESIZABLE)\n return screen\n\n\ndef getPath(filename):\n return os.path.join(RESOURCES_DIR, filename)\n\n\nclass PirateGame:\n \"\"\" This class is your most excellent Pirate game.\n\n This class is responsible for loading data, sharing it,\n creating a scrollable map, initialising audio and creating\n the initial game state to use. For the map a pyscroll group\n will be used to render the sprites.. see GamePlay\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\" Initialise the game here\n\n Sets sane values for the game and creates the window size.\n I've gone for a typical 16:9 aspect ratio here. Remember\n to use functions when initialising more complex data structures\n i.e. audio or the map\n \"\"\"\n self.width = 1920\n self.height = 1080\n self.costs = None\n self.gamedata = GameData()\n self.gamedata.fonts['scoreboard'] = pygame.font.Font('data/fonts/ErbosDraco1StNbpRegular-99V5.ttf', 72)\n self.gamedata.fonts['menu'] = pygame.font.Font(pygame.font.get_default_font(), 128)\n self.gamedata.fonts['debug'] = pygame.font.Font(pygame.font.get_default_font(), 18)\n self.screen = initScreen(self.width, self.height)\n self.background_colour = (100, 149, 237)\n self.screen.fill(self.background_colour)\n self.initAudio()\n self.loadMap()\n self.current_state = GameMenu(self.gamedata)\n self.running = False\n\n def initAudio(self) -> None:\n \"\"\" Initialises the audio\n\n At present only background audio has been provided,\n but you could choose to load additional sound effects\n and store them as part of the game data\n \"\"\"\n print('init =', pygame.mixer.get_init())\n print('channels =', pygame.mixer.get_num_channels())\n self.gamedata.background_audio = pygame.mixer.Sound('data/audio/ambient.wav')\n self.gamedata.background_audio.play(-1)\n self.gamedata.background_audio.set_volume(self.gamedata.background_volume)\n print('length =', self.gamedata.background_audio.get_length())\n\n def loadMap(self) -> None:\n \"\"\" Loads the tiled map\n\n In order for your A* pathfinding to work you will need to\n store the weights associated with the tiles. You can do this\n programmatically or via properties using the editor. 
Load\n the weights here though.\n \"\"\"\n # loads the map data\n tmx_data = load_pygame(\"./data/worldmap.tmx\")\n map_data = pyscroll.TiledMapData(tmx_data)\n\n # the map stores collision rects with the islands\n for obj in tmx_data.objects:\n self.gamedata.gamemap.islands.append(pygame.Rect(obj.x, obj.y, obj.width, obj.height))\n\n # generate the path finding map\n self.gamedata.gamemap.costs = [[0] * tmx_data.width for _ in range(tmx_data.height)]\n for layer in tmx_data.layers:\n if isinstance(layer, pytmx.TiledTileLayer):\n\n if \"cost\" in layer.properties:\n cost = layer.properties[\"cost\"]\n else:\n continue\n\n for x, y, _ in layer.tiles():\n self.gamedata.gamemap.costs[y][x] += cost\n\n # Make the scrolling layer\n self.gamedata.gamemap.map = pyscroll.BufferedRenderer(map_data, self.screen.get_size())\n\n def input_handler(self, event) -> None:\n \"\"\" Handles input events sent by pygame\n\n In a typical state system, we actually delegate these inputs\n to the active state. This allows the game to simply proxy\n on inputs to any active states.\n\n Args:\n event (pygame.Event): The input event\n \"\"\"\n\n # mute background audio\n if event.type == pygame.KEYDOWN and event.key == pygame.K_m:\n print(self.gamedata.background_audio.get_volume())\n self.gamedata.background_audio.set_volume(self.gamedata.background_volume - self.gamedata.background_audio.get_volume())\n return\n\n # exit game\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n self.running = False\n return\n\n self.current_state.input(event)\n\n def update(self, dt: float) -> None:\n \"\"\" Updates the game\n\n The update function is the heart of the game. It processes\n the message pump and looks to forward game events on\n accordingly. Use delta time (dt) to maintain consistent\n animation speeds.\n\n Args:\n dt (float): The time elapsed since the previous tick\n \"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n\n elif (\n event.type == pygame.MOUSEBUTTONDOWN or\n event.type == pygame.MOUSEBUTTONUP or\n event.type == pygame.MOUSEMOTION\n ):\n self.input_handler(event)\n\n elif event.type == pygame.KEYDOWN:\n self.input_handler(event)\n\n elif event.type == VIDEORESIZE:\n self.screen = initScreen(event.w, event.h)\n self.gamedata.gamemap.map.set_size((event.w, event.h))\n\n # delegate the update logic to the active state\n new_state = self.current_state.update(dt)\n if self.current_state.id != new_state:\n if new_state is GameStateID.START_MENU:\n self.current_state = GameMenu(self.gamedata)\n elif new_state is GameStateID.GAMEPLAY:\n self.current_state = GamePlay(self.gamedata)\n elif new_state is GameStateID.GAME_OVER:\n self.current_state = GameOver(self.gamedata)\n elif new_state is GameStateID.WINNER_WINNER:\n self.current_state = GameWon(self.gamedata)\n elif new_state is GameStateID.EXIT:\n self.running = False\n\n def render(self) -> None:\n \"\"\" Renders the active game state\n \"\"\"\n self.current_state.render(self.screen)\n pygame.display.flip()\n\n def run(self) -> None:\n \"\"\"Run the game loop\"\"\"\n clock = pygame.time.Clock()\n times = deque(maxlen=30)\n\n self.running = True\n while self.running:\n dt = clock.tick() / 1000.0\n times.append(clock.get_fps())\n\n self.update(dt)\n self.render()\n pygame.quit()\n\n\n# initialises and starts the game running\ndef main() -> None:\n pygame.init()\n pygame.font.init()\n pygame.display.set_caption('Arrrrr!!! 
Me Pirate Game!')\n\n try:\n game = PirateGame()\n game.run()\n except KeyboardInterrupt:\n pass\n finally:\n pygame.quit()\n\n exit(0)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print(f\"Launching PyGame\")\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"424497569","text":"\"\"\"\nFollowing script extracts `airport-code` data from `https://raw.githubusercontent.com/datasets/airport-codes/master/data/airport-codes.csv`\n\"\"\"\n\nimport pandas as pd\n\n# Connect to server and read the .csv with airport codes\ndf = pd.read_csv('https://raw.githubusercontent.com/datasets/airport-codes/master/data/airport-codes.csv')\n\n# Parse coordinates \ncoord = df['coordinates'].str.split(',', expand=True).astype(float)\ncoord.columns = ['longitude','latitude']\ndf['latitude'] = coord['latitude']\ndf['longitude'] = coord['longitude']\ndf.drop('coordinates', axis=1, inplace=True)\n\n# Export the data\ndf.to_csv('out/tables/airport_codes.csv', index=None, encoding='utf-8')\n","sub_path":"bi_challenge_delay_insurance/src/airport_codes_extractor.py","file_name":"airport_codes_extractor.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"12465434","text":"import sys\nsys.stdin = open('input.txt')\n\ndef code(data):\n while True:\n for j in range(1, 6): # 5번의 동작이 한 사이클\n x = data.pop(0) # queue에서 첫번째 값을 꺼내서\n if x-j > 0: # 1~5를 뺀 값이 0 보다 크면\n data.append(x-j) # 뺀 값을 뒤로 이동\n else: # 1~5를 뺀 값이 0 이하면\n data.append(0) # 뒤에 0을 저장하고\n return data # queue를 반환\n\nT = 10\n\nfor _ in range(1, T+1):\n test_case = int(input())\n data = list(map(int, input().split()))\n\n print('#{}'.format(test_case), *code(data))","sub_path":"SWEA/1225_암호생성기/1225.py","file_name":"1225.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"516334048","text":"# Megan Chu\r\n# PID: A12814536\r\n# Assignment 1\r\n# 4/10/18\r\n\r\nprint(\"Running Q6(getSeq) with data.seq and data.in...\")\r\nwith open(\"data.seq\", \"r\") as ds:\r\n with open(\"data.in\", \"r\") as di:\r\n print(\"Getting contents of Q6query.txt...\")\r\n with open(\"Q6query.txt\", \"r\") as q:\r\n query = q.readline()\r\n if query[len(query) - 1:len(query)] == \"\\n\":\r\n query = query[0:len(query) - 1]\r\n print(\"Query is: \" + query)\r\n seq = di.readline()\r\n while len(seq) > 0:\r\n seq = seq[0:len(seq) - 1] # remove new line character\r\n pair = seq.split(\" \")\r\n count = int(pair[1])\r\n ds.seek(count)\r\n ln = ds.read(len(query))\r\n while ln.lower() != query.lower() and ln[len(query) - 1:len(query)] != \"@\":\r\n count += 1\r\n ds.seek(count)\r\n ln = ds.read(len(query))\r\n if ln.lower() == query.lower():\r\n print(\"gi # of database sequence containing query is: \" + pair[0])\r\n break\r\n seq = di.readline()\r\n \r\n","sub_path":"Assignment 1/Q6.py","file_name":"Q6.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"420501037","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 10 14:29:16 2017\r\n\r\n@author: jyang\r\n\"\"\"\r\n\r\nimport pickle\r\n \r\ndef save_variable(var,file_name):\r\n pkl_file = open(file_name, 'wb')\r\n pickle.dump(var, pkl_file, -1)\r\n 
pkl_file.close()\r\n \r\ndef read_variable(file_name):\r\n pkl_file = open(file_name, 'rb')\r\n var = pickle.load(pkl_file)\r\n pkl_file.close()\r\n return var\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"618950433","text":"from trcsource.trcdata import load\r\nfrom trcframe.trcframeselector import trcframeselect\r\nfrom fieldwork.step import FieldworkLowerLimb2SideGenerationStep\r\nfrom geom.step import FieldworkGait2392GeomStep\r\nfrom muscle.step import FieldworkGait2392SomsoMuscleStep\r\nimport mayavi\r\nimport os\r\nimport json\r\n\r\nDEFAULT_MODEL_LANDMARKS = (\r\n 'pelvis-LASIS', 'pelvis-RASIS', 'pelvis-Sacral',\r\n 'femur-MEC-l', 'femur-LEC-l', \r\n 'femur-MEC-r', 'femur-LEC-r',\r\n 'tibiafibula-MM-l', 'tibiafibula-LM-l',\r\n 'tibiafibula-MM-r', 'tibiafibula-LM-r',\r\n )\r\n\r\n# path to trc file\r\npath = r'C:\\Users\\mkeo2\\Desktop\\work material\\server\\trcs\\StaticCalibration.trc'\r\n\r\ntrcData = load(path) #load trc data\r\n\r\ntrcFrame = trcframeselect(trcData, 100) #get marker coordinates for all frames\r\n\r\n# configuration data for fieldwork\r\nconfig = {}\r\nconfig['identifier'] = ''\r\nconfig['GUI'] = False\r\nconfig['registration_mode'] = 'shapemodel'\r\nconfig['pcs_to_fit'] = '1'\r\nconfig['mweight'] = '0.1'\r\nconfig['knee_corr'] = False\r\nconfig['knee_dof'] = True\r\nconfig['marker_radius'] = '5.0'\r\nconfig['skin_pad'] = '5.0'\r\nconfig['landmarks'] = {}\r\nconfig['landmarks'] = {\r\n 'femur-HC-l': 'LHJC',\r\n 'femur-HC-r': 'RHJC',\r\n 'femur-LEC-l': 'LLFC',\r\n 'femur-LEC-r': 'RLFC',\r\n 'femur-MEC-l': 'LMFC',\r\n 'femur-MEC-r': 'RMFC',\r\n 'pelvis-LASIS': 'LASI',\r\n 'pelvis-LPSIS': 'LPSI',\r\n 'pelvis-RASIS': 'RASI',\r\n 'pelvis-RPSIS': 'RPSI',\r\n 'tibiafibula-LM-l': 'LLMAL',\r\n 'tibiafibula-LM-r': 'RLMAL',\r\n 'tibiafibula-MM-l': 'LMMAL',\r\n 'tibiafibula-MM-r': 'RMMAL'\r\n}\r\n \r\nfieldworkModel = FieldworkLowerLimb2SideGenerationStep(config) # initialise fieldwork class\r\nfieldworkModel._data.inputLandmarks = trcFrame # input landmark frames\r\nfieldworkModel.execute() # execute plugin\r\nlowerLimb = fieldworkModel.output(2) # gias2 lower limb\r\nfwLandmarks = fieldworkModel.output(1) #fieldwork anatomical landmarks\r\n\r\nconfig = {}\r\nconfig['identifier'] = ''\r\nconfig['GUI'] = False\r\nconfig['scale_other_bodies'] = True\r\nconfig['in_unit'] = 'mm'\r\nconfig['out_unit'] = 'm'\r\nconfig['osim_output_dir'] = r'C:\\Users\\mkeo2\\Desktop\\work material\\server\\backend\\geom\\osim'\r\nconfig['write_osim_file'] = True\r\nconfig['subject_mass'] = None\r\nconfig['preserve_mass_distribution'] = False\r\nconfig['adj_marker_pairs'] = {}\r\n\r\ngeomModel = FieldworkGait2392GeomStep(config) #initialise geom class\r\ngeomModel.getInput(0, lowerLimb) #input gias2 lower limb\r\ngeomModel.getInput(1, 'input tracking markers')\r\ngeomModel.getInput(-1, fwLandmarks) #input gias2 landmarks\r\ngeomModel.execute() #execute plugin\r\n\r\ngeomLowerLimb = geomModel.output(-1)\r\nosimModel = geomModel.output(3)\r\n\r\n\r\nconfig = {}\r\nconfig['osim_output_dir'] = r'C:\\Users\\mkeo2\\Desktop\\work material\\server\\backend\\muscle\\osim'\r\nconfig['in_unit'] = 'cm'\r\nconfig['out_unit'] = 'm'\r\nconfig['write_osim_file'] = True\r\nconfig['update_knee_splines'] = False\r\nconfig['static_vas'] = False\r\nconfig['update_max_iso_forces'] = True\r\nconfig['subject_height'] = '169'\r\nconfig['subject_mass'] = '56'\r\n 
\r\nmuscleModel = FieldworkGait2392SomsoMuscleStep(config)\r\nmuscleModel.inputData(0, geomLowerLimb)\r\nmuscleModel.inputData(1, osimModel)\r\nmuscleModel.inputData(2, fwLandmarks)\r\nmuscleModel.execute()\r\n\r\n\r\n \r\n","sub_path":"server/backend/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"213839832","text":"#!/usr/bin/env python3\n\n# https://codeforces.com/problemset/problem/1169/A\n# (n+1)余数和保持不变,然后求相等点,再检查相等点是否在两者之间\n\ndef f(l):\n n,a,x,b,y = l\n if (a+b)%2!=0:\n return False\n m = (a+b)//2\n return (m>=a or m<=x) and (m<=b or m>=y)\n\nl = list(map(int,input().split()))\nprint('YES' if f(l) else 'NO')\n\n","sub_path":"codeforces/math数学/900/1169A两条地铁_错误逻辑2.py","file_name":"1169A两条地铁_错误逻辑2.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"631356400","text":"#===============================================================================\n#\n# AC Libs\n#\n# GENERAL DESCRIPTION\n# build script\n#\n# Copyright (c) 2014-2017 by Qualcomm Technologies, Inc.\n# All Rights Reserved.\n# QUALCOMM Proprietary/GTDR\n#\n#-------------------------------------------------------------------------------\n#\n# $Header: //components/rel/ssg.tz/1.0.2/securemsm/accesscontrol/src/components/vmidmt/build/SConscript#1 $\n# $DateTime: 2018/02/06 03:00:17 $\n# $Author: pwbldsvc $\n# $Change: 15399933 $\n# EDIT HISTORY FOR FILE\n#\n# This section contains comments describing changes made to the module.\n# Notice that changes are listed in reverse chronological order.\n#\n# when who what, where, why\n# -------- --- ---------------------------------------------------------\n# 2/16/17 jr move xbl_sec from core to ssg\n# 04/03/15 rs Moved SMEM partition protections to Hypervisor\n# 02/09/12 PS Initial release\n#===============================================================================\nimport os\nImport('env')\n\n#-------------------------------------------------------------------------------\n# Load sub scripts\n#-------------------------------------------------------------------------------\nenv.LoadSoftwareUnits()\n\nif 'USES_RCINIT' in env:\n RCINIT_IMGS = ['CTZL64_IMAGE', 'TZOS_IMAGE', 'CTZL_IMAGE']\n env.AddRCInitFunc(\n RCINIT_IMGS,\n {\n 'sequence_group' : 'RCINIT_GROUP_2',\n 'init_name' : 'vmidmt_config',\n 'init_function' : 'tzbsp_vmidmt_config',\n 'dependencies' : ['clock_init'],\n 'policy_optin' : ['default']\n })\n\n","sub_path":"trustzone_images/ssg/securemsm/accesscontrol/src/components/vmidmt/build/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"295401808","text":"from utils.spatial_utils import SpatialUtils\n\n\ndef DBSCAN(points, eps, min_points):\n \"\"\"\n cluster dataset of points according to DBSCAN methodology\n :param points: the list of vectors to cluster\n :param eps: threshold distance\n :param min_points: minimum number of points required in the cluster for it to be considered non-noise\n :return: a list of labels. 
-1 for noise, other labels begin from one\n \"\"\"\n # initialise all labels as 0, before subsequently overwriting\n labels = [0] * len(points)\n\n # ID of current cluster\n cluster_num = 0\n\n # create a seed for each point (points from which to try to grow new cluster)\n for seed_num, seed in enumerate(points):\n # if the point has already been assigned to a cluster, skip\n if labels[seed_num] != 0:\n continue\n # find all points within eps of point\n neighbours = nearby_points(points=points, ref_point=seed, eps=eps)\n\n # if it has fewer than min_points neighbours it is noise. Note it may later be classed as a boundary point\n # of another cluster and have its label updated\n if len(neighbours) < min_points:\n labels[seed_num] = -1\n # else grow cluster around this seed\n else:\n cluster_num += 1\n grow_cluster(points=points, labels=labels, seed_num=seed_num,\n neighbours=neighbours, cluster_label=cluster_num,\n eps=eps, min_points=min_points)\n return labels\n\n\ndef grow_cluster(points, labels, seed_num, neighbours, cluster_label, eps, min_points):\n \"\"\"\n Grow a new cluster with label `cluster_label` from the seed point at index `seed_num`.\n\n This function searches through the dataset to find all points that belong\n to this new cluster. When this function returns, the cluster is complete.\n\n Parameters:\n `points` - The dataset (a list of vectors)\n `labels` - List storing the cluster labels for all dataset points\n `seed_num` - Index of the seed point for this new cluster\n `neighbours` - All of the neighbours of the seed point\n `cluster_label` - The label for this new cluster.\n `eps` - Threshold distance\n `min_points` - Minimum required number of neighbours\n \"\"\"\n\n # assign the cluster label to the seed point\n labels[seed_num] = cluster_label\n\n # Look at each neighbour of the seed point.\n # neighbours will be used as a FIFO queue of points to search--that is, it\n # will grow as we discover new branch points for the cluster. 
The FIFO\n # behavior is accomplished by using a while-loop rather than a for-loop.\n # In neighbours, the points are stored as the points themselves, so their\n # indices in the original dataset are recovered by value below.\n i = 0\n\n # try branching from each point within neighbours\n # use while loop because neighbours is added to if branch is found\n while i < len(neighbours):\n ref_point = neighbours[i]\n # if ref_point was classed as noise we know it is not a branch point (noise has no point within eps)\n # but it is a leaf point of this cluster, so add to cluster of seed point\n indices = [i for i, x in enumerate(points) if x == ref_point]\n for ind in indices:\n if labels[ind] == -1:\n labels[ind] = cluster_label\n # if ref_point isn't already claimed, add to this cluster\n elif labels[ind] == 0:\n labels[ind] = cluster_label\n # find all neighbours of the neighbour\n neighbour_neighbours = nearby_points(points=points, ref_point=ref_point, eps=eps)\n # only if ref_point has the min required neighbouring points is it a branch point:\n # add all of its neighbours to the FIFO queue to be searched\n if len(neighbour_neighbours) >= min_points:\n neighbours = neighbours + neighbour_neighbours\n i += 1\n\n\ndef nearby_points(points, ref_point, eps):\n \"\"\"\n Find all points in the dataset within distance `eps` of `ref_point`.\n\n This function calculates the distance between `ref_point` and every other\n point in the dataset, and then returns only those points which are within a\n threshold distance `eps`.\n \"\"\"\n neighbours = []\n # ref_point = points[point_seed]\n for point in points:\n # if point within eps of reference point, class as neighbour\n if SpatialUtils.calc_distance(ref_point, point) <= eps:\n neighbours.append(point)\n return neighbours\n","sub_path":"utils/dbscan.py","file_name":"dbscan.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"169606963","text":"# coding:utf-8\nimport requests\n\n\"\"\"\nThis script is hard-coded\n\"\"\"\n\ndef token_value(ip):\n \"\"\"\n Fetch the token value\n :return: the token value\n \"\"\"\n result = requests.get('http://' + ip + '/api/get_jy_token/', auth=('icgoo', 'sm2906')).json()\n token = result['token']\n return token\n\ndef json_data(sup, partno, ip=None):\n \"\"\"\n Send a request and fetch the API data\n :param sup: manufacturer\n :param partno: part number\n :param token: auth token\n :param htp: IP\n :return: the response data as a dict\n \"\"\"\n url = 'http://' + ip + '/api/bom/' + sup + '/' + partno + '/'\n params = {\n 'token': token_value(ip),\n 'nocache': 1\n }\n result = requests.get(url, params=params, auth=('icgoo', 'sm2906'))\n return result.json()\n\ndef key_compare(ip1, ip2, sup, partno):\n \"\"\"\n Compare whether the two responses agree on every key\n :param d_dict:\n :return: the differences between the two responses\n \"\"\"\n r1 = json_data(sup, partno, ip1)\n r2 = json_data(sup, partno, ip2)\n print('r1=%s' % r1)\n print('r2=%s' % r2)\n\n if r1 == r2:\n return (u\"The two requests returned identical results!\")\n else:\n #compare the value of every key the two results share\n tmp = [] #differing key-values\n # t1 = [] #differing key-values\n # t2 = [] #differing key-values\n # t3 = [] #differing key-values\n ss = {}\n data1 = []\n data2 = []\n if r1['order'] != r2['order']:\n tmp.append({'order':r1.get('order')})\n\n if r1['data'] != r2['data']:\n for i in range(len(r2['data'])):\n ss.update({r2['data'][i]['sup_partno']: i}) #index url2's entries by their sup_partno value\n for i in range(len(r1['data'])):\n t1 = [] # differing key-values\n t2 = [] # differing key-values\n t3 = [] # differing key-values\n\n r1_r1 = r1['data'][i]\n sup_partno_value = r1['data'][i]['sup_partno'] #use this key to find the matching entry to compare\n j = ss[sup_partno_value]\n r2_r2 = r2['data'][j]\n\n #keys in r1_r1 but not in r2_r2\n for key in r1_r1.keys():\n if key not in r2_r2.keys():\n 
tmp.append({key:r1_r1[key]})\n t1.append({key:r1_r1[key]})\n\n # keys in r2_r2 but not in r1_r1\n for key in r2_r2.keys():\n if key not in r1_r1.keys():\n tmp.append({key:r2_r2[key]})\n t2.append({key:r2_r2[key]})\n\n # keys in both r2_r2 and r1_r1\n for key in r2_r2.keys():\n if key in r1_r1.keys():\n if r1_r1[key] != r2_r2[key]:\n tmp.append({key:r2_r2[key]})\n t3.append({key:r2_r2[key]})\n\n r1_y2 = [t1 + t3]\n r2_y2 = [t2 + t3]\n ry1 = dict(zip([sup_partno_value],r1_y2))\n ry2 = dict(zip([sup_partno_value],r2_y2))\n data1.append(ry1)\n data1.append(ry2)\n\n # print(tmp)\n # print('url1=%s'%(t1 + t3))\n # print('url2=%s'%(t2 + t3))\n print('data1=%s'% data1)\n print('data2=%s'% data2)\n print('tmp=%s'% tmp)\n return tmp\n\nif __name__ == '__main__':\n \"\"\"\n the old and the new IP address\n supplier\n part number\n ---sup and partno could be read in from a text file---\n \"\"\"\n ip1 = 'ali.icgoo.net:40081'\n ip2 = '120.76.47.26:40071'\n sup = 'arrow_verical'\n partno = 'ATTINY84A-SSU'\n key_compare(ip1, ip2, sup, partno)\n\n\n\n\n\n\n\n","sub_path":"script/result_compare.py","file_name":"result_compare.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"123014007","text":"from __future__ import absolute_import, division, print_function\n\nfrom builtins import (bytes, str, open, super, range,\n zip, round, input, int, pow, object, map)\n\n__author__ = \"Andrea Tramacere\"\n\nNOPYLAB=False\n\ntry:\n \n import pylab as plt\n \n from matplotlib import pylab as pp\n from matplotlib import gridspec\n\nexcept:\n\n NOPYLAB=True\n\n #print \"pylab not found on this system\"\n #print \"install package, and/or update pythonpath\"\n\n\n\nfrom collections import namedtuple\n\nfrom .output import section_separator,WorkPlace\n\nfrom .utils import *\n\nimport numpy as np\nimport os\n\n__all__=['PlotSED']\n\n\nclass PlotSED (object):\n \n \n def __init__(self,sed_data=None,\n model=None,\n #x_min=6.0,\n #x_max=3.0,\n #y_min=-20.0,\n #y_max=-9.0,\n interactive=False,\n plot_workplace=None,\n title='Plot',\n frame='obs',\n figsize=None):\n #autoscale=True):\n\n check_frame(frame)\n\n self.frame=frame\n\n self.axis_kw=['x_min','x_max','y_min','y_max']\n self.interactive=interactive\n\n plot_workplace=plot_workplace\n #self.line_tuple=namedtuple('line',['label','ref'])\n self.lines_data_list=[]\n self.lines_model_list=[]\n self.lines_res_list = []\n\n #self.legend=[]\n \n \n\n if self.interactive==True:\n \n \n plt.ion()\n print ('running PyLab in interactive mode')\n \n\n #--------------------------------------------------------------\n\n \n #set workplace\n if plot_workplace is None:\n plot_workplace=WorkPlace()\n self.out_dir=plot_workplace.out_dir\n self.flag=plot_workplace.flag\n \n else:\n self.out_dir=plot_workplace.out_dir\n self.flag=plot_workplace.flag\n \n \n self.title=\"%s_%s\"%(title,self.flag)\n \n \n #Build sedplot \n \n if figsize is None:\n figsize=(10,8)\n\n self.fig=plt.figure(figsize=figsize)\n \n\n\n self.gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])\n \n \n self.sedplot= self.fig.add_subplot(self.gs[0])\n self._add_res_plot()\n \n self.set_plot_axis_labels()\n \n #if autoscale==True:\n self.sedplot.set_autoscalex_on(True)\n self.sedplot.set_autoscaley_on(True)\n self.sedplot.set_autoscale_on(True)\n self.counter=0\n\n\n\n self.sedplot.grid(True,alpha=0.5)\n \n\n\n self.sedplot.set_xlim(6, 30)\n if frame == 'obs':\n self.sedplot.set_ylim(-20, -8)\n\n elif frame == 'src':\n self.sedplot.set_ylim(38, 55)\n else:\n unexpetced_behaviour()\n\n\n 
self.resplot.set_ybound(-2,2)\n try:\n if hasattr(self.fig.canvas.manager,'toolbar'):\n self.fig.canvas.manager.toolbar.update()\n except:\n pass\n\n if sed_data is not None :\n self.add_data_plot(sed_data)\n\n if model is not None:\n self.add_model_plot(model)\n\n\n \n self.counter_res=0\n\n\n def _add_res_plot(self):\n\n\n\n\n self.resplot = self.fig.add_subplot(self.gs[1], sharex=self.sedplot)\n\n self.lx_res = 'log($ \\\\nu $) (Hz)'\n self.ly_res = 'res'\n\n self.resplot.set_ylabel(self.ly_res)\n self.resplot.set_xlabel(self.lx_res)\n\n\n\n self.add_res_zeroline()\n\n def clean_residuals_lines(self):\n for i in range(len(self.lines_res_list)):\n self.del_residuals_line(0)\n\n def clean_data_lines(self):\n \n for i in range(len(self.lines_data_list)):\n self.del_data_line(0)\n \n def clean_model_lines(self):\n for i in range(len(self.lines_model_list)):\n self.del_model_line(0)\n \n \n def list_lines(self):\n \n if self.lines_data_list==[] and self.lines_model_list==[]:\n pass\n else:\n\n for ID,plot_line in enumerate(self.lines_data_list):\n print('data',ID, plot_line.get_label())\n\n\n\n for ID,plot_line in enumerate(self.lines_model_list):\n print ('model',ID, plot_line.get_label())\n\n\n\n\n def del_data_line(self,line_ID):\n \n if self.lines_data_list==[]:\n print (\"no lines to delete \")\n \n else:\n \n print (\"removing line: \",self.lines_data_list[line_ID])\n\n line = self.lines_data_list[line_ID]\n\n for item in line:\n # This removes lines\n if np.shape(item) == ():\n item.remove()\n else:\n # This removes containers for data with errorbars\n for item1 in item:\n item1.remove()\n\n # self.sedplot.lines.remove(self.lines_list[line_ID].ref[0])\n\n #self.legend.remove(self.lines_data_list[line_ID].label)\n\n del self.lines_data_list[line_ID]\n\n self.update_legend()\n self.update_plot()\n\n #self.fig.canvas.draw()\n \n \n def del_model_line(self,line_ID):\n \n if self.lines_model_list==[]:\n #print \"no lines to delete \"\n pass\n else:\n\n line=self.lines_model_list[line_ID]\n line.remove()\n\n\n del self.lines_model_list[line_ID]\n\n\n self.update_plot()\n self.update_legend()\n\n def del_residuals_line(self, line_ID):\n\n if self.lines_res_list == []:\n # print \"no lines to delete \"\n pass\n else:\n\n line = self.lines_res_list[line_ID]\n line.remove()\n\n del self.lines_res_list[line_ID]\n\n self.update_plot()\n self.update_legend()\n\n def set_plot_axis_labels(self):\n self.lx = 'log($ \\\\nu $) (Hz)'\n\n if self.frame == 'src':\n\n self.ly = 'log($ \\\\nu L_{\\\\nu} $ ) (erg s$^{-1}$)'\n \n elif self.frame == 'obs':\n\n self.ly = 'log($ \\\\nu F_{\\\\nu} $ ) (erg cm$^{-2}$ s$^{-1}$)'\n else:\n unexpetced_behaviour()\n\n self.sedplot.set_ylabel(self.ly)\n self.sedplot.set_xlabel(self.lx)\n \n #self.sedplot.set_xlim(self.x_min,self.x_max)\n #self.sedplot.set_ylim(self.y_min,self.y_max)\n \n \n def add_res_zeroline(self):\n\n\n y0 = np.zeros(2)\n x0 = [0,30]\n\n self.resplot.plot(x0,y0,'--',color='black')\n self.update_plot()\n\n \n \n \n \n \n \n def rescale(self,x_min=None,x_max=None,y_min=None,y_max=None):\n\n\n \n self.sedplot.set_xlim(x_min,x_max)\n self.sedplot.set_ylim(y_min,y_max)\n\n\n \n #def autoscale(self):\n\n \n #self.sedplot.autoscale_view(tight=True)\n # for l in self.sedplot.lines:\n\n # x_min,x_max=self.sedplot.get_xlim()\n\n # y_min,y_max=self.sedplot.get_ylim()\n \n # self.sedplot.set_xticks(np.arange(int(x_min)-2,int(x_max)+2,1.0))\n \n # self.sedplot.set_xlim(x_min-1,x_max+1)\n \n # self.sedplot.set_ylim(y_min-1,y_max+1)\n \n \n \n # if self.resplot 
is not None :\n \n #self.resplot.autoscale_view(tight=True)\n \n #self.x_min_res=self.x_min-1\n #self.x_max_res=self.x_max+1\n\n # y_min_res,y_max_res=self.resplot.get_ylim()\n # x_min_res,x_max_res=self.resplot.get_xlim()\n\n \n # self.resplot.set_xticks(np.arange(int(x_min_res)-2,int(x_max_res)+2,1.0))\n \n # self.resplot.set_xlim(x_min_res,x_max_res)\n # self.resplot.set_ylim(y_min_res,y_max_res)\n \n \n \n #self.update_plot()\n \n #self.sedplot.set_autoscale_on(False)\n \n \n \n def rescale_res(self,x_min=None,x_max=None,y_min=None,y_max=None):\n\n self.resplot.set_xlim(x_min,x_max)\n self.resplot.set_ylim(y_min,y_max)\n \n self.update_plot()\n \n def update_plot(self):\n self.fig.canvas.draw()\n self.fig.tight_layout()\n\n def update_legend(self,label=None):\n\n _handles=[]\n\n if self.lines_data_list!=[] and self.lines_data_list is not None:\n _handles.extend(self.lines_data_list)\n\n\n if self.lines_model_list!=[] and self.lines_model_list is not None:\n _handles.extend(self.lines_model_list)\n\n\n\n for h in _handles[:]:\n #print('_label',h._label)\n if h._label is None:\n _handles.remove(h)\n elif h._label.startswith('_line'):\n _handles.remove(h)\n else:\n pass\n\n #for h in _handles:\n # print('_label',h._label)\n\n self.sedplot.legend(handles=_handles,loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1, prop={'size':10})\n self.update_plot()\n\n\n\n\n def add_model_plot(self, model, label=None, color=None, line_style=None, flim=None,auto_label=True,fit_range=None):\n\n try:\n #print( \"a\")\n x, y = model.get_model_points(log_log=True, frame = self.frame)\n except Exception as e:\n #print(\"a\",e)\n try:\n\n x, y = model.SED.get_model_points(log_log=True, frame = self.frame)\n\n except Exception as e:\n #print(\"b\", e)\n #print(model, \"!!! Error has no SED instance or something wrong in get_model_points()\")\n #print(e)\n return\n\n #print('->x,y',x,y)\n #if color is None:\n # color = self.counter\n\n if line_style is None:\n line_style = '-'\n\n if label is None and auto_label is True:\n if model.name is not None:\n label = model.name\n else:\n label = 'line %d' % self.counter\n if flim is not None:\n\n msk=y>np.log10(flim)\n x=x[msk]\n y=y[msk]\n else:\n pass\n\n if fit_range is not None:\n msk1 = x < fit_range[1]\n msk2 = x > fit_range[0]\n\n x = x[msk1 * msk2]\n y = y[msk1 * msk2]\n\n line, = self.sedplot.plot(x, y, line_style, label=label,color=color,linewidth=1.0)\n\n\n self.lines_model_list.append(line)\n\n self.update_legend()\n self.update_plot()\n\n self.counter += 1\n\n def add_data_plot(self,sed_data,label=None,color=None,autoscale=True,fmt='o',ms=4,mew=0.5,fit_range=None):\n\n\n\n try:\n x,y,dx,dy,=sed_data.get_data_points(log_log=True,frame=self.frame)\n except:\n print (\"!!! 
ERROR failed to get data points from\", sed_data)\n raise RuntimeError\n \n \n \n # get x,y,dx,dy from SEDdata\n if dx is None:\n dx=np.zeros(len(sed_data.data['nu_data']))\n \n\n if dy is None:\n dy=np.zeros(len(sed_data.data['nu_data']))\n\n UL = sed_data.data['UL']\n \n # set color\n #if color is None:\n # color=self.counter\n\n \n if label is None:\n if sed_data.obj_name is not None :\n label=sed_data.obj_name\n else:\n label='line %d'%self.counter\n\n if fit_range is not None:\n msk1 = x < fit_range[1]\n msk2 = x > fit_range[0]\n\n x = x[msk1 * msk2]\n y = y[msk1 * msk2]\n dx= dx[msk1 * msk2]\n dy = dy[msk1 * msk2]\n UL=UL[msk1 * msk2]\n\n line = self.sedplot.errorbar(x, y, xerr=dx, yerr=dy, fmt=fmt\n , uplims=UL,label=label,ms=ms,mew=mew,color=color)\n\n\n \n\n\n self.lines_data_list.append(line)\n\n\n\n self.counter+=1\n self.update_legend()\n self.update_plot()\n \n\n def add_xy_plot(self,x,y,label=None,color=None,line_style=None,autoscale=False):\n\n #color setting \n #if color is None:\n # color=self.counter\n \n \n \n if line_style is None:\n line_style='-'\n \n\n if label is None:\n label='line %d'%self.counter\n\n line, = self.sedplot.plot(x, y, line_style,label=label)\n\n \n\n self.lines_model_list.append(line)\n\n\n self.counter+=1\n\n self.update_legend()\n self.update_plot()\n\n\n\n \n \n \n\n def add_residual_plot(self,model,data,label=None,color=None,filter_UL=True,fit_range=None):\n\n if self.counter_res == 0:\n self.add_res_zeroline()\n #print('bbbbb')\n if data is not None:\n\n x,y=model.get_residuals(log_log=True,data=data,filter_UL=filter_UL)\n\n if fit_range is not None:\n msk1 = x<fit_range[1]\n msk2 = x>fit_range[0]\n\n x=x[msk1*msk2]\n y=y[msk1*msk2]\n #print('aaaaaaa',fit_range,x)\n line = self.resplot.errorbar(x, y, yerr=np.ones(x.size), fmt='+',color=color)\n self.lines_res_list.append(line)\n self.counter_res += 1\n else:\n pass\n\n\n\n\n \n\n\n\n\n self.update_plot()\n\n\n\n\n\n\n def add_text(self,lines):\n self.PLT.focus(0,0)\n x_min, x_max = self.sedplot.get_xlim()\n\n y_min, y_max = self.sedplot.get_ylim()\n t=''\n for line in lines:\n t+='%s \\\n'%line.strip()\n self.PLT.text(t,font=10,charsize=0.6,x=x_min-1.5,y=y_min-2.85)\n self.PLT.redraw()\n\n\n def save(self,filename=None):\n\n\n if filename is None:\n wd=self.out_dir\n filename = 'jetset_fig.png'\n\n else:\n wd=''\n\n outname = os.path.join(wd,filename)\n self.fig.savefig(outname)\n\n def show(self):\n self.fig.show()\n\n\n\n\nclass PlotPdistr (object):\n\n def __init__(self):\n self.fig, self.ax = plt.subplots()\n\n def plot_distr(self,gamma,n_gamma,y_min=None,y_max=None,x_min=None,x_max=None):\n\n self.ax.plot(np.log10(gamma), np.log10(n_gamma))\n self.ax.set_xlabel(r'log($\gamma$)')\n self.ax.set_ylabel(r'log(n($\gamma$))')\n self.ax.set_ylim(y_min, y_max)\n self.ax.set_xlim(x_min, x_max)\n self.update_plot()\n\n def plot_distr3p(self,gamma,n_gamma,y_min=None,y_max=None,x_min=None,x_max=None):\n\n self.ax.plot(np.log10(gamma), np.log10(n_gamma * gamma * gamma * gamma))\n self.ax.set_xlabel(r'log($\gamma$)')\n self.ax.set_ylabel(r'log(n($\gamma$) \gamma^3)')\n self.ax.set_ylim(y_min, y_max)\n self.ax.set_xlim(x_min, x_max)\n self.update_plot()\n\n def update_plot(self):\n self.fig.canvas.draw()\n\n\nclass PlotSpecComp (object):\n\n def __init__(self):\n self.fig, self.ax = plt.subplots()\n\n\n def plot(self,nu,nuFnu,y_min=None,y_max=None):\n\n self.ax.plot(np.log10(nu), np.log10(nuFnu))\n self.ax.set_xlabel(r'log($ \nu $) (Hz)')\n self.ax.set_ylabel(r'log($ \nu F_{\nu} $ ) (erg cm$^{-2}$ s$^{-1}$)')\n 
self.ax.set_ylim(y_min, y_max)\n self.update_plot()\n\n\n def update_plot(self):\n self.fig.canvas.draw()\n\n\nclass PlotSeedPhotons (object):\n\n def __init__(self):\n self.fig, self.ax = plt.subplots()\n\n\n def plot(self,nu,nuFnu,y_min=None,y_max=None):\n\n self.ax.plot(np.log10(nu), np.log10(nuFnu))\n self.ax.set_xlabel(r'log($ \nu $) (Hz)')\n self.ax.set_ylabel(r'log(n ) (photons cm$^{-3}$ Hz$^{-1}$ ster$^{-1}$)')\n self.ax.set_ylim(y_min, y_max)\n self.update_plot()\n\n\n def update_plot(self):\n self.fig.canvas.draw()","sub_path":"jetset/plot_sedfit.py","file_name":"plot_sedfit.py","file_ext":"py","file_size_in_byte":15851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"550669259","text":"from fake_useragent import UserAgent\nimport threading\nimport json, requests, random\nimport telnetlib\nimport time\n\nclass ProxyIPPool():\n \n def __init__(self):\n print('Initialising the proxy IP pool...')\n self._pool = set()\n\n proxys = read_proxy_file(\"proxy_ip.txt\")\n\n failed_proxys = set()\n for proxy in proxys:\n splits = proxy.split(\":\")\n protocol = splits[0]\n proxy_ip = splits[1][2:]\n proxy_port = splits[2]\n if exec_telnet(proxy_ip, proxy_port):\n p = ProxyIP(protocol, proxy_ip, proxy_port)\n self._pool.add(p)\n else:\n failed_proxys.add(proxy)\n print(f\"Removing dead proxy IP: {proxy}\")\n alive_proxys = proxys - failed_proxys\n with open(\"proxy_ip.txt\", \"w+\", encoding=\"utf8\") as f:\n for line in alive_proxys:\n f.write(f\"{line}\\n\")\n\n def random_proxyip(self):\n return random.choice(list(self._pool))\n \n def add(self, proxy_ip):\n if proxy_ip not in self._pool:\n self._pool.add(proxy_ip)\n print('Added a proxy IP to the pool:', proxy_ip)\n else:\n print('Proxy IP already in the pool:', proxy_ip, 'no need to add it again')\n\n\nclass ProxyIP():\n '''\n Proxy IP class\n '''\n def __init__(self, protocol=None, ip=None, port=None):\n self.protocol = protocol\n self.ip = ip\n self.port = port\n\n def __str__(self):\n return f\"{self.protocol}://{self.ip}:{self.port}\"\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False\n def __hash__(self):\n return hash(self.protocol + self.ip + self.port)\n\n @classmethod\n def get_proxy_ip(cls):\n url = \"https://ip.jiangxianli.com/api/proxy_ip\"\n ua = UserAgent(verify_ssl=False)\n user_agent = ua.random\n\n proxy_info = json.loads(requests.get(url, headers={\"user-agent\": user_agent}).text).get(\"data\")\n\n protocol = proxy_info.get(\"protocol\")\n ip = proxy_info.get(\"ip\")\n port = proxy_info.get(\"port\")\n \n proxys = read_proxy_file(\"proxy_ip.txt\")\n\n proxy_ip = ProxyIP(protocol, ip, port)\n if str(proxy_ip) not in proxys:\n with open(\"proxy_ip.txt\", \"a+\", encoding=\"utf8\") as f:\n f.write(f\"{proxy_ip}\\n\")\n print(f\"Fetched a free proxy IP: {proxy_ip}\")\n else:\n print(f'Proxy IP {proxy_ip} was already fetched, not writing it to the local file')\n return proxy_ip\n \ndef exec_telnet(ip, port):\n try:\n telnetlib.Telnet(ip, port=port, timeout=1)\n except Exception:\n # the connection failed, so treat the proxy as dead\n return False\n # the connection succeeded\n return True\n\ndef read_proxy_file(filename):\n proxys = set()\n with open(filename, \"r\", encoding=\"utf8\") as f:\n for line in f.readlines():\n proxys.add(line.rstrip())\n return proxys\n\n\nif __name__ == \"__main__\":\n pool = ProxyIPPool()\n for _ in range(10):\n pool.add(ProxyIP.get_proxy_ip())\n 
time.sleep(3)","sub_path":"week03/homework2/proxy_ip.py","file_name":"proxy_ip.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"256701690","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom blog.models import BlogPost, ContactForm\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nfrom django.core.mail import EmailMessage\n\ndef index(request):\n blogpost_list = BlogPost.objects.all().order_by('-pub_date')\n paginator = Paginator(blogpost_list, 5)\n try:\n\t page = int(request.GET.get('page', '1'))\n except ValueError:\n\t page = 1\n\n try:\n\t blogentries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n\t blogentries = paginator.page(paginator.num_pages)\n\n return render_to_response('blog/index.html', {'blogentries': blogentries})\n\n\ndef about(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n email = EmailMessage(subject='New blog message', body='Email: ' + cd['email'] + ' Subject: ' + cd['subject'] + ' Message: ' + cd['message'], from_email=cd.get('email', 'noreply@example.com'), to=[settings.EMAIL_HOST_USER])\n email.send()\n return render_to_response('blog/thanks.html')\n else:\n form = ContactForm(\n initial={'subject': ''}\n )\n return render_to_response('blog/about.html', {'form': form}, context_instance=RequestContext(request))\n\n\ndef archive(request):\n blogpost_list = BlogPost.objects.all().order_by('-pub_date')\n paginator = Paginator(blogpost_list, 25)\n try:\n\t page = int(request.GET.get('page', '1'))\n except ValueError:\n\t page = 1\n\n try:\n\t blogentries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n\t blogentries = paginator.page(paginator.num_pages)\n\n return render_to_response('blog/archive.html', {'blogentries': blogentries})\n \ndef projects(request):\n return render_to_response('blog/projects.html') \n\ndef details(request, year, month, day, slug_name):\n blogpost = get_object_or_404(BlogPost, slug=slug_name, pub_date__year=year, pub_date__month=month, pub_date__day=day)\n return render_to_response('blog/details.html', {'blogpost': blogpost})\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"116021951","text":"from tensorflow.compat.v1 import InteractiveSession\nfrom tensorflow.compat.v1 import ConfigProto\nimport numpy as np\nimport cv2\nfrom PIL import Image\nfrom tensorflow.python.saved_model import tag_constants\nfrom core.yolov4 import filter_boxes\nimport core.utils as utils\nfrom absl.flags import FLAGS\nfrom absl import app, flags, logging\nimport time\nimport tensorflow as tf\n\n\nweights = './checkpoints/yolov4-416'\nsize = 416\niou = 0.45\nscore = 0.25\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\nSTRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config()\ninput_size = size\n\nsaved_model_loaded = tf.saved_model.load(\n weights, tags=[tag_constants.SERVING])\ninfer = saved_model_loaded.signatures['serving_default']\n\nframe_id = 0\nfps = 0.0\n\n\ndef detectObjectV4(frame):\n frame_size = frame.shape[:2]\n image_data = cv2.resize(frame, (input_size, 
input_size))\n image_data = image_data / 255.\n image_data = image_data[np.newaxis, ...].astype(np.float32)\n prev_time = time.time()\n\n batch_data = tf.constant(image_data)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n t1 = time.time()\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=50,\n max_total_size=50,\n iou_threshold=iou,\n score_threshold=score\n )\n pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(),\n valid_detections.numpy()]\n # image = utils.draw_bbox(frame, pred_bbox)\n # print(utils.get_image_info(frame, pred_bbox))\n return utils.get_image_info(frame, pred_bbox)\n","sub_path":"detectvideo.py","file_name":"detectvideo.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"305292758","text":"# -*- coding: utf-8 -*-\n\nname = 'python'\n\nversion = '3.6.6'\n\nauthor = ['python']\n\ntools = [\"python\"]\n\nrequires = [] \n\nvariants = [ ]\n\n\n\ndef commands():\n import os\n import sys\n\n applications_path = os.environ[\"APPLICATIONS_PATH\"]\n\n python_path = os.path.join(applications_path, \"python\", \"%s\"%version).replace(\"/\", os.sep)\n\n if sys.platform.startswith(\"win32\"):\n env.PATH.append(python_path)\n env.PATH.append(os.path.join(python_path, \"Scripts\").replace('/', os.sep))\n env.PATH.append(os.path.join(python_path, \"bin\").replace('/', os.sep))\n\n env.INCLUDE.append(os.path.join(python_path, \"include\").replace('/', os.sep))\n env.LIB.append(os.path.join(python_path, \"libs\").replace('/', os.sep))\n","sub_path":"Applications/python/3.6.6/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"213395584","text":"from unittest.mock import patch\n\nfrom django.urls import reverse\n\nfrom sentry.models.integrations.integration import Integration\nfrom sentry.models.integrations.repository_project_path_config import RepositoryProjectPathConfig\nfrom sentry.models.repository import Repository\nfrom sentry.testutils import APITestCase\nfrom sentry.testutils.helpers.features import apply_feature_flag_on_cls\nfrom sentry.testutils.silo import region_silo_test\n\n\n@region_silo_test(stable=True)\n@apply_feature_flag_on_cls(\"organizations:derive-code-mappings\")\nclass OrganizationDeriveCodeMappingsTest(APITestCase):\n def setUp(self):\n super().setUp()\n self.login_as(user=self.user)\n self.project = self.create_project(organization=self.organization)\n self.url = reverse(\n \"sentry-api-0-organization-derive-code-mappings\",\n args=[self.organization.slug],\n )\n\n self.repo = self.create_repo(\n name=\"getsentry/sentry\",\n provider=\"integrations:github\",\n integration_id=self.integration.id,\n project=self.project,\n )\n\n @patch(\"sentry.integrations.github.GitHubIntegration.get_trees_for_org\")\n def test_get_single_match(self, mock_get_trees_for_org):\n config_data = {\n \"stacktraceFilename\": \"stack/root/file.py\",\n }\n expected_matches = [\n {\n \"filename\": \"stack/root/file.py\",\n \"repo_name\": \"getsentry/codemap\",\n \"repo_branch\": \"master\",\n \"stacktrace_root\": \"/stack/root\",\n \"source_path\": 
\"/source/root/\",\n }\n ]\n with patch(\n \"sentry.integrations.utils.code_mapping.CodeMappingTreesHelper.list_file_matches\",\n return_value=expected_matches,\n ):\n response = self.client.get(self.url, data=config_data, format=\"json\")\n assert mock_get_trees_for_org.call_count == 1\n assert response.status_code == 200, response.content\n assert response.data == expected_matches\n\n @patch(\"sentry.integrations.github.GitHubIntegration.get_trees_for_org\")\n def test_get_multiple_matches(self, mock_get_trees_for_org):\n config_data = {\n \"stacktraceFilename\": \"stack/root/file.py\",\n }\n expected_matches = [\n {\n \"filename\": \"stack/root/file.py\",\n \"repo_name\": \"getsentry/codemap\",\n \"repo_branch\": \"master\",\n \"stacktrace_root\": \"/stack/root\",\n \"source_path\": \"/source/root/\",\n },\n {\n \"filename\": \"stack/root/file.py\",\n \"repo_name\": \"getsentry/codemap\",\n \"repo_branch\": \"master\",\n \"stacktrace_root\": \"/stack/root\",\n \"source_path\": \"/source/root/\",\n },\n ]\n with patch(\n \"sentry.integrations.utils.code_mapping.CodeMappingTreesHelper.list_file_matches\",\n return_value=expected_matches,\n ):\n response = self.client.get(self.url, data=config_data, format=\"json\")\n assert mock_get_trees_for_org.call_count == 1\n assert response.status_code == 200, response.content\n assert response.data == expected_matches\n\n def test_get_no_installation(self):\n config_data = {\n \"projectId\": self.project.id,\n \"stacktraceFilename\": \"stack/root/file.py\",\n }\n Integration.objects.all().delete()\n response = self.client.get(self.url, data=config_data, format=\"json\")\n assert response.status_code == 404, response.content\n\n def test_post_simple(self):\n config_data = {\n \"projectId\": self.project.id,\n \"stackRoot\": \"/stack/root\",\n \"sourceRoot\": \"/source/root\",\n \"defaultBranch\": \"master\",\n \"repoName\": \"getsentry/codemap\",\n }\n response = self.client.post(self.url, data=config_data, format=\"json\")\n repo = Repository.objects.get(name=\"getsentry/codemap\")\n assert response.status_code == 201, response.content\n assert response.data == {\n \"id\": str(response.data[\"id\"]),\n \"projectId\": str(self.project.id),\n \"projectSlug\": self.project.slug,\n \"repoId\": str(repo.id),\n \"repoName\": \"getsentry/codemap\",\n \"provider\": {\n \"aspects\": {},\n \"features\": [\"codeowners\", \"commits\", \"issue-basic\", \"stacktrace-link\"],\n \"name\": \"GitHub\",\n \"canDisable\": False,\n \"key\": \"github\",\n \"slug\": \"github\",\n \"canAdd\": True,\n },\n \"integrationId\": str(self.integration.id),\n \"stackRoot\": \"/stack/root\",\n \"sourceRoot\": \"/source/root\",\n \"defaultBranch\": \"master\",\n }\n\n def test_post_no_installation(self):\n config_data = {\n \"projectId\": self.project.id,\n \"stackRoot\": \"/stack/root\",\n \"sourceRoot\": \"/source/root\",\n \"defaultBranch\": \"master\",\n \"repoName\": \"name\",\n }\n Integration.objects.all().delete()\n response = self.client.post(self.url, data=config_data, format=\"json\")\n assert response.status_code == 404, response.content\n\n def test_post_existing_code_mapping(self):\n RepositoryProjectPathConfig.objects.create(\n project=self.project,\n stack_root=\"/stack/root\",\n source_root=\"/source/root/wrong\",\n default_branch=\"master\",\n repository=self.repo,\n organization_integration=self.organization_integration,\n )\n\n config_data = {\n \"projectId\": self.project.id,\n \"stackRoot\": \"/stack/root\",\n \"sourceRoot\": \"/source/root\",\n 
\"defaultBranch\": \"master\",\n \"repoName\": \"name\",\n }\n response = self.client.post(self.url, data=config_data, format=\"json\")\n assert response.status_code == 201, response.content\n\n new_code_mapping = RepositoryProjectPathConfig.objects.get(\n project=self.project, stack_root=\"/stack/root\"\n )\n assert new_code_mapping.source_root == \"/source/root\"\n","sub_path":"tests/sentry/api/endpoints/test_organization_derive_code_mappings.py","file_name":"test_organization_derive_code_mappings.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"137107179","text":"from operator import mul\r\n\r\n\r\nn=int(input())\r\nlst=[]\r\nlst2=[]\r\nlst4=[]\r\nfor i in range(n):\r\n lst1=[str(i) for i in input().split()][:50]\r\n #r=m.split()\r\n #ss=m.replace(':',',')\r\n lst.append(lst1[0])\r\n lst.append(lst1[1])\r\n #print(lst1) \r\n\r\n\r\n \r\n#print(lst)\r\nfor i in lst:\r\n my_time=i\r\n fact=(60,1)\r\n t1=sum(i*j for i,j in zip(map(int,my_time.split(':')),fact))\r\n lst2.append(t1)\r\n#print(lst2)\r\nrr=len(lst2)\r\neven_pos=lst2[2:rr:2]\r\nodd_pos=lst2[1:rr-1:2]\r\n#print(even_pos)\r\n#print(odd_pos)\r\nfor i,j in zip(even_pos,odd_pos):\r\n if(lst2[-1]!=1080):\r\n if(i!=j):\r\n diff=i-j\r\n diff1=1080-lst2[-1]\r\n lst4.append(diff)\r\nlst4.append(diff1)\r\n #time_slot_start=lst[i]\r\nmaximum_time=max(lst4)\r\nprint('Longest nap will last for',maximum_time,'minutes')\r\n#print(int(lst[0][1])-int(lst[1][0]))\r\n","sub_path":"longest nap.py","file_name":"longest nap.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"45747900","text":"def opcode_compile(opcode):\n operations = {1: '+', 2: '*', 7: '<', 8: '=='}\n relative_base = 0\n codes_1 = {0: 'opcode[opcode[i+1]]', 1: 'opcode[i+1]', 2: 'opcode[opcode[i+1] + relative_base]'}\n codes_2 = {0: 'opcode[opcode[i+2]]', 1: 'opcode[i+2]', 2: 'opcode[opcode[i+2] + relative_base]'}\n codes_3 = {0: 'opcode[opcode[i+3]]', 1: 'opcode[i+3]', 2: 'opcode[opcode[i+3] + relative_base]'}\n bools = {True: 1, False: 0}\n\n i = 0\n while i < len(opcode):\n instruct = opcode[i]\n mode_1 = 0\n mode_2 = 0\n mode_3 = 0\n if len(str(instruct)) > 3:\n mode_3 = (instruct // 10000) % 10\n\n if len(str(instruct)) > 2:\n mode_1 = (instruct // 100) % 10\n mode_2 = (instruct // 1000) % 10\n instruct %= 10\n\n if instruct == 1 or instruct == 2:\n exec(codes_3[mode_3] + '=' + codes_1[mode_1] + operations[instruct] + codes_2[mode_2])\n i += 4\n\n elif instruct == 3:\n exec(codes_1[mode_1] + '=' + input(\"Digit please: \"))\n i += 2\n\n elif instruct == 4:\n print(eval(codes_1[mode_1]))\n i += 2\n\n elif instruct == 5:\n if eval(codes_1[mode_1]):\n i = eval(codes_2[mode_2])\n else:\n i += 3\n\n elif instruct == 6:\n if not eval(codes_1[mode_1]):\n i = eval(codes_2[mode_2])\n else:\n i += 3\n\n elif instruct == 7 or instruct == 8:\n exec(codes_3[mode_3] + '=' + str(bools[eval(codes_1[mode_1] + operations[instruct] + codes_2[mode_2])]))\n i += 4\n\n elif instruct == 9:\n relative_base += int(eval(codes_1[mode_1]))\n i += 2\n\n elif instruct == 99:\n return\n\n\nif __name__ == '__main__':\n with open(\"input\", \"r\") as file:\n opcode = file.read()\n\n opcode = [int(n) for n in opcode.split(',')] + [0]*100000\n # Part 1 and 2 use the same code\n # For Part 1 run the intcode computer and input: 1\n # For Part 2 run the intcode computer and input: 2\n 
opcode_compile(opcode)\n","sub_path":"day9/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"153492939","text":"from django.urls import include, path\nfrom adminSite import views\n\napp_name = 'adminSite'\n\nurlpatterns = [\n\n path('', views.homeAdmin, name='homeAdmin'),\n\n path('global/', include(('globalModels.urls', 'global'), namespace='global')),\n path('seasonal/', include(('seasonalModels.urls', 'seasonal'), namespace='seasonal')),\n path('match/', include(('matchModels.urls', 'match'), namespace='match')),\n\n]\n\n\n'''\n# url(r'^seasonChoice/', calendars_views.seasonChoice, name='seasonChoice'),\n# url(r'^addMatch/', views.addMatch, name='addMatch'),\nurl(r'^addPlayer/', views.addPlayer, name='addPlayer'),\nurl(r'^addReferee/', views.addReferee, name='addReferee'),\nurl(r'^addStadium/', views.addStadium, name='addStadium'),\n# url(r'^deleteMatchSeasonChoice/', views.deleteMatchSeasonChoice, name='deleteMatchSeasonChoice'),url(r'^deleteMatch/([0-9]{4})-([0-9]{4})', views.deleteMatch, name='deleteMatch'),\n# url(r'^deleteMatchSuccess/([0-9]{4})-([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/$',\n# views.deleteMatchSuccess, name='deleteMatchSuccess'),\n# url(r'^editMatchSeasonChoice/', views.editMatchSeasonChoice, name='editMatchSeasonChoice'),\n# url(r'^editMatch/([0-9]{4})-([0-9]{4})', views.editMatch, name='editMatch'),\n# url(r'^editSpecificMatch/([0-9]{4})-([0-9]{4})/([0-9]{1,2})/([0-9]{1,2})/$', views.editSpecificMatch,\n# name='editSpecificMatch'),\n# url(r'^viewMatch/', views.viewMatch, name='viewMatch'),\nurl(r'^viewClassificationSeasonChoice/', views.viewClassificationSeasonChoice, name='viewClassificationSeasonChoice'),\nurl(r'^viewClassification/', views.viewClassification, name='viewClassification'),\nurl(r'^viewPlayersSeasonChoice/', views.viewPlayersSeasonChoice, name='viewPlayersSeasonChoice'),\nurl(r'^viewPlayers/([0-9]{4})-([0-9]{4})/', views.viewPlayers, name='viewPlayers'),\nurl(r'^viewRefereeSeasonChoice/', views.viewRefereeSeasonChoice, name='viewRefereeSeasonChoice'),\nurl(r'^viewReferees/([0-9]{4})-([0-9]{4})/', views.viewReferees, name='viewReferees'),\nurl(r'^viewStadiums/', views.viewStadiums, name='viewStadiums'),\n'''\n","sub_path":"Betcomm/adminSite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"44366083","text":"import pip\ndef install(package):\n pip.main(['install', package])\n\nimport urllib.request\nimport requests\nimport sys\nimport cgitb\nimport urllib3\nimport zipfile\nimport datetime\nimport io\nimport os \nimport time\nimport glob\nimport pandas as pd\nimport csv\nimport json\n\nfrom bs4 import BeautifulSoup as bsoup\nimport sklearn\nfrom sklearn import datasets\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.datasets import SupervisedDataSet\nfrom pybrain.supervised.trainers import BackpropTrainer\n\n\n# Create logfile.\nlogfile = open(\"Freddie-Mac-logs.txt\", \"a\")\ndef log_entry(s):\n #print('Date now: %s' % datetime.datetime.now())\n\n timestamp = '[%s] : ' % datetime.datetime.now()\n log_line = timestamp + s + '\\n'\n logfile.write(log_line)\n logfile.flush()\n\ndef getYear():\n #Taking data from user\n print(\"Please enter a year and quarter in config file(Example: Q12005)\")\n with open('config.json') 
as json_file: \n json_data = json.load(json_file)\n \n year_full=json_data[\"args\"][0]\n try:\n year = int(year_full[2:])\n #print('year ',year)\n quarter = year_full[:2] \n #print('quarter ',quarter)\n\n next_quarter = int(quarter[1:]) + 1\n next_year = year\n if next_quarter > 4:\n next_quarter = 1\n next_year = year+1\n next_year_full = \"Q\"+ str(next_quarter) + str(next_year)\n\n print(next_year_full)\n if(int(year) < 1999 or int(year) > 2016):\n print(\"Year can have only numeric values between 1999 and 2016\")\n log_entry(\"Wrong Year to process : Year out of range\")\n else:\n year = year_full\n next_year = next_year_full\n print(year,next_year)\n return year,next_year\n except Exception as e: \n print(\"Year should be in format QNYYYY\")\n log_entry(\"Wrong Year to process : Invalid format found in year\")\n\ndef get_login():\n url =\"https://freddiemac.embs.com/FLoan/secure/auth.php\"\n session = requests.session()\n \n with open('config.json') as json_file: \n json_data = json.load(json_file)\n\n session_data = {'username':json_data[\"args\"][1],\n 'password':json_data[\"args\"][2]}\n\n r = session.post(url,data = session_data)\n #print(r.cookies)\n\n response = session.get(\"https://freddiemac.embs.com/FLoan/Data/download.php\")\n #print(response.text)\n\n if 'Terms and Conditions' in response.text:\n session_data = {'username':json_data[\"args\"][1] ,\n 'password':json_data[\"args\"][2],\n 'accept':'Yes',\n 'action': 'acceptTandC',\n 'acceptSubmit':'Continue',\n 'accept.checked':'true'}\n\n r = session.post('https://freddiemac.embs.com/FLoan/Data/download.php',data = session_data)\n #print(r.text)\n\n response = session.get(\"https://freddiemac.embs.com/FLoan/Data/download.php\")\n #print(response.content)\n return response,session\n\ndef get_file_list():\n if not os.path.exists(directory):\n os.makedirs(directory)\n #print(os.path.isdir(directory))\n historical_file_list = glob.glob(directory+'//historical_data*.txt')\n return historical_file_list\n\ndef getData(current_year,next_year,response,session):\n url1 = 'https://freddiemac.embs.com/FLoan/Data/'\n soup= bsoup(response.text,'lxml')\n #print(url1)\n\n href = soup.findAll ('a',limit=None)\n\n fileList = get_file_list()\n print(\"File list type=\",type(fileList),sep=\" \")\n for a in href:\n zip_file_url = url1+a['href']\n #print(zip_file_url)\n #print (os.getcwd())\n hist_data = directory + \"\\\\\\\\\" + a.text[:24]+'txt'\n hist_data_time = directory + \"\\\\\\\\\" + a.text[:17] + 'time_' + a.text[17:24] + 'txt'\n if current_year in zip_file_url:\n count = 0\n #if any(hist_data in s for s in fileList):\n #if hist_data in fileList:\n if any(current_year in s for s in fileList):\n count+= 1 \n if any(current_year in s for s in fileList):\n count+= 1\n if count != 2:\n print(zip_file_url)\n zfile = session.get(zip_file_url)\n #time.sleep(5)\n print(zfile)\n z = zipfile.ZipFile(io.BytesIO(zfile.content))\n z.extractall(directory)\n currData = hist_data_time\n if next_year in zip_file_url:\n print('yes')\n count = 0\n if any(next_year in s for s in fileList):\n count = count + 1 \n if any(next_year in s for s in fileList):\n count = count + 1\n if count != 2:\n print(zip_file_url)\n zfile = session.get(zip_file_url)\n #time.sleep(5)\n print(zfile)\n z = zipfile.ZipFile(io.BytesIO(zfile.content))\n z.extractall(directory)\n nexData = hist_data_time \n return currData,nexData\n\ndef df_strip(df):\n df = df.copy()\n for c in df.columns:\n if df[c].dtype == np.object:\n df[c] = pd.core.strings.str_strip(df[c])\n df = 
df.rename(columns={c:c.strip()})\n return df\n\ndef missing_values_table(df): \n mis_val = df.isnull().sum()\n mis_val_percent = 100 * df.isnull().sum()/len(df)\n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\n mis_val_table_ren_columns = mis_val_table.rename(\n columns = {0 : 'Missing Values', 1 : '% of Total Values'})\n return mis_val_table_ren_columns \n\ndef add_header(data):\n header = ['LOAN_SEQUENCE_NUMBER','MONTHLY_REPORTING_PERIOD','CURRENT_ACTUAL_UPB','CURRENT_LOAN_DELINQUENCY_STATUS',\n 'LOAN_AGE','REMAINING_MONTHS_TO_LEGAL_MATURITY','REPURCHASE_FLAG','MODIFICATION_FLAG',\n 'ZERO_BALANCE_CODE','ZERO_BALANCE_EFFECTIVE_DATE','CURRENT_INTEREST_RATE','CURRENT_DEFERRED_UPB',\n 'DUE_DATE_OF_LAST_PAID_INSTALLMENT','MI_RECOVERIES','NET_SALES_PROCEEDS','NON_MI_RECOVERIES','EXPENSES','Legal_Costs',\n 'Maintenance_and_Preservation_Costs','Taxes_and_Insurance','Miscellaneous_Expenses','Actual_Loss_Calculation',\n 'Modification_Cost']\n curr_data = pd.DataFrame(data)\n curr_data.columns = header\n return curr_data\n\ndef ConvertToNumeric1(curr_data): \n #CreditScore - Mean\n \n #print(curr_data[curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'] == 'R'] = 1)\n curr_data = curr_data[pd.notnull(curr_data['LOAN_SEQUENCE_NUMBER'])]\n #curr_data['LOAN_SEQUENCE_NUMBER'] = pd.to_numeric(curr_data['LOAN_SEQUENCE_NUMBER'])\n #curr_data['LOAN_SEQUENCE_NUMBER'] = curr_data[curr_data['LOAN_SEQUENCE_NUMBER'].isnull()== False]['LOAN_SEQUENCE_NUMBER']\n #curr_data['LOAN_SEQUENCE_NUMBER'] = curr_data['LOAN_SEQUENCE_NUMBER'].fillna((curr_data['LOAN_SEQUENCE_NUMBER'].mean()))\n\n #MONTHLY_REPORTING_PERIOD\n curr_data['MONTHLY_REPORTING_PERIOD'] = pd.to_numeric(curr_data['MONTHLY_REPORTING_PERIOD'])\n curr_data['MONTHLY_REPORTING_PERIOD'] = curr_data['MONTHLY_REPORTING_PERIOD'].fillna((curr_data['MONTHLY_REPORTING_PERIOD'].mode()))\n\n #CURRENT_ACTUAL_UPB - Mode\n curr_data['CURRENT_ACTUAL_UPB'] = pd.to_numeric(curr_data['CURRENT_ACTUAL_UPB'])\n curr_data['CURRENT_ACTUAL_UPB'] = curr_data['CURRENT_ACTUAL_UPB'].fillna((curr_data['CURRENT_ACTUAL_UPB'].mean()))\n\n #CURRENT_LOAN_DELINQUENCY_STATUS - Mean\n curr_data[curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'] == 'R'] = 1\n curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'] = pd.to_numeric(curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'])\n curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'] = curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'].fillna((curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'].mean()))\n\n #LOAN_AGE - Mode\n curr_data['LOAN_AGE'] = pd.to_numeric(curr_data['LOAN_AGE'])\n curr_data['LOAN_AGE'] = curr_data['LOAN_AGE'].fillna((curr_data['LOAN_AGE'].mode()))\n\n #REMAINING_MONTHS_TO_LEGAL_MATURITY\n curr_data['REMAINING_MONTHS_TO_LEGAL_MATURITY'] = pd.to_numeric(curr_data['REMAINING_MONTHS_TO_LEGAL_MATURITY'])\n curr_data['REMAINING_MONTHS_TO_LEGAL_MATURITY'] = curr_data['REMAINING_MONTHS_TO_LEGAL_MATURITY'].fillna((curr_data['REMAINING_MONTHS_TO_LEGAL_MATURITY'].mean()))\n\n #REPURCHASE_FLAG\n #curr_data['REPURCHASE_FLAG'] = pd.to_numeric(curr_data['REPURCHASE_FLAG'])\n curr_data['REPURCHASE_FLAG'] = curr_data['REPURCHASE_FLAG'].fillna((curr_data['REPURCHASE_FLAG'].mode()))\n\n #MODIFICATION_FLAG\n #curr_data['MODIFICATION_FLAG'] = pd.to_numeric(curr_data['MODIFICATION_FLAG'])\n curr_data['MODIFICATION_FLAG'] = curr_data['MODIFICATION_FLAG'].fillna((curr_data['MODIFICATION_FLAG'].mode()))\n\n #ZERO_BALANCE_CODE\n curr_data['ZERO_BALANCE_CODE'] = pd.to_numeric(curr_data['ZERO_BALANCE_CODE'])\n curr_data['ZERO_BALANCE_CODE'] = 
curr_data['ZERO_BALANCE_CODE'].fillna((curr_data['ZERO_BALANCE_CODE'].mode()))\n\n #ZERO_BALANCE_EFFECTIVE_DATE\n curr_data['ZERO_BALANCE_EFFECTIVE_DATE'] = pd.to_numeric(curr_data['ZERO_BALANCE_EFFECTIVE_DATE'])\n curr_data['ZERO_BALANCE_EFFECTIVE_DATE'] = curr_data['ZERO_BALANCE_EFFECTIVE_DATE'].fillna((curr_data['ZERO_BALANCE_EFFECTIVE_DATE'].mode()))\n\n #CURRENT_INTEREST_RATE\n curr_data['CURRENT_INTEREST_RATE'] = pd.to_numeric(curr_data['CURRENT_INTEREST_RATE'])\n curr_data['CURRENT_INTEREST_RATE'] = curr_data['CURRENT_INTEREST_RATE'].fillna((curr_data['CURRENT_INTEREST_RATE'].mode()))\n\n #CURRENT_DEFERRED_UPB\n #curr_data['CURRENT_DEFERRED_UPB'] = pd.to_numeric(curr_data['CURRENT_DEFERRED_UPB'])\n curr_data['CURRENT_DEFERRED_UPB'] = curr_data['CURRENT_DEFERRED_UPB'].fillna((curr_data['CURRENT_DEFERRED_UPB'].mode()))\n\n \n return curr_data\n\n\ndef ConvertToNumeric2(curr_data):\n number = LabelEncoder()\n #print(curr_data['ServicerName'])\n\n # N=0,Y=1 \n curr_data['REPURCHASE_FLAG'] = number.fit_transform(curr_data['REPURCHASE_FLAG'].astype('str'))\n\n # N=0,Y=1\n curr_data['MODIFICATION_FLAG'] = number.fit_transform(curr_data['MODIFICATION_FLAG'].astype('str'))\n \n \n curr_data['LOAN_ORIGNATION_QUARTER'] = number.fit_transform(curr_data['LOAN_ORIGNATION_QUARTER'].astype('str'))\n\n #print(curr_data['ServicerName'])\n return curr_data\n\n\ndef seperateData(curr_data):\n curr_data['MONTHLY_REPORTING_YEAR'] = curr_data['MONTHLY_REPORTING_PERIOD'].apply(lambda x: str(x)[:4])\n curr_data['MONTHLY_REPORTING_YEAR'] = pd.to_numeric(curr_data['MONTHLY_REPORTING_YEAR'])\n curr_data['MONTHLY_REPORTING_MONTH'] = curr_data['MONTHLY_REPORTING_PERIOD'].apply(lambda x: str(x)[4:])\n curr_data['MONTHLY_REPORTING_MONTH'] = pd.to_numeric(curr_data['MONTHLY_REPORTING_MONTH'])\n curr_data['MONTHLY_REPORTING_MONTH'] = curr_data['MONTHLY_REPORTING_MONTH'].fillna((curr_data['MONTHLY_REPORTING_MONTH'].mode()))\n\n curr_data['LOAN_ORIGNATION_YEAR'] = curr_data['LOAN_SEQUENCE_NUMBER'].apply(lambda x: str(x)[2:4])\n curr_data['LOAN_ORIGNATION_YEAR'] = pd.to_numeric(curr_data['LOAN_ORIGNATION_YEAR'])\n curr_data['LOAN_ORIGNATION_YEAR'] = curr_data['LOAN_ORIGNATION_YEAR'].fillna((curr_data['LOAN_ORIGNATION_YEAR'].mode()))\n\n curr_data['LOAN_ORIGNATION_QUARTER'] = curr_data['LOAN_SEQUENCE_NUMBER'].apply(lambda x: str(x)[4:6])\n curr_data['LOAN_ORIGNATION_QUARTER'] = curr_data['LOAN_ORIGNATION_QUARTER'].fillna((curr_data['LOAN_ORIGNATION_QUARTER'].mode()))\n\n #curr_data['LOAN_ORIGNATION_QUARTER'] = pd.to_numeric(curr_data['LOAN_ORIGNATION_QUARTER'])\n \n return curr_data \n\n\ndef selectColumns(curr_data):\n res = missing_values_table(curr_data)\n col_to_consider = res.index[res['% of Total Values']<60]\n return col_to_consider.values.tolist()\n\ndirectory = 'all_data'\nyear,next_year = getYear()\n\nresponse, session = get_login()\n#hist_data,hist_data_time = getData(year,next_year,response,session)\n#getData(year,next_year,response,session)\ndataFile,dataFile1 = getData(year,next_year,response,session)\ndata = pd.read_csv(dataFile,delimiter='|', header=None,low_memory=True)\ndata1 = pd.read_csv(dataFile1,delimiter='|', header=None,low_memory=True)\n\n\n#data = pd.read_csv('historical_data1_time_Q120051.txt',delimiter='\\t', header=None,low_memory=True)\n#data = np.loadtxt(fname = 'all_data\\\\historical_data1_Q12013.txt', delimiter = '|')\n#data\n\n#data1 = data\n#data1 = pd.read_csv('historical_data1_time_Q120051.txt',delimiter='\\t', header=None,low_memory=True)\n\ncurr_data = add_header(data)\ncurr_data 
= df_strip(curr_data)\nmissing_values_table(curr_data)\ncurr_data = df_strip(curr_data)\ncurr_data = ConvertToNumeric1(curr_data)\ncurr_data = seperateData(curr_data)\ncurr_data = ConvertToNumeric2(curr_data)\n\n#print(curr_data[curr_data['NumberOfUnits'].isnull()== True]['NumberOfUnits'])\ncol_to_consider=selectColumns(curr_data)\n#print(col_to_consider)\n\n\nnext_data = add_header(data1)\nnext_data = df_strip(next_data)\nmissing_values_table(next_data)\nnext_data = df_strip(next_data)\nnext_data = ConvertToNumeric1(next_data)\nnext_data = seperateData(next_data)\nnext_data = ConvertToNumeric2(next_data)\n\n#print(curr_data[curr_data['NumberOfUnits'].isnull()== True]['NumberOfUnits'])\ncol_to_consider=selectColumns(next_data)\n#print(col_to_consider)\n\n#print(curr_data[curr_data['CURRENT_LOAN_DELINQUENCY_STATUS'] == 'R'])\n\ncurr_data[col_to_consider].dtypes\n\ncol_to_consider.remove('LOAN_SEQUENCE_NUMBER')\ncol_to_consider.remove('CURRENT_LOAN_DELINQUENCY_STATUS')\ncol_to_consider.remove('MONTHLY_REPORTING_PERIOD')\ncol_to_consider.remove('MONTHLY_REPORTING_MONTH')\ncol_to_consider.remove('LOAN_ORIGNATION_YEAR')\ncol_to_consider.remove('LOAN_ORIGNATION_QUARTER')\n\n\ncurr_data['YN_CURRENT_LOAN_DELINQUENCY_STATUS'] = (curr_data.CURRENT_LOAN_DELINQUENCY_STATUS > 0).astype(int)\n#next_data['YN_CURRENT_LOAN_DELINQUENCY_STATUS'] = (next_data.CURRENT_LOAN_DELINQUENCY_STATUS > 0).astype(int)\n#print(curr_data['YN_CURRENT_LOAN_DELINQUENCY_STATUS'] )\n#curr_data = curr_data.dropna() \nprint(curr_data[curr_data['YN_CURRENT_LOAN_DELINQUENCY_STATUS'] == 0])\n#print(curr_data[col_to_consider])\n\nX = curr_data[col_to_consider]\ny = curr_data['YN_CURRENT_LOAN_DELINQUENCY_STATUS']\ny = np.ravel(y)\n\nfrom sklearn.linear_model import LogisticRegression\nmodel = LogisticRegression()\nmodel = model.fit(X, y)\n\nX_test = next_data[col_to_consider]\n\n#Run the model on the test set\ny_pred = model.predict(X_test)\n\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y, y_pred)\n\nmodel.score(X, y)\n\nmodel.coef_\n\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import logit, probit, poisson, ols\nfrom sklearn.metrics import roc_curve, auc\nlogit = sm.Logit(y, X.astype(float))\nmodel = logit.fit()\nprint(model.summary())\n\n# Add prediction to dataframe\ncurr_data['pred'] = model.predict(X.astype(float))\n\nfpr, tpr, thresholds =roc_curve(y, curr_data['pred'])\nroc_auc = auc(fpr, tpr)\nprint(fpr, tpr, thresholds)\nprint(\"Area under the ROC curve : %f\" % roc_auc)\n\nimport matplotlib.pyplot as plt\nplt.figure()\nplt.plot(fpr, tpr, label='ROC curve')\nplt.plot([0, 1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('1-Specificity')\nplt.ylabel('Sensitivity')\nplt.title('ROC curve')\nplt.legend(loc=\"lower right\")\nplt.show()\n\nnet = buildNetwork(8,5, 1)\n\nds = SupervisedDataSet(8,1)\n\nfor index, row in curr_data.iterrows():\n input_data = row[col_to_consider]\n output_data = row['OriginalInterestRate']\n ds.addSample(input_data,output_data)\n\ntrainer = BackpropTrainer(net, ds)\n\ntrainer.train()\n\ntrainer.trainUntilConvergence()\n\nfor index, row in next_data.iterrows():\n input_data = row[col_to_consider]\n output_data = row['OriginalInterestRate']\n #ds.addSample(input_data,output_data)\n print(net.activate(input_data))\n\nfrom sklearn import svm\n#SVM Model\nclf = svm.SVC(kernel='linear')\nclf.fit(X.astype(int), y.astype(int))\n\n#Training\ny1= curr_data['YN_CURRENT_LOAN_DELINQUENCY_STATUS']\n\npred = clf.predict(X1)\npred\n\n#Performance on test 
dataset\npd.crosstab(pred,y1[1],rownames=['pred'], colnames=['y1'])","sub_path":"Midterm/adsmidtermclassification.py","file_name":"adsmidtermclassification.py","file_ext":"py","file_size_in_byte":16062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"231878256","text":"from bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport re\nimport pandas as pd\nimport numpy as np\ncat_date=[]\nwebsite = []\nprice=[]\ndisease=[]\nstage=[]\nticker=[]\ndescriptions=[]\nurl= \"https://www.biopharmcatalyst.com/calendars/fda-calendar\"\nresponse = Request(url, headers = {'User-Agent': 'Mozilla/5.0'})\nweb_page = urlopen(response).read()\npage_soup = BeautifulSoup(web_page, \"html.parser\")\nfor date in page_soup.find_all(class_='filter-table__td js-td js-td--ticker js-td--stages js-td--indications js-td--fda js-td--portfolio js-catalyst-searchable'):\n cat_date2= date.get(\"data-catalyst-searchable\")\n cat_date.append(cat_date2)\nfor href in page_soup.find_all(class_=\"filter-table__td js-td js-td--ticker js-td--stages js-td--indications js-td--fda js-td--portfolio js-catalyst-searchable\"):\n value2= href.find_all('a',href=True)\n #attrs={'href':re.compile(\"^http://\")})\n value= re.search(\"\\\"(.*?)\\\"\",str(value2))\n value=value.group()\n website.append(value)\nfor prices in page_soup.find_all(class_=\"filter-table__td js-td js-td--price text-right\"):\n grab_price= prices.get('data-value')\n price.append(float(grab_price))\nfor diseases in page_soup.find_all(class_=\"filter-table__td js-td js-td--ticker js-td--stages js-td--indications js-td--fda js-td--portfolio js-catalyst-searchable\"):\n grab_disease = diseases.get('data-indications')\n disease.append(grab_disease)\nfor stages in page_soup.find_all(class_=\"filter-table__td js-td js-td--stage\"):\n grab_stage=stages.get('data-value')\n stage.append(grab_stage)\nfor tickers in website:\n result= re.search(\"([A-Z])\\w+\",tickers)\n result2=result.group()\n ticker.append(result2)\nstage_array=np.asarray(stage)\nraw_data={'Ticker':ticker,\n 'Price':price,\n 'Stage':stage_array,\n 'Disease':disease,\n 'Website':website,\n 'Catalyst Date':cat_date}\ndf=pd.DataFrame(data=raw_data,index=None)\ndef make_clickable(val):\n return '{}'.format(val,val)\nfor description in page_soup.find_all(class_=\"catalyst-note\"):\n grab_descriptions= re.search(\"\\>(.*?)(?=\\<)\",str(description))\n clean_descriptions= grab_descriptions.group()\n descriptions.append(clean_descriptions)\n #description.append(description1)\n#print(soup.find_all(class_=\"filter-table__text-wrapper\"))\n# for description in soup.find_all(class_=\"catalyst-note\"):\n# grab_description= description.\nprint(df)\n","sub_path":"biopharma_scrape.py","file_name":"biopharma_scrape.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"64320126","text":"\nclass solution(object):\n\n def totalparents(cls,input1,input2,input3):\n\n\n\n\n\n map2={}\n for i in range(input1):\n if input3[i] in map2:\n map2[input3[i]]+=1\n\n else:\n map2[input3[i]]=1\n\n\n\n sum=0\n if input2==0:\n sum=sum+map2[-1]\n for k,v in map2.items():\n\n if k!=-1 and v>=input2:\n sum=sum+1\n\n\n\n\n return sum\n\n\n\n\n\n\ninput1=int(input())\ninput2=int(input())\ninput3=[int(items) for items in 
input().split()]\n\ns=solution()\n\nprint(s.totalparents(input1,input2,input3))\n\n","sub_path":"program1/h1.py","file_name":"h1.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"475971969","text":"# We build a team of bots, one basic defender and one basic attacker\nTEAM_NAME = 'one and one'\n\nfrom demo05_basic_defender import move as move_defender\nfrom demo04_basic_attacker import move as move_attacker\nfrom talk_util import say_underlined\n\ndef move(bot, state):\n # Keep two \"substates\" — one for each bot\n if state == {}:\n state['attacker'] = {}\n state['defender'] = {}\n\n if bot.turn == 0:\n next_pos = move_defender(bot, state['defender'])\n if bot.round < 10:\n say_underlined(bot, 'defender')\n else:\n next_pos = move_attacker(bot, state['attacker'])\n if bot.round < 10:\n say_underlined(bot, 'attacker')\n\n return next_pos\n","sub_path":"demo06_one_and_one.py","file_name":"demo06_one_and_one.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"549235714","text":"# Behavioral Cloning Project\nimport csv\nimport cv2\nimport numpy as np \nimport keras\nimport os\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom random import shuffle\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\n\nlines = []\nwith open('./selfcollected_data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\nimages = []\nmeasurements = [] \naugmented_images, augmented_measurements = [],[]\n\ntrain_samples, validation_samples = train_test_split(lines, test_size=0.2)\n\ndef generator(lines, batch_size=32):\n num_samples = len(lines)\n while 1: # Loop forever so the generator never terminates\n shuffle(lines)\n for offset in range(0, num_samples, batch_size):\n batch_samples = lines[offset:offset+batch_size]\n\n images = []\n measurements = []\n\n for batch_sample in batch_samples:\n for i in range(3):\n if i==0: correction=0 #center\n elif i==1: correction=0.2 #left\n else: correction=-0.2 #right\n source_path = batch_sample[i]\n filename = source_path.split('/')[-1]\n current_path = './selfcollected_data/IMG/' + filename\n image = cv2.imread(current_path)\n images.append(image)\n measurement = float(batch_sample[3])+correction #cast it as a float\n measurements.append(measurement)\n images.append(cv2.flip(image,1))\n measurements.append(measurement*-1.0)\n\n X_train = np.array(images)\n y_train = np.array(measurements)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\nmodel = Sequential()\n# Normalization (tried with -0.5 but worse results)\nmodel.add(Lambda(lambda x: (x/255.0), input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\n# Nvidia architecture - Start\nmodel.add(Convolution2D(24,5,5,subsample=(2,2),activation=\"relu\"))\nmodel.add(Convolution2D(36,5,5,subsample=(2,2),activation=\"relu\"))\nmodel.add(Convolution2D(48,5,5,subsample=(2,2),activation=\"relu\"))\nmodel.add(Convolution2D(64,3, 3,activation=\"relu\"))\nmodel.add(Convolution2D(64,3, 
3,activation=\"relu\"))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n# Using mse instead of the cross_entropy funcition because this is a \n# regression network instead of a classification network\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, samples_per_epoch=len(train_samples), validation_data=validation_generator, \n nb_val_samples=len(validation_samples), nb_epoch=3)\nmodel.save('model.h5')\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"409118753","text":"import os\nimport subprocess\nfrom setuptools import setup, find_packages\nimport sys\nfrom setuptools.command.install import install\n\nsys.path.insert(0, '.')\nimport spruned\n\n\ndef read_pip_requirements():\n reqs = []\n with open(os.path.join(os.path.dirname(__file__), \"requirements.txt\"), 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n if '://' not in line:\n reqs.append(line)\n return reqs\n\n\ndef read_dependency_links():\n links = []\n with open(os.path.join(os.path.dirname(__file__), \"requirements.txt\"), 'r') as f:\n lines = f.readlines()\n for l in lines:\n line = l.strip()\n if '://' in line:\n links.append(line)\n return links\n\n\ndef read_file(name):\n with open(name, 'r', encoding='utf-8') as f:\n file = f.read()\n return file\n\n\nclass InstallGithubDepedenciesCommand(install):\n description = 'install github deps'\n\n def run(self):\n def install_github_dependencies():\n\n for dep in read_dependency_links():\n subprocess.call(['pip', 'install', '-e', dep])\n\n install_github_dependencies()\n install.run(self)\n\n\ngithub_links = '\\n- '.join(read_dependency_links())\ngithub_libraries = '\\n- '.join([x.split('#egg=')[1] for x in read_dependency_links()])\nprint('\\nWarning! Know what you do!')\nprint('\\nSpruned installer will also install custom dependencies from the following github links:\\n\\n-', github_links)\nprint('\\nIs warmly advised to run spruned into a virtual environment, '\n 'especially if you have installed the following libraries:\\n\\n-', github_libraries)\n# Ouch, pip captured this. 
Find a way to inform the user on what's going on.\n\nsetup(\n cmdclass={\n 'install': InstallGithubDepedenciesCommand,\n },\n name='spruned',\n version=spruned.__version__,\n url='https://github.com/gdassori/spruned/',\n license='MIT',\n author='Guido Dassori',\n author_email='guido.dassori@gmail.com',\n python_requires='>=3.5.2',\n description='Bitcoin Lightweight Pseudonode',\n long_description=read_file('README.rst'),\n install_requires=read_pip_requirements(),\n packages=find_packages(exclude=['htmlcov']),\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n entry_points={\n 'console_scripts': [\n 'spruned = spruned.app:main'\n ]\n },\n include_package_data=True\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"148054627","text":"from graphvec import *\nimport argparse\nimport pickle\nimport os\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='reuters')\nparser.add_argument('--save_name', type=str, default='save_name')\n\nparser.add_argument('--friendly_print', action='store_true')\nparser.add_argument('--print_freq', type=int, default=10)\nparser.add_argument('--backup_freq', type=int, default=10)\n\nparser.add_argument('--epochs', type=int, default=1000)\nparser.add_argument('--learning_rate', type=float, default=1e-2)\nparser.add_argument('--dropout', type=float, default=0.0)\nparser.add_argument('--pos_sample_size', type=int, default=128)\nparser.add_argument('--embedding_size_w', type=int, default=128)\nparser.add_argument('--embedding_size_d', type=int, default=2)\nparser.add_argument('--n_neg_samples', type=int, default=64)\nparser.add_argument('--window_size', type=int, default=8)\nparser.add_argument('--window_batch_size', type=int, default=128)\nparser.add_argument('--h_layers', nargs='+', type=int, default=[32, 8])\n\nparser.add_argument('--load_model', type=str)\nparser.add_argument('--train', action='store_true')\nparser.add_argument('--eval', action='store_true')\nparser.add_argument('--bugfix', action='store_true')\nargs = parser.parse_args()\n\ndef load_dataset(dataset):\n ''' Return tokenized dataset, detokenizer and tokenizer '''\n if dataset == 'reuters':\n print('Loading REUTERS dataset')\n word2id = np.load('../data/reuters/reuters_word2id.npy').item(0)\n id2word = np.load('../data/reuters/reuters_id2word.npy').item(0)\n tokenized = np.load('../data/reuters/reuters_tokenized.npy')\n elif dataset == 'alternative':\n print('Loading ALTERNATIVE dataset')\n word2id = np.load('../data/20_newsgroup/20newsgroup_word2id.npy').item(0)\n id2word = np.load('../data/20_newsgroup/20newsgroup_id2word.npy').item(0)\n tokenized = np.load('../data/20_newsgroup/20newsgroup_tokenized.npy')\n else:\n print('Unknown dataset: ', dataset)\n\n return tokenized, word2id, id2word\n\ndef load_dataset(dataset):\n ''' Return tokenized dataset, detokenizer and tokenizer '''\n if dataset == 'reuters':\n print('Loading REUTERS dataset')\n word2id = np.load('../data/reuters/reuters_word2id.npy').item(0)\n id2word = np.load('../data/reuters/reuters_id2word.npy').item(0)\n tokenized = np.load('../data/reuters/reuters_tokenized.npy')\n with open('../data/reuters/reuters_triplets.p', 'rb') as f:\n triplets = pickle.load(f)\n elif dataset == 'alternative':\n print('Loading 
ALTERNATIVE dataset')\n word2id = np.load('../data/20_newsgroup/20newsgroup_word2id.npy').item(0)\n id2word = np.load('../data/20_newsgroup/20newsgroup_id2word.npy').item(0)\n tokenized = np.load('../data/20_newsgroup/20newsgroup_tokenized.npy')\n with open('../data/20_newsgroup/20newsgroup_triplets.p', 'rb') as f:\n triplets = pickle.load(f)\n else:\n print('Unknown dataset: ', dataset)\n\n return tokenized, word2id, id2word, triplets\n\n\nif __name__ == \"__main__\":\n # Load dataset\n tokenized, word2id, id2word, triplets = load_dataset(args.dataset)\n\n # bugfix\n if args.bugfix:\n word2id[''.format(self.shortname(), self.callback.__qualname__, self.callback.__code__.co_firstlineno, self.interval)\n\n\nclass LossyRepeatingAlarmElement(RepeatingAlarmElement):\n\n @staticmethod\n def timeline_now(t):\n return t.future\n\n\nclass CrossZoneAlarmElement:\n\n def cross_zone(self):\n return True\n\n\nclass AlarmElementCrossZone(AlarmElement, CrossZoneAlarmElement):\n pass\n\n\nclass RepeatingAlarmElementCrossZone(RepeatingAlarmElement, CrossZoneAlarmElement):\n pass\n\n\nclass LossyRepeatingAlarmElementCrossZone(LossyRepeatingAlarmElement, CrossZoneAlarmElement):\n pass\n\n\ndef _register_auto_cleanup(alarm_handle):\n element_handle = alarm_handle._element_handle\n\n def on_alarm_handle_collected(_):\n ehid = id(element_handle)\n if ehid in _ALARM_ELEMENT_HANDLES:\n del _ALARM_ELEMENT_HANDLES[ehid]\n timeline = element_handle.timeline\n timeline.hard_stop(element_handle)\n\n _ALARM_ELEMENT_HANDLES[id(element_handle)] = weakref.ref(alarm_handle, on_alarm_handle_collected)\n\n\ndef _unregister_auto_cleanup(element_handle, teardown_handle):\n ehid = id(element_handle)\n if ehid in _ALARM_ELEMENT_HANDLES:\n if teardown_handle:\n wr = _ALARM_ELEMENT_HANDLES[ehid]\n handle = wr()\n if handle is not None:\n handle._teardown()\n del _ALARM_ELEMENT_HANDLES[ehid]\n\n\ndef _lookup_alarm_handle(element_handle):\n ehid = id(element_handle)\n wr = _ALARM_ELEMENT_HANDLES.get(ehid)\n if wr is not None:\n return wr()\n\n\ndef get_alarm_data_for_gsi():\n alarm_data = []\n for alarm_handle_ref in tuple(_ALARM_ELEMENT_HANDLES.values()):\n alarm_handle = alarm_handle_ref()\n if alarm_handle is None:\n continue\n else:\n element_handle = alarm_handle._element_handle\n element = element_handle.element\n if element is None:\n continue\n entry = {}\n entry['time'] = str(element_handle.when)\n entry['ticks'] = alarm_handle.get_remaining_time().in_ticks()\n entry['time_left'] = str(alarm_handle.get_remaining_time())\n owner = alarm_handle._owner_ref()\n if owner is None:\n owner_name = 'None Owner'\n else:\n owner_name = str(owner)\n entry['owner'] = owner_name\n entry['handle'] = id(alarm_handle)\n entry['callback'] = str(element.callback)\n alarm_data.append(entry)\n\n sort_key_fn = lambda data: data['ticks']\n alarm_data = sorted(alarm_data, key=sort_key_fn)\n return alarm_data","sub_path":"Scripts/simulation/alarms.py","file_name":"alarms.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"477677719","text":"# c08ex07.py\n# Goldbach tester\n\nfrom c08ex05 import isPrime\n\ndef goldbach(x):\n cand = 3\n while cand < x/2:\n other = x - cand\n if isPrime(cand) and isPrime(other):\n return cand\n cand = cand + 2\n\ndef main():\n print(\"Goldbach checker\\n\")\n \n n = int(input(\"Enter an even natural number: \"))\n if n % 2 != 0:\n print(n, \"is not even!\")\n else:\n prime1 = goldbach(n)\n prime2 = n - prime1\n 
print(\"{0} + {1} = {2}\".format(prime1, prime2, n))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"code/chapter08/c08ex07.py","file_name":"c08ex07.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"480546153","text":"from PyQt4 import QtCore, QtGui\r\nfrom ..util import log, metrix, ui\r\n\r\nclass Widget(ui.Widget):\r\n\tdef setup(self):\r\n\t\tfrmServerSelection = QtGui.QGroupBox(\"Server Selection\", self)\r\n\t\tlayoutServerSelection = QtGui.QHBoxLayout(frmServerSelection)\r\n\t\tlblServer = QtGui.QLabel(\"&Server / Host name:\", frmServerSelection)\r\n\t\tself.txtServer = QtGui.QLineEdit(frmServerSelection)\r\n\t\tlblServer.setBuddy(self.txtServer)\r\n\t\tself.btnConnect = QtGui.QPushButton(\"&Connect\")\r\n\t\tself.txtServer.returnPressed.connect(self.btnConnect.click)\r\n\t\tlayoutServerSelection.addWidget(lblServer)\r\n\t\tlayoutServerSelection.addWidget(self.txtServer)\r\n\t\tlayoutServerSelection.addWidget(self.btnConnect)\r\n\t\t\r\n\t\tself.addLeftSide(frmServerSelection)\r\n\t\t\r\n\t\tfrmControls = QtGui.QGroupBox(\"Controls\", self)\r\n\t\tlayoutControls = QtGui.QHBoxLayout(frmControls)\r\n\t\tself.btnResync = QtGui.QPushButton(\"&Resync\", frmControls)\r\n\t\tlayoutControls.addWidget(self.btnResync)\r\n\r\n\t\tself.addLeftSide(frmControls)\r\n\r\n\t\tfrmSettings = QtGui.QGroupBox(\"Settings\", self)\r\n\t\tlayoutSettings = QtGui.QVBoxLayout(frmSettings)\r\n\t\tself.swOffset = ui.SecondsWidget(self, \"&Offset:\", range=(-60.0, +60.0), step=0.010)\r\n\t\tlayoutSettings.addWidget(self.swOffset)\r\n\t\tself.lsTimerRingSize = ui.LabeledSpinner(self, \"Timer ring size:\", QtGui.QSpinBox)\r\n\t\tself.lsTimerRingSize.setRange(0, 100000)\r\n\t\tself.lsTimerRingSize.setValue(200)\r\n\t\tself.lsTimerRingSize.setSingleStep(10)\r\n\t\tlayoutSettings.addWidget(self.lsTimerRingSize)\r\n\r\n\t\tself.addLeftSide(frmSettings)\r\n\r\n\t\tself.setWindowTitle(\"Client - SyncedMusic\")\r\n\r\n\t\tself.txtServer.setFocus()","sub_path":"synced-music/core/client/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"476564427","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom setting import FLAGS\n\ndef pred_inference(image_batch_flatten, keep_prob=0.5, reuse=False):\n '''\n obtain the inference\n Args:\n image_batch_flatten = [batch_size, (height * width * channels)] tensorflow object\n keep_prob = drop_out 0.5\n reuse = reuse the variable again\n Return:\n inferernce [batch_size, ]\n '''\n with tf.variable_scope('inference'):\n convs = []\n pools = []\n i = 1\n\n def _variable_with_weight_decay(name, shape, stddev, wd):\n var = tf.get_variable(name, shape=shape, initializer=tf.truncated_normal_initializer(stddev=stddev))\n if wd:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n while i <= FLAGS.conv_loops:\n if i == 1:\n channels = FLAGS.channels\n biases_shape = FLAGS.bias\n conv_target = tf.reshape(image_batch_flatten, \n [-1, FLAGS.image_size, FLAGS.image_size, FLAGS.channels]\n )\n else:\n channels = biases_shape\n biases_shape = channels * 2\n conv_target = pools[-1]\n\n conv_i = 'conv_' + str(i)\n with tf.variable_scope(conv_i, reuse=reuse) as scope:\n weight = tf.get_variable('weights', \n [5, 5, channels, biases_shape], \n 
initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv = tf.nn.conv2d(\n conv_target, \n weight, \n [1, 1, 1, 1], \n padding='SAME')\n biases = tf.get_variable('biases', \n shape=[biases_shape], \n initializer=tf.constant_initializer(0.0))\n bias = tf.nn.bias_add(conv, biases)\n convs.append(tf.nn.dropout(tf.nn.relu(bias, name=scope.name), keep_prob))\n pools.append(\n tf.nn.max_pool(\n convs[-1], \n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME'))\n i += 1\n\n with tf.variable_scope('fc_1', reuse=reuse) as scope:\n dim = 1\n for d in pools[-1].get_shape()[1:].as_list():\n dim *= d\n \n c_weight_1 = _variable_with_weight_decay(\n 'c_weight_1', \n shape=[dim, FLAGS.final_bias], \n stddev=0.01, \n wd=0.005)\n c_bias_1 = tf.get_variable(\n 'c_bias_1', \n shape=[FLAGS.final_bias], \n initializer=tf.constant_initializer(0.0)) \n pool_flatten = tf.reshape(pools[-1], [-1, dim])\n h_fc1 = tf.nn.relu(tf.matmul(pool_flatten, c_weight_1) + c_bias_1) \n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n \n with tf.variable_scope('fc_2', reuse=reuse) as scope:\n c_weight_2 = _variable_with_weight_decay(\n 'c_weight_2', \n shape=[FLAGS.final_bias, FLAGS.num_classes], \n stddev=0.01, \n wd=0.005)\n c_bias_2 = tf.get_variable(\n 'c_bias_2', \n shape=[FLAGS.num_classes], \n initializer=tf.constant_initializer(0.0)) \n \n with tf.variable_scope('softmax') as scope: \n y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop , c_weight_2) + c_bias_2)\n \n return y_conv","sub_path":"learning/image/learn/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"623316317","text":"from __future__ import unicode_literals\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.db.models import Avg\nfrom models import User, Ticket, Event, Performer, Venue, Category, Purchase\nfrom django.contrib import messages\nimport re, bcrypt, json, requests\nfrom pprint import pprint\nfrom datetime import datetime\n\n\n\ndef index(request):\n request.session['nli_source'] = 'None'\n today = datetime.now()\n all_events = Event.objects.filter(visible_until__gte=today).order_by('event_date_time', 'popularity_score')\n categories = Category.objects.order_by('tag')\n context = {\n 'selected_events': all_events,\n 'categories': categories,\n }\n return render(request, 'stubhub/home.html', context)\n\n\ndef register(request):\n errors = User.objects.newUserValidator(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.add_message(request, messages.ERROR, errors[tag])\n return redirect(\"/log_reg\")\n else:\n user = generateNewUser(request)\n if not user:\n messages.add_message(request, messages.ERROR, \"User email already exists.\")\n return redirect(\"/\")\n else:\n setSession(request, user)\n return handleNliSource(request)\n\n\ndef generateNewUser(request):\n first = request.POST['first_name']\n last = request.POST['last_name']\n mail = request.POST['email']\n hash1 = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n user = User.objects.create(first_name=first, last_name=last, email=mail, password_hash=hash1)\n return user\n\n\ndef setSession(request, user):\n request.session['user_id'] = user.id\n request.session['user_name'] = user.first_name\n request.session['cart']=[]\n\n\ndef handleNliSource(request):\n if 'nli_source' in request.session and request.session['nli_source'] == 'sell':\n return 
redirect('/sell/{}'.format(request.session['nli_event_id']))\n    elif 'nli_source' in request.session and request.session['nli_source'] == 'cart':\n        return redirect('/buy/{}'.format(request.session['nli_event_id']))\n    else:\n        return redirect('/')\n\n\ndef login(request):\n    errors = User.objects.loginValidator(request.POST)\n    if len(errors):\n        return invalidLogin(request)\n    else:\n        email = request.POST['email']\n        user = User.objects.get(email=email)\n        setSession(request, user)\n        return handleNliSource(request)\n\n\ndef invalidLogin(request):\n    messages.add_message(request, messages.ERROR, \"Invalid email or password.\")\n    return redirect(\"/log_reg\")\n\n\ndef log_out(request):\n    if 'cart' in request.session and len(request.session['cart']) > 0:\n        return redirect('/log_out/confirm')\n    else:\n        request.session.clear()\n        return redirect(\"/\")\n\n\ndef log_out_confirm(request):\n    return render(request, 'stubhub/log_out_confirm.html')\n\n\ndef removeAllFromCart(request):\n    for ticket_id in request.session['cart']:\n        Ticket.objects.filter(id=ticket_id).update(available=True)\n    request.session['cart']=[]\n    return redirect('/log_out')\n\n\ndef init_sale(request, parameter):\n    request.session['sell_path'] = False\n    if not userLoggedIn(request):\n        request.session['nli_source'] = 'sell'\n        request.session['nli_event_id'] = parameter\n        return redirect('/log_reg')\n    event = getEvent(parameter)\n    num_tix = request.POST.get('num_tix', False)\n    tix = []\n    for i in range(1, int(num_tix) + 1):\n        tix.append(i)\n    context = {\n        \"event\": event,\n        \"tix\": tix,\n        \"num_tix\": num_tix\n    }\n    return render(request, 'stubhub/init_sale.html', context)\n\n\ndef userLoggedIn(request):\n    # use .get() so anonymous visitors (no 'user_id' key) do not raise a KeyError\n    if request.session.get('user_id'):\n        return True\n    return False\n\n\ndef getEvent(eventId):\n    event = Event.objects.get(id=eventId)\n    return event\n\n\ndef log_reg(request):\n    return render (request,\"stubhub/login.html\")\n\n\ndef post_tickets(request, parameter):\n    event = getEvent(parameter)\n    createTickets(request, event)\n    url = '/ticket_posted/'\n    url += str(parameter)\n    return redirect(url)\n\n\ndef createTickets(request, event):\n    seller = getCurrentuser(request)\n    num_tix = request.POST['num_tix']\n    price = request.POST['price']\n    for i in range(int(num_tix)):\n        seat = \"seat_\" + str(i+1) \n        seat = request.POST[seat]\n        Ticket.objects.create(event=event, seller=seller, seat=seat, price=price)\n\n\ndef getCurrentuser(request):\n    userId = request.session['user_id']\n    user = User.objects.get(id=userId)\n    return user\n\n\ndef ticket_posted(request, parameter):\n    event = getEvent(parameter)\n    context = {\n        \"event\": event,\n    }\n    return render(request, 'stubhub/sell_success.html', context)\n\n\ndef acc_info(request, parameter):\n    user = User.objects.get(id=parameter)\n    tickets_for_sale = Ticket.objects.filter(seller=user, available=True)\n    tickets_sold = Ticket.objects.filter(seller=user, available=False)\n    tickets_bought = Ticket.objects.filter(buyer=user, available=False)\n    context = { \n        'user': user,\n        'tickets_for_sale': tickets_for_sale,\n        'tickets_sold': tickets_sold,\n        'tickets_bought': tickets_bought\n    }\n    if userLoggedIn(request) and request.session['user_id'] == user.id:\n        return render(request,\"stubhub/acc_info.html\",context)\n    else:\n        return render(request,\"stubhub/show_user.html\",context)\n\n\ndef cart(request):\n    context = getCartContext(request)\n    return render (request,\"stubhub/cart.html\",context)\n\n\ndef getCartContext(request):\n    item_ids = request.session['cart']\n    items = []\n    total=0\n    for item_id in 
item_ids:\n        ticket = Ticket.objects.get(id=item_id)\n        items.append(ticket)\n    for ticket in items:\n        total += int(ticket.price)\n    request.session['total'] = total\n    context = { \n        'user': getCurrentuser(request),\n        'items': items,\n        'total':total,\n    }\n    return context\n\n\ndef add_to_cart(request):\n    if not userLoggedIn(request):\n        return goToLogRegFromCart(request)\n    if 'cart' not in request.session:\n        initializeCart(request)\n    ticketId = request.POST['ticket_id']\n    eventId = updateTicketAvailability(request, ticketId)\n    request.session['cart'].append(ticketId)\n    x = \"/\" + str(eventId)\n    return redirect('/buy'+ x)\n\n\ndef add_to_cart_from(request, parameter):\n    if not userLoggedIn(request):\n        return goToLogRegFromCart(request)\n    if 'cart' not in request.session:\n        initializeCart(request)\n    ticketId = request.POST['ticket_id']\n    updateTicketAvailability(request, ticketId)\n    request.session['cart'].append(ticketId)\n    x = \"/\" + str(parameter)\n    return redirect('/acc_info'+ x)\n\n\ndef updateTicketAvailability(request, ticketId):\n    # Mark the ticket unavailable; the caller adds it to the session cart\n    ticket = Ticket.objects.get(id=ticketId)\n    Ticket.objects.filter(id=ticketId).update(available=False)\n    return ticket.event.id\n\n\ndef goToLogRegFromCart(request):\n    request.session['nli_source'] = 'cart'\n    return redirect('/log_reg')\n\n\ndef initializeCart(request):\n    request.session['cart']=[]\n\n\ndef remove_from_cart(request,parameter):\n    ticket_id = parameter\n    request.session['cart'].remove(ticket_id)\n    Ticket.objects.filter(id=ticket_id).update(available=True)\n    return redirect('/cart')\n\n\ndef check_out(request):\n    if cartIsEmpty(request):\n        return displayEmptyCartMsg(request)\n    context = getCartContext(request)\n    return render(request,'stubhub/check_out.html',context)\n\n\ndef cartIsEmpty(request):\n    return request.session['cart'] == []\n\n\ndef displayEmptyCartMsg(request):\n    messages.add_message(request, messages.ERROR, \"Your Cart is Empty\",extra_tags='CE')\n    return redirect(\"/cart\")\n\n\ndef payment_shipping(request):\n    return render(request,'stubhub/payment_shipping.html')\n\n\ndef order_review(request):\n    cardNumber = request.POST['card_number']\n    creditCard = getCreditCard(request, cardNumber)\n    address = getAddress(request)\n    request.session['card'] = creditCard\n    request.session['address'] = address\n    context = { \n        'user': User.objects.get(id=request.session['user_id']),\n        'items': getItems(request),\n        'total': request.session['total'],\n        'card': creditCard,\n        'address': address,\n        'last_four': cardNumber[-4:]\n    }\n    return render(request,'stubhub/order_review.html',context)\n\n\ndef getCreditCard(request, cardNumber):\n    creditCard = {\n        'first_name': request.POST['first_name'],\n        'last_name': request.POST['last_name'],\n        'card_number': cardNumber,\n        'exp_month':request.POST['month'],\n        'exp_year': request.POST['year']\n    }\n    return creditCard\n\n\ndef getAddress(request):\n    address = {\n        'full_name': request.POST['full_name'],\n        'address': request.POST['address'],\n        'zip': request.POST['zip'],\n        'city':request.POST['city'],\n        'state': request.POST['state'],\n        'country':request.POST['country']\n    }\n    return address\n\n\ndef getItems(request):\n    items = []\n    item_ids = request.session['cart']\n    for item_id in item_ids:\n        ticket = Ticket.objects.get(id=item_id)\n        items.append(ticket)\n    return items\n\n\ndef purchase(request):\n    return redirect('/confirmation.html')\n\n\ndef order_confirmation(request):\n    user = getCurrentuser(request)\n    items = getItems(request)\n    for ticket in items:\n        Ticket.objects.filter(id=ticket.id).update(buyer=user)\n    
request.session['total'] = 0\n    request.session['cart'] = []\n    return render(request,'stubhub/confirmation.html')\n    \n\ndef process_search(request):\n    if len(request.POST['text_search']) > 0:\n        search_info = request.POST['text_search']\n        request.session['search_field'] = 'text'\n    elif len(request.POST['event_date']) > 0:\n        search_info = request.POST['event_date']\n        request.session['search_field'] = 'date'\n    elif len(request.POST['category']) > 0:\n        search_info = request.POST['category']\n        request.session['search_field'] = 'category'\n    else:\n        return redirect('/')\n    request.session['search_info'] = search_info\n    return redirect('/search')\n\n\ndef search_results(request):\n    search_field = request.session['search_field']\n    search_info = request.session['search_info']\n    today = datetime.now()\n    if search_field == 'text':\n        selected_events = processTextSearch(search_info, today)\n    elif search_field == 'category':\n        selected_events = processCategorySearch(search_info, today)\n    elif search_field == 'date':\n        selected_events = Event.objects.filter(event_date_time__contains=search_info) \n    num_results = len(selected_events)\n    categories = Category.objects.order_by('tag')\n    context = {\n        'num_results' : num_results,\n        'selected_events': selected_events,\n        'query': search_info,\n        'categories': categories\n    }\n    return render(request, 'stubhub/search_results.html', context)\n\n\ndef processTextSearch(text, today):\n    titleMatches = Event.objects.filter(visible_until__gte=today).filter(title__contains=text)\n    venueMatches = Event.objects.filter(visible_until__gte=today).filter(venue__name__contains=text)\n    performerMatches = Event.objects.filter(visible_until__gte=today).filter(performers__name__contains=text)\n    return titleMatches|venueMatches|performerMatches\n\n\ndef processCategorySearch(searchedCategory, today):\n    category = Category.objects.get(display_tag=searchedCategory)\n    category_ref = category.seatgeek_ref\n    categoryMatches = Event.objects.filter(visible_until__gte=today).filter(category=category)\n    categoryParentMatches = Event.objects.filter(visible_until__gte=today).filter(category__parent_ref=category_ref)\n    return categoryMatches|categoryParentMatches\n\n\ndef show_event(request, parameter):\n    event = Event.objects.get(id=parameter)\n    context = { \"event\": event }\n    return render(request, 'stubhub/show_event.html', context)\n\n\ndef buy_tix(request, parameter):\n    event = getEvent(parameter)\n    if userLoggedIn(request):\n        currentUserId = request.session['user_id']\n    else:\n        request.session['nli_event_id'] = parameter\n        currentUserId = -1\n    if request.method == \"GET\":\n        available_tix = Ticket.objects.filter(available=True, event=event).exclude(seller=currentUserId).order_by(\"seat\")\n    elif request.method == \"POST\":\n        available_tix = putAvailableTicketsInOrder(request, event)\n    context = {\n        \"event\": event,\n        \"available_tix\": available_tix,\n    }\n    return render(request, 'stubhub/buy_tix.html', context)\n\n\ndef putAvailableTicketsInOrder(request, event):\n    availableTickets = Ticket.objects.filter(available=True, event=event)\n    filterBy = request.POST['filter_by']\n    if filterBy == \"seat\":\n        availableTickets = availableTickets.order_by(\"seat\")\n    elif filterBy == \"price_asc\":\n        availableTickets = availableTickets.order_by(\"price\")\n    elif filterBy == \"price_desc\":\n        availableTickets = availableTickets.order_by(\"-price\")\n    return availableTickets\n\n\ndef geo(request, lat, lon):\n    event_url = \"https://api.seatgeek.com/2/events?client_id=ODI2MjI2OHwxNTAwOTE1NzYzLjYy&lat=\" + str(lat)\n    event_url += 
\"&lon=\" + str(lon) + \"&range=5mi&per_page=1000&sort=datetime_local.asc&score.gt=0.5\"\n    allEventData = getData(event_url)\n    allEvents = allEventData['events']\n    addCategoriesToDatabaseIfNecessary(allEvents)\n    addEventsToDatabaseIfNecessary(allEvents)\n    request.session['geo'] = True\n    return redirect('/')\n\n\ndef getData(url):\n    response = requests.get(url)\n    return json.loads(response.text)\n\n\ndef addCategoriesToDatabaseIfNecessary(events):\n    for event in events:\n        taxonomies = event['taxonomies']\n        for category in taxonomies:\n            tag = category['name']\n            formatted_tag = tag.replace('_', ' ')\n            display_tag = formatted_tag.title()\n            seatgeek_ref = category['id']\n            parent_ref = category['parent_id']\n            try:\n                Category.objects.get(tag=tag)\n            except:\n                Category.objects.create(tag=tag, seatgeek_ref=seatgeek_ref,parent_ref=parent_ref, display_tag=display_tag)\n\n\ndef addEventsToDatabaseIfNecessary(events):\n    for event in events:\n        title = event['title']\n        short_title = event['short_title']\n        event_date_time = datetime.strptime(event['datetime_local'], \"%Y-%m-%dT%H:%M:%S\")\n        visible_until = datetime.strptime(event['visible_until_utc'], \"%Y-%m-%dT%H:%M:%S\")\n        popularity_score = event['score']\n        image = event['performers'][0]['image']\n        category = Category.objects.get(tag = event['type'])\n        event_venue=event['venue']['name']\n        try:\n            venue = Venue.objects.get(name=event_venue)\n        except:\n            name = event['venue']['name']\n            address = event['venue']['address']\n            extended_address = event['venue']['extended_address']\n            venue = Venue.objects.create(name=name, address=address, extended_address=extended_address)\n        try:\n            Event.objects.get(title=title)\n        except:\n            thisEvent = Event.objects.create(title=title, short_title=short_title, event_date_time=event_date_time, \n                                            visible_until=visible_until, popularity_score=popularity_score, \n                                            category=category, venue=venue, image=image)\n            for performer in event['performers']:\n                performer_name = performer['name']  # use the performer's own name, not the venue's\n                try:\n                    performer = Performer.objects.get(name=performer_name)\n                except:\n                    performer = Performer.objects.create(name=performer_name)\n                thisEvent.performers.add(performer)\n\n\ndef sell_search(request):\n    request.session['sell_path'] = True\n    categories = Category.objects.order_by('tag')\n    context = { 'categories': categories }\n    return render(request, 'stubhub/sell_search.html', context)","sub_path":"apps/stubhub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"325141448","text":"import logging\nfrom typing import Type, TypeVar, Generic, Callable\n\nlogger = logging.getLogger(__name__)\n\nClass = TypeVar(\"Class\")\nValue = TypeVar(\"Value\")\nMethod = Callable[[Class], Value]\n\n\nclass FallbackDescriptor(Generic[Class, Value]):\n    def __init__(self, func: Method, cached: bool = True, logging: bool = False) -> None:\n        \"\"\"\n        Initialize the descriptor.\n\n        Arguments\n        ---------\n        func\n            Fallback function if no value exists.\n        cached\n            Cache the value calculated by `func`.\n        logging\n            Log a warning if fallback function is used.\n        \"\"\"\n        self.__doc__ = getattr(func, \"__doc__\") # keep the docs\n        self.func = func\n        self.cached = cached\n        self.logging = logging\n        self.prop_name = f\"__{self.func.__name__}\"\n\n    def __get__(self, obj: Class, cls: Type[Class]) -> Value:\n        \"\"\"\n        Get the value.\n\n        Return either the cached value or call the underlying function and\n        optionally cache its result.\n        \"\"\"\n        if not hasattr(obj, self.prop_name):\n            if self.logging:\n                
logger.warning(\"Using `%s` without prefetched value.\", self.func)\n\n value: Value = self.func(obj)\n if self.cached:\n self.__set__(obj, value)\n else:\n return value\n\n return getattr(obj, self.prop_name)\n\n def __set__(self, obj: Class, value: Value) -> None:\n \"\"\"\n Store value in a private property.\n \"\"\"\n setattr(obj, self.prop_name, value)\n\n def __delete__(self, obj: Class) -> None:\n \"\"\"\n Clear current value from private property.\n \"\"\"\n if hasattr(obj, self.prop_name):\n delattr(obj, self.prop_name)\n\n\ndef fallback_property(\n cached: bool = True, logging: bool = False\n) -> Callable[[Method], FallbackDescriptor]:\n \"\"\"\n Decorate a class method to return a precalculated value instead.\n\n This might be useful if you have a function that aggregates values from\n related objects, which could already be fetched using an annotated queryset.\n The decorated methods will favor the precalculated value over calling the\n actual method.\n\n NOTE: The annotated value must have the same name as the decorated function!\n \"\"\"\n\n def inner(func: Method) -> FallbackDescriptor:\n return FallbackDescriptor(func, cached=cached, logging=logging)\n\n return inner\n","sub_path":"fallback_property/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"214607593","text":"# !/usr/bin/python\n# This software here will maintain the main settings as well as the main starting point of the game.\nimport sys\nimport pygame\nfrom settings import Settings as Settings_Alien\nimport spaceship\n\n\ndef main():\n # Root point for game to start\n pygame.init()\n # Initialize all imported pygame modules\n screen = pygame.display.set_mode((Settings_Alien().screen_width, Settings_Alien().screen_height))\n # Initialize output size monitor\n pygame.display.set_caption(\"==Alien==\")\n # Output game's banner\n bg_color = Settings_Alien().bg_color\n # Display spaceshift on screen\n spaceship_main = spaceship.Spaceship(screen)\n # Start main loop for the game.\n while True:\n # Listen for mouse & keyboard events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n # Full display to screen\n screen.fill(bg_color)\n spaceship_main.blitme()\n pygame.display.flip()\n\n\nmain()\n","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"417183890","text":"from django.shortcuts import render\nfrom rooms.models import Room\nfrom rest_framework.decorators import api_view\nfrom rooms.serializer import Room_serializer,Room_Create_serializer\nfrom django.http import JsonResponse\nfrom rest_framework.parsers import JSONParser\n# Create your views here.\n\n@api_view(['GET'])\ndef list_rooms(request):\n room_list = Room.objects.all()\n serializer = Room_serializer(room_list,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@api_view(['GET'])\ndef room_detail(request,pk):\n patient_detail = Room.objects.get(pk=pk)\n serializer = Room_serializer(patient_detail)\n return JsonResponse(serializer.data,safe=False)\n\n@api_view(['POST'])\ndef add_room(request):\n data = JSONParser().parse(request)\n serialized_data = Room_Create_serializer(data=data)\n if serialized_data.is_valid():\n serialized_data.save()\n return JsonResponse(serialized_data.data,status=201)\n\n return JsonResponse(serialized_data.errors, 
status=400)\n\n","sub_path":"rooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"159075133","text":"# encoding: UTF-8\n\nTEST_FILENAME = 'horseColicTest.txt'\nTRAIN_FILENAME = 'horseColicTraining.txt'\n\nfrom public import function\n\nfrom logRegres import *\n\n\nlogReg = LogRegress(maxIter=100,alpha=0.01,method=2)\nlogReg.loadDataSet(TRAIN_FILENAME)\nlogReg.trainLogRegres()\n\n# logReg.showLogRegres()\n#\nnumDataSet, strLabels = function.file2matrix(TEST_FILENAME)\nlogReg.setTest(numDataSet,strLabels.astype(int))\n\n","sub_path":"logRegres/horseColic.py","file_name":"horseColic.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"302487142","text":"'''\nLeetcode- 45. Jump Game II - https://leetcode.com/problems/jump-game-ii/\nTime complexity - O(N)\nspace complexity - O(1)\napproach - we maintain maxinterval and nextinterval; on each iteration, if nums[i]+i > Nextinterval we update it.\n\n'''\nclass Solution:\n    def jump(self, nums: List[int]) -> int:\n        if len(nums)==0 or len(nums)==1:\n            return 0\n        Intervalmax=nums[0]\n        Nextinterval=nums[0]\n        jump=1\n        for i in range(1,len(nums)):\n            if i+nums[i]>Nextinterval:\n                Nextinterval=i+nums[i]\n            if Intervalmax==i and i!=len(nums)-1:\n                Intervalmax=Nextinterval\n                jump+=1\n            if Intervalmax>=len(nums)-1:\n                break\n        return jump\n    \n    ","sub_path":"Problem-135.py","file_name":"Problem-135.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"222561170","text":"import os \nfrom flask import Flask, request, redirect, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import session\nfrom models import db\nfrom models import Fcuser\nfrom flask_wtf.csrf import CSRFProtect\nfrom forms import RegisterForm, LoginForm\n\napp = Flask(__name__)\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        userid = form.data.get('userid')\n        session['userid'] = userid\n        return redirect('/')\n    \n    return render_template('login.html', form = form)\n\n@app.route('/register', methods=['GET','POST'])\ndef register():\n    form = RegisterForm()\n    if form.validate_on_submit():\n        fcuser = Fcuser()\n        fcuser.userid = form.data.get('userid')\n        fcuser.username = form.data.get('username')\n        fcuser.password = form.data.get('password')\n\n        db.session.add(fcuser)\n        db.session.commit()\n        print('Success!')\n\n        return redirect('/')\n    \n    return render_template('register.html', form = form)\n\n@app.route('/')\ndef hello():\n    userid = session.get('userid')\n    return render_template('hello.html', userid=userid)\n\nif __name__ == \"__main__\":\n    basedir = os.path.abspath(os.path.dirname(__file__))\n    dbfile = os.path.join(basedir, 'db.sqlite')\n\n    app.config['SQLALCHEMY_DATABASE_URI'] =\"sqlite:///\"+dbfile\n    app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n    app.config['SECRET_KEY'] = 'sdafoiauyefhioauwefbnh'\n\n    csrf = CSRFProtect()\n    csrf.init_app(app)\n\n    db.init_app(app)\n    db.app = app\n    db.create_all()\n    \n    app.run(host='127.0.0.1',port=5000, debug=True)","sub_path":"기초/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"27241594","text":"'''\nDynamic Simulation - Obstacle Avoidance Algorithm\n@author LukasHuber\n@date 2018-05-24\n'''\n\n# Command to automatically reload libraries -- in ipython before execution\nimport numpy as np\n\n# Visualization libraries\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n# 3D Animation utils\nfrom mpl_toolkits.mplot3d import Axes3D\nimport mpl_toolkits.mplot3d.art3d as art3d\n#from matplotlib.animation import writers\n\nimport time\n\nfrom math import pi\n\n#first change the cwd to the script path\n#scriptPath = os.path.realpath(os.path.dirname(sys.argv[0]))\n#os.chdir(scriptPath)\n\nimport sys\n \n# ---------- Import Custom libraries ----------\n\n\nlib_string = \"/home/lukas/Code/MachineLearning/ObstacleAvoidanceAlgroithm/lib_obstacleAvoidance/\"\nif not any (lib_string in s for s in sys.path):\n    sys.path.append(lib_string)\n\nlib_string = \"/home/lukas/Code/MachineLearning/ObstacleAvoidanceAlgroithm/\"\nif not any (lib_string in s for s in sys.path):\n    sys.path.append(lib_string)\n\nfrom draw_ellipsoid import *\nfrom lib_obstacleAvoidance import obs_check_collision\nfrom class_obstacle import *\nfrom lib_modulation import *\nfrom obs_common_section import *\n#from obs_dynamic_center import *\nfrom obs_dynamic_center_3d import *\n\nfrom dynamicalSystem_lib import *\n\nfrom matplotlib import animation\nplt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'\n# plt.ion()\n# plt.show()\n\n# -------------- Start script --------------\nprint()\nprint(' ----- Script <> started. ----- ')\nprint()\n\n###### Create function to allow pause (one click) and stop (double click) on figure #####\npause=False\npause_start = 0\ndef onClick(event): \n    global pause\n    global pause_start\n    global anim\n    \n    pause ^= True\n    if pause:\n        pause_start = time.time()\n    else:\n        dT = time.time()-pause_start\n        if dT < 0.3: # Break simulation at double click\n            print('Animation exited.')\n            anim.ani.event_source.stop()\n\n    \n##### Animation Function #####\nclass Animated():\n    \"\"\"An animated scatter plot using matplotlib.animations.FuncAnimation.\"\"\"\n    def __init__(self, x0, obs=[], N_simuMax = 600, dt=0.01, attractorPos='default', convergenceMargin=0.01, xRange=[-10,10], yRange=[-10,10], zRange=[-10,10], sleepPeriod=0.03, nonlinear=False, RK4_int = False, dynamicalSystem=linearAttractor):\n\n        self.dim = x0.shape[0]\n\n        #self.simuColors=[]\n        \n        # Initialize class variables\n        self.obs = obs\n        self.N_simuMax = N_simuMax\n        self.dt = dt\n        if attractorPos == 'default':\n            self.attractorPos = self.dim*[0.0]\n        else:\n            self.attractorPos = attractorPos\n        \n        self.sleepPeriod=sleepPeriod\n\n        # last three values are observed for convergence\n        self.convergenceMargin = convergenceMargin\n        self.lastConvergences = [convergenceMargin for i in range(3)] \n\n        # Get current simulation time\n        self.old_time = time.time()\n        self.pause_time = self.old_time\n        \n        self.N_points = x0.shape[1]\n\n        self.x_pos = np.zeros((self.dim, self.N_simuMax+2, self.N_points))\n        \n        self.x_pos[:,0,:] = x0\n        \n        self.xd_ds = np.zeros(( self.dim, self.N_simuMax+1, self.N_points ))\n        #self.t = np.linspace(( 0, self.N_simuMax*self.dt, num=self.N_simuMax ))\n        self.t = np.linspace(0,self.N_simuMax+1,num=self.N_simuMax+1)*dt\n\n        # Simulation parameters\n        self.RK4_int = RK4_int\n        self.nonlinear = nonlinear\n        self.dynamicalSystem = dynamicalSystem\n\n        self.converged = False\n        \n        self.iSim = 0\n\n        self.lines = [] # Container to keep line plots\n        self.startPoints = [] # Container to keep start points\n        
self.endPoints = [] # Container to keep line plots \n self.patches = [] # Container to keep patch plotes\n self.contour = []\n self.centers = []\n self.cent_dyns = []\n\n # Setup the figure and axes.\n if self.dim==2:\n self.fig, self.ax = plt.subplots()\n else:\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection='3d')\n #self.fig.set_size_inches(14, 9)\n self.fig.set_size_inches(12, 8)\n \n self.ax.set_xlim(xRange)\n self.ax.set_ylim(yRange)\n #self.ax.set_xlabel('x1')\n #self.ax.set_ylabel('x2')\n if self.dim==3:\n self.ax.set_zlim(zRange)\n self.ax.set_zlabel('x3')\n #self.ax.view_init(elev=0.3, aim=0.4)\n\n # Set axis etc.\n plt.gca().set_aspect('equal', adjustable='box')\n\n # Set up plot\n #self.setup_plot()\n #self.tt1 = self.ax.text(.5, 1.05, '', transform = self.ax.transAxes, va='center', animated=True, )\n \n # Adjust dynamic center\n # intersection_obs = obs_common_section(self.obs)\n # dynamic_center_3d(self.obs, intersection_obs)\n \n # Then setup FuncAnimation \n self.ani = FuncAnimation(self.fig, self.update, interval=1, frames = self.N_simuMax-2, repeat=False, init_func=self.setup_plot, blit=True, save_count=self.N_simuMax-2)\n\n def setup_plot(self):\n print('setup started')\n # Draw obstacle\n self.obs_polygon = []\n \n # Numerical hull of ellipsoid\n for n in range(len(self.obs)):\n self.obs[n].draw_ellipsoid(numPoints=50) # 50 points resolution\n\n for n in range(len(self.obs)):\n if self.dim==2:\n emptyList = [[0,0] for i in range(50)]\n #self.obs_polygon.append( plt.Polygon(self.obs[n].x_obs, animated=True,))\n self.obs_polygon.append( plt.Polygon(emptyList, animated=True,))\n self.obs_polygon[n].set_color(np.array([176,124,124])/255)\n self.obs_polygon[n].set_alpha(0.8)\n patch_o = plt.gca().add_patch(self.obs_polygon[n])\n self.patches.append(patch_o)\n\n if self.obs[n].x_end > 0:\n cont, = plt.plot([],[], 'k--', animated=True)\n else:\n cont, = plt.plot([self.obs[n].x_obs_sf[ii][0] for ii in range(len(self.obs[n].x_obs_sf))],\n [self.obs[n].x_obs_sf[ii][1] for ii in range(len(self.obs[n].x_obs_sf))],\n 'k--', animated=True)\n self.contour.append(cont)\n else: # 3d\n N_resol=50 # TODO save as part of obstacle class internally from assigining....\n self.obs_polygon.append(\n self.ax.plot_surface(\n np.reshape([obs[n].x_obs[i][0] for i in range(len(obs[n].x_obs))],\n (N_resol,-1)),\n np.reshape([obs[n].x_obs[i][1] for i in range(len(obs[n].x_obs))],\n (N_resol,-1)),\n np.reshape([obs[n].x_obs[i][2] for i in range(len(obs[n].x_obs))],\n (N_resol, -1)) ) )\n\n # Center of obstacle\n center, = self.ax.plot([],[],'k.', animated=True) \n self.centers.append(center)\n \n if hasattr(self.obs[n], 'center_dyn'):# automatic adaptation of center\n cent_dyn, = self.ax.plot([],[], 'k+', animated=True, linewidth=18, markeredgewidth=4, markersize=13)\n # ax_ifd.plot(obs[n].center_dyn[0],obs[n].center_dyn[1], 'k+', linewidth=18, markeredgewidth=4, markersize=13)\n self.cent_dyns.append(cent_dyn)\n \n for ii in range(self.N_points):\n line, = plt.plot([], [], '--', lineWidth = 4, animated=True)\n self.lines.append(line)\n point, = plt.plot(self.x_pos[0,0,ii],self.x_pos[1,0,ii], '*k', markersize=10, animated=True)\n if self.dim==3:\n point, = plt.plot(self.x_pos[0,0,ii],self.x_pos[1,0,ii], self.x_pos[2,0,ii], '*k', markersize=10, animated=True)\n self.startPoints.append(point)\n point, = plt.plot([], [], 'bo', markersize=15, animated=True)\n self.endPoints.append(point)\n\n\n if self.dim==2:\n plt.plot(self.attractorPos[0], self.attractorPos[1], 'k*', 
linewidth=7.0, markeredgewidth=4, markersize=13)\n        else:\n            plt.plot([self.attractorPos[0]], [self.attractorPos[1]], [self.attractorPos[2]], 'k*', linewidth=7.0)\n\n        self.fig.canvas.mpl_connect('button_press_event', onClick) # Button click enabled\n\n        #self.tt1 = self.ax.text(.5, 8.2, '', va='center', fontsize=20)\n\n        print('setup finished')\n\n        #return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints + [self.tt1])\n        return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints)\n    \n    def update(self, iSim):\n        if not plt.fignum_exists(self.fig.number):\n            anim.ani.event_source.stop()\n        \n        #if saveFigure:\n        if pause: # NO ANIMATION -- PAUSE\n            self.old_time=time.time()\n            return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints)\n\n        if not plt.fignum_exists(self.fig.number):\n            anim.ani.event_source.stop()\n        \n        print('loop count={} - frame={} - simulation time={}'.format(self.iSim, iSim, np.round(self.dt*self.iSim, 3) ))\n\n        # intersection_obs = obs_common_section(self.obs)\n        #print('center before',obs[0].center_dyn)\n        # dynamic_center_3d(self.obs, intersection_obs)\n        # print('center after',obs[0].center_dyn)\n        \n        if self.RK4_int: # Runge-Kutta integration\n            for j in range(self.N_points):\n                self.x_pos[:, self.iSim+1,j] = obs_avoidance_rk4(self.dt, self.x_pos[:,self.iSim,j], self.obs, x0=self.attractorPos, obs_avoidance = obs_avoidance_interpolation_moving)\n\n                #self.x_pos[:, self.iSim+1,j] = obs_avoidance_rk4(self.dt, self.x_pos[:,self.iSim,j], self.obs, x0=self.attractorPos, obs_avoidance = obs_avoidance_modulation)\n        \n        elif self.nonlinear:\n            for j in range(self.N_points):\n                self.xd_ds[:,self.iSim,j] = obs_avoidance_nonlinear_radial(self.x_pos[:,self.iSim, j], self.dynamicalSystem, self.obs, self.attractorPos)\n            self.x_pos[:,self.iSim+1,:] = self.x_pos[:,self.iSim, :] + self.xd_ds[:,self.iSim, :]*self.dt\n        \n        else: # Simple euler integration\n            # Calculate DS\n            for j in range(self.N_points):\n                xd_temp = linearAttractor(self.x_pos[:,self.iSim, j], self.attractorPos)\n                \n                self.xd_ds[:,self.iSim,j] = obs_avoidance_interpolation_moving(self.x_pos[:,self.iSim, j], xd_temp, self.obs)\n            self.x_pos[:,self.iSim+1,:] = self.x_pos[:,self.iSim, :] + self.xd_ds[:,self.iSim, :]*self.dt\n        \n        self.t[self.iSim+1] = (self.iSim+1)*self.dt\n\n        # Update lines\n        for j in range(self.N_points):\n            self.lines[j].set_xdata(self.x_pos[0,:self.iSim+1,j])\n            self.lines[j].set_ydata(self.x_pos[1,:self.iSim+1,j])\n            if self.dim==3:\n                self.lines[j].set_3d_properties(zs=self.x_pos[2,:self.iSim+1,j])\n\n            self.endPoints[j].set_xdata(self.x_pos[0,self.iSim+1,j])\n            self.endPoints[j].set_ydata(self.x_pos[1,self.iSim+1,j])\n            if self.dim==3:\n                self.endPoints[j].set_3d_properties(zs=self.x_pos[2,self.iSim+1,j])\n        \n        # ========= Check collision ----------\n        #collisions = obs_check_collision(self.x_pos[:,self.iSim+1,:], obs)\n        #collPoints = np.array()\n\n        #print('TODO --- collision observation')\n        #collPoints = self.x_pos[:,self.iSim+1,collisions]\n\n        # if collPoints.shape[0] > 0:\n        #     plot(collPoints[0,:], collPoints[1,:], 'rx')\n        #     print('Collision detected!!!!')\n        for o in range(len(self.obs)):# update obstacles if moving\n            self.obs[o].update_pos(self.t[self.iSim], self.dt) # Update obstacles\n\n            self.centers[o].set_xdata(self.obs[o].x0[0])\n            self.centers[o].set_ydata(self.obs[o].x0[1])\n            if self.dim==3:\n                
self.centers[o].set_3d_properties(zs=self.obs[o].x0[2])\n\n            if hasattr(self.obs[o], 'center_dyn'):# automatic adaptation of center\n                self.cent_dyns[o].set_xdata(self.obs[o].center_dyn[0])\n                self.cent_dyns[o].set_ydata(self.obs[o].center_dyn[1])\n                if self.dim==3:\n                    self.cent_dyns[o].set_3d_properties(zs=self.obs[o].center_dyn[2])\n\n\n            if self.obs[o].x_end > self.t[self.iSim] or self.iSim<1: # First round or moving\n                if self.dim ==2: # only show safety-contour in 2d, otherwise not easily understandable\n                    self.contour[o].set_xdata([self.obs[o].x_obs_sf[ii][0] for ii in range(len(self.obs[o].x_obs_sf))])\n                    self.contour[o].set_ydata([self.obs[o].x_obs_sf[ii][1] for ii in range(len(self.obs[o].x_obs_sf))])\n\n                if self.dim==2:\n                    self.obs_polygon[o].xy = self.obs[o].x_obs\n                else:\n                    self.obs_polygon[o].xyz = self.obs[o].x_obs\n        self.iSim += 1 # update simulation counter\n        self.check_convergence() # Check convergence \n        \n        # Pause for constant simulation speed\n        self.old_time = self.sleep_const(self.old_time)\n        self.pause_time = self.old_time\n\n        #self.tt1.set_text('{:2.2f} s'.format(round(self.t[self.iSim+1],2) ) )\n\n        #return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints + [self.tt1] )\n        return (self.lines + self.obs_polygon + self.contour + self.centers + self.cent_dyns + self.startPoints + self.endPoints)\n\n    def check_convergence(self):\n        #return\n        self.lastConvergences[0] = self.lastConvergences[1]\n        self.lastConvergences[1] = self.lastConvergences[2]\n\n        self.lastConvergences[2] = np.sum(abs(self.x_pos[:,self.iSim,:] - np.tile(self.attractorPos, (self.N_points,1) ).T ))\n\n        if (sum(self.lastConvergences) < self.convergenceMargin) or (self.iSim+1>=self.N_simuMax):\n            self.ani.event_source.stop()\n            \n            if (self.iSim>=self.N_simuMax-1):\n                print('Maximum number of {} iterations reached without convergence.'.format(self.N_simuMax))\n            else:\n                print('Convergence with tolerance of {} reached after {} iterations.'.format(sum(self.lastConvergences), self.iSim+1) )\n\n    \n    def show(self):\n        plt.show()\n\n    def sleep_const(self, old_time=0):\n        next_time = old_time+self.sleepPeriod\n        \n        now = time.time()\n        \n        sleep_time = next_time - now # get sleep time\n        sleep_time = min(max(sleep_time, 0), self.sleepPeriod) # restrict in sensible range\n\n        time.sleep(sleep_time)\n\n        return next_time\n\n\nanimationName = -1\nsaveFigure=0\nN = 4\n\ndef samplePointsAtBorder(N, xRange, yRange):\n    # Draw points evenly spaced at border\n    dx = xRange[1]-xRange[0]\n    dy = yRange[1]-yRange[0]\n\n    N_x = ceil(dx/(2*(dx+dy))*(N))+2\n    N_y = ceil(dy/(2*(dx+dy))*(N))-0\n\n    x_init = np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n                        np.ones(N_x)*yRange[0]) )\n\n    x_init = np.hstack((x_init, \n                        np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n                                   np.ones(N_x)*yRange[1] )) ))\n\n    ySpacing=(yRange[1]-yRange[0])/(N_y+1)\n    x_init = np.hstack((x_init, \n                        np.vstack((np.ones(N_y)*xRange[0],\n                                   np.linspace(yRange[0]+ySpacing,yRange[1]-ySpacing, num=N_y) )) ))\n\n    x_init = np.hstack((x_init, \n                        np.vstack((np.ones(N_y)*xRange[1],\n                                   np.linspace(yRange[0]+ySpacing,yRange[1]-ySpacing, num=N_y) )) ))\n\n    return x_init\n\n    \nsimuCase=7\n\n\nif simuCase==0:\n    N = 10\n    x_init = np.vstack((np.ones(N)*20,\n                        np.linspace(-10,10,num=N) ))\n    ### Create obstacle \n    obs = []\n    a = [5, 2] \n    p = [1, 1]\n    x0 = [10.0, 0]\n    th_r = 30/180*pi\n    sf = 1.\n\n    #xd=[0, 0]\n    w = 0\n    x_start = 0\n    x_end = 2\n    #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n\n    
a = [3,2]\n p = [1,1]\n x0 = [7,-6]\n th_r = -40/180*pi\n sf = 1.\n\n xd=[0.25, 1]\n w = 0\n x_start = 0\n x_end = 10\n \n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n a = [3,2]\n p = [1,1]\n x0 = [7,-6]\n th_r = -40/180*pi\n sf = 1.\n\n xd=[0., 0]\n w = 0\n x_start = 0\n x_end = 0\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n \n ob2 = Obstacle(\n a= [1,1],\n p= [1,1],\n x0= [10,-8],\n th_r= -40/180*pi,\n sf=1,\n xd=[0, 0],\n x_start=0,\n x_end=0,\n w=0\n )\n #obs.append(ob2)\n\n ob3 = Obstacle(\n a= [1,1],\n p= [1,1],\n x0= [14,-2],\n th_r= -40/180*pi,\n sf=1,\n xd=[0, 0],\n x_start=0,\n x_end=0,\n w=0\n )\n obs.append(ob3)\n\n xRange = [ -1,20]\n yRange = [-10,10]\n zRange = [-10,10]\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n attractorPos = [0,0]\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.05, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos )\n \nelif simuCase==1:\n N = 10\n x_init = np.vstack((np.ones(N)*1,\n np.linspace(-1,1,num=N),\n np.linspace(-1,1,num=N) ))\n ### Create obstacle \n obs = []\n\n x0 = [0.5,0.2,0.0]\n a = [0.4,0.1,0.1]\n #a = [4,4,4]\n p = [10,1,1]\n th_r = [0, 0, 30./180*pi]\n sf = 1.\n\n xd=[0,0,0]\n w = [0,0,0]\n\n x_start = 0\n x_end = 2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n\n ### Create obstacle\n x0 = [0.5,-0.2,0]\n a = [0.4,0.1,0.1]\n p = [10,1,1]\n th_r = [0, 0, -30/180*pi]\n sf = 1\n\n xd=[0,0,0]\n w = [0,0,0]\n\n x_start = 0\n x_end = 2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n xRange = [-0.2,1.8]\n yRange = [-1,1]\n zRange = [-1,1]\n\n\nelif simuCase ==2:\n xRange = [-0.7,0.3]\n yRange = [2.3,3.0]\n \n xRange = [-3,3]\n yRange = [-3,3.0]\n\n N = 10\n #x_init = np.vstack((np.linspace(-.19,-0.16,num=N),\n # np.ones(N)*2.65))\n\n x_init = np.vstack((np.linspace(-3,-1,num=N),\n np.ones(N)*0))\n \n xAttractor = np.array([0,0])\n\n obs = []\n \n obs.append(Obstacle(a=[1.1, 1],\n p=[1,1],\n x0=[0.5,1.5],\n th_r=-25*pi/180,\n sf=1.0\n ))\n \n a = [0.2,5]\n p = [1,1]\n x0 = [0.5, 5]\n th_r = -25/180*pi\n sf = 1.0\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.003, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\n \nelif simuCase ==3:\n xRange = [-0.7,0.3]\n yRange = [2.3,3.0]\n \n xRange = [-4,4]\n yRange = [-0.1,6.0]\n\n N = 20\n x_init = np.vstack((np.linspace(-4.5,4.5, num=N),\n np.ones(N)*5.5))\n \n \n xAttractor = np.array([0,0])\n\n obs = []\n obs.append(Obstacle(\n a = [1.1,1.2],\n p = [1,1],\n x0 = [-1, 1.5],\n th_r = -25/180*pi,\n sf = 1.0\n ))\n \n obs.append(Obstacle(\n a = [1.8,0.4],\n p = [1,1],\n x0 = [0, 4],\n th_r = 20/180*pi,\n sf = 1.0,\n ))\n \n obs.append(Obstacle(\n a=[1.2,0.4],\n p=[1,1],\n x0=[3,3],\n th_r=-30/180*pi,\n sf=1.0 \n ))\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\nelif simuCase==4:\n\n # Moving in LAB\n xRange = [0,16]\n yRange = [0,9]\n \n #x_init = np.vstack((np.ones(N)*16,\n # np.linspace(0,9,num=N) ))b\n \n ### Create obstacle \n obs = []\n x0 = [3.5,1]\n a = [2.5,0.8]\n p = [1,1]\n th_r = -10\n sf = 1.3\n\n xd0=[0,0]\n w0 = 0\n\n x01 =x0\n x_start = 0\n x_end = 10\n obs.append(Obstacle(a=a, p=p, x0=x01,th_r=th_r, sf=sf, x_start=x_start, x_end=x_end, 
timeVariant=True))\n\n def func_w1(t):\n t_interval1 = [0, 2.5, 5, 7, 8, 10]\n w1 = [th_r, -20, -140, -140, -170, -170]\n \n for ii in range(len(t_interval1)-1):\n if t < t_interval1[ii+1]:\n return (w1[ii+1]-w1[ii])/(t_interval1[ii+1]-t_interval1[ii]) * pi/180\n return 0\n\n def func_xd1(t):\n t_interval1x = [0, 2.5, 5, 7, 8, 10]\n xd1 = [[x01[0], 7, 9, 9, 7, 6],\n [x01[1], 4, 5, 5, 4, -2]]\n\n for ii in range(len(t_interval1x)-1):\n if t < t_interval1x[ii+1]:\n dt = (t_interval1x[ii+1]-t_interval1x[ii])\n return [(xd1[0][ii+1]-xd1[0][ii])/dt, (xd1[1][ii+1]-xd1[1][ii])/dt]\n return 0\n\n obs[0].func_w = func_w1\n obs[0].func_xd = func_xd1\n\n x0 = [12,8]\n a = [2,1.2]\n p = [1,1]\n th_r = 0\n sf = 1.3\n\n xd0=[0,0]\n w0 = 0\n\n x_start = 0\n x_end = 10\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, x_start=x_start, x_end=x_end, timeVariant=True))\n\n def func_w2(t):\n t_interval = [0, 2., 6.5, 7, 10]\n w = [th_r, -60, -60, 30, 30]\n \n for ii in range(len(t_interval)-1):\n if t < t_interval[ii+1]:\n return (w[ii+1]-w[ii])/(t_interval[ii+1]-t_interval[ii]) * pi/180\n return 0\n\n def func_xd2(t):\n t_interval = [0, 2.0, 5, 6.5, 9, 10]\n xd = [[x0[0], 13, 13, 12, 14, 15], \n [x0[1], 6, 6, 3, -2, -3 ]]\n\n for ii in range(len(t_interval)-1):\n if t < t_interval[ii+1]:\n dt = (t_interval[ii+1]-t_interval[ii])\n return [(xd[0][ii+1]-xd[0][ii])/dt, (xd[1][ii+1]-xd[1][ii])/dt]\n return 0\n\n obs[1].func_w = func_w2\n obs[1].func_xd = func_xd2\n\n #x_init = np.array([[15.5],[0.2]])\n N = 20\n\n x_init = samplePointsAtBorder(N, xRange, yRange)\n collisions = obs_check_collision(x_init, obs)\n x_init = x_init[:,collisions[0]]\n \n attractorPos = [4,8]\n\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.01, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos )\n \n if False: #save animation\n anim.ani.save('ani/animation_multipleObstacles_conv.mp4', dpi=100, fps=25)\n print('Saving finished.')\n \n\nelif simuCase==5:\n\n \n xRange = [-4,4]\n yRange = [-0.1,6.0]\n\n N = 10\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n print('axample at rorder')\n\n # dx = xRange[1]-xRange[0]\n # dy = yRange[1]-yRange[0]\n\n # N_x = ceil(dx/(2*(dx+dy))*N)\n # N_y = ceil(dx/(2*(dx+dy))*N)\n\n # x_init = np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n # np.ones(N_x)*yRange[0]) )\n\n # x_init = np.hstack((x_init, \n # np.vstack((np.linspace(xRange[0],xRange[1], num=N_x),\n # np.ones(N_x)*yRange[1] )) ))\n\n # x_init = np.hstack((x_init, \n # np.vstack((np.ones(N_y)*xRange[0],\n # np.linspace(yRange[0],yRange[1], num=N_y) )) ))\n\n # x_init = np.hstack((x_init, \n # np.vstack((np.ones(N_y)*xRange[1],\n # np.linspace(yRange[0],yRange[1], num=N_y) )) ))\n #x_init = np.array( [[-2,-2,-1],\n # [2, 3, 3]])\n xAttractor = np.array([0,0])\n\n obs = []\n obs.append(Obstacle(\n a = [1.1,1.2],\n p = [1,1],\n x0 = [-1, 1.5],\n th_r = -25/180*pi,\n sf = 1\n ))\n \n obs.append(Obstacle(\n a = [1.8,0.4],\n p = [1,1],\n x0 = [0, 4],\n th_r = 20/180*pi,\n sf = 1.0,\n ))\n \n obs.append(Obstacle(\n a=[1.2,0.4],\n p=[1,1],\n x0=[3,3],\n th_r=-30/180*pi,\n sf=1.0 \n ))\n\n N = 10\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\n if True: #save animation\n anim.ani.save('ani/animation_peopleWalking.mp4', dpi=100, fps=25)\n print('Saving finished.')\n\n #dist slow = 0.18\n # anim.ani.save('ani/simue.mpeg', writer=\"ffmpeg\")\n #FFwriter = animation.FFMpegWriter()\n 
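    # func_w1/func_xd1 and func_w2/func_xd2 in simuCase 4 above repeat the same
    # piecewise-constant rate lookup between time knots. A generic helper
    # (hypothetical, not part of the original script) captures that pattern; it
    # assumes knots is strictly increasing and len(values) == len(knots):
    def piecewise_rate(t, knots, values):
        # Constant rate on [knots[ii], knots[ii+1]) so that values[ii+1] is
        # reached exactly at t = knots[ii+1].
        for ii in range(len(knots) - 1):
            if t < knots[ii + 1]:
                return (values[ii + 1] - values[ii]) / (knots[ii + 1] - knots[ii])
        return 0  # past the last knot: hold still
    # e.g. func_w1(t) is equivalent to piecewise_rate(t, t_interval1, w1) * pi / 180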
#anim.ani.save('ani/basic_animation.mp4', writer = FFwriter, fps=20)\n\nif simuCase==6:\n xRange = [-0.1,12]\n yRange = [-5,5]\n\n N=5\n #x_init = samplePointsAtBorder(N, xRange, yRange)\n x_init = np.vstack((np.ones((1,N))*8,\n np.linspace(-1,1,num=N),))\n\n xAttractor=[0,0]\n \n obs = []\n a=[0.3, 2.5]\n p=[1,1]\n x0=[2,0]\n th_r=-50/180*pi\n sf=1\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n # Obstacle 2\n a = [0.4,2.5]\n p = [1,1]\n #x0 = [7,2]\n x0 = [6,0]\n th_r = 50/180*pi\n sf = 1\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\nif simuCase==7:\n xAttractor = np.array([0,0])\n centr = [2, 2.5]\n\n obs = []\n N = 12\n R = 5\n th_r0 = 38/180*pi\n rCent=2.4\n for n in range(N):\n obs.append(Obstacle(\n a = [0.4,3],\n p = [1,1],\n x0 = [R*cos(2*pi/N*n), R*sin(2*pi/N*n)],\n th_r = th_r0 + 2*pi/N*n,\n sf = 1.0))\n \n obs[n].center_dyn=[obs[n].x0[0]-rCent*sin(obs[n].th_r),\n obs[n].x0[1]+rCent*cos(obs[n].th_r)]\n\n obs[n].tail_effect = True\n \n xRange = [-10,10]\n yRange = [-8,8]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1000, convergenceMargin=0.3, sleepPeriod=0.001, RK4_int=True)\n\n\n # animationName = 'ani/animation_ring_noConvergence.mp4'\n animationName = 'ani/animation_ring_convergence.mp4'\n\nif simuCase ==8:\n xAttractor = np.array([0,0])\n centr = [2, 2.5]\n\n obs = []\n obs.append(Obstacle(\n a = [2,2],\n p = [1,1],\n x0 = [10,-7],\n th_r = 0,\n sf = 1.0,\n xd = [-5,5],\n x_start=0,\n x_end=10))\n \n xRange = [-1,10]\n yRange = [-5,5]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.0, N_simuMax=800, convergenceMargin=0.3, sleepPeriod=0.01)\n\n animationName = 'ani/animation_movingCircle.mp4'\n \nif simuCase ==9:\n xAttractor = np.array([0,0])\n centr = [2, 2.5]\n\n obs = []\n obs.append(Obstacle(\n a = [0.4,3],\n p = [1,1],\n x0 = [2,0],\n th_r = 0,\n sf = 1.0,\n w = 3,\n x_start=0,\n x_end=10))\n \n xRange = [-3,7]\n yRange = [-5,5]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.005, N_simuMax=800, convergenceMargin=0.3, sleepPeriod=0.01)\n\n if True: #save animation\n anim.ani.save('ani/animation_rotatingEllipse.mp4', dpi=100, fps=25)\n print('Saving finished.')\n \nif simuCase ==10:\n xAttractor = np.array([0,0])\n centr = [1.5, 3.0]\n ### Three obstacles touching -- no common center, no convergence\n obs = []\n a = [0.6,0.6]\n p = [1,1]\n x0 = [1.5, .7]\n th_r = -60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n a = [1,0.4]\n p = [1,4]\n x0 = [3, -00.8]\n th_r= +60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n a = [1.2,0.2]\n p = [2,2]\n x0 = [2.3,.1]\n th_r = 20/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n N = 20\n\n xRange = [-0.5,5.5]\n yRange = [-2.5,2.5]\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.005, N_simuMax=int(800/3), convergenceMargin=0.3, sleepPeriod=0.01)\n\n if True: #save animation\n anim.ani.save('ani/animation_multipleObstacles_noConv.mp4', dpi=100, fps=25)\n print('Saving finished.')\n \nif simuCase ==11:\n xAttractor = np.array([0,0])\n centr = [2.05, 2.55-dy]\n\n dy =2.5\n \n obs = []\n a = [0.6,0.6]\n p = [1,1]\n x0 = [2., 3.2-dy]\n th_r = -60/180*pi\n sf = 1.2\n 
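    # Convergent variant of simuCase 10: the same three touching ellipses, but
    # all sharing the common centre centr via center_dyn. NOTE: centr is
    # computed from dy one line before dy = 2.5 is assigned, which raises a
    # NameError unless dy already exists, and the third obstacle below
    # re-assigns obs[1].center_dyn where obs[2] is presumably meant.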
obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n obs[0].center_dyn = centr\n\n a = [1,0.4]\n p = [1,3]\n x0 = [1.5, 1.6-dy]\n th_r = +60/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n obs[1].center_dyn = centr\n\n a = [1.2,0.2]\n p = [2,2]\n x0 = [3.3,2.1-dy]\n th_r = -20/180*pi\n sf = 1.2\n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n obs[1].center_dyn = centr\n\n N = 20\n\n xRange = [-0.5,5.5]\n yRange = [-2.5,2.5]\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.005, N_simuMax=600, convergenceMargin=0.3, sleepPeriod=0.01)\n\n if False: #save animation\n anim.ani.save('ani/animation_multipleObstacles_conv.mp4', dpi=100, fps=25)\n print('Saving finished.')\n\n\nif simuCase==12:\n N = 10\n ### Create obstacle \n obs = []\n a = [5, 2] \n p = [1, 1]\n x0 = [10.0, 0]\n th_r = 30/180*pi\n sf = 1.\n\n #xd=[0, 0]\n w = 3\n x_start = 0\n x_end = 2\n\n a = [3,0.8]\n p = [1,1]\n x0 = [3,0]\n th_r = 100/180*pi\n sf = 1.\n\n xd=[0., 0]\n w = 10\n x_start = 0\n x_end = 10\n \n obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf, xd=xd, x_start=x_start, x_end=x_end, w=w))\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n \n xRange = [ -1,8]\n yRange = [-4,4]\n x_init = np.vstack((np.ones(N)*xRange[1],\n np.linspace(yRange[0],yRange[1],num=N) ))\n #x_init = np.array([[12],[1]])\n\n #obs.append(Obstacle(a=a, p=p, x0=x0,th_r=th_r, sf=sf))\n\n attractorPos = [0,0]\n\n # anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.001, N_simuMax=2080, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos, dynamicalSystem=nonlinear_stable_DS, nonlinear=True)\n\n # anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.01, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.001, N_simuMax=2080, convergenceMargin=0.3, sleepPeriod=0.01,attractorPos=attractorPos, dynamicalSystem=linearAttractor, nonlinear=True)\n\n #anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.001, N_simuMax=1040, convergenceMargin=0.3, sleepPeriod=0.01)\n\nif simuCase == 13:\n # Parallel ellipses; flow going through\n xAttractor = np.array([0,0])\n\n th_r0 = 38/180*pi\n obs = []\n obs.append(Obstacle(\n a = [4,0.4],\n p = [1,1],\n x0 = [0, 2],\n th_r = 30/180*pi,\n sf = 1.0))\n\n n = 0\n rCent = 3\n # obs[n].center_dyn=[obs[n].x0[0], \n # obs[n].x0[1]]\n obs[n].center_dyn=[obs[n].x0[0]-rCent*np.cos(obs[n].th_r),\n obs[n].x0[1]-rCent*np.sin(obs[n].th_r)]\n\n # obs.append(Obstacle(\n # a = [4,0.4],\n # p = [1,1],\n # x0 = [0, 4],\n # th_r = 30*180/pi,\n # sf = 1.0))\n \n # n = 1\n # obs[n].center_dyn=[obs[n].x0[0]-rCent*np.cos(obs[n].th_r),\n # obs[n].x0[1]-rCent*np.sin(obs[n].th_r)]\n \n xRange = [-5,5]\n yRange = [-1,7]\n N = 20\n \n x_init = samplePointsAtBorder(N, xRange, yRange)\n \n anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, dt=0.02, N_simuMax=1000, convergenceMargin=0.3, sleepPeriod=0.001, RK4_int=True)\n\n\n animationName = 'ani/avoiding_ellipse.mp4'\n\n\n\n# saveFigure\nif True:\n# if saveFigure:\n if type(animationName)==int:\n anim.ani.save('ani/test.mp4', dpi=100,fps=50)\n else:\n anim.ani.save(animationName, dpi=100,fps=50)\n print('Saving finished.')\n plt.close('all')\nelse:\n anim.show()\n print('Animation')\n\n#if __name__ == '__main__':\n#if True:\n #anim = Animated(x_init, obs, xRange=xRange, yRange=yRange, zRange=zRange, dt=0.005, N_simuMax=200000, 
convergenceMargin=0.3, sleepPeriod=0.01, )\n #\nprint('\\n\\n---- Script finished ---- \\n\\n')\n","sub_path":"dynamicSimulation.py","file_name":"dynamicSimulation.py","file_ext":"py","file_size_in_byte":32961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"350225023","text":"import cv2\nfrom os import path\n\nimport maproom.constants as c\n\ndef markerPath(fname):\n return path.join('markers', fname)\n\nprint('wrote image with dims', c.charucoImgDims, 'maps to size', (c.charucoSqSizeM * c.charucoNSqHoriz, c.charucoSqSizeM * c.charucoNSqVert), 'meters')\nprint('or', (c.charucoSqSizeM * c.charucoNSqHoriz / c.inToM, c.charucoSqSizeM * c.charucoNSqVert / c.inToM), 'inches')\n\ncharucoImg = c.charucoBoard.draw(c.charucoImgDims)\ncv2.imwrite(markerPath('charuco-calibration.png'), charucoImg)\n\npixelSize = c.markerSizeIn * c.imgPPI\n\nrobot01 = cv2.aruco.drawMarker(c.markerDictionary, 23, pixelSize)\ncv2.imwrite(markerPath('robot01.png'), robot01)\n\nrobot02 = cv2.aruco.drawMarker(c.markerDictionary, 24, pixelSize)\ncv2.imwrite(markerPath('robot02.png'), robot02)\n\nrobot03 = cv2.aruco.drawMarker(c.markerDictionary, 25, pixelSize)\ncv2.imwrite(markerPath('robot03.png'), robot03)\n\nrobot04 = cv2.aruco.drawMarker(c.markerDictionary, 26, pixelSize)\ncv2.imwrite(markerPath('robot04.png'), robot04)\n\nrobot05 = cv2.aruco.drawMarker(c.markerDictionary, 27, pixelSize)\ncv2.imwrite(markerPath('robot05.png'), robot05)\n\nrobot06 = cv2.aruco.drawMarker(c.markerDictionary, 28, pixelSize)\ncv2.imwrite(markerPath('robot06.png'), robot06)\n\nrobot07 = cv2.aruco.drawMarker(c.markerDictionary, 29, pixelSize)\ncv2.imwrite(markerPath('robot07.png'), robot07)\n\nrobot08 = cv2.aruco.drawMarker(c.markerDictionary, 30, pixelSize)\ncv2.imwrite(markerPath('robot08.png'), robot08)\n","sub_path":"skycam/generate_markers.py","file_name":"generate_markers.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"54729555","text":"import flask\nfrom flask import request, jsonify\nimport sqlite3\nimport json\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return '''
<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>
'''\n\n#Users\n#Getting all users\n@app.route('/bookmarking/users', methods=['GET'])\ndef api_user_all():\n users_table = ['user_id', 'user_name']\n sql = 'SELECT * FROM Users ORDER BY user_id ASC;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql).fetchall()\n users = [dict(zip(users_table, r)) for r in res]\n return jsonify({'count':len(res), 'users':users}), 200\n\n#Adding one or more new user(s)\n@app.route('/bookmarking', methods=['POST'])\ndef api_user_one():\n users_table = ['user_id', 'user_name']\n try:\n data = request.get_json(silent=True)\n user_ids = data['user_id']\n user_names = data['user_name']\n # 500 error\n except Exception as e:\n return jsonify({\"reasons\":[{\"message\":\"Internal Server Error\"}]}), 500\n sql = 'INSERT INTO Users(user_id, user_name) values(?, ?);'\n try:\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n # if input data is more than one\n if str(type(user_ids)) == \"\":\n res = []\n already_exists = []\n for idx in range(len(user_ids)):\n user_id = user_ids[idx]\n user_name = user_names[idx]\n try:\n cur.execute(sql, (user_id, user_name,))\n conn.commit()\n res.append(dict(zip(users_table, [user_id, user_name])))\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"User already exists\"}]}), 400\n # else input data is one\n else:\n user_id = user_ids\n user_name = user_names\n cur.execute(sql, (user_id, user_name,))\n conn.commit()\n res = dict(zip(users_table, [user_id, user_name]))\n return jsonify(res), 201\n # 400 error\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"User already exists\"}]}), 400\n\n#Deleting a user\n@app.route('/bookmarking/', methods=['DELETE'])\ndef api_user_del(user_id):\n sql = 'DELETE FROM Users WHERE user_id=?;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute('SELECT * FROM Users WHERE user_id=?;', (user_id,)).fetchall()\n if res:\n cur.execute(sql, (user_id,))\n conn.commit()\n return '', 204\n else:\n return jsonify({\"reasons\":[{\"message\":\"User does not exists\"}]}), 404\n\n#Bookmarks\n#Getting all bookmarks\n@app.route('/bookmarking/bookmarks', methods=['GET'])\ndef api_bookmark_all():\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n sql = 'SELECT * FROM Bookmarks ORDER BY user_id ASC;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql).fetchall()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 200\n\n#Getting all bookmarks for a certain user\n@app.route('/bookmarking/bookmarks/', methods=['GET'])\ndef api_bookmark_certain(user_id):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? ORDER BY url ASC;'\n try:\n # sql을 이용할 경우\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql, (user_id,)).fetchall()\n conn.commit()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 200\n # 404 error\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"The user does not exist\"}]}), 404\n\n#Getting target bookmarks for a certain user\n@app.route('/bookmarking/bookmarks//', methods=['GET'])\ndef api_bookmark_target(user_id, url):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? 
AND url=?;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n if not res:\n return jsonify({\"reasons\":[{\"message\":\"The user does not exist\"}]}), 404\n else:\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 200\n\n#Adding one or more bookmark(s) for a user\n@app.route('/bookmarking//bookmarks', methods=['POST'])\ndef api_bookmark_add(user_id):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n data = request.get_json(silent=True)\n # url & user_id is essential / tags & text is not essential\n if not user_id:\n return jsonify({\"reasons\": [{\"message\": \"User does not exist\"}]}), 404\n try:\n urls = data['url']\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"Url does not exist\"}]}), 500\n try:\n tagss = data['tags']\n except Exception as e:\n print(e)\n tagss = [[] for i in range(len(urls))]\n try:\n texts = data['text']\n except Exception as e:\n print(e)\n text = [[] for i in range(len(urls))]\n try:\n sql = 'INSERT INTO Bookmarks(url, tags, text, user_id) VALUES(?, ?, ?, ?);'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n # if input data is more than one\n if str(type(urls)) == \"\":\n res = []\n already_exists = []\n for idx in range(len(urls)):\n url = urls[idx]\n tags = tagss[idx]\n text = texts[idx]\n try:\n cur.execute(sql, (url, tags, text, user_id,))\n conn.commit()\n res.append([url, tags, text, user_id])\n except Exception as e:\n print(e)\n return jsonify({\"reasons\": [{\"message\": \"User and url does already exist\"}]}), 400\n # else input data is one\n else:\n url = urls\n tags = tagss\n text = texts\n try:\n cur.execute(sql, (url, tags, text, user_id,))\n conn.commit()\n except Exception as e:\n print(e)\n return jsonify({\"reasons\": [{\"message\": \"User and url does already exist\"}]}), 400\n # test\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? AND url=?;'\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 201\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"The user or url does not exist\"}]}), 404\n\n#Updating the title/tag(s) for a bookmarks for a target user\n@app.route('/bookmarking//bookmarks/', methods=['PUT'])\ndef api_bookmark_update_delete(user_id, url):\n bookmarks_table = ['url', 'tags', 'text', 'user_id']\n data = request.get_json(silent=True)\n # url & user_id is essential / tags & text is not essential\n if not user_id or not url:\n return jsonify({\"reasons\":[{\"message\":\"Request is incorrect\"}]}), 500\n try:\n tags = data['tags']\n except Exception as e:\n print(e)\n tags = ''\n try:\n text = data['text']\n except Exception as e:\n print(e)\n text = ''\n try:\n sql = 'UPDATE Bookmarks SET tags=?, text=? WHERE url=? AND user_id=?;'\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n cur.execute(sql, (tags, text, url, user_id,))\n conn.commit()\n # test\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? 
AND url=?;'\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n bookmarks = [dict(zip(bookmarks_table, r)) for r in res]\n return jsonify({'count':len(res), 'bookmarks':bookmarks}), 201\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"The user or url does not exist\"}]}), 404\n\n#Deleting a bookmark for a target user\n@app.route('/bookmarking//bookmarks/', methods=['DELETE'])\ndef api_bookmark_del(user_id, url):\n try:\n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n sql = 'SELECT * FROM Bookmarks WHERE user_id=? AND url=?;'\n res = cur.execute(sql, (user_id, url,)).fetchall()\n conn.commit()\n if not res:\n return jsonify({\"reasons\": [{\"message\": \"The user or bookmark does not exist\"}]}), 404\n else:\n sql = 'DELETE FROM Bookmarks WHERE user_id=? AND url=?;'\n cur.execute(sql, (user_id, url,))\n conn.commit()\n return '', 204\n except Exception as e:\n print(e)\n return jsonify({\"reasons\":[{\"message\":\"Request in incorrect\"}]}), 500\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return \"
<h1>404</h1><p>The resource could not be found.</p>
\", 404\n\n@app.route('/bookmarking', methods=['GET'])\ndef api_filter():\n query_parameters = request.args\n url = query_parameters.get('url')\n tags = query_parameters.get('tags')\n text = query_parameters.get('text')\n user_id = query_parameters.get('user_id')\n\n query = \"SELECT * FROM bookmarks WHERE\"\n to_filter = []\n\n if url:\n query += ' url=? AND'\n to_filter.append(url)\n if tags:\n query += ' tags=? AND'\n to_filter.append(tags)\n if text:\n query += ' text=? AND'\n to_filter.append(text)\n if user_id:\n query += ' user_id=? AND'\n to_filter.append(user_id)\n if not (url or tags or text or user_id):\n return page_not_found(404)\n\n query = query[:-4] + ';'\n \n conn = sqlite3.connect('books.db')\n cur = conn.cursor()\n results = cur.execute(query, to_filter).fetchall()\n \n return jsonify(results)\n\napp.run()","sub_path":"os_unknown2/api_final.py","file_name":"api_final.py","file_ext":"py","file_size_in_byte":10213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"529225784","text":"class Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n #Time - O(n) ; n is length of s\n #Space - O(1) #as the number of alphabets are 26\n if len(p) > len(s) : #if length of s is less than p, return empty list.\n return []\n output = []\n hash_p = collections.Counter(p) # counter hashmap of characters of p\n hash_s = collections.Counter(s[:len(p)]) #counter hashmap of characters of s till length of p.\n #From here, we use sliding window technique.\n if hash_p == hash_s:\n output.append(0)\n for i in range(len(p), len(s)):\n #remove the first element and add the next element in s.\n if hash_s[s[i - len(p)]] > 1:\n hash_s[s[i - len(p)]] -= 1\n else:\n del(hash_s[s[i - len(p)]])\n if s[i] in hash_s:\n hash_s[s[i]] += 1\n else:\n hash_s[s[i]] = 1\n if hash_p == hash_s: #compare if both hashmaps are same, and append the start index if True\n output.append(i - len(p) + 1)\n return output","sub_path":"Week2/AllAnagramsInString.py","file_name":"AllAnagramsInString.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"7358195","text":"import time\nfrom lib import zabbix2canopsis\nfrom lib import event2amqp\n\n\nif __name__ == \"__main__\":\n while True:\n ZabixApi = zabbix2canopsis.ZabbixApi(configfile=\"zabbix.cfg\")\n CanoAmqp = event2amqp.EventCanopsis(configfile=\"zabbix.cfg\")\n eventlist = ZabixApi()\n for event in eventlist:\n print(event)\n CanoAmqp(event)\n time.sleep(45)\n print('Check beeing processed !')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"570522286","text":"# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\n\nimport matplotlib as mpl\n# mpl.use('TkAgg') # バックエンドでエラーが出る人用。Linuxでは多分要らない。\nimport json\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import date2num\nfrom mpl_finance import candlestick_ohlc\nimport pandas as pd\nimport numpy as np\n\n\ndef make_chart(currency_pair, data_path):\n \"\"\"\n parameters: currency_pair(str):通貨ペア\n data_path(path):保存場所\n \"\"\"\n with open(data_path, 'r') as fp:\n data_freq = 10 # 1分のデータをまとめて何分足のチャートにするか。単位は分。\n data = json.load(fp)\n le = len(data)\n idx = pd.date_range(data[0][0], data[le - 1][0], freq='1min')\n data = [data[i][1:] for i in range(len(data))]\n data2 = []\n for i in range(len(data)):\n data2.append(float(data[i][0]))\n data = np.array(data2) \n\n if len(data) != len(idx):\n with open('error_log.json', 'w') as file:\n file.write('error: data missing or overlapped')\n file.close()\n\n df = pd.Series(data, index=idx).resample('{0}min'.format(data_freq)).ohlc()\n df.plot()\n fig = plt.figure()\n ax = plt.subplot()\n\n xdate = date2num([x for x in df.index]) # Timestamp -> datetime\n ohlc = np.vstack((xdate, df.values.T)).T # datetime -> float\n\n candlestick_ohlc(ax, ohlc, width=1/24/60*data_freq, colorup='g', colordown='r', alpha=.4)\n\n ax.grid() # グリッド表示\n plt.ylabel(\"Price\")\n ax.set_xlim(df.index[0], df.index[-1]) # x軸の範囲\n fig.autofmt_xdate() # x軸のオートフォーマット\n plt.xlabel(\"Date\")\n plt.title(currency_pair)\n plt.savefig('chart.png') # チャートはpngとして保存する。\n","sub_path":"make_chart.py","file_name":"make_chart.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"371865278","text":"Height=input(\"Height: \")\r\n\r\n#Function to check, if the input is in range?\r\ndef check_input_range(height):\r\n\tif height<1 or height >8:\r\n\t\treturn False\r\n\telse:\r\n\t\tprint(height)\r\n\t\treturn True\r\n\r\n#Function to check, if the input is int?\r\ndef check_input_type(height):\r\n\ttry:\r\n\t\tHeight=int(height)\r\n\t\treturn True\r\n\texcept ValueError:\r\n\t\treturn False\r\n\t\t\r\n\r\n#Loop Checking\r\nwhile True:\r\n\tif check_input_type(Height):\r\n\t\tbreak\r\n\telse:\r\n\t\tHeight=input(\"Height: \")\r\n\r\n\r\n\r\nwhile True:\r\n\tif check_input_range(int(Height)):\r\n\t\tbreak\r\n\telse:\r\n\t\tHeight=input(\"Height: \")\r\n\r\n\r\n#Building The Pyramid\r\nHeight=int(Height)\r\nfor i in range(Height+1):\r\n\ttemp=Height-i\r\n\tfor j in range(temp):\r\n\t\tprint(\" \", end=\"\") #for printing same line in python\r\n\tfor j in range(i):\r\n\t\tprint(\"#\", end=\"\")\r\n\tprint(\"\\n\")\t\r\n\t\r\n","sub_path":"mario_less.py","file_name":"mario_less.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"361191155","text":"from packs.multiscale.preprocess.dual_primal.create_dual_and_primal_mesh import MultilevelData\n\nimport pdb\nfrom packs.directories import data_loaded\nfrom run_compositional import run_simulation\nimport time\n\n\"\"\" ---------------- LOAD STOP CRITERIA AND MESH DATA ---------------------- \"\"\"\n\nname_current = 'current_compositional_results_'\nname_all = data_loaded['name_save_file'] + '_'\nmesh = 'mesh/' + data_loaded['mesh_name']\n\nif data_loaded['use_vpi']:\n stop_criteria = max(data_loaded['compositional_data']['vpis_para_gravar_vtk'])\nelse: stop_criteria = data_loaded['compositional_data']['maximum_time']\n\nloop_max = 1000\nrun_criteria = 0\nloop = 0\n\"\"\" 
----------------------------- RUN CODE --------------------------------- \"\"\"\n\nload = data_loaded['load_data']\nconvert = data_loaded['convert_english_to_SI']\n\nt = time.time()\nsim = run_simulation(name_current, name_all)\nM, data_impress, wells, fprop, load = sim.initialize(load, convert, mesh)\n\nmultilevel_structure = MultilevelData(data_impress, M)\n\nimport pdb; pdb.set_trace()","sub_path":"adm_impec-00/packs/tests/test_compositional_1.py","file_name":"test_compositional_1.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"552071707","text":"cities = eval(open('cities15000-smaller.txt',encoding=\"utf-8\").read())\nimport geohash_copied as gh\n\n\nct = [(cit1, cit2, latitude, longitude, country, cntr, pop, gh.encode(float(latitude), float(longitude))) for cit1, cit2, latitude, longitude, country, cntr, pop in cities[:5000]]\n\n\ndef sizeof_fmt(num, suffix=''):\n for unit in ['','K','M','G','T','P','E','Z']:\n if abs(num) < 1024.0:\n # return \"%3.1f%s%s\" % (num, unit, suffix)\n return \"%3.2f %s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Y', suffix)\n\nct.sort(key=lambda i:i[7])\nfrom pprint import pformat\n\nct_quick = [(latitude, longitude, gh[:4], country, cit1, sizeof_fmt(int(pop))) for cit1, cit2, latitude, longitude, country, cntr, pop, gh in ct]\nf = open('cities-1000-geohash-smallerl.nogit.txt', 'w', encoding=\"utf-8\")\n\nppf = pformat(ct_quick, width=200, indent = 4)\nf.write(ppf)\n\n# for cit1, cit2, latitude, longitude, country, cntr, pop, gh in ct:\n# print(gh[:4], country, cit1, sizeof_fmt(int(pop)))","sub_path":"_projlab/spatialite-geohash/geonames-geohash.py","file_name":"geonames-geohash.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"491735447","text":"\nfrom habitat_baselines.config.default import get_config\nfrom habitat_baselines.common.pepper_env import PepperRLExplorationEnv\nimport cv2\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nrgb_buffer = []\ndepth_buffer = []\nforward_step = 0.25\nturn_step = 0.1\n\ncfg = get_config()\nprint(cfg)\npepper_env = PepperRLExplorationEnv(cfg)\npepper_env.reset()\n\nkey = 0\nc_action = np.random.choice(3, 1, p=[0.8, 0.1, 0.1])[0]\ndefault_rot = np.random.choice(3, 1, p=[0, 0.5, 0.5])[0]\n\nvalues = []\nforward_enabled = True\nuser_forward_enabled = True\nnum_forward = 0\n\nactions = []\nstep = 0\nobservations, reward, done, info = \\\n pepper_env.reset()\nlast_pose = pepper_env.get_position()[0]\n\nx_p = []\ny_p = []\n\nx_o = []\ny_o = []\n\n\nplt.ion()\nplt.show()\n\nwhile key != ord('q'):\n step += 1\n last_action = c_action\n observations, reward, done, info = \\\n pepper_env.step(None, action={\"action\": c_action})\n pose = observations['robot_position']\n rot = observations['robot_rotation']\n sonar = observations['sonar']\n odom = observations['odom']\n\n\n gps_to_goal = observations['gps_with_pointgoal_compass']\n movement = np.linalg.norm(pose - last_pose)\n\n print(\"-\" * 100)\n print(\"Pose:\", pose)\n print(\"Odom:\", odom)\n print(\"Sonar:\", sonar)\n print(\"Movement:\", movement)\n print(\"STEP:\", step)\n\n x_p.append(pose[0])\n y_p.append(pose[1])\n x_o.append(odom[0][0])\n y_o.append(odom[0][1])\n\n plt.clf()\n plt.plot(x_p, y_p)\n plt.plot(x_o, y_o, \"--\")\n plt.pause(0.01)\n\n rgb = observations['rgb']\n depth = observations['depth']\n\n 
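    # cv2.imshow maps floating-point images onto [0, 1], so a depth frame in
    # metres renders mostly saturated. A display-only rescale (assumption:
    # depth is a float array rather than 8-bit) would be:
    #   depth_vis = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)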
cv2.imshow(\"RGB\", rgb)\n cv2.imshow(\"Depth\", depth)\n key = cv2.waitKey(500)\n\n if sonar < 0.9 or (last_action == 0 and movement < 0.15):\n forward_enabled = False\n print(\"Disabled forward\")\n else:\n forward_enabled = True\n print(\"Enabled forward\")\n\n if forward_enabled and user_forward_enabled:\n c_action = np.random.choice(3, 1, p=[0.8, 0.1, 0.1])[0]\n else:\n print(\"Forward is disabled\")\n c_action = default_rot\n print(\"Running with default rotation\")\n\n if c_action == 0:\n num_forward += 1\n else:\n num_forward = 0\n\n if num_forward == 3:\n print(\"Changing default rotation\")\n default_rot = np.random.choice(3, 1, p=[0, 0.5, 0.5])[0]\n num_forward = 0\n\n if key == ord('w'):\n user_forward_enabled = not user_forward_enabled\n #elif key == ord('a'):\n # c_action = 1\n #elif key == ord('d'):\n # c_action = 2\n\n values.append({\n \"rgb\": rgb,\n \"depth\": depth,\n \"odom_pose\": odom[0],\n \"odom_rot\": odom[1],\n \"position\": pose,\n \"rotation\": rot,\n \"action\": last_action,\n \"gps_to_goal_compass\": gps_to_goal,\n \"sonar\": sonar\n })\n last_pose = pose\n if step == 250:\n break\n\nimport datetime\nnow = datetime.datetime.now()\ndt_string = now.strftime(\"%d.%m.%Y %H:%M:%S\")\npickle.dump(values, open(dt_string + \"pepper_save.p\", \"wb\"))\npepper_env.close()\n\n","sub_path":"aimas/pepper_save.py","file_name":"pepper_save.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"469224531","text":"#!/usr/bin/env python\n\nimport argparse\nfrom array import array\n\nimport elias\n\ndef main():\n parser = argparse.ArgumentParser(description='Compress data')\n parser.add_argument(\n '-o', metavar='FILE', required=True,\n type=argparse.FileType('wb'),\n help='File to save compressed data to'\n )\n parser.add_argument(\n 'input', metavar='INPUT_FILE', type=argparse.FileType('rb'),\n help='File to compress'\n )\n\n args = parser.parse_args()\n\n raw_data = array('B', args.input.read())\n\n filetype = args.o.name.split('.')[-1]\n if filetype == 'eliasd':\n encoded = elias.compress(raw_data, elias.elias_delta_encode)\n elif filetype == 'unary':\n encoded = elias.compress(raw_data, elias.unary_encode)\n else:\n parser.error('Unknown file type: %s' % filetype)\n\n args.o.write(encoded)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ffcrunch/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"259998940","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\npath = os.getcwd()\npath\n\n\n# In[3]:\n\n\ntest = pd.read_csv(path + '/data/processed_date_data/test.csv')\nprint(test.shape) # (178028, 49)\ntest1 = test[(test['smart_5raw'] > 0) | \n (test['smart_187raw'] > 0) | \n (test['smart_188raw'] > 0) | \n (test['smart_197raw'] > 0) | \n (test['smart_198raw'] > 0)] # 筛选raw:5,187,188,197,198任一大于0\nprint(test1.shape) # (33111, 49)\ntg = test1.groupby([\"manufacturer\", \"model\", \"serial_number\"])['dt'].max().reset_index()\nprint(tg.shape) # 2213\ntg.head()\n\n\n# In[6]:\n\n\ntest = pd.read_csv(path + '/data/processed_date_data/test.csv')\nprint(test.shape) # (178028, 49)\ntest1 = test[(test['smart_5_normalized'] < 100) | \n (test['smart_187_normalized'] < 100) | \n (test['smart_188_normalized'] < 100) | \n (test['smart_197_normalized'] < 100) | \n 
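             # SMART 5 (reallocated sectors), 187 (reported uncorrectable),
             # 188 (command timeout), 197 (pending sectors) and 198 (offline
             # uncorrectable) are the classic failure indicators; a normalized
             # value below 100 on any of them flags degradation.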
(test['smart_198_normalized'] < 100)] # 筛选raw:5,187,188,197,198任一大于0\nprint(test1.shape) # \ntg = test1.groupby([\"manufacturer\", \"model\", \"serial_number\"])['dt'].max().reset_index()\nprint(tg.shape) # \ntg.head()\n\n\n# In[7]:\n\n\n# 把最终结果里的dt转换成test里最晚出现的时间\ntest = pd.read_csv(path + '/data/processed_date_data/test.csv')\ntd = test[test['model']==1]\ntd = td[['serial_number','dt']].groupby(['serial_number'])['dt'].max().reset_index()\nprint(td.shape)\ntr = tg.merge(td, how='left', on='serial_number')\nprint(tr.shape)\ntr['dt'] = tr['dt_y']\ndel tr['dt_x']\ndel tr['dt_y']\ntr.drop_duplicates(inplace=True)\ntr.reset_index(drop=True, inplace=True)\ntr.to_csv(path + '/result/result_rules_max_test141.csv', index=False, header=None)\nprint(tr.shape)\ntr.head()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"code/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"94137600","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 4 14:20:16 2021\n\n@author: pc\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsRegressor\nimport matplotlib.pyplot as plt\n\n# Initialize data of lists\ndata = {'x1':[0, 0.4, 0.7, 0.5, 0.5, 0.6, 0.3, 0.1, 0.8, 0.8]\n ,'x2':[0.6, 0.4, 0.8, 0.2, 0.8, 0, 0.2, 0.6, 0.8, 0]\n ,'y':[-0.6, -0.6, 0.6, 1.8, 1.2, 1.2, 1.4, 0.6, 1.8, 1.6]}\n\n# using dictionary to convert specific columns\nconvert_dict = {'x1': float,\n 'x2': float,\n 'y': float\n }\n\n# create data\ndf = pd.DataFrame(data)\ndf = df.astype(convert_dict)\nprint(df.dtypes)\n\n#Specify the data\nx = df[['x1', 'x2']]\ny = df['y']\n\n# Find optimal number of neighbours\nresult = pd.DataFrame()\nmax_neighbors = x.shape[0]\n\nfor k in range(max_neighbors):\n kNNSpec = KNeighborsRegressor(n_neighbors = (k+1), metric = 'chebyshev')\n nbrs = kNNSpec.fit(x, y)\n pred_y = nbrs.predict(x)\n error_y = y - pred_y\n sse_y = np.sum(np.absolute(error_y))\n result = result.append([[(k+1), sse_y]], ignore_index = True)\n \nresult = result.rename(columns = {0: 'Number of Neighbors', 1: 'Sum of Squared Error'})\n\nplt.scatter(result['Number of Neighbors'], result['Sum of Squared Error'])\nplt.xlabel('Number of Neighbors')\nplt.ylabel('Sum of Squared Error')\nplt.xticks(np.arange(1,max_neighbors+1,1))\nplt.grid(axis = 'both')\nplt.show()\n\nsuggested_neighbor = result.nsmallest(2, 'Sum of Squared Error').tail(1).reset_index(drop=True).loc[0]['Number of Neighbors']\n\nprint(f'The number of neighbors that yields the smallest criterion is k = {suggested_neighbor}')","sub_path":"Final Exam/My Try/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"373201922","text":"# 入力された文の一定以上の文字数の単語を並び変えるやつ\r\nimport random\r\n\r\ndef shuffle(str):\r\n result = []\r\n for word in str.split():\r\n if len(word) > 4: # 長さが4超であればシャッフル\r\n word = word[:1] + ''.join(random.sample(word[1:-1], len(word) - 2)) + word[-1:]\r\n result.append(word)\r\n\r\n return ' '.join(result)\r\n\r\nstr = input('文章を入力: ')\r\nstr = shuffle(str)\r\n\r\nprint(str)","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"545152959","text":"import numpy as np\nimport astropy.io.fits as pyfits\n#from surveysim.utils import angsep\nfrom surveysim.exposurecalc import 
airMassCalculator\nfrom surveysim.avoidobject import avoidObject, moonLoc\nfrom surveysim.utils import mjd2lst\nfrom surveysim.observefield import setup_time\nfrom datetime import datetime\nfrom astropy.time import Time\nfrom desitarget.targetmask import obsconditions as obsbits\n\nMAX_AIRMASS = 10.0 #3.0 This new bound effectively does nothing.\nMIN_MOON_SEP = 90.0\nMIN_MOON_SEP_BGS = 5.0\n\ndef nextFieldSelector(obsplan, mjd, conditions, tilesObserved, slew, previous_ra, previous_dec):\n \"\"\"\n Returns the first tile for which the current time falls inside\n its assigned LST window and is far enough from the Moon and\n planets.\n\n Args:\n obsplan: string, FITS file containing the afternoon plan\n mjd: float, current time\n conditions: dictionnary containing the weather info\n tilesObserved: list containing the tileID of all completed tiles\n slew: bool, True if a slew time needs to be taken into account\n previous_ra: float, ra of the previous observed tile (degrees)\n previous_dec: float, dec of the previous observed tile (degrees)\n\n Returns:\n target: dictionnary containing the following keys:\n 'tileID', 'RA', 'DEC', 'Program', 'Ebmv', 'maxLen',\n 'MoonFrac', 'MoonDist', 'MoonAlt', 'DESsn2', 'Status',\n 'Exposure', 'obsSN2', 'obsConds'\n overhead: float (seconds)\n \"\"\"\n\n hdulist = pyfits.open(obsplan)\n tiledata = hdulist[1].data\n moonfrac = hdulist[0].header['MOONFRAC']\n tileID = tiledata['TILEID']\n tmin = tiledata['LSTMIN']\n tmax = tiledata['LSTMAX']\n explen = tiledata['MAXEXPLEN']/240.0\n ra = tiledata['RA']\n dec = tiledata['DEC']\n program = tiledata['PROGRAM']\n obsconds = tiledata['OBSCONDITIONS']\n\n lst = mjd2lst(mjd)\n dt = Time(mjd, format='mjd')\n found = False\n for i in range(len(tileID)):\n dra = np.abs(ra[i]-previous_ra)\n if dra > 180.0:\n dra = 360.0 - dra\n ddec = np.abs(dec[i]-previous_dec)\n overhead = setup_time(slew, dra, ddec)\n t1 = tmin[i] + overhead/240.0\n t2 = tmax[i] - explen[i]\n\n if ( ((t1 <= t2) and (lst > t1 and lst < t2)) or ( (t2 < t1) and ((lst > t1 and t1 <=360.0) or (lst >= 0.0 and lst < t2))) ):\n if (avoidObject(dt.datetime, ra[i], dec[i]) and airMassCalculator(ra[i], dec[i], lst) < MAX_AIRMASS):\n moondist, moonalt, moonaz = moonLoc(dt.datetime, ra[i], dec[i])\n if ( (len(tilesObserved) > 0 and tileID[i] not in tilesObserved['TILEID']) or len(tilesObserved) == 0 ):\n if (( (moonalt < 0.0 and (obsconds[i] & obsbits.mask('DARK')) != 0) ) or\n (moonalt >=0.0 and\n (( (moonfrac < 0.2 or (moonalt*moonfrac < 12.0)) and moondist > MIN_MOON_SEP and (obsconds[i] & obsbits.mask('GRAY')) != 0 ) or\n ( (obsconds[i] & obsbits.mask('BRIGHT')) != 0 and moondist > MIN_MOON_SEP_BGS) ))):\n found = True\n break\n\n if found == True:\n tileID = tiledata['TILEID'][i]\n RA = ra[i]\n DEC = dec[i]\n Ebmv = tiledata['EBV_MED'][i]\n maxLen = tiledata['MAXEXPLEN'][i]\n DESsn2 = 100.0 # Some made-up number -> has to be the same as the reference in exposurecalc.py\n status = tiledata['STATUS'][i]\n exposure = -1.0 # Updated after observation\n obsSN2 = -1.0 # Idem\n target = {'tileID' : tileID, 'RA' : RA, 'DEC' : DEC, 'Program': program[i], 'Ebmv' : Ebmv, 'maxLen': maxLen,\n 'MoonFrac': moonfrac, 'MoonDist': moondist, 'MoonAlt': moonalt, 'DESsn2': DESsn2, 'Status': status,\n 'Exposure': exposure, 'obsSN2': obsSN2, 'obsConds': obsconds[i]}\n else:\n target = None\n return target, 
overhead\n\n","sub_path":"py/surveysim/nextobservation.py","file_name":"nextobservation.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"118677844","text":"#!/usr/bin/env python3\n#\n# Copyright AlertAvert.com (c) 2017. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nfrom copy import deepcopy\nfrom flask import (\n Flask,\n make_response,\n redirect,\n render_template,\n request,\n url_for,\n)\nimport json\nimport os\nfrom werkzeug.utils import secure_filename\n\nfrom elasticsearch_connector import ElasticsearchConnector\n\n\nDOCTYPE = 'plants'\nINDEX_NAME = 'cfgreendesign'\nUPLOAD_FOLDER = '/tmp'\n\n\ndef load_template():\n with open('templates/query.json') as template:\n return json.load(template)\n\n\nTEMPLATE = load_template()\nHEADERS = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n}\nALLOWED_EXTENSIONS = {'xls', 'xlsx'}\n\nMAPPING = [\n {\"key\": \"botanical_name\", \"caption\": \"Botanical\", \"width\": \"10%\"},\n {\"key\": \"common_name\", \"caption\": \"Name\", \"width\": \"10%\"},\n {\"key\": \"description\", \"caption\": \"Description\", \"width\": \"20%\"},\n {\"key\": \"flowering_months\", \"caption\": \"Flowering\", \"width\": \"10%\"},\n {\"key\": \"quantity\", \"caption\": \"Qty\", \"width\": \"5%\"},\n {\"key\": \"plant_size_at_maturity\", \"caption\": \"Plant size at Maturity\", \"width\": \"10%\"},\n {\"key\": \"qualify_for_rebate\", \"caption\": \"Rbt\", \"width\": \"5%\"},\n {\"key\": \"water_needs_according_to_wucols\", \"caption\": \"Water\", \"width\": \"5%\"},\n {\"key\": \"pot_size\", \"caption\": \"Pot\", \"width\": \"5%\"},\n {\"key\": \"native\", \"caption\": \"Native\", \"width\": \"5%\"},\n {\"key\": \"light\", \"caption\": \"Light\", \"width\": \"5%\"},\n {\"key\": \"attract_butterflies\", \"caption\": \"Attract\", \"width\": \"10%\"},\n {\"key\": \"classification\", \"caption\": \"Cat\", \"width\": \"5%\"},\n {\"key\": \"attract_bees\", \"caption\": \"Bees?\", \"width\": \"5%\"}\n]\n\nPLANT_KEYS = [\n {\"key\": \"classification\", \"caption\": \"Classification\"},\n {\"key\": \"description\", \"caption\": \"Description\"},\n {\"key\": \"full_description\", \"caption\": \"Full Description\"},\n {\"key\": \"flowering_months\", \"caption\": \"Flowering Months\"},\n {\"key\": \"quantity\", \"caption\": \"Quantity\"},\n {\"key\": \"plant_size_at_maturity\", \"caption\": \"Plant size at Maturity\"},\n {\"key\": \"water_needs_according_to_wucols\", \"caption\": \"Water needs according to WUCOLS\"},\n {\"key\": \"pot_size\", \"caption\": \"Recommended Pot Size\"},\n {\"key\": \"native\", \"caption\": \"California Native?\"},\n {\"key\": \"light\", \"caption\": \"Light\"},\n {\"key\": \"attract_butterflies\", \"caption\": \"Attracts birds & hummingbirds?\"},\n {\"key\": \"attract_bees\", \"caption\": \"Attracts Bees?\"},\n {\"key\": \"qualify_for_rebate\", \"caption\": \"Qualifies for 
Rebate?\"},\n]\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef entry():\n filename = request.args.get('filename')\n errmsg = request.args.get('errmsg')\n upload_id = request.args.get('upload_id')\n return render_template('search.html', uploaded_file=filename, msg=errmsg, upload_id=upload_id)\n\n\n@app.route('/search')\ndef search():\n query_args = request.args.get('q')\n offset = int(request.args.get('offset', 0))\n size = int(request.args.get('size', 25))\n query = build_query(query_args, offset, size)\n connector = app.config['ES_HOST']\n response = connector.search_for(query)\n if response.ok:\n return render_template(\"results.html\",\n results=process_results(response.json()),\n meta=MAPPING)\n return make_response(\"Failed: {}\".format(response.reason), 400)\n\n\n@app.route('/plant/')\ndef get_plant(id):\n connector = app.config['ES_HOST']\n response = connector.find_one(id)\n if response.ok:\n return render_template(\"plant.html\", plant=response.json().get(\"_source\"), meta=PLANT_KEYS)\n else:\n return redirect(url_for('entyr', errmsg=\"Could not find plant ({} id missing)\".format(id)))\n\n\n@app.route('/import', methods=['POST'])\ndef upload_file():\n # check if the post request has the file part\n file = request.files.get('importFile')\n if not file:\n app.logger.error(\"Missing import file name: \", request.files)\n return redirect(url_for('entry', errmsg=\"Missing file\"))\n app.logger.info(\"Uploading file '%s'\", file.filename)\n # if user does not select file, browser also\n # submit a empty part without filename\n filename = secure_filename(file.filename)\n if filename == '' or not allowed_file(filename):\n app.logger.error(\"Not a valid filename: %s\", filename)\n return redirect(url_for('entry', errmsg=\"You must select a valid Excel file\"))\n\n local_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(local_path)\n app.logger.info(\"Uploading data to Elasticsearch server\")\n try:\n connector = app.config['ES_HOST']\n stats_id = connector.rebuild_index(local_path)\n return redirect(url_for('entry', filename=filename, upload_id=stats_id))\n except Exception as ex:\n app.logger.error(\"Cannot upload file: {}. Reason: {}\".format(filename, ex))\n return redirect(url_for('entry', errmsg=\"Error while importing data from '{}' ({})\".format(\n filename, ex)))\n\n\n@app.route('/uploads/')\ndef show_metadata(upload_id):\n connector = app.config['ES_HOST']\n response = connector.find_metadata(upload_id)\n if response.ok:\n return render_template(\"metadata.html\", metadata=response.json().get(\"_source\"))\n else:\n return redirect(url_for('entry', errmsg=\"Could not find details for upload ({} id \"\n \"missing)\".format(upload_id)))\n\n\n\ndef process_results(results):\n items = list()\n if \"hits\" in results:\n if \"hits\" in results.get(\"hits\"):\n hits = results[\"hits\"][\"hits\"]\n for hit in hits:\n item = hit.get(\"_source\")\n item['id'] = hit[\"_id\"]\n items.append(item)\n return items\n\n\ndef build_query(search_terms, offset=0, size=25):\n query = deepcopy(TEMPLATE)\n query[\"query\"][\"multi_match\"][\"query\"] = search_terms\n query['from'] = offset\n query['size'] = size\n return query\n\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--debug\", action='store_true', default=False)\n parser.add_argument(\"--host\")\n parser.add_argument(\"--port\", type=int, default=8000)\n parser.add_argument(\"--es_host\", required=True)\n parser.add_argument(\"--es_port\", type=int, required=True)\n parser.add_argument(\"--secret\", required=True)\n parser.add_argument(\"--workdir\", default=UPLOAD_FOLDER)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n cfg = parse_args()\n app.config['ES_HOST'] = ElasticsearchConnector(INDEX_NAME, DOCTYPE,\n host=cfg.es_host,\n port=cfg.es_port)\n app.config['UPLOAD_FOLDER'] = cfg.workdir\n app.config['SECRET'] = cfg.secret\n app.secret_key = cfg.secret\n\n app.run(host=cfg.host, port=cfg.port, debug=cfg.debug)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"279575154","text":"from django.contrib.auth.models import Permission,Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom fa_system.models import CustomUser\n\ninvestor=Group.objects.get(name='investors')\nbranch=Group.objects.get(name='branches')\nanalyst=Group.objects.get(name='analysts')\nfinDep=Group.objects.get(name='finDep')\n\n\nct=ContentType.objects.get_for_model(CustomUser)\nPermission.objects.create(\n codename='asInvestor',\n name='as investor',\n content_type=ct,\n)\npInv=Permission.objects.get(name='as investor')\n\nPermission.objects.create(\n codename='asBranch',\n name='as branch',\n content_type=ct,\n)\npBra=Permission.objects.get(name='as branch')\n\nPermission.objects.create(\n codename='asAnalyst',\n name='as analyst',\n content_type=ct,\n)\npAna=Permission.objects.get(name='as analyst')\n\nPermission.objects.create(\n codename='asFinDep',\n name='as finDep',\n content_type=ct,\n)\npFin=Permission.objects.get(name='as finDep')\n\ninvestor.permissions.add(pInv)\nbranch.permissions.add(pBra)\nanalyst.permissions.add(pAna)\nfinDep.permissions.add(pFin)\nprint('Done')\n\n\n","sub_path":"createPermissionsAssignToGroup.py","file_name":"createPermissionsAssignToGroup.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"295549112","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\nfrom apps.restaurants.models import Restaurant\n\nUser = get_user_model()\n\n\nclass Review(models.Model):\n\n NOTATION = (\n (1, \"1 star, could do better\"),\n (2, \"2 stars, experience is ok\"),\n (3, \"3 stars, expect a good lunch\"),\n (4, \"4 stars, great restaurant\"),\n (5, \"5 stars, WOW experience\"),\n )\n\n content = models.TextField(\n verbose_name=\"review content\"\n )\n rating = models.IntegerField(\n verbose_name=\"review rating\",\n choices=NOTATION\n )\n date_created = models.DateTimeField(\n verbose_name=\"created time\",\n auto_now_add=True\n )\n date_modified = models.DateTimeField(\n verbose_name=\"modified\",\n auto_now=True\n )\n idUser = models.ForeignKey(\n to=User,\n related_name=\"fk_Review_to_User\",\n on_delete=models.CASCADE\n )\n idRestaurant = models.ForeignKey(\n to=Restaurant,\n related_name=\"fk_Review_to_Restaurant\",\n on_delete=models.CASCADE\n )\n likes = models.ManyToManyField(\n to=User,\n related_name='review_likes',\n blank=True\n )\n\n class Meta:\n 
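        # Default ordering: newest-modified reviews first in every queryset.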
ordering = ['-date_modified']\n\n def __str__(self):\n return f'Review #{self.id}'","sub_path":"backend/apps/reviews/models/models_reviews.py","file_name":"models_reviews.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"556281192","text":"import torch\nimport logging\nimport gzip\nimport os\nimport csv\nfrom torch.utils.data import Dataset\nfrom typing import List\nfrom typing import Union, List\nfrom tqdm import tqdm\nimport logging\n\n\nclass InputExample:\n \"\"\"\n Structure for one input example with texts, the label and a unique id\n \"\"\"\n def __init__(self, guid: str, texts: List[str], label: Union[int, float]):\n \"\"\"\n Creates one InputExample with the given texts, guid and label\n\n str.strip() is called on both texts.\n\n :param guid\n id for the example\n :param texts\n the texts for the example\n :param label\n the label for the example\n \"\"\"\n self.guid = guid\n self.texts = [text.strip() for text in texts]\n self.label = label\n\n\nclass LabeledSTSDataReader(object):\n \"\"\"Semantic Textual Similarity data reader\"\"\"\n def __init__(self, s1_col_idx=0, s2_col_idx=1, score_col_idx=2, delimiter=\"\\t\", dataset_folder=None, \n quoting=csv.QUOTE_NONE, normalize_scores=False, min_score=0, max_score=1):\n self.dataset_folder = dataset_folder\n self.score_col_idx = score_col_idx\n self.s1_col_idx = s1_col_idx\n self.s2_col_idx = s2_col_idx\n self.delimiter = delimiter\n self.quoting = quoting\n self.normalize_scores = normalize_scores\n self.min_score = min_score\n self.max_score = max_score\n\n def get_examples(self, filename, max_examples=0, skip_head=False, predict_mode=False):\n \"\"\"\n filename specified which data split to use (train.csv, dev.csv, test.csv).\n \"\"\"\n if self.dataset_folder is not None:\n filepath = os.path.join(self.dataset_folder, filename)\n else:\n filepath = filename\n with open(filepath, encoding=\"utf-8\") as fIn:\n data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)\n examples = []\n for id, row in enumerate(data):\n if skip_head == True and id == 0:\n continue\n if predict_mode == False:\n score = int(row[self.score_col_idx])\n else:\n score = 0\n\n s1 = row[self.s1_col_idx]\n s2 = row[self.s2_col_idx]\n examples.append(InputExample(guid=filename+str(id), texts=[s1, s2], label=score))\n if id < 10:\n logging.info(\"Example idx:%d\\ntexts:%s\\t%s\\nlabel:%d\"%(id, s1, s2, score)) \n\n if max_examples > 0 and len(examples) >= max_examples:\n break\n\n return examples\n\n\nclass SentencesDataset(Dataset):\n \"\"\"\n Dataset for smart batching, that is each batch is only padded to its longest sequence instead of padding all\n sequences to the max length.\n The SentenceBertEncoder.smart_batching_collate is required for this to work.\n SmartBatchingDataset does *not* work without it.\n \"\"\"\n def __init__(self, examples: List[InputExample], model, show_progress_bar: bool = None):\n \"\"\"\n Create a new SentencesDataset with the tokenized texts and the labels as Tensor\n \"\"\"\n if show_progress_bar is None:\n show_progress_bar = (logging.getLogger().getEffectiveLevel() == logging.INFO or logging.getLogger().getEffectiveLevel() == logging.DEBUG)\n self.show_progress_bar = show_progress_bar\n\n self.convert_input_examples(examples, model)\n\n def convert_input_examples(self, examples: List[InputExample], model):\n \"\"\"\n Converts input examples to a SmartBatchingDataset usable to train the model with\n 
SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader\n\n smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.\n\n :param examples:\n the input examples for the training\n :param model\n the Sentence BERT model for the conversion\n :return: a SmartBatchingDataset usable to train the model with SentenceTransformer.smart_batching_collate as the collate_fn\n for the DataLoader\n \"\"\"\n num_texts = len(examples[0].texts)\n inputs = [[] for _ in range(num_texts)]\n labels = []\n too_long = [0] * num_texts\n label_type = None\n iterator = examples\n max_seq_length = model.get_max_seq_length()\n\n if self.show_progress_bar:\n iterator = tqdm(iterator, desc=\"Convert dataset\")\n\n for ex_index, example in enumerate(iterator):\n if label_type is None:\n if isinstance(example.label, int):\n label_type = torch.long\n elif isinstance(example.label, float):\n label_type = torch.float\n tokenized_texts = [model.tokenize(text) for text in example.texts]\n\n for i, token in enumerate(tokenized_texts):\n if max_seq_length != None and max_seq_length > 0 and len(token) >= max_seq_length:\n too_long[i] += 1\n\n labels.append(example.label)\n for i in range(num_texts):\n inputs[i].append(tokenized_texts[i])\n\n tensor_labels = torch.tensor(labels, dtype=label_type)\n\n logging.info(\"Num sentences: %d\" % (len(examples)))\n for i in range(num_texts):\n logging.info(\"Sentences {} longer than max_seqence_length: {}\".format(i, too_long[i]))\n\n self.tokens = inputs\n self.labels = tensor_labels\n\n def __getitem__(self, item):\n return [self.tokens[i][item] for i in range(len(self.tokens))], self.labels[item]\n\n def __len__(self):\n return len(self.tokens[0])","sub_path":"transformer_encoder/data_util/LabeledSTSDataUtil.py","file_name":"LabeledSTSDataUtil.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"409757930","text":"__author__ = 'nsrivas3'\n\nclass Solution:\n # @param {integer} n\n # @return {boolean}\n def isHappy(self, n):\n notHappy = 0\n iterlist = []\n\n def check(n):\n sum1 = 0\n while n!=0:\n sum1 = sum1 + (n%10)**2\n n = int(n/10)\n n = sum1\n # print(sum1)\n return(n)\n\n while (n!=1) and (notHappy!=1):\n n = check(n)\n print(\"n: \"+str(n)+\" NotHappy: \"+str(notHappy)+\" iterlist: \"+str(iterlist))\n for I in iterlist:\n if n==I:\n print(\"n: \"+str(n)+\" NotHappy: \"+str(notHappy)+\" ChkRslt \"+str(n==I))\n notHappy = 1\n break\n else:\n print(\"n: \"+str(n)+\" NotHappy: \"+str(notHappy)+\" ChkRslt \"+str(n==I))\n iterlist.append(n)\n\n if n==1: return(True)\n elif notHappy == 1: return(False)\n\nSol1 = Solution()\nprint(Sol1.isHappy(7))\n\n","sub_path":"Closed Questions/IsHappy_v2.py","file_name":"IsHappy_v2.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"30316493","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\n#python img_data.py ./data/casia_ds_Expert.txt ./data/casia_ds\n\ndef FitToDistr01(amounm_array):\n\tmax_t = max(amounm_array)\n\tmin_t = min(amounm_array)\n\tinit_range = max_t - min_t\n\treturn [(init_val - min_t) / init_range for init_val in amounm_array]\n\n\ndef PersonEyeDistr(same, diff):\n\t#same = [float(format(i, '.3f')) for i in same]\n\tuniq_dist_same = list(set(same))\n\tuniq_dist_same.sort()\n\t\n\t#diff = [float(format(i, '.3f')) for i in 
diff]\n\tuniq_dist_diff = list(set(diff))\n\tuniq_dist_diff.sort()\n\t\n\tall_amount = len(same) + len(diff)\n\tsame_am = [same.count(i) / all_amount for i in uniq_dist_same]\n\tdiff_am = [diff.count(i) / all_amount for i in uniq_dist_diff]\n\t\n\treturn uniq_dist_same, FitToDistr01(same_am), uniq_dist_diff, FitToDistr01(diff_am)\n\nsame = []\nwith open('same.txt', 'r') as data_file:\n\tfor line in data_file:\n\t\tsame += [int(i) for i in line.split()]\ndiff = []\nwith open('diff.txt', 'r') as data_file:\n\tfor line in data_file:\n\t\tdiff += [int(i) for i in line.split()]\n\nsame_len = len(same)\ndiff_len = len(diff)\n\nsame = FitToDistr01(same)\ndiff = FitToDistr01(diff)\n\nsame.sort()\ndiff.sort()\n\nprint('same_arr_len -> ', len(same), 'avg ->', sum(i for i in same) / len(same))\nprint('diff_arr_len -> ', len(diff), 'avg ->', sum(i for i in diff) / len(diff))\n\n\n\nsame_dist, same_am, diff_dist, diff_am = PersonEyeDistr(same, diff)\n\nplt.figure(figsize=(10,6))\nplt.title(\"Left and Right eyes\")\nplt.xlabel(r\"$\\rho$\", fontsize=20)\nplt.ylabel(\"amount\", fontsize=18)\nplt.plot(same_dist, same_am, 'ro', label=\"same person dist\")\nplt.plot(diff_dist, diff_am, 'bo', label=\"diff person dist\")\nplt.legend(loc=\"best\")\nplt.show()","sub_path":"distr.py","file_name":"distr.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"222754910","text":"import math\r\n\r\nclass PGaussSolver:\r\n def __init__(self, fp, a, b, n):\r\n self.m_fp = fp\r\n self.m_A = a\r\n self.m_B = b\r\n self.m_N = n\r\n self.Result = 0\r\n\r\n def execute(self):\r\n integral = 0\r\n for i in range(1, self.m_N+1):\r\n t = self.legendreZeroes(self.m_N, i)\r\n # map each Gauss-Legendre node from [-1, 1] onto [A, B] before evaluating f\r\n x = 0.5*(self.m_B-self.m_A)*t + 0.5*(self.m_A+self.m_B)\r\n integral += self.m_fp(x) * self.weight(self.m_N, t)\r\n self.Result = ((self.m_B-self.m_A)/2.0)*integral\r\n\r\n def getResult(self):\r\n return self.Result\r\n\r\n def legendre(self, m_N, x):\r\n if m_N == 0:\r\n return 1\r\n elif m_N == 1:\r\n return x\r\n else:\r\n return ((2.0*m_N-1)/m_N)*x*self.legendre(m_N-1, x)-((1.0*m_N-1)/m_N)*self.legendre(m_N-2, x)\r\n\r\n def dLegendre(self, m_N, x):\r\n d = (1.0*m_N/(x*x-1))*((x*self.legendre(m_N, x))-self.legendre(m_N-1, x))\r\n return d\r\n\r\n def legendreZeroes(self, m_N, i):\r\n xnew = 0\r\n xold = 0\r\n pi = math.pi\r\n xold = math.cos(pi*(i-1/4.0)/(m_N+1/2.0))\r\n xnew = xold - self.legendre(m_N, xold)/self.dLegendre(m_N, xold)\r\n # 1 + |dx| > 1 is a machine-precision test: Newton iteration stops once the step underflows\r\n while (1+abs(xnew-xold)>1.0):\r\n xold = xnew\r\n xnew = xold - self.legendre(m_N, xold)/self.dLegendre(m_N, xold)\r\n return xnew\r\n\r\n def weight(self, m_N, x):\r\n w = 2/((1-x**2)*(self.dLegendre(m_N, x)**2))\r\n return w","sub_path":"AP-HW5-9523124/Q6/PGaussSolver.py","file_name":"PGaussSolver.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"199694653","text":"import gym\nfrom gym.wrappers import Monitor\nimport glob\nimport io\nimport base64\nfrom pyvirtualdisplay import Display\n\nfrom gym import wrappers\nfrom IPython import display\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf\nfrom tensorflow.keras import layers,models\n\nvirtual_display = Display(visible=0, size=(1400, 900))\nvirtual_display.start()\n\n\ndef query_environment(name):\n env = gym.make(name)\n spec = gym.spec(name)\n print(f\"Action Space: {env.action_space}\")\n print(f\"Observation Space: 
{env.observation_space}\")\n print(f\"Max Episode Steps: {spec.max_episode_steps}\")\n print(f\"Nondeterministic: {spec.nondeterministic}\")\n print(f\"Reward Range: {env.reward_range}\")\n print(f\"Reward Threshold: {spec.reward_threshold}\")\n\nMEMORYLEN=int(10000)\nBATCHSIZE=64\nEPOCHS=1\n# UPDATE_EVERY = 4\n\n\nclass DQNAgent():\n def __init__(self,actions=2,obs=4):\n self.actions=actions\n self.observations=obs\n self.model=self.load_model()\n\n self.memory=deque(maxlen=MEMORYLEN)\n self.gamma=0.99\n self.patience=0\n self.target_model=self.load_model()\n \n self.copy_weights()\n self.a=0.75\n self.b=0\n \n def play(self,observation,epsilon):\n # epsilon-greedy: act greedily once enough experience is stored, otherwise explore\n if (len(self.memory) >= BATCHSIZE) and (np.random.random() > epsilon):\n# print(\"model\")\n action=self.model_predictions(observation)\n else:\n action=np.random.randint(low=0,high=self.actions)\n return action\n \n def step(self,state, action, reward, next_state, done):\n self.memory.append([state, action, reward, next_state, done])\n if ((len(self.memory)>=BATCHSIZE) & (np.random.random() < 0.25 )):\n self.train_model()\n pass\n \n def load_memory_with_probs(self):\n mem=np.array(list(self.memory))\n y_pred=self.model.predict(np.stack(mem[:,0]))\n \n \n data=np.array(mem)\n \n state, action, reward, next_state, done=np.stack(data[:,0]),np.stack(data[:,1]),np.stack(data[:,2]),np.stack(data[:,3]),np.stack(data[:,4])\n qnext_max=np.max(self.target_model.predict(next_state),axis=1)\n qnext_max=reward+ self.gamma*qnext_max*(1-done)\n qtable_to_update=self.target_model.predict(state)\n for indx,qs in enumerate(qtable_to_update):\n qtable_to_update[indx,action[indx]]=qnext_max[indx]\n# self.model.fit(state,qtable_to_update,epochs=1,verbose=0)\n y_pred=self.model.predict(state)\n errors=[]\n for i in range(y_pred.shape[0]):\n errors.append(np.abs(y_pred[i,action[i]] - qtable_to_update[i,action[i]]))\n \n errors=[(error+0.1)**self.a for error in errors]\n sig_p=sum(errors)\n errors=[error/sig_p for error in errors]\n \n# print(data.shape,len(errors))\n mem=np.hstack([data,np.array(errors).reshape(-1,1)])\n return mem\n \n def train_model(self):\n memory=self.load_memory_with_probs()\n rnd_indices = np.random.choice(len(memory), size=BATCHSIZE,p=memory[:,5].astype('float64'))\n data=np.array(memory)[rnd_indices]\n np.random.shuffle(data)\n \n state, action, reward, next_state, done=np.stack(data[:,0]),np.stack(data[:,1]),np.stack(data[:,2]),np.stack(data[:,3]),np.stack(data[:,4])\n qnext_max=np.max(self.target_model.predict(next_state),axis=1)\n qnext_max=reward+ self.gamma*qnext_max*(1-done)\n qtable_to_update=self.target_model.predict(state)\n for indx,qs in enumerate(qtable_to_update):\n qtable_to_update[indx,action[indx]]=qnext_max[indx]\n# print(data[:5])\n importance=[(1/p)*(1/len(memory))**self.b for p in data[:,5]]\n self.model.fit(state,qtable_to_update,epochs=1,verbose=0,sample_weight=np.array(importance))\n self.patience+=1\n if self.patience==10:\n self.copy_weights()\n self.patience=0\n \n pass\n def model_predictions(self,observation):\n pred=self.model.predict(observation.reshape(1,-1))\n pred=np.argmax(pred)\n return pred\n \n def load_model(self):\n num_input = layers.Input(shape=(self.observations, ))\n x = layers.Dense(24,activation=\"relu\")(num_input)\n# x = layers.BatchNormalization()(x)\n# x = layers.Dropout(0.1)(x)\n x = layers.Dense(24, activation=\"relu\")(x)\n# x = layers.Dropout(0.1)(x)\n# x = layers.BatchNormalization()(x)\n y = layers.Dense(self.actions, activation=\"linear\")(x)\n model = models.Model(inputs=num_input, outputs=y)\n 
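# MSE between the network's Q-values and the TD targets built in train_model\n 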
model.compile(loss=\"mse\",optimizer=tf.keras.optimizers.Adam(lr=0.01,decay=0.01))\n model.summary()\n return model\n def copy_weights(self):\n self.target_model.set_weights(self.model.get_weights()) \nimport time\nfrom tqdm import tqdm\nstarttime=time.time()\nscores = [] # list containing scores from each episode\nscores_window = deque(maxlen=100) # last 100 scores\nn_episodes=5000\nagent=DQNAgent()\n\nmax_t=500\neps_start=1.0\neps_end=0.15\neps_decay=0.99\n\n\n\neps = eps_start\nenv=gym.make('LunarLander-v2')\neps_history=[]\nfor i_episode in range(1, n_episodes+1):\n state = env.reset()\n score = 0\n for i_ in range(1,max_t+1):\n action = agent.play(state,eps)\n next_state, reward, done, _ = env.step(action)\n if done:\n if (i_>=140):\n agent.step(state, action, reward+5, next_state, done)\n else:\n agent.step(state, action, reward-5, next_state, done)\n else:\n agent.step(state, action, reward, next_state, done)\n \n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps)\n eps_history.append(eps)\n if i_episode % 100 == 0:\n agent.model.save_weights(\"./weightsfolder/Lunar_weights_{}.h5\".format(i_episode))\n if i_episode % 10 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=190.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n \n break\nendtime= time.time() \nprint(endtime-starttime)\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(scores)), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.savefig(\"lunFT.png\")\n# plt.show()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(eps_history)), eps_history)\nplt.ylabel('Epsilon')\nplt.xlabel('Episode #')\nplt.savefig(\"epsilonLunarFT.png\")\n# plt.show() \nagent.model.save_weights(\"./weightsfolder/Lunar_agent_weights.h5\")","sub_path":"lunar_dqm_fixdtarget_prioreplay.py","file_name":"lunar_dqm_fixdtarget_prioreplay.py","file_ext":"py","file_size_in_byte":7065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"118438869","text":"from django.http import HttpResponse\nimport MySQLdb\nfrom libs import config\nimport json\ndef mysql_connect(query,fetch='all'):\n\t# keep the connection at module level so mysql_commit() can reuse it\n\tglobal connection\n\tconnection=MySQLdb.connect(host=config.mysql_ip,user=config.mysql_login,passwd=config.mysql_pwd,db=config.mysql_db,port=int(config.mysql_port))\n\tcur=connection.cursor()\n\tcur.execute(query)\n\tresult = [] \n\tcolumns = tuple( [d[0].decode('utf8') for d in cur.description] ) \n\tfor row in cur:\n\t\tresult.append(dict(zip(columns, row)))\n\tif fetch == 'one':\n\t\tif len(result) > 0:\n\t\t\tresult = result[0]\n\treturn result\n\ndef mysql_commit():\n\tconnection.commit()\n\ndef setResponse(result):\n\treturn HttpResponse(json.dumps(result))\n\ndef setExceptionResponse(result):\n\treturn HttpResponse(json.dumps(result),status=500)\n","sub_path":"investclub/libs/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"481763743","text":"import pygame, random\nfrom pygame.locals import *\n\nclass Inimigo(pygame.sprite.Sprite):\n\n def __init__(self, nome, velocidade):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load(nome).convert_alpha()\n 
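# the rect drives the on-screen position: x is randomised along the top edge, y starts at 0\n 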
self.rect = self.image.get_rect()\n\n self.rect[0] = random.randint(10, 700)\n self.rect[1] = 0\n\n self.velocidade = velocidade\n self.velocidadeInicial = velocidade\n\n def update(self):\n self.rect[1] += self.velocidade\n if (self.rect[1] >= 600):\n self.rect[0] = random.randint(10, 700)\n self.rect[1] = 0\n if self.velocidade != 11:\n self.velocidade += 1\n\n def atualiza(self):\n self.rect[0] = random.randint(10, 700)\n self.rect[1] = 0\n\n self.velocidade = self.velocidadeInicial","sub_path":"scripts/inimigo.py","file_name":"inimigo.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"529936700","text":"import numpy as np\nimport math\n\nfrom quaternion import Quat as Quaternion\nimport dualquaternion\n\n# Transformation test\npt = [7, 0, 0]\ntrans1 = [0, 0, 0]\nrotAxis1 = [0, 1, 0]\nrotAngle1 = math.radians(0)\ntrans2 = [0, 0, 0]\nrotAxis2 = [0, 1, 0]\nrotAngle2 = math.radians(90)\n\n\ndq1 = dualquaternion.DualQuat()\ndq2 = dualquaternion.DualQuat()\ndq1.setTransformation(Quaternion(axis=rotAxis1, angle=rotAngle1), trans1)\ndq2.setTransformation(Quaternion(axis=rotAxis2, angle=rotAngle2), trans2)\ndq3 = dq2 * dq1 # transform by dq1 then dq2\ndq4 = dq1 * 0.5 + dq2 * 0.5 # blending 2 transform\nprint ('dq1: ', dq1)\nprint ('dq2: ', dq2)\nprint ('dq3: ', dq3)\nprint ('dq4: ', dq4)\nprint('dq1*: ', dq1.transform(pt))\nprint('dq2*: ', dq2.transform(pt))\nprint('dq3*: ', dq3.transform(pt))\nprint('dq4*: ', dq4.transform(pt))\n\n\n\n\n# Using matrix\ndef transformByMatrix(pt, trans, rotAxis, rotAngle):\n # Construct quaternion\n quat = Quaternion(axis=rotAxis, angle=rotAngle)\n \n # Construct transformation matrix\n xform = quat.transformation_matrix\n xform[0,3] = trans[0]\n xform[1,3] = trans[1]\n xform[2,3] = trans[2]\n \n # Transform with matrix\n ptArr = np.array(pt + [1])\n ptMat = np.matrix(ptArr).transpose()\n xform = np.matrix(xform)\n result = xform * ptMat\n \n # Result\n result = result.transpose().tolist()\n return result[0][:3]\n\nprint ('transformByMatrix: ', transformByMatrix(pt, trans1, rotAxis1, rotAngle1))\n","sub_path":"dualQuaternion/dqtransformationtest.py","file_name":"dqtransformationtest.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"524846609","text":"#!/usr/bin/env python3\n\nimport sys\nfrom enum import Enum\n\n\n\"\"\"\nGiven a filepath, opens the file and reads the text as tape.\n\nINPUT:\n filename: a valid unix path to a list of comma-separated integers.\n\nRETURN: list of integers\n\"\"\"\ndef read_input(filename):\n\n text = \"\"\n\n with open(filename, \"r\") as fp:\n text = fp.readline()\n\n if text == \"\":\n print(\"No Input\")\n exit(2) \n\n tape = list(map(int,text.split(',')))\n\n return tape\n\n\n\nclass Status:\n FINISHED = 0\n OK = 1\n INPUT_REQUIRED = 2\n OUT_OF_BOUNDS = 3\n BAD_OPCODE = 4\n\n\n\nclass IntComp:\n\n \"\"\"\n Determines the literal values of all parameters for an intcode instruction.\n\n INPUTS:\n tape: list of integers in the form of an intcode program\n head: currently executing opcode of the intcode program\n inst_len: the number of integers in tape that are part of the instruction\n write_param: indicates which parameter is the address of the instruction output. Is the head of the parameter list. 
(this is gross, but I haven't figured out anything better)\n\n RETURN:\n params: list of integers, containing the literal inputs to the instruction calling this function.\n \"\"\"\n def get_params(self, inst_len, write_param):\n\n mode_digits = str(self.tape[self.head])[-3::-1]\n params = self.tape[self.head+1:self.head+inst_len]\n modes = []\n\n for i in range(inst_len-1):\n if len(mode_digits) > i:\n modes.append(int(mode_digits[i]))\n else:\n modes.append(0)\n\n\n for i in range(len(params)):\n if write_param != i and modes[i] == 0:\n params[i] = self.tape[params[i]]\n\n return params\n\n\n\n def instr_add(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n\n self.tape[params[2]] = params[0] + params[1]\n\n self.head += inst_len\n return Status.OK\n\n\n\n def instr_multiply(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n\n self.tape[params[2]] = params[0] * params[1]\n\n self.head += inst_len\n return Status.OK\n\n\n\n def instr_input(self, inputs):\n\n inst_len = 2\n\n if len(inputs) == 0:\n return Status.INPUT_REQUIRED\n \n num = inputs.pop(0)\n self.tape[self.tape[self.head+1]] = num\n \n self.head += inst_len\n return Status.OK\n\n\n\n def instr_output(self, outputs):\n\n inst_len = 2\n params = self.get_params(inst_len, -1)\n \n outputs.append(params[0])\n \n self.head += inst_len\n return Status.OK\n\n\n\n def instr_jump_if(self, if_true):\n \n inst_len = 3\n params = self.get_params(inst_len, -1)\n ip = self.head\n\n if (params[0] == 0) != if_true:\n ip = params[1]\n else:\n ip = self.head + inst_len\n\n if ip >= 0 and ip < len(self.tape):\n self.head = ip\n return Status.OK\n else:\n return Status.OUT_OF_BOUNDS\n\n\n\n def instr_less_than(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n \n if params[0] < params[1]:\n self.tape[params[2]] = 1\n else:\n self.tape[params[2]] = 0\n\n self.head += inst_len\n return Status.OK\n\n\n\n def instr_equals(self):\n \n inst_len = 4\n params = self.get_params(inst_len, 2)\n \n if params[0] == params[1]:\n self.tape[params[2]] = 1\n else:\n self.tape[params[2]] = 0\n\n self.head += inst_len\n return Status.OK\n\n\n\n \"\"\"\n Executes an intcode program\n\n INPUT:\n inputs: list of integers consumed by input (opcode 3) instructions\n RETURN:\n (status, outputs): the final Status code and the list of values produced\n by output (opcode 4) instructions\n \"\"\"\n def execute_tape(self, inputs = None):\n\n if inputs is None:\n inputs = []\n outputs = []\n status = Status.OK\n\n while self.head >= 0 and self.head < len(self.tape):\n\n opcode = int(str(self.tape[self.head])[-2:]) #I love python.\n\n if opcode == 1:\n status = self.instr_add()\n elif opcode == 2:\n status = self.instr_multiply()\n elif opcode == 3:\n status = self.instr_input(inputs)\n elif opcode == 4:\n status = self.instr_output(outputs)\n elif opcode == 5:\n status = self.instr_jump_if(True)\n elif opcode == 6:\n status = self.instr_jump_if(False)\n elif opcode == 7:\n status = self.instr_less_than()\n elif opcode == 8:\n status = self.instr_equals()\n elif opcode == 99:\n status = Status.FINISHED\n else:\n print(f\"Unrecognized opcode \\\"{opcode}\\\" at index {self.head}.\")\n exit(2)\n\n if status != Status.OK:\n break\n\n if self.head >= len(self.tape):\n print(f\"Tape overflow with head {self.head}\")\n exit(3)\n\n return (status, outputs)\n\n\n\n def __init__(self, source):\n # accepts either a ready-made tape (list of ints) or a path to an intcode file\n if isinstance(source, list):\n self.tape = source\n else:\n self.tape = read_input(source)\n self.head = 0\n #self.Status = STATUS()\n\n\n# end of class\n\n\n\n\"\"\"\n def main():\n\n print(\"READING INTCODE PROGRAM\")\n\n filename = \"\"\n\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n\n if filename == \"\":\n filename = \"7day-input.txt\"\n\n comp = IntComp(filename)\n\n comp.execute_tape()\n\n print(\"\\nEND OF INTCODE PROGRAM\\n\" + str(tape))\n\n\n\nif __name__ == \"__main__\":\n IntComp.main()\n\"\"\"\n","sub_path":"7day/intcomp2.py","file_name":"intcomp2.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"267392990","text":"import Pyro4\nimport time\n\nclass Player:\n def __init__(self, uri):\n self.server = Pyro4.Proxy(uri)\n self.name = \"\"\n self.sticks = 3\n self.turn = -1\n self.addName()\n self.requestMyTurn()\n\n def addName(self):\n name = input(\"What is your name?\\n\").strip()\n self.name = name\n logged = self.server.addPlayer(name)\n while logged == 0:\n print(\"user already logged! Try again\")\n name = input(\"What is your name?\\n\").strip()\n self.name = name\n logged = self.server.addPlayer(name)\n print(\"Welcome, \", name)\n\n def requestMyTurn(self):\n self.turn = self.server.requestTurn(self.name)\n\n def waitForMyTurn(self):\n print(\"waiting for your turn\")\n\n while not self.server.isMyTurn(self.turn):\n time.sleep(0.5)\n\n def play(self):\n while self.sticks > 0:\n sticksToSend = self.send_sticks()\n stickResponse = self.server.playerPlay(sticksToSend, self.name)\n\n if stickResponse == 0:\n\n print(\"waiting for players to put their sticks\")\n\n while not self.server.sticksInGame():\n time.sleep(2)\n\n self.waitForMyTurn()\n\n shot = self.send_shot()\n shot = int(shot)\n shotResponse = -1\n\n while shotResponse != 0:\n shotResponse = self.server.playerShots(shot, self.name)\n\n if shotResponse == 0:\n print(\"good luck with this shot\")\n self.server.nextTurn()\n else:\n print(\"repeated shot! 
Try again\")\n shot = self.send_shot()\n\n print(\"waiting for other players\")\n\n while not self.server.allPlayed():\n time.sleep(2)\n\n winner = self.server.requestWinner()\n\n time.sleep(1)\n\n allShots = self.server.requestShots()\n self.printShots(allShots)\n\n time.sleep(1)\n\n score = self.server.requestScore()\n self.printScore(score)\n\n time.sleep(1)\n\n if winner == self.name:\n self.decrease_sticks()\n winner = \"YOU\"\n else:\n print(\"good luck next time\")\n\n print(\"winner: \",winner)\n\n\n\n time.sleep(3)\n print(\"Turn ended\")\n\n if self.server.lastPlayer():\n print(\"YOU LOST\")\n self.server.endGame()\n exit()\n\n self.server.newRound()\n\n print(\"Congratulations! You are out of the game\")\n self.server.removePlayer(self.name)\n\n def send_sticks(self):\n player_sticks = -1\n if self.server.isFirstTurn():\n minSticks = 1\n else:\n minSticks = 0\n while player_sticks < minSticks or player_sticks > self.sticks:\n print(\"Choose the number of sticks to put in the game (between \", minSticks, \" and \", self.sticks, \")\")\n player_sticks = input().strip()\n while not player_sticks.isdigit():\n print(\"Please choose a number between \", minSticks, \" and \", self.sticks)\n player_sticks = input().strip()\n player_sticks = int(player_sticks)\n return int(player_sticks)\n\n def send_shot(self):\n print(\"Say your shot of the total of sticks in game\")\n player_shot = input().strip()\n\n return player_shot\n\n def decrease_sticks(self):\n print(\"Correct shot!\")\n self.sticks -= 1\n\n def printShots(self, shots):\n print(\"######################################\")\n\n print(\"Shots fired:\")\n\n for key in shots.keys():\n print(key, \": \", shots[key])\n\n print(\"######################################\")\n\n def printScore(self, score):\n print(\"######################################\")\n\n print(\"Score now:\")\n\n for s in score.keys():\n print(s, \" has \", score[s], \" sticks\")\n\n print(\"######################################\")\n\ndef main():\n serverURI = \"PYRONAME:matches.server\"\n player = Player(serverURI)\n\n player.play()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"315593318","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import api_view,authentication_classes,permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import geo_code\nfrom .serializer import geo_codeSerializer\n\n#from rest_framework.authentication import TokenAuthentication \nfrom rest_framework.permissions import IsAuthenticated\n# Create your views here.\n\n#from rest_framework_simplejwt.authentication import JWTAuthentication \n# for JWT authentication_classes, only needed when applying it at the view level,\n#i.e. for views.
py; otherwise add rest_framework_simplejwt.authentication.JWTAuthentication globally in settings\n\n@api_view(['GET'])\n#@authentication_classes([JWTAuthentication])\n@permission_classes([IsAuthenticated])\ndef get_geo_code(request):\n\tgeo_info=geo_code.objects.all()\n\tserializer=geo_codeSerializer(geo_info,many=True)\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef create_geo_code(request):\n\tserializer = geo_codeSerializer(data=request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef update_geo_code(request, pk):\n\tgeo_record = geo_code.objects.get(seo_id=pk)\n\tprint(geo_record)\n\tserializer = geo_codeSerializer(instance=geo_record, data=request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete_geo_code(request, pk):\n\tgeo_record = geo_code.objects.get(seo_id=pk)\n\tgeo_record.delete()\n\n\treturn Response('Item successfully deleted!')\n","sub_path":"myrestapi/geo_code/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"393880807","text":"#scripting by james.duclos@commercebank.com\n#rlps, commercial card, data intelligence\n#In Collaboration with Mike Hockens\n#This report loads into the datamart around 9pm\n\nfrom collections import OrderedDict\nimport requests\nimport sf_mod.james_force as jf\nfrom shutil import copy\nfrom proxies import proxy_dict2\n\n#assign working folder\nfolder = \"//rlps_kw-84gv942/Scripting/fr_SF/HSF/\"\n\n#Pull New Salesforce Token\nimport sys\nsys.path.insert(0, '//rlps_kw-84gv942/Scripting/fr_SF/pull_sf_token') \nfrom pull_sf_token_class import Token\n\n#Get token\ntoken = Token(instance='cs66').get_token()\n\nprint(\"Setting headers\")\nmy_headers = {\n 'Content-Type': 'application/json',\n 'Authorization': \"Bearer \" + token,\n 'X-PrettyPrint': '1'\n }\n\nprint(\"Getting stuff with webcall\")\ndef pull_report(report_id):\n \n result = requests.post(\"https://cs66.salesforce.com/services/data/v34.0/analytics/reports/{0}\".format(report_id), \n headers=my_headers, proxies=proxy_dict2)\n \n output = result.json(object_pairs_hook=OrderedDict)\n jf.print_sf_json(output, folder + \"output-\" + report_id + \"_sandbox.csv\")\n src = folder + \"output-\" + report_id + \"_sandbox.csv\"\n\n #dst = '//cbsh.com/kcdfspool/DR-Commercial/HSF/1. 
Hockens/Requests/hsf_' + report_id + '.txt'\n #copy(src, dst)\n \n #dst = '//wkpv1gspta01/infomovergs/HSF_SLFC/hsf_' + report_id + '.txt'\n #copy(src, dst)\n\nreport_ids = [\"00O330000049zNy\",\"00O33000004A1xF\"]\n\nfor id in report_ids:\n pull_report(id)","sub_path":"fr_SF/HSF/hsf_cpcs_cr_pull_from_sandbox.py","file_name":"hsf_cpcs_cr_pull_from_sandbox.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"15575692","text":"\"\"\"\r\nRuns the application server and client modules.\r\n Requirements:\r\n - python34\r\n - npm\r\n\"\"\"\r\n\r\nfrom threading import Thread\r\nfrom argparse import RawTextHelpFormatter\r\nimport argparse\r\nimport os\r\n\r\n# The directory of this file should be the root directory of the project.\r\nROOT_DIR = os.path.dirname(os.path.realpath(__file__))\r\n\r\n# Path to virtualenv binaries for setup.\r\nPYTHON = os.path.join('env', 'Scripts', 'python.exe')\r\nPIP = os.path.join('env', 'Scripts', 'pip.exe')\r\n\r\n\r\ndef _set_directory(subdir):\r\n working_dir = os.path.join(ROOT_DIR, subdir)\r\n os.chdir(working_dir)\r\n print('Current directory: %s' % working_dir)\r\n\r\n\r\ndef _run_command(command):\r\n out = os.system(command)\r\n print('%s [%d]' % (command, out))\r\n if out != 0: # Raise an exception if the command fails.\r\n raise Exception('Command failed: %s' % command)\r\n\r\n\r\ndef _main():\r\n description = 'Setup development environment.\\n\\n' \\\r\n 'If you have already run this script, it may be necessary to \\n' \\\r\n 'skip virtual environment and superuser setup. This can be \\n' \\\r\n 'done with options `--skip-venv` and `--skip-superuser`.'\r\n parser = argparse.ArgumentParser(description=description,\r\n formatter_class=RawTextHelpFormatter)\r\n parser.add_argument('--skip-venv', action='store_true')\r\n parser.add_argument('--skip-pip', action='store_true')\r\n parser.add_argument('--skip-migrate', action='store_true')\r\n parser.add_argument('--skip-superuser', action='store_true')\r\n parser.add_argument('--skip-server', action='store_true')\r\n parser.add_argument('--skip-client', action='store_true')\r\n args = parser.parse_args()\r\n\r\n # Install dependencies.\r\n _run_command('pip install virtualenv')\r\n _run_command('npm install -g bower')\r\n _run_command('npm install -g less')\r\n\r\n try:\r\n # Server setup.\r\n if not args.skip_server:\r\n _set_directory('server')\r\n\r\n # Initialize virtual environment.\r\n if not args.skip_venv:\r\n _run_command('virtualenv env')\r\n else:\r\n print('Skipping virtual environment setup.')\r\n\r\n # Update pip to latest version.\r\n if not args.skip_pip:\r\n _run_command('%s -m pip install --upgrade pip' % PYTHON)\r\n else:\r\n print('Skipping pip update.')\r\n _run_command('%s install -r requirements.txt' % PIP)\r\n\r\n # Django migrations.\r\n if not args.skip_migrate:\r\n _run_command('%s manage.py migrate' % PYTHON)\r\n else:\r\n print('Skipping Django migrations.')\r\n\r\n # Create Django superuser.\r\n if not args.skip_superuser:\r\n _run_command('%s manage.py createsuperuser' % PYTHON)\r\n else:\r\n print('Skipping Django Admin superuser creation.')\r\n else:\r\n print('Skipping server setup.')\r\n\r\n # Client setup.\r\n if not args.skip_client:\r\n _set_directory('client')\r\n _run_command('npm install')\r\n _set_directory(os.path.join('client', 'static'))\r\n _run_command('bower install')\r\n else:\r\n print('Skipping client setup.')\r\n\r\n except Exception as exception:\r\n 
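# any failed command lands here; per the description above, the --skip-* flags let a re-run resume past completed steps\r\n 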
print('Setup failed due to:\\n\\t%s' % str(exception))\r\n\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"502749405","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom web.items import WebItem\nfrom scrapy.exceptions import NotConfigured\n\nclass webSpider(scrapy.Spider):\n name = \"web\"\n\n def start_requests(self):\n for url in self.settings.get('PATH').keys():\n yield scrapy.Request(url=url,\n cookies = self.settings.get('COOKIES'),\n callback=self.parse,)\n\n def parse(self, response):\n try:\n path = self.settings.get('PATH')[response.url]\n root = path['ROOT']\n title = path['TITLE']\n link = path['LINK']\n except TypeError:\n raise NotConfigured('PATH should be a dict value')\n except:\n raise NotConfigured('Url or xpath is not configured, please check settings')\n\n for sel in response.xpath(root):\n item = WebItem()\n item['title'] = sel.xpath(title).extract()\n item['link'] = sel.xpath(link).extract()\n yield item\n","sub_path":"web/spiders/web_spider.py","file_name":"web_spider.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"607633621","text":" #!/usr/bin/env python\nimport os\nimport re\nimport sys\nimport warnings\ntry:\n from setuptools import setup\nexcept:\n from distutils.core import setup\n\nMAJOR = 0\nMINOR = 1\nMICRO = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nQUALIFIER = ''\n\n# code to extract and write the version copied from pandas, which is available\n# under the BSD license:\nFULLVERSION = VERSION\nwrite_version = True\n\nif not ISRELEASED:\n import subprocess\n FULLVERSION += '.dev'\n\n pipe = None\n for cmd in ['git', 'git.cmd']:\n try:\n pipe = subprocess.Popen(\n [cmd, \"describe\", \"--always\", \"--match\", \"v[0-9]*\"],\n stdout=subprocess.PIPE)\n (so, serr) = pipe.communicate()\n if pipe.returncode == 0:\n break\n except:\n pass\n\n if pipe is None or pipe.returncode != 0:\n # no git, or not in git dir\n if os.path.exists('src/xray/version.py'):\n warnings.warn(\"WARNING: Couldn't get git revision, using existing xray/version.py\")\n write_version = False\n else:\n warnings.warn(\"WARNING: Couldn't get git revision, using generic version string\")\n else:\n # have git, in git dir, but may have used a shallow clone (travis does this)\n rev = so.strip()\n # makes distutils blow up on Python 2.7\n if sys.version_info[0] >= 3:\n rev = rev.decode('ascii')\n\n if not rev.startswith('v') and re.match(\"[a-zA-Z0-9]{7,9}\", rev):\n # partial clone, manually construct version string\n # this is the format before we started using git-describe\n # to get an ordering on dev version strings.\n rev = \"v%s.dev-%s\" % (VERSION, rev)\n\n # Strip leading v from tags format \"vx.y.z\" to get th version string\n FULLVERSION = rev.lstrip('v')\n\nelse:\n FULLVERSION += QUALIFIER\n\n\ndef write_version_py(filename=None):\n cnt = \"\"\"\\\nversion = '%s'\nshort_version = '%s'\n\"\"\"\n if not filename:\n filename = os.path.join(\n os.path.dirname(__file__), 'src', 'xray', 'version.py')\n\n a = open(filename, 'w')\n try:\n a.write(cnt % (FULLVERSION, VERSION))\n finally:\n a.close()\n\nif write_version:\n write_version_py()\n\n\nsetup(name='xray',\n version=FULLVERSION,\n description='Extended arrays for working with scientific datasets',\n author='Stephan 
Hoyer, Alex Kleeman, Eugene Brevdo',\n author_email='TODO',\n install_requires=['scipy >= 0.13', 'numpy >= 1.8', 'netCDF4 >= 1.0.6',\n 'pandas >= 0.13.1'],\n tests_require=['mock >= 1.0.1', 'nose >= 1.0'],\n url='https://github.com/akleeman/xray',\n test_suite='nose.collector',\n packages=['xray'],\n package_dir={'': 'src'})\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"598755090","text":"from django.shortcuts import render\nfrom django.http import *\nfrom django.core import serializers\nfrom django.db import connection\nimport json\n# Modelos\nfrom panel.models import Summary as sm\nfrom panel.models import Result as rs\nfrom panel.models import Labels as lb\n\ndef index(request):\n\treturn render(request, 'panel/index.html')\n\ndef main(request):\n\treturn render(request, 'panel/main.html')\n #return HttpResponse(\"bienvenido a mi pagina en %s\" % request.path) \n\ndef configure(request):\n\treturn render(request, 'panel/graph.html', {'parametro':request.POST['parametro'], 'algoritmo': request.POST['algoritmo']})\t \n\ndef load_summary(request):\n\tif (request.GET['parametro'] and request.GET['algoritmo']):\n\t\tdespegue = sm.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase='despegue').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'grupo', 'parametro')\t\n\t\taterrizaje = sm.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase='aterrizaje').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'grupo', 'parametro')\t\n\t\treturn JsonResponse({'despegue': parse_data_summary(list(despegue)), 'aterrizaje':parse_data_summary(list(aterrizaje))})\n\telse:\n\t\treturn JsonResponse({'despegue': [], 'aterrizaje':[]})\t\n\ndef load_result(request):\n\tdespegue = sm.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'])\t\n\treturn JsonResponse(list(despegue), safe=False)\n\ndef parse_data_summary(data):\n\torder_index = ['row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'grupo']\t\n\tlist_result = []\t\n\t\n\tfor item in data:\n\t\torder_row = []\n\t\tgrupo = ''\n\t\ti = 1\n\t\tfor current in order_index:\n\t\t\tif current == 'grupo':\n\t\t\t\tgrupo = item[current]\n\t\t\telse:\n\t\t\t\torder_row.append([i,item[current]])\n\t\t\t\ti = i + 1\n\t\tlist_result.append({grupo:order_row})\n\n\treturn list_result\n \t\ndef asign_label(request):\n\tgrupos = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase']).values('cluster').distinct()\n\n\tfor current in grupos:\t\t\n\t\tcurrent_clusters = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase'], cluster=current['cluster'])\n\t\tfor flight in current_clusters:\t\t\n\t\t\tflight.etiqueta = request.GET[current['cluster']]\n\t\t\tflight.save()\n\treturn JsonResponse({}, 
safe=False)\n\ndef cantidad_grupo(request):\n\tgrupos = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase']).values('cluster').distinct()\n\tlistado = {}\n\tfor current in grupos:\n\t\tvalores = rs.objects.filter(parametro=request.GET['parametro'], algoritmo=request.GET['algoritmo'], fase= request.GET['fase'], cluster=current['cluster'])\n\t\tlistado[current['cluster']] = len(valores)\n\treturn JsonResponse(listado, safe=False)\n\ndef load_resumen(request):\n\tpass\n\ndef resumen(request):\n\t\"\"\"FILTER BY ALGORITHM\"\"\"\n\t#if request.GET['algoritmo']:\n\t#\talgoritmo = request.GET['algoritmo']\n\t#else:\n\talgoritmo = 'K-means'\n\tparametros = ['VRTG', 'AOAC', 'FLAP', 'PTCH', 'ROLL']\n\tvuelos = rs.objects.filter(algoritmo=algoritmo, fase='despegue').values('flight','id').distinct()\n\tetiquetas_despegue = []\n\tfor vuelo in vuelos:\n\t\tlistado = {'flight':vuelo['flight'],'VRTG':'-', 'AOAC':'-', 'FLAP':'-', 'PTCH':'-', 'ROLL':'-'}\n\t\t\n\t\t\"\"\"for parametro in parametros:\t\t\t\n\t\t\ttry:\n\t\t\t\tetiqueta = rs.objects.get(algoritmo=algoritmo, fase='despegue', flight=vuelo['flight'], parametro=parametro)\n\t\t\texcept etiqueta.DoesNotExist, e:\n\t\t\t\tprint str(vuelo['id']) +' --- '+ str(e)\n\t\t\telse:\n\t\t\t\tlistado[parametro] = etiqueta.etiqueta\n\t\t\tfinally:\n\t\t\t\tpass\"\"\"\n\t\t\t\n\t\t\t#if hasattr(etiqueta, 'etiqueta'):\n\t\t\t#\tprint str(vuelo['id'])+' -- '+ etiqueta.etiqueta\n\t\t\t\t\n\t\tetiquetas_despegue.append(listado)\n\treturn render(request, 'panel/resumen.html', {'despegue':list(etiquetas_despegue)})\t \n\ndef detalle_vuelo(request, flight):\n\treturn render(request, 'panel/detalle_vuelo.html', {'vuelo':flight, 'algoritmo':''})\n\ndef load_detalle_vuelo(request):\n\tparametros = ['VRTG', 'AOAC', 'FLAP', 'PTCH', 'ROLL']\n\tlistado = {'despegue':{'VRTG':[], 'AOAC':[], 'FLAP':[], 'PTCH':[], 'ROLL':[]}, 'aterrizaje':{'VRTG':[], 'AOAC':[], 'FLAP':[], 'PTCH':[], 'ROLL':[]}}\n\tfor parametro in parametros:\n\t\tquery_object = rs.objects.filter(flight=request.GET['vuelo'], parametro=parametro, fase='despegue').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'parametro').distinct()\n\t\tlistado['despegue'][parametro] = parse_data_detalle(list(query_object), request.GET['vuelo'])\n\t\tquery_object = rs.objects.filter(flight=request.GET['vuelo'], parametro=parametro, fase='aterrizaje').values('row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'parametro').distinct()\n\t\tlistado['aterrizaje'][parametro] = parse_data_detalle(list(query_object), request.GET['vuelo'])\n\treturn JsonResponse({'listado': listado})\n\ndef parse_data_detalle(data, vuelo):\n\torder_index = ['row0', 'row1', 'row2', 'row3', 'row4', 'row5', 'row6', 'row7', 'row8', 'row9', 'row10', 'row11', 'row12', 'row13', 'row14', 'row15', 'row16', 'row17', 'row18', 'row19', 'row20', 'row21', 'row22', 'row23', 'row24', 'parametro']\t\n\tlist_result = []\t\n\t\n\tfor item in data:\n\t\torder_row = []\n\t\tparametro = ''\n\t\ti = 1\n\t\tfor current in order_index:\n\t\t\tif current == 'parametro':\n\t\t\t\tparametro = item[current]\n\t\t\telse:\n\t\t\t\torder_row.append([i,item[current]])\n\t\t\t\ti = i + 
1\n\t\tlist_result.append({vuelo:order_row})\n\n\treturn list_result\n \t","sub_path":"panel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"564536671","text":"\"\"\"Plugwise Water Heater component for HomeAssistant.\"\"\"\n\nimport logging\nimport plugwise\n\nfrom . import (\n DOMAIN,\n DATA_ADAM,\n PwEntity,\n)\n\nfrom homeassistant.components.climate.const import (\n CURRENT_HVAC_COOL,\n CURRENT_HVAC_HEAT,\n CURRENT_HVAC_IDLE,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nCURRENT_HVAC_DHW = \"dhw\"\nWATER_HEATER_ICON = \"mdi:thermometer\"\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Add the Plugwise Water Heater.\"\"\"\n\n if discovery_info is None:\n return\n\n api = hass.data[DATA_ADAM].data\n\n devices = []\n ctrl_id = None\n try:\n devs = api.get_devices()\n except RuntimeError:\n _LOGGER.error(\"Unable to get location info from the API\")\n return\n\n _LOGGER.info('Dev %s', devs)\n for dev in devs:\n data = None\n _LOGGER.info('Dev %s', dev)\n if dev['name'] == 'Controlled Device':\n ctrl_id = dev['id']\n dev_id = None\n name = 'adam'\n _LOGGER.info('Name %s', name)\n data = api.get_device_data(dev_id, ctrl_id, None)\n\n if data is None:\n _LOGGER.debug(\"Received no data for device %s.\", name)\n return\n\n device = PwWaterHeater(api, name, dev_id, ctrl_id)\n _LOGGER.info('Adding water_heater.%s', name)\n if not device:\n continue\n devices.append(device)\n add_entities(devices, True)\n\n\nclass PwWaterHeater(PwEntity):\n \"\"\"Representation of a Plugwise water_heater.\"\"\"\n\n def __init__(self, api, name, dev_id, ctlr_id):\n \"\"\"Set up the Plugwise API.\"\"\"\n self._api = api\n self._name = name\n self._dev_id = dev_id\n self._ctrl_id = ctlr_id\n self._cooling_status = None\n self._heating_status = None \n self._boiler_status = None\n self._dhw_status = None\n\n @property\n def name(self):\n \"\"\"Return the name of the thermostat, if any.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n if self._heating_status or self._boiler_status:\n return CURRENT_HVAC_HEAT\n if self._dhw_status:\n return CURRENT_HVAC_DHW\n if self._cooling_status:\n return CURRENT_HVAC_COOL\n return CURRENT_HVAC_IDLE\n\n @property\n def icon(self):\n \"\"\"Return the icon to use in the frontend.\"\"\"\n return WATER_HEATER_ICON\n\n def update(self):\n \"\"\"Update the data from the water_heater.\"\"\"\n _LOGGER.debug(\"Update water_heater called\")\n data = self._api.get_device_data(self._dev_id, self._ctrl_id, None)\n\n if data is None:\n _LOGGER.debug(\"Received no data for device %s.\", self._name)\n else:\n if 'central_heating_state' in data:\n self._heating_status = data['central_heating_state'] \n if 'boiler_state' in data:\n self._boiler_status = data['boiler_state']\n if 'cooling_state' in data:\n self._cooling_status = data['cooling_state'] \n if 'dhw_state' in data:\n self._dhw_status = data['dhw_state'] \n","sub_path":"custom_components/adam/water_heater.py","file_name":"water_heater.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"346376557","text":"from typing import List\nfrom os import listdir\nfrom AudioProcessing import *\nfrom collections import Counter\nimport numpy as np\nfrom FingerPrintDatabase import FingerPrintDatabase, get_fingerprints\nfrom SongDatabase 
import *\nfrom Spectrograms import spectrogram, local_peaks\nfrom multiprocessing import Process, Queue, Value\nimport time\n'''\npotential features:\n- real time audio\n- ratio for more accurate predictions\n- website?\n\n'''\n\n# main prediction functions should be here\n# it uses other classes for the prediction\n# todo: add background cancelling even in song file\n\nclass Predictor:\n def __init__(self) -> None:\n self.fingerprints = FingerPrintDatabase()\n self.songs = SongDatabase()\n self.pollster = Counter()\n self.percent_thres = 0\n self.store_fanout_value = 2\n self.pred_fanout_value = 30\n self.thres_ratio = 1.5\n self.store_width = 3\n self.store_length = 3\n self.store_perc = 98\n self.thickness = 10\n self.pred_length = 3\n self.pred_width = 3\n self.pred_perc = 80\n self.time_diff_grain = 10\n self.realtime_accum = []\n self.test_accum = []\n \n def tally(self, songs : List, time0):\n if not songs is None: \n self.pollster.update(Counter([(song, int((time-time0)/self.time_diff_grain)) for song, time in songs]))\n\n def get_tally_winner(self):\n # print(self.pollster.most_common()[:4])\n if len(self.pollster)==0:\n return -1\n common, ratio = self.confidence_ratio()\n if ratio < self.thres_ratio:\n return -1\n return common\n \n def confidence_ratio(self):\n # uses the built in counters to find an approximate ratio for confident guesses\n counter = self.pollster.most_common()\n # takes the \"most common\" song\n most_common = counter[0][0][0]\n print(counter[0][1])\n common_two = None\n for index in range(1, len(counter)):\n if counter[index][0][0] != most_common:\n common_two = index\n break\n if common_two is None:\n ratio = 1e9\n else:\n ratio = counter[0][1] / counter[common_two][1]\n return most_common, ratio\n\n def add_song(self, file_path : str, songname : str, artist : str):\n if songname in self.songs.name2id:\n return\n audio, sampling_rate = read_song(file_path)\n # these should read in discrete digital data\n spectro, freqs, times = spectrogram(audio)\n # returns (Frequency, Time) data\n thres = np.percentile(spectro, self.percent_thres)\n peaks = local_peaks(spectro, thres, self.store_width, self.store_length, self.store_perc, self.thickness)\n print(len(peaks))\n self.songs.save_song(peaks, songname, artist, self.fingerprints, self.store_fanout_value)\n \n def add_songs(self, *, dir_path : str):\n files = listdir(dir_path)\n for file in files:\n if 'DS_Store' in file:\n continue\n print(f'reading {file}')\n file_parts = file.split('_')\n self.add_song(dir_path+\"/\"+file, *file_parts[:2])\n \n def delete_song(self, songname : str):\n self.songs.delete_song(songname, self.store_fanout_value,self.fingerprints)\n\n def save_data(self, dir_path):\n self.songs.save_data(dir_path+\"/songs\")\n self.fingerprints.save_data(dir_path+\"/fingerprints\")\n \n def load_data(self, dir_path):\n self.songs.load_data(dir_path+\"/songs\")\n self.fingerprints.load_data(dir_path+\"/fingerprints\")\n\n def preprocess(self, audio):\n # these should read in discrete digital data\n spectro, freqs, times = spectrogram(audio)\n # returns (Frequency, Time) data\n thres = np.percentile(spectro, self.percent_thres)\n peaks = local_peaks(spectro, thres, self.pred_width, self.pred_length, self.pred_perc)\n # returns a list of peaks (f, t)\n return peaks, len(times)\n \n def process_peaks(self, peaks):\n fingerprints, times = get_fingerprints(peaks, self.pred_fanout_value)\n for fingerprint, time in zip(fingerprints,times):\n songs = self.fingerprints.query_fingerprint(fingerprint)\n 
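# each matching song casts one vote, keyed by its quantised time offset\n 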
self.tally(songs, time)\n\n def process_prediction(self, audio : np.ndarray):\n peaks, _ = self.preprocess(audio)\n self.process_peaks(peaks)\n\n def predict(self, *, file_path : str = '', record_time : float = 0, samples : np.ndarray = None):\n self.pollster = Counter()\n # this is meant to be a function that indicates the general structure of the program\n # it uses some pseudo functions that should be implemented\n if file_path!='':\n audio, sampling_rate = read_song(file_path)\n elif record_time > 0:\n audio = record_song(record_time)\n else:\n audio = samples\n self.process_prediction(audio)\n ret = self.get_tally_winner()\n if ret==-1:\n return \"Oops, did not find this song!\"\n else:\n return self.songs.id2name[ret]\n\n def process_prediction_realtime(self, queue, ret):\n tmp_ret = -1\n all_peaks = None\n offset = 0\n while True:\n self.pollster = Counter()\n data = queue.get()\n if data is None:\n ret.value = self.get_tally_winner()\n break\n peaks, time_len = self.preprocess(data)\n peaks[:,1] += offset\n if all_peaks is None:\n all_peaks = peaks\n else:\n all_peaks = np.concatenate([all_peaks,peaks])\n self.process_peaks(all_peaks)\n tmp_ret = self.get_tally_winner()\n offset += time_len + 1\n if tmp_ret != -1:\n ret.value = tmp_ret\n while not queue.empty():\n queue.get()\n break\n\n def predict_realtime(self, file_path: str='', samples: np.ndarray = None, step_size: int = 1, state:int = 1):\n if state == 0:\n self.queue = Queue()\n self.realtime_ret = Value('i',-1)\n self.process = Process(target=self.process_prediction_realtime, args=(self.queue,self.realtime_ret,))\n self.process.start()\n elif state == 1:\n if self.realtime_ret.value != -1:\n self.process.join()\n return self.songs.id2name[self.realtime_ret.value]\n if samples is None:\n audio, sampling_rate = read_song(file_path)\n else:\n audio = samples\n self.realtime_accum.append(audio)\n if len(self.realtime_accum)>=step_size:\n data = np.concatenate(self.realtime_accum)\n self.realtime_accum = []\n self.queue.put(data)\n self.test_accum.append(data)\n else:\n self.queue.put(None)\n self.process.join()\n if self.realtime_ret.value==-1:\n return \"Oops, did not find this song!\"\n else:\n return self.songs.id2name[self.realtime_ret.value]\n\n\n# predictor = Predictor()\n# predictor.load_data('song_recognition/database')\n# print(predictor.predict(record_time=5))\n\n\n# peaks = predictor.songs.database[predictor.songs.name2id['Imperial March']][\"peaks\"]\n# fingerprints, times = get_fingerprints(peaks,2)\n# print(fingerprints[:])\n\n# predictor.add_songs(dir_path='AGOP-mp3-files')\n# predictor.save_data('song_recognition/database')\n\n# first_print = (202, 831, 0)\n# print(predictor.fingerprints.database[first_print])\n# print(predictor.fingerprints.query_fingerprint(first_print))\n\n# predictor.delete_song('Imperial-March')\n# print(len(predictor.fingerprints.database))","sub_path":"song_recognition/Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"110867765","text":"#!/usr/bin/python -u\n# coding: utf-8\n\nfrom flask import *\nimport json\nimport urllib.request\nimport ssl\n\napp = Flask(__name__)\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef handle_root():\n if request.method == \"GET\":\n return 'IBPS(Iraira Bo Print Server) is running'\n else:\n comm_path = '/home/pi/work/iraira/iraira_bo_print/comm.txt'\n \n if request.headers['Content-Type'] != 'application/json':\n 
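# log the offending content type, then reject non-JSON payloads with 400\n 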
print(request.headers['Content-Type'])\n return jsonify(res='error'), 400\n\n ssl._create_default_https_context = ssl._create_unverified_context\n url_score_site = 'https://lchika.club/scores'\n url_result_server = 'http://192.168.100.111'\n headers = {\n 'Content-Type': 'application/json',\n }\n app.logger.info(request.json)\n req = urllib.request.Request(url_score_site, json.dumps(request.json).encode(), headers)\n with urllib.request.urlopen(req) as res:\n res_html = res.read().decode('utf-8')\n print('score_site res=' + res_html)\n req = urllib.request.Request(url_result_server, json.dumps(request.json).encode(), headers)\n try:\n with urllib.request.urlopen(req) as res:\n res_html = res.read().decode('utf-8')\n print('result_server res=' + res_html)\n except:\n app.logger.info('Error: failed to send request to result server')\n with open(comm_path, mode='a') as f:\n f.write(json.dumps(request.json) + '\\n')\n return 'score was sent'\n\nif __name__ == '__main__':\n app.run(\"0.0.0.0\", debug=True)\n #app.run(\"0.0.0.0\")\n","sub_path":"flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"303751723","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n ┏┛ ┻━━━━━┛ ┻┓\n ┃       ┃\n ┃   ━   ┃\n ┃ ┳┛  ┗┳ ┃\n ┃       ┃\n ┃   ┻   ┃\n ┃       ┃\n ┗━┓   ┏━━━┛\n ┃   ┃ divine beast, bless:\n ┃   ┃ code with no BUG!\n ┃   ┗━━━━━━━━━┓\n ┃        ┣┓\n ┃     ┏┛\n ┗━┓ ┓ ┏━━━┳ ┓ ┏━┛\n ┃ ┫ ┫ ┃ ┫ ┫\n ┗━┻━┛ ┗━┻━┛\n\"\"\"\n\nimport codecs\nfrom tqdm import tqdm\nfrom collections import Counter\nimport pickle\nimport os\nimport numpy as np\nimport itertools\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\n\ndef load_sentences(path):\n \"\"\"\n Read sentences from a file\n :param path:\n :return:\n \"\"\"\n sentences = []\n sentence = []\n for line in tqdm(codecs.open(path, 'r', encoding='utf-8'), desc='reading data'):\n line = line.strip()\n if not line:\n if len(sentence) > 0:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) == 2\n sentence.append(word)\n if len(sentence) > 0:\n sentences.append(sentence)\n return sentences\n\n\ndef change_bio_to_bioes(sentences):\n \"\"\"\n Convert BIO tags to the BIOES scheme\n :param sentences:\n :return:\n \"\"\"\n new_tags = []\n new_sentences = []\n for idx, sentence in tqdm(enumerate(sentences), desc='processing data'):\n tags = [each[-1] for each in sentence]\n new_tag = ['O']\n for tag in tags:\n # an I directly before an O becomes E\n if tag == 'O' and new_tag[-1].split('-')[0] == 'I':\n new_tag[-1] = 'E-' + str(new_tag[-1].split('-')[1])\n # a B directly before an O becomes S\n if tag == 'O' and new_tag[-1].split('-')[0] == 'B':\n new_tag[-1] = 'S-' + str(new_tag[-1].split('-')[1])\n # pass O through\n if tag == 'O':\n new_tag.append(tag)\n # pass B through\n if tag.split('-')[0] == 'B':\n new_tag.append(tag)\n # pass I through\n if tag.split('-')[0] == 'I':\n new_tag.append(tag)\n new_tags.append(new_tag[1:])\n for i in range(len(sentence)):\n # a leading 'O' was prepended to simplify the loop, so +1 realigns with the original sentence\n sentence[i][-1] = new_tag[i + 1]\n new_sentences.append(sentence)\n return new_sentences\n\n\ndef word_mapping(sentences):\n \"\"\"\n Word mapping: build the word/id mappings (essentially a one-hot index; embeddings such as BERT could replace this step)\n :param sentences:\n :return:\n \"\"\"\n words = []\n for sentence in sentences:\n for each in sentence:\n words.append(each[0])\n words_counter = Counter(words)\n words_dict = {}\n for each in words_counter:\n words_dict[each] = words_counter[each]\n words_dict['<PAD>'] = 10000001\n words_dict['<UNK>'] = 10000000\n sorted_items = 
sorted(words_dict.items(), key=lambda x: -x[1])\n id_to_word = {i: v[0] for i, v in enumerate(sorted_items)}\n word_to_id = {v[0]: i for i, v in enumerate(sorted_items)}\n return words_dict, id_to_word, word_to_id\n\n\ndef tag_mapping(sentences):\n \"\"\"\n Tag mapping, one-hot-like; unlike the word mapping this step cannot be replaced, since it defines the output class indices\n :param sentences:\n :return:\n \"\"\"\n tags = []\n for sentence in sentences:\n for each in sentence:\n tags.append(each[1])\n tags_counter = Counter(tags)\n tags_dict = {}\n for each in tags_counter:\n tags_dict[each] = tags_counter[each]\n sorted_item = sorted(tags_dict.items(), key=lambda x: -x[1])\n id_to_tags = {i: v[0] for i, v in enumerate(sorted_item)}\n tags_to_id = {v[0]: i for i, v in enumerate(sorted_item)}\n return tags_dict, id_to_tags, tags_to_id\n\n\ndef prepare_dataset(sentences, word_to_id, tags_to_id):\n \"\"\"\n Prepare training data: convert every word and tag in sentences to indices\n :param sentences:\n :param word_to_id:\n :param tags_to_id:\n :return:\n \"\"\"\n data = []\n for sentence in sentences:\n word_list = [word[0] for word in sentence]\n # slightly questionable: unknown words fall back to the '<UNK>' entry (count 10000000 above),\n # but since the vocabulary is built from this very dataset, the fallback is rarely hit\n word_id_list = [word_to_id[word if word in word_to_id else '<UNK>'] for word in word_list]\n tag_id_list = [tags_to_id[word[-1]] for word in sentence]\n data.append([word_list, word_id_list, tag_id_list])\n return data\n\n\ndef load_data(config):\n \"\"\"\n Load data (end-to-end)\n :param config:\n :return:\n \"\"\"\n # processing takes a while, so results are pickled; check for an existing pickle first\n if os.path.exists(config.dataset_pkt):\n dataset_pkt = pickle.load(open(config.dataset_pkt, 'rb'))\n train_sentences = dataset_pkt['train']\n dev_sentences = dataset_pkt['dev']\n test_sentences = dataset_pkt['test']\n else:\n # load the datasets\n train_sentences = load_sentences(config.train_path)\n dev_sentences = load_sentences(config.dev_path)\n test_sentences = load_sentences(config.test_path)\n # tag-scheme conversion\n train_sentences = change_bio_to_bioes(train_sentences)\n dev_sentences = change_bio_to_bioes(dev_sentences)\n test_sentences = change_bio_to_bioes(test_sentences)\n dataset_plt = {}\n dataset_plt['train'] = train_sentences\n dataset_plt['dev'] = dev_sentences\n dataset_plt['test'] = test_sentences\n pickle.dump(dataset_plt, open(config.dataset_pkt, 'wb'))\n\n # storage for the word and tag mappings\n if os.path.exists(config.map_pkt):\n map_pkt = pickle.load(open(config.map_pkt, 'rb'))\n words_dict = map_pkt['words_dict']\n id_to_word = map_pkt['id_to_word']\n word_to_id = map_pkt['word_to_id']\n tags_dict = map_pkt['tags_dict']\n id_to_tags = map_pkt['id_to_tags']\n tags_to_id = map_pkt['tags_to_id']\n else:\n words_dict, id_to_word, word_to_id = word_mapping(train_sentences)\n tags_dict, id_to_tags, tags_to_id = tag_mapping(train_sentences)\n map_pkt = {}\n map_pkt['words_dict'] = words_dict\n map_pkt['id_to_word'] = id_to_word\n map_pkt['word_to_id'] = word_to_id\n map_pkt['tags_dict'] = tags_dict\n map_pkt['id_to_tags'] = id_to_tags\n map_pkt['tags_to_id'] = tags_to_id\n pickle.dump(map_pkt, open(config.map_pkt, 'wb'))\n\n # processed data file\n if os.path.exists(config.handled_pkt):\n data_pkt = pickle.load(open(config.handled_pkt, 'rb'))\n train_data = data_pkt['train_data']\n dev_data = data_pkt['dev_data']\n test_data = data_pkt['test_data']\n else:\n train_data = prepare_dataset(train_sentences, word_to_id, tags_to_id)\n test_data = prepare_dataset(test_sentences, word_to_id, tags_to_id)\n dev_data = prepare_dataset(dev_sentences, word_to_id, tags_to_id)\n data_dict = {}\n data_dict['train_data'] = train_data\n data_dict['test_data'] = test_data\n data_dict['dev_data'] = 
def load_word2vec(config, id_to_word):\n    \"\"\"\n    Load pre-trained word2vec embedding vectors\n    :param config:\n    :param id_to_word:\n    :return:\n    \"\"\"\n    if os.path.exists(config.embedding_matrix_file):\n        embedding_mat = np.load(config.embedding_matrix_file)\n        return embedding_mat\n    else:\n        pre_trained = {}\n        emb_invalid = 0\n        for i, line in enumerate(codecs.open(config.emb_file, 'r', encoding='utf-8')):\n            line = line.rstrip().split()\n            if len(line) == config.embsize + 1:\n                pre_trained[line[0]] = np.array(\n                    [float(x) for x in line[1:]]\n                ).astype(np.float32)\n            else:\n                emb_invalid = emb_invalid + 1\n\n        if emb_invalid > 0:\n            print('warning: %i invalid lines' % emb_invalid)\n\n        num_words = len(id_to_word)\n        embedding_mat = np.zeros([num_words, config.embsize])\n        for i in range(num_words):\n            word = id_to_word[i]\n            if word in pre_trained:\n                embedding_mat[i] = pre_trained[word]\n            else:\n                pass\n        print('loaded %i pre-trained character vectors' % len(pre_trained))\n        np.save(config.embedding_matrix_file, embedding_mat)\n        return embedding_mat\n\n\ndef get_X_and_Y_data(dataset, max_len, num_classes):\n    \"\"\"\n    Split the prepared data into padded X ids and one-hot Y labels\n    :param dataset:\n    :param max_len:\n    :param num_classes:\n    :return:\n    \"\"\"\n    x_data = [data[1] for data in dataset]\n    x_data = pad_sequences(x_data, maxlen=max_len, dtype='int32', padding='post', truncating='post', value=0)\n    y_data = [data[2] for data in dataset]\n    y_data = pad_sequences(y_data, maxlen=max_len, dtype='int32', padding='post', truncating='post', value=0)\n    y_data = to_categorical(y_data, num_classes=num_classes)\n    return x_data, y_data\n\n\ndef check_label(front_label, follow_label):\n    \"\"\"\n    Check whether two consecutive labels form a coherent sequence\n    :param front_label:\n    :param follow_label:\n    :return:\n    \"\"\"\n    tag_check = {\n        \"I\": [\"B\", \"I\"],\n        \"E\": [\"B\", \"I\"],\n    }\n    if not follow_label:\n        raise Exception(\"follow label should not be None\")\n\n    if not front_label:\n        return True\n\n    if follow_label.startswith(\"B-\"):\n        return False\n\n    if (follow_label.startswith(\"I-\") or follow_label.startswith(\"E-\")) and \\\n            front_label.endswith(follow_label.split(\"-\")[1]) and \\\n            front_label.split(\"-\")[0] in tag_check[follow_label.split(\"-\")[0]]:\n        return True\n    return False\n\n\ndef format_result(chars, tags):\n    \"\"\"\n    Convert the network output tags into a list of entity dicts\n    :param chars:\n    :param tags:\n    :return:\n    \"\"\"\n    entities = []\n    entity = []\n    for index, (char, tag) in enumerate(zip(chars, tags)):\n        entity_continue = check_label(tags[index - 1] if index > 0 else None, tag)\n        if not entity_continue and entity:\n            entities.append(entity)\n            entity = []\n        entity.append([index, char, tag, entity_continue])\n    if entity:\n        entities.append(entity)\n\n    entities_result = []\n    for entity in entities:\n        # B- starts a multi-token entity; S- marks a single-token entity in BIOES\n        if entity[0][2].startswith(\"B-\") or entity[0][2].startswith(\"S-\"):\n            entities_result.append(\n                {\"begin\": entity[0][0] + 1,\n                 \"end\": entity[-1][0] + 1,\n                 \"words\": \"\".join([char for _, char, _, _ in entity]),\n                 \"type\": entity[0][2].split(\"-\")[1]\n                 }\n            )\n\n    return entities_result\n\n\nif __name__ == '__main__':\n    sentences = load_sentences(r'./data/dev.txt')\n    bioes_tag = change_bio_to_bioes(sentences)\n    words_dict, id_to_word, word_to_id = word_mapping(sentences)\n    tags_dict, id_to_tags, tags_to_id = tag_mapping(sentences)\n    data = prepare_dataset(sentences, word_to_id, tags_to_id)\n    
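# Hypothetical smoke test: max_len=100 is an arbitrary demo value; pad and\n    # one-hot the prepared data and confirm the tensor shapes.\n    x_demo, y_demo = get_X_and_Y_data(data, max_len=100, num_classes=len(tags_to_id))\n    print(x_demo.shape, y_demo.shape)\n    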
print()\n","sub_path":"NER/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":11489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"514002883","text":"\"\"\"all models migrations\n\nRevision ID: 426cef99c026\nRevises: 4cad96b74ef3\nCreate Date: 2021-07-10 17:08:58.298372\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '426cef99c026'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table(\n        'produtos',\n        sa.Column('id', sa.Integer(), nullable=False),\n        sa.Column('descricao', sa.String(length=200), nullable=False),\n        sa.Column('preco', sa.DECIMAL(), nullable=False),\n        sa.PrimaryKeyConstraint('id'),\n    )\n    op.drop_constraint(\n        'conta_produto_id_produto_fkey', 'conta_produto', type_='foreignkey'\n    )\n    op.create_foreign_key(None, 'conta_produto', 'produtos', ['id_produto'], ['id'])\n    op.drop_constraint(\n        'estoque_produto_id_produto_fkey', 'estoque_produto', type_='foreignkey'\n    )\n    op.create_foreign_key(None, 'estoque_produto', 'produtos', ['id_produto'], ['id'])\n    op.drop_constraint(\n        'fornecedor_produto_id_produto_fkey', 'fornecedor_produto', type_='foreignkey'\n    )\n    op.create_foreign_key(\n        None, 'fornecedor_produto', 'produtos', ['id_produto'], ['id']\n    )\n    op.drop_constraint(\n        'produto_ordem_de_compra_id_produto_fkey',\n        'produto_ordem_de_compra',\n        type_='foreignkey',\n    )\n    op.create_foreign_key(\n        None, 'produto_ordem_de_compra', 'produtos', ['id_produto'], ['id']\n    )\n    # drop the OLD table, and only after its foreign keys have been repointed\n    # (downgrade() recreates 'produto', which confirms it is the one to remove)\n    op.drop_table('produto')\n    # ### end Alembic commands ###\n
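\n# With a standard Alembic setup this rename migration is applied with\n# 'alembic upgrade head' and reverted with 'alembic downgrade -1': upgrade()\n# creates the new 'produtos' table, repoints the four foreign keys, then drops\n# the old 'produto' table, and downgrade() mirrors those steps.\n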
\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    # recreate the old table first, so the foreign keys below have a valid target\n    op.create_table(\n        'produto',\n        sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n        sa.Column(\n            'descricao', sa.VARCHAR(length=200), autoincrement=False, nullable=False\n        ),\n        sa.Column('preco', sa.NUMERIC(), autoincrement=False, nullable=False),\n        sa.PrimaryKeyConstraint('id', name='produto_pkey'),\n    )\n    op.drop_constraint(None, 'produto_ordem_de_compra', type_='foreignkey')\n    op.create_foreign_key(\n        'produto_ordem_de_compra_id_produto_fkey',\n        'produto_ordem_de_compra',\n        'produto',\n        ['id_produto'],\n        ['id'],\n    )\n    op.drop_constraint(None, 'fornecedor_produto', type_='foreignkey')\n    op.create_foreign_key(\n        'fornecedor_produto_id_produto_fkey',\n        'fornecedor_produto',\n        'produto',\n        ['id_produto'],\n        ['id'],\n    )\n    op.drop_constraint(None, 'estoque_produto', type_='foreignkey')\n    op.create_foreign_key(\n        'estoque_produto_id_produto_fkey',\n        'estoque_produto',\n        'produto',\n        ['id_produto'],\n        ['id'],\n    )\n    op.drop_constraint(None, 'conta_produto', type_='foreignkey')\n    op.create_foreign_key(\n        'conta_produto_id_produto_fkey',\n        'conta_produto',\n        'produto',\n        ['id_produto'],\n        ['id'],\n    )\n    op.drop_table('produtos')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/426cef99c026_all_models_migrations.py","file_name":"426cef99c026_all_models_migrations.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"174437264","text":"##################################\n# file amitie-entre-gardes-entrainement.py\n# exercise name : Amitié entre gardes (friendship between guards)\n# url : http://www.france-ioi.org/algo/task.php?idChapter=648&idTask=0&sTab=task&iOrder=5\n# type : training\n#\n# Chapter name : \n#\n# Skill practised : \n#\n# author : \n##################################\n\n# module imports\n\n\n# write your code here\n\ndebutsoldat1=int(input())\nfinsoldat1=int(input())\ndebutsoldat2=int(input())\nfinsoldat2=int(input())\n\nif (finsoldat2 str:\n    \"\"\"\n    Fetch a customer ID by database lookup or create one if a token is provided\n    \"\"\"\n    cid = current_user.customer_id\n    if not cid and token:\n        cid = stripe.Customer.create(\n            email=current_user.email,\n            source=token\n        ).id\n        current_user.customer_id = cid\n        db.session.commit()\n    return cid\n\ndef new_subscription(plan: str, token: str) -> bool:\n    \"\"\"\n    Create a new subscription for the current user\n    \"\"\"\n    cid = get_customer_id(token)\n    subscription = stripe.Subscription.create(\n        customer=cid,\n        items=[{'plan': PLANS[plan]['id']}]\n    )\n    current_user.subscription_id = subscription.id\n    current_user.plan = plan\n    db.session.commit()\n    return True\n\ndef change_subscription(plan: str) -> bool:\n    \"\"\"\n    Change the subscription from one plan to another\n    \"\"\"\n    sid = current_user.subscription_id\n    if not sid or current_user.plan == plan:\n        return False\n    subscription = stripe.Subscription.retrieve(sid)\n    subscription.modify(sid,\n        cancel_at_period_end=False,\n        items=[{\n            'id': subscription['items']['data'][0].id,\n            'plan': PLANS[plan]['id'],\n        }]\n    )\n    current_user.subscription_id = subscription.id\n    current_user.plan = plan\n    db.session.commit()\n    return True\n\ndef cancel_subscription() -> bool:\n    \"\"\"\n    Cancel a subscription\n    \"\"\"\n    sid = current_user.subscription_id\n    if sid:\n        subscription = stripe.Subscription.retrieve(sid)\n        subscription.delete()\n    cid = current_user.customer_id\n    if cid:\n        customer = stripe.Customer.retrieve(cid)\n        customer.delete()\n    current_user.customer_id = 
None\n current_user.subscription_id = None\n current_user.plan = None\n db.session.commit()\n return True\n","sub_path":"avwx_account/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"341660330","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"PROTPGD\")\n# ecal mapping\nprocess.load(\"Geometry.EcalMapping.EcalMapping_cfi\")\n\nprocess.load(\"Geometry.EcalMapping.EcalMappingRecord_cfi\")\n\n# magnetic field\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n# Calo geometry service model\nprocess.load(\"Geometry.CaloEventSetup.CaloGeometry_cfi\")\n\n# Calo geometry service model\nprocess.load(\"Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi\")\n\n# IdealGeometryRecord\nprocess.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\")\n\nprocess.load(\"CalibCalorimetry.Configuration.Ecal_FakeConditions_cff\")\n\n#include \"SimCalorimetry/EcalTrigPrimProducers/data/ecalTriggerPrimitiveDigis_with_suppressed.cff\"\nprocess.load(\"SimCalorimetry.EcalTrigPrimProducers.ecalTriggerPrimitiveDigis_cff\")\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring('file:/data/uberthon/tpg/elec_unsupp_pt10-100.root')\n)\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(10)\n)\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring('drop *_*_*_*', \n 'keep *_simEcalTriggerPrimitiveDigis_*_*', \n 'keep *_ecalDigis_*_*', \n 'keep *_ecalRecHit_*_*', \n 'keep *_ecalWeightUncalibRecHit_*_*', \n 'keep PCaloHits_*_EcalHitsEB_*', \n 'keep PCaloHits_*_EcalHitsEE_*', \n 'keep edmHepMCProduct_*_*_*'),\n fileName = cms.untracked.string('TrigPrim.root')\n)\n\nprocess.Timing = cms.Service(\"Timing\")\n\nprocess.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\")\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n cerr = cms.untracked.PSet(\n enable = cms.untracked.bool(False)\n ),\n cout = cms.untracked.PSet(\n DEBUG = cms.untracked.PSet(\n limit = cms.untracked.int32(0)\n ),\n EcalTPG = cms.untracked.PSet(\n limit = cms.untracked.int32(1000000)\n ),\n enable = cms.untracked.bool(True),\n threshold = cms.untracked.string('DEBUG')\n ),\n debugModules = cms.untracked.vstring('simEcalTriggerPrimitiveDigis')\n)\n\nprocess.p = cms.Path(process.simEcalTriggerPrimitiveDigis)\nprocess.outpath = cms.EndPath(process.out)\n\n\n","sub_path":"SimCalorimetry/EcalTrigPrimProducers/test/writeTP_cfg.py","file_name":"writeTP_cfg.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"553029341","text":"from src import logger\nfrom io import BytesIO\n\n\ndef make_sequence(log, msg=\"\"):\n log.debug('debug')\n log.warning('warning')\n log.info('info')\n log.error('error: {0}'.format(msg))\n log.fatal('fatal')\n try:\n raise RuntimeError(\"cos sie spieprzylo\")\n except Exception:\n log.exception('exception')\n\n\ndef test_logger():\n #print dir(logger)\n #make_sequence(logger.logger)\n\n make_sequence(logger.make_logger(\"testowy logger\", debug=True, colored=True))\n make_sequence(logger.make_logger(\"logger_nocolor\", debug=True, colored=False), False)\n","sub_path":"tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} 
+{"seq_id":"29219895","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 4 16:08:17 2021\n\n@author: timvr\n\"\"\"\n\nfrom mastermind.game.algorithms.Mastermind_Oracle import build_mastermind_a_circuit, build_mastermind_b_circuit\nfrom mastermind.arithmetic.dradder import add, sub\nfrom mastermind.arithmetic.count import count, icount\nfrom mastermind.arithmetic.increm import increment, decrement, cnincrement, cndecrement\nfrom qiskit import QuantumCircuit\nimport numpy as np\n\ndef build_find_colour_positions_circuit(circuit, x, q, a, c, d, secret_sequence, d_positions=None):\n '''\n Builds mastermind check circuit on circuit. Requires the inputs q, a, b, c\n and secret_sequence. You can optionally choose to measure the outcomes.\n\n Parameters\n ----------\n circuit : QuantumCircuit\n Circuit to build mastermind circuit on.\n x : QuantumRegister, length n\n holds binary proto-quereis\n q : QuantumRegister, length n*ceil(log2(k))\n holds two-colour queries to the oracle\n a : QuantumRegister, length 1+ceil(log2(k))\n holds oracle 'a' outputs\n c : integer, c in {0, 1, ..., k-1}\n the colour of which we want to know the positions\n d : integer, d in {0, 1, ..., k-1}\n any colour which does not occur in the secret string\n secret_sequence: List, length n\n Secret sequence.\n\n Returns\n -------\n circuit : QuantumCircuit\n Circuit with find_colour_positions algorithm appended to it.\n\n '''\n \n #0: init\n circuit.barrier()\n \n #1: Hadamard\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n #2: build query\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #3: get Oracle a response\n build_mastermind_a_circuit(circuit, q, a, secret_sequence)\n circuit.barrier()\n \n #3.alt: sub d positions if used\n if d_positions != None:\n for (i,j) in enumerate(d_positions):\n if j == 1:\n circuit.x(x[i])\n cndecrement(circuit, [x[i]], a)\n circuit.x(x[i])\n circuit.barrier()\n \n #4: Z gate on output LSB\n circuit.z(a[0]) # should be the LSB; maybe that's actually a[-1]!!!!!!!!!!\n circuit.barrier()\n \n #5: undo step 2 & 3\n if d_positions != None:\n for (i,j) in enumerate(d_positions):\n if j == 1:\n circuit.x(x[i])\n cnincrement(circuit, [x[i]], a)\n circuit.x(x[i])\n build_mastermind_a_circuit(circuit, q, a, secret_sequence, do_inverse=True)\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #11\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n # Return the check circuit\n return circuit\n\n\ndef build_find_colour_positions_alt_circuit(circuit, x, q, a, b, c, k, secret_sequence):\n '''\n Builds mastermind check circuit on circuit. Requires the inputs q, a, b, c\n and secret_sequence. 
You can optionally choose to measure the outcomes.\n\n Parameters\n ----------\n circuit : QuantumCircuit\n Circuit to build mastermind circuit on.\n x : QuantumRegister, length n\n holds binary proto-queries\n q : QuantumRegister, length n*ceil(log2(k))\n holds two-colour queries to the oracle\n a : QuantumRegister, length 1+ceil(log2(k))\n holds oracle 'a' outputs\n b : QuantumRegister, length 1+ceil(log2(k))+ceil(log2(n))\n holds inner product outputs\n c : integer, c in {0, 1, ..., k-1}\n the colour of which we want to know the positions\n k : integer\n number of available colours\n secret_sequence: List, length n\n Secret sequence.\n\n Returns\n -------\n circuit : QuantumCircuit\n Circuit with find_colour_positions_alt algorithm appended to it.\n\n '''\n \n logk = int(np.ceil(np.log2(k)))\n \n #0: init\n circuit.barrier()\n \n #1: Hadamard\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n #2: calculate the MMa sum\n for d in range(k):\n #2a: build query\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #2b: get Oracle a response\n build_mastermind_a_circuit(circuit, q, a, secret_sequence)\n circuit.barrier()\n \n #2c: add to output reg\n add(circuit, a, b)\n circuit.barrier()\n \n #2d: undo #2a & #2b\n build_mastermind_a_circuit(circuit, q, a, secret_sequence, do_inverse=True)\n _build_query_two_colours(circuit, x, q, c, d)\n circuit.barrier()\n \n #3: add the count of c colours to the b reg\n count(circuit, a=x, b=b, step=1) # or step=-1?????\n circuit.barrier()\n \n #4: ignore the logk LSBs in the b reg\n #... which of course requires literally no code, but I'll add identity gates for clarity\n for i in range(logk):\n circuit.i(b[i])\n circuit.barrier()\n \n #5: decrement the remaining value by 1 to find the desired inner product\n decrement(circuit, b[logk::])\n circuit.barrier()\n \n #6: Z gate on output LSB (the effective LSB, not the actual one)\n circuit.z(b[logk]) # should be the remaining LSB; not exactly sure if this is the correct one!\n circuit.barrier()\n \n #7: undo steps 2 through 5\n increment(circuit, b[logk::])\n for i in range(logk):\n circuit.i(b[i])\n icount(circuit, a=x, b=b, step=1)\n for d in range(k):\n _build_query_two_colours(circuit, x, q, c, d)\n build_mastermind_a_circuit(circuit, q, a, secret_sequence)\n sub(circuit, a, b)\n build_mastermind_a_circuit(circuit, q, a, secret_sequence, do_inverse=True)\n _build_query_two_colours(circuit, x, q, c, d)\n \n #8\n [circuit.h(qubit) for qubit in x]\n circuit.barrier()\n \n # Return the check circuit\n return circuit\n\n\ndef _build_query_two_colours(circuit, x, q, c, d):\n '''\n Performs CNOTs on the query q according to binary proto-query x:\n - if x[i]=1, then the binary version of c is applied\n - alse, d is applied.\n\n Parameters\n ----------\n circuit : QuantumCircuit\n Circuit to build mastermind circuit on.\n x : QuantumRegister, length n\n holds binary proto-queries\n q : QuantumRegister, length n*ceil(log2(k))\n holds two-colour queries to the oracle\n a : QuantumRegister, length 1+ceil(log2(k))\n holds oracle 'a' outputs\n c : integer, c in {0, 1, ..., k-1}\n the colour of which we want to know the positions\n d : integer, d in {0, 1, ..., k-1}\n any colour which does not occur in the secret string\n secret_sequence: List, length n\n Secret sequence.\n\n Returns\n -------\n circuit : QuantumCircuit\n Circuit with build_query_two_colours sub-circuit appended to it.\n\n '''\n \n n_x = len(x)\n n_q = len(q)\n \n amount_colour_bits = n_q // n_x\n \n binary_c = 
bin(c)[2:].zfill(amount_colour_bits)\n binary_d = bin(d)[2:].zfill(amount_colour_bits)\n \n for i in range(n_x):\n for (j,bit) in enumerate(binary_c[::-1]):\n if bit == '1':\n circuit.cnot(x[i], q[i*amount_colour_bits + j])\n else:\n pass\n circuit.x(x[i])\n for (j,bit) in enumerate(binary_d[::-1]):\n if bit == '1':\n circuit.cnot(x[i], q[i*amount_colour_bits + j])\n else:\n pass\n circuit.x(x[i])\n \n return circuit\n\n","sub_path":"src/mastermind/game/algorithms/Find_Colour_Positions.py","file_name":"Find_Colour_Positions.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"429463703","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 9 12:21:15 2016\n\n@author: pbva152\n\"\"\"\n\nimport convert\nimport numpy as np\nimport matasanochallenge3\n\n\ndatas = np.genfromtxt(\"mt6.txt\", dtype = str) \ndata = \"\".join(datas)\n\ndef HammDist( string1, string2):\n assert(len(string1) == len(string2))\n hammweight = 0\n for i in range(0, len(string1)):\n tempbin = bin(string1[i] ^ string2[i])\n hammweight = tempbin.count(\"1\") + hammweight\n return hammweight\n \n\nbytedata = convert.base642byte(data)\nmetricvec = []\nfinalmetric = 100000 \nfor keysize in range(2, 41):\n bytes1 =[]\n bytes2 = []\n bytes3 = []\n bytes4 = []\n for i in range(0, keysize):\n bytes1.append(bytedata[i])\n bytes2.append(bytedata[i + keysize])\n bytes3.append(bytedata[i + (2 * keysize)])\n bytes4.append(bytedata[i + (3 * keysize)])\n metric1 = HammDist(bytes1, bytes2) / keysize\n metric2 = HammDist(bytes1, bytes3) / keysize\n metric3 = HammDist(bytes1, bytes4) / keysize\n metric4 = HammDist(bytes2, bytes3) / keysize\n metric5 = HammDist(bytes2, bytes4) / keysize\n metric6 = HammDist(bytes3, bytes4) / keysize\n metric = (metric1 + metric2 + metric3 + metric4 + metric5 + metric6) / 6\n metricvec.append(metric)\n if (metric < finalmetric):\n finalmetric = metric\n finalkeysize = keysize\n \nplaintext = [''] * len(bytedata)\n\nfor i in range(0,finalkeysize):\n ciph = []\n \n for j in range(0, (len(bytedata) // finalkeysize) +1):\n if (j* finalkeysize + i) < len(bytedata):\n ciph.append(bytedata[j* finalkeysize + i])\n plain = matasanochallenge3.DecryptCaesarCipher(convert.byte2hex(ciph))\n for j in range(0, (len(bytedata) // finalkeysize)+1):\n if (j* finalkeysize + i) < len(bytedata):\n plaintext[j* finalkeysize + i] = plain[j] \nfinalplaintext = \"\".join(plaintext)\nprint(finalplaintext)\n\n\n\n\n\n\n\n\n","sub_path":"python/matasano/set1/c6.py","file_name":"c6.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"380233265","text":"\"\"\"TaxIDlink\n\nauthor: jbonet\ndate: 10/2013\n\n@oliva's lab\n\"\"\"\n\n\"\"\"\nImport Standard Libraries\n\"\"\"\nimport sys, os, re\nimport subprocess\nimport warnings\nimport urllib\n\n\"\"\"\nDependences in SBI library\n\"\"\"\nfrom SBI import SBIglobals\nfrom SBI.databases import taxIDftp\nfrom SBI.beans import Path\nfrom SBI.beans import File\n\nclass TaxIDlink(object):\n \"\"\"The TaxIDlink class controls the download and parsing of TaxID database\n\n \"\"\"\n def __init__(self, local = None):\n self._local = os.path.abspath(local)\n self.__name__ = 'databases.TaxIDlink' # This must be included in every class for the SBIglobals.alert()\n\n self._nodes = 'nodes.dmp'\n self._names = 'names.dmp'\n self._delet = 'delnodes.dmp'\n self._merged = 'merged.dmp'\n self._taxid = 'taxid.gz'\n\n 
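# NOTE: the *.dmp names above stay relative until the 'local' setter below\n        # joins them onto the database directory; also, os.path.abspath(local) at the\n        # top of __init__ assumes a directory is always passed -- constructing\n        # TaxIDlink() with the default local=None would raise a TypeError there.\n        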
if local is not None:\n            self.local = local\n\n    \"\"\"ATTRIBUTES\"\"\"\n    @property\n    def local(self): return self._local\n    @local.setter\n    def local(self, value):\n        self._local = os.path.abspath(value)\n        self._nodes = os.path.join(self._local, self._nodes)\n        self._names = os.path.join(self._local, self._names)\n        self._delet = os.path.join(self._local, self._delet)\n        self._merged = os.path.join(self._local, self._merged)\n        self._taxid = os.path.join(self._local, self._taxid)\n\n    @property\n    def localTaxIDs(self):\n        taxFile = File(self._taxid, 'r')\n        for tax_line in taxFile.descriptor:\n            yield tax_line\n        taxFile.close()\n\n    @property\n    def source(self):\n        return taxIDftp['show']\n\n    \"\"\"BOOLEANS\"\"\"\n    @property\n    def has_local(self): return self._local is not None\n\n    \"\"\"METHODS\"\"\"\n    def download(self):\n        if not self.has_local:\n            raise NameError('A local TaxID database directory must be defined.')\n\n        Path.mkdir(self.local)\n        destination = os.path.join(self.local, 'taxdmp.zip')\n        urllib.urlretrieve(taxIDftp['global'], destination)\n        command = ['unzip', '-o', destination, '-d', self.local]\n        p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n        out, err = p.communicate()\n\n        self._process()\n\n        return True\n\n    def get_TaxID(self, TaxID):\n        if self.has_local:\n            for tax_line in self.localTaxIDs:\n                if tax_line.split('\t')[0] == TaxID:\n                    return tax_line\n\n        else:\n            raise NameError('A local TaxID database directory must be defined.')\n\n    def get_TaxIDs(self, TAXset):\n        if isinstance(TAXset, str):\n            warnings.warn('For single TaxID search the get_TaxID function is recommended.')\n            yield self.get_TaxID(TAXset)\n            # the single id has been served; do not fall through to the set scan\n            return\n\n        if self.has_local:\n            for tax_line in self.localTaxIDs:\n                if tax_line.split('\t')[0] in TAXset:\n                    yield tax_line\n        else:\n            raise NameError('A local TaxID database directory must be defined.')\n\n    \"\"\"PRIVATE METHODS\"\"\"\n    def _process(self):\n        inh = {}\n        nodefile = File(file_name = self._nodes, action = 'r')\n        for line in nodefile.descriptor:\n            line = re.sub('\'', '\\\\\'', line)\n            line_data = line.split('|')\n            inh[line_data[0].strip()] = TaxID(line_data[0].strip())\n            inh[line_data[0].strip()].parent = line_data[1].strip()\n            inh[line_data[0].strip()].rank = line_data[2].strip()\n        nodefile.close()\n\n        namefile = File(file_name = self._names, action = 'r')\n        for line in namefile.descriptor:\n            line = re.sub('\'', '\\\\\'', line)\n            line_data = line.split('|')\n            if line_data[3].strip() == 'scientific name':\n                inh[line_data[0].strip()].name = line_data[1].strip()\n        namefile.close()\n\n        delefile = File(file_name = self._delet, action = 'r')\n        for line in delefile.descriptor:\n            data = line.split('|')\n            inh[data[0].strip()] = TaxID(data[0].strip())\n            inh[data[0].strip()].old = True\n        delefile.close()\n\n        mrgefile = File(file_name = self._merged, action = 'r')\n        for line in mrgefile.descriptor:\n            data = line.split('|')\n            inh[data[0].strip()] = TaxID(data[0].strip())\n            inh[data[0].strip()].old = True\n            inh[data[0].strip()].new = data[1].strip()\n        mrgefile.close()\n\n        taxFile = File(self._taxid, 'w', True)\n        for taxid in inh:\n            taxFile.write(str(inh[taxid]) + \"\\n\")\n        taxFile.close()\n\nclass TaxID(object):\n    def __init__(self, taxid = None, inline = None):\n        if inline is not None:\n            inline = inline.strip()\n        self.taxid = taxid if inline is None else inline.split('\t')[0]\n        self.name = None if inline is None else inline.split('\t')[1] if inline.split('\t')[1] != 'None' else None\n        self.rank = None if inline is None else inline.split('\t')[2] if 
inline.split('\\t')[2] != 'None' else None\n self.parent = None if inline is None else inline.split('\\t')[3] if inline.split('\\t')[3] != 'None' else None\n self.old = False if inline is None else eval(inline.split('\\t')[4])\n self.new = None if inline is None else inline.split('\\t')[5] if inline.split('\\t')[5] != 'None' else None\n\n \"\"\"BOOLEANS\"\"\"\n @property\n def has_old(self): return self.old\n @property\n def has_new(self): return False if self.new is None else True\n\n \"\"\"OVERWRITE PARENT METHODS\"\"\"\n def __str__(self):\n return \"{0.taxid}\\t{0.name}\\t{0.rank}\\t{0.parent}\\t{0.old}\\t{0.new}\".format(self)\n","sub_path":"collision_detection_program/SBI/databases/TaxIDlink.py","file_name":"TaxIDlink.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"432660897","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20201211\n# Problem link : https://leetcode.com/problems/merge-in-between-linked-lists/\n################################################################\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n # time: O(n), space: O(1), where n = size(list1) + size(list2).\n def mergeInBetween(self, list1: ListNode, a: int, b: int, list2: ListNode) -> ListNode:\n p1, p2 = list1, list2\n for _ in range(a-1):\n p1 = p1.next\n s1 = p1 # first stop point\n for _ in range(b-a+2):\n p1 = p1.next\n s2 = p1 # second stop point\n s1.next = p2 \n while p2.next != None:\n p2 = p2.next\n p2.next = s2 # connect stop points and list\n return list1\n \n ","sub_path":"biweekly_contest_40_virtual/1669_merge_in_between_linked_lists.py","file_name":"1669_merge_in_between_linked_lists.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"622613223","text":"'''\n======================================================================\nCreated on Jan 14, 2018\n\nPURPOSE: this module provides classes to read Maven projects from git or other repos\n specifically intended to create the graph of multiple project dependencies\n\nROADMAP: TODO - \n 1. review how properties are distributed and could break things\n 2. review subproject dependencies on top level, are props declared?\n 2. review parent POM, are props declared?\n 3. 
are external property files used?\n\n\n@author: Larry\n======================================================================\n'''\nimport os\nimport subprocess\n\n#import json\n#import xml.etree.ElementTree as ET\n#import urllib2\n#import csv\nimport xml.etree.cElementTree as ET\nimport re\nimport urllib.request\n\n\n#======================================================================= \n# static functions and constants\nclass Util(object):\n mvn_pom_ns = {\"mvn\":\"http://maven.apache.org/POM/4.0.0\"}\n \n def __init__(self):\n pass \n \n @staticmethod\n def get_tag_value(name, section):\n s = ('mvn:%s' % name)\n elem = section.find(s, Util.mvn_pom_ns)\n if elem ==None:\n return'' \n return elem.text\n\n @staticmethod\n def get_path(dirs):\n path = ''\n for d in dirs:\n path += d + '/' \n return path[:len(path) -1]\n\n # if hasattr(a, 'property'):\n \n @staticmethod\n def run_process_2(cmd_args):\n #result = subprocess.run(['dir', '../*.*'], stdout=subprocess.PIPE)\n #result = subprocess.run(['C:/apps/maven352/bin/mvn', 'help:effective-pom'], stdout=subprocess.PIPE)\n result = subprocess.run(['cd', '..'], stdout=subprocess.PIPE, shell=True) \n result = subprocess.run(cmd_args, stdout=subprocess.PIPE, shell=True) \n print(result.stdout.decode('utf-8'))\n\n \n @staticmethod\n def run_process(cmd_args, args_in):\n cmd = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n if (args_in):\n cmd.stdin.write(args_in.encode('utf-8'))\n cmd.stdin.flush() # Must include this to ensure data is passed to child process\n result = cmd.stdout.read()\n \n print(args_in.encode('utf-8'))\n print(result) #.stdout.decode('utf-8'))\n '''\n cmdline = [\"cmd\", \"/q\", \"/k\", \"echo off\"]\n batch = b\"\"\"\\\n rem vsinstr -coverage helloclass.exe /exclude:std::*\n vsperfcmd /start:coverage /output:run.coverage\n helloclass\n vsperfcmd /shutdown\n exit\n \"\"\" \n \n '''\n def test_map_update(self):\n A = {'a':1, 'b':2, 'c': 3}\n B = {'c':99, 'd':4, 'e':5}\n A.update(B)\n print(A)\n\n#======================================================================= \n# identifies Maven coordinates for a project or dependnecy\nclass MavenCoords(object):\n def __init__(self, element, props): \n if (not element):\n self.groupid =''\n self.artifactid = ''\n self.version = ''\n self.scope = ''\n self.relative_path = ''\n self.key ='' \n return \n \n self.groupid = Util.get_tag_value('groupId', element)\n self.artifactid = Util.get_tag_value('artifactId', element)\n self.version = Util.get_tag_value('version', element) \n self.relative_path = Util.get_tag_value('relativePath', element) \n self.scope = Util.get_tag_value('scope', element) \n self.refresh_key(props) \n \n def refresh_key(self, props):\n if (props and self.version in props):\n self.version = props[self.version]\n self.key = '%s|%s|%s' % (self.groupid, self.artifactid, self.version) \n\n \n\n#======================================================================= \n# a maven project POM complete with properties and dependencies \nclass MavenProject(object):\n def __init__(self, pom_url, project_map): \n #dirs = pom_url.split('/')\n\n self.pom_url = pom_url; \n self.project_map = project_map\n self.pom_file = self.get_pom_file(self.pom_url)\n self.name = Util.get_tag_value('name', self.pom_file) \n self.packaging = Util.get_tag_value('packaging', self.pom_file) \n \n self.init_from_parent() \n self.properties.update(self.get_properties(self.pom_file)) \n self.coord = MavenCoords(self.pom_file, self.properties) \n 
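# ordering note: the parent's properties are merged first (in init_from_parent\n        # above) because the coordinates just resolved and the dependencies gathered\n        # below expand their ${...} version strings against self.properties\n        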
self.dependencies.update(self.get_dependencies(self.pom_file))\n self.project_map[self.coord.key] = self \n self.get_sub_modules(self.pom_file)\n self.history = []\n self.consumers = []\n #if self.packaging =='pom':\n \n # parent pom's will always be pre-existent to child pom's. they will be looked by coord key from\n # the global graph / project list \n def init_from_parent(self):\n parent_section = self.pom_file.findall('mvn:parent', Util.mvn_pom_ns) \n if (parent_section):\n self.parent_coord = MavenCoords(parent_section[0], None)\n parent = self.project_map[self.parent_coord.key]\n if (parent):\n self.properties = parent.properties.copy()\n self.dependencies = parent.dependencies.copy() \n else:\n print('Error: POM {} has unresolved parent POM reference {}'.format(self.name, parent.key)) \n else:\n self.dependencies = {}\n self.properties = {} \n self.coord = MavenCoords(None, None)\n dirs = self.pom_url.split('/')\n print(dirs)\n print (Util.get_path(dirs))\n \n \n def get_sub_modules(self, pom_file):\n section = pom_file.findall('mvn:modules', Util.mvn_pom_ns) \n self.modules = {}\n if (not section):\n return \n \n for elem in section[0].findall('*'):\n sub_proj = self.get_sub_module(elem.text)\n self.modules[sub_proj.coord.key] = sub_proj \n self.project_map[sub_proj.coord.key] = sub_proj\n \n\n def get_sub_module(self, sub_dir):\n dirs = self.pom_url.split('/')\n x = len(dirs)\n dirs[x-1] = 'pom.xml'\n dirs.insert(x-1, sub_dir)\n path = Util.get_path(dirs) \n module = MavenProject(path, self.project_map) \n return module\n\n def get_properties(self, pom):\n section = pom.findall('mvn:properties', Util.mvn_pom_ns)\n props = {}\n if (len(section)==0):\n return props\n \n for elem in section[0].findall('*'):\n k = re.sub('{.*?}', '', elem.tag)\n k = '${%s}' % k\n props[k] = elem.text\n return props\n\n def get_dependencies(self, pom):\n section = pom.findall('mvn:dependencies', Util.mvn_pom_ns)\n deps_map = {}\n if (len(section)==0):\n return deps_map\n \n for dep_section in section[0].findall('mvn:dependency', Util.mvn_pom_ns): \n obj = MavenCoords(dep_section, self.properties)\n deps_map[obj.key] = obj \n return deps_map\n\n @staticmethod\n def get_pom_file(pomfile):\n if pomfile.find(\"http://\") >=0 or pomfile.find(\"https://\") >=0: \n opener = urllib.request.build_opener() \n pom = ET.parse( opener.open(pomfile) ).getroot() \n else:\n pom = ET.parse(pomfile).getroot() \n return pom\n\n def logx(self, level):\n print() \n print('---------Maven Project---------')\n #print('key: %s * Group: %s * Id: %s * Ver: %s' % (self.coord.key, self.coord.groupid, self.coord.artifactid, self.coord.version))\n print('key: {0} * Name: {1} * Group: {2} * Id: {3} * Ver: {4}'.format(self.coord.key, self.name, self.coord.groupid, self.coord.artifactid, self.coord.version))\n print() \n if level ==0:\n return \n \n print(' dependencies') \n for k, v in self.dependencies.items():\n print(' key: %s * Group: %s * Id: %s * Ver: %s' % (k, v.groupid, v.artifactid, v.version))\n \n print() \n print(' properties: ', self.properties)\n \n print (' consumers')\n for proj in self.consumers:\n print(' ', proj.coord.key)\n \nclass DAGerror(Exception):\n def __init__(self, arg):\n self.arg = arg\n\n#======================================================================= \n# \nclass MavenProjectGraph(object):\n \n def __init__(self, pom_url_list):\n self.pom_url_list = pom_url_list\n self.proj_list = []\n self.proj_map = {}\n #self.validation = {}\n \n def generate_pom_list(self):\n for pom_url in 
self.pom_url_list:\n MavenProject(pom_url, self.proj_map)\n #self.proj_list.append(proj)\n #self.proj_map[proj.coord.key] = proj\n \n self.proj_list = list(self.proj_map.values())\n \n for proj in self.proj_list:\n proj.logx(1) #$$\n print()\n \n def set_options(self):\n pass\n \n \n # PURPOSE: sort the list in DAG dependency order and capture each project consumers\n #\n #\n def resolve_graph(self):\n self.resolve_dependencies()\n self.resolve_consumers()\n \n \n # PURPOSE: reorder the project list such that each projects dependencies appear before that project\n #\n # NOTE #1: iterate thru the list looking fwd in the list for each project's dependencies\n # for each dependency found, move it behind that project\n #\n # NOTE #2: the DAG is complete when the list is scanned and no dependencies exist fwd of each project\n #\n # NOTE #3: a history of each dependency relocation is maintained for each project\n # a circular reference will be detected if that \n # \n def resolve_dependencies(self):\n try:\n while True:\n for p in self.proj_list:\n print(p.name)\n\n i = 0 \n #dependency_found = False \n while i < len(self.proj_list):\n dependency_found = False \n proj_base = self.proj_list[i]\n \n print('loop i={}, base={}'.format(i, proj_base.name))\n \n j = i + 1\n while j < len(self.proj_list):\n print(' loop j {}'.format(j))\n\n proj_scan = self.proj_list[j]\n \n # a forward project dependency is found for the base project, move it behind the base project\n if proj_scan.coord.key in proj_base.dependencies:\n \n # dejavu - a repeated reorder indicates circular dependency\n if proj_scan.coord.key in proj_base.history:\n raise DAGerror(\"Error: base project - {} - encountered duplicate reorder for dependency - {} -\".format\n ( proj_base.name, proj_scan.name))\n \n # remove the fwd item first to avoid order issues \n del self.proj_list[j] #self.proj_list.remove(j)\n \n # insert behind the base project\n self.proj_list.insert(i, proj_scan)\n \n print(' reorded scan {} from j={} to i={}'.format( proj_scan.name, j, i)) \n \n for p in self.proj_list:\n print(p.name)\n \n proj_base.history.append(proj_scan.coord.key) \n dependency_found = True\n i = i -1\n break\n \n j =j+1 # while j\n \n i=i+1 # while i \n \n # repeat outer loop until nothing is reordered \n if not dependency_found:\n break\n else:\n i = 0 \n \n except DAGerror as e:\n print(e)\n \n # PURPOSE: for each project in the list, discover the set of consuming projects\n #\n # NOTE #1: call this method AFTER the dependency graph has been properly resolved\n # consuming projects will be forward in the list\n #\n def resolve_consumers(self):\n for i in range(len(self.proj_list)):\n proj_base = self.proj_list[i]\n j = i\n while j < len(self.proj_list)-1:\n j = j+1\n proj_scan = self.proj_list[j]\n if (proj_base.coord.key in proj_scan.dependencies):\n proj_base.consumers.append(proj_scan)\n \n \n def list_projects(self): \n for proj in self.proj_list:\n proj.logx(1) \n \n \n#==========================================================================\ndef main():\n pom_files = ['D:\\\\devspaces\\\\wks4\\\\py1\\\\snipits2.xml', \n 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml']\n \n pom_files = ['D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-A.xml', \n 'D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-B.xml',\n 'D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-C.xml',\n 'D:\\\\devspaces\\\\wks4\\\\py1\\\\pom-D.xml',\n ]\n \n pom_files = ['C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-A.xml', \n 
'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-B.xml',\n 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-C.xml',\n 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-D.xml',\n ]\n \n # C:\\Users\\Larry\\Dropbox\\gitcode\\gh\\maven_proj_graph\n \n s = ['dir', '*']\n s = ['C:/apps/maven352/bin/mvn', 'help:effective-pom']\n \n s2 = ['C:\\\\apps\\\\maven352\\\\bin\\\\mvn', 'help:effective-pom']\n \n #Util.run_process(['cd', '..'], 'C:\\\\apps\\\\maven352\\\\bin\\\\mvn help:effective-pom')\n \n #Util.run_process('C:\\\\apps\\\\maven352\\\\bin\\\\mvn help:effective-pom', '')\n \n #Util.test_map_update(None)\n \n #return()\n \n graph = MavenProjectGraph(pom_files)\n \n graph.generate_pom_list()\n \n graph.resolve_graph()\n \n graph.list_projects()\n\n\n#==========================================================================\n# see this article for opening remote xml files\n# https://stackoverflow.com/questions/28238713/python-xml-parsing-lxml-urllib-request\n \ndef main2(): \n cwd = os.getcwd()\n cwd = 'D:\\\\devspaces\\\\wks4\\\\py1\\\\'\n pom_file = cwd + 'snipits2.xml'\n \n pom_file = 'D:\\\\devspaces\\\\wks4\\\\py1\\\\snipits2.xml'\n pom = ET.parse(pom_file).getroot() \n \n # https://github.com/LeonardoZ/java-concurrency-patterns.git\n \n # this is the correct patttern for reading single files from github\n # https://raw.githubusercontent.com/user/repository/branch/filename\n \n # this is the web page containing the file \n # 'https://github.com/LeonardoZ/java-concurrency-patterns/blob/master/pom.xml'\n \n pom_file_url = 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml'\n \n opener = urllib.request.build_opener()\n \n f = opener.open(pom_file_url)\n \n \n # ng, file=urllib.urlopen(file=urllib.urlopen())\n \n #parser = ET.HTMLParser()\n\n #with urlopen('https://pypi.python.org/simple') as f:\n #tree = ET.parse(f, parser) \n\n #pom_file = urllib.request.urlopen(pom_file)\n \n pom = ET.parse(opener.open(pom_file_url)).getroot() \n\n project = MavenProject(pom)\n project.logx()\n\nif __name__ == '__main__':\n main()\n\n\n#main()\n\n'''\n=====================================================================\nnotes:\n alternatives - use maven to get equiv pom \n > mvn help:effective-pom\n\nhttps://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output\n\n\n'''\n\n","sub_path":"maven_proj_graph/pkg1/mvnsortmod1.py","file_name":"mvnsortmod1.py","file_ext":"py","file_size_in_byte":16760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"356432877","text":"#!/usr/bin/python3\n\"\"\"RESTful API for Amenities object \"\"\"\nfrom flask import jsonify, abort, request\nfrom api.v1.views import app_views\nfrom models.base_model import BaseModel\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models import storage\n\n\n@app_views.route('/amenities', methods=['GET'],\n strict_slashes=False)\ndef get_amenities():\n \"\"\"Retrieves all Amenity objects \"\"\"\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)\n\n\n@app_views.route('/amenities/', methods=['GET'],\n strict_slashes=False)\ndef get_amenity(amenity_id):\n \"\"\" Retrieves a Amenity object \"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n return jsonify(amenity.to_dict())\n\n\n@app_views.route('/amenities/', 
methods=['DELETE'],\n strict_slashes=False)\ndef delete_amenity(amenity_id):\n \"\"\" Deletes a Amenity object \"\"\"\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n empty_dict = {}\n amenity.delete()\n storage.save()\n return jsonify(empty_dict), 200\n\n\n@app_views.route('/amenities', methods=['POST'],\n strict_slashes=False)\ndef create_amenity():\n \"\"\" Creates a City object \"\"\"\n my_dict = request.get_json()\n if my_dict is None:\n abort(400, \"Not a JSON\")\n elif \"name\" not in my_dict:\n abort(400, \"Missing name\")\n new_amenity = Amenity(**my_dict)\n new_amenity.save()\n return jsonify(new_amenity.to_dict()), 201\n\n\n@app_views.route('/amenities/',\n methods=['PUT'],\n strict_slashes=False)\ndef update_amenity(amenity_id):\n \"\"\"Update an Amenity object\"\"\"\n if amenity_id:\n my_dict = request.get_json()\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n if my_dict is None:\n abort(400, \"Not a JSON\")\n for key, value in my_dict.items():\n if key not in [\"id\", \"created_at\", \"updated_at\"]:\n setattr(amenity, key, value)\n storage.save()\n return jsonify(amenity.to_dict()), 200\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"552361307","text":"t = int(input())\n\n\ndef bus(k, n, m):\n cnt = 0\n now = 0\n charge = [False] * (int(n) + 1)\n tmp = list(map(int, input().split()))\n for s in tmp:\n charge[s] = True\n while True:\n if now+k <= n and charge[now+k] == True:\n now = now+k\n cnt += 1\n elif now+k == n:\n break;\n else:\n for j in range(1, k):\n if now+k-j <= n and charge[now+k-j] == True:\n now = now+k-j\n cnt += 1\n break;\n elif now+k-j == n:\n return cnt\n elif j == k-1 and charge[now+k-j] != True:\n return 0\n\n\n\n return cnt\n\nfor i in range(t):\n k, n, m = map(int, input().split())\n\n ans = bus(k, n, m)\n print('#'+str(i+1)+' '+ str(ans))\n","sub_path":"05/0510/SWEA_4831.py","file_name":"SWEA_4831.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"560392010","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport sqlite3\nfrom textblob import TextBlob\nfrom unidecode import unidecode\nimport time\nimport re\nfrom nltk.tokenize import word_tokenize, RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport string\nfrom geotext import GeoText\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n# consumer key, consumer secret, access token, access secret.\nconsumerKey = \"\"\nconsumerSecret = \"\"\naccessToken = \"\"\naccessTokenSecret = \"\"\n\nconn = sqlite3.connect('db.twitterdata')\nc = conn.cursor()\n\n\nclass Listener(StreamListener):\n def processTweet(self, tweet):\n tweet = re.sub(r'\\&\\w*;', '', tweet)\n tweet = re.sub('@[^\\s]+', '', tweet)\n tweet = re.sub(r'\\$\\w*', '', tweet)\n tweet = tweet.lower()\n tweet = re.sub(r'https?:\\/\\/.*\\/\\w*', '', tweet)\n tweet = re.sub(r'#\\w*', '', tweet)\n # tweet = re.sub(r'[' + punctuation.replace('@', '') + ']+', ' ', tweet)\n tweet = re.sub(r't\\b\\w\\b', '', tweet)\n tweet = re.sub(r'\\s\\s+', ' ', tweet)\n tweet = re.sub(r\"[-()\\\"#/@;:<>{}[`+=~*|.!?,.....']\", \"\", tweet)\n tweet = re.sub(r\"rt\", '', tweet)\n tweet = re.sub(r\"1234567890\", \"\", tweet)\n tweet = 
tweet.lstrip(' ')\n tweet = ''.join(c for c in tweet if c <= '\\uFFFF')\n tweet = ''.join(c for c in tweet if not c.isdigit())\n return tweet\n\n def wordList(self, tweet):\n stop_words = set(stopwords.words('english') + list(string.punctuation))\n word = word_tokenize(tweet.replace('\\n', ' '))\n # freq_words = FreqDist(word)\n filtered_word = []\n for w in word:\n if len(w) > 2:\n if w not in stop_words:\n filtered_word.append(w)\n return filtered_word\n\n def location(self, userlocation):\n country = None\n city = None\n state = None\n cur = conn.cursor()\n cur.execute('SELECT * FROM worldcities') # You need to have table worldcities information.\n world = cur.fetchall()\n places = GeoText(str(userlocation))\n countries = places.countries\n cities = places.cities\n if len(cities) == 1 and len(countries) == 1:\n city = cities[0]\n country = countries[0]\n for w in world:\n if len(w[0]) != 0 and len(w[2]) != 0 and str(city).find(w[0]) != -1 and str(country).find(w[2]) != -1:\n state = w[5]\n else:\n if len(countries) == 1:\n country = countries[0]\n if len(cities) == 1:\n city = cities[0]\n population = []\n country_array = []\n state_array = []\n for w in world:\n if len(w[0]) != 0 and str(cities).find(w[0]) != -1:\n if w[6] is not None:\n population.append(int(w[6]))\n else:\n population.append(0)\n country_array.append(w[2])\n state_array.append(w[5])\n elif w[5] is not None and str(cities).find(w[5]) != -1:\n if w[6] is not None:\n population.append(int(w[6]))\n else:\n population.append(0)\n country_array.append(w[2])\n state_array.append(w[5])\n if len(population) != 0:\n p_index = population.index(max(population))\n country = country_array[p_index]\n state = state_array[p_index]\n else:\n if len(country_array) != 0:\n country = country_array[0]\n if len(state_array) != 0:\n state = state_array[0]\n else:\n splited = str(userlocation).lower().strip(',').strip('.').split()\n splited_left = str(userlocation).split(',')[0]\n if 'usa' in splited:\n country = 'United States'\n if 'uk' in splited:\n country = 'United Kingdom'\n if 'england' in splited:\n country = 'United Kingdom'\n state = 'England'\n if userlocation != splited_left:\n splited_right = str(userlocation).split(',')[1].strip()\n cur.execute('SELECT state FROM usa_states WHERE code LIKE ?', (splited_left,))\n code_right = cur.fetchone()\n if code_right is not None:\n state = code_right[0]\n country = 'United States'\n cur.execute('SELECT state FROM usa_states WHERE code LIKE ?', (splited_right,))\n code_left = cur.fetchone()\n if code_left is not None:\n state = code_left[0]\n country = 'United States'\n cur.execute('SELECT state FROM usa_states WHERE state LIKE ?', (splited_left,))\n state_right = cur.fetchone()\n if state_right is not None:\n state = state_right[0]\n country = 'United States'\n cur.execute('SELECT state FROM usa_states WHERE state LIKE ?', (splited_right,))\n state_left = cur.fetchone()\n if state_left is not None:\n state = state_left[0]\n country = 'United States'\n\n returnedlist = [country, state, city]\n print(userlocation)\n print(countries)\n print(cities)\n print(returnedlist)\n print(\"------------------------------\")\n return returnedlist\n\n def on_data(self, data):\n try:\n tweet_frame = json.loads(data)\n user_id = tweet_frame['user']['id']\n tweet = unidecode(tweet_frame['text'])\n user_name = tweet_frame[\"user\"][\"name\"]\n user_screen_name = tweet_frame[\"user\"][\"screen_name\"]\n user_followers_count = tweet_frame['user']['followers_count']\n user_verified = 
tweet_frame['user']['verified']\n user_location = tweet_frame['user']['location']\n created_at = tweet_frame['created_at']\n id_str = tweet_frame['id_str']\n verified_int = 0\n if user_verified:\n verified_int = 1\n else:\n verified_int = 0\n\n clean_tweet = self.processTweet(tweet)\n\n if user_name is not None:\n if user_followers_count is not None:\n if user_location is not None:\n if user_followers_count > 20:\n vader_analyzer = SentimentIntensityAnalyzer()\n vader_polarity = vader_analyzer.polarity_scores(clean_tweet)\n vader_compound = vader_polarity['compound']\n\n textblob_analyzer = TextBlob(clean_tweet)\n textblob_polarity = textblob_analyzer.sentiment.polarity\n textblob_subjective = textblob_analyzer.subjectivity\n\n insertedlist = self.location(user_location)\n\n country = insertedlist[0]\n state = insertedlist[1]\n city = insertedlist[2]\n word_list = ','.join(self.wordList(clean_tweet))\n\n c.execute(\n \"INSERT INTO homepage_tweets (user_id, user_name, user_screen_name, user_follower_count, created, verified, location, country, state, city, tweet, clean_tweet, word_list, polarity, subjectivity, vader_compound,id_str) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?)\",\n (user_id, user_name, user_screen_name, user_followers_count, created_at, verified_int,\n user_location, country, state, city, tweet, clean_tweet, word_list, textblob_polarity,\n textblob_subjective, vader_compound, id_str\n ))\n\n conn.commit()\n print(user_followers_count)\n except KeyError as e:\n print(str(e))\n return (True)\n\n def on_error(self, status):\n print(status)\n\ndef stream():\n while True:\n try:\n auth = OAuthHandler(consumerKey, consumerSecret)\n auth.set_access_token(accessToken, accessTokenSecret)\n twitterStream = Stream(auth, Listener())\n twitterStream.filter(track=[\"a\", \"e\", \"i\", \"o\", \"u\"], languages=['en'])\n except Exception as e:\n print(str(e))\n time.sleep(1)\nif __name__ == \"__main__\":\n stream()\n","sub_path":"streamingtweets.py","file_name":"streamingtweets.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"584383917","text":"from wordcloud import WordCloud \r\nimport matplotlib.pyplot as plt \r\nimport pandas as pd \r\nfrom tweet_feed import Feed\r\nfrom basic_cleaner import BasicCleaner\r\nimport data_object\r\n\r\n\r\n\r\n\r\nfile_path = \"../DataCollection/191020-20_39_57--191020-20_40_07\" \r\nsentiment_range = [float(-1), float(-0.5)]\r\n\r\n\r\n\r\ndef get_long_tweet_string():\r\n feed = Feed()\r\n queue_stream = feed.disk_get_tweet_queue(file_path)\r\n data_objects = [data_object.get_dataobj_converted(tweet) for tweet in queue_stream]\r\n for obj in data_objects: BasicCleaner.autocleaner(obj,sentiment_range, True)\r\n long_string = [obj.text*(obj.valid_sentiment_range) for obj in data_objects]\r\n return \" \".join(long_string)\r\n\r\n\r\n\r\nWC = WordCloud(width = 800, height = 800, \r\n background_color ='white', \r\n min_font_size = 10).generate(get_long_tweet_string()) \r\n \r\n# plot WC \r\nplt.figure(figsize = (8, 8), facecolor = None) \r\nplt.imshow(WC) \r\nplt.axis(\"off\") \r\nplt.tight_layout(pad = 0) \r\n\r\n\r\nplt.show() ","sub_path":"generate_wordcloud.py","file_name":"generate_wordcloud.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"599906893","text":"from django.conf.urls import url\nfrom rest_framework.routers import 
DefaultRouter\nfrom views import (NotificationViewSet,\n ReportRetrieveAPIView,\n ReportViewSet)\n\n\nreport_router = DefaultRouter()\nreport_router.register(r'report', ReportViewSet, base_name='report')\n\nreport_view_url = [\n url(r'^report/(?P[0-9a-f-]+)/$', ReportRetrieveAPIView.as_view(), name='report-view')\n]\n\nnotification_router = DefaultRouter()\nnotification_router.register(r'notification', NotificationViewSet, base_name='notification')\n","sub_path":"reporting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"202629834","text":"\"\"\"Quicksort algorithm from Grokking Algorithms: An illustrated guide for \nprogrammers and other curiouse people.\n\"\"\"\n\n\ndef quicksort(items):\n \"\"\"Quicksort is a sorting algorithm. Faster than selction sort and is \n frequenly used. It splits a list on a pivot point and recursively \n sorts each resulting list.\n \"\"\"\n if len(items) < 2 or items[0] == items[1]:\n return items\n else:\n pivot = items[len(items) // 2]\n less = [i for i in items if i < pivot]\n greater = [i for i in items if i > pivot]\n equal = [i for i in items if i == pivot]\n return quicksort(less) + quicksort(equal) + quicksort(greater)\n\nif __name__ == \"__main__\":\n lst = [ 44, 2, 17, 3, 9, 18, 6, 12]\n print(quicksort(lst))\n","sub_path":"skill_builders/books/ga_quicksort.py","file_name":"ga_quicksort.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"466324563","text":"import tensorflow as tf\n\nx = tf.placeholder(dtype=tf.float32)\ny = x * 2\n\ninput_data = [1, 2]\n\nsess = tf.Session()\nresult = sess.run(y, feed_dict={x: input_data})\nprint(result)\nsess.close()","sub_path":"week01/placeholder1.py","file_name":"placeholder1.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"} +{"seq_id":"67368023","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport plot_setting\n\nfolder_diff =\"/home/fschubert/Master/sim_data/complete_diff_long/\"\nfolder_non_diff =\"/home/fschubert/Master/sim_data/complete_non_diff_long/\"\nfolder_diff_no_top =\"/home/fschubert/Master/sim_data/diffusive_no_distance_topology/\"\nfolder_non_diff_no_top =\"/home/fschubert/Master/sim_data/non_diffusive_no_distance_topology/\"\n\nW_diff=np.load(folder_diff+\"W_eTOe_record.npy\")\nW_non_diff=np.load(folder_non_diff+\"W_eTOe_record.npy\")\nW_diff_no_top=np.load(folder_diff_no_top+\"W_eTOe_record.npy\")\nW_non_diff_no_top=np.load(folder_non_diff_no_top+\"W_eTOe_record.npy\")\n\n\nW_diff=np.transpose(W_diff,(0,2,1))\nW_non_diff=np.transpose(W_non_diff,(0,2,1))\nW_diff_no_top=np.transpose(W_diff_no_top,(0,2,1))\nW_non_diff_no_top=np.transpose(W_non_diff_no_top,(0,2,1))\n\nt_ind=999\n\nout_sum_diff = W_diff[t_ind,:,:].sum(axis=0)*1000\nout_sum_non_diff = W_non_diff[t_ind,:,:].sum(axis=0)*1000\nout_sum_diff_no_top = W_diff_no_top[t_ind,:,:].sum(axis=0)*1000\nout_sum_non_diff_no_top = W_non_diff_no_top[t_ind,:,:].sum(axis=0)*1000\n\nsort_out_sum_diff = np.sort(out_sum_diff)\nsort_out_sum_non_diff = np.sort(out_sum_non_diff)\nsort_out_sum_diff_no_top = np.sort(out_sum_diff_no_top)\nsort_out_sum_non_diff_no_top = np.sort(out_sum_non_diff_no_top)\n\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_non_diff_no_top,'.',label=\"non-diffusive, no spatial 
+{"seq_id":"67368023","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport plot_setting\n\nfolder_diff = \"/home/fschubert/Master/sim_data/complete_diff_long/\"\nfolder_non_diff = \"/home/fschubert/Master/sim_data/complete_non_diff_long/\"\nfolder_diff_no_top = \"/home/fschubert/Master/sim_data/diffusive_no_distance_topology/\"\nfolder_non_diff_no_top = \"/home/fschubert/Master/sim_data/non_diffusive_no_distance_topology/\"\n\nW_diff=np.load(folder_diff+\"W_eTOe_record.npy\")\nW_non_diff=np.load(folder_non_diff+\"W_eTOe_record.npy\")\nW_diff_no_top=np.load(folder_diff_no_top+\"W_eTOe_record.npy\")\nW_non_diff_no_top=np.load(folder_non_diff_no_top+\"W_eTOe_record.npy\")\n\n\nW_diff=np.transpose(W_diff,(0,2,1))\nW_non_diff=np.transpose(W_non_diff,(0,2,1))\nW_diff_no_top=np.transpose(W_diff_no_top,(0,2,1))\nW_non_diff_no_top=np.transpose(W_non_diff_no_top,(0,2,1))\n\nt_ind=999\n\nout_sum_diff = W_diff[t_ind,:,:].sum(axis=0)*1000\nout_sum_non_diff = W_non_diff[t_ind,:,:].sum(axis=0)*1000\nout_sum_diff_no_top = W_diff_no_top[t_ind,:,:].sum(axis=0)*1000\nout_sum_non_diff_no_top = W_non_diff_no_top[t_ind,:,:].sum(axis=0)*1000\n\nsort_out_sum_diff = np.sort(out_sum_diff)\nsort_out_sum_non_diff = np.sort(out_sum_non_diff)\nsort_out_sum_diff_no_top = np.sort(out_sum_diff_no_top)\nsort_out_sum_non_diff_no_top = np.sort(out_sum_non_diff_no_top)\n\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_non_diff_no_top,'.',label=\"non-diffusive, no spatial topology\")\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_non_diff,'.',label=\"non-diffusive, spatial topology\")\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_diff_no_top,'.',label=\"diffusive, no spatial topology\")\nplt.plot(np.linspace(0.,1.,400),sort_out_sum_diff,'.',label=\"diffusive, spatial topology\")\n\nplt.xlabel(\"Quantile of excitatory neurons\")\nplt.ylabel(\"Sum of outgoing synapse weights\")\nplt.legend()\n\nplt.ylim([0.,200])\n\n\nplt.savefig(\"/home/fschubert/Master/plots/out_weight_quantile.png\")\n\n#plt.show()\n\n#import pdb\n#pdb.set_trace()\n","sub_path":"plotting_scripts/out_weight_quantile.py","file_name":"out_weight_quantile.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"217232748","text":"#!/usr/bin/env python3\n\nimport configparser\nimport ipaddress\nimport logging\nimport logging.handlers\nimport os\nimport requests\nimport sys\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom godaddypy import Client, Account\n\nPREVIOUS_IP_FILE = 'previous-ip.txt'\n\n\ndef raise_if_invalid_ip(ip):\n    ipaddress.ip_address(ip)\n\n\ndef get_public_ip():\n    r = requests.get('https://api.ipify.org')\n    r.raise_for_status()\n\n    ip = r.text\n    raise_if_invalid_ip(ip)\n\n    return ip\n\n\ndef get_previous_public_ip():\n    try:\n        with open(PREVIOUS_IP_FILE, 'r') as f:\n            ip = f.read()\n    except FileNotFoundError:\n        return None\n\n    # Sanity check\n    raise_if_invalid_ip(ip)\n\n    return ip\n\n\ndef store_ip_as_previous_public_ip(ip):\n    with open(PREVIOUS_IP_FILE, 'w') as f:\n        f.write(ip)\n\n\ndef get_public_ip_if_changed():\n    current_public_ip = get_public_ip()\n    previous_public_ip = get_previous_public_ip()\n\n    if current_public_ip != previous_public_ip:\n        return current_public_ip\n    else:\n        return None\n\n\ndef get_godaddy_client():\n    config = configparser.ConfigParser()\n    config.read('config/godaddy-dyndns.conf')\n\n    account = Account(api_key=config.get('godaddy', 'api_key'),\n                      api_secret=config.get('godaddy', 'api_secret'))\n    if not account:\n        raise RuntimeError('Could not log in into GoDaddy')\n\n    client = Client(account)\n\n    return client\n\n\ndef get_schedule_timer():\n    config = configparser.ConfigParser()\n    config.read('config/godaddy-dyndns.conf')\n    timer = config.get('godaddy', 'timer')\n    unit = config.get('godaddy', 'unit')\n\n    if not timer or not unit:\n        # Returning None here would crash the caller's tuple unpacking, so fail loudly instead.\n        raise RuntimeError('Please specify a timer and unit (seconds, minutes, hours, days) in godaddy-dyndns.conf')\n    return timer, unit\n\n\ndef init_logging():\n    l = logging.getLogger()\n    rotater = logging.handlers.RotatingFileHandler('config/godaddy-dyndns.log', maxBytes=10000000, backupCount=2)\n    l.addHandler(rotater)\n    l.setLevel(logging.INFO)\n    rotater.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\n\n\ndef updatedns():\n    ip = get_public_ip_if_changed()\n\n    # If the IP hasn't changed then there's nothing to do.\n    if ip is None:\n        logging.info(\"IP has not changed\")\n        return None\n\n    # Open config file to read\n    config = configparser.ConfigParser()\n    config.read('config/godaddy-dyndns.conf')\n    domains = [x.strip() for x in (config.get('godaddy', 'domains').split(','))]\n    records = [x.strip() for x in (config.get('godaddy', 'records').split(','))]\n\n    # Initialize godaddy client\n    client = get_godaddy_client()\n\n    logging.info(\"Changing listed domains to %s\" % ip)\n\n    for domain in client.get_domains():\n        if domain in domains:  # Check to make sure the domain is requested\n            for dns_records in client.get_records(domain, record_type='A'):\n                if dns_records[\"name\"] in records:\n                    full_domain = \"%s.%s\" % (dns_records[\"name\"], domain)\n\n                    if ip == dns_records[\"data\"]:\n                        # There's a race here (if there are concurrent writers),\n                        # but there's not much we can do with the current API.\n                        logging.info(\"%s unchanged\" % full_domain)\n                    else:\n                        if not client.update_record_ip(ip, domain, dns_records[\"name\"], 'A'):\n                            raise RuntimeError('DNS update failed for %s' % full_domain)\n\n                        logging.info(\"%s changed from %s to %s\" % (full_domain, dns_records[\"data\"], ip))\n\n    store_ip_as_previous_public_ip(ip)\n\n\nif __name__ == '__main__':\n    timer, unit = get_schedule_timer()\n\n    init_logging()\n    logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)\n\n    # Initialize schedule\n    sched = BlockingScheduler()\n    if unit == 'seconds':\n        sched.add_job(updatedns, 'interval', seconds=int(timer))\n    elif unit == 'minutes':\n        sched.add_job(updatedns, 'interval', minutes=int(timer))\n    elif unit == 'hours':\n        sched.add_job(updatedns, 'interval', hours=int(timer))\n    elif unit == 'days':\n        sched.add_job(updatedns, 'interval', days=int(timer))\n    else:\n        print('Unit of measurement needs to be either: seconds, minutes, hours or days within godaddy-dyndns.conf')\n\n\n    try:\n        sched.start()\n    except (KeyboardInterrupt, SystemExit):\n        pass\n    except Exception as e:\n        logging.error('Exception: %s' % e)\n        logging.shutdown()\n        sys.exit(1)","sub_path":"godaddy-dyndns.py","file_name":"godaddy-dyndns.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
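godaddy-dyndns.py reads every setting from config/godaddy-dyndns.conf. Below is a minimal sketch of that INI layout with placeholder values; the section and key names (api_key, api_secret, domains, records, timer, unit) are exactly the ones the script queries, but the values are invented:

```python
# Hedged sketch of the INI file the script expects; all values are placeholders.
import configparser

SAMPLE_CONF = """
[godaddy]
api_key = YOUR_KEY
api_secret = YOUR_SECRET
domains = example.com
records = @, www
timer = 30
unit = minutes
"""

config = configparser.ConfigParser()
config.read_string(SAMPLE_CONF)
# Same lookups get_schedule_timer() performs:
print(config.get('godaddy', 'timer'), config.get('godaddy', 'unit'))
```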
+{"seq_id":"23945638","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom collections import defaultdict\n\nimport os\nimport yaml\n\nfrom .state import State\nfrom .state_delegate import StateDelegate\n\n\ndef dict_constructor(loader, node):\n    return defaultdict(str, loader.construct_pairs(node))\n\n\nclass StateMachine(StateDelegate):\n    \"\"\"\n    Describes the states available, as well as identifying the current\n    state.\n\n    StateMachine acts as a delegate for State objects, so that the current\n    state can be updated and the current state machine can be persisted after\n    each update.\n    \"\"\"\n\n    def __init__(self, path):\n        self._machine = None\n        self._current_state = None\n        self._current_state_name = ''\n        self._path = path\n        self._states = None\n\n    def did_enter_state(self, old_state, new_state_name):\n        \"\"\"StateDelegate method\"\"\"\n        old_state_name = old_state.name\n        self.states[old_state_name] = old_state\n        self._current_state_name = new_state_name\n        return True\n\n    def save(self):\n        \"\"\"StateDelegate method\"\"\"\n        states_as_dict = [state.to_dict() for state in list(self.states.values())]\n        data = {\n            'current_state': self._current_state_name,\n            'states': states_as_dict,\n        }\n        with open(self.path, 'wt') as f:\n            yaml.dump(data, f, default_flow_style=False)\n\n    @property\n    def machine(self):\n        if self._machine is None:\n            self._machine = self._read_machine()\n\n        return self._machine\n\n    @property\n    def current_state(self):\n        return self.states[self._current_state_name]\n\n    @property\n    def path(self):\n        return self._path\n\n    @property\n    def states(self):\n        if self._states is None:\n            self._states = {}\n            self._current_state_name = self.machine['current_state']\n            self._define(self.machine['states'])\n\n        return self._states\n\n    def _define(self, definitions):\n        for definition in definitions:\n            state = State(definition)\n            state.delegate = self\n            state_name = state.name\n            self._states[state_name] = state\n\n    def _read_machine(self):\n        \"\"\"\n        Reads the state machine from a YAML file.\n\n        Returns:\n            - State machine (dict) if read from file\n            - State machine (dict) that is only an end state, if path or file\n              is missing\n\n        Raises:\n            - RuntimeError if the YAML is not properly formatted\n        \"\"\"\n\n        if self.path is None:\n            return {\n                'name': 'no_state',\n            }\n\n        if os.path.exists(self.path):\n            with open(self.path, 'rt') as data:\n                try:\n                    # safe_load avoids arbitrary object construction and the\n                    # PyYAML deprecation warning for load() without a Loader.\n                    machine = yaml.safe_load(data)\n                except yaml.YAMLError:\n                    raise RuntimeError(f'{self.path} is not a YAML file')\n\n            return machine\n        else:\n            return {\n                'name': 'no_state',\n            }\n","sub_path":"state_service/state_machine.py","file_name":"state_machine.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
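StateMachine.save() fixes the persisted layout: a top-level current_state plus a states list built from State.to_dict(). A minimal sketch of a document _read_machine() could parse, assuming hypothetical per-state fields (State.to_dict() lives in state.py and is not shown here):

```python
# Hedged sketch of the YAML document StateMachine reads and writes.
# 'current_state' and 'states' come from save() above; the per-state
# fields are hypothetical, since State.to_dict() is defined elsewhere.
import yaml

sample = """
current_state: start
states:
  - name: start
  - name: done
"""

machine = yaml.safe_load(sample)
print(machine['current_state'])                 # start
print([s['name'] for s in machine['states']])   # ['start', 'done']
```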
+{"seq_id":"576726295","text":"#!/usr/bin/env python\n# license removed for brevity\nimport time\nimport rospy\nimport csv\nimport os\nfrom std_msgs.msg import String\nfrom ev3_ros.msg import MotorCommands\n\n\ndef readInCsv():\n    global robotNames\n    robotNames = {}\n    global filePath\n    filePath = raw_input(\"Enter csv URI: \")\n    while \".csv\" not in filePath:\n        print(filePath)\n        filePath = raw_input(\"not a csv file, try again: \")\n\n    with open(os.getcwd() + \"/../data/\" + filePath) as csvFile:\n        reader = csv.DictReader(csvFile)\n        for row in reader:\n            if row[\"name\"] not in robotNames:\n                robotNames[row[\"name\"]] = {\"pub\": rospy.Publisher(row[\"name\"], MotorCommands, queue_size=10),\n                                           \"commandDict\": {round(float(row[\"startTime\"]), 2):\n                                                           {\"start_time\": float(row[\"startTime\"]),\n                                                            \"end_time\": float(row[\"endTime\"]),\n                                                            \"right_speed\": float(row[\"rightMotorSpeed\"]),\n                                                            \"left_speed\": float(row[\"leftMotorSpeed\"])\n                                                            }\n                                                           }\n                                           }\n                print(row[\"name\"], \" has been added at: \", round(float(row[\"startTime\"]), 2), \" with motor speeds: \", float(row[\"rightMotorSpeed\"]), float(row[\"leftMotorSpeed\"]))\n            else:\n                robotNames[row[\"name\"]][\"commandDict\"][round(float(row[\"startTime\"]), 2)] = {\n                    \"start_time\": float(row[\"startTime\"]),\n                    \"end_time\": float(row[\"endTime\"]),\n                    \"right_speed\": float(row[\"rightMotorSpeed\"]),\n                    \"left_speed\": float(row[\"leftMotorSpeed\"])\n                }\n                print(row[\"name\"], \" new motor speeds at: \", round(float(row[\"startTime\"]), 2), \" with motor speeds: \", float(row[\"rightMotorSpeed\"]), float(row[\"leftMotorSpeed\"]))\n\n    return filePath\n\n\ndef mc_pub_csv():\n    pub = rospy.Publisher('default', MotorCommands, queue_size=10)\n    rospy.init_node('mc_pub_csv', anonymous=True)\n    rate = rospy.Rate(10) # 10hz\n\n    #reader = csv.DictReader(csvFile)\n\n    for x in range(0, 10):\n        for robot in robotNames:\n            pub = robotNames[robot][\"pub\"]\n            mc = MotorCommands()\n            mc.right_speed = 0.0\n            mc.left_speed = 0.0\n            pub.publish(mc)\n            rospy.loginfo(robot)\n            rospy.loginfo(mc)\n            rate.sleep()\n\n    start = \"\"\n    first = True\n    start_time = time.clock()\n\n    while not rospy.is_shutdown():\n        if first:\n            while \"y\" not in start:\n                if \"n\" in start:\n                    exit(0)\n                start = raw_input(\"start the dance: (y/n)\")\n            # Start the clock only once, when the user first confirms;\n            # resetting it on every pass kept time_diff pinned near zero.\n            start_time = time.clock()\n            first = False\n\n        time_diff = time.clock() - start_time\n\n        for robot in robotNames:\n\n            #print(\" current_time: \", round(current_time,2),\n            #      \"robot: \", robot,\n            #      \" startTime: \", robotNames[robot][\"commandDict\"])\n            if (round(float(time_diff), 4)*100) in robotNames[robot][\"commandDict\"]:\n                #print(\" startTime: \", robotNames[robot][\"commandDict\"],\n                #      \" pub: \", robotNames[robot][\"pub\"],\n                #      \" mc.right_speed: \", robotNames[robot][\"commandDict\"][round(current_time, 2)][\"right_speed\"],\n                #      \" mc.left_speed: \", robotNames[robot][\"commandDict\"][round(current_time, 2)][\"left_speed\"]\n                #      )\n                pub = robotNames[robot][\"pub\"]\n                right_speed = robotNames[robot][\"commandDict\"][(round(float(time_diff), 4)*100)][\"right_speed\"]\n                left_speed = robotNames[robot][\"commandDict\"][(round(float(time_diff), 4)*100)][\"left_speed\"]\n                #right_speed = max(-1, min(right_speed, 1))\n                #left_speed = max(-1, min(left_speed, 1))\n                # Scale tiny positive speeds up; skip zero or negative values,\n                # which would otherwise loop forever here.\n                while 0 < right_speed < 0.1:\n                    right_speed = right_speed * 10\n                while 0 < left_speed < 0.1:\n                    left_speed = left_speed * 10\n                mc.right_speed = right_speed\n                mc.left_speed = left_speed\n                print(\"time diff: \", (round(float(time_diff), 4)*100))\n                print(\"New speed at time: \", time.clock(), \" for Robot: \", robot, \" with pub: \", pub, \" rs: \", mc.right_speed, \" ls: \", mc.left_speed)\n\n\n            rospy.loginfo(mc)\n            pub.publish(mc)\n            rate.sleep()\n\n\nif __name__ == '__main__':\n    try:\n        readInCsv()\n    except Exception as e:\n        print(e)\n\n    try:\n        mc_pub_csv()\n    except rospy.ROSInterruptException:\n        pass\n","sub_path":"scripts/mc_pub_csv_speed_fix.py","file_name":"mc_pub_csv_speed_fix.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"65"}
+{"seq_id":"150699492","text":"# -*- coding: utf-8 -*-\n'''\nConvert Markdown to HTML.\n'''\nimport os\nimport markdown\nfrom bs4 import BeautifulSoup\nclass MarkdownToHtml(object):\n    \"\"\"\n    A converter class\n    \"\"\"\n    def __init__(self, cssFilePath=None):\n        if cssFilePath is not None:\n            # Read in the contents of the external css file\n            self.get_style(cssFilePath)\n        self.head_tag = ''\n\n    def get_style(self, css):\n        '''\n        Read the css file\n        '''\n        with open(css, 'r') as file:\n            string = file.read()\n        new_css = '