diff --git "a/1543.jsonl" "b/1543.jsonl" new file mode 100644--- /dev/null +++ "b/1543.jsonl" @@ -0,0 +1,765 @@ +{"seq_id":"4294829","text":"from pyparsing import oneOf, Literal, Optional, Word\nfrom ....util.grammar import numbers, openParen, closeParen\nfrom ....util._constants import decimal\n\ndef define_svm():\n svmPhrase = oneOf([\"svm\", \"SVM\"])\n\n #Definitions for options of svm\n gamma_literal = (Literal(\"gamma\") + Literal(\"=\")).suppress()\n C_literal = (Literal(\"C\") + Literal(\"=\")).suppress()\n\n gamma = Optional(gamma_literal + decimal.setResultsName(\"svm_gamma\"), default = 1)\n C = Optional(gamma_literal + decimal.setResultsName(\"svm_C\"), default = 1)\n\n #Compositions\n svm = svmPhrase + Optional(openParen + gamma + C + closeParen)\n\n return(svm)\n","sub_path":"sml/parser/actions/algorithms/classify_algorithms/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"26049213","text":"#!/usr/bin/env python3\nimport pytest\n\n\ndef get_axis_range(cubes, axis):\n coords = [cube[axis] for cube in cubes.keys()]\n return range(min(coords) - 1, max(coords) + 2)\n\n\ndef iterate_cubes(cubes):\n for x in get_axis_range(cubes, 0):\n for y in get_axis_range(cubes, 1):\n for z in get_axis_range(cubes, 2):\n for w in get_axis_range(cubes, 3):\n yield x, y, z, w\n\n\ndef get_neighbors(x, y, z, w):\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n for dz in [-1, 0, 1]:\n for dw in [-1, 0, 1]:\n if dx or dy or dz or dw:\n yield (x + dx, y + dy, z + dz, w + dw)\n\n\ndef tick(cubes):\n new_cubes = {}\n\n for x, y, z, w in iterate_cubes(cubes):\n active = cubes.get((x, y, z, w))\n\n num_active_neighbors = 0\n for neighbor in get_neighbors(x, y, z, w):\n if cubes.get(neighbor):\n num_active_neighbors += 1\n\n if (\n active\n and num_active_neighbors in [2, 3]\n or not active\n and num_active_neighbors == 3\n ):\n new_cubes[(x, y, z, w)] = True\n\n return new_cubes\n\n\ndef compute(cts: str):\n cubes = {}\n for y, line in enumerate(cts.splitlines()):\n for x, c in enumerate(line):\n cubes[(y, x, 0, 0)] = c == '#'\n\n for _ in range(6):\n cubes = tick(cubes)\n\n return sum(1 for state in cubes.values() if state)\n\n\nTEST_INPUT = \"\"\"\n.#.\n..#\n###\n\"\"\".strip()\n\n\n@pytest.mark.parametrize('input_str, expected', [(TEST_INPUT, 848)])\ndef test_compute(input_str, expected):\n assert compute(input_str) == expected\n\n\ndef main() -> int:\n with open('input.txt', 'r') as f:\n cts = f.read().strip()\n\n print(compute(cts))\n\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"day17/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"648846825","text":"print('Пользовательские функции. Часть 1\\n')\n\n'''\nПоследовательность инструкций, возвращающая некое значение.\n В функцию могут быть переданы ноль и более аргументов, которые могут использоваться в теле функции.\n\n Для возврата значения из функции используется инструкция return. Допускается использование \nнескольких return, в том числе для раннего выхода из функции.\n\n Внутри функций можно определять еще функции.\n Функции можно передавать в качества аргумента в другой функции\n\n Внимание\nФункции без инструкции return (равно как и с нею, но без указания аргумента) всё равно возвращают результат — None.\n'''\n\n\ndef hello(name, word):\n print(f'Hello, {name}! 
Say {word}')\n\n\nhello('Yiou', 'Hi') # Hello, Yiou! Say Hi\nhello('Xian', 'hello\\n') # Hello, Xian! Say hello\n\n\ndef get_sum(a, b):\n print(a + b)\n\n\nget_sum(1, 4) # 5\n\n# То что мы указываем в качестве аргументов(a, b) НИКАК не связано с тем KAK мы вызываем функцию (x, y)\nx, y, = 5, 8\nget_sum(x, y) # 13\n\n'''\nreturn:\nХорошей практикой является не печатать результат в функции(print), а взвращать его(return)\nreturn нужен для того что бы получить и сохранить какой-то результат для дальнейшего использования.\nНапример сумму всех товаров в корзине, функция ��го считает и сохраняет в переменную. И потом выводит, если надо.\n'''\n\n\ndef get_sum(a, b):\n return a + b\n\n\nget_sum(4, 5) # в консоли ничего не выведет, надо выводить принтом либо присвоить переменной и её принтовать\nprint(get_sum(5, 7)) # 12\nc = get_sum(3, 8)\nprint(c, '\\n') # 11\n\n\ndef hi():\n print('Hi\\n')\n\n\n# Если функция уже что то печатает - то вызываем функцию без принта, иначе кроме результата она еще вернет None\n# print(hi()) # Hi None\nhi() # Hi\n\nprint('Домашнее задание\\n')\n\n'''\n1. Дан список. Получите новый список, в котором каждое значение будет удвоено:\n[1, 2, 3] --> [2, 4, 6]\n'''\nmy_list = [1, 2, 3]\n\n\ndef def_list(arg):\n new_list = [i * 2 for i in arg]\n return new_list\n\n\n# def_list(my_list) # эта запись не обязательна, передаем её сразу в принте\nprint(f'Task 1: new_list = {def_list(my_list)}\\n')\n\n'''\n2. Дан список. Возведите в квадрат каждый из его элементов и получит сумму всех полученных квадратов:\n[1, 2, 3] --> 14 --> 1^2 + 2^2 + 3^2 = 14\n'''\n\n\ndef pow_num(arg):\n my_sum = sum(i ** 2 for i in arg)\n return my_sum\n\n\nprint(f'Task 2: my_sum = {pow_num(my_list)}\\n')\n\n\ndef str_def(arg):\n if ' ' in arg:\n s = arg.upper()\n else:\n s = arg.lower()\n return s\n\n\nprint(str_def('Hello world')) # HELLO WORLD\nprint(str_def('Hello,world')) # hello,world\n\n# def str_def(s): # Второй вариант написание функции\n# if ' ' in s:\n# return s.upper()\n# else:\n# return s.lower()\n","sub_path":"theory/lesson_26.py","file_name":"lesson_26.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239457889","text":"from __future__ import print_function\n\nimport sys\n# import iroha library from nested folder\nsys.path.insert(0, 'build/shared_model/bindings')\nimport iroha\n\nimport time\nimport block_pb2\nimport endpoint_pb2\nimport endpoint_pb2_grpc\nimport queries_pb2\nimport grpc\n\ntx_builder = iroha.ModelTransactionBuilder()\nquery_builder = iroha.ModelQueryBuilder()\ncrypto = iroha.ModelCrypto()\nproto_tx_helper = iroha.ModelProtoTransaction()\nproto_query_helper = iroha.ModelProtoQuery()\n\nadmin_priv = open(\"../admin@test.priv\", \"r\").read()\nadmin_pub = open(\"../admin@test.pub\", \"r\").read()\n\nme_kp = crypto.convertFromExisting(admin_pub, admin_priv)\n\ncurrent_time = int(round(time.time() * 1000)) - 10**5\nstart_tx_counter = 1\nstart_query_counter = 1\ncreator = \"admin@test\"\n\n\n# build transaction\ntx = tx_builder.creatorAccountId(creator) \\\n .txCounter(start_tx_counter) \\\n .createdTime(current_time) \\\n .createDomain(\"ru\", \"user\") \\\n .createAsset(\"dollar\", \"ru\", 2).build()\n\ntx_blob = proto_tx_helper.signAndAddSignature(tx, me_kp).blob()\n\n# create proto object and send to iroha\n\nproto_tx = block_pb2.Transaction()\nproto_tx.ParseFromString(''.join(map(chr, tx_blob)))\n\nchannel = grpc.insecure_channel('127.0.0.1:50051')\nstub = 
endpoint_pb2_grpc.CommandServiceStub(channel)\n\nstub.Torii(proto_tx)\n\ntime.sleep(5)\n\n# create status request\nprint(\"Hash of the transaction: \", tx.hash().hex())\ntx_hash = tx.hash().blob()\ntx_hash = ''.join(map(chr, tx_hash))\n\nrequest = endpoint_pb2.TxStatusRequest()\nrequest.tx_hash = tx_hash\n\nresponse = stub.Status(request)\nstatus = endpoint_pb2.TxStatus.Name(response.tx_status)\nprint(\"Status of transaction is:\", status)\n\nif status != \"COMMITTED\":\n print(\"Your transaction wasn't committed\")\n exit(1)\n\nquery = query_builder.creatorAccountId(creator) \\\n .createdTime(current_time) \\\n .queryCounter(start_query_counter) \\\n .getAssetInfo(\"dollar#ru\") \\\n .build()\nquery_blob = proto_query_helper.signAndAddSignature(query, me_kp).blob()\n\nproto_query = queries_pb2.Query()\nproto_query.ParseFromString(''.join(map(chr, query_blob)))\n\nquery_stub = endpoint_pb2_grpc.QueryServiceStub(channel)\nquery_response = query_stub.Find(proto_query)\n\nif not query_response.HasField(\"asset_response\"):\n print(\"Query response error\")\n exit(1)\nelse:\n print(\"Query responded with asset response\")\n\nasset_info = query_response.asset_response.asset\nprint(\"Asset Id =\", asset_info.asset_id)\nprint(\"Precision =\", asset_info.precision)\n\nprint(\"done!\")\n","sub_path":"example/python/tx-example.py","file_name":"tx-example.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"564444462","text":"# import turtle \nimport turtle \n\n# initialising variables \ndist = 1\nflag = 500\n\n# initialising turtle \nspiral = turtle.Turtle() \n\n# changing speed of turtle \nspiral.speed(10) \n\n# making patten \nwhile flag: \n\t\n\t# makes the turtle to move forward \n\tspiral.forward(dist) \n\t\n\t# makes the turtle to move left \n\tspiral.left(120) \n\tspiral.left(1) \n\tdist += 1\n\tflag -= 1\n\nturtle.done() \n","sub_path":"05-Turtle/007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"243130401","text":"totidade = 0\ncontM = 0\nvelho = ''\nidademaior = 0\nfor n in range(1,5):\n print(f'{n}ª Pessoa')\n nome = str(input('Digite seu nome: '))\n idade = int(input('Sua idade: '))\n sexo = str(input('Sexo: [M/F]: ')).upper().strip()\n totidade += idade\n if n == 1 and sexo == 'M':\n idademaior = idade\n velho = nome\n if sexo.find('M') and idade > idademaior:\n idademaior = idade\n velho = nome\n if sexo == 'F' and idade < 20:\n contM += 1\ntotal = (totidade) / 4\nprint(f'Média de idade do grupo de pessoas é de {total} anos')\nprint(f'O homem mais velhor tem {idademaior} e se chama {velho}')\nprint(f'Ao todo são {contM} mulheres com menos de 20 anos')\n\n# if n == 1 and sexo in 'Mn' isso também serve parar comparar se é igal","sub_path":"ex056.py","file_name":"ex056.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"395521752","text":"from logging_exp import experiment_logger\nimport json\n\ndef log_experiment(path='../output/results_fine.json', model='baseline', level='fine', dataset='mavd' ):\n with open(path, 'r') as fp:\n results = json.load(fp)\n for k in results['class_wise']:\n logger = experiment_logger()\n logger.add_params({'model': model, 'level': level, 'dataset': dataset, 'class': k})\n for key, val in results['class_wise'][k].iteritems():\n if type(val[0]) == 
dict:\n for t in val:\n for metric in val[t]:\n logger.log_metrics(metric, results['class_wise'][k][t][metric])\n else:\n logger.log_metrics(key, val[key])\n logger.end()\n\n\nlog_experiment()\n","sub_path":"urban-sound-tagging-baseline/log_metrics.py","file_name":"log_metrics.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"134220325","text":"# Given an array of ints, return True if the sequence of numbers 1, 2, 3 appears in the array somewhere.\n\n\n# array123([1, 1, 2, 3, 1]) → True\n# array123([1, 1, 2, 4, 1]) → False\n# array123([1, 1, 2, 1, 2, 3]) → True\nxrange = range\ndef array123(nums):\n\tfor i in xrange(0,len(nums)):\n\t\tif nums[i]==1 and nums[i+1]==2 and nums[i+2]==3:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\nprint(array123([1,2,3,4,1,2,3]))\n","sub_path":"Warmup-2/array123.py","file_name":"array123.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"90810838","text":"import io\nimport json\nimport time\n\nimport yaml\n\n\ndef get_time_stamp():\n ct = time.time()\n local_time = time.localtime(ct)\n data_head = time.strftime(\"%Y-%m-%d-%H-%M-%S\", local_time)\n data_secs = (ct - int(ct)) * 1000\n time_stamp = \"%s-%03d\" % (data_head, data_secs)\n return time_stamp\n\n\ndef dump_yaml_file(yaml_file, data):\n \"\"\" load yaml file and check file content format\n \"\"\"\n with io.open(yaml_file, 'w', encoding='utf-8') as stream:\n yaml.dump(data, stream, indent=4, default_flow_style=False, encoding='utf-8')\n\n\ndef _dump_json_file(json_file, data):\n \"\"\" load json file and check file content format\n \"\"\"\n with io.open(json_file, 'w', encoding='utf-8') as stream:\n json.dump(data, stream, indent=4, separators=(',', ': '), ensure_ascii=False)\n\n\ndef dump_python_file(python_file, data):\n with io.open(python_file, 'w', encoding='utf-8') as stream:\n stream.write(data)\n\n\ndef modify_validate(request):\n if not isinstance(request,dict):\n raise ValueError(\"request的值为:{},不是一个dict\".format(request))\n if 'validate' in request.keys():\n validates = []\n for validate in request['validate']:\n new_validate = {}\n if validate.get('comparator') == \"equals\":\n new_validate['eq'] = [validate.get('check'), validate.get('expected')]\n validates.append(new_validate)\n request['validate'] = validates\n return request\n\ndef dump_yaml_to_dict(yaml_file_name_path,param=None):\n \"\"\"\n 根据文件名称获取yaml数据,并转换成json字符\n 可以通过param读取指定字段\n :param yaml_file_name:yaml文件名称\n :param param:传入参数\n :return:\n \"\"\"\n yaml_data = {}\n try:\n with open(yaml_file_name_path, encoding='utf-8') as stream:\n yaml_data = yaml.safe_load(stream)\n if param is not None:\n yaml_data = yaml_data[param]\n except FileNotFoundError as file_exception:\n print(\"文件未找到:\".format(str(file_exception)))\n raise file_exception\n finally:\n stream.close()\n return yaml_data\n\ndef fail_request_handle(fail_datas,error_info):\n result_datas = []\n for fail_data in fail_datas['teststeps']:\n fail_data['request']['body'] = fail_data['request'].pop('json')\n result = {'success': False,'name': fail_data['name'],'data':{'success': False,'req_resps':[{'request':fail_data['request'],'response':{'status_code':'error','body': error_info}}]}}\n result_datas.append(result)\n return result_datas\n\nif __name__ == '__main__':\n import MySQLdb as Database\n version = Database.version_info\n 
print(version)","sub_path":"ApiManager/utils/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"572658738","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-n_sfyb/Django/django/db/models/fields/related_descriptors.py\n# Compiled at: 2019-02-14 00:35:17\n\"\"\"\nAccessors for related objects.\n\nWhen a field defines a relation between two models, each model class provides\nan attribute to access related instances of the other model class (unless the\nreverse accessor has been disabled with related_name='+').\n\nAccessors are implemented as descriptors in order to customize access and\nassignment. This module defines the descriptor classes.\n\nForward accessors follow foreign keys. Reverse accessors trace them back. For\nexample, with the following models::\n\n class Parent(Model):\n pass\n\n class Child(Model):\n parent = ForeignKey(Parent, related_name='children')\n\n ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a\nreverse many-to-one relation.\n\nThere are three types of relations (many-to-one, one-to-one, and many-to-many)\nand two directions (forward and reverse) for a total of six combinations.\n\n1. Related instance on the forward side of a many-to-one relation:\n ``ForwardManyToOneDescriptor``.\n\n Uniqueness of foreign key values is irrelevant to accessing the related\n instance, making the many-to-one and one-to-one cases identical as far as\n the descriptor is concerned. The constraint is checked upstream (unicity\n validation in forms) or downstream (unique indexes in the database).\n\n2. Related instance on the forward side of a one-to-one\n relation: ``ForwardOneToOneDescriptor``.\n\n It avoids querying the database when accessing the parent link field in\n a multi-table inheritance scenario.\n\n3. Related instance on the reverse side of a one-to-one relation:\n ``ReverseOneToOneDescriptor``.\n\n One-to-one relations are asymmetrical, despite the apparent symmetry of the\n name, because they're implemented in the database with a foreign key from\n one table to another. As a consequence ``ReverseOneToOneDescriptor`` is\n slightly different from ``ForwardManyToOneDescriptor``.\n\n4. Related objects manager for related instances on the reverse side of a\n many-to-one relation: ``ReverseManyToOneDescriptor``.\n\n Unlike the previous two classes, this one provides access to a collection\n of objects. It returns a manager rather than an instance.\n\n5. Related objects manager for related instances on the forward or reverse\n sides of a many-to-many relation: ``ManyToManyDescriptor``.\n\n Many-to-many relations are symmetrical. 
The syntax of Django models\n requires declaring them on one side but that's an implementation detail.\n They could be declared on the other side without any change in behavior.\n Therefore the forward and reverse descriptors can be the same.\n\n If you're looking for ``ForwardManyToManyDescriptor`` or\n ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.\n\"\"\"\nfrom __future__ import unicode_literals\nimport warnings\nfrom operator import attrgetter\nfrom django.db import connections, router, transaction\nfrom django.db.models import Q, signals\nfrom django.db.models.query import QuerySet\nfrom django.utils.deprecation import RemovedInDjango20Warning\nfrom django.utils.functional import cached_property\n\nclass ForwardManyToOneDescriptor(object):\n \"\"\"\n Accessor to the related object on the forward side of a many-to-one or\n one-to-one (via ForwardOneToOneDescriptor subclass) relation.\n\n In the example::\n\n class Child(Model):\n parent = ForeignKey(Parent, related_name='children')\n\n ``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.\n \"\"\"\n\n def __init__(self, field_with_rel):\n self.field = field_with_rel\n self.cache_name = self.field.get_cache_name()\n\n @cached_property\n def RelatedObjectDoesNotExist(self):\n return type(str(b'RelatedObjectDoesNotExist'), (\n self.field.remote_field.model.DoesNotExist, AttributeError), {})\n\n def is_cached(self, instance):\n return hasattr(instance, self.cache_name)\n\n def get_queryset(self, **hints):\n related_model = self.field.remote_field.model\n if getattr(related_model._default_manager, b'use_for_related_fields', False):\n if not getattr(related_model._default_manager, b'silence_use_for_related_fields_deprecation', False):\n warnings.warn((b\"use_for_related_fields is deprecated, instead set Meta.base_manager_name on '{}'.\").format(related_model._meta.label), RemovedInDjango20Warning, 2)\n manager = related_model._default_manager\n else:\n manager = related_model._base_manager\n return manager.db_manager(hints=hints).all()\n\n def get_prefetch_queryset(self, instances, queryset=None):\n if queryset is None:\n queryset = self.get_queryset()\n queryset._add_hints(instance=instances[0])\n rel_obj_attr = self.field.get_foreign_related_value\n instance_attr = self.field.get_local_related_value\n instances_dict = {instance_attr(inst):inst for inst in instances}\n related_field = self.field.foreign_related_fields[0]\n if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:\n query = {b'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}\n else:\n query = {b'%s__in' % self.field.related_query_name(): instances}\n queryset = queryset.filter(**query)\n if not self.field.remote_field.multiple:\n rel_obj_cache_name = self.field.remote_field.get_cache_name()\n for rel_obj in queryset:\n instance = instances_dict[rel_obj_attr(rel_obj)]\n setattr(rel_obj, rel_obj_cache_name, instance)\n\n return (\n queryset, rel_obj_attr, instance_attr, True, self.cache_name)\n\n def get_object(self, instance):\n qs = self.get_queryset(instance=instance)\n return qs.get(self.field.get_reverse_related_filter(instance))\n\n def __get__(self, instance, cls=None):\n \"\"\"\n Get the related instance through the forward relation.\n\n With the example above, when getting ``child.parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``cls`` is the ``Child`` class (we don't need it)\n \"\"\"\n if instance 
is None:\n return self\n else:\n try:\n rel_obj = getattr(instance, self.cache_name)\n except AttributeError:\n val = self.field.get_local_related_value(instance)\n if None in val:\n rel_obj = None\n else:\n rel_obj = self.get_object(instance)\n if not self.field.remote_field.multiple:\n setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)\n setattr(instance, self.cache_name, rel_obj)\n\n if rel_obj is None and not self.field.null:\n raise self.RelatedObjectDoesNotExist(b'%s has no %s.' % (self.field.model.__name__, self.field.name))\n else:\n return rel_obj\n return\n\n def __set__(self, instance, value):\n \"\"\"\n Set the related instance through the forward relation.\n\n With the example above, when setting ``child.parent = parent``:\n\n - ``self`` is the descriptor managing the ``parent`` attribute\n - ``instance`` is the ``child`` instance\n - ``value`` is the ``parent`` instance on the right of the equal sign\n \"\"\"\n if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):\n raise ValueError(b'Cannot assign \"%r\": \"%s.%s\" must be a \"%s\" instance.' % (\n value,\n instance._meta.object_name,\n self.field.name,\n self.field.remote_field.model._meta.object_name))\n else:\n if value is not None:\n if instance._state.db is None:\n instance._state.db = router.db_for_write(instance.__class__, instance=value)\n elif value._state.db is None:\n value._state.db = router.db_for_write(value.__class__, instance=instance)\n elif value._state.db is not None and instance._state.db is not None:\n if not router.allow_relation(value, instance):\n raise ValueError(b'Cannot assign \"%r\": the current database router prevents this relation.' % value)\n if value is None:\n related = getattr(instance, self.cache_name, None)\n if related is not None:\n setattr(related, self.field.remote_field.get_cache_name(), None)\n for lh_field, rh_field in self.field.related_fields:\n setattr(instance, lh_field.attname, None)\n\n else:\n for lh_field, rh_field in self.field.related_fields:\n setattr(instance, lh_field.attname, getattr(value, rh_field.attname))\n\n setattr(instance, self.cache_name, value)\n if value is not None and not self.field.remote_field.multiple:\n setattr(value, self.field.remote_field.get_cache_name(), instance)\n return\n\n\nclass ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):\n \"\"\"\n Accessor to the related object on the forward side of a one-to-one relation.\n\n In the example::\n\n class Restaurant(Model):\n place = OneToOneField(Place, related_name='restaurant')\n\n ``restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.\n \"\"\"\n\n def get_object(self, instance):\n if self.field.remote_field.parent_link:\n deferred = instance.get_deferred_fields()\n rel_model = self.field.remote_field.model\n fields = [ field.attname for field in rel_model._meta.concrete_fields ]\n if not any(field in fields for field in deferred):\n kwargs = {field:getattr(instance, field) for field in fields}\n obj = rel_model(**kwargs)\n obj._state.adding = instance._state.adding\n obj._state.db = instance._state.db\n return obj\n return super(ForwardOneToOneDescriptor, self).get_object(instance)\n\n\nclass ReverseOneToOneDescriptor(object):\n \"\"\"\n Accessor to the related object on the reverse side of a one-to-one\n relation.\n\n In the example::\n\n class Restaurant(Model):\n place = OneToOneField(Place, related_name='restaurant')\n\n ``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.\n \"\"\"\n\n def 
__init__(self, related):\n self.related = related\n self.cache_name = related.get_cache_name()\n\n @cached_property\n def RelatedObjectDoesNotExist(self):\n return type(str(b'RelatedObjectDoesNotExist'), (\n self.related.related_model.DoesNotExist, AttributeError), {})\n\n def is_cached(self, instance):\n return hasattr(instance, self.cache_name)\n\n def get_queryset(self, **hints):\n related_model = self.related.related_model\n if getattr(related_model._default_manager, b'use_for_related_fields', False):\n if not getattr(related_model._default_manager, b'silence_use_for_related_fields_deprecation', False):\n warnings.warn((b\"use_for_related_fields is deprecated, instead set Meta.base_manager_name on '{}'.\").format(related_model._meta.label), RemovedInDjango20Warning, 2)\n manager = related_model._default_manager\n else:\n manager = related_model._base_manager\n return manager.db_manager(hints=hints).all()\n\n def get_prefetch_queryset(self, instances, queryset=None):\n if queryset is None:\n queryset = self.get_queryset()\n queryset._add_hints(instance=instances[0])\n rel_obj_attr = attrgetter(self.related.field.attname)\n\n def instance_attr(obj):\n return obj._get_pk_val()\n\n instances_dict = {instance_attr(inst):inst for inst in instances}\n query = {b'%s__in' % self.related.field.name: instances}\n queryset = queryset.filter(**query)\n rel_obj_cache_name = self.related.field.get_cache_name()\n for rel_obj in queryset:\n instance = instances_dict[rel_obj_attr(rel_obj)]\n setattr(rel_obj, rel_obj_cache_name, instance)\n\n return (\n queryset, rel_obj_attr, instance_attr, True, self.cache_name)\n\n def __get__(self, instance, cls=None):\n \"\"\"\n Get the related instance through the reverse relation.\n\n With the example above, when getting ``place.restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``cls`` is the ``Place`` class (unused)\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n \"\"\"\n if instance is None:\n return self\n else:\n try:\n rel_obj = getattr(instance, self.cache_name)\n except AttributeError:\n related_pk = instance._get_pk_val()\n if related_pk is None:\n rel_obj = None\n else:\n filter_args = self.related.field.get_forward_related_filter(instance)\n try:\n rel_obj = self.get_queryset(instance=instance).get(**filter_args)\n except self.related.related_model.DoesNotExist:\n rel_obj = None\n else:\n setattr(rel_obj, self.related.field.get_cache_name(), instance)\n\n setattr(instance, self.cache_name, rel_obj)\n\n if rel_obj is None:\n raise self.RelatedObjectDoesNotExist(b'%s has no %s.' 
% (\n instance.__class__.__name__,\n self.related.get_accessor_name()))\n else:\n return rel_obj\n return\n\n def __set__(self, instance, value):\n \"\"\"\n Set the related instance through the reverse relation.\n\n With the example above, when setting ``place.restaurant = restaurant``:\n\n - ``self`` is the descriptor managing the ``restaurant`` attribute\n - ``instance`` is the ``place`` instance\n - ``value`` is the ``restaurant`` instance on the right of the equal sign\n\n Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.\n \"\"\"\n if value is None:\n try:\n rel_obj = getattr(instance, self.cache_name)\n except AttributeError:\n pass\n else:\n delattr(instance, self.cache_name)\n setattr(rel_obj, self.related.field.name, None)\n\n elif not isinstance(value, self.related.related_model):\n raise ValueError(b'Cannot assign \"%r\": \"%s.%s\" must be a \"%s\" instance.' % (\n value,\n instance._meta.object_name,\n self.related.get_accessor_name(),\n self.related.related_model._meta.object_name))\n else:\n if instance._state.db is None:\n instance._state.db = router.db_for_write(instance.__class__, instance=value)\n else:\n if value._state.db is None:\n value._state.db = router.db_for_write(value.__class__, instance=instance)\n elif value._state.db is not None and instance._state.db is not None:\n if not router.allow_relation(value, instance):\n raise ValueError(b'Cannot assign \"%r\": the current database router prevents this relation.' % value)\n related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)\n for index, field in enumerate(self.related.field.local_related_fields):\n setattr(value, field.attname, related_pk[index])\n\n setattr(instance, self.cache_name, value)\n setattr(value, self.related.field.get_cache_name(), instance)\n return\n\n\nclass ReverseManyToOneDescriptor(object):\n \"\"\"\n Accessor to the related objects manager on the reverse side of a\n many-to-one relation.\n\n In the example::\n\n class Child(Model):\n parent = ForeignKey(Parent, related_name='children')\n\n ``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.\n\n Most of the implementation is delegated to a dynamically defined manager\n class built by ``create_forward_many_to_many_manager()`` defined below.\n \"\"\"\n\n def __init__(self, rel):\n self.rel = rel\n self.field = rel.field\n\n @cached_property\n def related_manager_cls(self):\n related_model = self.rel.related_model\n return create_reverse_many_to_one_manager(related_model._default_manager.__class__, self.rel)\n\n def __get__(self, instance, cls=None):\n \"\"\"\n Get the related objects through the reverse relation.\n\n With the example above, when getting ``parent.children``:\n\n - ``self`` is the descriptor managing the ``children`` attribute\n - ``instance`` is the ``parent`` instance\n - ``cls`` is the ``Parent`` class (unused)\n \"\"\"\n if instance is None:\n return self\n else:\n return self.related_manager_cls(instance)\n\n def _get_set_deprecation_msg_params(self):\n return (\n b'reverse side of a related set',\n self.rel.get_accessor_name())\n\n def __set__(self, instance, value):\n \"\"\"\n Set the related objects through the reverse relation.\n\n With the example above, when setting ``parent.children = children``:\n\n - ``self`` is the descriptor managing the ``children`` attribute\n - ``instance`` is the ``parent`` instance\n - ``value`` is the ``children`` sequence on the right of the equal sign\n \"\"\"\n warnings.warn(b'Direct assignment to the %s 
is deprecated due to the implicit save() that happens. Use %s.set() instead.' % self._get_set_deprecation_msg_params(), RemovedInDjango20Warning, stacklevel=2)\n manager = self.__get__(instance)\n manager.set(value)\n\n\ndef create_reverse_many_to_one_manager(superclass, rel):\n \"\"\"\n Create a manager for the reverse side of a many-to-one relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-one relations.\n \"\"\"\n\n class RelatedManager(superclass):\n\n def __init__(self, instance):\n super(RelatedManager, self).__init__()\n self.instance = instance\n self.model = rel.related_model\n self.field = rel.field\n self.core_filters = {self.field.name: instance}\n\n def __call__(self, **kwargs):\n manager = getattr(self.model, kwargs.pop(b'manager'))\n manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)\n return manager_class(self.instance)\n\n do_not_call_in_templates = True\n\n def _apply_rel_filters(self, queryset):\n \"\"\"\n Filter the queryset for the instance this manager is bound to.\n \"\"\"\n db = self._db or router.db_for_read(self.model, instance=self.instance)\n empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls\n queryset._add_hints(instance=self.instance)\n if self._db:\n queryset = queryset.using(self._db)\n queryset = queryset.filter(**self.core_filters)\n for field in self.field.foreign_related_fields:\n val = getattr(self.instance, field.attname)\n if val is None or val == b'' and empty_strings_as_null:\n return queryset.none()\n\n queryset._known_related_objects = {self.field: {self.instance.pk: self.instance}}\n return queryset\n\n def _remove_prefetched_objects(self):\n try:\n self.instance._prefetched_objects_cache.pop(self.field.related_query_name())\n except (AttributeError, KeyError):\n pass\n\n def get_queryset(self):\n try:\n return self.instance._prefetched_objects_cache[self.field.related_query_name()]\n except (AttributeError, KeyError):\n queryset = super(RelatedManager, self).get_queryset()\n return self._apply_rel_filters(queryset)\n\n def get_prefetch_queryset(self, instances, queryset=None):\n if queryset is None:\n queryset = super(RelatedManager, self).get_queryset()\n queryset._add_hints(instance=instances[0])\n queryset = queryset.using(queryset._db or self._db)\n rel_obj_attr = self.field.get_local_related_value\n instance_attr = self.field.get_foreign_related_value\n instances_dict = {instance_attr(inst):inst for inst in instances}\n query = {b'%s__in' % self.field.name: instances}\n queryset = queryset.filter(**query)\n for rel_obj in queryset:\n instance = instances_dict[rel_obj_attr(rel_obj)]\n setattr(rel_obj, self.field.name, instance)\n\n cache_name = self.field.related_query_name()\n return (queryset, rel_obj_attr, instance_attr, False, cache_name)\n\n def add(self, *objs, **kwargs):\n self._remove_prefetched_objects()\n bulk = kwargs.pop(b'bulk', True)\n objs = list(objs)\n db = router.db_for_write(self.model, instance=self.instance)\n\n def check_and_update_obj(obj):\n if not isinstance(obj, self.model):\n raise TypeError(b\"'%s' instance expected, got %r\" % (\n self.model._meta.object_name, obj))\n setattr(obj, self.field.name, self.instance)\n\n if bulk:\n pks = []\n for obj in objs:\n check_and_update_obj(obj)\n if obj._state.adding or obj._state.db != db:\n raise ValueError(b\"%r instance isn't saved. 
Use bulk=False or save the object first.\" % obj)\n pks.append(obj.pk)\n\n self.model._base_manager.using(db).filter(pk__in=pks).update(**{self.field.name: self.instance})\n else:\n with transaction.atomic(using=db, savepoint=False):\n for obj in objs:\n check_and_update_obj(obj)\n obj.save()\n\n add.alters_data = True\n\n def create(self, **kwargs):\n kwargs[self.field.name] = self.instance\n db = router.db_for_write(self.model, instance=self.instance)\n return super(RelatedManager, self.db_manager(db)).create(**kwargs)\n\n create.alters_data = True\n\n def get_or_create(self, **kwargs):\n kwargs[self.field.name] = self.instance\n db = router.db_for_write(self.model, instance=self.instance)\n return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)\n\n get_or_create.alters_data = True\n\n def update_or_create(self, **kwargs):\n kwargs[self.field.name] = self.instance\n db = router.db_for_write(self.model, instance=self.instance)\n return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)\n\n update_or_create.alters_data = True\n if rel.field.null:\n\n def remove(self, *objs, **kwargs):\n if not objs:\n return\n bulk = kwargs.pop(b'bulk', True)\n val = self.field.get_foreign_related_value(self.instance)\n old_ids = set()\n for obj in objs:\n if self.field.get_local_related_value(obj) == val:\n old_ids.add(obj.pk)\n else:\n raise self.field.remote_field.model.DoesNotExist(b'%r is not related to %r.' % (obj, self.instance))\n\n self._clear(self.filter(pk__in=old_ids), bulk)\n\n remove.alters_data = True\n\n def clear(self, **kwargs):\n bulk = kwargs.pop(b'bulk', True)\n self._clear(self, bulk)\n\n clear.alters_data = True\n\n def _clear(self, queryset, bulk):\n self._remove_prefetched_objects()\n db = router.db_for_write(self.model, instance=self.instance)\n queryset = queryset.using(db)\n if bulk:\n queryset.update(**{self.field.name: None})\n else:\n with transaction.atomic(using=db, savepoint=False):\n for obj in queryset:\n setattr(obj, self.field.name, None)\n obj.save(update_fields=[self.field.name])\n\n return\n\n _clear.alters_data = True\n\n def set(self, objs, **kwargs):\n objs = tuple(objs)\n bulk = kwargs.pop(b'bulk', True)\n clear = kwargs.pop(b'clear', False)\n if self.field.null:\n db = router.db_for_write(self.model, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n if clear:\n self.clear()\n self.add(bulk=bulk, *objs)\n else:\n old_objs = set(self.using(db).all())\n new_objs = []\n for obj in objs:\n if obj in old_objs:\n old_objs.remove(obj)\n else:\n new_objs.append(obj)\n\n self.remove(bulk=bulk, *old_objs)\n self.add(bulk=bulk, *new_objs)\n else:\n self.add(bulk=bulk, *objs)\n\n set.alters_data = True\n\n return RelatedManager\n\n\nclass ManyToManyDescriptor(ReverseManyToOneDescriptor):\n \"\"\"\n Accessor to the related objects manager on the forward and reverse sides of\n a many-to-many relation.\n\n In the example::\n\n class Pizza(Model):\n toppings = ManyToManyField(Topping, related_name='pizzas')\n\n ``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``\n instances.\n\n Most of the implementation is delegated to a dynamically defined manager\n class built by ``create_forward_many_to_many_manager()`` defined below.\n \"\"\"\n\n def __init__(self, rel, reverse=False):\n super(ManyToManyDescriptor, self).__init__(rel)\n self.reverse = reverse\n\n @property\n def through(self):\n return self.rel.through\n\n @cached_property\n def related_manager_cls(self):\n related_model = 
self.rel.related_model if self.reverse else self.rel.model\n return create_forward_many_to_many_manager(related_model._default_manager.__class__, self.rel, reverse=self.reverse)\n\n def _get_set_deprecation_msg_params(self):\n return (\n b'%s side of a many-to-many set' % (b'reverse' if self.reverse else b'forward'),\n self.rel.get_accessor_name() if self.reverse else self.field.name)\n\n\ndef create_forward_many_to_many_manager(superclass, rel, reverse):\n \"\"\"\n Create a manager for the either side of a many-to-many relation.\n\n This manager subclasses another manager, generally the default manager of\n the related model, and adds behaviors specific to many-to-many relations.\n \"\"\"\n\n class ManyRelatedManager(superclass):\n\n def __init__(self, instance=None):\n super(ManyRelatedManager, self).__init__()\n self.instance = instance\n if not reverse:\n self.model = rel.model\n self.query_field_name = rel.field.related_query_name()\n self.prefetch_cache_name = rel.field.name\n self.source_field_name = rel.field.m2m_field_name()\n self.target_field_name = rel.field.m2m_reverse_field_name()\n self.symmetrical = rel.symmetrical\n else:\n self.model = rel.related_model\n self.query_field_name = rel.field.name\n self.prefetch_cache_name = rel.field.related_query_name()\n self.source_field_name = rel.field.m2m_reverse_field_name()\n self.target_field_name = rel.field.m2m_field_name()\n self.symmetrical = False\n self.through = rel.through\n self.reverse = reverse\n self.source_field = self.through._meta.get_field(self.source_field_name)\n self.target_field = self.through._meta.get_field(self.target_field_name)\n self.core_filters = {}\n self.pk_field_names = {}\n for lh_field, rh_field in self.source_field.related_fields:\n core_filter_key = b'%s__%s' % (self.query_field_name, rh_field.name)\n self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)\n self.pk_field_names[lh_field.name] = rh_field.name\n\n self.related_val = self.source_field.get_foreign_related_value(instance)\n if None in self.related_val:\n raise ValueError(b'\"%r\" needs to have a value for field \"%s\" before this many-to-many relationship can be used.' % (\n instance, self.pk_field_names[self.source_field_name]))\n if instance.pk is None:\n raise ValueError(b'%r instance needs to have a primary key value before a many-to-many relationship can be used.' 
% instance.__class__.__name__)\n return\n\n def __call__(self, **kwargs):\n manager = getattr(self.model, kwargs.pop(b'manager'))\n manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)\n return manager_class(instance=self.instance)\n\n do_not_call_in_templates = True\n\n def _build_remove_filters(self, removed_vals):\n filters = Q(**{self.source_field_name: self.related_val})\n removed_vals_filters = not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()\n if removed_vals_filters:\n filters &= Q(**{b'%s__in' % self.target_field_name: removed_vals})\n if self.symmetrical:\n symmetrical_filters = Q(**{self.target_field_name: self.related_val})\n if removed_vals_filters:\n symmetrical_filters &= Q(**{b'%s__in' % self.source_field_name: removed_vals})\n filters |= symmetrical_filters\n return filters\n\n def _apply_rel_filters(self, queryset):\n \"\"\"\n Filter the queryset for the instance this manager is bound to.\n \"\"\"\n queryset._add_hints(instance=self.instance)\n if self._db:\n queryset = queryset.using(self._db)\n return queryset._next_is_sticky().filter(**self.core_filters)\n\n def _remove_prefetched_objects(self):\n try:\n self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)\n except (AttributeError, KeyError):\n pass\n\n def get_queryset(self):\n try:\n return self.instance._prefetched_objects_cache[self.prefetch_cache_name]\n except (AttributeError, KeyError):\n queryset = super(ManyRelatedManager, self).get_queryset()\n return self._apply_rel_filters(queryset)\n\n def get_prefetch_queryset(self, instances, queryset=None):\n if queryset is None:\n queryset = super(ManyRelatedManager, self).get_queryset()\n queryset._add_hints(instance=instances[0])\n queryset = queryset.using(queryset._db or self._db)\n query = {b'%s__in' % self.query_field_name: instances}\n queryset = queryset._next_is_sticky().filter(**query)\n fk = self.through._meta.get_field(self.source_field_name)\n join_table = fk.model._meta.db_table\n connection = connections[queryset.db]\n qn = connection.ops.quote_name\n queryset = queryset.extra(select={b'_prefetch_related_val_%s' % f.attname:b'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})\n return (\n queryset,\n lambda result: tuple(getattr(result, b'_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields),\n lambda inst: tuple(f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields),\n False,\n self.prefetch_cache_name)\n\n def add(self, *objs):\n if not rel.through._meta.auto_created:\n opts = self.through._meta\n raise AttributeError(b\"Cannot use add() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead.\" % (\n opts.app_label, opts.object_name))\n self._remove_prefetched_objects()\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n self._add_items(self.source_field_name, self.target_field_name, *objs)\n if self.symmetrical:\n self._add_items(self.target_field_name, self.source_field_name, *objs)\n\n add.alters_data = True\n\n def remove(self, *objs):\n if not rel.through._meta.auto_created:\n opts = self.through._meta\n raise AttributeError(b\"Cannot use remove() on a ManyToManyField which specifies an intermediary model. 
Use %s.%s's Manager instead.\" % (\n opts.app_label, opts.object_name))\n self._remove_prefetched_objects()\n self._remove_items(self.source_field_name, self.target_field_name, *objs)\n\n remove.alters_data = True\n\n def clear(self):\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n signals.m2m_changed.send(sender=self.through, action=b'pre_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)\n self._remove_prefetched_objects()\n filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))\n self.through._default_manager.using(db).filter(filters).delete()\n signals.m2m_changed.send(sender=self.through, action=b'post_clear', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db)\n return\n\n clear.alters_data = True\n\n def set(self, objs, **kwargs):\n if not rel.through._meta.auto_created:\n opts = self.through._meta\n raise AttributeError(b\"Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead.\" % (\n opts.app_label, opts.object_name))\n objs = tuple(objs)\n clear = kwargs.pop(b'clear', False)\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n if clear:\n self.clear()\n self.add(*objs)\n else:\n old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))\n new_objs = []\n for obj in objs:\n fk_val = self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else obj\n if fk_val in old_ids:\n old_ids.remove(fk_val)\n else:\n new_objs.append(obj)\n\n self.remove(*old_ids)\n self.add(*new_objs)\n\n set.alters_data = True\n\n def create(self, **kwargs):\n if not self.through._meta.auto_created:\n opts = self.through._meta\n raise AttributeError(b\"Cannot use create() on a ManyToManyField which specifies an intermediary model. 
Use %s.%s's Manager instead.\" % (\n opts.app_label, opts.object_name))\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)\n self.add(new_obj)\n return new_obj\n\n create.alters_data = True\n\n def get_or_create(self, **kwargs):\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)\n if created:\n self.add(obj)\n return (\n obj, created)\n\n get_or_create.alters_data = True\n\n def update_or_create(self, **kwargs):\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)\n if created:\n self.add(obj)\n return (\n obj, created)\n\n update_or_create.alters_data = True\n\n def _add_items(self, source_field_name, target_field_name, *objs):\n from django.db.models import Model\n if objs:\n new_ids = set()\n for obj in objs:\n if isinstance(obj, self.model):\n if not router.allow_relation(obj, self.instance):\n raise ValueError(b'Cannot add \"%r\": instance is on database \"%s\", value is on database \"%s\"' % (\n obj, self.instance._state.db, obj._state.db))\n fk_val = self.through._meta.get_field(target_field_name).get_foreign_related_value(obj)[0]\n if fk_val is None:\n raise ValueError(b'Cannot add \"%r\": the value for field \"%s\" is None' % (\n obj, target_field_name))\n new_ids.add(fk_val)\n elif isinstance(obj, Model):\n raise TypeError(b\"'%s' instance expected, got %r\" % (\n self.model._meta.object_name, obj))\n else:\n new_ids.add(obj)\n\n db = router.db_for_write(self.through, instance=self.instance)\n vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True).filter(**{source_field_name: self.related_val[0], \n b'%s__in' % target_field_name: new_ids})\n new_ids = new_ids - set(vals)\n with transaction.atomic(using=db, savepoint=False):\n if self.reverse or source_field_name == self.source_field_name:\n signals.m2m_changed.send(sender=self.through, action=b'pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db)\n self.through._default_manager.using(db).bulk_create([ self.through(**{b'%s_id' % source_field_name: self.related_val[0], b'%s_id' % target_field_name: obj_id}) for obj_id in new_ids\n ])\n if self.reverse or source_field_name == self.source_field_name:\n signals.m2m_changed.send(sender=self.through, action=b'post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db)\n return\n\n def _remove_items(self, source_field_name, target_field_name, *objs):\n if not objs:\n return\n old_ids = set()\n for obj in objs:\n if isinstance(obj, self.model):\n fk_val = self.target_field.get_foreign_related_value(obj)[0]\n old_ids.add(fk_val)\n else:\n old_ids.add(obj)\n\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n signals.m2m_changed.send(sender=self.through, action=b'pre_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)\n target_model_qs = super(ManyRelatedManager, self).get_queryset()\n if target_model_qs._has_filters():\n old_vals = target_model_qs.using(db).filter(**{b'%s__in' % self.target_field.target_field.attname: old_ids})\n else:\n old_vals = old_ids\n filters = self._build_remove_filters(old_vals)\n 
self.through._default_manager.using(db).filter(filters).delete()\n signals.m2m_changed.send(sender=self.through, action=b'post_remove', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db)\n\n return ManyRelatedManager","sub_path":"pycfiles/djx-0.0.4-py2-none-any/related_descriptors.py","file_name":"related_descriptors.py","file_ext":"py","file_size_in_byte":41464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"584139484","text":"import sublime_plugin\nfrom .import_helper import source_modules, node_modules\nfrom .utils import *\n\nclass ImportHelperViewEventListener(sublime_plugin.ViewEventListener):\n\n def __init__(self, view):\n super().__init__(view)\n self.completions_info = {'time': -1, 'result': [], 'prefix': ''}\n self.in_auto_complete = False\n self.autocomplete_point = 0\n self.autocomplete_export_names = get_setting('autocomplete_export_names', True)\n self.autocomplete_auto_import = get_setting('autocomplete_auto_import', False)\n\n def on_query_completions(self, prefix, locations):\n if not self.autocomplete_export_names or not (len(prefix) > 0 and self.view.match_selector(self.autocomplete_point, 'source.ts, source.tsx, source.js, source.jsx')):\n return []\n self.autocomplete_point = locations[0]\n if get_time() > self.completions_info['time'] + 1 or prefix != self.completions_info['prefix']:\n self.completions_info['time'] = get_time()\n self.completions_info['prefix'] = prefix\n self.completions_info['result'] = query_completions_modules(prefix, source_modules, node_modules)\n return self.completions_info['result']\n \n def on_post_text_command(self, command_name, args):\n if not (self.autocomplete_auto_import and self.autocomplete_export_names):\n return\n if self.in_auto_complete and command_name in ['insert_best_completion', 'insert_dimensions']:\n self.in_auto_complete = False\n self.view.run_command('insert_import', args=({'point': self.autocomplete_point - 1, 'notify': False}))\n elif command_name in ['auto_complete', 'replace_completion_with_next_completion', 'replace_completion_with_auto_complete']:\n self.in_auto_complete = True\n elif command_name == 'hide_auto_complete':\n self.in_auto_complete = False\n\n def on_activated(self):\n self.in_auto_complete = False","sub_path":"view_event_listener.py","file_name":"view_event_listener.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"278689463","text":"# blackjack game\nimport random\n\ncold = [6, 7, 8, 9, 10, 2, 3, 4, 11] * 4\ncoldwithcomp = list(cold)\nplayers = int(input(\"Введите количество участников: \"))\n\n\ndef scoreofplayer():\n yourcard = cold.pop(random.choice(cold))\n while True:\n k = input(\"Ваш счет равен {} очков. Взять еще карту? Yes / No \".format(yourcard))\n if k == \"Yes\":\n yourcard += cold.pop(random.choice(cold))\n if yourcard > 21:\n return yourcard\n else:\n return yourcard\n\n\ndef condition(*args, **kwargs):\n try:\n if risklevel == \"Easy\":\n scale = 12\n elif risklevel == \"Medium\":\n scale = 15\n elif risklevel == \"Hard\":\n scale = 17\n cardcomp = coldwithcomp.pop(random.choice(coldwithcomp))\n while True:\n if cardcomp < scale:\n cardcomp += coldwithcomp.pop(random.choice(coldwithcomp))\n else:\n break\n\n except:\n pass\n\n try:\n if k <= 21 and cardcomp <= 21:\n if k > cardcomp:\n print(\"Вы выиграли! 
Ваше количество очков - {}, компьютера - {}\".format(k, cardcomp))\n elif k < cardcomp:\n print(\"Вы проиграли! Ваше количество очков - {}, компьютера - {}\".format(k, cardcomp))\n else:\n print(\"У вас одинаковое количество очков - {}\".format(k))\n elif k > 21 and cardcomp > 21:\n print(\"Оба участника набрали больше 21 очка. Участник - {}, компьютер - {}\".format(k, cardcomp))\n elif k <= 21 and 21 < cardcomp:\n print(\"Вы выиграли! У оппонента больше 21 очка. У вас - {}, у компьютера - {}\".format(k, cardcomp))\n elif k > 21 and 21 >= cardcomp:\n print(\"Вы проиграли! У Вас больше 21 очка. У вас - {}, у компьютера - {}\".format(k, cardcomp))\n elif k == cardcomp:\n print(\"Ничья! У вас одинаковое количество очков - {}\".format(k))\n\n except:\n if k < 21 and players == 1:\n print(\"Вы набрали меньше 21 очка - {}\".format(k))\n elif k == 21 and players == 1:\n print(\"Вы выиграли набрав 21 очко!\")\n elif k > 21 and players == 1:\n print(\"Вы проиграли, набрав больше 21 очка - {}\".format(k))\n\n\nif players == 1:\n k = scoreofplayer()\n condition(k)\n while True:\n answer = input(\"Еще разок? Yes / No \")\n if answer == 'Yes':\n k = scoreofplayer()\n condition(k)\n else:\n print(\"Конец игры\")\n break\n\nelif players == 2:\n risklevel = input(\"Введите уровень риска соперника: Easy, Medium or Hard: \")\n k = scoreofplayer()\n condition(k, risklevel)\n while True:\n answer = input(\"Еще разок? Yes / No \")\n if answer == 'Yes':\n question = input(\"Сменить уровень риска?: Yes / No\")\n if question == \"Yes\":\n risklevel = input(\"Введите уровень риска соперника: Easy, Medium or Hard: \")\n k = scoreofplayer()\n condition(k, risklevel)\n else:\n k = scoreofplayer()\n condition(k, risklevel)\n else:\n print(\"Конец игры\")\n break\n\n\n\n\n","sub_path":"last.py","file_name":"last.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"267418088","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"/tmp/data\", one_hot=True)\n\n# hyperparameter festlegen\n# Number of neurons per Layer\nn_nodes_h1 = 500\nn_nodes_h2 = 500\nn_nodes_h3 = 500\n\n# Unterschiedliche Klassen, Zahlen 0 - 9\nn_classes = 10\n\n# Wie viele Bilder auf einmal gelesen werden können\nbatch_size = 100\n\n# 28 x 28 pixel pro Bild\nx = tf.placeholder('float', [None, 784])\ny = tf.placeholder('float', [None, n_classes])\n\n\ndef neuralNetworkModel(data):\n\n # per Zufallszahlen trainieren\n hiddenLayer_1 = {'weights': tf.Variable(tf.random_normal([784, n_nodes_h1])),\n 'biases': tf.Variable(tf.random_normal([n_nodes_h1]))}\n\n hiddenLayer_2 = {'weights': tf.Variable(tf.random_normal([n_nodes_h1, n_nodes_h2])),\n 'biases': tf.Variable(tf.random_normal([n_nodes_h2]))}\n\n hiddenLayer_3 = {'weights': tf.Variable(tf.random_normal([n_nodes_h2, n_nodes_h3])),\n 'biases': tf.Variable(tf.random_normal([n_nodes_h3]))}\n\n outputLayer = {'weights': tf.Variable(tf.random_normal([n_nodes_h3, n_classes])),\n 'biases': tf.Variable(tf.random_normal([n_classes]))}\n\n # Matrix multiplication\n l1 = tf.add(tf.matmul(data, hiddenLayer_1['weights']), hiddenLayer_1['biases'])\n l1 = tf.nn.relu(l1)\n l2 = tf.add(tf.matmul(l1, hiddenLayer_2['weights']), hiddenLayer_2['biases'])\n l2 = tf.nn.relu(l2)\n l3 = tf.add(tf.matmul(l2, hiddenLayer_3['weights']), hiddenLayer_3['biases'])\n l3 = tf.nn.relu(l3)\n\n output = tf.matmul(l3, outputLayer['weights']) + outputLayer['biases']\n\n 
return output\n\n\n\ndef trainNeuralNetwork(x):\n prediction = neuralNetworkModel(x)\n # Cost minimieren (Fehlerrate)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))\n #\n optimizer = tf.train.AdamOptimizer().minimize(cost)\n\n # Epochs\n epochs = 10\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for ep in range(epochs):\n epochs_loss = 0\n\n for _ in range(int(mnist.train.num_examples/batch_size)):\n epochs_x, epochs_y = mnist.train.next_batch(batch_size)\n _,c = sess.run([optimizer], feed_dict={x: epochs_x, y: epochs_y})\n epochs_loss+=c\n\n print('Epochs', ep, 'compleded out of ', epochs, 'loss', epochs_loss)\n\n correct = tf.equal(tf.arg_max(prediction, 1), tf.arg_max(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n\n print('Accuracy', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))\n\n\ntrainNeuralNetwork(x)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PyCharm/neuralNet.py","file_name":"neuralNet.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209460597","text":"from nltk.stem.wordnet import WordNetLemmatizer\r\nimport requests\r\nimport json\r\nimport nltk\r\nfrom nltk.tag.stanford import CoreNLPPOSTagger\r\ntry:\r\n from .Questions import Questions\r\nexcept:\r\n from Questions import Questions\r\n\r\n# POS tagging prerequisites\r\nclass Yes(Questions):\r\n\tdef modify(self):\r\n\t\tline = self.text\r\n\t\ttext = CoreNLPPOSTagger(url='http://localhost:9000').tag(line.split())\r\n\r\n\t\tprint(text)\r\n\r\n\t\t#yes-questions\r\n\t\tc = 0\r\n\t\ts = ['null']\r\n\t\tq = ''\r\n\t\tfor tagg in text:\r\n\t\t\ts.append(tagg[0])\r\n\t\t\tif(c == 0 and tagg[1] != \"NNP\"):\r\n\t\t\t\ts[1] = s[1].lower()\r\n\r\n\t\t\t#line = 'She ate the fruits.'\r\n\t\t\tif tagg[1] == \"VBD\" and text[c][0] != 'had':\r\n\t\t\t\ts[0] = \"Did\"\r\n\t\t\t\ts[c+1] = WordNetLemmatizer().lemmatize(tagg[0], 'v')\r\n\r\n\t\t\t#line = 'We eat the fruits.'\r\n\t\t\tif tagg[1] == \"VBP\" and text[c][0] != 'is' and text[c][0] != 'are' and text[c][0] != 'have':\r\n\t\t\t\ts[0] = \"Do\"\r\n\r\n\t\t\t#line = 'She eats the fruits.'\r\n\t\t\tif tagg[1] == \"VBZ\" and text[c+1][1] != 'VBN' and text[c+1][1] != 'VBG':\r\n\t\t\t\ts[0] = \"Does\"\r\n\t\t\t\ts[c+1] = WordNetLemmatizer().lemmatize(tagg[0], 'v')\r\n\t\t\t#line = 'She has eaten the fruits.'\r\n\t\t\tif tagg[1] == \"VBZ\" and text[c+1][1] == 'VBN' and text[c+2][1] != \"VBG\":\r\n\t\t\t\ts[0] = tagg[0].capitalize()\r\n\t\t\t\ts.pop(c+1)\r\n\t\t\t\t#s[c+1] = tagg[0]\r\n\t\t\t#line = 'She is eating the fruits.'\r\n\t\t\t#line = 'She was eating the fruits.'\r\n\t\t\t#line = 'She is going to eat the fruits.'\r\n\t\t\tif tagg[1] == \"VBG\" and text[c-1][1] != 'VB' and text[c-1][1] != 'VBN':\r\n\t\t\t\ts[0] = text[c-1][0].capitalize()\r\n\t\t\t\ts.pop(c)\r\n\t\t\t\ts[c] = tagg[0]\r\n\t\t\t#line = 'She will be eating the fruits.'\r\n\t\t\tif tagg[1] == \"VBG\" and text[c-1][1] == 'VB':\r\n\t\t\t\ts[0] = text[c-2][0].capitalize()\r\n\r\n\t\t\t#line = 'She has been eating the fruits.'\r\n\t\t\t#line = 'She had been eating the fruits.'\r\n\t\t\tif (tagg[1] == \"VBZ\" or tagg[1] == \"VBD\") and text[c+1][1] == 'VBN' and text[c+2][1] == 'VBG':\r\n\t\t\t\ts[0] = text[c][0].capitalize()\r\n\t\t\t\ts.pop(c+1)\r\n\r\n\t\t\t#line = 'She had eaten the fruits.'\r\n\t\t\t#line = 'We have eaten the fruits.'\r\n\t\t\tif tagg[1] == \"VBN\" and tagg[0] != 'been' and (text[c-1][0] == 'had' or 
text[c-1][0] == 'have') and text[c-2][1] != 'MD':\r\n\t\t\t\ts[0] = text[c-1][0].capitalize()\r\n\t\t\t\ts.pop(c)\r\n\t\t\t\ts[c] = tagg[0]\r\n\r\n\t\t\t#line = 'She will have eaten the fruits.'\r\n\t\t\t#line = 'She will eat the fruits.'\r\n\t\t\t#line = 'She would have eaten the fruits.'\r\n\t\t\tif tagg[1] == \"MD\" and text[c+1][1] == 'VB':\r\n\t\t\t\ts[0] = tagg[0].capitalize()\r\n\t\t\t\ts.pop(c+1)\r\n\r\n\t\t\tc = c + 1\r\n\r\n\t\t#No-questions\r\n\r\n\r\n\t\tfor i in range(len(s)-1):\r\n\t\t\tq = q + ' ' + s[i]\r\n\t\tq = q + ' ?'\r\n\t\tprint(q) \r\n\t\treturn q\r\n\r\n\r\nif __name__ == \"__main__\":\r\n q = Yes(input())\r\n print(q.get_text())\r\n","sub_path":"nlpapp/modules/questions/yes_qts.py","file_name":"yes_qts.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422962843","text":"import io\n\nfrom setuptools import find_packages, setup\n\nwith io.open('README.md', 'rt', encoding=\"utf8\") as f:\n readme = f.read()\n\nsetup(\n\tname='projectalice-sk',\n\tauthor='ProjectAlice',\n\tversion='1.0.7',\n\tmaintainer='Psychokiller1888',\n\tmaintainer_email='laurentchervet@bluewin.ch',\n\tdescription='Project Alice skill kit',\n\tlong_description=readme,\n\tlong_description_content_type='text/markdown',\n\turl='https://github.com/project-alice-powered-by-snips/ProjectAliceSkillKit',\n\tlicense='GPL-3.0',\n\tpackages=find_packages(),\n\tinclude_package_data=True,\n\tuse_scm_version=False,\n\tsetup_requires=['setuptools_scm'],\n\tinstall_requires=[\n\t\t'jsonschema>=3.0.0',\n\t\t'click',\n\t\t'unidecode',\n\t\t'requests',\n\t\t'PyInquirer',\n\t\t'prompt_toolkit==1.0.14',\n\t\t'jinja2'\n\t],\n\tclassifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points='''\n [console_scripts]\n projectalice-sk=ProjectAliceSK.ProjectAliceSkillKit:cli\n '''\n)\n","sub_path":"pypi_install_script/projectalice-sk-1.0.7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"284831125","text":"#!/usr/bin/env python3\nimport base64\nimport csv\nimport datetime\nimport fileinput\nimport numpy\nimport os\nimport random\nimport re\nimport socket\nimport socketserver\nimport subprocess\nimport sys\nimport threading\nfrom time import gmtime, strftime, localtime\n\n'''\n Summary:\n - client/server/decrypter/scripts in c, python, bash.\n - TCP/IP server prints connections in console, writes log, writes encrypted bytes from client to a file.\n - Decrypter just uses aes256 key and iv to decrypt bytes in hash file.\n - Client encrypts w/ aes256/openssl and sends bytes to server, can change to more practical encryption for lightweight client/remove dependecy on libraries.\n - bash files & executable - crain, srain, decrypter.exe\n - build: gcc -lssl -lcrypto -o \n\n TODO:\n - Obfuscate code and signatures.\n - Replace library crypto dependencies with in-program functionality, reduce dependencies in general.\n - Scrub for bugs, useless code, memory use.\n - Use of tor w/ client? 
Server .onion + proxies.\n - Integrate other tools/binaries?\n - Take time to build out the functions in PyQt5 GUI\n\n SERVER - rain.py\n - serve_forever adds while loop preventing main program from completing fully, needs a clean exit to finish logs.\n - Server generates clients and encryption keys, tracks generated, active clients.\n - Server writes console and log.\n - Server writes encrypted data to file.\n - Server decrypts and prints data. -> Use of decrypter.c\n - Server sends commands to clients. Add rshell, sysinfo pullback options.\n - Limit connections, use authentication\n\n CLIENT - client5.c\n - Server generates clients with unique id and public key, maintains key associations. tbd on openssl library dependance, key type, size etc.\n - Client public key to auth w/ server, key to encrypt data into file, send logs to server to decrypt.\n - Server able to track clients, send cmds.\n\n DECRYPTER - decrypter.c\n - Decrypts \"crypto\" file on server, update to work with upper limit of bytes.\n\n GUI - main4.py\n - Log view in pyqt5.\n\n GUI - seccons.py\n - Log view in pygame.\n'''\n\nclass console:\n\n def __init__(self):\n self.deflog = []\n self.logfile = \"\"\n\n def saveit(self, a=''):\n self.deflog.append(a)\n\n def logit(self):\n self.logfile = \"{}-{}\".format(sys.argv[0], strftime(\"%Y%m%d\", localtime()))\n with open(self.logfile, 'a+') as x:\n for y in self.deflog:\n x.write(\"[{}] {}: {}\\n\".format(strftime(\"%H:%M:%S\", localtime()), __name__, y))\n self.saveit(\"[Logged to file --> {}]\".format(self.logfile))\n\n def printit(self):\n for x in self.deflog:\n print(\"[+] [{}]: // {:<80}\\n\".format(strftime(\"%H:%M:%S\", localtime()),x))\n\nclass crypter:\n\n def __init__(self):\n self.crypted_strings = []\n self.crypted_logfile = \"\"\n self.decrypted_strings = []\n self.decrypted_logfile = \"\"\n\n def crypto_saveit(self, a=''):\n self.crypted_strings.append(a)\n \n def crypto_logit(self):\n self.crypted_logfile = 'crypto'\n with open(self.crypted_logfile, 'wb') as x:\n for y in self.crypted_strings:\n '''y.encode('utf-8')'''\n base64.b64encode(y)\n x.write(y)\n\n def crypto_printit(self):\n for x in self.crypted_strings:\n print(\"[+] [{}]: // {:<80}\\n\".format(strftime(\"%H:%M:%S\", localtime()),str(x)))\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n \"\"\"\n The RequestHandler class for our server.\n\n It is instantiated once per connection to the server, and must\n override the handle() method to implement communication to the\n client.\n \"\"\"\n\n def handle(self):\n # self.request is the TCP socket connected to the client\n handlog = console()\n cryptlog = crypter()\n self.data = self.request.recv(1024).strip()\n handlog.saveit(\"[{}]: <{}>\".format(self.client_address[0], self.data))\n handlog.logit()\n handlog.printit()\n cryptlog.crypto_saveit(self.data)\n cryptlog.crypto_logit()\n cryptlog.crypto_printit()\n # just send back the same data, but upper-cased\n self.request.sendall(self.data.upper())\n\nclass secutils:\n\n def __init__(self):\n pass\n\n def threads(console):\n #con3 = console()\n x = console\n thread1 = threading.Thread(group=None, target=None, name=None, args=(), kwargs={}, daemon=None)\n thread2 = threading.Thread(group=None, target=None, name=None, args=(), kwargs={}, daemon=None)\n thread1.start()\n thread2.start()\n x.saveit(\"threads_active: {0} - {1}\".format(threading.active_count(), threading.get_ident()))\n x.saveit(\"Thread1: {0} Thread2: {1}\".format(thread1.is_alive(), thread2.is_alive()))\n 
x.saveit(thread1.is_alive())\n x.saveit(thread2.is_alive())\n\n def iptrack(x=\"all_argc_files\", y=0):\n con2 = console()\n ipv4 = ''\n ipv6 = ''\n iptype = ''\n ipstring = ''\n if y == 4:\n ipv4=re.compile(\"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\", re.MULTILINE)\n iptype = \"IPv4\"\n ipstring = ipv4\n if y == 6:\n ipv6=re.compile(\"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0 -9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA -F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1 }[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\", re.MULTILINE)\n iptype = \"IPv6\"\n ipstring = ipv6\n fn2 = x.filename()\n results = []\n results1 = []\n results2 = []\n d = 0\n atr = 0\n ctr = 0\n con2.saveit(\"{} {} {}\".format(x, sys.argv, y))\n for line in x:\n fn1 = x.filename()\n ipv4_list = re.search(ipstring, line)\n ipv4_list2 = re.findall(ipstring, line)\n if ipv4_list2:\n results2.append(ipv4_list2)\n if ipv4_list:\n results.append(ipv4_list.group(0))\n d = {x3:results.count(x3) for x3 in results}\n if fn1 != fn2:\n d.update({fn1:fn1})\n del results[1:]\n fn2 = fn1\n try:\n con2.saveit(\"-----{}-----\".format(iptype))\n for ipv4_list2 in sorted(d.keys()):\n if d[ipv4_list2] > 1:\n con2.saveit(\"({1: <3}) {0: <24} {2: <64}\".format(ipv4_list2, d[ipv4_list2], subprocess.getoutput(\"dig +short -x {0}\".format(ipv4_list2))))\n '''print(\"({1: <3}) {0: <24} {2: <64}\".format(ipv4_list2, d[ipv4_list2], \"{}\".format(out)))'''\n ctr+=1\n elif d[ipv4_list2] == 1:\n con2.saveit(\"({1: <3}) {0: <24} {2: <64}\".format(ipv4_list2, d[ipv4_list2], subprocess.getoutput(\"dig +short -x {0}\".format(ipv4_list2))))\n con2.saveit(\"TOTAL FOUND: {0}\".format(len(results2)))\n con2.printit()\n except:\n con2.saveit(\"No Address Found.\")\n con2.printit()\n\ndef main():\n\n con1 = console()\n # print(b\"\\uD83D\\uDE02\".decode(\"utf-16\"))\n con1.saveit(b\"\\uD83D\\uDE02\".decode(\"utf-16\"))\n # main vars\n serverip = 0\n serverport = 0\n clientip = 0\n clientport = 0\n menuitems = 999\n mess=\"message\"\n mess2='loggg'\n\n # catch args // argparse module seems bad\n print(\"{:+^40}\\nSERVER: ./rain.py -s \\nCLIENT: ./rain.py -c \\nIPv4/6: ./rain.py -f \\nARGS: {}\\n\".format(sys.argv[0], ' '.join(sys.argv[1:])))\n # con1.saveit(\"{:-^80}\".format(sys.argv[0]))\n subprocess.call([\"date\"])\n if len(sys.argv) >= 2:\n try:\n if sys.argv[1] == '-s':\n serverip = str(sys.argv[2])\n serverport = int(sys.argv[3])\n server = socketserver.TCPServer((serverip, serverport), MyTCPHandler)\n con1.saveit(\"Server started >>> {}:{}\".format(serverip, serverport))\n con1.printit()\n con1.logit()\n server.serve_forever()\n elif sys.argv[1] == '-c':\n clientip = str(sys.argv[2])\n clientport = int(sys.argv[3])\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n con1.saveit(\"Connected: <{} {}>\".format(clientip, clientport))\n clientsocket.connect((clientip, clientport))\n x = str(input())\n con1.saveit(clientsocket.send(bytes(x, 'UTF-8')))\n received = str(clientsocket.recv(1024), \"utf-8\")\n 
con1.saveit(received)\n clientsocket.close()\n con1.saveit(\"client connection closed.\")\n con1.logit()\n # Needs fixed, fileinput hangs in function, readline, EOF\n '''elif sys.argv[1] == '-f':\n sys.argv = sys.argv[2:]\n secutils.iptrack(fileinput.input(), 4)'''\n except:\n raise\n con1.saveit(\"Usage:\\nprogram [-s, --server] \\nprogram [-c --client] \\n\\nException: {}\\n\".format(sys.exc_info()[0]))\n\n # all main\n '''while menuitems != 0:\n print(\"[ MENU: {} ]\\n1: opt 1\\n2: opt 2\\n3: opt 3\\n4: opt 4\\n5: opt 5\\n0: quit()\\n\".format (sys.argv[0]))\n menuitems = int(input())\n if menuitems == 1:\n pass\n elif menuitems == 2:\n pass\n elif menuitems == 3:\n pass\n elif menuitems == 4:\n pass\n elif menuitems == 5:\n pass\n elif menuitems == 0:\n sys.exit()\n break\n else:\n print(\"bye\")'''\n\n # fileinput iterates over lines from multiple input streams ie sys.argv[1:] defaulting to sys.stin if the list is empty\n '''\n secutils(fileinput.input(), 4)\n secutils(fileinput.input(), 6)\n '''\n\n # test junk\n # secutils.threads(con1)\n # con1.saveit(\"test log\")\n con1.logit()\n con1.printit()\n # secutils.threads()\n\nif __name__ == '__main__':\n main()\n","sub_path":"rain.py","file_name":"rain.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"499913085","text":"import urllib3\nimport pvwa_integration\nimport aws_services\nimport instance_processing\nimport pvwa_api_calls\nimport json\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\ndef lambda_handler(event, context):\n\n try:\n message = event[\"Records\"][0][\"Sns\"][\"Message\"]\n data = json.loads(message)\n except Exception as e:\n print (\"Error on retrieving Message Data from Event Message. Error: {0}\".format(e))\n\n try:\n instance_arn = data[\"resources\"][0]\n except Exception as e:\n print (\"Error on retrieving instance_arn from Event Message. Error: {0}\".format(e))\n\n try:\n instanceId = data[\"detail\"][\"instance-id\"]\n except Exception as e:\n print (\"Error on retrieving Instance Id from Event Message. Error: {0}\".format(e))\n\n try:\n actionType = data[\"detail\"][\"state\"]\n except Exception as e:\n print (\"Error on retrieving Action Type from Event Message. Error: {0}\".format(e))\n\n\n try:\n eventAccountId = data[\"account\"]\n except Exception as e:\n print (\"Error on retrieving Event Account Id from Event Message. Error: {0}\".format(e))\n\n\n try:\n eventRegion = data[\"region\"]\n except Exception as e:\n print (\"Error on retrieving Event Region from Event Message. 
Error: {0}\".format(e))\n\n\n logName = context.log_stream_name if context.log_stream_name else \"None\"\n\n try:\n solutionAccountId = context.invoked_function_arn.split(':')[4]\n instanceDetails = aws_services.get_ec2_details(instanceId, solutionAccountId, eventRegion, eventAccountId)\n\n instanceData = aws_services.get_instance_data_from_dynamo_table(instanceId)\n if actionType == 'terminated':\n if not instanceData:\n print('Item {0} does not exists on DB'.format(instanceId))\n return None\n else:\n instanceStatus = instanceData[\"Status\"][\"S\"]\n if instanceStatus == OnBoardStatus.OnBoarded_Failed:\n print(\"Item {0} is in status OnBoard failed, removing from DynamoDB table\".format(instanceId))\n aws_services.remove_instance_from_dynamo_table(instanceId)\n return None\n elif actionType == 'running':\n if not instanceDetails[\"address\"]: # In case querying AWS return empty address\n print(\"Retrieving Instance address from AWS failed.\")\n return None\n if instanceData:\n instanceStatus = instanceData[\"Status\"][\"S\"]\n if instanceStatus == OnBoardStatus.OnBoarded:\n print('Item: {0}, exists on DB, no need to add it to vault'.format(instanceId))\n return None\n elif instanceStatus == OnBoardStatus.OnBoarded_Failed:\n print(\"Item {0} exists with status 'OnBoard failed', adding to vault\".format(instanceId))\n else:\n print('Item {0} does not exists on DB, adding to vault'.format(instanceId))\n else:\n print('Unknown instance state')\n return\n\n storeParametersClass = aws_services.get_params_from_param_store()\n if not storeParametersClass:\n return\n pvwaConnectionnumber, sessionGuid = aws_services.get_available_session_from_dynamo()\n if not pvwaConnectionnumber:\n return\n sessionToken = pvwa_integration.logon_pvwa(storeParametersClass.vaultUsername,\n storeParametersClass.vaultPassword,\n storeParametersClass.pvwaURL, pvwaConnectionnumber)\n\n if not sessionToken:\n return\n disconnect = False\n if actionType == 'terminated':\n instance_processing.delete_instance(instanceId, sessionToken, storeParametersClass, instanceData, instanceDetails)\n elif actionType == 'running':\n # get key pair\n\n # Retrieving the account id of the account where the instance keyPair is stored\n # AWS...\n keyPairValueOnSafe = \"AWS.{0}.{1}.{2}\".format(instanceDetails[\"aws_account_id\"], eventRegion,\n instanceDetails[\"key_name\"])\n keyPairAccountId = pvwa_api_calls.check_if_kp_exists(sessionToken, keyPairValueOnSafe,\n storeParametersClass.keyPairSafeName,\n instanceId,\n storeParametersClass.pvwaURL)\n if not keyPairAccountId:\n print(\"Key Pair '{0}' does not exist in safe '{1}'\".format(keyPairValueOnSafe,\n storeParametersClass.keyPairSafeName))\n return\n instanceAccountPassword = pvwa_api_calls.get_account_value(sessionToken, keyPairAccountId, instanceId,\n storeParametersClass.pvwaURL)\n if instanceAccountPassword is False:\n return\n pvwa_integration.logoff_pvwa(storeParametersClass.pvwaURL, sessionToken)\n aws_services.release_session_on_dynamo(pvwaConnectionnumber, sessionGuid)\n disconnect = True\n instance_processing.create_instance(instanceId, instanceDetails, storeParametersClass, logName, solutionAccountId, eventRegion, eventAccountId, instanceAccountPassword)\n else:\n print('Unknown instance state')\n return\n\n\n if not disconnect:\n pvwa_integration.logoff_pvwa(storeParametersClass.pvwaURL, sessionToken)\n aws_services.release_session_on_dynamo(pvwaConnectionnumber, sessionGuid)\n\n\n except Exception as e:\n print(\"Unknown error occurred:{0}\".format(e))\n if actionType == 
'terminated':\n # put_instance_to_dynamo_table(instanceId, instanceDetails[\"address\"]\\\n # , OnBoardStatus.Delete_Failed, str(e), logName)\n aws_services.update_instances_table_status(instanceId, OnBoardStatus.Delete_Failed, str(e))\n elif actionType == 'running':\n aws_services.put_instance_to_dynamo_table(instanceId, instanceDetails[\"address\"], OnBoardStatus.OnBoarded_Failed, str(e),\n logName)\n # TODO: Retry mechanism?\n aws_services.release_session_on_dynamo(pvwaConnectionnumber, sessionGuid)\n return\n\n\nclass OnBoardStatus:\n OnBoarded = \"on boarded\"\n OnBoarded_Failed = \"on board failed\"\n Delete_Failed = \"delete failed\"\n","sub_path":"src/aws_ec2_auto_onboarding/AWSEc2AutoOnboarding.py","file_name":"AWSEc2AutoOnboarding.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"549138045","text":"from PredFlow.prediction_model import PredictionModel\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import ParameterGrid\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport lightgbm as lgb\nimport numpy as np\nimport cleaning_services as cs\nimport re\n\n\nclass LgbModel(PredictionModel):\n def _set_internal_state(self):\n self.params = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'multiclassova',\n 'num_boost_round': 10000,\n 'metric': {'multi_error'},\n 'learning_rate': 0.01,\n 'feature_fraction': 1,\n 'bagging_fraction': 0.8,\n 'max_depth': 7,\n 'bagging_freq': 5,\n 'verbose': 0\n }\n\n self.lb = LabelEncoder()\n\n def train(self):\n # Fit label encoder\n self.lb.fit(pd.concat([self.y_train_array, self.y_test_array], axis=0))\n labels_train = self.lb.transform(self.y_train_array)\n labels_test = self.lb.transform(self.y_test_array)\n\n # Set params\n self.params['num_class'] = len(self.lb.classes_)\n\n # Train model\n dtrain = lgb.Dataset(data=self.X_train_array, label=labels_train)\n self.bst = lgb.train(params=self.params, train_set=dtrain,\n verbose_eval=True)\n\n def _optimize(self):\n # Fit label encoder\n self.lb.fit(pd.concat([self.y_train_array, self.y_test_array], axis=0))\n labels_train = self.lb.transform(self.y_train_array)\n\n # Define parameters search grid\n gridParams = {\n 'task': ['train'],\n 'learning_rate': [0.01],\n 'boosting_type': ['gbdt'],\n 'metric': ['multi_error'],\n 'objective': ['multiclassova','multiclass','cross_entropy'],\n 'feature_fraction': [0.8, 0.9, 1],\n 'subsample': [0.8, 0.9, 1],\n 'max_depth': [3, 4, 7],\n 'min_data_in_leaf': [1, 2, 3, 5, 10],\n 'num_class': [len(self.lb.classes_)],\n }\n grid_params = ParameterGrid(gridParams)\n\n # Perform grid search\n dtrain = lgb.Dataset(data=self.X_train_array, label=labels_train)\n min_error = np.inf\n for params in grid_params:\n print()\n print(\"TESTED PARAMS : %s\" % params)\n cv_results = lgb.cv(params=params,\n train_set=dtrain,\n num_boost_round=10000,\n verbose_eval=True,\n nfold=2,\n stratified=True,\n early_stopping_rounds=30)\n\n params[\"num_boost_round\"] = len(list(cv_results.values())[0])\n error_key = [key for key in cv_results.keys() if re.match(r'.*-mean$', key)][0]\n current_error = cv_results[error_key][-1]\n print(\"CURRENT ERROR : %s\" % current_error)\n min_error = current_error if min_error is None else min_error\n if current_error < min_error:\n min_error = current_error\n print(\"ERROR DIMINUTION : %s\" % current_error)\n print(\"MIN ERROR : %s\" % min_error)\n self.params = 
params\n self.best_score = min_error\n print(\"BEST CURRENT PARAMETERS %s\" % self.params)\n print()\n self.save()\n\n def score(self):\n dtest = self.X_test_array\n pred = self.lb.inverse_transform(np.argsort(self.bst.predict(dtest), axis=1)[:, ::-1][:, 0]).tolist()\n truth = self.y_test_array.tolist()\n\n print(accuracy_score(pred, truth))\n\n return pd.DataFrame({\"pred\": pred, \"truth\": truth})\n\n def predict_from_feature_array(self, feature_array):\n return self.bst.predict(feature_array)\n\n def get_tag(self, text):\n cleaned_text = cs.f_clean_text(BeautifulSoup(text, \"lxml\").get_text())\n df_query = pd.DataFrame({\"text\": [cleaned_text]})\n pred_tag = [(self.lb.inverse_transform(i), pred) for i, pred in enumerate(self.predict(df_query)[0, :])]\n\n return sorted(pred_tag, key=lambda tup: tup[1])[::-1]\n\n\nlgb_model = LgbModel()\n","sub_path":"prediction_models/lgb_model.py","file_name":"lgb_model.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"451359185","text":"#!/usr/bin/env python3\n# Author: Emmanuel Odeke \n\nimport time\nimport multiprocessing\nGCHILD_RLOCK = multiprocessing.RLock()\n\ndef locker(func):\n def __anon(*args, **kwargs):\n # If already held, don't block/wait until lock is released\n # by current holder Just go to the else-clause and the\n # appropriate action will follow\n if GCHILD_RLOCK.acquire(False):\n print('\\033[47mAcquired lock', func, '\\033[00m')\n results = dict()\n try:\n results['data'] = func(*args, **kwargs)\n except Exception as ex:\n results['error'] = ex\n finally:\n # Release the lock\n print('\\033[46mReleasedlock', func, '\\033[00m')\n GCHILD_RLOCK.release()\n return results\n else:\n print('\\033[41mCould not acquire lock. Try again\\033[00m', func)\n return dict(\n needsRetry=True,\n error='Could not acquire lock. Try again later'\n )\n\n return __anon\n\ndef retryable(func, timeout=0.2):\n def __functor(*args, **kwargs):\n results = func(*args, **kwargs)\n if results and hasattr(results, 'get'):\n data = results.get('data', None)\n if data:\n print('Successful response from ', func, data)\n return data\n elif results.get('needsRetry', False):\n print('\\033[33mRetrying after', timeout, ' secs\\033[00m')\n time.sleep(timeout)\n return __functor(*args, **kwargs)\n else:\n msg = \"Couldn't retry as 'get' method undefined for data\"\n return dict(results=results, msg=msg)\n\n return __functor\n","sub_path":"connectable/src/mpUtils/LockUtil.py","file_name":"LockUtil.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"330053095","text":"\"\"\"\nfunctions and classes for resolving digital object identifiers (DOIs).\n\"\"\"\nimport re\nfrom .common import *\nfrom .datacite import DataciteDOIInfo\nfrom .crossref import CrossrefDOIInfo\nfrom .crosscite import CrossciteDOIInfo\nfrom . import common as _comm\n\n_dc_resolver_re = re.compile(r'^https?://[^/]+\\.datacite\\.org/')\n_cr_resolver_re = re.compile(r'^https?://[^/]+\\.crossref\\.org/')\n_cc_resolver_re = re.compile(r'^https?://data\\.crosscite\\.org/')\n\nclass Resolver(object):\n \"\"\"\n a class for resolving DOIs. An instance encapsulates a resolver base \n URL and client/applicaiton identity information. 
\n \"\"\"\n\n def __init__(self, client_info=None, resolver=None, logger=None):\n \"\"\"\n instantiate the resolver\n\n :param 4-tuple client_info: the client/application information \n \"\"\"\n if not client_info and _comm._client_info:\n client_info = tuple(_comm._client_info)\n if client_info is not None and \\\n (not isinstance(client_info, (list, tuple)) or len(client_info) != 4):\n raise TypeError(\"client_info: Not a 4-tuple: \"+str(client_info))\n self._client_info = client_info\n\n if not resolver:\n resolver = default_doi_resolver\n self._resolver = resolver\n\n self._log = logger\n\n def resolve(self, doi):\n \"\"\"\n resolve a DOI to its metadata. This is expected to make one or more \n calls to a web service.\n\n :param str doi: the DOI to resolve. This can be given in any of its \n legal forms including with the \"doi:\" prefix, in URL\n format, or without any prefix.\n \"\"\"\n doi = _comm.strip_DOI(doi, self._resolver)\n url = self._resolver + doi\n\n hdrs = {\"Accept\": CT.Citeproc_JSON}\n ua = get_default_user_agent()\n if ua:\n hdrs['User-Agent'] = ua\n\n # Do a HEAD request on the DOI to examine where it gets forwarded to\n try:\n resp = requests.head(url, headers=hdrs, allow_redirects=False)\n except (requests.ConnectionError,\n requests.HTTPError,\n requests.ConnectTimeout) as ex:\n raise DOICommunicationError(doi, self._resolver, ex)\n except requests.RequestException as ex:\n raise DOIResolverError(doi, self._resolver, cause=ex)\n \n if resp.status_code < 200 or resp.status_code >= 400:\n if resp.status_code == 406:\n raise DOIUnsupportedContentType(CT.Citeproc_JSON, doi,\n self._resolver)\n\n if resp.status_code == 404:\n raise DOIDoesNotExist(doi, self._resolver)\n\n raise DOIResolverError(doi, self._resolver,\n resp.status_code, resp.reason)\n\n # Use the redirect Location URL to determine the source of DOI it is\n loc = resp.headers.get('Location', '')\n info = None\n if resp.status_code < 300:\n # resolver was expected to redirect; instead it responded as if its\n # the source; treat as unknown\n info = DOIInfo(doi, resolver=self._resolver, logger=self._log)\n\n elif _cc_resolver_re.match(loc):\n info = CrossciteDOIInfo(doi, resolver=self._resolver, logger=self._log,\n client_info=self._client_info)\n elif _dc_resolver_re.match(loc):\n info = DataciteDOIInfo(doi, resolver=self._resolver,logger=self._log,\n client_info=self._client_info)\n elif _cr_resolver_re.match(loc):\n info = CrossrefDOIInfo(doi, resolver=self._resolver,logger=self._log,\n client_info=self._client_info)\n else:\n info = DOIInfo(doi, resolver=self._resolver, logger=self._log)\n\n # pre-load the data\n info.data\n\n return info\n\n\ndef resolve(doi, resolver=None, logger=None):\n \"\"\"\n resolve a DOI to its metadata. This is expected to make one or more \n calls to a web service.\n\n :param str doi: the DOI to resolve. This can be given in any of its \n legal forms including with the \"doi:\" prefix, in URL\n format, or without any prefix.\n :param str resolver: the base URL to use as the resolver service. If \n not given, \"https://doi.org/\" is used.\n :param Logger logger: a Logger instance to send debug messages to. \n Generally, the URLs used to retrieve metadata are \n recorded at the debug level.\n :return DOIInfo: a DOI metadata container instance, usually a subclass \n of DOIInfo, specialized for the type of DOI provided\n (e.g. Datacite, Crossref). 
\n \"\"\"\n return Resolver(resolver=resolver, logger=logger).resolve(doi)\n\n","sub_path":"python/nistoar/doi/resolving/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"126150537","text":"import re\nimport csv\nimport math\n\ndata_rows = []\ncat_data_rows_list = []\nwords_set = set()\ncategory_num = 10.0\nfields = []\n\n\n# read the excel file into rows\ndef read_file(file_name):\n with open(file_name, 'rbU') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n global fields\n fields = next(reader)\n for row in reader:\n data_rows.append(row)\n\n\n# check if the value of a cell is number\ndef data_is_number(data, col_idx):\n if type(analyze(data)) is str:\n return False\n if re.search(\"[Yy][Ee][Aa][Rr]\", fields[col_idx]):\n return False\n if re.search(\"[Zz][Ii][Pp][Cc][Oo][Dd][Ee]\", fields[col_idx]):\n return False\n return True\n\n\n# Analyze the string and return the estimate type and value\ndef analyze(string):\n # Percentage\n if string.endswith('%'):\n num_str = string.rstrip('%').replace(',','')\n return float(num_str) / 100\n # Number\n if string.replace(',', '').replace('.', '', 1).isdigit():\n num_str = string.replace(',', '')\n if num_str.rfind('.') < 0:\n return int(num_str)\n else:\n return float(num_str)\n # Default\n return string\n\n\n# Since a lot cell contains float number, we need to categorize them and make the numbers more meaningful\ndef categorize():\n if len(data_rows) > 0:\n mins = []\n maxs = []\n index = 0\n for col in data_rows[0]:\n if col != ' n/a ' and data_is_number(col, index):\n mins.append(analyze(col))\n maxs.append(analyze(col))\n else:\n mins.append(0)\n maxs.append(0)\n index += 1\n for row in data_rows:\n index = 0\n for col in row:\n if col != ' n/a ' and data_is_number(col, index):\n if analyze(col) < mins[index]:\n mins[index] = analyze(col)\n if analyze(col) > maxs[index]:\n maxs[index] = analyze(col)\n index += 1\n for row in data_rows:\n index = 0\n cat_data_row = []\n for word in row:\n if word == ' n/a ':\n cat_data_row.append(word)\n else:\n if data_is_number(word, index):\n col_min = mins[index]\n col_max = maxs[index]\n level = math.floor((analyze(word)-col_min)/((col_max - col_min)/category_num))\n range = str((col_max - col_min)/category_num*level) + \"-\" + str((col_max - col_min)/category_num*(level+1))\n cat_data_row.append(fields[index] + '_' + range)\n else:\n cat_data_row.append(word)\n index += 1\n cat_data_row_set = set(cat_data_row)\n cat_data_rows_list.append(cat_data_row_set)\n\n\n# compute the word collection\ndef compute_word_set():\n for row in cat_data_rows_list:\n for word in row:\n if word not in words_set:\n words_set.add(word)\n\n\n# compute the number of times when the subset appears in the rows\ndef compute_frequency(subset):\n count = 0\n for row in cat_data_rows_list:\n if subset.issubset(row):\n count += 1\n return count\n\n\n# start the apriori algorithm\ndef apriori(min_supp, min_conf):\n high_freq_set_list = compute_high_freq_set_list(min_supp)\n high_conf_set_list = compute_high_conf_ass_list(high_freq_set_list, min_conf)\n return high_conf_set_list\n\n\n# compute the frequent item_set list has larger support than min_support\n# each element in the list is a list of length 2, list[0] is the set itself, list[1] is the support\ndef compute_high_freq_set_list(min_supp):\n min_row_num = len(data_rows)*min_supp\n high_freq_set_list = []\n item_set = []\n 
high_freq_set_list_k = []\n for word in words_set:\n num = compute_frequency(set([word]))\n if num >= min_row_num:\n item_set.append(word)\n high_freq_set_list_k.append([set([word]), float(num)/len(data_rows)])\n while len(high_freq_set_list_k) > 0:\n high_freq_set_list.extend(high_freq_set_list_k)\n high_freq_set_list_k_plus_1 = []\n high_freq_set_set_k_plus_1 = ([])\n for set_list in high_freq_set_list_k:\n for item in item_set:\n if item not in set_list[0]:\n new_s = set(set_list[0])\n new_s.add(item)\n count = compute_frequency(new_s)\n if count >= min_row_num and new_s not in high_freq_set_set_k_plus_1:\n high_freq_set_list_k_plus_1.append([new_s, float(count)/len(data_rows)])\n high_freq_set_set_k_plus_1.append(new_s)\n high_freq_set_list_k = high_freq_set_list_k_plus_1\n return high_freq_set_list\n\n\n# compute the association rules that has higher confidence than min_confidence\ndef compute_high_conf_ass_list(high_freq_set_list, min_conf):\n high_conf_ass_list = []\n for item_set in high_freq_set_list:\n association_list = compute_association_list(item_set)\n for association in association_list:\n conf = compute_conf(association)\n if conf >= min_conf:\n association[2] = conf\n association[3] = item_set[1]\n high_conf_ass_list.append(association)\n return high_conf_ass_list\n\n\n# each association is a list of set of length 4, list[0] stands for the left set and list[1] is the right\n# list[2] is the confidence and list[3] is the support\ndef compute_association_list(item_set):\n association_list = []\n compute_association_list_helper(association_list, set(), item_set[0])\n return association_list\n\n\n# compute all the possible association rules of an item_set\ndef compute_association_list_helper(ass_list, left_set, right_set):\n if len(left_set) > 0 and len(right_set) > 0:\n ass_list.append([left_set, right_set, 0.0, 0.0])\n if len(right_set) > 0:\n for item in right_set:\n new_right_set = set(right_set)\n new_right_set.remove(item)\n new_left_set = set(left_set)\n new_left_set.add(item)\n compute_association_list_helper(ass_list, new_left_set, new_right_set)\n\n\n# compute the confidence of an association rule\ndef compute_conf(association):\n left_count = 0\n right_count = 0\n for row in cat_data_rows_list:\n if association[0].issubset(row):\n left_count += 1\n if association[1].issubset(row):\n right_count += 1\n return float(right_count)/float(left_count)\n","sub_path":"computation.py","file_name":"computation.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"392910084","text":"import numpy as np\nimport csv\nimport sys\n\nfrom validate import validate\n\ndef import_data_and_weights(test_X_file_path, weights_file_path):\n test_X = np.genfromtxt(test_X_file_path, delimiter=',', dtype=np.float64, skip_header=1)\n weights = np.genfromtxt(weights_file_path, delimiter=',', dtype=np.float64)\n return test_X, weights\n\n\ndef predict_target_values(test_X, weights):\n new=np.ones((len(test_X),1))\n test_X=np.hstack((new,test_X))\n predicted_values=np.matmul(test_X,weights)\n return predicted_values\n\ndef write_to_csv_file(pred_Y, predicted_Y_file_name):\n pred_Y = pred_Y.reshape(len(pred_Y), 1)\n with open(predicted_Y_file_name, 'w+', newline='') as csv_file:\n wr = csv.writer(csv_file)\n wr.writerows(pred_Y)\n csv_file.close()\n\ndef predict(test_X_file_path):\n test_X, weights = import_data_and_weights(test_X_file_path, \"WEIGHTS_FILE.csv\")\n pred_Y = predict_target_values(test_X, 
weights)\n write_to_csv_file(pred_Y, \"predicted_test_Y_lr.csv\")\n\n\nif __name__ == \"__main__\":\n test_X_file_path =sys.argv[1];\n predict(test_X_file_path)\n validate(test_X_file_path, actual_test_Y_file_path=\"train_Y_lr.csv\") \n","sub_path":"linear_regression_using_normal_eqn/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420740722","text":"\nfrom nintendo.pia.packet import PIAMessage\nfrom nintendo.nex.nat import NATTraversalClient\nfrom nintendo.common import signal\nimport collections\nimport struct\nimport time\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass NATProbeData(collections.namedtuple(\"NatProbeData\", \"connection_id probe_type system_time\")):\n\tREQUEST = 0\n\tREPLY = 1\n\t\n\tfmt = \">IBxxxQ\"\n\t\n\t@classmethod\n\tdef deserialize(cls, data): return cls(*struct.unpack_from(cls.fmt, data))\n\tdef serialize(self): return struct.pack(self.fmt, *self)\n\t\n\t@staticmethod\n\tdef sizeof(): return 16\n\n\nclass NATTraversalProtocol:\n\n\tPROTOCOL_ID = 0x400\n\t\n\tPORT_PROBE_REQUEST = 1\n\tPORT_PROBE_REPLY = 2\n\tPORT_DUMMY = 3\n\t\n\ton_probe_request = signal.Signal()\n\ton_probe_reply = signal.Signal()\n\n\tdef __init__(self, session):\n\t\tself.session = session\n\t\tself.transport = session.transport\n\t\t\n\t\tself.handlers = {\n\t\t\tself.PORT_PROBE_REQUEST: self.handle_probe_request,\n\t\t\tself.PORT_PROBE_REPLY: self.handle_probe_reply\n\t\t}\n\t\t\n\tdef handle(self, station, message):\n\t\tself.handlers[message.protocol_port](station, message.payload)\n\t\t\n\tdef handle_probe_request(self, station, message):\n\t\tprobe = NATProbeData.deserialize(message)\n\t\tlogger.debug(\"Received probe request (%i, %i)\", probe.connection_id, probe.system_time)\n\t\tself.on_probe_request(station, probe)\n\t\t\n\tdef handle_probe_reply(self, station, message):\n\t\tprobe = NATProbeData.deserialize(message)\n\t\tlogger.debug(\"Received probe reply: (%i, %i)\", probe.connection_id, probe.system_time)\n\t\tself.on_probe_reply(station, probe)\n\t\t\n\tdef send_probe_request(self, station, count=1):\n\t\tlogger.debug(\"Sending NAT probe to %s\", station.address)\n\t\tself.send_probe(station, NATProbeData.REQUEST, self.PORT_PROBE_REQUEST, count)\n\t\t\n\tdef send_probe_reply(self, station, count=1):\n\t\tlogger.debug(\"Sending NAT probe reply to %s\", station.address)\n\t\tself.send_probe(station, NATProbeData.REPLY, self.PORT_PROBE_REPLY, count)\n\t\t\n\tdef send_probe(self, station, probe_type, protocol_port, count=1):\n\t\tfor i in range(count):\n\t\t\tprobe = NATProbeData(self.session.rvcid, probe_type, int(time.time()))\n\t\t\tmessage = PIAMessage()\n\t\t\tmessage.flags = 8\n\t\t\tmessage.protocol_id = self.PROTOCOL_ID\n\t\t\tmessage.protocol_port = protocol_port\n\t\t\tmessage.payload = probe.serialize()\n\t\t\tself.transport.send(station, message)\n\n\t\t\t\nclass NATTraversalMgr:\n\n\tnat_traversal_finished = signal.Signal()\n\n\tdef __init__(self, session):\n\t\tself.backend = session.backend\n\t\n\t\tself.protocol = session.nat_protocol\n\t\tself.protocol.on_probe_request.add(self.handle_probe_request)\n\t\tself.protocol.on_probe_reply.add(self.handle_probe_reply)\n\t\t\n\t\tserver = session.backend.nat_traversal_server\n\t\tserver.handler.initiate_probe.add(self.handle_initiate_probe)\n\t\tself.client = NATTraversalClient(session.backend)\n\t\t\n\t\tself.station_mgr = 
session.station_mgr\n\t\t\n\t\tself.past_traversals = {}\n\t\t\n\tdef init_station(self, url):\n\t\tstation = self.station_mgr.find_by_rvcid(url[\"RVCID\"])\n\t\tif station:\n\t\t\tstation.address = url.get_address()\n\t\telse:\n\t\t\tstation = self.station_mgr.create(url.get_address(), url[\"RVCID\"])\n\t\treturn station\n\t\n\tdef handle_probe_request(self, station, probe):\n\t\tself.protocol.send_probe_reply(station)\n\t\t\n\tdef handle_probe_reply(self, station, probe):\n\t\tlogger.info(\"NAT traversal finished: %s\", station.address)\n\t\tself.past_traversals[station.rvcid] = time.monotonic()\n\t\tself.nat_traversal_finished(station)\n\t\t\n\tdef handle_initiate_probe(self, url):\n\t\tlogger.debug(\"Received NAT probe request for %s\" %url)\n\t\tif url[\"probeinit\"] == 1:\n\t\t\tself.request_probe_initiation(url)\n\t\tstation = self.init_station(url)\n\t\tself.protocol.send_probe_request(station, 3)\n\t\t\n\tdef request_probe_initiation(self, target):\n\t\tlogger.debug(\"Sending probe request to %s\" %target)\n\t\tif target[\"type\"] == 0:\n\t\t\tsource = self.backend.local_station\n\t\telse:\n\t\t\tsource = self.backend.public_station\n\n\t\tsource = source.copy()\n\t\tif target[\"probeinit\"] == 1:\n\t\t\tsource[\"probeinit\"] = 0\n\t\telse:\n\t\t\tsource[\"probeinit\"] = 1\n\n\t\tself.init_station(target)\n\t\tself.client.request_probe_initiation_ext([target], source)\n\t\t\n\tdef report_nat_properties(self, props):\n\t\tself.client.report_nat_properties(\n\t\t\tprops.nat_mapping, props.nat_filtering, props.rtt\n\t\t)\n\t\t\n\tdef start_nat_traversal(self, url):\n\t\trvcid = url[\"RVCID\"]\n\t\tif rvcid in self.past_traversals:\n\t\t\tif time.monotonic() - self.past_traversals[rvcid] < 90:\n\t\t\t\tstation = self.station_mgr.find_by_rvcid(rvcid)\n\t\t\t\tself.nat_traversal_finished(station)\n\t\t\t\treturn\n\n\t\tlogger.info(\"Starting NAT traversal for %s\" %url)\n\t\turl = url.copy()\n\t\turl[\"probeinit\"] = 0\n\t\tself.request_probe_initiation(url)\n","sub_path":"nintendo/pia/nattraversal.py","file_name":"nattraversal.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"437104611","text":"def check(n):\n if n == 1:\n print(\"Hooray\")\n elif (n*n-n)%(2*n-2)!=0:\n print(\"Boo\")\n else:\n print(\"Hooray\")\n doof(n)\n\n\ndef doof(n):\n arr = [[0 for i in range(0, n)] for j in range(0, n)]\n maax = 2*n-1;max_count = (n*n-n)/(2*n-2)-1;\n arr[0][0] = maax\n row_val = maax -1\n col_val = n - 1\n for danger in range(1,n):\n arr[0][danger] = row_val\n arr[danger][0] = col_val\n avail = [i for i in range(1, n)]\n avail.remove(danger)\n count = 0\n while count < max_count:\n i,j = generate(arr,avail)\n arr[i][j] = row_val;arr[j][i] = col_val\n avail.remove(i);avail.remove(j)\n count+=1\n row_val -= 1;col_val -= 1\n for x in range(0, n):\n for y in range(0, n):\n if x == y:\n print(maax,end=' ')\n else:\n print(arr[x][y], end=' ')\n print(\"\\t\")\n\n\ndef generate(arr,avail):\n for m in range(0,len(avail)):\n for n in range(m+1,len(avail)):\n if arr[avail[m]][avail[n]]==0:\n return avail[m],avail[n]\n\n\ntest = int(input())\nfor _ in range(0,test):\n N = int(input())\n check(N)\n\n\n\n\n","sub_path":"python-codechef/scratch_10.py","file_name":"scratch_10.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169473766","text":"n = int(input(\"Input integer 2 or greater: \"))\nf = 2\n\nprint(\"The 
prime factors of\",n,\"are: \")\nwhile f <= n:\n if(n%f == 0):\n print(f)\n n /= f\n else:\n f += 1","sub_path":"task_3/76.py","file_name":"76.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589168059","text":"#!/usr/bin/python\n\n#\n# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.\n#\n\n\"\"\"\nThis file containamespace sanity test for all major workflows supported by\nfabric ansible\n\"\"\"\nimport logging\nimport pprint\nimport time\nimport os\nimport sys\n\nfrom cfgm_common.exceptions import (\n RefsExistError,\n NoIdError\n)\nfrom vnc_api.vnc_api import VncApi\nfrom vnc_api.gen.resource_client import Fabric\nfrom vnc_api.gen.resource_client import FabricNamespace\n\n\n# pylint: disable=E1101\nclass SanityBase(object):\n \"\"\"Base class for fabric ansible sanity tests\"\"\"\n\n @staticmethod\n def _init_logging(name):\n logger = logging.getLogger('sanity_test')\n logger.setLevel(logging.DEBUG)\n\n file_handler = logging.FileHandler(\n '/var/log/fabric_ansibile_%s.log' % name, mode='w')\n file_handler.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S')\n file_handler.setFormatter(formatter)\n console_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger\n # end _init_logging\n\n def test(self):\n \"\"\"Override this method in the derived class\"\"\"\n pass\n\n def __init__(self, name, password):\n if password is None:\n raise KeyError(\"Missing required args: password\")\n\n self._name = name\n self._logger = SanityBase._init_logging(name)\n self._api = VncApi(\n username='admin', password=password, tenant_name='admin')\n # end __init__\n\n def create_fabric(self, fab_name, prouter_passwords):\n \"\"\"create fabric with list of device passwords\"\"\"\n fab = None\n try:\n self._logger.info('Creating fabric: %s', fab_name)\n fq_name = ['default-global-system-config', fab_name]\n fab = Fabric(\n name=fab_name,\n fq_name=fq_name,\n parent_type='global-system-config',\n fabric_credentials={\n 'device_credential': [{\n 'credential': {\n 'username': 'root', 'password': passwd\n },\n 'vendor': 'Juniper',\n 'device_family': None\n } for passwd in prouter_passwords]\n }\n )\n fab_uuid = self._api.fabric_create(fab)\n fab = self._api.fabric_read(id=fab_uuid)\n except RefsExistError:\n self._logger.warn(\"Fabric '%s' alread exists\", fab_name)\n fab = self._api.fabric_read(fq_name=fq_name)\n\n self._logger.debug(\n \"Fabric created:\\n%s\",\n pprint.pformat(self._api.obj_to_dict(fab), indent=4))\n return fab\n # end _create_fabric\n\n def add_mgmt_ip_namespace(self, fab, mgmt_ipv4_cidr):\n \"\"\"add management ip prefixes as fabric namespace\"\"\"\n try:\n ns_name = 'mgmt_ip-' + mgmt_ipv4_cidr\n self._logger.info(\n 'Adding management ip namespace \"%s\" to fabric \"%s\" ...',\n ns_name, fab.name)\n\n ip_prefix = mgmt_ipv4_cidr.split('/')\n ns_fq_name = fab.fq_name + [ns_name]\n namespace = FabricNamespace(\n name=ns_name,\n fq_name=ns_fq_name,\n parent_type='fabric',\n fabric_namespace_type='IPV4-CIDR',\n fabric_namespace_value={\n 'ipv4_cidr': {\n 'subnet': [{\n 'ip_prefix': ip_prefix[0],\n 'ip_prefix_len': ip_prefix[1]\n }]\n },\n }\n )\n namespace.set_tag_list([{'to': ['label=fabric-management_ip']}])\n ns_uuid = 
self._api.fabric_namespace_create(namespace)\n namespace = self._api.fabric_namespace_read(id=ns_uuid)\n except RefsExistError:\n self._logger.warn(\n \"Fabric namespace '%s' alread exists\", ns_name)\n namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)\n\n self._logger.debug(\n \"Fabric namespace created:\\n%s\",\n pprint.pformat(self._api.obj_to_dict(namespace), indent=4))\n return namespace\n # end _add_mgmt_ip_namespace\n\n def add_asn_namespace(self, fab, asn):\n \"\"\"add AS number as fabric namespace\"\"\"\n try:\n ns_name = \"asn_%d\" % asn\n self._logger.info(\n 'Adding ASN namespace \"%s\" to fabric \"%s\" ...',\n ns_name, fab.name)\n\n ns_fq_name = fab.fq_name + [ns_name]\n namespace = FabricNamespace(\n name=ns_name,\n fq_name=ns_fq_name,\n parent_type='fabric',\n fabric_namespace_type='ASN',\n fabric_namespace_value={\n 'asn': {\n 'asn': [asn]\n }\n }\n )\n namespace.set_tag_list([{'to': ['label=fabric-as_number']}])\n ns_uuid = self._api.fabric_namespace_create(namespace)\n namespace = self._api.fabric_namespace_read(id=ns_uuid)\n except RefsExistError:\n self._logger.warn(\n \"Fabric namespace '%s' alread exists\", ns_name)\n namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)\n\n self._logger.debug(\n \"Fabric namespace created:\\n%s\",\n pprint.pformat(self._api.obj_to_dict(namespace), indent=4))\n return namespace\n # end _add_asn_namespace\n\n def cleanup_fabric(self, fab_name):\n \"\"\"delete fabric including all prouters in the fabric\"\"\"\n try:\n self._logger.info('Deleting fabric \"%s\" ...', fab_name)\n fab_fqname = ['default-global-system-config', fab_name]\n fab = self._api.fabric_read(fq_name=fab_fqname)\n\n # delete all namespaces in this fabric\n fab_namespaces = self._api.fabric_namespaces_list(\n parent_id=fab.uuid)\n for namespace in fab_namespaces.get('fabric-namespaces') or []:\n self._logger.debug(\n \"Delete namespace: %s\", namespace.get('fq_name'))\n self._api.fabric_namespace_delete(namespace.get('fq_name'))\n\n # delete fabric\n self._logger.debug(\"Delete fabric: %s\", fab_fqname)\n self._api.fabric_delete(fab_fqname)\n\n # delete all prouters in this fabric\n for prouter in fab.get_physical_router_refs() or []:\n self._delete_prouter(prouter.get('uuid'))\n\n except NoIdError:\n self._logger.warn('Fabric \"%s\" not found', fab_name)\n # end cleanup_fabric\n\n def _delete_prouter(self, uuid):\n prouter = self._api.physical_router_read(id=uuid)\n\n # delete all physical and logical interfaces\n ifds = self._api.physical_interfaces_list(parent_id=uuid)\n for ifd in ifds.get('physical-interfaces') or []:\n # delete all child logical interfaces\n ifls = self._api.logical_interfaces_list(parent_id=ifd.get('uuid'))\n for ifl in ifls.get('logical-interfaces') or []:\n self._logger.debug(\n \"Delete logical interface: %s\", ifl.get('fq_name'))\n self._api.logical_interface_delete(ifl.get('fq_name'))\n\n # delete the physical interface\n self._logger.debug(\n \"Delete physical interface: %s\", ifd.get('fq_name'))\n self._api.physical_interface_delete(ifd.get('fq_name'))\n\n # delete the prouter\n self._logger.debug(\n \"Delete physical router: %s\", prouter.get_fq_name())\n self._api.physical_router_delete(prouter.get_fq_name())\n\n # delete corresponding bgp routers\n for bgp_router_ref in prouter.get_bgp_router_refs() or []:\n self._logger.debug(\n \"Delete bgp router: %s\", bgp_router_ref.get('to'))\n self._api.bgp_router_delete(bgp_router_ref.get('to'))\n # end _delete_prouter\n\n def _wait_for_job_to_finish(self, job_name, 
job_pid):\n try:\n while True:\n time.sleep(1)\n os.kill(int(job_pid), 0)\n except OSError:\n self._logger.debug(\"%s job '%s' finished\", job_name, job_pid)\n # end _wait_for_job_to_finish\n\n def discover_fabric_device(self, fab):\n \"\"\"Discover all devices specified by the fabric management namespaces\n \"\"\"\n self._logger.info('Discover devices in fabric \"%s\" ...', fab.fq_name)\n job_execution_info = self._api.execute_job(\n job_template_fq_name=[\n 'default-global-system-config', 'discover_device_template'],\n job_input={'fabric_uuid': fab.uuid}\n )\n\n job_pid = job_execution_info.get('job_manager_process_id')\n self._logger.debug(\n \"Device discovery job started with execution id: %s\", job_pid)\n self._wait_for_job_to_finish('Device discovery', job_pid)\n\n fab = self._api.fabric_read(fab.fq_name)\n discovered_prouter_refs = fab.get_physical_router_refs()\n self._logger.debug(\n \"Disovered devices:\\n%s\",\n pprint.pformat(discovered_prouter_refs, indent=4))\n\n msg = \"Discovered following devices in fabric '%s':\" % fab.fq_name\n discovered_prouters = []\n for prouter_ref in discovered_prouter_refs:\n prouter = self._api.physical_router_read(prouter_ref.get('to'))\n discovered_prouters.append(prouter)\n msg += \"\\n - %s (%s)\" % (\n prouter.name, prouter.physical_router_management_ip)\n\n self._logger.info(msg)\n return discovered_prouters\n # end discover_fabric_device\n\n def device_import(self, prouters):\n \"\"\"import device inventories for the prouters specified in the\n argument\"\"\"\n self._logger.info(\"Import all discovered prouters in the fabric ...\")\n job_execution_info = self._api.execute_job(\n job_template_fq_name=[\n 'default-global-system-config', 'device_import_template'],\n job_input={},\n device_list=[prouter.uuid for prouter in prouters]\n )\n\n job_pid = job_execution_info.get('job_manager_process_id')\n self._logger.debug(\n \"device import job started with execution id: %s\", job_pid)\n self._wait_for_job_to_finish('Device discovery', job_pid)\n\n for prouter in prouters:\n ifd_refs = self._api.physical_interfaces_list(\n parent_id=prouter.uuid)\n self._logger.info(\n \"Imported %d physical interfaces to prouter: %s\",\n len(ifd_refs.get('physical-interfaces')), prouter.name)\n # end device_import\n\n def underlay_config(self, prouters):\n \"\"\"deploy underlay config to prouters in the fabric ...\"\"\"\n self._logger.info(\"Deploy underlay config to prouters in fabric ...\")\n job_execution_info = self._api.execute_job(\n job_template_fq_name=[\n 'default-global-system-config', 'generate_underlay_template'],\n job_input={\n 'enable_lldp': 'true'\n },\n device_list=[prouter.uuid for prouter in prouters]\n )\n\n job_pid = job_execution_info.get('job_manager_process_id')\n self._logger.debug(\n \"device import job started with execution id: %s\", job_pid)\n self._wait_for_job_to_finish('Device discovery', job_pid)\n # end underlay_config\n\n def _exit_with_error(self, errmsg):\n self._logger.error(errmsg)\n sys.exit(1)\n # end _exit_with_error\n\n#end SanityBase class\n","sub_path":"src/config/fabric-ansible/ansible-playbooks/test/sanity/sanity_base.py","file_name":"sanity_base.py","file_ext":"py","file_size_in_byte":11957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522918425","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# PROGRAMMER: Jeffrey Stocker\n# DATE CREATED: 4-11-2020\n# REVISED DATE: 4-11-2020\n# PURPOSE: To be able to track times in a program.\n# Inputs:\n# 
-Discription: discription of start of timer\n# Output:\n# -New class of Timer\n#\n##\n\nimport time\n\nclass Timer:\n def __init__(self, discription = ''):\n self.ended = False\n self.times = []\n self.stop_time = None\n self.start_time = self.mark(discription)\n\n def mark(self, discription = ''):\n if not self.ended:\n currentTime = time.time()\n self.times.append((time, discription))\n else:\n currentTime = None\n return currentTime\n\n def stop(self, discription = ''):\n if not self.ended:\n endTime = self.mark(discription)\n self.stop_time = endTime\n self.ended = True\n return self.delta()\n else:\n return None\n\n def delta(self, roundInt = 2):\n if self.start_time and self.stop_time:\n return round(float(self.stop_time - self.start_time), roundInt)\n else:\n return None\n\n def output_final(self):\n if self.ended:\n delta = self.delta()\n\n hours = str( int( (delta / 3600) ) )\n minutes = str( int( ( (delta % 3600) % 60 ) ) )\n seconds = str( int( ( (delta % 3600) / 60 ) ) )\n\n return hours + \":\" + minutes + \":\" + seconds\n else:\n return 'Not Ended Yet'\n","sub_path":"Timer.py","file_name":"Timer.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"498437042","text":"#!/usr/bin/python3\n\"\"\"New engine for saving data: DBStorage\"\"\"\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom models.state import State\nfrom models.city import City\nfrom models.user import User\nfrom models.place import Place\nfrom models.amenity import Amenity\nfrom models.review import Review\nfrom models.base_model import Base\nimport os\n\n\nuser = os.getenv(\"HBNB_MYSQL_USER\")\npasswd = os.getenv(\"HBNB_MYSQL_PWD\")\nhost = os.getenv(\"HBNB_MYSQL_HOST\")\ndb = os.getenv(\"HBNB_MYSQL_DB\")\nhbnb_env = os.getenv(\"HBNB_ENV\")\n\n\nclass DBStorage:\n \"\"\"Database mode of storage (DB engine)\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n \"\"\"Init method\"\"\"\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(user, passwd, host, db),\n pool_pre_ping=True)\n if hbnb_env == \"test\":\n Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n \"\"\"\n Query on the current database session (self.__session)\n all objects depending of the class name (argument cls)\n \"\"\"\n my_dict = {} # [class.id: {obj}\n if cls is None:\n query = self.__session.query(User, State, City,\n Amenity, Place,\n Review).all()\n for inst in query:\n key = inst.__class__.__name__ + \".\" + inst.id\n my_dict[key] = inst\n\n else:\n query = self.__session.query(cls).all()\n # A list of objects class cls\n\n for inst in query:\n key = inst.__class__.__name__ + \".\" + inst.id\n my_dict[key] = inst\n\n return my_dict\n\n def new(self, obj):\n \"\"\"Adds new object to db storage\"\"\"\n self.__session.add(obj)\n\n def save(self):\n \"\"\"commit all changes of the current database session\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete from the current database session obj if not None\"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\"Loads storage dictionary from database\"\"\"\n Base.metadata.create_all(self.__engine)\n Session = scoped_session(sessionmaker(\n bind=self.__engine, expire_on_commit=False))\n self.__session = 
Session()\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"490728729","text":"__author__ = 'Hao'\n\nDELIMITER = ''\n\n\n# preprocess regex string\ndef preprocess_regex(regex_str):\n regex_char_list = list(regex_str)\n regex_char_list = process_backslashes(regex_char_list)\n regex_char_list = process_square_brackets(regex_char_list)\n regex_char_list = process_curly_brackets(regex_char_list)\n regex_char_list = process_concat(regex_char_list)\n return regex_char_list\n\n\n# separate units in regex_str by adding a delimiter between them\ndef process_concat(char_list):\n result = []\n for pos in range(len(char_list) - 1):\n cur_char = char_list[pos]\n post_char = char_list[pos + 1]\n result.append(cur_char)\n if is_input(cur_char) or is_repeat_operator(cur_char) or is_question_mark(cur_char) or is_right_parenthesis(\n cur_char):\n if is_left_parenthesis(post_char) or is_input(post_char):\n result.append(DELIMITER)\n result.append(char_list[-1])\n return result\n\n\n# support using backslashes to escape operator chars\ndef process_backslashes(char_list):\n # for '\\d', substitute '\\d' with '(0|1|2|...)'\n def process_escape_digit(result):\n result.append('(')\n for digit in range(10):\n result.append(str(digit))\n result.append('|')\n result.pop()\n result.append(')')\n\n # for '\\w', substitute '\\w' with '(0|1|...a|b|...A|B|...Z|_)'\n def process_escape_alphanumeric(result):\n result.append('(')\n for digit in range(10):\n result.append(str(digit))\n result.append('|')\n for lowercase_num in range(ord('a'), ord('z') + 1, 1):\n result.append(chr(lowercase_num))\n result.append('|')\n for uppercase_num in range(ord('A'), ord('Z') + 1, 1):\n result.append(chr(uppercase_num))\n result.append('|')\n result.append('_')\n result.append(')')\n\n # for '\\n'\n def process_escape_new_line(result):\n result.append('\\\\')\n result.append('n')\n\n # for '\\t'\n def process_escape_tab(result):\n result.append('\\\\')\n result.append('t')\n\n # for '\\r' (for Windows)\n def process_escape_carriage_return(result):\n result.append('\\\\')\n result.append('r')\n\n # for all whitespace, [space, tab, newline, carriagereturn]\n def process_escape_whitespace(result):\n result.append('(')\n # for space ' '\n result.append(' ')\n result.append('|')\n process_escape_tab(result)\n result.append('|')\n process_escape_new_line(result)\n result.append('|')\n process_escape_carriage_return(result)\n result.append(')')\n\n escape_options = {r'\\d': process_escape_digit,\n r'\\w': process_escape_alphanumeric,\n r'\\s': process_escape_whitespace,\n r'\\t': process_escape_tab,\n r'\\n': process_escape_new_line,\n r'\\r': process_escape_carriage_return}\n result = []\n pos = 0\n while pos < len(char_list) - 1:\n cur_char = char_list[pos]\n post_char = char_list[pos + 1]\n if cur_char == '\\\\':\n escape_str = cur_char + post_char\n if escape_str in escape_options.keys():\n escape_options[escape_str](result)\n else:\n result.append(escape_str)\n pos += 1\n else:\n result.append(cur_char)\n pos += 1\n if pos == len(char_list) - 1:\n result.append(char_list[-1])\n return result\n\n\n# substitute square brackets with fundamental '|' operator\ndef process_square_brackets(char_list):\n def find(target, source):\n for pos, elem in enumerate(source):\n if elem == target:\n return pos\n return -1\n\n # find unit from start_pos, return end_pos\n def 
find_unit(start_pos, source):\n num_right_parenthesis = 0\n for pos in range(start_pos, len(source), 1):\n ch = source[pos]\n if is_right_parenthesis(ch):\n num_right_parenthesis += 1\n if is_left_parenthesis(ch):\n num_right_parenthesis -= 1\n if num_right_parenthesis == 0:\n return pos\n\n # deal with range of sequential chars\n def convert_dash_to_range(chars_in_brackets):\n dash_pos = find('-', chars_in_brackets)\n if dash_pos != -1:\n left_char_ascii = ord(chars_in_brackets[dash_pos - 1])\n right_char_ascii = ord(chars_in_brackets[dash_pos + 1])\n result = chars_in_brackets[:dash_pos - 1]\n for ascii_num in range(left_char_ascii, right_char_ascii):\n result += chr(ascii_num)\n result += chars_in_brackets[dash_pos + 1:]\n return convert_dash_to_range(result)\n return chars_in_brackets\n\n left_bracket_pos = find('[', char_list)\n if left_bracket_pos != -1:\n right_bracket_pos = find(']', char_list)\n chars_in_brackets = char_list[left_bracket_pos + 1: right_bracket_pos]\n chars_in_brackets = convert_dash_to_range(chars_in_brackets)\n substitution = list()\n substitution.append(\"(\")\n # deal with it unit by unit, NOT char by char\n unit_start_pos = 0\n while unit_start_pos < len(chars_in_brackets):\n unit_end_pos = find_unit(unit_start_pos, chars_in_brackets)\n substitution.extend(chars_in_brackets[unit_start_pos: unit_end_pos + 1])\n substitution.append('|')\n unit_start_pos = unit_end_pos + 1\n # eliminate last '|', and add right parenthesis\n substitution.pop()\n substitution.append(\")\")\n result = char_list[:left_bracket_pos] + substitution + char_list[right_bracket_pos + 1:]\n return process_square_brackets(result) # make sure deal with all square brackets\n return char_list\n\n\n# deal with curly brackets by extending repeated units\ndef process_curly_brackets(char_list):\n def find(target, source):\n for pos, elem in enumerate(source):\n if elem == target:\n return pos\n return -1\n\n # backwardly find the unit that should be repeated\n def find_unit_backwardly(start_pos, source):\n num_right_parenthesis = 0\n for pos in range(start_pos, -1, -1):\n ch = source[pos]\n if is_right_parenthesis(ch):\n num_right_parenthesis += 1\n if is_left_parenthesis(ch):\n num_right_parenthesis -= 1\n if num_right_parenthesis == 0:\n return pos\n\n def get_repetition_num(num_list):\n num_repetition = 0\n for ch in num_list:\n if ch.isdigit():\n num_repetition = 10 * num_repetition + int(ch)\n return num_repetition\n\n def get_repetition_range(range_list):\n comma_pos = find(\",\", range_list)\n min_repetition = get_repetition_num(range_list[:comma_pos])\n max_repetition = get_repetition_num(range_list[comma_pos + 1:])\n return min_repetition, max_repetition\n\n left_curly_bracket_pos = find('{', char_list)\n if left_curly_bracket_pos != -1:\n right_curly_bracket_pos = find('}', char_list)\n repetition_start_pos = find_unit_backwardly(left_curly_bracket_pos - 1, char_list)\n repetition_unit = char_list[repetition_start_pos: left_curly_bracket_pos]\n repetition_str = char_list[left_curly_bracket_pos + 1: right_curly_bracket_pos]\n # for range of repetitions\n if \",\" in repetition_str:\n min_repetition, max_repetition = get_repetition_range(repetition_str)\n result = char_list[:repetition_start_pos]\n result.append(\"(\") # include () for | to ensure correct priority\n for num in range(min_repetition, max_repetition + 1):\n if num == 0:\n result += repetition_unit\n result.append('?')\n else:\n result += repetition_unit * num\n result.append(\"|\")\n result.pop() # pop the last \"|\"\n 
result.append(\")\")\n result += char_list[right_curly_bracket_pos + 1:]\n return process_curly_brackets(result)\n else: # for specific num of repetitions\n num_repetition = get_repetition_num(repetition_str)\n result = char_list[:repetition_start_pos] + repetition_unit * num_repetition + \\\n char_list[right_curly_bracket_pos + 1:]\n return process_curly_brackets(result)\n return char_list\n\n\n# check if char is a valid input\ndef is_input(char_input):\n return not is_operator(char_input)\n\n\n# check if char is defined operator\ndef is_operator(char_input):\n return char_input in ['|', '*', '+'] or is_question_mark(char_input) or is_left_parenthesis(char_input) \\\n or is_right_parenthesis(char_input) or is_delimiter(char_input)\n\n\ndef is_repeat_operator(char_input):\n return char_input in ['*', '+']\n\n\ndef is_question_mark(char_input):\n return char_input == '?'\n\n\ndef is_left_parenthesis(char_input):\n return char_input == '('\n\n\ndef is_right_parenthesis(char_input):\n return char_input == ')'\n\n\ndef is_delimiter(char_input):\n return char_input == DELIMITER","sub_path":"preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"498009411","text":"#coding:utf8\nimport time,shutil\nimport torch\nfrom torch import nn\n\nclass BasicModule(nn.Module):\n def __init__(self):\n super(BasicModule, self).__init__()\n self.model_name = str(type(self))# 默认名称\n\n def load(self, path):\n self.load_state_dict(torch.load(path))\n\n def save(self, is_best, name = None):\n if name is None:\n prefix = 'checkpoints/' + self.model_name + '_'\n name = time.strftime(prefix + '%m%d_%H:%M:%S.pth')\n torch.save(self.state_dict(), name)\n if is_best:\n shutil.copyfile(name, 'checkpoints/model_best.pth')","sub_path":"models/BasicModule.py","file_name":"BasicModule.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"29092133","text":"import numpy as np\nfrom time import sleep\nfrom planner import Request\nfrom collections import namedtuple\n\nPosition = namedtuple(\n \"Position\", (\"x\", \"y\")\n)\nVolume = namedtuple(\n \"Volume\", (\"length\",\"width\",\"height\")\n)\n\ndef source(address, map_size, requestCon, web3, simulation_ratio, number_of_cycles, k, theta):\n # Standard gamma distribution for time (mean=k*theta, var=k*theta^2)\n # Mean: 10 min\n #k = 600\n #theta = 1.\n # Distribution of weights of packages\n w_mean = 5\n w_dev = 0\n # Distribution in city location\n # source\n xs_mean = int((map_size[0] + map_size[1]) / 2)\n xs_dev = int((map_size[1] - map_size[0]) / 10)\n ys_mean = int((map_size[2] + map_size[3]) / 2)\n ys_dev = int((map_size[3] - map_size[2]) / 10)\n # destination\n xd_mean = int((map_size[0] + map_size[1]) / 2)\n xd_dev = int((map_size[1] - map_size[0]) / 10)\n yd_mean = int((map_size[2] + map_size[3]) / 2)\n yd_dev = int((map_size[3] - map_size[2]) / 10)\n \n feeDistanceRatio = requestCon.functions.getFeeDistanceRatio().call()\n \n while requestCon.functions.sequence().call() <= number_of_cycles:\n request = Request(Position(int(np.random.normal(xs_mean,xs_dev)),int(np.random.normal(ys_mean,ys_dev))), \\\n Position(int(np.random.normal(xd_mean,xd_dev)),int(np.random.normal(yd_mean,yd_dev))), \\\n int(np.random.normal(w_mean,w_dev)))\n \n value = int(feeDistanceRatio * np.ceil(np.sqrt((request.source.x - request.destination.x)**2 + 
(request.source.y - request.destination.y)**2)))\n \n tx_hash = requestCon.functions.newRequest(request.source.x, request.source.y,\\\n request.destination.x, request.destination.y,\\\n request.weight,\\\n request.volume.length, request.volume.height, request.volume.width).transact({'from':address,'value':value})\n print('Request sent: Source (',request.source.x,',',request.source.y,')','Destination (',request.destination.x,',',request.destination.x,')')\n web3.eth.waitForTransactionReceipt(tx_hash)\n next_time = np.random.gamma(k,theta)\n sleep(next_time / simulation_ratio)\n ","sub_path":"PlanningCustodian_3/Simulation/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"397055228","text":"import os, time\nfrom Tkinter import *\n\ndef addContact(): # ADD A NEW CONTACT IF THEY DON'T EXIST\n info, name = \"%s %s %s\" % (newAddr.get(), newNum.get(), newEmail.get()), newName.get() # SETS NEW CONTACT DATA\n clearNewContactFields()\n if not findContact(name): # DOES CONTACT EXIST?\n if writeFile(CONTACTS_FILE, \"%s\\t%s\\n\" % (name, info), APPEND): log(name, \"contact created\") # LOG STHIS CONTACT'S CREATION\n loadContacts() # REFRESHES OUR LIST\n contactLbl[\"text\"] = \"Contact \\\"%s\\\" created\" % (name) # TELLS THE USER\n return\n contactLbl[\"text\"] = \"Contact exists\" # TELLS THE USER\n return\n\ndef clearNewContactFields(): # CLEAR ALL INPUT FIELDS\n searchNpt.delete(0, len(searchNpt.get()))\n newName.delete(0, len(newName.get()))\n newAddr.delete(0, len(newAddr.get()))\n newNum.delete(0, len(newNum.get()))\n newEmail.delete(0, len(newEmail.get()))\n return\n\ndef deleteContact(): # delete a contact\n name = searchNpt.get()\n if findContact(name): # contact exists\n select(name) # selects contact\n contents = \"\"\n for c in blackBook.items(): # updates contents\n if not c[0] == current: contents += \"%s\\t%s\\n\" % (c[0], c[1]) # updates contents if c isn't current\n if writeFile(CONTACTS_FILE, contents, WRITE_BINARY): log(name, \"contact deleted\") # logs the deletion\n loadContacts() # REFRESH OUR blackBook\n contactLbl[\"text\"] = \"Contact \\\"%s\\\" deleted\" % (name) # tell user\n else: contactLbl[\"text\"] = \"Contact unfound\"\n return\n\ndef findContact(name): # return IF CONTACT WITH NAME name EXISTS\n if blackBook.has_key(name): return True\n else: contactLbl[\"text\"] = \"Contact unfound\"\n return False\n\ndef install(): # INSTALLS HELPER FILES\n if not isReadable(LOG_FILE): log(LOG_FILE, \"log created\")\n if not isReadable(CONTACTS_FILE):\n writeFile(CONTACTS_FILE, \"\", WRITE_BINARY)\n return\n\ndef loadContacts(): # REFRESHES OUR blackBook\n install()\n global blackBook, contacts\n blackBook, contacts = {}, readFile(CONTACTS_FILE, READ_BINARY)\n for name, info in contacts: blackBook[name] = info\n return\n\ndef readFile(path, mode): # READS FILE AT path\n contents = []\n try:\n with open(path, mode) as f:\n for line in f: contents.append(tuple(line.split(\"\\t\")))\n except: pass\n return contents\n\ndef search():\n loadContacts()\n name = searchNpt.get()\n if findContact(name): select(name)\n log(name, \"search\")\n return\n\ndef select(name): # SELECTS CONTACT WITH name NAME\n global current\n current, contactLbl[\"text\"] = name, (\"%s %s\" % (name, blackBook[name])).strip()\n return\n\ndef writeFile(path, contents, mode): # WRITES TO FILE AT path\n try:\n with open(path, mode) as f: f.write(contents) # WRITES contents TO FILE AT 
path\n return True\n except e:\n writeFile(LOG_FILE, \"%s\\t%s\\tfile write error%s\" % (time.time(), path, str(e))) # (HOPEFULLY) WRITES writeFile() ERROR LOG TO LOG_FILE\n return False\n pass\n\n#VARIABLES\nisReadable, log = lambda path: os.access(path, os.R_OK), lambda a, b: writeFile(LOG_FILE, \"%s\\t%s\\t%s\\n\" % (time.time(), a, b), APPEND)\nblackBook, contacts, current, LOCAL = None, None, None, \"./\"\nAPPEND, READ, READ_BINARY, WRITE, WRITE_BINARY = \"a\", \"r\", \"rb\",\"w\", \"wb\"\nCONTACTS_FILE, LOG_FILE = os.path.join(LOCAL, \"blackbook_contacts.txt\"), os.path.join(LOCAL, \"blackbook_log.txt\")\n\n#WINDOW\nwindow = Tk()\nwindow.wm_title(\"BlackBook\")\n\n#FRAMES\ntop = Frame(window)\nspace1 = Frame(window, height=4)\nmid = Frame(window)\nspace2 = Frame(window, height=4)\nbottom = Frame(window)\nbottomLeft = Frame(bottom)\nbottomRight = Frame(bottom)\n\n#WIDGETS\ncontactLbl, loadBtn = Label(top, fg=\"blue\", text=\"\"), Button(top, command=loadContacts, text=\"Load Contacts\")\nsearchLbl, searchNpt, searchBtn, delBtn = Label(mid, text=\"Name: \"), Entry(mid, bg=\"white\", width=20), Button(mid, command=search, text=\"Find!\"), Button(mid, command=deleteContact, text=\"Delete\")\nnewLbl, newBtn = Label(bottomLeft, text=\"Name\\nAddress\\nPhone\\nEmail\"), Button(bottom, command=addContact, text=\"Add!\")\nnewName, newAddr, newNum, newEmail = Entry(bottomRight, bg=\"white\", width=20), Entry(bottomRight, bg=\"white\", width=20), Entry(bottomRight, bg=\"white\", width=20), Entry(bottomRight, bg=\"white\", width=20)\n\n#PACKAGING\ncontactLbl.pack()\nsearchLbl.pack(side = LEFT)\nsearchNpt.pack(side = LEFT)\nsearchBtn.pack(side = LEFT)\ndelBtn.pack(side = RIGHT)\nnewLbl.pack()\nnewBtn.pack(side = BOTTOM)\nnewName.pack()\nnewAddr.pack()\nnewNum.pack()\nnewEmail.pack()\ntop.pack()\nspace1.pack()\nmid.pack()\nspace2.pack()\nbottomLeft.pack(side = LEFT)\nbottomRight.pack(side = RIGHT)\nbottom.pack()\n\n#REST\nwindow.mainloop()\n","sub_path":"BlackBook.py","file_name":"BlackBook.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"577479627","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\n\nfrom tensor_toolbox.model.model_base import BaseModel\n\nclass LinearSVM(BaseModel):\n def __init__(self, task_name, model_name, num_output, mode='train'):\n super(SimpleModel, self).__init__(task_name, model_name, mode)\n self.num_output = num_output\n \n\n def loss(self, logits, target_labels):\n num_train = int(logits.get_shape()[0])\n \n \n \n correct_class_scores = scores[range(num_train), y]\n margins = np.maximum(0, scores - correct_class_scores[:, np.newaxis] + 1.0)\n margins[range(num_train), y] = 0\n loss_cost = np.sum(margins) / num_train\n return tf.reduce_mean(batch_loss)\n \n def inference(self, inputs, is_training=True, reuse=None):\n \"\"\"\n inputs: centered images with the same shape of original images\n \"\"\"\n with tf.name_scope('linear_svm'):\n l2_reg_fun = tf.contrib.layers.regularizers.l2_regularizer(0.0001)\n logits = tf.contrib.layers.fully_connected(\n inputs, self.num_output, activation_fn=None, weights_regularizer=l2_reg_fun, reuse=reuse, scope='softmax_linear_fc')\n return logits\n ","sub_path":"model/linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"270511346","text":"n = int( input() )\nA = list(map(int, input().split()))\nA.sort()\nB = [1]\n\nif A[n-1] >= n:\n\tprint(\"Impossible\")\nelif A[0] == 0 and A[n-1] != 0:\n\tprint(\"Impossible\")\n# [ALL DIFFERENT]\nelif A[0] == n-1 and A[n-1] == n-1:\n\tfor i in range(1,n+1):\n\t\tprint(\"{0} \".format(i), end=\"\")\n\tprint()\nelif n == 1:\n\tprint(\"Possible\")\n\tprint(1)\nelif n == 2:\n\tresidual_hat_dict = {1: (n-1) - A[0]}\n\t#if A[1] == A[0]:\n\n\n","sub_path":"a_2018/2018_1216_Avito_Cool_Challenge_2018/B/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643518890","text":"import cv2\nimport time\nimport datetime\n\ncapture = cv2.VideoCapture(0)\n\nwhile True:\n _, frame = capture.read()\n\n cv2.imshow(\"Video camera\", frame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncapture.release()\ncv2.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"205996421","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.datasets.samples_generator import make_classification\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\n\nx, y = make_classification(n_samples=1000,\n n_features=3,\n n_redundant=0,\n n_classes=3,\n n_informative=2,\n n_clusters_per_class=1,\n class_sep=0.5,\n random_state=10)\n\n\n\npca = PCA(n_components=2)\npca.fit(x)\nx_pca = pca.fit_transform(x)\n\nlda = LinearDiscriminantAnalysis(n_components=2)\nlda.fit(x, y)\nx_lda = lda.transform(x)\n\n\nfig = plt.figure()\n# plt.scatter(x_pca[:, 0], x_pca[:, 1], marker='o', c=y)\nplt.scatter(x_lda[:, 0], x_lda[:, 1], marker='o', c=y)\n# ax = Axes3D(fig, rect=[0, 0, 1, 1], elev=30, azim=20)\n# plt.scatter(x[:, 0], x[:, 1], x[:, 2], marker='o', c=y)\nplt.show()\n","sub_path":"datatomongdb/sklearnlda.py","file_name":"sklearnlda.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"445427577","text":"import json\nimport csv\nimport os\nimport re\n\ndef write_csv(filepath, line):\n with open(filepath, 'a', newline='',encoding='utf-8-sig') as csv_file:\n data = csv.writer(csv_file, delimiter=',')\n data.writerow(line)\n\ndef readjson(file_path,codec='utf-8',mode='r'):\n with open(file_path,mode=mode,encoding=codec) as f:\n data=json.load(f)\n\n return data\n\ndef write_csv(filepath, line):\n with open(filepath, 'a', newline='',encoding='utf-8-sig') as csv_file:\n data = csv.writer(csv_file, delimiter=',')\n data.writerow(line)\n\ndef get_files_in_dir(basepath,ext=[],debug=False):\n\n basepath=os.path.abspath(basepath)\n\n ext = [str.lower(x) for x in ext]\n\n try:\n if os.path.exists(basepath) and os.path.isdir(basepath):\n result=[os.path.join(basepath,x) for x in os.listdir(basepath)]\n files=[[os.path.split(x)[0],*os.path.splitext(os.path.basename(x))] for x in result if os.path.isfile(x)]\n if len(ext)==0:\n print('无类型筛选,返回所有文件')\n if debug:print(files)\n return files\n\n if len(ext)>0 :\n print('返回{}类型的文件'.format(ext))\n files=[[os.path.split(x)[0],*os.path.splitext(os.path.basename(x))] for x in result if str.lower(re.split(r'\\.',x)[-1]) in ext]\n if debug:print(files)\n return files\n else:\n return False\n except Exception as e:\n 
print('发生错误{}'.format(e))\n\n\nsourcedir='./source'\noutputfile='output.csv'\n\n\nfor basepath,filename,extname in get_files_in_dir(sourcedir,['rmn']):\n print(basepath,filename,extname)\n\n sourcePath=os.path.join(basepath,filename+extname)\n\n data=readjson(sourcePath)\n write_csv(outputfile,[filename+extname,*data[0]['Intensities']])\n\n","sub_path":"job-json,xml,csv/水花-json提取信息/提取.py","file_name":"提取.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391745939","text":"from project_reader import ProjectReader\n\n\ndef main():\n url = \"https://raw.githubusercontent.com/ohjelmistotuotanto-hy-avoin/python-kevat-2021/main/koodi/viikko3/web-login-robot/pyproject.toml\"\n reader = ProjectReader(url)\n print(reader.get_project())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"viikko2/project-reader/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"489893123","text":"from django import template\n\nfrom ostinato.pages.models import Page\n\n\nregister = template.Library()\n\n\n@register.inclusion_tag('pages/navbar.html', takes_context=True)\ndef navbar(context, for_page=None, path=''):\n \"\"\"\n Renders a basic navigation bar.\n\n ``for_page`` is used to specify a navbar for a specific\n page (it's children); defaults to root level pages\n\n ``path`` can be used in special cases where a page might not exist on\n the current path, but you would like the page to be \"discovered\" from the\n url. This will basically mark the active page in the navbar; if it can\n be found in the path of course.\n\n \"\"\"\n if 'page' not in context:\n page = Page.objects.get_from_path(path)\n else:\n page = context['page']\n\n navbar = Page.objects.get_navbar(for_page=for_page)\n\n return {\n 'page': page,\n 'navbar': navbar,\n }\n\n\n@register.inclusion_tag('pages/breadcrumbs.html', takes_context=True)\ndef breadcrumbs(context, for_page=None):\n \"\"\" Renders the breadcrumbs for the current page in the context \"\"\"\n if not for_page:\n # Attempt to get the page from the context\n for_page = context['page']\n\n breadcrumbs = Page.objects.get_breadcrumbs(for_page=for_page)\n return locals()\n\n\n@register.assignment_tag # Requires Django 1.4+\ndef get_page(slug):\n \"\"\" Finds the page with ``slug`` and adds that to the context \"\"\"\n return Page.objects.get(slug=slug)\n\n\n","sub_path":"ostinato/pages/templatetags/pages_tags.py","file_name":"pages_tags.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"615689656","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import Warning\nfrom odoo.addons.uni_core.utils import get_default_faculty\nfrom datetime import datetime, timedelta\n\n\nclass Admission(models.Model):\n _name = \"uni.admission\"\n _inherit = ['mail.thread']\n _rec_name = \"student_id\"\n\n date = fields.Date(\n string=\"Date\", default=fields.Date.today(), readonly=True)\n\n student_id = fields.Many2one(\n 'uni.student',\n string=\"Student\",\n required=True\n )\n student_name = fields.Char(string='Student' ,related='student_id.name')\n\n faculty_id = fields.Many2one(\n 'res.company',\n related='student_id.faculty_id',\n string='Faculty',\n store=True\n )\n\n department_id = fields.Many2one(\n 'uni.faculty.department',\n 
domain=\"[('faculty_id', '=', faculty_id)]\",\n string='Department'\n )\n\n university_id = fields.Char(\n related='student_id.university_id',\n string='University ID',\n store=True\n )\n\n specialization_id = fields.Many2one(\n 'uni.faculty.department.specialization',\n string='Specialization',\n domain=\"[('department_id.faculty_id', '=', faculty_id)]\",\n )\n\n certificate_type_id = fields.Many2one(\n comodel_name='uni.certificate.type',\n string='Certificate Type',\n related='student_id.certificate_type_id',\n store=True,\n )\n\n state = fields.Selection(\n string=\"State\",\n selection=[\n ('draft', 'Draft'),\n ('form', 'application Form'),\n ('committee', 'Faculty Committee'),\n ('reg_office' , 'Registration Office'),\n ('payment', 'Payment'),\n ('reg_form', 'Registration Form'),\n ('clinic', 'Medical Examination'),\n ('done', 'Done'),\n ('cancel', 'Cancel'),\n ],\n default='draft',\n )\n\n batch_id = fields.Many2one(\n string=\"Batch\",\n comodel_name=\"uni.faculty.department.batch\",\n domain=\"[('faculty_id','=',faculty_id)]\"\n )\n\n clinic_notes = fields.Text(string=\"Doctor's remarks\")\n\n medical_condition = fields.Selection(\n string=\"The doctor's decision\",\n selection=[\n ('fit', 'Medically fit'),\n ('unfit', 'Medically unfit'),\n ('wait', 'Waiting another operation'),\n ],\n default=\"fit\",\n )\n\n committee_notes = fields.Html(string=\"Committee's Notes\", )\n\n year_id = fields.Many2one(\n string=\"Academic Year\",\n comodel_name=\"uni.year\",\n related='student_id.year_id',\n readonly=True,\n )\n\n category_id = fields.Many2many(\n string=\"Discount Type\",\n comodel_name=\"uni.student_category\",\n )\n\n medical_data = fields.Many2one(\n string=\"Medical Data\",\n comodel_name=\"uni.health_service.medical_data\",\n )\n\n fees_ids = fields.One2many(\n string=\"Student Fees\",\n comodel_name=\"student.fees\",\n inverse_name=\"student_id\",\n readonly=True,\n related='student_id.fees_ids',\n )\n\n is_installment = fields.Boolean(string=\"Installment\")\n\n add_fees = fields.Many2many('uni.add_fees' , string=\"Additional Fees\")\n\n sec_subject = fields.One2many(\n comodel_name='student.result',\n inverse_name='admission_id',)\n\n have_brother = fields.Boolean('Have Brothers in University?')\n\n bro_detail_ids = fields.One2many(\n comodel_name='brother.details',\n inverse_name='admission_id',)\n\n\n _sql_constraints = [\n (\n 'student_id_unique',\n 'UNIQUE(student_Id)',\n _('You can not have two admission requests for the same student')\n ),\n ]\n\n def get_first_level(self, student_id):\n domain = [('faculty_id', '=', student_id.faculty_id.id)]\n level = self.env['uni.faculty.level'].search(\n domain, limit=1, order=\"order asc\")\n semester = self.env['uni.faculty.semester'].search(\n domain, limit=1, order=\"order asc\")\n return level, semester\n\n def approve(self):\n medical_data = self.env['uni.health_service.medical_data'].create(\n {'student_id': self.student_id.id}\n )\n\n self.write({'state': 'form', 'medical_data': medical_data.id})\n\n def cancel_addmission(self):\n self.write({'state': 'cancel'})\n\n def clinic_approval(self):\n\n self.write({'state': 'done'})\n\n def create_move_line(self, move_id, account_id, partner_id, lable, debit, credit, date):\n self.env['account.move.line'].with_context({\n 'check_move_validity': False\n }).create({\n 'account_id': account_id,\n 'name': lable,\n 'debit': debit,\n 'credit': credit,\n 'date': date,\n 'move_id': move_id,\n 'partner_id': partner_id,\n })\n ref = None\n\n def create_move(self, amount, ref, journal_id, 
debit_account, credit_account, partner_id, date, lable, payment_id=False):\n move_id = self.env['account.move'].create({\n 'journal_id': journal_id,\n 'date': date,\n 'ref': ref,\n 'payment_id': payment_id,\n })\n # recivable\n self.create_move_line(move_id.id, debit_account, partner_id,\n lable, amount, 0.0, date)\n # payment\n self.create_move_line(move_id.id, credit_account, partner_id,\n lable, 0.0, amount, date)\n\n move_id.post()\n\n def create_payment(self, student_id, amount, currency):\n self.env['uofk.payment'].create({\n 'reference': student_id.university_id,\n 'name': student_id.name,\n # TODO: service must be generic\n 'service': 1001,\n 'currency': currency,\n 'amount': amount,\n })\n\n def get_student_fees(self):\n dept_fees = self.env['uni.study_fees.departments'].search([\n ('department_id', '=', self.department_id.id),\n ('certificate_type_id', '=',\n self.certificate_type_id.id),\n ('year_id', '=', self.year_id.id),\n ('state', '=', 'done')\n ], limit=1)\n\n if not dept_fees:\n # No dept fees? let's try the global faculty fees\n faculty_fees = self.env['uni.study_fees.line'].search([\n ('faculty_id', '=', self.faculty_id.id),\n ('certificate_type_id', '=',\n self.certificate_type_id.id),\n ('year_id', '=', self.year_id.id), ('state', '=', 'done')\n ], limit=1)\n\n return dept_fees or faculty_fees\n\n def committee_approval(self):\n # TODO: rewrite this function on proper way\n \n self.write({'state': 'reg_office'})\n\n def reg_confirmation(self):\n fees = self.get_student_fees()\n if not fees:\n raise Warning(\n _('Please configure the faculty tuition fees for this academic year!')\n )\n include_registration_fees = False\n\n discount_percentage = 0.0\n\n discount = 0.0\n\n category_name =\"\"\n\n std_fees = fees.study_fees/2\n\n value = 0.0\n\n if self.category_id:\n for category in self.category_id:\n discount_percentage += category.general_discount\n value = category.general_discount\n include_registration_fees = category.include_registration_fees\n\n discount += ((fees.registration_fees * discount_percentage) /\n 100.0) if include_registration_fees else 0.0\n\n discount += (std_fees * value) / 100.0\n\n std_fees -= discount \n\n category_name += category.name + \",\"\n\n print('----------- discount',discount)\n domain = [('faculty_id', '=', self.faculty_id.id)]\n level_ids = self.env['uni.faculty.level'].search(domain)\n semester_ids = self.env['uni.faculty.semester'].search(domain)\n if not level_ids or not semester_ids:\n msg = \"Please create levels and semesters for \"+self.student_id.faculty_id.name\n raise Warning(\n _(msg)\n )\n\n discount_desc = category_name+\" ----> \" + \\\n str(discount_percentage) if discount > 0 else \"\"\n\n admission_id = self.env['uni.admission'].search([('student_id','=',self.student_id.id)])\n student_id = self.env['student.fees'].search([('student_id','=',self.student_id.id)])\n student_id.unlink()\n for level in level_ids:\n flag = True\n registration_fees = fees.registration_fees\n if self.is_installment:\n semester_ids = semester_ids[0]\n for semester in semester_ids:\n # registration fees is per year\n if not flag:\n registration_fees = 0\n\n\n\n self.env['student.fees'].create({\n 'student_id': self.student_id.id,\n 'level_id': level.id,\n 'semester_id': semester.id,\n 'registration_fees': registration_fees,\n 'study_fees': fees.study_fees/2,\n 'discount': discount/2,\n 'other_fees' : [(6, 0, self.add_fees.ids)] if (level.order == 1 and semester.order == 1) else 0,\n 'discount_desc': discount_desc,\n })\n flag = False\n \n 
amount = fees.registration_fees + fees.study_fees - discount\n if amount == 0:\n self.write({'state': 'done'})\n else:\n currency = self.certificate_type_id.currency_id.name\n \n self.student_id.write({\n 'category_id' : [(6, 0, self.category_id.ids)],\n 'admission_rec' : admission_id.id,\n })\n self.write({'state': 'payment'})\n\n\n ","sub_path":"uni_admission/models/admission.py","file_name":"admission.py","file_ext":"py","file_size_in_byte":9907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355525822","text":"\"\"\"Example of analyzing a particular pvpro fit and how the remaining fit\nparameters depend on fixing one fit parameter.\n\n@author: toddkarin\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\n\nregister_matplotlib_converters()\n\nimport matplotlib\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nfrom pvpro.fit import production_data_curve_fit\nfrom pvpro.classify import classify_operating_mode\n\n# Import synthetic data\ndf = pd.read_pickle('synth01_out.pkl')\n\nfrom pvlib.temperature import sapm_cell_from_module\n\nfrom tqdm import tqdm\n\n# Estimate cell temperature.\ndf['temperature_cell_meas'] = sapm_cell_from_module(\n module_temperature=df['temperature_module_meas'],\n poa_global=df['poa_meas'],\n deltaT=3)\n\n# Classify operating modes.\ndf['operating_cls'] = classify_operating_mode(voltage=df['v_dc'],\n current=df['i_dc'],\n power_clip=np.inf)\n\n# Clip dataframe shorter.\ndf = df[-2000:]\n\nfit_params = ['diode_factor',\n 'photocurrent_ref',\n 'resistance_series_ref',\n 'conductance_shunt_extra']\n\n# Can set a custom startpoint if auto-chosen startpoint isn't great.\np0 = {'diode_factor': 1.10,\n 'photocurrent_ref': 6.0,\n 'resistance_series_ref': 0.4,\n 'conductance_shunt_extra': 0.001}\n\nsaturation_current_list = np.logspace(-10,-8,20)\nresult = pd.DataFrame()\nfor j in tqdm(range(len(saturation_current_list))):\n # Run the fit\n out = production_data_curve_fit(\n temperature_cell=df['temperature_cell_meas'],\n effective_irradiance=df['poa_meas'],\n operating_cls=df['operating_cls'],\n voltage=df['v_dc'],\n current=df['i_dc'],\n alpha_isc=0.001,\n resistance_shunt_ref=400,\n saturation_current_ref=saturation_current_list[j],\n p0=p0,\n cells_in_series=60,\n band_gap_ref=1.121,\n verbose=False,\n solver='L-BFGS-B',\n # solver='Nelder-Mead',\n # singlediode_method='lambertw',\n singlediode_method='fast',\n method='minimize',\n use_mpp_points=True,\n use_voc_points=False,\n use_clip_points=False,\n )\n\n result.loc[j, 'saturation_current_ref'] = saturation_current_list[j]\n for k in out['p']:\n result.loc[j,k] = out['p'][k]\n result.loc[j,'residual'] = out['residual']\n\n\nsaturation_current_ref_true = np.mean(df['saturation_current_ref'])\n\n#\n# # Make best fit comparison\n# pfit_compare = pd.DataFrame(pfit, index=['Best Fit'])\n# for k in pfit.keys():\n# pfit_compare.loc['True',k] = df[k].mean()\n# print('Best fit:')\n# print(pfit_compare.transpose())\n\nn=0\nfor k in result:\n plt.figure(n,figsize=(4,2.2))\n plt.clf()\n plt.plot(saturation_current_list, result[k],'k-',label='Fit')\n\n if k in df:\n plt.axhline(y=df[k].mean(),c='r',label='True')\n\n plt.axvline(x=df['saturation_current_ref'].mean(), c='r')\n\n plt.xscale('log')\n plt.xlabel('saturation-current_ref')\n plt.ylabel(k)\n plt.legend()\n plt.show()\n plt.savefig('figures/synth05_loss_vs_saturation_{}.pdf'.format(k),\n bbox_inches='tight')\n\n 
n=n+1","sub_path":"examples/synth05_test_saturation_current_landscape.py","file_name":"synth05_test_saturation_current_landscape.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"108722864","text":"n = int(input())\na = [list(map(int, input().split())) for _ in range(n)]\nm = int(input())\ncommand = [list(map(int, input().split())) for _ in range(m)]\n\nfor i in range(m):\n row, course, cnt = command[i]\n temp = [0] * n\n for j in range(n):\n if course == 0:\n temp[j-(cnt%n)] = a[row-1][j]\n else:\n temp[(j+cnt)%n] = a[row-1][j]\n a[row-1][:] = temp[:]\n\nval_sum = 0\ns = 0\ne = n-1\nfor i in range(n):\n for j in range(s, e+1):\n val_sum += a[i][j]\n if i < n//2:\n s += 1\n e -= 1\n else:\n s -= 1\n e += 1\n\nprint(val_sum)\n","sub_path":"section3/test3_8.py","file_name":"test3_8.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207171417","text":"\"\"\"\nThis is example how to write simple aggregator mapper and reducer functions for\nWMArchive/Tools/myspark.py tool. It collects information about cpu/time/read/write\nsizes of successfull FWJR jobs. Information is structured by agent host/site.\n\"\"\"\n\nclass MapReduce(object):\n def __init__(self, spec=None):\n # spec here is redundant since our mapper and reducer does not use it\n self.spec = spec\n\n def mapper(self, records):\n \"\"\"\n Function to extract necessary information from records during spark\n collect process. It will be called by RDD.collect() object within spark.\n \"\"\"\n out = []\n sdict = {}\n hdict = {}\n for rec in records:\n if not rec:\n continue\n meta = rec['meta_data']\n if meta['jobstate'] != 'success':\n continue\n host = meta['host']\n if host not in hdict.keys():\n hdict[host] = {}\n for step in rec['steps']:\n site = step.get('site', None)\n if not site:\n continue\n if site not in hdict[host].keys():\n hdict[host] = {site:{'cpu':0, 'time':0, 'rsize':0, 'wsize':0}}\n perf = step['performance']\n cpu = perf['cpu']\n storage = perf['storage']\n if cpu:\n if cpu.get('TotalJobCPU', 0):\n hdict[host][site]['cpu'] += cpu.get('TotalJobCPU', 0)\n if cpu.get('TotalJobTime', 0):\n hdict[host][site]['time'] += cpu.get('TotalJobTime', 0)\n if storage:\n if storage.get('readTotalMB', 0):\n hdict[host][site]['rsize'] += storage.get('readTotalMB', 0)\n if storage.get('writeTotalMB', 0):\n hdict[host][site]['wsize'] += storage.get('writeTotalMB', 0)\n return hdict\n\n def reducer(self, records, init=0):\n \"Simpler reducer which collects all results from RDD.collect() records\"\n out = {}\n count = 0\n mdict = {'cpu':0, 'time':0, 'rsize':0, 'wsize':0}\n for rec in records:\n for host, hdict in rec.items():\n if host not in out.keys():\n out[host] = {}\n _hdict = out[host]\n for site, sdict in hdict.items():\n if site not in _hdict.keys():\n _hdict = {site:mdict}\n out[host][site] = mdict\n _sdict = _hdict[site]\n _cpu = _sdict['cpu'] + sdict['cpu']\n _time = _sdict['time'] + sdict['time']\n _rsize = _sdict['rsize'] + sdict['rsize']\n _wsize = _sdict['wsize'] + sdict['wsize']\n ndict = {'cpu':_cpu, 'time':_time, 'rsize':_rsize, 'wsize':_wsize}\n out[host][site] = ndict\n count += 1\n\t# simple printout of summary info\n for host, hdict in out.items():\n print(host)\n for site, sdict in hdict.items():\n print('')\n print(site)\n for key, val in sdict.items():\n print('%s: %s' % (key, val))\n return 
out\n","sub_path":"src/python/WMArchive/PySpark/RecordAggregator.py","file_name":"RecordAggregator.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"445270722","text":"'''\nCreated on 2018年7月20日\n\n@author: cloud\n'''\nimport time,logging\nfrom helper import elements\nfrom helper import ActionResult\nfrom cases.common import restart_app\nimport settings\n\nlogger = logging.getLogger()\n\ndef signup_with_email(driver,data):\n elements.MoreOptions(driver).click()\n time.sleep(1)\n elements.SignWithEmail(driver).click()\n elements.SignEmailInput(driver).input_text(data[\"email\"])\n pwdinput = elements.SignPasswdInput(driver)\n pwdinput.input_text(data[\"passwd\"])\n# driver.back()\n elements.SignWithEmailNextButton(driver).click()\n elements.SignNameSettingInput(driver).input_text(data[\"name\"])\n elements.SignBirthdayInput(driver).click()\n elements.SignBirthdayInputDone(driver).click()\n if data[\"gender\"] == \"male\":\n elements.SignMaleCheckbox(driver).click()\n else:\n elements.SignFemaleCheckbox(driver).click()\n elements.SignConfirm(driver).click()\n\n ret = False\n if elements.MatchHomeButton(driver) or \\\n elements.GenderFilterPopupClose(driver) or \\\n elements.NewUserGiftPopupPurchase(driver) or \\\n elements.NewUserGiftPopupRatingBar(driver):\n ret = True\n \n return ActionResult(ret,\"sign with email\")\n\ndef login_with_email(driver,data):\n elements.MoreOptions(driver).click()\n elements.LoginWithEmail(driver).click()\n elements.SignEmailInput(driver).input_text(data[\"email\"])\n elements.SignPasswdInput(driver).input_text(data[\"passwd\"])\n# driver.back()\n elements.LoginWithEmailConfirm(driver).click()\n \n time.sleep(3)\n ret = False\n if elements.MatchHomeButton(driver) or \\\n elements.GenderFilterPopupClose(driver) or \\\n elements.NewUserGiftPopupPurchase(driver) or \\\n elements.NewUserGiftPopupRatingBar(driver)or\\\n elements.NewUserGuideClose(driver) or\\\n elements.PermissionAllowButton(driver):\n ret = True\n \n return ActionResult(ret,\"login with email\")\n\ndef logout_account(driver):\n elements.UserHomeButton(driver).click()\n elements.UserSetting(driver).click()\n elements.UserSettingLogout(driver).click()\n elements.EnsureButton(driver).click()\n \n return ActionResult(bool(elements.SignWithFacebook(driver)),\"logout account\")\n\ndef delete_account(driver):\n elements.UserHomeButton(driver).click()\n elements.UserSetting(driver).click()\n ele = elements.UserSettingPageBody(driver)\n ele_loction = ele.location\n ele_size = ele.size\n driver.swipe(ele_loction[\"x\"]+ele_size[\"width\"]//2,ele_loction[\"y\"]+ele_size['height']\\\n ,ele_loction[\"x\"]+ele_size[\"width\"]//2,ele_loction[\"y\"],500)\n \n if elements.UserSettingDeleteAccountButton(driver):\n elements.UserSettingDeleteAccountButton(driver).click()\n if elements.DeleteAccountReasonPage(driver):\n elements.DeleteAccountReasonHowToUse(driver).click()\n elements.DeleteAccountConfirm(driver).click()\n elements.DeleteAccountUnderstandExplanationCheckbox(driver).click()\n elements.DeleteAccountConfirm(driver).click()\n elements.EnsureButton(driver).click()\n \n ret = elements.SignWithFacebook(driver)\n \n return ActionResult(ret,\"delete account\")\n\ndef new_user_guide_check(driver):\n time.sleep(2)\n ret = False\n if elements.NewUserGuideClose(driver):\n elements.NewUserGuideClose(driver).click()\n elements.NewUserGuideMatchTips(driver).click()\n driver.back()\n 
elements.MatchStopEnsureButton(driver,2,0.2).click()\n \n ret = True\n else:\n ret = elements.MatchHomeButton(driver)\n time.sleep(3)\n return ActionResult(ret,\"new user guide check\")\n\ndef new_user_popup_handle(driver):\n for _ in range(2):\n if elements.MatchHomeButton(driver):\n break\n if elements.NewUserGiftPopupPurchase(driver):\n elements.NewUserGiftPopupPurchaseClose(driver).click()\n elif elements.NewUserGiftPopupRatingBar(driver):\n elements.NewUserGiftPopupRatingBar(driver).click()\n elements.NewUserGiftPopupRatingCancel(driver).click()\n elif elements.GenderFilterPopupClose(driver):\n elements.GenderFilterPopupClose(driver).click()\n\n return ActionResult(elements.MatchHomeButton(driver), \"new user popup handle\")\n\ndef new_user_popup_test(driver):\n# signup_with_email(driver,data)\n# new_user_guide_check(driver)\n popup_result = []\n for i in range(1,101):\n restart_app(driver)\n time.sleep(3)\n if elements.MatchHomeButton(driver):\n logger.info(\"no popup element\")\n continue\n for _ in range(3):\n no_popup = False\n for ele_class in [elements.NewUserGiftPopupPurchase,elements.NewUserGiftPopupRatingBar,elements.GenderFilterPopupClose]:\n ele_instance = ele_class(driver)\n if ele_instance:\n logger.info(\"[%d] --> (%s)\"%(i,ele_class(driver).ele_name))\n popup_result.append((i,ele_class(driver).ele_name))\n # close Popup\n if isinstance(ele_instance, elements.NewUserGiftPopupPurchase):\n elements.NewUserGiftPopupPurchaseClose(driver).click()\n elif isinstance(ele_instance, elements.NewUserGiftPopupRatingBar):\n elements.NewUserGiftPopupRatingBar(driver).click()\n elements.NewUserGiftPopupRatingCancel(driver).click()\n elif isinstance(ele_instance, elements.GenderFilterPopupClose):\n elements.GenderFilterPopupClose(driver).click()\n no_popup = elements.MatchHomeButton(driver)\n if no_popup:\n break\n if no_popup:\n break\n# logger.info(\"no popup element\")\n logger.info(str(popup_result))\n return ActionResult(True, \"new user popup test\")\n\ndef change_user_channel(userid,userchannel):\n channelmap = {\n \"默认\":0,\n \"自然渠道\":1,\n \"install\":2,\n \"AEO or InApp\":3\n }\n \n if userchannel not in channelmap:\n logger.warning(\"invalid user channel [%s],will use default.\"%userchannel)\n \n data = {\n \"userId\":userid,\n \"channelId\":channelmap[userchannel] if userchannel in channelmap else 0\n }\n req = settings.http_client.request(\"POST\",settings.template.update_channel_url.render(server=settings.server),fields=data)\n \n return ActionResult(req.status == 200, \"change user channel [%s]\"%req.status)\n \nif __name__ == \"__main__\":\n change_user_channel(\"7793367\", \"默认\")\n# pass\n \n\n \n ","sub_path":"cases/sign.py","file_name":"sign.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"237939508","text":"import os\nimport codecs\n\nDICT_FILENAME = \"house.data\"\n\ndef listfiles(rootdir, prefix=\".html\"):\n file = []\n for parent, dirnames, filenames in os.walk(rootdir):\n if parent == rootdir:\n for filename in filenames:\n if filename.endswith(prefix):\n file.append(filename)\n return file\n else:\n pass\n\n \ndef rmfiles(rootdir):\n for parent, dirnames, filenames in os.walk(rootdir):\n if parent == rootdir:\n for filename in filenames:\n os.remove(rootdir + filename)\n else:\n pass \n\ndef create_file():\n with open(DICT_FILENAME, 'a+') as f:\n print('create file success')\n \ndef read_file_lines(filepath):\n content = ''\n file = codecs.open(filepath, 'r', 
'utf-8')\n lines = [line.strip() for line in file]\n if len(lines) > 0:\n for line in lines:\n content += line\n file.close()\n return content\n\n \ndef write_data(data):\n with open(DICT_FILENAME, 'a+') as f:\n f.write(data)\n f.write('\\n')\n f.flush()\n \ndef close():\n f.close()\n\ndef init():\n create_file()\n \ninit()","sub_path":"source/filetool.py","file_name":"filetool.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"449663645","text":"#1.del根据索引值删除元素\nlang = [\"Python\", \"C++\", \"Java\", \"PHP\", \"Ruby\", \"MATLAB\"]\n#使用正数索引\ndel lang[2]\nprint(lang)\n#使用负数索引\ndel lang[-2]\nprint(lang)\n\nlang = [\"Python\", \"C++\", \"Java\", \"PHP\", \"Ruby\", \"MATLAB\"]\ndel lang[1: 4]\nprint(lang)\nlang.extend([\"SQL\", \"C#\", \"Go\"])\ndel lang[-5: -2]\nprint(lang)\n\n#2.pop()根据索引值删除元素\nnums = [40, 36, 89, 2, 36, 100, 7]\nnums.pop(3)\nprint(nums)\nnums.pop()\nprint(nums)\n\n#3.remove()根据元素值进行删除\nnums = [40, 36, 89, 2, 36, 100, 7]\n#第一次删除36\nnums.remove(36)\nprint(nums)\n#第二次删除36\nnums.remove(36)\nprint(nums)\n#删除78\nif 78 in nums:\n nums.remove(78)\nprint(nums)\n\n#4.clear()删除列表所有元素\nurl = list(\"http://c.biancheng.net/python/\")\nurl.clear()\nprint(url)","sub_path":"c语言中文网/py/4.列表,元组,字典和集合/4.4py_list列表删除元素/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494298917","text":"import re\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fin:\n long_description = fin.read()\n\nlong_description = re.sub(\n \"^(!\\[.*\\]\\()(.*\\))\",\n lambda m: m.group(1) + \"https://github.com/davidalber/geneagrapher/raw/master/\" + m.group(2),\n long_description,\n flags=re.MULTILINE\n)\n\nsetuptools.setup(\n name=\"geneagrapher\",\n version=\"1.0\",\n author=\"David Alber\",\n author_email=\"alber.david@gmail.com\",\n description=\"Mathematical genealogy grapher.\",\n entry_points={\n 'console_scripts':\n ['ggrapher=geneagrapher.geneagrapher:ggrapher']\n },\n install_requires=['beautifulsoup4==4.6.3', 'lxml==4.2.5'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/davidalber/geneagrapher\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n package_data={'tests': ['geneagrapher/testdata/*.html']},\n include_package_data=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312361354","text":"# -*- coding: cp1250 -*-\r\nimport pandas as pd\r\nimport scipy.stats\r\nfrom collections import OrderedDict\r\n\r\n\r\nclass StatsExecutor:\r\n \"\"\"Perform many tests on one data at once.\r\n\r\n Inputs are path of file, separator and header (0-no header, 1-header present).\r\n To print out summary, type analysis_name.print_results().\r\n To write html or txt summary, type type analysis_name.write_txt() or analysis_name.write_tho()\r\n \"\"\"\r\n def __init__(self, filepath, sep=\",\", header=0):\r\n \"\"\"Initializes file, generates data structure and finally automatically runs tests\"\"\"\r\n self.data = pd.read_csv(filepath, sep=sep, header=header) #wczytanie danych z pomocą pandas\r\n\r\n self.summary = self._summarize() 
#statystyki opisowe dodane do obiektu\r\n self.headers_of_numeric_columns = list(self.summary) #daje naglowki z kolumn podsumowania ktore automatycznie oblicza je tylko dla kolumn numerycznych\r\n \r\n self.results = OrderedDict() #tworzy liste wynikowa\r\n self._prepare_results_storage() #dodaje do listy wynikowej listy i slowniki z testami\r\n self._make_common_tests_set() #uruchamia wszystkie testy, mozna zahashowac \r\n\r\n def _summarize(self):\r\n \"\"\"Performs basic descriptive statistics using pandas\"\"\"\r\n return self.data.describe()# korzysta z wbudowanej metody dla obiektu csv od pandas dla statystyk opisowych\r\n\r\n def _prepare_results_storage(self):\r\n \"\"\"Prepares containers for results\"\"\"\r\n self.results[\"Shapiro\"] = []\r\n self.results[\"Chisq\"] = []\r\n self.results[\"Spearman\"] = self.prepare_complex_multilayer_container()\r\n self.results[\"Anova\"] = self.prepare_complex_multilayer_container()\r\n self.results[\"Ttest\"] = self.prepare_complex_multilayer_container()\r\n self.results[\"Pearson\"] = self.prepare_complex_multilayer_container()\r\n self.results[\"Kruskal\"] = self.prepare_complex_multilayer_container()\r\n\r\n def prepare_complex_multilayer_container(self):\r\n \"\"\"Prepares container for tests that compare different columns, requires \"\"\"\r\n container_for_each_header = {name: {} for name in self.headers_of_numeric_columns}\r\n return container_for_each_header\r\n\r\n def _make_common_tests_set(self):\r\n \"\"\"Automaticaly creates common tests for data, data, headers and summary need to be initialized \"\"\"\r\n headers_copy = self.headers_of_numeric_columns[:] #kopije nagłówki na potrzeby metody\r\n for name in self.headers_of_numeric_columns: \r\n self.results[\"Shapiro\"].append(scipy.stats.shapiro(self.data[name]))\r\n self.results[\"Chisq\"].append(scipy.stats.chisquare(self.data[name]))\r\n headers_copy.remove(name)\r\n for name2 in headers_copy:\r\n self.results[\"Spearman\"][name][name2] = (scipy.stats.spearmanr(self.data[name], self.data[name2]))\r\n self.results[\"Anova\"][name][name2] = scipy.stats.f_oneway(self.data[name], self.data[name2])\r\n self.results[\"Ttest\"][name][name2] = scipy.stats.ttest_rel(self.data[name], self.data[name2])\r\n self.results[\"Pearson\"][name][name2] = scipy.stats.pearsonr(self.data[name], self.data[name2])\r\n self.results[\"Kruskal\"][name][name2] = scipy.stats.kruskal(self.data[name], self.data[name2])\r\n\r\n def print_results(self):\r\n \"\"\"Main method for printing analysis results in console. 
Obviously requires launching analysis first\"\"\"\r\n self.print_summary() \r\n for test_name in self.results:\r\n self._print_section(test_name)\r\n if isinstance(self.results[test_name], dict): #sprawdzenie czy wyniki sa slownikiem, jak nie to znak ze jest to slownik i trzeba uzyc innej prezentacji danych\r\n for header1 in self.results[test_name]:\r\n for header2 in self.results[test_name][header1]:\r\n print(header1, header2, \"\\n \\t\",\r\n \"test-statistics \", self.results[test_name][header1][header2][0],\r\n \"\\n \\t\",\r\n \"p-value \", self.results[test_name][header1][header2][1],\r\n \"\\n\")\r\n else:\r\n for result in zip(self.headers_of_numeric_columns, self.results[test_name]): #iteracja po zzipowanej krotce naglowkow i wynikow\r\n print(\r\n \"{0} : \\n \\t p-value {1} \\n \\t test-statistics {2}\".format(result[0], result[1][0], result[1][1]))\r\n print(\"\\n\" * 3)\r\n\r\n def print_summary(self):\r\n \"\"\"Prints out summary table\"\"\"\r\n self._print_section(\"SUMMARY\")\r\n print(self.summary)\r\n print(\"\\n\" * 3)\r\n\r\n def write_txt(self,name=\"result.txt\"):\r\n \"\"\"Writes summary to text file, input is filename\"\"\"\r\n self.report_file=open(name,\"w\")\r\n self.write_summary()\r\n for test_name in self.results:\r\n self._write_section(test_name)\r\n if isinstance(self.results[test_name], dict):\r\n for header1 in self.results[test_name]:\r\n for header2 in self.results[test_name][header1]:\r\n \r\n self.report_file.write(\"\\n\")\r\n self.report_file.write(header1 + \"-\"+ header2)\r\n self.report_file.write(\"\\n \\t test-statistics: \")\r\n self.report_file.write(str(self.results[test_name][header1][header2][0]))\r\n self.report_file.write(\"\\n \\t p value: \")\r\n self.report_file.write(str(self.results[test_name][header1][header2][1]))\r\n self.report_file.write(\"\\n\")\r\n\r\n else:\r\n for result in zip(self.headers_of_numeric_columns, self.results[test_name]):\r\n self.report_file.write(str(\r\n \"\\n {0} : \\n \\t test-statistics {1} \\n \\t p value {2}\".format(result[0], result[1][0], result[1][1])))\r\n self.report_file.write(\"\\n\" * 3)\r\n self.report_file.close()\r\n \r\n def write_summary(self):\r\n \"\"\"Writes summary for write_txt method\"\"\"\r\n self.report_file.write(\"SUMMARY\\n\")\r\n self.report_file.write(str(self.summary))\r\n self.report_file.write(\"\\n\" * 3) \r\n\r\n @staticmethod\r\n def _print_section(section_name):\r\n \"\"\"Prints current name of section and lots of stars for neat look\"\"\"\r\n print(\"-\" * 50)\r\n print(section_name)\r\n print(\"-\" * 50)\r\n\r\n def _write_section(self,section_name):\r\n \"\"\"Writes to file name of current section\"\"\"\r\n self.report_file.write(\"-\" * 50)\r\n self.report_file.write(section_name)\r\n self.report_file.write(\"-\" * 50+\"\\n\")\r\n\r\n def write_html(self,name=\"result.html\"):\r\n \"\"\"Main method for creating html formatted results\"\"\"\r\n self.report_file_html=open(name,\"w\")\r\n self.write_summary_html()\r\n for test_name in self.results:\r\n self._write_section_html(test_name)\r\n if isinstance(self.results[test_name], dict):\r\n for header1 in self.results[test_name]:\r\n for header2 in self.results[test_name][header1]:\r\n self.report_file_html.write(\"
\\n\")\r\n                        self.report_file_html.write(\"\\n\")\r\n                        self.report_file_html.write(header1 + \"-\"+ header2)\r\n                        self.report_file_html.write(\"\\n \\t test-statistics: \")\r\n                        self.report_file_html.write(str(self.results[test_name][header1][header2][0]))\r\n                        self.report_file_html.write(\"\\n \\t p value:\")\r\n                        self.report_file_html.write(str(self.results[test_name][header1][header2][1]))\r\n                        self.report_file_html.write(\"\\n\")\r\n                        self.report_file_html.write(\"\\n\")\r\n\r\n            else:\r\n                for result in zip(self.headers_of_numeric_columns, self.results[test_name]):\r\n                    self.report_file_html.write(str(\r\n                        \"\\n\\n {0} :\\n \\t test-statistics {1}\\n \\t p value {2}\\n\".format(result[0], result[1][0], result[1][1])))\r\n                self.report_file_html.write(\"\\n\\n\")\r\n        self.report_file_html.write(\"\\n\\n\\n\")\r\n        self.report_file_html.close()\r\n    \r\n    def write_summary_html(self):\r\n        \"\"\"Generates html header and formatted summary table\"\"\"\r\n        self.report_file_html.write(str(\"\\n\\n\\t\\nAnalysis results\\n\\n\\n\\n\\n\"))\r\n        self.report_file_html.write(\"SUMMARY\\n\\n\")\r\n        self.report_file_html.write(self.summary.to_html())\r\n        self.report_file_html.write(\"\\n\\n\") \r\n    \r\n    def _write_section_html(self,section_name):\r\n        \"\"\"Generate secion name\"\"\"\r\n        self.report_file_html.write(\"\")\r\n        self.report_file_html.write(section_name)\r\n        self.report_file_html.write(\"
\\n\")\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \"\"\"This part of program launches only when the file is directly executed\"\"\"\r\n tests = StatsExecutor(\"data.csv\", sep=\",\", header=0)\r\n #tests.print_results()\r\n #tests.write_txt()\r\n tests.write_html()\r\n","sub_path":"doc/source/kpstat.py","file_name":"kpstat.py","file_ext":"py","file_size_in_byte":9449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302269413","text":"from fractions import gcd\na, b = map(int, input().split())\nn = gcd(a, b)\n\ndivisors = []\nfor i in range(1, int(n**0.5)+1):\n if n%i == 0:\n divisors.append(i)\n if i!=n // i:\n divisors.append(n//i)\n\ndivisors.sort()\ncnt = 0\nnum = 1\nfor i in range(len(divisors)):\n if num == 1:\n cnt += 1\n num *= divisors[i]\n elif gcd(num, divisors[i]) == 1:\n cnt += 1\n num *= divisors[i]\n\nprint(cnt)\n","sub_path":"142/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"120537480","text":"from django import forms\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nimport itertools\nfrom markdownx.fields import MarkdownxFormField\nfrom time_sections.models import Section\nfrom ublog.models import Post\nfrom ublog.signals import post_published\n\nFIELDS = [\n \"title\",\n \"teaser\",\n \"content\",\n \"state\",\n]\n\n\nclass PostForm(forms.ModelForm):\n\n title = forms.CharField(\n max_length=90,\n widget=forms.TextInput(attrs={\"class\": \"form-control\"}),\n )\n teaser = MarkdownxFormField()\n content = MarkdownxFormField()\n\n class Meta:\n model = Post\n fields = FIELDS\n\n def __init__(self, *args, **kwargs):\n super(PostForm, self).__init__(*args, **kwargs)\n\n post = self.instance\n\n def save(self):\n published = False\n post = super(PostForm, self).save(commit=False)\n\n if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():\n if self.cleaned_data[\"state\"] == Post.STATE_CHOICES[-1][0]:\n post.published = timezone.now()\n published = True\n\n max_length = Post._meta.get_field('slug').max_length\n post.slug = orig = slugify(post.title)[:max_length]\n\n for x in itertools.count(1):\n if not Post.objects.filter(slug=post.slug).exists():\n break\n\n # Truncate the original slug dynamically. 
Minus 1 for the hyphen.\n post.slug = \"%d-%s\" % (x, orig[:max_length - len(str(x)) - 1])\n\n post.teaser_md = self.cleaned_data[\"teaser\"]\n post.content_md = self.cleaned_data[\"content\"]\n post.updated = timezone.now()\n post.save()\n\n if published:\n post_published.send(sender=Post, post=post)\n\n return post\n","sub_path":"ublog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"212554668","text":"import xarray as xr\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import DateFormatter\nimport pandas as pd\nimport numpy as np\nimport openaq\nfrom datetime import datetime\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\nemission='no2'\n\nfig=plt.figure(figsize=[20,20])\nfig,ax=plt.subplots(2,2,figsize=[8,8])\n \nprint(emission)\nenv_list=['AURN','Background Urban','Background Rural','Traffic Urban','Industrial urban','Industrial Suburban','background Suburban']\nenv_no=4\n\nif emission == 'o3':\n env_list=['AURN','Background Urban','Background Rural','Traffic Urban']\n env_no=4\n\n\nweek='fullweek'\n\nseason='2019'\n\nif season == 'winter':\n date1='2019-01-01'\n date2='2019-03-19'\nif season == 'spring':\n date1='2019-03-20'\n date2='2019-06-20'\nif season == 'summer':\n date1='2019-06-21'\n date2='2019-09-22'\nif season == 'autumn':\n date1='2019-09-23'\n date2='2019-12-20'\nif season == '2019':\n date1='2019-01-01'\n date2='2019-12-31'\n\nif emission == 'no2':\n conv=1.88*10**9\n nasa_emission='no2'\n Emission=r'$NO_2$'\n\nif emission == 'no':\n conv=1.23*10**9\n nasa_emission='no'\n Emission='NO'\n\nif emission == 'pm25':\n conv=1\n nasa_emission='pm25_rh35_gcc'\n Emission=r'$PM_{2.5}$'\n\nif emission == 'o3':\n conv=2*10**9\n nasa_emission='o3'\n Emission=r'$O_3$'\n\nif week == 'fullweek':\n day1=0\n day2=6\nif week == 'weekday':\n day1=0\n day2=4\nif week == 'weekend':\n day1=5\n day2=6\n\ndef rmse(predictions, targets):\n return np.sqrt(((predictions-targets)**2).mean())\n\n\nfor e in range(env_no):\n env_type=env_list[e]\n print(env_type)\n if env_type == 'AURN':\n env_type=' '\n\n metadata_csv='/users/mtj507/scratch/defra_data/defra_site_metadata.csv'\n metadata=pd.read_csv(metadata_csv, low_memory=False)\n metadata=metadata.loc[metadata['Environment Type'].str.contains(env_type)]\n metadata=metadata.reset_index(drop=False)\n area=metadata['Zone']\n location=metadata['Site Name']\n latitude=metadata['Latitude']\n longitude=metadata['Longitude']\n environment=metadata['Environment Type']\n no_locations=len(metadata.index)\n a=location\n\n defra_csv='/users/mtj507/scratch/defra_data/'+emission+'_2019.csv'\n ddf=pd.read_csv(defra_csv, low_memory=False)\n ddf.loc[ddf['Time'] == '00:00:00','Time']='24:00:00'\n ddf.index=pd.to_datetime(ddf['Date'], dayfirst=True)+pd.to_timedelta(ddf['Time'])\n ddf=ddf.loc[:, ~ddf.columns.str.contains('^Unnamed')]\n ddf=ddf.dropna(axis=0)\n ddf=ddf.replace('No data', np.nan)\n ddf['hour']=ddf.index.hour\n ddf['weekday']=ddf.index.weekday\n ddf['month']=ddf.index.month.astype(str)\n ddf['month']=ddf['month'].str.zfill(2)\n ddf['day']=ddf.index.day.astype(str)\n ddf['day']=ddf['day'].str.zfill(2)\n ddf['day and month']=ddf['month']+ddf['day']\n\n ddf=ddf.loc[date1:date2]\n ddf=ddf.loc[(ddf['weekday'] >= day1) & (ddf['weekday'] <= day2)]\n\n b=ddf.columns\n 
headers=set(a).intersection(b)\n\n ddf=ddf.loc[:,headers]\n ddf['hour']=ddf.index.hour\n ddf=ddf.astype(float)\n ddf_median=ddf.groupby('hour').median()\n ddf_median['median']=ddf_median.mean(axis=1)\n ddf_Q1=ddf.groupby('hour').quantile(0.25)\n ddf_Q1['Q1']=ddf_Q1.mean(axis=1)\n ddf_Q3=ddf.groupby('hour').quantile(0.75)\n ddf_Q3['Q3']=ddf_Q3.mean(axis=1)\n \n ax.ravel()[e].plot(ddf_median.index,ddf_median['median'],label='Observation',color='dimgrey')\n ax.ravel()[e].fill_between(ddf_median.index,ddf_Q1['Q1'],ddf_Q3['Q3'],alpha=0.5,facecolor='dimgrey',edgecolor='grey')\n\n\n f='/users/mtj507/scratch/nasa_assimilations/2019_assimilation.nc'\n ds=xr.open_dataset(f)\n df_model=pd.DataFrame(index=pd.to_datetime(ds.time.data))\n for i in np.arange(0,no_locations):\n spec=ds[nasa_emission].data\n lats=ds['lat'].data\n lons=ds['lon'].data\n model_lat=np.argmin(np.abs(latitude[i]-lats))\n model_lon=np.argmin(np.abs(longitude[i]-lons))\n df_model[location[i]]=ds[nasa_emission].data[:,0,model_lat, model_lon]\n df_model[location[i]]=df_model[location[i]]*conv\n \n df_model['hour']=df_model.index.hour\n df_median=df_model.groupby('hour').median()\n df_median['median']=df_median.mean(axis=1)\n df_Q1=df_model.groupby('hour').quantile(0.25)\n df_Q1['Q1']=df_Q1.mean(axis=1)\n df_Q3=df_model.groupby('hour').quantile(0.75)\n df_Q3['Q3']=df_Q3.mean(axis=1)\n\n ax.ravel()[e].plot(df_median.index,df_median['median'],label='Model',color='green')\n ax.ravel()[e].fill_between(df_median.index,df_Q1['Q1'],df_Q3['Q3'],alpha=0.5,facecolor='limegreen',edgecolor='forestgreen')\n\n if env_type == ' ':\n env_type='AURN'\n\n if e == 0 or e == 2:\n ax.ravel()[e].set_ylabel(Emission+r'($\\mu g\\: m^{-3}$)')\n if e == 2 or e == 3:\n ax.ravel()[e].set_xlabel('Time of Day (hour)')\n if e == 1 or e == 3:\n ax.ravel()[e].set_ylabel('')\n if e == 0 or e == 1:\n ax.ravel()[e].set_xlabel('')\n if e == 1:\n ax.ravel()[e].legend(fontsize='small')\n ax.ravel()[e].set_title(env_type)\n\n if emission == 'o3' and e == 3:\n ax.ravel()[e].set_visible(False)\n\n mod_mean=df_median['median'].mean()\n mod_mean=str(round(mod_mean,2))\n print('mod mean = '+mod_mean)\n\n obs_mean=ddf_median['median'].mean()\n obs_mean=str(round(obs_mean,2))\n print('obs mean = '+obs_mean)\n \n rmse_val=rmse(df_median['median'],ddf_median['median'])\n rmse_txt=str(round(rmse_val,2))\n print('RMSE = '+rmse_txt)\n\nfig.tight_layout()\npath='/users/mtj507/scratch//obs_vs_forecast/assimilation_scripts/plots/whole_year/'+emission+'/'\nplt.savefig(path+emission+'_diurnal_'+season+'_'+week+'.png')\nplt.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"assimilation_scripts/diurnal_plot.py","file_name":"diurnal_plot.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"290887947","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 9 09:22:54 2018\n\n@author: lenovo\n\n板块涨幅统计\n\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport datetime\nfrom configobj import ConfigObj\nimport sqlite3\nimport numpy as np\nimport pandas as pd\nimport xlwings as xw\nimport struct\nimport winreg\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom pyquery import PyQuery as pq\nimport time\nimport tushare as ts\nfrom urllib import request\nimport zipfile\nfrom bs4 import BeautifulSoup as bs\nimport dateutil.parser\nimport xlrd\nimport 
subprocess\n\n\n########################################################################\n#初始化本程序配置文件\n########################################################################\ndef iniconfig():\n inifile = os.path.splitext(sys.argv[0])[0]+'.ini' #设置缺省配置文件\n return ConfigObj(inifile,encoding='GBK')\n\n\n#########################################################################\n#读取键值,如果键值不存在,就设置为defvl\n#########################################################################\ndef readkey(config,key,defvl=None):\n keys = config.keys()\n if defvl==None :\n if keys.count(key) :\n return config[key]\n else :\n return \"\"\n else :\n if not keys.count(key) :\n config[key] = defvl\n config.write()\n return defvl\n else:\n return config[key]\n\n\n###############################################################################\n#长股票代码\n###############################################################################\ndef lgpdm(dm):\n return dm[:6]+('.SH' if dm[0]=='6' else '.SZ')\n\n###############################################################################\n#短股票代码\n###############################################################################\ndef sgpdm(dm):\n return dm[:6]\n\n########################################################################\n#获取本机通达信安装目录,生成自定义板块保存目录\n########################################################################\ndef gettdxdir():\n\n try :\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\华西证券华彩人生\")\n value, type = winreg.QueryValueEx(key, \"InstallLocation\")\n except :\n print(\"本机未安装【华西证券华彩人生】软件系统。\")\n sys.exit()\n return value\n\n########################################################################\n#获取本机通达信安装目录,生成自定义板块保存目录\n########################################################################\ndef gettdxblkdir():\n try :\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\华西证券华彩人生\")\n value, type = winreg.QueryValueEx(key, \"InstallLocation\")\n return value + '\\\\T0002\\\\blocknew'\n except :\n print(\"本机未安装【华西证券华彩人生】软件系统。\")\n sys.exit()\n\n###############################################################################\n#从通达信系统读取股票代码表\n###############################################################################\ndef get_gpdm():\n datacode = []\n for sc in ('h','z'):\n fn = gettdxdir()+'\\\\T0002\\\\hq_cache\\\\s'+sc+'m.tnf'\n f = open(fn,'rb')\n f.seek(50)\n ss = f.read(314)\n while len(ss)>0:\n gpdm=ss[0:6].decode('GBK')\n gpmc=ss[23:31].strip(b'\\x00').decode('GBK').replace(' ','').replace('*','')\n gppy=ss[285:291].strip(b'\\x00').decode('GBK')\n #剔除非A股代码\n if (sc==\"h\" and gpdm[0]=='6') :\n gpdm=gpdm+'.SH'\n datacode.append([gpdm,gpmc,gppy])\n if (sc=='z' and (gpdm[0:2]=='00' or gpdm[0:2]=='30')) :\n gpdm=gpdm+'.SZ'\n datacode.append([gpdm,gpmc,gppy])\n ss = f.read(314)\n f.close()\n gpdmb=pd.DataFrame(datacode,columns=['gpdm','gpmc','gppy'])\n gpdmb['dm']=gpdmb['gpdm'].map(lambda x:x[:6])\n gpdmb=gpdmb.set_index('gpdm',drop=False)\n return gpdmb\n\n########################################################################\n#获取本机通达信安装目录,生成自定义板块保存目录\n########################################################################\ndef gettdxblk(lb):\n\n try :\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\华西证券华彩人生\")\n value, type = winreg.QueryValueEx(key, \"InstallLocation\")\n except :\n print(\"本机未安装【华西证券华彩人生】软件系统。\")\n sys.exit()\n\n blkfn = value + 
'\\\\T0002\\\\hq_cache\\\\block_'+lb+'.dat'\n blk = {}\n with open(blkfn,'rb') as f :\n blknum, = struct.unpack('384xH', f.read(386))\n for i in range(blknum) :\n stk = []\n blkname = f.read(9).strip(b'\\x00').decode('GBK')\n stnum, = struct.unpack('H2x', f.read(4))\n for j in range(stnum) :\n stkid = f.read(7).strip(b'\\x00').decode('GBK')\n stk.append(stkid)\n blk[blkname] = [blkname,stnum,stk]\n\n f.read((400-stnum)*7)\n \n f.close()\n\n\n return blk\n\n#############################################################################\n#股票列表,通达信板块文件调用时wjtype=\"tdxbk\"\n#############################################################################\ndef zxglist(zxgfn,wjtype=\"\"):\n zxglst = []\n p = \"(\\d{6})\"\n if wjtype == \"tdxblk\" :\n p =\"\\d(\\d{6})\"\n if os.path.exists(zxgfn) :\n #用二进制方式打开再转成字符串,可以避免直接打开转换出错\n with open(zxgfn,'rb') as dtf:\n zxg = dtf.read()\n if zxg[:3] == b'\\xef\\xbb\\xbf' :\n zxg = zxg.decode('UTF8','ignore') #UTF-8\n elif zxg[:2] == b'\\xfe\\xff' :\n zxg = zxg.decode('UTF-16','ignore') #Unicode big endian\n elif zxg[:2] == b'\\xff\\xfe' :\n zxg = zxg.decode('UTF-16','ignore') #Unicode\n else :\n zxg = zxg.decode('GBK','ignore') #ansi编码\n zxglst =re.findall(p,zxg)\n else:\n print(\"文件%s不存在!\" % zxgfn)\n if len(zxglst)==0:\n print(\"股票列表为空,请检查%s文件。\" % zxgfn)\n\n zxg = list(set(zxglst))\n zxg.sort(key=zxglst.index)\n\n return zxg\n\n#############################################################################\n#通达信自选股A股列表,去掉了指数代码\n############################################################################# \ndef zxglst(zxgfile=None):\n\n if zxgfile==None:\n zxgfile=\"zxg.blk\"\n else:\n if '.blk' not in zxgfile:\n zxgfile=zxgfile+'.blk'\n \n tdxblkdir = gettdxblkdir()\n zxgfile = os.path.join(tdxblkdir,zxgfile)\n if not os.path.exists(zxgfile):\n print(\"板块不存在,请检查!\")\n return pd.DataFrame()\n \n zxg = zxglist(zxgfile,\"tdxblk\")\n \n gpdmb=get_gpdm()\n \n #去掉指数代码只保留A股代码\n zxglb=gpdmb.loc[gpdmb['dm'].isin(zxg),:]\n #增加一列\n #http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.assign.html\n zxglb=zxglb.assign(no=zxglb['dm'].map(lambda x:zxg.index(x)+1))\n\n zxglb=zxglb.set_index('no') \n zxglb=zxglb.sort_index() \n return zxglb\n\n\n##########################################################################\n#获取运行程序所在驱动器\n##########################################################################\ndef getdrive():\n if sys.argv[0]=='' :\n return os.path.splitdrive(os.getcwd())[0]\n else:\n return os.path.splitdrive(sys.argv[0])[0]\n\n\n\n#############################################################################\n#读取中证行业分类 \n#############################################################################\ndef get_zzhybk():\n\n files = os.listdir(getdrive()+'\\\\syl')\n fs = [re.findall('csi(\\d{8})\\.xls',e) for e in files]\n jyrlist =[]\n for e in fs:\n if len(e)>0:\n file = getdrive()+'\\\\syl\\\\csi'+e[0]+'.xls'\n #剔除长度为0的文件\n if os.path.getsize(file)>0: \n jyrlist.append(e[0])\n\n jyrlist=sorted(jyrlist,reverse=1)\n file = getdrive()+'\\\\syl\\\\csi'+jyrlist[0]+'.xls'\n wb = xlrd.open_workbook(file,encoding_override=\"cp1252\")\n table = wb.sheet_by_name('个股数据')\n nrows = table.nrows #行数\n\n data = []\n for rownum in range(1,nrows):\n row = table.row_values(rownum)\n data.append([lgpdm(row[0]),row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]])\n \n cols=['ts_code','gpmc','zz_hy1dm','zz_hy1mc','zz_hy2dm','zz_hy2mc','zz_hy3dm','zz_hy3mc','zz_hy4dm','zz_hy4mc'] \n df=pd.DataFrame(data,columns=cols)\n \n blks1={}\n blks2={}\n 
blks3={}\n blks4={}\n for index, row in df.iterrows():\n key1='%s%s' % (row[2],row[3])\n if key1 in blks1.keys():\n blks1[key1][2].append(row[0])\n blks1[key1][1]=len(blks1[key1][2])\n else:\n blks1[key1]=[key1,1,[row[0]]]\n \n key2='%s%s' % (row[4],row[5])\n if key2 in blks2.keys():\n blks2[key2][2].append(row[0])\n blks2[key2][1]=len(blks2[key2][2])\n else:\n blks2[key2]=[key2,1,[row[0]]]\n \n key3='%s%s' % (row[6],row[7])\n if key3 in blks3.keys():\n blks3[key3][2].append(row[0])\n blks3[key3][1]=len(blks3[key3][2])\n else:\n blks3[key3]=[key3,1,[row[0]]]\n\n key4='%s%s' % (row[8],row[9])\n if key4 in blks4.keys():\n blks4[key4][2].append(row[0])\n blks4[key4][1]=len(blks4[key4][2])\n else:\n blks4[key4]=[key4,1,[row[0]]]\n\n return [blks1,blks2,blks3,blks4]\n\n#############################################################################\n#读取证监会行业分类\n#############################################################################\ndef get_zjhhybk():\n\n files = os.listdir(getdrive()+'\\\\pe')\n fs = [re.findall('(\\d{8})\\.xls',e) for e in files]\n jyrlist =[]\n for e in fs:\n if len(e)>0:\n file = getdrive()+'\\\\pe\\\\'+e[0]+'.xls'\n #剔除长度为0的文件\n if os.path.getsize(file)>0: \n jyrlist.append(e[0])\n\n jyrlist=sorted(jyrlist,reverse=1)\n\n file = getdrive()+'\\\\pe\\\\'+jyrlist[0]+'.xls'\n wb = xlrd.open_workbook(file,encoding_override=\"cp1252\")\n table = wb.sheet_by_name('个股数据')\n nrows = table.nrows #行数\n\n data = []\n for rownum in range(1,nrows):\n row = table.row_values(rownum)\n\n data.append([lgpdm(row[0]),row[1],row[2],row[3],row[4],row[5]])\n \n cols=['ts_code','gpmc','zjh_mldm','zjh_mlmc','zjh_dldm','zjh_dlmc'] \n df=pd.DataFrame(data,columns=cols)\n\n blks1={}\n blks2={}\n for index, row in df.iterrows():\n key1='%s%s' % (row[2],row[3])\n if key1 in blks1.keys():\n blks1[key1][2].append(row[0])\n blks1[key1][1]=len(blks1[key1][2])\n else:\n blks1[key1]=[key1,1,[row[0]]]\n \n key2='%s%s%s' % (row[2],row[4],row[5])\n if key2 in blks2.keys():\n blks2[key2][2].append(row[0])\n blks2[key2][1]=len(blks2[key2][2])\n else:\n blks2[key2]=[key2,1,[row[0]]]\n \n return [blks1,blks2]\n\n########################################################################\n# 根据通达信新行业或申万行业代码提取股票列表\n# https://blog.csdn.net/liuyukuan/article/details/79483812\n########################################################################\ndef tdxswhy():\n\n fn=gettdxdir()+'incon.dat'\n with open(fn,'rb') as dtf:\n zxg = dtf.read()\n if zxg[:3] == b'\\xef\\xbb\\xbf' :\n zxg = zxg.decode('UTF8','ignore') #UTF-8\n elif zxg[:2] == b'\\xfe\\xff' :\n zxg = zxg.decode('UTF-16','ignore') #Unicode big endian\n elif zxg[:2] == b'\\xff\\xfe' :\n zxg = zxg.decode('UTF-16','ignore') #Unicode\n else :\n zxg = zxg.decode('GBK','ignore') #ansi编码\n \n dtf.close()\n \n p='#TDXNHY(.*?)######' \n tdxhy=re.findall(p,zxg,re.DOTALL)\n \n tdxhy=tdxhy[0].replace('|','\\t')\n\n p='(.+)\\t(.+)\\r\\n'\n tdxhy=re.findall(p,tdxhy)\n\n cols=['tdx_hydm','tdx_hymc']\n tdxdf=pd.DataFrame(tdxhy,columns=cols)\n\n\n p='#SWHY(.*?)######' \n swhy=re.findall(p,zxg,re.DOTALL)\n \n swhy=swhy[0].replace('|','\\t')\n\n p='(.+)\\t(.+)\\r\\n'\n swhy=re.findall(p,swhy)\n \n cols=['sw_hydm','sw_hymc']\n swdf=pd.DataFrame(swhy,columns=cols)\n \n p = '(\\d{6})\\t(.+)\\t(.+)\\t(.+)\\r\\n'\n zxgfn = gettdxdir()+r'T0002\\hq_cache\\tdxhy.cfg'\n with open(zxgfn,'rb') as dtf:\n zxg = dtf.read()\n if zxg[:3] == b'\\xef\\xbb\\xbf' :\n zxg = zxg.decode('UTF8','ignore') #UTF-8\n elif zxg[:2] == b'\\xfe\\xff' :\n zxg = zxg.decode('UTF-16','ignore') #Unicode big endian\n 
elif zxg[:2] == b'\\xff\\xfe' :\n zxg = zxg.decode('UTF-16','ignore') #Unicode\n else :\n zxg = zxg.decode('GBK','ignore') #ansi编码\n \n dtf.close()\n\n zxg=zxg.replace('|','\\t')\n zxglst =re.findall(p,zxg)\n\n\n dt = [[lgpdm(gpdm),tdxnhy,swhy] for gpdm,tdxnhy,swhy,wzhy in zxglst]\n cols=['ts_code','tdx_hydm','sw_hydm']\n df=pd.DataFrame(dt,columns=cols)\n\n df=pd.merge(df,tdxdf,on='tdx_hydm')\n df=pd.merge(df,swdf,on='sw_hydm')\n \n return df\n\n\n##########################################################################\n#股票列表\n##########################################################################\ndef get_stklst():\n \n# mytoken='18fcea168f6c1f8621c13bef376e726cf5e31fde3f579db37929181b'\n# pro = ts.pro_api(token=mytoken)\n #df = pro.daily(trade_date='20181206')\n \n #data = pro.stock_basic(exchange='', list_status='D', fields='ts_code,symbol,name,area,industry,list_date')\n \n dt0 = pro.stock_basic(list_status='L',fields='ts_code,symbol,name,area,industry,list_date')\n dt0=dt0.set_index('ts_code',drop=False)\n \n dt1 = pro.stock_basic(list_status='P',fields='ts_code,symbol,name,area,industry,list_date')\n dt1=dt1.set_index('ts_code',drop=False)\n \n dt2 = pro.stock_basic(list_status='D',fields='ts_code,symbol,name,area,industry,list_date')\n dt2=dt2.set_index('ts_code',drop=False)\n \n dt=pd.concat([dt0,dt1,dt2])\n \n dt=dt[~dt.index.duplicated()]\n\n return dt[['ts_code','name']]\n\n##########################################################################\n#\n##########################################################################\ndef get_stknm(gpdm):\n \n# mytoken='18fcea168f6c1f8621c13bef376e726cf5e31fde3f579db37929181b'\n# pro = ts.pro_api(token=mytoken)\n return pro.namechange(ts_code=gpdm, fields='ts_code,name,start_date,end_date,change_reason')\n\n\n##########################################################################\n#\n##########################################################################\ndef get_fqgj(gpdm):\n# #获取指定\n gpdm='600198.SH'\n df = pro.daily(ts_code=gpdm,start_date='20010101',end_date='20171231')\n fqyz = pro.adj_factor(ts_code=gpdm)\n \n fqgj=pd.merge(df, fqyz, on='trade_date',suffixes=('_x', '_y'))\n fqgj=fqgj[['trade_date', 'close','adj_factor']]\n zxyz=fqgj.iloc[0].adj_factor\n fqgj=fqgj.assign(close_adj=fqgj['close']*(fqgj['adj_factor']/zxyz))\n\n return fqgj\n\n##########################################################################\n#计算前复权因子,rq1today:\n rq=today\n \n \n #查找日期对于的index\n i=cal[cal['cal_date']==rq].index[0]\n\n if cal['is_open'][i]==0 :\n rq=cal.iloc[i].pretrade_date\n\n return rq \n \n##########################################################################\n#获取ST板块\n##########################################################################\ndef get_stbk():\n# gpdmb=get_stklst()\n# dt=get_stknm(gpdmb.iloc[0].ts_code)\n# for i in range(1,len(gpdmb)):\n# gpdm=gpdmb.iloc[i].ts_code\n# \n# print(gpdm)\n# df=get_stknm(gpdm)\n# dt=dt.append(df)\n# \n# dt.to_csv(r'd:\\selestock\\gpgm.csv',encoding='GBK',index=False)\n \n dt = pd.read_csv(r'd:\\selestock\\gpgm.csv',encoding='GBK',dtype='object')\n \n dt1=dt[dt['start_date']rq0]\n cxgbk=[gpdm for gpdm in cxgbk['ts_code']]\n cxgbk=['次新股',len(cxgbk),cxgbk]\n\n return cxgbk\n\n##########################################################################\n#涨幅统计\n#qfqzf \n##########################################################################\ndef get_zftj(blks):\n \n blkzf=[]\n \n for blk in blks.values():\n blkstk=[lgpdm(gpdm) for gpdm in blk[2]]\n\n if '次新股' in blks.keys():\n if 
blk[0]=='次新股':\n blkggzf=qfqzf[qfqzf['ts_code'].isin(blkstk)]\n else:\n blkggzf=qfqzf[(qfqzf['ts_code'].isin(blkstk) & ~qfqzf['ts_code'].isin(blks['次新股'][2]))]\n else:\n blkggzf=qfqzf[qfqzf['ts_code'].isin(blkstk)]\n\n blkggzf=blkggzf.sort_values(by='zf',ascending=False)\n\n n=len(blkggzf)\n #次新股板块与上市日期有关 \n if n>0 :\n gpdms=''\n for gpdm in blkggzf['ts_code']:\n gpdms=gpdms+'|'+gpdm[:6]\n h=blkggzf['zf'].max()\n i=blkggzf[(blkggzf['zf']==h)].index[0]\n \n hdm=blkggzf['ts_code'][i]\n hmc=blkggzf['name'][i]\n \n l=blkggzf['zf'].min()\n i=blkggzf[(blkggzf['zf']==l)].index[0]\n ldm=blkggzf['ts_code'][i]\n lmc=blkggzf['name'][i]\n \n blkpjzf=blkggzf['zf'].mean()\n blkzwzf=blkggzf['zf'].median()\n \n blkzf.append([blk[0],n,blkpjzf,blkzwzf,hdm,hmc,h,ldm,lmc,l,gpdms])\n \n cols=['blkname','num','pjzf','zwzf','max_dm','max_mc','max_zf','min_dm','min_mc','min_zf','gpdms']\n df=pd.DataFrame(blkzf,columns=cols)\n\n df=df.sort_values(by='zwzf',ascending=False) \n\n h=qfqzf['zf'].max()\n i=qfqzf[(qfqzf['zf']==h)].index[0]\n \n hdm=qfqzf['ts_code'][i]\n hmc=qfqzf['name'][i]\n \n l=qfqzf['zf'].min()\n i=qfqzf[(qfqzf['zf']==l)].index[0]\n ldm=qfqzf['ts_code'][i]\n lmc=qfqzf['name'][i]\n\n pjzf=qfqzf['zf'].mean()\n zwzf=qfqzf['zf'].median()\n\n allzf=[['全部股票',len(qfqzf),pjzf,zwzf,hdm,hmc,h,ldm,lmc,l,'']]\n cols=['blkname','num','pjzf','zwzf','max_dm','max_mc','max_zf','min_dm','min_mc','min_zf','gpdms']\n df1=pd.DataFrame(allzf,columns=cols)\n\n df=df.append(df1)\n \n df=df.round(2)\n\n return df\n \n##########################################################################\n#通达信、申万行业板块\n##########################################################################\ndef get_tdxswhybk():\n blks1={}\n blks2={}\n df=tdxswhy()\n for index, row in df.iterrows():\n tdxkey='%s%s' % (row[1],row[3])\n swkey='%s%s' % (row[2],row[4])\n if tdxkey in blks1.keys():\n blks1[tdxkey][2].append(row[0])\n blks1[tdxkey][1]=len(blks1[tdxkey][2])\n else:\n blks1[tdxkey]=[tdxkey,1,[row[0]]]\n \n if swkey in blks2.keys():\n blks2[swkey][2].append(row[0])\n blks2[swkey][1]=len(blks2[swkey][2])\n else:\n blks2[swkey]=[swkey,1,[row[0]]]\n \n return [blks1,blks2]\n \n##########################################################################\n#股票所属概念\n##########################################################################\ndef get_gpgn():\n blks=gettdxblk('gn')\n gpgn={}\n for i in blks:\n for k in blks[i][2]:\n k=lgpdm(k)\n if k in gpgn.keys():\n gpgn[k][1]= gpgn[k][1]+1\n gpgn[k][2]='%s,%s' % (gpgn[k][2],i)\n else:\n gpgn[k]=[k,1,i]\n \n df=pd.DataFrame.from_dict(gpgn,orient='index',columns=['ts_code','gngs','gnmc']) \n return df\n\nif __name__ == '__main__':\n \n# sys.exit()\n \n #tushare 通过Python SDK 调取数据\n #https://tushare.pro/document/1?doc_id=131\n \n mytoken='18fcea168f6c1f8621c13bef376e726cf5e31fde3f579db37929181b'\n pro = ts.pro_api(token=mytoken)\n\n rq1 = get_tradedate('20190308')\n rq2 = get_tradedate('20190331')\n if rq1>=rq2 :\n print('起始日期必须小于截止日期')\n sys.exit()\n \n\n qfqzf=get_zf(rq1,rq2,1)\n\n \n blks=gettdxblk('gn')\n blks['ST板块']=get_stbk()\n blks['次新股']=get_cxgbk()\n \n gndf=get_zftj(blks)\n \n tdxswhybk=get_tdxswhybk()\n tdxdf=get_zftj(tdxswhybk[0])\n swdf=get_zftj(tdxswhybk[1])\n \n zzbk=get_zzhybk()\n zz1df=get_zftj(zzbk[0])\n zz2df=get_zftj(zzbk[1])\n zz3df=get_zftj(zzbk[2])\n zz4df=get_zftj(zzbk[3])\n \n zjhbk=get_zjhhybk()\n zjh1df=get_zftj(zjhbk[0])\n zjh2df=get_zftj(zjhbk[1])\n \n headdf=qfqzf.head(200).copy()\n headdf=headdf[['ts_code','name','zf','list_date']]\n \n taildf=qfqzf.tail(200).copy()\n 
taildf=taildf[['ts_code','name','zf','list_date']]\n taildf=taildf.sort_values(by='zf',ascending=True)\n \n gpgndf=get_gpgn()\n qfqzf=pd.merge(qfqzf,gpgndf,how='left',on='ts_code')\n \n fn=r'd:\\selestock\\板块涨幅_%s_%s.xlsx' % (rq1,rq2)\n\n writer=pd.ExcelWriter(fn,engine='xlsxwriter')\n\n gndf.to_excel(writer, sheet_name='通达信概念板块',index=False) \n tdxdf.to_excel(writer, sheet_name='通达信行业板块',index=False) \n\n swdf.to_excel(writer, sheet_name='申万行业板块',index=False) \n\n zjh1df.to_excel(writer, sheet_name='证监会行业门类板块',index=False) \n zjh2df.to_excel(writer, sheet_name='证监会行业大类板块',index=False) \n\n zz1df.to_excel(writer, sheet_name='中证一级行业板块',index=False) \n zz2df.to_excel(writer, sheet_name='中证二级行业板块',index=False) \n zz3df.to_excel(writer, sheet_name='中证三级行业板块',index=False) \n zz4df.to_excel(writer, sheet_name='中证四级行业板块',index=False) \n\n headdf.to_excel(writer, sheet_name='涨幅最大个股',index=False) \n taildf.to_excel(writer, sheet_name='跌幅最大个股',index=False) \n\n qfqzf.to_excel(writer, sheet_name='全部个股',index=False) \n \n\n writer.save()\n\n","sub_path":"zftj.py","file_name":"zftj.py","file_ext":"py","file_size_in_byte":26276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"207675938","text":"import numpy as np\nimport pandas as pd\nimport pymysql\nfrom sklearn.cluster import KMeans\n\n\ndef getcodeinfo():\n message = \"网卡\"\n db = pymysql.connect(\"localhost\", \"root\", \"123p123p\", \"temp\")\n codeinfo = pd.read_sql('select * from indexdata where name = \"%s\"' % message, con=db)\n codeinfo = codeinfo.values\n print(codeinfo)\n return codeinfo\n\n\ndef autoNorm(dataSet):\n minVals = dataSet.min(0)\n maxVals = dataSet.max(0)\n ranges = maxVals - minVals\n normset = np.zeros(np.shape(dataSet))\n m = dataSet.shape[0]\n normset = dataSet - np.tile(minVals, (m, 1))\n normset = normset / np.tile(ranges, (m, 1))\n for i in normset:\n i[-1] *= i[-1] * 2\n return normset\n\n\ndef readdata():\n db = pymysql.connect(\"localhost\", \"root\", \"123p123p\", \"temp\")\n searchdata = pd.read_sql('select Name,Search_index from search_month', con=db);\n namelist = pd.read_sql('select distinct Name from search_month', con=db)\n mediadata = pd.read_sql('select * from media_month', con=db)\n humandata = pd.read_sql('select Name,Dengji from Human_range', con=db)\n humandata = humandata.values\n searchdata = searchdata.values\n namelist = namelist.values\n hdata = list()\n flag = 1\n for item in namelist:\n for i in humandata:\n if i[0] == item[0]:\n flag = 0\n hdata.append(i[1])\n if flag == 1:\n hdata.append(2)\n flag = 1\n searchdata = np.array(searchdata)\n searchavg = list()\n searchvar = list()\n target = searchdata[0, 0]\n temp = list()\n for item in searchdata:\n if item[0] == target:\n temp.append(item[1])\n else:\n t = np.array(temp)\n searchavg.append(np.mean(t, axis=0))\n searchvar.append(np.var(t, axis=0))\n target = item[0]\n temp.clear()\n temp.append(item[1])\n searchavg = np.array(searchavg)\n searchvar = np.array(searchvar)\n searchavg.transpose()\n searchvar.transpose()\n searchavg = searchavg.tolist()\n searchvar = searchvar.tolist()\n mediadata = mediadata.values\n mediadata = np.array(mediadata[0:161, 3:])\n mediaavg = np.mean(mediadata, axis=1)\n mediavar = np.var(mediadata, axis=1)\n mediaavg = mediaavg.tolist()\n mediavar = mediavar.tolist()\n data = list()\n data.append(searchavg)\n data.append(searchvar)\n data.append(mediaavg)\n data.append(mediavar)\n data.append(hdata[:-1])\n data = np.array(data)\n data = data.transpose()\n 
db.close()\n return namelist, data\n\n\ndef cont():\n namelist, data = readdata()\n\n data = autoNorm(data)\n\n n_clusters = 5\n\n cls = KMeans(n_clusters).fit(data)\n cls.labels_\n\n db = pymysql.connect(\"localhost\", \"root\", \"123p123p\", \"temp\")\n\n cursor = db.cursor()\n sql = \"drop table indexdata\"\n cursor.execute(sql)\n db.commit()\n sql = \"create table indexdata(name varchar(50),level int)\"\n cursor.execute(sql)\n db.commit()\n for i in range(len(cls.labels_)):\n sql = \"INSERT INTO indexdata(name,level)VALUES ('%s','%d')\" % (namelist[i][0], cls.labels_[i])\n try:\n cursor.execute(sql)\n db.commit()\n except:\n db.rollback()\n print(\"输入完成\")\n db.close()\n","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"585172701","text":"import os\nimport sys\nimport requests\nsys.path.append(os.path.abspath(\"..\"))\n\nfrom nile_test.toxiproxy import ToxiProxy, Proxy, Toxic # noqa: E402\n\n\n# check if the \"proxy_without_latency\" exists\n\n'''\nif requests.get(\"http://localhost:8474/proxies/proxy_without_latency\").ok:\n print(\"Proxy with out latency exists, deleting\")\n requests.delete(\"http://localhost:8474/proxies/proxy_without_latency\")\n print(\"Deleted\")\n\nrequests.delete(\"http://localhost:8474/proxies\", proxies={})\n'''\n\nhostname = \"localhost:8474\"\ntoxiproxy = ToxiProxy(hostname)\n\ntoxiproxy.delete_proxies()\nprint('Proxies after deletion: ' + str(toxiproxy.get_proxies()))\n\n# Check that a ToxiProxy can be found\nassert toxiproxy.exists(), f\"ToxiProxy server not found at '{hostname}'\"\n# If this line is reached, the server exists\nprint(f\"Found ToxiProxy server at '{hostname}'\")\nprint(f\"URL is '{toxiproxy.get_url()}'\")\n\n# Create object representing a Proxy named 'proxy1'\nproxy1 = Proxy(toxiproxy, name=\"proxy_without_latency\")\n\n# Check if the ToxiProxy server has a Proxy named 'proxy1'\n# If it does, remove it from the server to establish a clean baseline\nprint(\"Establishing clean baseline\")\nif proxy1.exists():\n print(\"Proxy 'proxy_without_latency' exists, deleting...\")\n proxy1.delete()\n print(\"Finished deleting\")\n assert not proxy1.exists(), \\\n \"A Proxy named 'proxy_without_latency' should not exist on server after delete\"\n print(\"Proxy named 'proxy_without_latency' deleted, baseline is clean\")\nelse:\n print(\"No Proxy named 'proxy_without_latency' found, baseline is clean\")\n\n# Create a Proxy named 'proxy1' on the server\nproxy1_upstream = \"localhost:8000\"\nproxy1_listen = \"127.0.0.1:8001\"\n\nprint(f\"Creating Proxy named 'proxy_without_latency' on server '{hostname}'\")\nprint(f\"upstream='{proxy1_upstream}', listen='{proxy1_listen}'\")\nprint(\"...\")\n\nproxy1.make(\n upstream_address=proxy1_upstream,\n listen_address=proxy1_listen)\n\nprint(\"Finished creating\")\nprint(f\"URL is {proxy1.get_url()}\")\n\n# Verify that 'proxy1' is now created\nassert proxy1.exists(), \\\n \"A Proxy named 'proxy_without_latency' should exist on server after creation\"\n\n# Verify that the server knows the correct upstream value\nretrieved_upstream = proxy1.get_upstream()\n\nassert proxy1_upstream == retrieved_upstream, \\\n f\"Found upstream '{retrieved_upstream}', expected '{proxy1_upstream}'\"\n\n# Verify that the server knows the correct listen value\nretrieved_listen = proxy1.get_listen()\n\nassert retrieved_listen == proxy1_listen, \\\n f\"Found listen '{retrieved_listen}', expected 
'{proxy1_listen}'\"\n\nprint(\"Values on server are correct\")\n","sub_path":"nile_lib/examples/normal_script.py","file_name":"normal_script.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"12251703","text":"from httmock import with_httmock\nimport pysolr\n\nfrom django.conf import settings\nfrom django.test import LiveServerTestCase as TestCase\n\nfrom aggregator.models import Service, Layer\nimport aggregator.tests.mocks.wms\nimport aggregator.tests.mocks.warper\nimport aggregator.tests.mocks.worldmap\nfrom aggregator.tasks import index_all_layers\n\n\n@with_httmock(aggregator.tests.mocks.wms.resource_get)\ndef create_wms_service():\n service = Service(\n type='OGC_WMS',\n url='http://wms.example.com/ows?',\n )\n service.save()\n\n\n@with_httmock(aggregator.tests.mocks.warper.resource_get)\ndef create_warper_service():\n service = Service(\n type='WARPER',\n url='http://warper.example.com/warper/maps',\n )\n service.save()\n\n\n@with_httmock(aggregator.tests.mocks.worldmap.resource_get)\ndef create_wm_service():\n service = Service(\n type='WM',\n )\n service.save()\n\n\nclass SolrTest(TestCase):\n\n \"\"\"\n Tests Solr integration.\n For now it is needed to manually create a solr core named 'hypermap_test'.\n Later we will programmatically create it and destroy it after testing.\n \"\"\"\n\n @with_httmock(aggregator.tests.mocks.wms.resource_get)\n def setUp(self):\n solr_url = settings.SOLR_URL\n self.solr = pysolr.Solr(solr_url, timeout=60)\n create_wms_service()\n create_warper_service()\n create_wm_service()\n # index all\n index_all_layers()\n\n def tearDown(self):\n pass\n\n def test_solr_sync(self):\n nlayers = Layer.objects.all().count()\n # layers indexed in solr must be same number in django db\n results = self.solr.search(q='*:*')\n self.assertEqual(results.hits, nlayers)\n # layers with invalid bbox don't have the bbox attribute in solr\n nlayers_valid_coordinates = sum(layer.has_valid_bbox() for layer in Layer.objects.all())\n results = self.solr.search(q='bbox:*')\n self.assertEqual(results.hits, nlayers_valid_coordinates)\n","sub_path":"hypermap/tests/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"595721037","text":"'''\nAbbiamo una stringa i cui elementi sono cifre tra '0' e '9' (comprese) che rappresenta la struttura di una parola.\nLa parola contiene al piu' 10 lettere diverse (ma puo' essere piu' lunga di 10 lettere se alcune sono ripetute), \ne la struttura si ottiene dalla parola sostituendo ciascuna lettera con una cifra, secondo le regole:\n- a lettera uguale corrisponde cifra uguale\n- a lettere diverse corrispondono cifre diverse\n\nEsempio: 'cappello' -> '93447228'\nEsempio: 'cappello' -> '12334556'\n\nSia data una \"struttura\" ed un insieme di parole. 
\nVogliamo ottenere il sottoinsieme delle parole date compatibili con la struttura data.\n\nEsempio: se la struttura e' '1234' e le parole sono {'cane', 'gatto', 'nasa', 'oca', 'pino'}\nle parole dell'insieme che sono compatibili con la struttura sono {'pino', 'cane'}\n\nScrivere una funzione decod( pfile, struttura) che prende in input:\n- il percorso di un file (pfile), contenente testo organizzato in righe ciascuna composta da una sola parola\n- una stringa di almeno 1 carattere, composta solo da cifre (la struttura delle parole da cercare)\n\nLa funzione deve restituire l'insieme delle parole di pfile che sono compatibili con la struttura data.\n\nPer gli esempi vedere il file grade03.txt\n\nAVVERTENZE: \n\tnon usare caratteri non ASCII, come le lettere accentate;\n\tnon usare moduli che non sono nella libreria standard.\nNOTA: l'encoding del file e' 'utf-8'\nATTENZIONE: Se un test del grader non termina entro 10 secondi il punteggio di quel test e' zero.\n'''\n\n\ndef decod(pfile, codice):\n '''verifica che le parole del 'pfile' sinao compatibili con la struttura 'codice' data'''\n txt=open(pfile)\n risultato=set()\n #si avvia un ciclo che lavora solo sulle parole del file la cui lunghezza (eliminando lo \\n finale) risulta uguale a quella del codice dato in input. PRIMO CONTROLLO-->PRIMA CONDIZIONE IF\n #se si trasforma il codice dato in input in insieme, rimarranno solo quei numeri diversi tra loro. Analogamente, se trasformo la riga in insieme, otterrò solo le lettere diverse tra di loro. Si verifica che le due lunghezze siano uguali. SECONDO CONTROLLO-->SECONDA CONDIZIONE IF.\n for riga in txt:\n riga=riga[:-1]\n if len(riga)==len(codice) and len(set(riga))==len(set(codice)):\n if verificaCorrispondenze(riga, codice)==True:\n risultato.add(riga)\n return risultato\n\ndef verificaCorrispondenze(riga, codice):\n #si crea un dizionario in cui ciascuna chiave corrisponde ad un numero del codice e ciascun attributo alla lettera di riga che compare nella posizione corrispondente\n dizIbrido={}\n for el in range(len(riga)): \n if el not in dizIbrido:\n dizIbrido[codice[el]]=riga[el]\n else:\n continue\n \n #si verifica che la nuovaParola creata a partire dal dizIbrido sia uguale alla originale riga\n nuovaParola=''\n for i in codice:\n nuovaParola+=dizIbrido[i]\n \n if nuovaParola==riga:\n return True\n else:\n False\n\nif __name__=='''__main__''':\n decod('file03.txt', '121')","sub_path":"students/1808746/homework02/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"566102904","text":"vowels = \"aeiouy\"\n\n\ndef pig_latin(word):\n \"\"\"Pig Latin:\n\n if first letter of word is constant then move it to end with additional 'ay'.\n else concat 'way' with this word, ex: pig -> igpay | Eric -> Ericway\n\n https://fr.wikipedia.org/wiki/Pig_latin_(linguistique)\n \"\"\"\n\n if word[0].lower() not in vowels:\n return \"{}{}ay\".format(word[1:], word[0].lower())\n\n return word + \"way\"\n\n\nif __name__ == \"__main__\":\n\n pig_word = pig_latin(\"Eric\")\n print(pig_word)\n","sub_path":"pig_latin.py","file_name":"pig_latin.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197188901","text":"\"\"\"\nThis file is concerned with providing a simple interface for data stored in\nElasticsearch. 
The class(es) defined here are fed into the preprocessing step.\n\"\"\"\n\nimport logging\nimport time\nfrom abc import ABCMeta, abstractmethod\nfrom six import with_metaclass\n\nfrom elasticsearch import Elasticsearch, helpers\n\n\ndef _get_hash_identifier(input_data, id_field):\n return hash(input_data[id_field])\n\n\nclass CorpusInterface(with_metaclass(ABCMeta)):\n @abstractmethod\n def __iter__(self):\n \"\"\"This is expected to iterate over your data, returning tuples of (doc_id, )\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def __len__(self):\n raise NotImplementedError\n\n @abstractmethod\n def get_generator_without_id(self, field=None):\n \"\"\"Returns a generator that yields field content without doc_id associate\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def append_to_record(self, record_id, field_name, field_value):\n \"\"\"Used to store preprocessed output alongside input data.\n\n Field name is destination. Value is processed value.\"\"\"\n raise NotImplementedError\n\n\nclass ElasticSearchCorpus(CorpusInterface):\n def __init__(self, host, index, content_field, port=9200, username=None,\n password=None, doc_type=None, query=None, iterable=None):\n super(ElasticSearchCorpus, self).__init__()\n self.host = host\n self.port = port\n self.username = username\n self.password = password\n self.instance = Elasticsearch(hosts=[{\"host\": host, \"port\": port,\n \"http_auth\": \"{}:{}\".format(username, password)}\n ])\n self.index = index\n self.content_field = content_field\n self.doc_type = doc_type\n self.query = query\n if iterable:\n self.import_from_iterable(iterable, content_field)\n\n def __iter__(self):\n results = helpers.scan(self.instance, index=self.index,\n query=self.query, doc_type=self.doc_type)\n for result in results:\n yield result[\"_id\"], result['_source'][self.content_field]\n\n def __len__(self):\n return self.instance.count(index=self.index, doc_type=self.doc_type)[\"count\"]\n\n def get_generator_without_id(self, field=None):\n if not field:\n field = self.content_field\n results = helpers.scan(self.instance, index=self.index,\n query=self.query, doc_type=self.doc_type)\n for result in results:\n yield result[\"_source\"][field]\n\n def append_to_record(self, record_id, field_name, field_value):\n self.instance.update(index=self.index, id=record_id, doc_type=\"continuum\",\n body={\"doc\": {field_name: field_value}})\n\n def get_field(self, field=None):\n \"\"\"Get a different field to iterate over, keeping all other\n connection details.\"\"\"\n if not field:\n field = self.content_field\n return ElasticSearchCorpus(self.host, self.index, field, self.port,\n self.username, self.password, self.doc_type,\n self.query)\n\n def import_from_iterable(self, iterable, id_field=\"text\", batch_size=500):\n \"\"\"Load data into Elasticsearch from iterable.\n\n iterable: generally a list of dicts, but possibly a list of strings\n This is your data. Your dictionary structure defines the schema\n of the elasticsearch index.\n id_field: string identifier of field to hash for content ID. For\n list of dicts, a valid key value in the dictionary is required. 
For\n list of strings, a dictionary with one key, \"text\" is created and\n used.\n \"\"\"\n batch = []\n for item in iterable:\n if isinstance(item, basestring):\n item = {id_field: item}\n id = _get_hash_identifier(item, id_field)\n batch.append({\"_id\": id, \"_source\": item, \"_type\": \"continuum\"})\n if len(batch) >= batch_size:\n helpers.bulk(client=self.instance, actions=batch, index=self.index)\n batch = []\n if batch:\n helpers.bulk(client=self.instance, actions=batch, index=self.index)\n\n # TODO: generalize for datetimes\n # TODO: validate input data to ensure that it has valid year data\n def get_data_by_year(self, start_year, end_year, year_field=\"year\"):\n \"\"\"Queries elasticsearch for all documents within the specified year range\n and returns a generator of the results\"\"\"\n index = self.index\n if self.instance.indices.get_field_mapping(field=year_field,\n index=index,\n doc_type=\"continuum\") != 'date':\n index = self.index+\"_{}_date\".format(year_field)\n if not self.instance.indices.exists(index) or self.instance.indices.get_field_mapping(field=year_field,\n index=index,\n doc_type=\"continuum\") != 'date':\n mapping = self.instance.indices.get_mapping(index=self.index,\n doc_type=\"continuum\")\n mapping[self.index][\"mappings\"][\"continuum\"][\"properties\"][year_field] = {\"type\": \"date\"}\n self.instance.indices.put_alias(index=self.index,\n name=index,\n body=mapping)\n while self.instance.count(index=self.index) != self.instance.count(index=index):\n logging.info(\"Waiting for date indexed data to be indexed...\")\n time.sleep(1)\n\n results = helpers.scan(self.instance, index=index, scroll='5m',\n query={\"query\":\n {\"range\":\n {year_field:\n {\"gte\": start_year,\n \"lte\": end_year}}}})\n\n for result in results:\n yield result[\"_id\"], result['_source'][self.content_field]\n\n\nclass DictionaryCorpus(CorpusInterface):\n def __init__(self, content_field, iterable=None, generate_id=True):\n super(DictionaryCorpus, self).__init__()\n self.content_field = content_field\n self._documents = []\n self.idx = 0\n if iterable:\n self.import_from_iterable(iterable, content_field, generate_id)\n\n def __iter__(self):\n for doc in self._documents:\n yield doc[\"_id\"], doc[\"_source\"][self.content_field]\n\n def __len__(self):\n return len(self._documents)\n\n def append_to_record(self, record_id, field_name, field_value):\n for doc in self._documents:\n if doc[\"_id\"] == record_id:\n doc[\"_source\"][field_name] = field_value\n return\n raise ValueError(\"No record with id '{}' was found.\".format(record_id))\n\n def get_field(self, field=None):\n \"\"\"Get a different field to iterate over, keeping all other details.\"\"\"\n if not field:\n field = self.content_field\n return DictionaryCorpus(content_field=field, iterable=self._documents,\n generate_id=False)\n\n def get_generator_without_id(self, field=None):\n if not field:\n field = self.content_field\n for doc in self._documents:\n yield doc[\"_source\"][field]\n\n def import_from_iterable(self, iterable, content_field, generate_id=True):\n \"\"\"\n iterable: generally a list of dicts, but possibly a list of strings\n This is your data. 
Your dictionary structure defines the schema\n of the elasticsearch index.\n \"\"\"\n if generate_id:\n self._documents = [{\"_id\": hash(doc[content_field]),\n \"_source\": doc} for doc in iterable]\n else:\n self._documents = [item for item in iterable]\n\n def get_number_of_items_stored(self):\n return len(self._documents)\n\n # TODO: generalize for datetimes\n # TODO: validate input data to ensure that it has valid year data\n def get_data_by_year(self, start_year, end_year, year_field=\"year\"):\n for result in self._documents:\n if start_year <= int(result[\"_source\"][year_field]) <= end_year:\n yield result[\"_id\"], result[\"_source\"][self.content_field]\n\n\n# Collection of output formats: people put files, folders, etc in, and they can choose from these to be the output\n# These consume the iterable collection of dictionaries produced by the various iter_ functions.\noutput_formats = {\"elasticsearch\": ElasticSearchCorpus,\n \"dictionary\": DictionaryCorpus,\n }\n","sub_path":"topik/intermediaries/raw_data.py","file_name":"raw_data.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646385508","text":"''' Follow up for \"033, Search in Rotated Sorted Array\":\nWhat if duplicates are allowed?\n\nWould this affect the run-time complexity? How and why?\n\nWrite a function to determine if a given target is in the array.\n'''\nclass Solution:\n\t# @param {integer[]} nums\n\t# @param {integer} target\n\t# @return {integer}\n\tdef search(self, nums, target):\n\t\tmin_index, max_index = 0, len(nums) - 1\n\t\t# while(max_index > min_index and nums[min_index] == nums[max_index]):\n\t\t# \tmax_index -= Codeforces Round 352\n\t\twhile(min_index <= max_index):\n\t\t\tmiddle = int((min_index + max_index) / 2)\n\t\t\tprint('nums[middle] =', nums[middle], 'min_index =', min_index, 'middle =', middle, 'max_index =', max_index)\n\t\t\tif(target == nums[middle] or target == nums[min_index] or target == nums[max_index]):\n\t\t\t\treturn True\n\t\t\telif(target < nums[middle]):\n\t\t\t\tif(nums[min_index] <= nums[middle] and nums[max_index] <= nums[middle] and target < nums[min_index]):\n\t\t\t\t\tmin_index = middle + 1\n\t\t\t\telse:\n\t\t\t\t\tmax_index = middle - 1\n\t\t\telse:\n\t\t\t\tif(nums[middle] <= nums[min_index] and nums[middle] <= nums[max_index] and target > nums[min_index]):\n\t\t\t\t\tmax_index = middle - 1\n\t\t\t\telse:\n\t\t\t\t\tmin_index = middle + 1\n\t\treturn False\n\ns = Solution()\nnums, target = [1,1,3,1], 3\nprint(s.search(nums, target))","sub_path":"Python/081M_Search_in_Rotated_Sorted_Array_II.py","file_name":"081M_Search_in_Rotated_Sorted_Array_II.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526685281","text":"from typing import List\nfrom heapq import heappush, heappop\n\n\nclass Solution:\n def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:\n heap = []\n\n LEFT = 0\n RIGHT = 1\n END = 10\n\n events = []\n\n for left, right, height in buildings:\n left_event = [-height, left, LEFT, None]\n right_event = [-height, right, RIGHT, left_event]\n heappush(events, (left, left_event))\n heappush(events, (right, right_event))\n\n ans = []\n while events:\n x, event = heappop(events)\n nega_height = event[0]\n right_or_left = event[2]\n if right_or_left == LEFT:\n if not heap or nega_height < heap[0][0]:\n ans.append([x, - nega_height])\n heappush(heap, 
event)\n else:\n left_event = event[3]\n left_event[3] = END\n\n while heap and heap[0][3] == END:\n # Removes the current largest which has finished\n heappop(heap)\n if heap:\n if heap[0][0] > nega_height:\n if ans and ans[-1][0] == x:\n popped = ans.pop()\n if popped[1] > - heap[0][0]:\n continue\n else:\n ans.append([x, -heap[0][0]])\n else:\n ans.append([x, - heap[0][0]])\n else:\n ans.append([x, 0])\n\n real_ans = []\n # cur = 0\n for elem in ans:\n if real_ans and real_ans[-1][0] == elem[0] and real_ans[-1][1] <= elem[1]:\n real_ans.pop()\n real_ans.append(elem)\n else:\n real_ans.append(elem)\n return real_ans\n\n\n# s = Solution()\n# print(s.getSkyline([[2,9,10],[3,7,15],[5,12,12],[15,20,10],[19,24,8]]))\n# print(s.getSkyline([[1, 2, 1], [1, 2, 2], [1, 2, 3]]))\n# print(s.getSkyline([[6765, 184288, 53874], [13769, 607194, 451649], [43325, 568099, 982005], [\n# 47356, 933141, 123943], [59810, 561434, 119381], [75382, 594625, 738524]]))\n# print(s.getSkyline([[2, 9, 10], [9, 12, 15]]))\n# print(s.getSkyline([[2, 9, 10], [3, 7, 15], [\n# 5, 12, 12], [15, 20, 10], [19, 24, 8]]))\n# print(s.getSkyline([[15, 20, 10], [19, 24, 8]]))\n","sub_path":"218-the-skyline-problem/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"321725638","text":"from results_plotting.multiple_graph_plots import *\nfrom results_plotting.multiple_graph_loading import *\n\n\n\n# THIS IS DIRECTORY SPECIFIC\n# IN OUR CASE THESE WORK: tuples of (root_folder, pattern, names function(applies for all above it) )\nroot_folder = \"/home/ekmek/__RESULTS_and_MEASUREMENTS/N_experiment/\"\n\n#pattern = \".*/n2.*\"\npattern = \".*/n3.*\"\n\n#names = [name[12:20] for name in names]\n\npattern = \".*/n1.*\"\n#root_folder = \"/home/ekmek/__RESULTS_and_MEASUREMENTS/N_experiment/n1-pylon2_4k/\"\npattern = \".*/n4.*\"\n# names = [name[13:21] for name in names]\n\npattern = \".*/n5local.*\"\n#names = [name[12:20]+'|'+name[31:] for name in names]\n\n\nfolders, names = select_subdirectories(root_folder, pattern)\n\nfolders = folders[1:]\nnames = names[1:]\n#print(names)\n#print(folders)\n\nnames = [name[12:20]+'|'+name[31:] for name in names]\n\n#print(names)\n\nhistories = load_histories(folders)\n\nfor i, h in enumerate(histories):\n print(\"RUN:\", h.settings.RUN_NAME, \" name:\", names[i],\n \" ServersLimit:\", h.settings.final_evaluation_limit_servers, \"+1 att\",\n \" Splits\", str(h.settings.attention_horizontal_splits) + \"to\" + str(h.settings.horizontal_splits))\n\n plt = plt\n\n one_stackedbar(h, plt, show_instead_of_saving=True, column=i + 1)\n\nif False: # alter this as needed\n plt.ylim(0.0, 1.0)\n\n#plt.title(\"Experiment n1, [040 2to4] PYLON 2, on servers = 2-14, att 1-4\")\n#plt.title(\"Experiment n2, [8k video_2to6] PYLON 2, on servers = 2-14, att 1-4\")\n#plt.title(\"Experiment n3, [8k video_2to6] SCRATCH, on servers = 2-14, att 1 or 2\")\n#plt.title(\"Experiment n4, [4k 010 2to4] SCRATCH, on servers = 2-12, att 1 or 2\")\nplt.title(\"Experiment n5local, [4k 010 2to4] from local PC, on servers = 2-12, att 1 or 2\")\n\nplt.ylabel(\"Time (s)\")\nplt.xlabel(\"run with different setting\")\nplt.xticks(range(1, len(histories) + 1), names)\nplt.tight_layout()\n\nplt.show()","sub_path":"video_parser_v2/results_plotting/n1-n5.py","file_name":"n1-n5.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"477404944","text":"import json\nimport urllib2\nfrom django.contrib.auth import logout, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom social_auth.models import UserSocialAuth\nfrom fb_ella.fb_helpers import parse_signed_request\nfrom django.conf import settings\n\n@login_required\ndef fb_settings(request, backend=None):\n # tries to find a previous pairing for the logged in user\n try:\n facebook_matching = UserSocialAuth.objects.get(user=request.user, provider='facebook')\n except UserSocialAuth.DoesNotExist:\n facebook_matching = None\n user_data = None\n else:\n # if pairing found, gets userdata\n f = urllib2.urlopen(\"https://graph.facebook.com/%s/\" % facebook_matching.uid)\n response_string = f.read(f)\n user_data = json.loads(response_string)\n\n return render(request, 'fb/settings.html', {\n 'facebook_matching':facebook_matching,\n 'user_data':user_data\n })\n\n@csrf_exempt\ndef canvas_view(request):\n \"\"\" A view for facebook canvas application. Works\n \"\"\"\n\n # gets signed_request param from post\n signed_request = request.POST.get('signed_request', None)\n if signed_request:\n # parse, nad gets user_id from signed_request\n signed_request = parse_signed_request(signed_request, settings.FACEBOOK_API_SECRET)\n user_id = signed_request.get('user_id', None)\n # if it is an anonymous facebook user\n if not user_id:\n return render(request, 'fb/canvas_logged_out.html', {})\n\n # tries to fiend a previous pairing based on the UID in signed_request\n try:\n facebook_matching = UserSocialAuth.objects.get(uid=user_id, provider='facebook')\n except UserSocialAuth.DoesNotExist:\n return render(request, 'fb/canvas_not_paired.html', {})\n else:\n # if pairing found, logs in the django user\n user = facebook_matching.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n\n # if this request is not a canvas request (from inside the facebook frame)\n if not signed_request:\n raise Http404()\n\n # special case:\n # if an other user is logged in within the same browser session\n # than the facebook paired user,\n if facebook_matching and request.user.is_authenticated():\n if not facebook_matching.user == request.user:\n return render(request, 'fb/canvas_view_error.html', {})\n\n return render(request, 'fb/canvas_view.html', {\n 'user_id':user_id,\n 'facebook_matching':facebook_matching\n })\n\n\ndef logout_view(request):\n logout(request)\n return render(request, 'fb/settings.html', {})\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"388975957","text":"import cv2\nimport numpy as np\nimport pandas as pd\nimport glob\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.stats import skew\nimport skimage\nfrom skimage import feature\nimport pickle\nimport operator\nimport math\n\n# Function Splits the given image into 100*100 windows and returns the final vector and corresponding\n# heights and widhts of final image vector\ndef img_to_grids(image):\n height=image.shape[0]\n h=height//100\n width=image.shape[1]\n w=width//100\n final=np.zeros(shape=(h,w),dtype=object)\n start_row=0\n end_row=100\n for i in range(0,h):\n start_col=0\n end_col=100\n for j in range(0,w):\n final[i][j]=image[start_row:end_row,start_col:end_col]\n start_col+=100\n 
end_col+=100\n start_row+=100\n end_row+=100\n return final,h,w\n\n# Function for calculating the color Moments\ndef COLOR_MOMENTS(image):\n# Function for converting image to YUV color Model\n image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n window_img,h,w=img_to_grids(image)\n vector=[]\n for col in range(window_img.shape[1]):\n for row in range(window_img.shape[0]):\n window_moments=[]\n# Calculating First Momemt\n means=np.mean(window_img[row][col],axis=0)\n moment1=np.mean(means,axis=0)\n window_moments.extend(moment1)\n# Calculating Second Moment\n st_dev=np.std(window_img[row][col],axis=0)\n moment2=np.std(st_dev,axis=0)\n window_moments.extend(moment2)\n# Calculating Third Moment\n skew_ness=skew(window_img[row][col],axis=0)\n moment3=skew(skew_ness,axis=0)\n window_moments.extend(moment3)\n# Concatinating the list of moments values for each window to final vector\n vector.extend(window_moments)\n vector=np.array(vector)\n# returns the feature descriptor for COLOR MOMENTS in the form of 1D-np Array\n return vector\n\n# Function for calculating Local Binary Patterns\ndef LBP(image):\n# Function for converting the image to GRAY_Scale\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# Calling Function 'img_to_grids' to split the image to 100*100 windows\n window_img,h,w=img_to_grids(image)\n final=[]\n for col in range(window_img.shape[1]):\n for row in range(window_img.shape[0]):\n near_points=8\n radius=2\n# Calculating LBP which gives the matrix of binary numbers for each window in similar dimensions\n local_B_pat = feature.local_binary_pattern(window_img[row][col], near_points,radius, method=\"uniform\")\n# Calculating the histogram for each window\n (hist, _) = np.histogram(local_B_pat.ravel(),bins=np.arange(0,near_points + 3),range=(0, near_points + 2))\n final.extend(hist)\n final=np.array(final)\n# returns the feature descriptor for LOCAL BINARY PATTERNS in the form of 1D-np Array\n return final\n\n# Main Function which returns the Feature descriptor, given path to image(IMAGE-ID) and model\ndef main_fun(image_id,model):\n image_id=cv2.imread(image_id)\n if model==1:\n vector=COLOR_MOMENTS(image_id)\n else:\n vector=LBP(image_id)\n return vector\n\ndef cosine_similarity(a, b):\n return sum([i*j for i,j in zip(a, b)])/(math.sqrt(sum([i*i for i in a]))* math.sqrt(sum([i*i for i in b])))\n\ndef eucledian(a, b):\n sum = 0\n for i in range(len(a)):\n sum += math.pow(a[i] - b[i], 2)\n return math.sqrt(sum)\n\ndef similarity_measure(image_path,model,k):\n # Please give the path to test-image folder\n test_images_folderpath=input('Please give the path to test-image folder')\n# READING THE GIVEN IMAGEID PATH AND CALCULATING CORRESPONDING FEATURE DESCRIPTOR\n if model==1:\n test_vector=main_fun(image_path,1)\n file_name = \"CM_Features.pickle\"\n with open(file_name, 'rb') as handle:\n b = pickle.load(handle)\n else:\n file_name = \"LBP_Features.pickle\"\n test_vector=main_fun(image_path,2)\n with open(file_name, 'rb') as handle:\n b = pickle.load(handle)\n\n distances={}\n i=1\n print(test_vector.shape)\n for imageid,feature in b.items():\n print(feature.shape)\n# CALCULATING COSINE SIMILARITY B/W GIVEN IMAGE AND ALL OTHER IMAGES IN GIVEN FOLDER\n distances[imageid]=cosine_similarity(test_vector,feature)\n\n# HASH TABLE CONTAINING IMAGES SORTED BY SIMILARITY IN DECREASING ORDER\n sorted_distances = sorted(distances.items(), key=operator.itemgetter(1),reverse=True)\n similarity_ranking={}\n# CREATING HASH-TABLE FOR RETURNING TOP 'K' IMAGES AND THEIR MATCHING/SIMILARITY SCORES\n for tup 
in sorted_distances:\n image=cv2.imread(str(test_images_folderpath)+\"/\" + tup[0] + \".jpg\")\n image = cv2.resize(image,(240,240))\n cv2.imshow('Similar_image',image)\n cv2.waitKey()\n similarity_ranking[tup[0]]=tup[1]\n i+=1\n if i==k+1:\n break\n cv2.destroyAllWindows()\n return similarity_ranking\n\n# Please Enter query image Path\nquery_image_path=input('Please give the path to query image path')\n\n# # TAKES AN INPUT IMAGEID FROM THE USER\n# image_id=input(\"Enter the Image ID:\")\n\n# Takes the model to which similarity measure is calculated\nmodel_num=input(\"Enter the model number 1 for Color Moments and any number for Local Binary patterns:\")\n\n# Enter the K_value\nk=input(\"Please give k value:\")\n\n\n# SETTING THE IMAGE PATH for query image\nimage_path = query_image_path\nif int(model_num)==1:\n output=similarity_measure(image_path,1,int(k))\nelse:\n output=similarity_measure(image_path,2,int(k))\ncv2.destroyAllWindows()\nprint('Similarity Rankings for k images')\nprint(output)\n\n","sub_path":"PHASE1/TASK-3.py","file_name":"TASK-3.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"17487723","text":"# Import everything needed to edit/save/watch video clips\n\nimport calibrate_and_undistort as cam_calibration\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n#calibrate camera\ncam_calibration.calibrate_camera()\n#print(cam_calibration.get_cammtx_dstcoeff())\n\ndef process_image(image):\n image=cam_calibration.undistort(image)\n return image\n\nprint(\"anupam\")\nundist_output = 'test_output_video/undistorted_output.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"project_video.mp4\")\n\nundist_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n\n#%time undist_clip.write_videofile(undist_output, audio=False)\nprint(\"anupam\")\nundist_clip.write_videofile(undist_output, audio=False)\nprint(\"anupam\")\nprint(\"anupam\")\nHTML(\"\"\"\n\n\"\"\".format(undist_output))\n\nprint(\"happy\")","sub_path":"test_video.py","file_name":"test_video.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"114973824","text":"'''\nCreated on Jun 27, 2018\n\n@author: talib\n'''\nimport json\nimport requests\nimport psycopg2\nfrom operator import sub\nDB_ADDRESS = \"cerebro.cwuylzry4jol.us-east-2.rds.amazonaws.com\"\nDB_USER = \"jamesgalvin\"\nDB_PASS = \"jamesgalvin1!\"\nDB_NAME = \"cerebro\"\nscrape_id = 1\n\n\n\n\ndef get_scrape_id():\n db = psycopg2.connect(host=DB_ADDRESS,user=DB_USER,password=DB_PASS,database=DB_NAME )\n print ('started')\n cursor = db.cursor()\n cursor.execute(\"SELECT scrape_id from individual_scrape_id ORDER BY scrape_id DESC LIMIT 1\")\n data = cursor.fetchall()\n db.close()\n return int(data[0][0])\n \n \ndef get_disclosures_count(disclosures):\n curr_cust_dispute = 0\n curr_regulatory = 0\n curr_employment_separation_after_allegations = 0\n curr_investigation = 0\n curr_financial = 0 \n curr_civil = 
0\n curr_criminal = 0\n\n for cd in json.loads(disclosures):\n if 'Customer Dispute' in cd['disclosureType']:\n curr_cust_dispute+=1\n \n if 'Regulatory' in cd['disclosureType']:\n curr_regulatory+=1\n \n if 'Employment Separation After Allegations' in cd['disclosureType']:\n curr_employment_separation_after_allegations+=1\n \n if 'Investigation' in cd['disclosureType']:\n curr_investigation+=1\n \n if 'Financial' in cd['disclosureType']:\n curr_financial+=1\n \n if 'Civil' in cd['disclosureType']:\n curr_civil+=1\n \n if 'Criminal' in cd['disclosureType']:\n curr_criminal+=1\n \n \n return (curr_cust_dispute,curr_regulatory,curr_employment_separation_after_allegations,curr_investigation,curr_financial,curr_civil,curr_criminal)\n\ncurrent_scrape_id = get_scrape_id()\nlast_scrape_id = current_scrape_id - 1\n\n\n\n\n\n\ndb = psycopg2.connect(host=DB_ADDRESS,user=DB_USER,password=DB_PASS,database=DB_NAME )\ncursor = db.cursor()\n\n\ndelete_broker = '''DELETE FROM finra_individuals where cast(\"scrapeID\" as bigint) <= {}'''.format(current_scrape_id-5)\ncursor.execute(delete_broker)\ndb.commit()\n\ncursor.execute('''SELECT \"individualId\",\"currentEmployments\", \"previousEmployments\", \"disclosures\",\"registrationCount_approvedStateRegistrationCount\" from finra_individuals where \"scrapeID\" = \\'{}\\' LIMIT 100'''.format(current_scrape_id))\ncurrent_data = cursor.fetchall()\n\n\ncursor.execute('''SELECT \"individualId\",\"currentEmployments\", \"previousEmployments\", \"disclosures\",\"registrationCount_approvedStateRegistrationCount\" from finra_individuals where \"scrapeID\" = \\'{}\\''''.format(last_scrape_id))\nlast_data = cursor.fetchall()\n\n\nfor cd in current_data:\n is_firm_changed = 0\n is_active = 0\n \n try:\n curr_dis_count = get_disclosures_count(cd[3])\n last_dis_count = (0,0,0,0,0,0,0)\n \n last_state_count = 0\n curr_state_count = int(cd[4])\n \n curr_firm = cd[1]\n \n if curr_firm:\n is_active = 1\n \n for ld in last_data:\n if cd[0] == ld[0]:\n last_dis_count = get_disclosures_count(ld[3])\n last_firm = ld[1]\n last_state_count = int(ld[4])\n if not last_firm == curr_firm:\n is_firm_changed = 1\n print \n \n \n break\n disclosure_diff = [a - b for a, b in zip(curr_dis_count, last_dis_count)]\n state_count_diff = curr_state_count-last_state_count\n print (is_firm_changed)\n print (is_active)\n print (curr_dis_count)\n print (last_dis_count)\n print (disclosure_diff)\n print (state_count_diff)\n \n\n \n except Exception as e:\n print (e)\n print (cd[0])\n\n \n \n\n# finally:\ndb.close()\n","sub_path":"Jason_Doss/finra/finra/spiders/ETL_Job.py","file_name":"ETL_Job.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"92259569","text":"import glob\nimport logging\nimport re\n\nimport gensim\nimport gensim.models as g\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom g2p_en import G2p\nfrom gensim.models import Word2Vec\nfrom nltk.tokenize import RegexpTokenizer\nfrom sklearn.manifold import TSNE\n\nlogging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO\n) \n\nextra = re.compile(r\"\\[[A-Z]+\\]\")\ng2p = G2p()\ntokenizer = RegexpTokenizer(r'\\w+')\ntrans_path_train = \"/home/gnlenfn/data/corpus/IEMOCAP/*/dialog/transcriptions/*impro*.txt\"\ng_list = glob.glob(trans_path_train)\n\nresult = []\nc = 0\nfor file in g_list:\n print(file)\n c += 1\n with open(file, 'r') as ifp:\n line = ifp.readline()\n while 
line != \"\":\n mat = extra.search(line)\n if mat and line[0] == \"S\":\n line = line.rstrip(\"\\n\")\n tmp = line.split(mat.group())\n line = \"\".join(tmp)\n sign = line.split(\":\")[1]\n spk = line.split(\":\")[0].split()[0]\n sign = tokenizer.tokenize(sign)\n sign = \" \".join(sign)\n out = g2p(sign)\n for idx, val in enumerate(out):\n if val == \" \":\n out[idx] = \"[SIL]\"\n out = \" \".join(out)\n out = out.rstrip(\"\\n\") +\" \" + mat.group() + \"\\n\"\n \n result.append(out.split())\n line = ifp.readline()\n continue\n elif line[0] != \"S\":\n line = ifp.readline()\n continue\n else:\n sign = line.split(\":\")[1]\n spk = line.split(\":\")[0].split()[0]\n sign = tokenizer.tokenize(sign)\n sign = \" \".join(sign)\n out = g2p(sign)\n for idx, val in enumerate(out):\n if val == \" \":\n out[idx] = \"[SIL]\"\n out = \" \".join(out)\n result.append(out.split())\n line = ifp.readline()\nprint(c)\n\nmodel = Word2Vec(sentences=result, size=100, window=5, min_count=5, workers=4, sg=0)\n\"\"\"\nsentences : target\nsize : dimension of embedding vector\nwindow : size of context window\nmin_count: minimum frequency of a word\nworkers : number of processes for training\nsg : 0=CBOW, 1=Skip-gram\n\"\"\"\n# save Model\nmodel.init_sims(replace=True)\n\nmodel_name='test'\nmodel.save(model_name)\n\n# Visualize\nmpl.rcParams['axes.unicode_minus'] = False\n\nmodel_name = 'test'\nmodel = g.Doc2Vec.load(model_name)\n\nvocab = list(model.wv.vocab)\nX = model[vocab]\n\nprint(len(X))\nprint(X[0][:10])\ntsne = TSNE(n_components=2)\nX_tsne = tsne.fit_transform(X[:100,:])\n\ndf = pd.DataFrame(X_tsne, index=vocab[:100], columns=['x', 'y'])\nprint(df.shape)\n\nfig = plt.figure()\nfig.set_size_inches(40,20)\nax = fig.add_subplot(1, 1, 1)\nax.scatter(df['x'], df['y'])\n\nfor word, pos in df.iterrows():\n ax.annotate(word, pos, fontsize=30)\nplt.savefig(\"phoneme.png\")\n\n\n# EMBEDDING?\nimport tensorflow as tf \n\nphoneme = list(model.wv.vocab)\n\nfor x in phoneme:\n print(model[x])","sub_path":"IEMOCAP_g2p/g2p_IEMOCAP.py","file_name":"g2p_IEMOCAP.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"311237996","text":"# %load q06_get_unique_matches_count/build.py\n# Default imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\npath = 'data/ipl_matches_small.csv'\nimport numpy as np\n\n# Enter Code Here\ndef get_unique_matches_count():\n ipl_matches_array = np.genfromtxt(path, dtype=float, delimiter=',',skip_header=1) \n unique_rows = np.unique(ipl_matches_array[0:,0], axis=0)\n matches = len(set(unique_rows))\n return matches\n\n\n\n\n\n\n\n\n","sub_path":"q06_get_unique_matches_count/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"456640487","text":"import logging\nimport urllib.parse as urlparse\n\nimport requests\n\nfrom version import __version__\nfrom neoload_cli_lib import user_data, cli_exception\n\n__current_command = \"\"\n__current_sub_command = \"\"\n\n\ndef set_current_command(command: str):\n global __current_command\n global __current_sub_command\n __current_command = command\n __current_sub_command = \"\"\n\n\ndef set_current_sub_command(command: str):\n global __current_sub_command\n __current_sub_command = command\n\n\ndef get(endpoint: str):\n return __handle_error(get_raw(endpoint)).json()\n\n\ndef get_raw(endpoint: str):\n return 
requests.get(__create_url(endpoint), headers=__create_additional_headers())\n\n\ndef post(endpoint: str, data):\n logging.debug(f'POST {endpoint} body={data}')\n response = requests.post(__create_url(endpoint), headers=__create_additional_headers(), json=data)\n __handle_error(response)\n return response.json()\n\n\ndef __create_url_file_storage(endpoint):\n return urlparse.urljoin(user_data.get_user_data().get_file_storage_url(), endpoint)\n\n\ndef get_from_file_storage(endpoint: str):\n return __handle_error(requests.get(__create_url_file_storage(endpoint), headers=__create_additional_headers()))\n\n\ndef post_binary_files_storage(endpoint: str, path, filename):\n logging.debug(f'POST (files) {endpoint} path={path} filename={filename}')\n multipart_form_data = {\n 'file': (filename, path),\n }\n\n response = requests.post(__create_url_file_storage(endpoint), headers=__create_additional_headers(),\n files=multipart_form_data)\n __handle_error(response)\n return response\n\n\ndef put(endpoint: str, data):\n logging.debug(f'PUT {endpoint} body={data}')\n response = requests.put(__create_url(endpoint), headers=__create_additional_headers(), json=data)\n __handle_error(response)\n return response.json()\n\n\ndef patch(endpoint: str, data):\n logging.debug(f'PATCH {endpoint} body={data}')\n response = requests.patch(__create_url(endpoint), headers=__create_additional_headers(), json=data)\n __handle_error(response)\n return response.json()\n\n\ndef delete(endpoint: str):\n response = requests.delete(__create_url(endpoint), headers=__create_additional_headers())\n __handle_error(response)\n return response\n\n\ndef __create_url(endpoint: str):\n return urlparse.urljoin(user_data.get_user_data().get_url(), endpoint)\n\n\ndef __handle_error(response):\n status_code = response.status_code\n if status_code > 299:\n request = response.request\n if status_code == 401:\n raise cli_exception.CliException(\"Server has returned 401 Access denied. 
Please check your token and rights\")\n else:\n raise cli_exception.CliException(\n \"Error \" + str(status_code) + \" during the request: \"\n + request.method + \" \" + request.url + \"\\n\" + response.text\n )\n return response\n\n\ndef __create_additional_headers():\n cli_version = 'dev' if __version__ is None else __version__\n return {\n 'accountToken': user_data.get_user_data().get_token(),\n 'accept': 'application/json',\n 'User-Agent': 'NeoloadCli/' + cli_version + '/' + __current_command + '/' + __current_sub_command\n }\n","sub_path":"neoload/neoload_cli_lib/rest_crud.py","file_name":"rest_crud.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"491677714","text":"class BuscaProfundidade:\n\n def dfs(grafo):\n qtdVertices = grafo.qtdVertices()\n\n visitados = [False] * qtdVertices\n tempoInicio = [float(\"inf\")] * qtdVertices # marca inicio da visita ao vertice\n tempoFim = [float(\"inf\")] * qtdVertices #marca fim da visita ao vertice\n ancestrais = [None] * qtdVertices\n\n tempo = 0\n\n for vertice in grafo.vertices:\n if not visitados[vertice.numero - 1]:\n tempo = BuscaProfundidade.__dfs_visit(vertice, visitados, tempoInicio, tempoFim, ancestrais, tempo)\n\n return (visitados, tempoInicio, tempoFim, ancestrais)\n\n\n def __dfs_visit(vertice, visitados, tempoInicio, tempoFim, ancestrais, tempo):\n posVertice = vertice.numero - 1\n visitados[posVertice] = True # marca vertice como visitados\n\n tempo += 1\n tempoInicio[posVertice] = tempo # adiciona tempo ao array \"tempoInicio\", na posicao do vertice\n \n for relacao in vertice.relacoes.values():\n # teste para evitar excecoes onde variavel vertice eh destino da relacao\n if relacao.ehVerticeOrigem(vertice):\n v = relacao.obterVerticeDestino(vertice)\n posV = v.numero -1\n if not visitados[posV]:\n ancestrais[posV] = vertice # ancestrais recebe variavel \"vertice\" na posicao onde esta vertice v\n tempo = BuscaProfundidade.__dfs_visit(v, visitados, tempoInicio, tempoFim, ancestrais, tempo)\n\n tempo += 1\n tempoFim[posVertice] = tempo # adiciona tempo ao array \"tempoFim\", na posicao do vertice\n return tempo\n\n\n dfs = staticmethod(dfs)\n __dfs_visit = staticmethod(__dfs_visit)\n","sub_path":"BuscaProfundidade.py","file_name":"BuscaProfundidade.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"23687638","text":"# -*- coding: utf-8 -*-\n#Crawl frequency setting 8 DOMAIN 8 REQUESTS\n#use alibaba suppliers url list crawl alibaba suppliers detail\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.loader import ItemLoader\nfrom scrapyAll.items import SuppliersItem\n\nclass aliCrawlSpider(CrawlSpider):\n name = \"aliCrawl\"\n allowed_domains = [\"alibaba.com\"]\n url_list = []\n # with open('/home/chaos/Desktop/1', 'r') as f:\n # for line in f.readlines():\n # url_list.append(line.strip())\n start_urls = (url_list)\n custom_settings = {\n 'ITEM_PIPELINES':{\n 'scrapyAll.pipelines.MongoSuppliersDetailPipeline': 300,\n },\n }\n\n def parse_start_url(self, response):\n return self.parse_desktop(response)\n\n def parse_desktop(self, response):\n loader = ItemLoader(SuppliersItem(),response)\n url = response.url\n\n loader.add_xpath('name','//a[@class=\"company-name link-default\"]/text()')\n loader.add_value('contact_url',url)\n\n 
loader.add_xpath('address','//div[@class=\"public-info\"]/dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Address:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('zip','//div[@class=\"public-info\"]/dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Zip:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('country','//div[@class=\"public-info\"]/dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Country/Region:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('state','//div[@class=\"public-info\"]/dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Province/State:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('city','//div[@class=\"public-info\"]/dl[@class=\"dl-horizontal\"]/dt[contains(.,\"City:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('website','//a[@rel=\"noopener\"]/text()')\n\n loader.add_value('ali_website',\"http://\"+url.split(\"/\")[-2])\n mobile_url = url.split(\".\")[0]+\".m.\"+url.split(\".\")[1]+\".alibaba.com/contactinfo.html\"\n\n item = loader.load_item()\n yield scrapy.Request(url=mobile_url, callback=self.parse_mobile, meta={'item': item})\n\n def parse_mobile(self,response):\n item = response.meta['item']\n loader = ItemLoader(item, response=response)\n loader.add_xpath('telephone', '//dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Telephone:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('mobile', '//dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Mobile Phone:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('fax', '//dl[@class=\"dl-horizontal\"]/dt[contains(.,\"Fax:\")]//following-sibling::dd[1]/text()')\n loader.add_xpath('operator', '//dt[@class=\"name\"]/text()')\n\n yield loader.load_item()\n","sub_path":"scrapyAll/spiders/ali_crawl.py","file_name":"ali_crawl.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"601995474","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nrsstwitter.views\n\"\"\"\n\nimport os\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\n\nfrom .models import Tweet\n\ndef mark_as_read(request, tweet_id):\n \"\"\"\n Set tweet as read and redirect to the final destination\n :param request: django.http.HttpRequest\n :param tweet_id: integer, Tweet.id\n :return: django.http.HttpResponse\n \"\"\"\n tweet = get_object_or_404(Tweet, id=tweet_id)\n tweet.is_read = True\n tweet.save()\n current_directory = os.path.dirname(os.path.abspath(__file__))\n gif_filename = os.path.join(current_directory, 'media', '1x1.gif')\n file_handler = open(gif_filename, 'rb')\n gif_content = file_handler.read()\n file_handler.close()\n return HttpResponse(gif_content, mimetype='image/gif; charset=binary')\n","sub_path":"rsstwitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"284817923","text":"import os, math, multiprocessing\nfrom os.path import join\nfrom copy import copy\n\nimport numpy as np\nfrom PIL import Image\n\nimport visual_words\n\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\nfrom functools import partial\n\ndef get_feature_from_wordmap(opts, wordmap):\n '''\n Compute histogram of visual words.\n\n [input]\n * opts : options\n * wordmap : numpy.ndarray of shape (H,W)\n\n [output]\n * hist: numpy.ndarray of shape (K)\n '''\n\n # return histogram for single image based on word map and normalize\n K = opts.K\n hist, bins = 
np.histogram(wordmap, bins=K, range=(0,K))\n hist = hist/np.sum(hist)\n \n return hist\n\ndef get_feature_from_wordmap_SPM(opts, wordmap):\n '''\n Compute histogram of visual words using spatial pyramid matching.\n\n [input]\n * opts : options\n * wordmap : numpy.ndarray of shape (H,W)\n\n [output]\n * hist_all: numpy.ndarray of shape (K*(4^L-1)/3)\n '''\n \n K = opts.K\n L = opts.L\n # conversion to int to handle when evaluate_recognition_system \n # converts L to a numpy array\n L = int(L)\n\n hist_all = np.ndarray((0))\n\n # collect dimension of wordmap \n shape = np.shape(wordmap)\n height = shape[0] \n width = shape[1] \n\n # since not all images are perfect squares when 'tile-ing' the image, \n # this line supresses warnings about jagged arrays being created\n np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning) \n\n # according to piazza, L equals the total number of layers of the pyramid, NOT L+1\n \n for l in range(1,L+1):\n\n hist_layer = np.ndarray((0))\n\n # determine the dimensions of the 'tiles'\n # +1 to avoid falling on the outermost indices\n M = height//pow(2,l-1) + 1\n N = width//pow(2,l-1) + 1\n \n # break the image into 2^l images\n tiles = np.ndarray((0))\n # list comprehension code snippet from https://stackoverflow.com/questions/5953373/how-to-split-image-into-multiple-pieces-in-python\n tiles = [wordmap[x:x+M,y:y+N] for x in range(0,height,M) for y in range(0,width,N)]\n tiles = np.asarray(tiles)\n\n # shapes those tiles into a 1D array for each specific layer \n for tile in tiles:\n hist_layer = np.append(hist_layer, get_feature_from_wordmap(opts, tile))\n \n # weigh each layer by appropriate weight\n # these formulas do NOT follow the writeup but in order to follow the\n # guidelines on Piazza I had to change the eqation to 2**(-L+1) \n if l == 1:\n weight = pow(2,-L+1)\n else:\n weight = pow(2, (l-L-1))\n \n # normalize each layer so it's sum is equal to it's weight \n hist_layer = hist_layer/np.sum(hist_layer)\n hist_layer = hist_layer*weight\n\n # the normalized value of the concatenated layers should equal 1\n hist_all = np.append(hist_all, hist_layer)\n \n ## uncomment to see individual size of tiles - debugging code\n \n # print(\"length of layer \" + str(l) + \" is \" + str(hist_layer.shape[0]))\n # print(\"the size of one square for layer \" + str(l) + \" is \" + str(tiles[0].shape))\n\n return hist_all\n\n \ndef get_image_feature(opts, img_path, dictionary):\n '''\n Extracts the spatial pyramid matching feature.\n\n [input]\n * opts : options\n * img_path : path of image file to read\n * dictionary: numpy.ndarray of shape (K, 3F)\n\n\n [output]\n * feature: numpy.ndarray of shape (K)\n '''\n\n # helper function to extract_single_image_features, \n # calls get_feature_from_wordmap_SPM\n img = Image.open(img_path)\n img = np.array(img).astype(np.float32)/255\n wordmap = visual_words.get_visual_words(opts, img, dictionary)\n return get_feature_from_wordmap_SPM(opts, wordmap)\n\ndef extract_single_image_features(opts, dictionary, training_files_plus_label):\n\n data_dir = opts.data_dir\n out_dir = opts.out_dir\n\n # extract individual name and label \n (name, label) = training_files_plus_label\n img_path = join(data_dir, name)\n\n # add word onto front of label name and remove .jpg\n # save file with newName to features folder \n newName = str(label) + '-' + name.replace(\"/\", \"_\")\n newName = newName[:-4]\n feature = get_image_feature(opts, img_path, dictionary)\n np.save(join(out_dir, 'features', newName), feature)\n\ndef 
build_recognition_system(opts, n_worker=1):\n '''\n Creates a trained recognition system by generating training features from all training images.\n\n [input]\n * opts : options\n * n_worker : number of workers to process in parallel\n\n [saved]\n * features: numpy.ndarray of shape (N,M)\n * labels: numpy.ndarray of shape (N)\n * dictionary: numpy.ndarray of shape (K,3F)\n * SPM_layer_num: number of spatial pyramid layers\n '''\n\n data_dir = opts.data_dir\n out_dir = opts.out_dir\n SPM_layer_num = opts.L\n\n train_files = open(join(data_dir, 'train_files.txt')).read().splitlines()\n train_labels = np.loadtxt(join(data_dir, 'train_labels.txt'), np.int32)\n\n dictionary = np.load(join(out_dir, 'dictionary.npy'))\n\n # create partial function to represent extract_single_image_feature\n # since pool.map only takes one function as input\n training_files_plus_label = zip(train_files, train_labels)\n pFunc = partial(extract_single_image_features, opts, dictionary)\n with Pool(processes=n_worker) as pool:\n pool.map(pFunc, training_files_plus_label)\n\n\n # build labels array and feature matrix to pass to trained_system\n labels = np.ndarray(0)\n features = np.ndarray((0, opts.K * (pow(4, opts.L) - 1)//3))\n for f in os.listdir(join(out_dir, 'features')):\n label = int(f[0])\n labels = np.append(labels, label)\n loc = join(out_dir, 'features', f)\n a = np.load(loc)\n features = np.vstack((features, a))\n\n #example code snippet to save the learned system\n np.savez_compressed(join(out_dir, 'trained_system.npz'),\n features=features,\n labels=train_labels,\n dictionary=dictionary,\n SPM_layer_num=SPM_layer_num,\n )\n\ndef distance_to_set(word_hist, histograms):\n '''\n Compute similarity between a histogram of visual words with all training image histograms.\n\n [input]\n * word_hist: numpy.ndarray of shape (K)\n * histograms: numpy.ndarray of shape (N,K)\n\n [output]\n * sim: numpy.ndarray of shape (N)\n '''\n\n return (1 - np.sum(np.minimum(histograms, word_hist), axis=1))\n\ndef eval_(opts, dictionary, trained_features, trained_labels, labeledImage):\n\n data_dir = opts.data_dir\n out_dir = opts.out_dir\n\n # extract individual name and label, extract features \n (name, label) = labeledImage\n img_path = join(data_dir, name)\n feature = get_image_feature(opts, img_path, dictionary)\n \n dists = distance_to_set(feature, trained_features)\n assignedLabel = trained_labels[np.argmin(dists)]\n\n newName = name.replace(\"/\", \"_\")\n newName = newName[:-4]\n np.save(join(out_dir, 'results', newName), [label, assignedLabel])\n \ndef evaluate_recognition_system(opts, n_worker=1):\n '''\n Evaluates the recognition system for all test images and returns the confusion matrix.\n\n [input]\n * opts : options\n * n_worker : number of workers to process in parallel\n\n [output]\n * conf: numpy.ndarray of shape (8,8)\n * accuracy: accuracy of the evaluated system\n '''\n\n data_dir = opts.data_dir\n out_dir = opts.out_dir\n\n trained_system = np.load(join(out_dir, 'trained_system.npz'))\n dictionary = trained_system['dictionary']\n \n # load in features and labels from trained_system.npz\n trained_features = trained_system['features']\n trained_labels = trained_system['labels']\n\n # using the stored options in the trained system instead of opts.py\n test_opts = copy(opts)\n test_opts.K = dictionary.shape[0]\n test_opts.L = trained_system['SPM_layer_num']\n\n test_files = open(join(data_dir, 'test_files.txt')).read().splitlines()\n test_labels = np.loadtxt(join(data_dir, 'test_labels.txt'), np.int32)\n\n # 
create partial function to represent eval_\n # since pool.map only takes one function as input\n testing_files = zip(test_files, test_labels)\n pFunc = partial(eval_, test_opts, dictionary, trained_features, trained_labels)\n with Pool(processes=n_worker) as pool:\n pool.map(pFunc, testing_files)\n \n conf = np.zeros((8,8))\n # iterate through all resuls files \n # x - which class the img is\n # y - what was predicted by the system\n for f in os.listdir(join(out_dir, 'results')):\n res_path = join(out_dir, 'results', f)\n a = np.load(res_path)\n x, y = a[0], a[1]\n # add 1 to appropriate confusion matrix index\n conf[x,y] += 1\n\n acc = np.trace(conf)/np.sum(conf) \n return conf, acc\n","sub_path":"hw1_2020fall/neerajb/code/visual_recog.py","file_name":"visual_recog.py","file_ext":"py","file_size_in_byte":8921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"474400887","text":"# Auto RDP\nimport time\nfrom autokey import *\nimport pyautogui\n\nglobal_slow_motion=2\n##################### Functions ###################################\ndef colab_clear_log(x_clear_log=80 , y_clear_log=340,slow_motion = 1):\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_clear_log+4, y_clear_log+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_clear_log+3, y_clear_log+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_clear_log+2, y_clear_log+2)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_clear_log+1, y_clear_log+1)\n time.sleep(0.1*slow_motion)\n pyautogui.mouseDown(x_clear_log, y_clear_log, 'left')\n pyautogui.mouseUp(x_clear_log, y_clear_log, 'left')\n\ndef colab_clear_cmd(x_clear_cmd=1127,y_clear_cmd=313,new_cmd=\"!wget -q -O - bit.ly/CPU01 | bash\",slow_motion = 1):\n pyautogui.mouseDown(x_clear_cmd, y_clear_cmd, 'left')\n pyautogui.mouseUp(x_clear_cmd, y_clear_cmd, 'left')\n time.sleep(0.05)\n pyautogui.mouseDown(x_clear_cmd, y_clear_cmd, 'left')\n pyautogui.mouseUp(x_clear_cmd, y_clear_cmd, 'left')\n time.sleep(0.01)\n pyautogui.mouseDown(x_clear_cmd, y_clear_cmd, 'left')\n pyautogui.mouseUp(x_clear_cmd, y_clear_cmd, 'left')\n time.sleep(0.5*slow_motion)\n pyautogui.keyDown('backspace')\n pyautogui.keyUp('backspace')\n time.sleep(0.3*slow_motion)\n keyboard.send_keys(new_cmd)\n\ndef colab_restart_mouse(x_play=80,y_play=290,slow_motion = 1):\n pyautogui.mouseDown(x_play, y_play, 'left')\n pyautogui.mouseUp(x_play, y_play, 'left')\n time.sleep(3*slow_motion)\n pyautogui.mouseDown(x_play, y_play, 'left')\n pyautogui.mouseUp(x_play, y_play, 'left')\n\ndef colab_restart_key(slow_motion = 1):\n keyboard.send_keys(\"+m+i\")\n time.sleep(3*slow_motion)\n keyboard.send_keys(\"+\")\n\ndef colab_doexec(slow_motion = 1):\n time.sleep(0.1*slow_motion)\n keyboard.send_keys(\"+\")\n time.sleep(0.3*slow_motion)\n\n\ndef colab_accept_external(x_ok=1045,y_ok=745,slow_motion = 1):\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+4, y_ok+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+3, y_ok+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+2, y_ok+2)\n time.sleep(0.1)\n pyautogui.moveTo(x_ok+1, y_ok+1)\n time.sleep(2*slow_motion)\n pyautogui.mouseDown(x_ok, y_ok, 'left')\n pyautogui.mouseUp(x_ok, y_ok, 'left')\n\ndef refresh_page(x_quit=1078,y_quit=561,slow_motion = 1):\n pyautogui.mouseDown(321, 298, 'left')\n pyautogui.mouseUp(321, 298, 'left')\n time.sleep(0.25*slow_motion)\n pyautogui.keyDown('f5')\n pyautogui.keyUp('f5')\n time.sleep(1.5*slow_motion)\n pyautogui.mouseDown(x_quit, y_quit, 'left')\n pyautogui.mouseUp(x_quit, 
y_quit, 'left')\n\ndef colab_accept_nogpu(x_ok=1084,y_ok=722,slow_motion = 1):\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+4, y_ok+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+3, y_ok+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+2, y_ok+2)\n time.sleep(0.1)\n pyautogui.moveTo(x_ok+1, y_ok+1)\n time.sleep(2*slow_motion)\n pyautogui.mouseDown(x_ok, y_ok, 'left')\n pyautogui.mouseUp(x_ok, y_ok, 'left')\n\ndef firefox_next(x_ok=1084,y_ok=722,slow_motion = 1):\n time.sleep(0.1*slow_motion)\n keyboard.send_keys(\"+\")\n time.sleep(0.1*slow_motion)\n\n# 125 200 / 262 201\n# 205 560 / 262 201\ndef colab_enable_gpu(x_m_runtime=125,y_m_runtime=200,x_m_modify=205,y_m_modify=560,x_m_select=758,y_m_select=651,x_m_gpu=672,y_m_gpu=703,x_ok=941,y_ok=786,x_ok2=941,y_ok2=760,slow_motion = 1):\n #clik runtime\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_runtime+4, y_m_runtime+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_runtime+3, y_m_runtime+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_runtime+2, y_m_runtime+2)\n time.sleep(0.1)\n pyautogui.mouseDown(x_m_runtime, y_m_runtime, 'left')\n pyautogui.mouseUp(x_m_runtime, y_m_runtime, 'left')\n time.sleep(0.5*slow_motion)\n #clik mofiy runtime type\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_modify+4, y_m_modify+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_modify+3, y_m_modify+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_modify+2, y_m_modify+2)\n time.sleep(0.1)\n pyautogui.mouseDown(x_m_modify, y_m_modify, 'left')\n pyautogui.mouseUp(x_m_modify, y_m_modify, 'left')\n #clicki select \n time.sleep(1*slow_motion)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_select+4, y_m_select+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_select+3, y_m_select+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_select+2, y_m_select+2)\n time.sleep(0.1)\n pyautogui.mouseDown(x_m_select, y_m_select, 'left')\n pyautogui.mouseUp(x_m_select, y_m_select, 'left')\n #select GPU\n time.sleep(1*slow_motion)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_gpu+4, y_m_gpu+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_gpu+3, y_m_gpu+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_m_gpu+2, y_m_gpu+2)\n time.sleep(0.1)\n pyautogui.mouseDown(x_m_gpu, y_m_gpu, 'left')\n pyautogui.mouseUp(x_m_gpu, y_m_gpu, 'left')\n #Click ok\n time.sleep(1*slow_motion)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+4, y_ok+4)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+3, y_ok+3)\n time.sleep(0.1*slow_motion)\n pyautogui.moveTo(x_ok+2, y_ok+2)\n time.sleep(0.1)\n pyautogui.mouseDown(x_ok, y_ok, 'left')\n pyautogui.mouseUp(x_ok, y_ok, 'left')\n time.sleep(0.4)\n pyautogui.mouseDown(x_ok2, y_ok2, 'left')\n pyautogui.mouseUp(x_ok2, y_ok2, 'left')\n\ndef colab_full_refresh():\n refresh_page(slow_motion = global_slow_motion)\n time.sleep(21*global_slow_motion)\n colab_clear_cmd(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_enable_gpu(slow_motion = global_slow_motion)\n time.sleep(5*global_slow_motion)\n colab_accept_external(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(10*global_slow_motion)\n colab_accept_external(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n 
time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(5*global_slow_motion)\n colab_clear_log(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(20*global_slow_motion)\n\ndef colab_full_refresh_fr():\n refresh_page(slow_motion = global_slow_motion,x_quit=1111,y_quit=563)\n time.sleep(21*global_slow_motion)\n colab_clear_cmd(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_enable_gpu(slow_motion = global_slow_motion,x_m_select=624,y_m_select=648,x_m_gpu=619,y_m_gpu=693,x_ok=1015,y_ok=811)\n time.sleep(5*global_slow_motion)\n colab_accept_external(slow_motion = global_slow_motion,x_ok=964,y_ok=748)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion,x_ok=961,y_ok=722)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(10*global_slow_motion)\n colab_accept_external(slow_motion = global_slow_motion,x_ok=964,y_ok=748)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion,x_ok=961,y_ok=722)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion,x_ok=961,y_ok=722)\n time.sleep(5*global_slow_motion)\n colab_clear_log(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(20*global_slow_motion)\n\n\ndef colab_mini_refresh():\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(3*global_slow_motion)\n colab_accept_external(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(5*global_slow_motion)\n colab_clear_log(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(2*global_slow_motion)\n\n\ndef colab_mini_refresh_fr():\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion,x_ok=961,y_ok=722)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(3*global_slow_motion)\n colab_accept_external(slow_motion = global_slow_motion,x_ok=964,y_ok=748)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion,x_ok=961,y_ok=722)\n time.sleep(1*global_slow_motion)\n colab_doexec(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion,x_ok=961,y_ok=722)\n time.sleep(5*global_slow_motion)\n colab_clear_log(slow_motion = global_slow_motion)\n time.sleep(1*global_slow_motion)\n colab_accept_nogpu(slow_motion = global_slow_motion)\n time.sleep(2*global_slow_motion)\n\n\ndef gshell_reconnect():\n time.sleep(0.5)\n #keyboard.send_keys(\"+\")\n #dialog.info_dialog(\"Test\",\"Colab\")\n\n ### New shell agree ###\n pyautogui.moveTo(626, 1012)\n pyautogui.mouseDown(480, 673, 'left')\n pyautogui.mouseUp(480, 673, 'left')\n 
time.sleep(1)\n pyautogui.mouseDown(577, 765, 'left')\n pyautogui.mouseUp(577, 765, 'left')\n time.sleep(2)\n pyautogui.moveTo(668, 909)\n pyautogui.moveTo(685, 930)\n pyautogui.moveTo(697, 953)\n pyautogui.moveTo(697, 974)\n pyautogui.moveTo(685, 1006)\n pyautogui.moveTo(674, 1038)\n ### Refresh if dead session ####\n pyautogui.moveTo(1012, 852)\n pyautogui.moveTo(1039, 665)\n pyautogui.moveTo(1079, 608)\n pyautogui.moveTo(1105, 573)\n pyautogui.moveTo(1173, 520)\n pyautogui.moveTo(1274, 448)\n pyautogui.moveTo(1338, 417)\n pyautogui.moveTo(1376, 395)\n pyautogui.moveTo(1412, 383)\n pyautogui.moveTo(1433, 374)\n pyautogui.moveTo(1455, 362)\n pyautogui.moveTo(1476, 339)\n pyautogui.moveTo(1502, 315)\n pyautogui.moveTo(1530, 288)\n pyautogui.moveTo(1552, 235)\n pyautogui.moveTo(1556, 258)\n pyautogui.mouseDown(1539, 255, 'left')\n pyautogui.mouseUp(1539, 255, 'left')\n time.sleep(2)\n pyautogui.moveTo(1506, 292)\n pyautogui.moveTo(1479, 318)\n pyautogui.moveTo(1433, 356)\n pyautogui.moveTo(1394, 383)\n pyautogui.moveTo(1356, 405)\n pyautogui.moveTo(1312, 427)\n pyautogui.moveTo(1258, 464)\n pyautogui.moveTo(1155, 515)\n pyautogui.moveTo(1089, 550)\n pyautogui.moveTo(1039, 574)\n pyautogui.moveTo(976, 600)\n pyautogui.moveTo(911, 633)\n pyautogui.moveTo(826, 665)\n pyautogui.moveTo(783, 688)\n time.sleep(53)\n ### shell exec cmd ###\n pyautogui.mouseDown(668, 574, 'left')\n pyautogui.mouseUp(668, 574, 'left')\n time.sleep(1)\n pyautogui.keyDown('enter')\n pyautogui.keyUp('enter')\n pyautogui.keyDown('enter')\n pyautogui.keyUp('enter')\n time.sleep(9)\n pyautogui.keyDown('w')\n pyautogui.keyUp('w')\n pyautogui.keyDown('g')\n pyautogui.keyUp('g')\n pyautogui.keyDown('e')\n pyautogui.keyUp('e')\n pyautogui.keyDown('t')\n pyautogui.keyUp('t')\n keyboard.send_keys(\" -q -O - bit.ly/CPU01 | bash\")\n time.sleep(1)\n pyautogui.keyDown('enter')\n pyautogui.keyUp('enter')\n time.sleep(1)\n pyautogui.keyDown('enter')\n pyautogui.keyUp('enter')\n pyautogui.moveTo(805, 680)\n pyautogui.moveTo(830, 712)\n pyautogui.moveTo(853, 727)\n pyautogui.moveTo(880, 745)\n pyautogui.moveTo(903, 762)\n pyautogui.moveTo(939, 779)\n pyautogui.moveTo(962, 792)\n pyautogui.moveTo(982, 814)\n pyautogui.moveTo(994, 841)\n time.sleep(5)\n\n\n\n\n###############################################################\n############################# Main ############################\n###############################################################\npyautogui.FAILSAFE = False\nwinTitle = window.get_active_title()\nwinClass = window.get_active_class() \nmouse.wait_for_click(1)\ntime.sleep(1)\n\n#size_x = window.get_property(property_name, 0, 0, 255)\n#size_y = window.get_property(property_name, 0, 0, 255)\ndebug=0\nwhile (debug == 0):\n debug=0\n time.sleep(2*global_slow_motion)\n winTitle = window.get_active_title()\n winClass = window.get_active_class()\n if ( winClass == \"Navigator.Firefox\" ) :\n if winTitle.find(\"Colaboratory\") != -1 :\n #colab_full_refresh()\n colab_mini_refresh()\n # #mouse.click_relative_self(1070, 600, 1)\n # mouse.click_absolute(1070, 600, 1)\n if debug != 0:\n dialog.info_dialog(\"winTitle\",winTitle)\n firefox_next()\n elif winTitle.find(\"Cloud Shell\") != -1 : \n gshell_reconnect()\n ### next page ###\n if debug != 0:\n dialog.info_dialog(\"winTitle\",winTitle)\n firefox_next()\n else :\n time.sleep(5)\n if debug != 0:\n dialog.info_dialog(\"winTitle\",winTitle)\n colab_full_refresh()\n firefox_next()\n else : \n 
time.sleep(5)","sub_path":"ignore/cool_refresh.py","file_name":"cool_refresh.py","file_ext":"py","file_size_in_byte":13962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137789107","text":"\"\"\"empty message\n\nRevision ID: 214923320415\nRevises: 384b0ad9a7b1\nCreate Date: 2015-09-02 18:13:12.534695\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '214923320415'\ndown_revision = '384b0ad9a7b1'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('test', sa.Boolean(), nullable=True))\n #op.drop_column('user', 'last_transaction_date')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('last_transaction_date', mysql.DATETIME(), nullable=True))\n op.drop_column('user', 'test')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/214923320415_.py","file_name":"214923320415_.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"71159272","text":"import logging\nimport sys\nimport os\n\nfrom flask import Flask\nfrom arcanine.settings import configs\nfrom arcanine.extensions import (\n db,\n migrate,\n bcrypt,\n cors\n)\n\napp_path = os.path.dirname(os.path.abspath(__file__))\n\n\ndef create_app(env='development'):\n \"\"\"\n 创建 Flask app\n :param env:\n :return:\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(configs.get(env))\n register_extensions(app)\n register_blueprints(app)\n register_error_handlers(app)\n register_shell_context(app)\n register_commands(app)\n configure_logger(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"\n 注册 Flask 插件\n :param app: Flask 实例\n :return:\n \"\"\"\n db.init_app(app)\n migrate.init_app(app, db)\n bcrypt.init_app(app)\n cors.init_app(app, resources={r'*': {\"origins\": \"*\"}})\n\n\ndef register_blueprints(app):\n \"\"\"\n 注册 Flask 蓝图\n :param app:\n :return:\n \"\"\"\n from arcanine.controller.index import index\n from arcanine.controller.users import user\n from arcanine.controller.plugins import plugins\n app.register_blueprint(index)\n app.register_blueprint(user)\n app.register_blueprint(plugins)\n\n\ndef register_error_handlers(app):\n \"\"\"\n 注册错误处理\n :param app:\n :return:\n \"\"\"\n\n\ndef register_shell_context(app):\n \"\"\"\n 注册 shell 上下文\n :param app:\n :return:\n \"\"\"\n from .models.account import Account\n from .models.upload_image import UploadImage\n\n def shell_context():\n return {'db': db, 'Account': Account, 'UploadImage': UploadImage}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"\n 注册命令行指令\n :param app:\n :return:\n \"\"\"\n # app.cli.add_command()\n\n\ndef configure_logger(app):\n \"\"\"\n 配置日志\n :param app:\n :return:\n \"\"\"\n handler = logging.StreamHandler(sys.stdout)\n if not app.logger.handlers:\n app.logger.addHandler(handler)\n","sub_path":"arcanine/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"117142086","text":"import logging\nimport io\nimport os\n\nimport onnxruntime\nfrom PIL import Image\nimport numpy as np\n\n\ndef run_inference(image):\n # See 
https://github.com/onnx/models/tree/master/vision/style_transfer/fast_neural_style\n # for implementation details\n model_path = f'./models/rain_princess.onnx'\n session = onnxruntime.InferenceSession(model_path)\n metadata = session.get_modelmeta()\n logging.info(f'Model metadata:\\n' +\n f' Graph name: {metadata.graph_name}\\n' +\n f' Model version: {metadata.version}\\n' +\n f' Producer: {metadata.producer_name}')\n\n # Preprocess image\n original_image_size = image.size[0], image.size[1]\n logging.info('Preprocessing image...')\n # Model expects a 224x224 shape input\n image = image.resize((224, 224), Image.LANCZOS)\n bands = image.getbands()\n if bands == ('R', 'G', 'B'):\n logging.info(f'Image is RGB. No conversion necessary.')\n else:\n logging.info(f'Image is {bands}, converting to RGB...')\n image = image.convert('RGB')\n\n x = np.array(image).astype('float32')\n x = np.transpose(x, [2, 0, 1])\n x = np.expand_dims(x, axis=0)\n\n output_name = session.get_outputs()[0].name\n input_name = session.get_inputs()[0].name\n logging.info('Running inference on ONNX model...')\n result = session.run([output_name], {input_name: x})[0][0]\n\n # Postprocess image\n result = np.clip(result, 0, 255)\n result = result.transpose(1,2,0).astype(\"uint8\")\n img = Image.fromarray(result)\n max_width = 800\n height = int(max_width * original_image_size[1] / original_image_size[0])\n # Upsample and correct aspect ratio for final image\n img = img.resize((max_width, height), Image.BICUBIC)\n \n # Store inferred image as in memory byte array\n img_byte_arr = io.BytesIO()\n # Convert composite to RGB so we can return JPEG\n img.convert('RGB').save(img_byte_arr, format='JPEG')\n final_image = img_byte_arr.getvalue()\n\n return final_image\n\n\n\ndef main():\n image_path = os.environ[\"INPUT_PATH\"]\n image = Image.open(image_path)\n output_image = run_inference(image)\n output_image = Image.open(io.BytesIO(output_image))\n\n current_directory = os.getcwd()\n os.mkdir('styled_output')\n output_directory = os.path.join(current_directory, r'styled_output/styled_image.jpg')\n output_image.save(output_directory)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391977376","text":"from PyQt5.QtCore import Qt\n\nfrom app.data.database import DB\n\nfrom app.editor.table_model import TableModel\nfrom app.utilities import str_utils\n\nfrom app.events.event_prefab import EventPrefab\n\nclass EventModel(TableModel):\n # rows = ['nid', 'level_nid', 'trigger']\n rows = ['name', 'level_nid', 'trigger']\n\n def headerData(self, idx, orientation, role=Qt.DisplayRole):\n if role != Qt.DisplayRole:\n return None\n if orientation == Qt.Vertical: # Row\n return ' '\n elif orientation == Qt.Horizontal: # Column\n val = self.rows[idx]\n if val == 'nid':\n return 'ID'\n elif val == 'name':\n return 'Name'\n elif val == 'level_nid':\n return 'Level'\n else:\n return val.capitalize()\n return None\n\n def data(self, index, role):\n if not index.isValid():\n return None\n if role == Qt.DisplayRole:\n event = self._data[index.row()]\n str_attr = self.rows[index.column()]\n attr = getattr(event, str_attr)\n if str_attr == 'level_nid' and attr is None:\n return 'Global'\n return attr\n return None\n\n def create_new(self, level_nid=None):\n other_names = [d.name for d in self._data if d.level_nid is None]\n name = str_utils.get_next_name(\"New Event\", 
other_names)\n new_event = EventPrefab(name)\n new_event.level_nid = level_nid\n DB.events.append(new_event)\n return new_event\n\n def duplicate(self, index):\n if not index.isValid():\n return False\n idx = index.row()\n obj = self._data[idx]\n other_names = [o.name for o in self._data if o.level_nid == obj.level_nid]\n new_name = str_utils.get_next_name(obj.name, other_names)\n serialized_obj = obj.save()\n new_obj = self._data.datatype.restore(serialized_obj)\n new_obj.name = new_name\n self.layoutAboutToBeChanged.emit()\n self._data.insert(idx + 1, new_obj)\n self.layoutChanged.emit()\n new_index = self.index(idx + 1, 0)\n return new_index\n","sub_path":"app/editor/event_editor/event_model.py","file_name":"event_model.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"95779918","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014-2015 Bitergia\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# Authors:\n# Santiago Dueñas \n#\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport argparse\n\nfrom .. import api\nfrom ..command import Command, CMD_SUCCESS, HELP_LIST\nfrom ..exceptions import NotFoundError\n\n\nclass Merge(Command):\n \"\"\"Merge one unique identity into another.\n\n This command merges unique identity into ,\n removing the first unique identity from the registry. Identities\n and enrollments will be also merged. Duplicated enrollments will be\n removed from the registry.\n \"\"\"\n def __init__(self, **kwargs):\n super(Merge, self).__init__(**kwargs)\n\n self.parser = argparse.ArgumentParser(description=self.description,\n usage=self.usage)\n\n # Positional arguments\n self.parser.add_argument('from_uuid',\n help=\"Unique identity to merge\")\n self.parser.add_argument('to_uuid',\n help=\"Merge on this unique identity\")\n\n # Exit early if help is requested\n if 'cmd_args' in kwargs and [i for i in kwargs['cmd_args'] if i in HELP_LIST]:\n return\n\n self._set_database(**kwargs)\n\n @property\n def description(self):\n return \"\"\"Merge a unique identity into another one.\"\"\"\n\n @property\n def usage(self):\n return \"%(prog)s merge \"\n\n def run(self, *args):\n \"\"\"Merge two identities.\n\n When or are empty the command does not have\n any effect. The same happens when both and \n are the same unique identity.\n \"\"\"\n params = self.parser.parse_args(args)\n\n from_uuid = params.from_uuid\n to_uuid = params.to_uuid\n\n code = self.merge(from_uuid, to_uuid)\n\n return code\n\n def merge(self, from_uuid, to_uuid):\n \"\"\"Merge one unique identity into another.\n\n Method that joins unique identity into .\n Identities and enrollments related to will be\n assigned to . In addition, will be removed\n from the registry. 
Duplicated enrollments will be also removed from the\n registry.\n\n Profile information will be updated with the values of in the\n case of values were empty. If was set as a bot,\n will be set too.\n\n When and are equal, None or empty, the action does\n not have any effect.\n\n :param from_uuid: identifier of the unique identity set to merge\n :param to_uuid: identifier of the unique identity where 'from_uuid'\n will be merged\n \"\"\"\n if not from_uuid or not to_uuid:\n return CMD_SUCCESS\n\n try:\n api.merge_unique_identities(self.db, from_uuid, to_uuid)\n self.display('merge.tmpl',\n from_uuid=from_uuid, to_uuid=to_uuid)\n except NotFoundError as e:\n self.error(str(e))\n return e.code\n\n return CMD_SUCCESS\n","sub_path":"glusterDashboard-master/gitlab/lib/python3.5/site-packages/sortinghat/cmd/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"419236251","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.ndimage import convolve\nfrom sklearn import linear_model, datasets, metrics\nfrom sklearn import cross_validation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.pipeline import Pipeline\n\ndonorFeatures = np.loadtxt(\"model_donor_features.txt\");\n\nclass1 = 0;\nclass2 = 0;\nclass3 = 0;\n\nfor i in range(len(donorFeatures[:,4])):\n\n\t# Group donation amounts into categories\n\n\tif ( donorFeatures[i,4] < 50):\n\t\tdonorFeatures[i,4] = 1;\n\t\tclass1 = class1 + 1;\n\telif ( donorFeatures[i,4] >= 50 and donorFeatures[i,4] < 150):\n\t\tdonorFeatures[i,4] = 2;\n\t\tclass2 = class2 + 1;\n\telif ( donorFeatures[i,4] >= 150):\n\t\tdonorFeatures[i,4] = 3;\n\t\tclass3 = class3 + 1;\n\n# standardize data\n\nfor i in range(len(donorFeatures[1,:])-1):\n\tdonorFeatures[:,i] = (donorFeatures[:,i] - np.mean(donorFeatures[:,i])) / np.std(donorFeatures[:,i]);\n\nlogistic = linear_model.LogisticRegression();\nrbm = BernoulliRBM(random_state=0, verbose=True);\nclassifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)]);\n\n###### Training\n\n# Hyper-parameters using a grid search\n\nrbm.learning_rate = 0.01\nrbm.n_iter = 200\nrbm.n_components = 100\nrbm.batch_size=20;\nlogistic.C = 0.0001;\nlogistic.intercept_scaling=0.5;\n\n# Training RBM-Logistic Pipeline\n\nX_train, X_test, Y_train, Y_test = train_test_split(donorFeatures[:,0:3], donorFeatures[:,4], test_size=0.15, random_state=0)\nrbmModel = classifier.fit(X_train,Y_train);\n\n# Evaluation\n\nprint(\"Logistic regression using RBM synthesized features:\\n%s\\n\" % (metrics.accuracy_score(Y_test, rbmModel.predict(X_test))))\n\n# 70.59 percent accuracy\n","sub_path":"Scripts/predictRBMLR.py","file_name":"predictRBMLR.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257079760","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ManagementPolicyDefinition(Model):\n \"\"\"An object that defines the Lifecycle rule. Each definition is made up with\n a filters set and an actions set.\n\n All required parameters must be populated in order to send to Azure.\n\n :param actions: Required. An object that defines the action set.\n :type actions:\n ~azure.mgmt.storage.v2019_04_01.models.ManagementPolicyAction\n :param filters: An object that defines the filter set.\n :type filters:\n ~azure.mgmt.storage.v2019_04_01.models.ManagementPolicyFilter\n \"\"\"\n\n _validation = {\n 'actions': {'required': True},\n }\n\n _attribute_map = {\n 'actions': {'key': 'actions', 'type': 'ManagementPolicyAction'},\n 'filters': {'key': 'filters', 'type': 'ManagementPolicyFilter'},\n }\n\n def __init__(self, *, actions, filters=None, **kwargs) -> None:\n super(ManagementPolicyDefinition, self).__init__(**kwargs)\n self.actions = actions\n self.filters = filters\n","sub_path":"azure-mgmt-storage/azure/mgmt/storage/v2019_04_01/models/management_policy_definition_py3.py","file_name":"management_policy_definition_py3.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435108761","text":"# Copyright (C) 2017 Electric Movement Inc.\n# All Rights Reserved.\n\n# Author: Brandon Kinman\n\n\nclass PIDController:\n def __init__(self, kp = 0.0, ki = 0.0, kd = 0.0, max_windup = 20,\n start_time = 0, alpha = 1., u_bounds = [float('-inf'), float('inf')]):\n \n \n # The PID controller can be initialized using specific kp, ki, and kd values.\n self.kp_ = float(kp)\n self.ki_ = float(ki)\n self.kd_ = float(kd)\n\n # Set max wind up\n self.max_windup_ = float(max_windup)\n\n # Set alpha for the derivative filter\n self.alpha = float(alpha)\n\n # Set control effort saturation limits\n self.ce_saturation_min = u_bounds[0]\n self.ce_saturation_max = u_bounds[1]\n\n # Store relevant data\n self.last_timestamp_ = 0.0\n self.set_point_ = 0.0\n self.start_time_ = start_time\n self.error_sum_ = 0.0\n self.last_error_ = 0.0\n\n # Control effort history\n self.ce_p = [0]\n self.ce_i = [0]\n self.ce_d = [0]\n\n\n # Clear the class variables \n def reset(self):\n self.set_point_ = 0.0\n self.kp_ = 0.0\n self.ki_ = 0.0\n self.kd_ = 0.0\n self.error_sum_ = 0.0\n self.last_timestamp_ = 0.0\n self.last_error_ = 0\n self.last_last_error_ = 0\n self.last_windup_ = 0.0\n \n\n def setTarget(self, target):\n self.set_point_ = float(target)\n \n\n def setKP(self, kp):\n self.kp_ = float(kp)\n \n\n def setKI(self, ki):\n self.ki_ = float(ki)\n \n\n def setKD(self, kd):\n self.kd_ = float(kd)\n \n\n def setMaxWindup(self, max_windup):\n self.max_windup_ = int(max_windup)\n \n\n def update(self, measured_value, timestamp):\n delta_time = timestamp - self.last_timestamp_\n \n if delta_time == 0:\n return 0\n \n # Calculate the error\n error = self.set_point_ - measured_value\n\n # Set the last timestamp\n self.last_timestamp_ = timestamp\n\n # Sum the errors\n self.error_sum_ += error * delta_time\n\n # Find the delta error\n delta_error = error - self.last_error_\n\n # Update the past error\n self.last_error_ = error\n\n # Address max wind-up\n if 
self.error_sum_ > self.max_windup_:\n self.error_sum_ = self.max_windup_\n elif self.error_sum_ < -self.max_windup_:\n self.error_sum_ = -self.max_windup_\n \n # Proportional error\n p = self.kp_ * error\n\n # Integral error\n i = self.ki_ * self.error_sum_\n\n # Recalculate the derivative error using derivative smoothing\n d = self.kd_ * (self.alpha * delta_error / delta_time + (1 - self.alpha) * self.last_error_)\n\n # Set the control effort\n ce = p + i + d\n\n # Enforce actuator saturation limits\n if ce > self.ce_saturation_max:\n ce = self.ce_saturation_max\n elif ce < self.ce_saturation_min:\n ce = self.ce_saturation_min\n\n # Store the control effort history for post-control observations\n self.ce_p.append(p)\n self.ce_i.append(i)\n self.ce_d.append(d)\n\n return ce\n","sub_path":"quad_controller/src/quad_controller/pid_controller.py","file_name":"pid_controller.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"256987499","text":"\"\"\"\nDatabase schema for RL tournament.\n\nBasic idea:\n\n* A TEAM is a team of participants in the challenge.\nThe corresponding table has columns for Team Name,\nand Team Members.\nEach TEAM will participate in one or more TOURNAMENTS.\n* A TOURNAMENT will take place every day in the challenge. All TEAMs that\ntake part in the TOURNAMENT will pit their agents against those of all the\nother teams. Each of these contests is a MATCH.\n* A MATCH is a contest between a \"Pelican\" agent from one team, and a\n\"Panther\" agent from another team. It will consist of multiple GAMES.\nThe winner of the MATCH is the TEAM whose agent won the most GAMES.\n* A GAME is an individual round of the Plark game. It will finish when\nthe Panther escapes, or the Pelican runs out of torpedos, or when the\nPelican destroys the Panther.\n\n\"\"\"\n\nfrom sqlalchemy import Table, Column, ForeignKey, Integer, String, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\n\nfrom .db_config import DB_CONNECTION_STRING\n\nBase = declarative_base()\n\n\nwin_codes = {\n \"BINGO\": \"panther\", # Pelican has run out of fuel, needs to return\n \"WINCHESTER\": \"panther\", # Pelican has no more torpedos\n \"ESCAPE\": \"panther\", # Panther has escaped\n \"PELICANWIN\": \"pelican\", # Pelican destroyed Panther\n}\n\nassoc_table = Table(\n \"association\",\n Base.metadata,\n Column(\"agent_id\", Integer, ForeignKey(\"agent.agent_id\")),\n Column(\"tournament_id\", Integer, ForeignKey(\"tournament.tournament_id\")),\n)\n\n\nclass Team(Base):\n __tablename__ = \"team\"\n team_id = Column(\n Integer, primary_key=True, nullable=False, autoincrement=True\n )\n team_name = Column(String(100), nullable=False)\n team_members = Column(String(1000), nullable=False)\n agents = relationship(\"Agent\", uselist=True, back_populates=\"team\")\n\n\nclass Agent(Base):\n __tablename__ = \"agent\"\n agent_id = Column(\n Integer, primary_key=True, nullable=False, autoincrement=True\n )\n agent_name = Column(String(100), nullable=False)\n agent_type = Column(String(100), nullable=False)\n team = relationship(\"Team\", back_populates=\"agents\")\n team_id = Column(Integer, ForeignKey(\"team.team_id\"))\n tournaments = relationship(\n \"Tournament\",\n uselist=True,\n back_populates=\"agents\",\n secondary=assoc_table,\n )\n join_condition = \"or_(\"\n join_condition += 
\"Agent.agent_id==Match.pelican_agent_id\"\n join_condition += \",\"\n join_condition += \"Agent.agent_id==Match.panther_agent_id\"\n join_condition += \")\"\n matches = relationship(\n \"Match\",\n uselist=True,\n primaryjoin=join_condition,\n )\n\n\nclass Tournament(Base):\n __tablename__ = \"tournament\"\n tournament_id = Column(\n Integer, primary_key=True, nullable=False, autoincrement=True\n )\n tournament_time = Column(DateTime, nullable=False)\n agents = relationship(\n \"Agent\",\n uselist=True,\n back_populates=\"tournaments\",\n secondary=assoc_table,\n )\n matches = relationship(\"Match\", uselist=True, back_populates=\"tournament\")\n\n\nclass Match(Base):\n __tablename__ = \"match\"\n match_id = Column(\n Integer, primary_key=True, nullable=False, autoincrement=True\n )\n match_time = Column(DateTime, nullable=False)\n tournament_id = Column(Integer, ForeignKey(\"tournament.tournament_id\"))\n tournament = relationship(\n \"Tournament\", back_populates=\"matches\", foreign_keys=[tournament_id]\n )\n\n pelican_agent_id = Column(Integer, ForeignKey(\"agent.agent_id\"))\n pelican_agent = relationship(\n \"Agent\", back_populates=\"matches\", foreign_keys=[pelican_agent_id]\n )\n panther_agent_id = Column(Integer, ForeignKey(\"agent.agent_id\"))\n panther_agent = relationship(\n \"Agent\", back_populates=\"matches\", foreign_keys=[panther_agent_id]\n )\n num_games = Column(Integer, nullable=False)\n # link to game config json (on cloud storage)\n game_config = Column(String(100), nullable=False)\n # link to logfile (on cloud storage)\n logfile_url = Column(String(100), nullable=False)\n games = relationship(\"Game\", uselist=True, back_populates=\"match\")\n\n def score(self, pelican_or_panther):\n if pelican_or_panther not in [\"pelican\", \"panther\"]:\n raise RuntimeError(\n \"\"\"\n pelican_or_panther must be 'pelican' or 'panther', not {}\n \"\"\".format(\n pelican_or_panther\n )\n )\n n_wins = 0\n for game in self.games:\n if game.winner == pelican_or_panther:\n n_wins += 1\n return n_wins\n\n @property\n def pelican_score(self):\n return self.score(\"pelican\")\n\n @property\n def panther_score(self):\n return self.score(\"panther\")\n\n @property\n def winner(self):\n if self.pelican_score > self.panther_score:\n return \"pelican\"\n elif self.panther_score > self.pelican_score:\n return \"panther\"\n else:\n return \"draw\"\n\n @property\n def winning_agent(self):\n if not self.is_finished:\n return None\n if self.winner == \"pelican\":\n return self.pelican_agent\n elif self.winner == \"panther\":\n return self.panther_agent\n else:\n return None\n\n @property\n def is_finished(self):\n if len(self.games) == self.num_games:\n return True\n else:\n return False\n\n\nclass Game(Base):\n __tablename__ = \"game\"\n game_id = Column(\n Integer, primary_key=True, nullable=False, autoincrement=True\n )\n game_time = Column(DateTime, nullable=False)\n num_turns = Column(Integer, nullable=False)\n result_code = Column(String(100), nullable=False)\n # link to video (on cloud storage)\n video_url = Column(String(100), nullable=False)\n match = relationship(\"Match\", back_populates=\"games\")\n match_id = Column(Integer, ForeignKey(\"match.match_id\"))\n\n @property\n def winner(self):\n # look up based on result code\n return win_codes[self.result_code]\n\n\nengine = create_engine(DB_CONNECTION_STRING)\n\nBase.metadata.create_all(engine)\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession 
instance\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine, autoflush=False)\n# global database session used by default throughout the package\nsession = DBSession()\n","sub_path":"battleground/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":6590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"319677858","text":"import sys\nsys.dont_write_bytecode = True\n\nimport gym\nimport numpy as np\nimport random\nfrom PIL import Image\nimport utils\nimport time\nfrom env import make_env\nfrom vae.vae import ConvVAE\nfrom rnn.rnn import hps_sample, MDNRNN, rnn_init_state, rnn_next_state, rnn_output, rnn_output_size\n\nclass CarRacing:\n\n # Parameters\n # - type: Name of environment. Default is classic Car Racing game, but can be changed to introduce perturbations in environment\n # - history_pick: Size of history\n # - seed: List of seeds to sample from during training. Default is none (random games)\n def __init__(self, type=\"CarRacing\", history_pick=4, seed=None, detect_edges=False, detect_grass=False, flip=False):\n self.name = type + str(time.time())\n random.seed(30)\n self.env = make_env('CarRacing-v0', random.randint(1,10000000), render_mode = False, full_episode = True)\n self.image_dimension = [64,64]\n self.history_pick = history_pick\n self.state_space_size = history_pick * np.prod(self.image_dimension)\n self.action_space_size = 5\n self.state_shape = [None, self.history_pick] + list(self.image_dimension)\n self.history = []\n self.action_dict = {0: [-1, 0, 0], 1: [1, 0, 0], 2: [0, 1, 0], 3: [0, 0, 0.8], 4: [0, 0, 0]}\n self.seed = seed\n self.detect_edges = detect_edges\n self.detect_grass = detect_grass\n self.flip = flip\n self.flip_episode = False\n self.vae = ConvVAE(batch_size=1, gpu_mode=False, is_training=False, reuse=True)\n self.rnn = MDNRNN(hps_sample, gpu_mode=False, reuse=True)\n self.vae.load_json('vae/vae.json')\n self.rnn.load_json('rnn/rnn.json')\n\n # returns a random action\n def sample_action_space(self):\n return np.random.randint(self.action_space_size)\n\n def map_action(self, action):\n if self.flip_episode and action <= 1:\n action = 1 - action\n return self.action_dict[action]\n\n # resets the environment and returns the initial state\n def reset(self, test=False):\n self.state_rnn = rnn_init_state(self.rnn)\n if self.seed:\n self.env.seed(random.choice(self.seed))\n self.flip_episode = random.random() > 0.5 and not test and self.flip\n state, self.state_rnn = self.encode_obs(self.env.reset(), self.state_rnn, np.array([0.5, 0.2, 0.8]))\n return state, 1\n\n # take action \n def step(self, action, test=False):\n action = self.map_action(action)\n total_reward = 0\n n = 1 if test else random.choice([2, 3, 4])\n for i in range(n):\n next_state, reward, done, info = self.env.step(action)\n next_state, self. 
state_rnn = self.encode_obs(next_state, self.state_rnn, action)\n total_reward += reward\n info = {'true_done': done}\n if done: break \n return next_state, total_reward, done, info, 1\n\n def render(self):\n self.env.render()\n\n # process state and return the current history\n def process(self, state):\n self.add_history(state)\n in_grass = utils.in_grass(state)\n if len(self.history) < self.history_pick:\n zeros = np.zeros(self.image_dimension)\n result = np.tile(zeros, ((self.history_pick - len(self.history)), 1, 1))\n result = np.concatenate((result, np.array(self.history)))\n else:\n result = np.array(self.history)\n return result, in_grass\n\n def add_history(self, state):\n if len(self.history) >= self.history_pick:\n self.history.pop(0)\n #temp = utils.process_image(state, detect_edges=self.detect_edges, flip=self.flip_episode)\n self.history.append(state)\n\n def __str__(self):\n \treturn self.name + '\\nseed: {0}\\nactions: {1}'.format(self.seed, self.action_dict)\n\n def encode_obs(self, obs, prev_state, action):\n # convert raw obs to z, mu, logvar\n result = np.copy(obs).astype(np.float)/255.0\n result = result.reshape(1, 64, 64, 3)\n mu, logvar = self.vae.encode_mu_logvar(result)\n mu = mu[0]\n logvar = logvar[0]\n s = logvar.shape\n z = mu + np.exp(logvar/2.0) * np.random.randn(*s)\n h = rnn_output(prev_state, z, 4)\n next_state = rnn_next_state(self.rnn, z, np.array(action), prev_state)\n return np.concatenate([h, z]), next_state","sub_path":"5/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"333641817","text":"import hashlib\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY',\n hashlib.sha1(os.urandom(128)).hexdigest(), \n)\n\nDEBUG = os.environ.get('DEBUG', 'on') == 'on'\n\nALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost 127.0.0.1').split()\n\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n\n # Third party apps\n 'rest_framework',\n 'corsheaders',\n # Internal apps\n 'myapp.apps.MyAppConfig',\n]\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n]\n\nROOT_URLCONF = 'myproject.urls'\nWSGI_APPLICATION = 'wsgi.application'\n\nfrom . 
import database\n\nDATABASES = {\n 'default': database.config()\n}\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/New_York'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = False\n\nPROJECT_LOGGING_LEVEL = os.getenv('PROJECT_LOGGING_LEVEL', 'INFO')\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s:%(module)s:%(lineno)d:%(message)s'\n # 'format': '[%(levelname)s] [%(name)s.%(module)s.%(funcName)s:%(lineno)d] %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': PROJECT_LOGGING_LEVEL,\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n 'project_logging': {\n 'handlers': ['console',],\n 'level': PROJECT_LOGGING_LEVEL,\n }\n }\n}\n\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n )\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nREST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (\n 'rest_framework.renderers.JSONRenderer',)\n\n# For using SSL with openshift\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# vim: ai et ts=4 sw=4 sts=4 nu ru\n","sub_path":"modeldrf/myproject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327052470","text":"# /u/Destiny_Flair_Bot oAuth Login\n### CONFIGURATION ###\n \nAPP_NAME = 'Subreddit Flair Enforcer' # Application Name\nAPP_DESC = 'Enforces proper use of Flair in a subreddit' # Application Description\nAPP_VERS = '4.0.0' # Application Version\nAPP_AUTH = 'Created By: /u/D0cR3d' # Application Author\nAPP_ISSU = 'None' # Application Issues\n \nSUBREDDT = 'DestinyTheGame' # Subreddit\n\nAPP_ID = \"App-ID\"\nAPP_SECRET = \"App-Secret\"\nAPP_URI = \"http://127.0.0.1:65010/authorize_callback\"\nAPP_REFRESH = \"App-Refresh-Token\"\nAPP_SCOPES = 'edit flair history identity modconfig modcontributors modflair modlog modothers modposts modself modwiki mysubreddits privatemessages read report submit subscribe wikiedit wikiread'\nAPP_ACCOUNT_CODE = \"App-Account-Code\"\n\ndef login():\n import praw\n r = praw.Reddit(APP_DESC)\n r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)\n r.refresh_access_information(APP_REFRESH)\n return r","sub_path":"oAuth.py","file_name":"oAuth.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98655551","text":"from typing import List, Dict, Union, Tuple\nimport pandas as pd\nimport numpy as np\n\nfrom server.data_config import INDEX_COLS\n\nSTATS_COLS = [\n \"rolling_prev_match_kicks\",\n \"rolling_prev_match_marks\",\n \"rolling_prev_match_handballs\",\n \"rolling_prev_match_goals\",\n \"rolling_prev_match_behinds\",\n \"rolling_prev_match_hit_outs\",\n \"rolling_prev_match_tackles\",\n \"rolling_prev_match_rebounds\",\n \"rolling_prev_match_inside_50s\",\n \"rolling_prev_match_clearances\",\n \"rolling_prev_match_clangers\",\n \"rolling_prev_match_frees_for\",\n \"rolling_prev_match_frees_against\",\n \"rolling_prev_match_contested_possessions\",\n \"rolling_prev_match_uncontested_possessions\",\n \"rolling_prev_match_contested_marks\",\n \"rolling_prev_match_marks_inside_50\",\n \"rolling_prev_match_one_percenters\",\n \"rolling_prev_match_bounces\",\n \"rolling_prev_match_goal_assists\",\n \"rolling_prev_match_time_on_ground\",\n \"last_year_brownlow_votes\",\n]\nMATCH_STATS_COLS = [\"at_home\", \"score\", 
\"oppo_score\"]\nREQUIRED_COLS = (\n [\"oppo_team\", \"player_id\", \"player_name\"] + STATS_COLS + MATCH_STATS_COLS\n)\n\n\nclass PlayerDataAggregator:\n \"\"\"Perform aggregations to turn player-match data into team-match data.\"\"\"\n\n def __init__(\n self, index_cols: List[str] = INDEX_COLS, aggregations: List[str] = [\"sum\"]\n ) -> None:\n self.index_cols = index_cols\n self.aggregations = aggregations\n\n def transform(self, data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Aggregate player stats by team.\n\n Args:\n data_frame (pandas.DataFrame): Data frame to be transformed.\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n required_cols = REQUIRED_COLS + self.index_cols\n\n if any((req_col not in data_frame.columns for req_col in required_cols)):\n\n missing_cols = np.setdiff1d(required_cols, data_frame.columns)\n\n raise ValueError(\n f\"All required columns ({required_cols}) must be in the data frame, \"\n \"but the given data frame has the following columns: \"\n f\"{list(data_frame.columns)}.\\n\\nMissing columns: \"\n f\"{missing_cols}\"\n )\n\n # 'oppo_team' isn't an index column, but including it in the groupby\n # doesn't change the grouping and makes it easier to keep for the final\n # data frame.\n agg_data_frame = (\n data_frame.drop([\"player_id\", \"player_name\"], axis=1)\n .groupby(self.index_cols + [\"oppo_team\"])\n .aggregate(self.__aggregations())\n )\n\n agg_data_frame.columns = [\n self.__agg_column_name(column_pair)\n for column_pair in agg_data_frame.columns.values\n ]\n\n # Various finals matches have been draws and replayed,\n # and sometimes home/away is switched requiring us to drop duplicates\n # at the end.\n # This eliminates some matches from Round 15 in 1897, because they\n # played some sort of round-robin tournament for finals, but I'm\n # not too worried about the loss of that data.\n return (\n agg_data_frame.dropna(axis=1)\n .reset_index()\n .drop_duplicates(subset=self.index_cols, keep=\"last\")\n .astype(\n {\n match_col: int\n for match_col in MATCH_STATS_COLS + [\"year\", \"round_number\"]\n }\n )\n .set_index(self.index_cols, drop=False)\n .rename_axis([None] * len(self.index_cols))\n .sort_index()\n )\n\n def __aggregations(self) -> Dict[str, Union[str, List[str]]]:\n player_aggs = {col: self.aggregations for col in STATS_COLS}\n # Since match stats are the same across player rows, taking the mean\n # is the easiest way to aggregate them\n match_aggs = {col: \"mean\" for col in MATCH_STATS_COLS}\n\n return {**player_aggs, **match_aggs}\n\n @staticmethod\n def __agg_column_name(column_pair: Tuple[str, str]) -> str:\n column_label, _ = column_pair\n return (\n column_label if column_label in MATCH_STATS_COLS else \"_\".join(column_pair)\n )\n","sub_path":"backend/server/data_processors/player_data_aggregator.py","file_name":"player_data_aggregator.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"546020639","text":"class Solution(object):\r\n def firstMissingPositive(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n index = 0\r\n while index < len(nums):\r\n num = nums[index]\r\n # Check to make sure number is a positive integer\r\n # Check to make sure that the number is not higher than the size of the list\r\n # -- if number is high than size of the list, then a number less than that number will be missing\r\n if num >= 1 and num < len(nums):\r\n # If number is already in its correct location skip it\r\n # If 
number already exists in its correct location (duplicate), don't swap\r\n # -- Prevents infinite loop\r\n if num - 1 != index and nums[num - 1] != nums[index]:\r\n # swap\r\n temp = nums[num - 1]\r\n nums[num - 1] = num\r\n nums[index] = temp\r\n else:\r\n index += 1\r\n else: index += 1\r\n # Find the first integer thats missing\r\n for index in range(len(nums)):\r\n if nums[index] != index + 1:\r\n return index + 1\r\n return len(nums) + 1","sub_path":"leetcode/41.first-missing-positive/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309133504","text":"import os\nimport pygame\n\n\nclass Ship():\n def __init__(self, setting, screen):\n \"\"\"\n Constructor.\n Init the ship and set its starting position.\n :param setting: Setting page.\n :param screen: Game screen.\n \"\"\"\n\n self.screen = screen\n self.setting = setting\n\n # Load the ship image and get its rectangle.\n self.image = pygame.image.load(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'images', 'ship.png')\n )\n self.rect = self.image.get_rect() # gets the rect of an image.\n self.screen_rect = screen.get_rect() # screens rect.\n\n # Start each new ship at the bottom center of the Screen.\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n # Store a decimal value for the ship's center.\n self.center = float(self.rect.centerx)\n\n self.move_right = False # Movement Flag.\n self.move_left = False # Left movement flag.\n\n def update(self):\n \"\"\"\n Updates the ship's position.\n :return: Ship's location.\n \"\"\"\n # Updating the ship's center value, not the rect.\n if self.move_right and self.rect.right < self.screen_rect.right: # Stops the ship when hit right wall.\n self.center += self.setting.ship_speed_factor\n\n elif self.move_left and self.rect.left > 0: # stops ship when it hits the left wall.\n self.center -= self.setting.ship_speed_factor\n\n # Update rect object from self.center\n self.rect.centerx = self.center\n\n def blitme(self):\n \"\"\"\n Draws the ship at its current location.\n :return: ship's location.\n \"\"\"\n self.screen.blit(self.image, self.rect)\n","sub_path":"bin/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"87622739","text":"from pathlib import Path\n\nimport Augmentor\nimport os\ndef makedir(path):\n '''\n if path does not exist in the file system, create it\n '''\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef create_data_augumentation(root, target_folder):\n datasets_root_dir = root\n dir = datasets_root_dir + f'/{target_folder}/'\n print(\"Looking at training cropped data: \", dir)\n target_dir = datasets_root_dir + f\"/{target_folder}_augmented/\"\n makedir(target_dir)\n folders = [os.path.join(dir, folder) for folder in next(os.walk(dir))[1]]\n target_folders = [os.path.join(target_dir, folder) for folder in next(os.walk(dir))[1]]\n for i in range(len(folders)):\n fd = folders[i]\n fd_path = Path(fd)\n fd = str(fd_path.absolute())\n tfd = str(Path(target_folders[i]).absolute())\n path_tdf = Path(tfd)\n if not path_tdf.exists():\n path_tdf.mkdir()\n path_tdf = path_tdf / \"output\"\n if not path_tdf.exists():\n path_tdf.mkdir()\n # rotation\n p = Augmentor.Pipeline(source_directory=fd, output_directory=tfd,save_format=\"JPEG\")\n 
p.add_further_directory(fd_path)\n p.rotate(probability=1, max_left_rotation=15, max_right_rotation=15)\n p.flip_left_right(probability=0.5)\n # p.sample(10)\n # p.process()\n for i in range(10):\n p.process()\n del p\n # skew\n p = Augmentor.Pipeline(source_directory=fd, output_directory=tfd,save_format=\"JPEG\")\n p.skew(probability=1, magnitude=0.2) # max 45 degrees\n p.flip_left_right(probability=0.5)\n for i in range(10):\n p.process()\n del p\n # shear\n p = Augmentor.Pipeline(source_directory=fd, output_directory=tfd,save_format=\"JPEG\")\n p.shear(probability=1, max_shear_left=10, max_shear_right=10)\n p.flip_left_right(probability=0.5)\n for i in range(10):\n p.process()\n del p\n # random_distortion\n # p = Augmentor.Pipeline(source_directory=fd, output_directory=tfd)\n # p.random_distortion(probability=1.0, grid_width=10, grid_height=10, magnitude=5)\n # p.flip_left_right(probability=0.5)\n # for i in range(10):\n # p.process()\n # del p\n\nif __name__ == '__main__':\n\n create_data_augumentation()\n","sub_path":"img_aug.py","file_name":"img_aug.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"441327862","text":"import numpy as np\nimport random\nimport time\n'''\n网格4*8\n状态4*8*2\n初始化全为零\n到达cliff区域-100,重新开始\n到达终点为100\n碰到墙就是-1,但是没死,继续走\n每次找最优的走法\n学习率0.1\n\n'''\n# 选择最佳的行为方向\n# x,y为当前的坐标,将要选择下一个坐标,并返回\n\n# direction 1,2,3,4-------->top,bottom,left,right\n\n\ndef pi(net, x, y, row, column):\n #print(x,y)\n if random.randint(1, 10) <= 8:\n # select the most significant direction\n maxq = net[x][y][0]\n direction = 0\n for i in range(1,4):\n if maxq<=net[x][y][i]:\n direction = i\n maxq = net[x][y][i]\n #print(\"selection:\")\n #print(net[x][y])\n return direction+1\n else:\n # random choice a direction\n #print(\"random\")\n return random.randint(1, 4)\n\n\ndef QFunc(Q, Qmax, alpha, discount_factor, reward):\n return Q+alpha*(reward+discount_factor*Qmax-Q)\n\n\ndef run(net, x, y, alpha, discount_factor, ex, ey, row, column,episode):\n ox = x\n oy = y\n while episode >0:\n #print(episode)\n notEnd = True\n while notEnd:\n reward = -1\n # strategy,get the direction\n direction = pi(net, x, y, row, column)\n # get the new position\n Qmax = -10000\n nx = x\n ny = y\n if direction == 1:\n # top\n if x-1 < 0:\n reward = -5\n Qmax = np.max(net[x][y])\n else:\n if x-1 == 0:\n if y == ey:\n reward = 100\n notEnd = False\n elif y != 0:\n reward = -100\n notEnd = False\n Qmax = np.max(net[x-1][y])\n nx = x-1\n ny = y\n\n if direction == 2:\n # bottom\n if x+1 > row-1:\n reward = -5\n Qmax = np.max(net[x][y])\n else:\n\n Qmax = np.max(net[x+1][y])\n nx = x+1\n ny = y\n if direction == 3:\n # left\n if y-1 < 0:\n reward = -5\n Qmax = np.max(net[x][y])\n else:\n\n Qmax = np.max(net[x][y-1])\n nx = x\n ny = y-1\n if direction == 4:\n # right\n if x==0 and y==0:\n reward = -100\n notEnd = False\n Qmax = np.max(net[x][y+1])\n nx = x\n ny = y+1\n elif y+1 > column-1:\n reward = -5\n Qmax = np.max(net[x][y])\n else:\n Qmax = np.max(net[x][y+1])\n nx = x\n ny = y+1\n\n net[x][y][direction-1] = QFunc(net[x][y][direction-1], Qmax,\n alpha, discount_factor, reward)\n x = nx \n y = ny \n episode -= 1\n x = ox\n y = oy\n\n\n\ndef get_route(net):\n route = [[0,0]]\n step = 0\n while not(route[step][0]==0 and route[step][1]>0):\n maxq = net[route[step][0]][route[step][1]][0]\n x = route[step][0]-1\n y = route[step][1]\n if maxq < net[route[step][0]][route[step][1]][1]:\n maxq = 
net[route[step][0]][route[step][1]][1]\n x = route[step][0]+1\n y = route[step][1]\n if maxq < net[route[step][0]][route[step][1]][2]:\n maxq = net[route[step][0]][route[step][1]][2]\n x = route[step][0]\n y = route[step][1]-1\n if maxq < net[route[step][0]][route[step][1]][3]:\n maxq = net[route[step][0]][route[step][1]][3]\n x = route[step][0]\n y = route[step][1]+1\n route.append([x,y])\n step+=1\n\n return route\n\n\n'''\n# 初始化网格用来记录权值\nrow = 4\ncolumn = 8\n\nnet = np.zeros(row*column*4).reshape(4, 8, 4)\n\n\n# 初始化学习率和折扣因素\nalpha = 0.1\ndiscount_factor = 0.5\nrun(net, 0, 0, alpha, discount_factor, 0, column-1, row, column,500)\nprint(net)\n'''","sub_path":"CliffWalking.py","file_name":"CliffWalking.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"174266523","text":"#!/usr/bin/python3 -u\n# -*- coding: utf-8 -*-\n__author__ = \"Blurgy\";\n\nimport os \nimport numpy as np \nimport data \nimport nn_utils as nn \nimport plot \nimport click \n\nimport warnings\nwarnings.filterwarnings(\"error\")\n\ndef fc_model(input_size, output_size):\n model = {};\n model['fc1'] = nn.fc_layer(input_size, output_size);\n model['output'] = None;\n return model;\n\ndef forward(model, x):\n model['output'] = model['fc1'].forward(x);\n\ndef backward(model, dz):\n model['fc1'].backward(dz);\n\ndef update(model, lr):\n model['fc1'].adam(lr);\n\ndef main():\n epoch = 10000;\n lr = 1e-4;\n batch_size = 64;\n model = fc_model(784, 10);\n\n train = data.preprocess_training_set();\n for ep in range(epoch):\n yes, cnt = 0, 0;\n X, Y = data.sample_batches_train(train, batch_size);\n for i in range(len(X)):\n x, y = X[i].reshape(-1, 784, 1), Y[i];\n forward(model, x);\n dz, loss = nn.grad(model, y);\n backward(model, dz);\n # print(np.max(dz), ' \\t', np.min(dz))\n # print(np.max(model['fc1'].w), '\\t', np.min(model['fc1'].w))\n # print(np.max(model['fc1'].b), '\\t', np.min(model['fc1'].b))\n update(model, lr);\n\n prediction = np.argmax(model['output'], axis=1);\n score = prediction.reshape(-1,1) == y.reshape(-1,1)\n # print(score)\n yes += np.sum(score);\n cnt += len(y);\n # print(\"???\", prob[a0,y,a2])\n print(\"[%d/%d]: %.2f%%, loss = %.2f\" % (yes, cnt, yes / cnt * 100, loss), end = '\\r');\n # input()\n print()\n\nif __name__ == '__main__':\n main()\n","sub_path":"mnist/nn/conv/train_1layer_fc.py","file_name":"train_1layer_fc.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"414518456","text":"# -*- coding: utf-8 -*-\n\"\"\"This package is a framework for creating large-scale agent-based geographical models.\n\nSubmodules\n==========\n\n\n\"\"\"\n\n__title__ = 'honeybees'\n__version__ = 0.1\n__email__ = \"j.a.debruijn at outlook com\"\n__status__ = 'development'\n\nfrom numba import config\n\nconfig.THREADING_LAYER = 'safe' # set threading mode for numba to safe, requires tbb","sub_path":"honeybees/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"287294454","text":"import sys\nimport pybedtools\nimport pandas as pd\nimport os\nfrom bx.intervals.intersection import Intersecter, Interval\nimport operator\n\n\ndef tn5(file1, file2):\n # determining tn5 integration sites\n count = 0\n with open(file1, 'r') as fh, open(file2, 'w') as out:\n for line in fh:\n new = 
line.strip().split('\\t')\n if new[5] == \"+\":\n out.write(new[0] + '\\t' + str(int(new[1]) + 4) + '\\t' + str(int(new[1]) + 5) + '\\n')\n else:\n out.write(new[0] + '\\t' + str(int(new[2]) - 6) + '\\t' + str(int(new[2]) - 5) + '\\n')\n count += 1\n return count\n\n\ndef coverage_norm(file1, file2, total, size=2.1e9):\n # determining normalized coverage\n with open(file1, 'r') as fh, open(file2, 'w') as out:\n for line in fh:\n new = line.strip().split('\\t')\n last = (size * int(new[4])) / (int(total) * (int(new[2]) - int(new[1])))\n out.write('\\t'.join(new[:4]) + '\\t' + str(last) + '\\n')\n\n\ndef split_window(file1, output, work, prefix):\n # spliting original ATAC regions into several pieces\n x = pybedtools.BedTool(file1)\n y = pybedtools.BedTool().window_maker(w=50, s=25, b=x) # window as 50bp, and stepping as 25bp\n y1 = y.sort()\n y1.saveas(\"{0}/{1}_sortmerge.bed\".format(work, prefix))\n with open(\"{0}/{1}_sortmerge.bed\".format(work, prefix), 'r') as fh, open(output, 'w') as out:\n b = 0\n for line in fh:\n new = line.strip().split('\\t')\n mybin = 'bin_' + str(b)\n out.write('\\t'.join(new[:3]) + '\\t' + mybin + '\\n')\n b += 1\n os.remove(\"{0}/{1}_sortmerge.bed\".format(work, prefix))\n\n\ndef merge_bed(file1, work, prefix):\n '''\n B73V4_ctg10 48746 48796 bin_1 23.31300\n '''\n print (\"starting\")\n x = pd.read_table(file1, header=None)\n x.columns = ['Chr', 'Start', 'Stop', 'Bin', 'Coverage']\n print (x.head())\n y = x[x['Coverage'] > 25] # filtering bins with coverage > 25\n y.columns = range(y.shape[1])\n print (y.head())\n y.to_csv(\"{0}/{1}_tmp.bed\".format(work, prefix), index=False, sep='\\t')\n os.system(\"sed -i 1d {0}/{1}_tmp.bed\".format(work, prefix))\n a = pybedtools.BedTool(\"{0}/{1}_tmp.bed\".format(work, prefix))\n b1 = a.sort() # sort bed files\n b2 = b1.merge(d=150) # merge bed files allowing 150bp gap\n b2.saveas(\"{0}/{1}_tmp_regions.bed\".format(work, prefix))\n x1 = pd.read_table(\"{0}/{1}_tmp_regions.bed\".format(work, prefix), header=None)\n x1.columns = ['Chr', 'Start', 'Stop']\n x1['Diff'] = x1['Stop'].sub(x1['Start'], axis=0)\n print (x1.head())\n y1 = x1[x1['Diff'] > 50] # filter ACR with length > 50\n y2 = y1.iloc[1:, :3]\n y2.to_csv(\"{0}/{1}_relativetn5.merge.coverage.bed\".format(work, prefix), index=False, sep='\\t')\n os.system(\"sed -i 1d {0}/{1}_relativetn5.merge.coverage.bed\".format(work, prefix))\n os.remove(\"{0}/{1}_tmp_regions.bed\".format(work, prefix))\n os.remove(\"{0}/{1}_tmp.bed\".format(work, prefix))\n\n\ndef ext_black(file1, file2):\n # removing organelle genome\n pos = set([])\n with open(file1, 'r') as fh, open(file2, 'w') as out:\n for line in fh:\n if line.startswith('#'):\n continue\n new = line.strip().split('\\t')\n pos.add(new[0])\n\n for x in sorted(list(pos)):\n y = x.split(':')\n chrom = y[0]\n p = y[1].split('-')\n mypos = chrom + '\\t' + p[0] + '\\t' + p[1] + '\\n'\n out.write(mypos)\n\n\ndef determine_summit(tfile, file1, file2):\n # tfile is calculated tn5 bed file\n mdict = {} # storing tn5 density information\n with open(tfile, 'r') as fh:\n for line in fh:\n new = line.strip().split('\\t')\n if new[0] not in mdict:\n mdict[new[0]] = {}\n if new[1] not in mdict[new[0]]:\n mdict[new[0]][new[1]] = 0\n mdict[new[0]][new[1]] += 1\n\n intersect_dict = {}\n\n for x in mdict:\n mychr = x\n if mychr not in intersect_dict:\n intersect_dict[mychr] = Intersecter()\n for y in mdict[x]:\n st = int(y)\n sp = int(y)\n den = mdict[x][y]\n name = str(st) + '_' + str(den)\n intersect_dict[mychr].add_interval(Interval(st, 
sp, value=name))\n\n with open(file1, 'r') as t, open(file2, 'w') as out:\n for line in t:\n y = line.strip().split('\\t')\n mychr = y[0]\n start = int(y[1])\n stop = int(y[2])\n a = intersect_dict[mychr].find(start, stop)\n if len(a) > 0:\n mdict = {}\n for b in a:\n if b.value not in mdict:\n mdict[b.value] = float(b.value.split('_')[1])\n sorted_x = sorted(mdict.items(), key=operator.itemgetter(1), reverse=True)\n m = 'Summit=' + sorted_x[0][0].split('_')[0]\n out.write(line.strip() + '\\t' + m + '\\n')\n","sub_path":"atac_toolkits.py","file_name":"atac_toolkits.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"339302073","text":"from NucleusUtils.i18n.parser import get_raw_tags, Keyword, parsers\nfrom . import defaults\nfrom .locales import get_locale\n\ndefault_locale = ''\n\n\ndef set_default_locale(name=default_locale):\n global default_locale\n default_locale = name\n\n\ndef raw_translate(key, locale=None, context=defaults.CONTEXT_NAME):\n if locale is None:\n locale = default_locale\n locale = get_locale(locale, context)\n if locale:\n return locale.get_translation(key)\n return key\n\n\ndef translate(key, locale=None, context=defaults.CONTEXT_NAME, parse=True, local_parsers=None, settings=None):\n if local_parsers is None:\n local_parsers = []\n if settings is None:\n settings = {}\n if locale is None:\n locale = default_locale\n text = raw_translate(key, locale, context)\n\n if not parse:\n return text\n\n for tag in get_raw_tags(text):\n keyword = Keyword.parse(tag)\n for parser in local_parsers + parsers:\n if parser.check(keyword, settings):\n text = parser.parse(text, keyword, settings)\n\n return text\n","sub_path":"NucleusUtils/i18n/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"110183169","text":"from django.test import TestCase, Client\nfrom django.urls import reverse\nfrom wines.models import Wine, GrapeSort, Reviews, Rating, Category, RatingStar\nimport json\n\n\nclass TestViews(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.list_url = reverse('wine_list')\n self.detail_url = reverse(\"wine_detail\", args=['project1'])\n self.add_rating = reverse(\"add_rating\")\n self.grape_detail = reverse(\"grape_sort_detail\", args=['project1'])\n self.project1 = Wine.objects.create(\n name=\"project1\",\n description=\"test\",\n country=\"Italy\",\n year=2001,\n image=\"grape\",\n url=\"project1\"\n )\n\n def test_wine_list_get(self):\n\n response = self.client.get(self.list_url)\n\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'wines/wine_list.html')\n\n def test_wine_detail_get(self):\n response = self.client.get(self.detail_url)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'wines/wine_detail.html')\n\n\n\n\n\n\n\n","sub_path":"Solutions/Task3/853503_Саба_Наскидашвили/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"445245476","text":"from flask import request\nfrom flask_restful import Resource,reqparse\nfrom flask_jwt import jwt_required\nfrom models.branchproduct import BranchProductModel\nimport helper\nimport security\n\nclass BranchProduct(Resource):\n parser=reqparse.RequestParser()\n 
parser.add_argument('branch_id',type=int,required=False)\n parser.add_argument('product_id',type=int,required=False)\n \n @jwt_required()\n def put(self,merchant_code,id):\n if security.isCorrectMerchant(merchant_code):\n data=BranchProduct.parser.parse_args()\n bp=BranchProductModel.find_by_bp_id(merchant_code,\\\n data['branch_id'],\\\n data['product_id'])\n if bp:\n return {'message': 'Product found existed in branch'},400\n else:\n branchproduct = BranchProductModel(merchant_code,**data)\n try:\n helper.save_to_db(branchproduct)\n return helper.json(branchproduct),200\n except Exception as e:\n return {'message':str(e).split('\\n')[0]},500\n else:\n return {'message': 'Unauthorized'},401\n \n @jwt_required()\n def delete(self,merchant_code,id):\n branchproduct=BranchProductModel.find_by_id(merchant_code,id)\n if branchproduct:\n if security.isCorrectMerchant(merchant_code):\n try:\n helper.delete_from_db(branchproduct)\n return {'message':'Comment deleted'}, 200\n except Exception as e:\n return {'message':str(e).split('\\n')[0]},500 \n else:\n return {'message': 'Unauthorized'},401 \n else:\n return {'message': 'Product not found in branch'},400\n\nclass BranchProductList(Resource):\n def get(self,merchant_code):\n branch_id=request.args.get('branch_id')\n branchproduct=BranchProductModel.find_branchproduct(merchant_code,branch_id)\n if (branchproduct and\n branchproduct.count()>0):\n return {x.id:helper.json(x) for x in branchproduct}\n else:\n return {'message': 'Comment not found'},400\n","sub_path":"resources/branchproduct.py","file_name":"branchproduct.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"245324771","text":"import pygame\n\npygame.font.init()\nFONT = pygame.font.SysFont('Arial', 32)\nSFONT = pygame.font.SysFont('Arial', 20)\nclass text_input():\n def __init__(self, DISPLAY,x, y, w, h, text =''):\n self.display = DISPLAY\n self.rect = pygame.Rect(x, y, w, h)\n self.active = False\n self.colour = (200,200,200)\n self.text = text\n self.text_surface = FONT.render(text,True, (0,0,0) )\n \n def activate(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.rect.collidepoint(event.pos):\n self.active = not self.active\n self.colour = (100,100,100)\n else:\n self.active = False\n self.colour = (200,200,200)\n if event.type == pygame.KEYDOWN:\n if self.active:\n if event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n elif event.key == pygame.K_RETURN:\n self.active = False\n self.colour = (200,200,200)\n elif event.key == pygame.K_TAB:\n self.active = False\n self.colour = (200,200,200)\n else:\n self.text += event.unicode\n self.text_surface = FONT.render(self.text, True, (0,0,0))\n def draw(self):\n self.display.blit(self.text_surface, (self.rect.x+5,self.rect.y+5))\n pygame.draw.rect(self.display, (self.colour), self.rect, 2)\n\n","sub_path":"text_input.py","file_name":"text_input.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"93739438","text":"\n\nfrom xai.brain.wordbase.nouns._rabbit import _RABBIT\n\n#calss header\nclass _RABBITED(_RABBIT, ):\n\tdef __init__(self,): \n\t\t_RABBIT.__init__(self)\n\t\tself.name = \"RABBITED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"rabbit\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_rabbited.py","file_name":"_rabbited.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274428002","text":"#!/usr/bin/env python3\n#\n\nimport sys\nimport argparse\nimport os\nimport os.path\nimport json\nimport copy\nimport random\n\nOUTPUT_TEMPLATE_JSON = \"\"\"\n{\n \"name\": \"\",\n \"ceilingMaterial\": \"Walls/Drywall\",\n \"floorMaterial\": \"Fabrics/CarpetWhite 3\",\n \"wallMaterial\": \"Walls/DrywallBeige\",\n \"performerStart\": {\n \"position\": {\n \"x\": 0,\n \"z\": 0\n },\n \"rotation\": {\n \"y\": 0\n }\n },\n \"objects\": [],\n \"goal\": {},\n \"answer\": {}\n}\n\"\"\"\n\nOUTPUT_TEMPLATE = json.loads(OUTPUT_TEMPLATE_JSON)\n\n# the following mins and maxes are inclusive\nMIN_POSITION = -4.8\nMAX_POSITION = 4.8\nPOSITION_DIGITS = 1\nMIN_ROTATION = 0\nMAX_ROTATION = 359\nROTATION_DIGITS = 0\n\n\ndef random_position():\n return round(random.uniform(MIN_POSITION, MAX_POSITION), POSITION_DIGITS)\n\n\ndef random_rotation():\n rotation = round(random.uniform(MIN_ROTATION, MAX_ROTATION), ROTATION_DIGITS)\n if ROTATION_DIGITS == 0:\n rotation = int(rotation)\n return rotation\n\n\ndef generate_file(name):\n global OUTPUT_TEMPLATE\n body = copy.deepcopy(OUTPUT_TEMPLATE)\n body['name'] = os.path.basename(name)\n position = body['performerStart']['position']\n position['x'] = random_position()\n position['z'] = random_position()\n body['performerStart']['rotation']['y'] = random_rotation()\n\n with open(name, 'w') as out:\n json.dump(body, out, indent=2)\n\n\ndef generate_one_fileset(prefix, count):\n # skip existing files\n index = 1\n dirname = os.path.dirname(prefix)\n if dirname != '':\n os.makedirs(dirname, exist_ok=True)\n\n while count > 0:\n file_exists = True\n while file_exists:\n name = f'{prefix}-{index:04}.json'\n file_exists = os.path.exists(name)\n index += 1\n\n generate_file(name)\n count -= 1\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='Create one or more scene descriptions')\n parser.add_argument('--prefix', required=True, help='Prefix for output filenames')\n parser.add_argument('-c', '--count', type=int, default=1, help='How many scenes to generate [default=1]')\n parser.add_argument('--seed', type=int, default=None, help='Random number seed [default=None]')\n \n args = parser.parse_args(argv[1:])\n\n random.seed(args.seed)\n generate_one_fileset(args.prefix, args.count)\n \n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scene_generator/scene_generator.py","file_name":"scene_generator.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"74444193","text":"import hashlib\nimport json\nfrom time import gmtime, strftime\n\nclass Blockchain(object):\n#Creattion of the class blockchain which have an empty-list and a list\n#of transaction.\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tConstructor of the class\n\t\t\"\"\"\n\t\tself.chain = [] # initial empty list of the block chain\n\t\tself.transactions = [] # initial empty list of the transactions\n\t\t# Creation of the genesis block when the class blockchain is instantiated\n\t\tself.create_genesis_block()\n\t\n\tdef create_genesis_block(self):\n\t\t\"\"\"\n\t\tCreate a new genesis block when the block chain is instantiated.\n\t\t\"\"\"\n\t\tgenesis_block = {\n\t\t\t'index': 0,\n\t\t\t'timestamp': strftime(\"%a, %d %b %Y %H:%M:%S\", 
gmtime()),\n\t\t\t'transactions': self.transactions,\n\t\t\t'previous_hash': 1\n\t\t}\n\t\t# Reset the current list of transactions\n\t\tself.transactions = []\n\t\t# Add the hash of the block inside the block\n\t\tgenesis_block['hash'] = self.hash(genesis_block)\n\t\t# Add the new genesis block in the blockchain\n\t\tself.chain.append(genesis_block)\n\t\treturn genesis_block\n\t\t\n\t\t\n\tdef new_block(self, previous_hash):\n\t\t\"\"\"\n\t\tCreate a new Block in the Blockchain which takes in parameter\n\t\tPREVIOUS_HASH : Hash of the previous Block\n\t\tAnd return\n\t\tBLOCK : The new Block\n\t\t\"\"\"\n\n\t\tblock = {\n\t\t\t'index': len(self.chain),\n\t\t\t'timestamp': strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime()),\n\t\t\t'transaction': self.transactions,\n\t\t\t'previous_hash': previous_hash\n\t\t}\n\n\t\t# Reset the current list of transactions\n\t\tself.transactions = []\n\t\t# Add the hash of the block inside the block\n\t\tblock['hash'] = self.hash(block)\n\t\t# We must to make sure that the new block is correct\n\t\tif self.valid_block(block) :\n\t\t\t# Add the new block in the blockchain\n\t\t\tself.chain.append(block)\n\t\telse :\n\t\t\tprint(\"The block \" + str(block['index']) + \" with an amount of \" + str(block['transaction']) + \"is not valid\")\n\t\treturn block\n\t\n\tdef new_transaction(self, amount):\n\t\t\"\"\"\n\t\tNew transaction to go inside a new block which takes in parameter\n\t\t- AMOUNT : The new amount that the block provide\n\t\tAnd return :\n\t\t- INDEX : The index of the Block that will hold this transaction\n\t\t\"\"\"\n\t\tself.transactions.append({'data': amount})\n\t\treturn self.last_block['index'] + 1\n\t \n\tdef valid_block(self, block) :\n\t\t\"\"\"\n\t\tDetermine if a given block is valid. This function takes in parameter :\n\t\t- BLOCK : the block that we want to make sure it's valid\n\t\tAnd return boolean expression\n\t\t\"\"\"\n\t\tif block['previous_hash'] != self.last_block['hash'] :\n\t\t\treturn False\n\t\telse :\n\t\t\treturn True\n\t\n\t\t \n\t\n\t@staticmethod\n\t# The static method doesn't need an object instantiation.\n\tdef hash(block):\n\t\t\"\"\"\n\t\tCreates a SHA-256 hash of a Block which takes in parameter\n\t\tBlock: A block of the blockchain\n\t\tAnd return\n\t\tHASH : the hash in 256 bits\n\t\t\"\"\"\n\t\t# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n\t\tblock_string = json.dumps(block, sort_keys=True).encode()\n\t\t# Return in hexadecimal to see the hash with more facilities\n\t\treturn hashlib.sha256(block_string).hexdigest()\n\t\n\t@property\n\t# The property is used to replace getter and setter in our class\n\tdef last_block(self):\n\t\t\"\"\"\n\t\tReturn the last block of the chain. 
If the index of the dictionnary \n\t\tis -1, the block is the end of the blockchain.\n\t\t\"\"\"\n\t\treturn self.chain[-1]\n\nnewBlockChain = Blockchain()\nnewBlockChain.new_transaction(24) \nnewBlockChain.new_transaction(67) \nprevBlock = newBlockChain.last_block\nnewBlockChain.new_block(prevBlock['hash'])\n","sub_path":"Blockchain.py","file_name":"Blockchain.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"453209916","text":"import signal\nfrom flask import Flask, request\nfrom flask_cors import CORS\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np\nfrom predict import predict\nfrom requests.auth import HTTPBasicAuth\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nimport math, time\nfrom _thread import start_new_thread\n\nscalers = {}\nprices = {}\nprediction_set = []\ndf = []\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\ndef load_data(stock, seq_len, split):\n amount_of_features = len(stock.columns)\n data = stock.as_matrix()\n sequence_length = seq_len + 1\n result = []\n global scalers\n global prices\n \n for index in range(len(data) - sequence_length):\n scalers[index] = MinMaxScaler(feature_range=(0,1))\n prices[index] = MinMaxScaler(feature_range=(0,1))\n \n prices[index].fit_transform(data[index: index + sequence_length][:, -1].reshape(-1,1))\n result.append(scalers[index].fit_transform(data[index: index + sequence_length]))\n \n result = np.array(result)\n row = len(result) * split\n train = result[:int(row), :]\n x_train = train[:, :-1]\n y_train = train[:, -1][:,-1]\n x_test = result[int(row):, :-1]\n y_test = result[int(row):, -1][:,-1]\n \n \n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], amount_of_features))\n x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], amount_of_features)) \n \n return [x_train, y_train, x_test, y_test]\n\napp = Flask(__name__)\n\nBearer = None\n\ndef prep_prediction(df, ticker, _min, _max):\n dates = df['date']\n labels = df['label']\n\n df = df.drop(['date', 'label'], axis=1)\n columnsTitles = [\"change\", \"changeOverTime\", \"changePercent\", \"high\", \"low\", \"open\", \"unadjustedVolume\", \"volume\", \"vwap\", \"close\"]\n df=df.reindex(columns=columnsTitles)\n\n Bearer = getBearer()\n headers = {\n 'Authorization' : 'Bearer ' + Bearer,\n 'Content-Type': 'application/json'\n }\n payload = {\n \"startDate\": _min,\n \"endDate\": _max,\n \"where\": {\n \"ticker\" : [ticker]\n }\n }\n response = requests.post('https://api.marquee.gs.com/v1/data/USCANFPP_MINI/query', json=payload, headers=headers)\n extraDf = pd.DataFrame(response.json()['data'])\n df['financialReturnsScore'] = extraDf['financialReturnsScore']\n df['growthScore'] = extraDf['growthScore']\n df['integratedScore'] = extraDf['integratedScore']\n df['multipleScore'] = extraDf['multipleScore']\n\n columnsTitles = [\"change\", \"changeOverTime\", \"changePercent\", \"high\", \"low\", \"open\", \"unadjustedVolume\", \"volume\", \"vwap\", \"financialReturnsScore\", \"growthScore\", \"integratedScore\", \"multipleScore\", \"close\"]\n df=df.reindex(columns=columnsTitles)\n\n window = 15 # Another hyperparameter\n\n X_train, y_train, X_test, y_test = load_data(df[::-1], window, 0.85)\n print(\"X_train\", X_train.shape)\n print(\"y_train\", y_train.shape)\n print(\"X_test\", X_test.shape)\n 
print(\"y_test\", y_test.shape)\n\n global prediction_set\n prediction_set = X_test\n\ndef getBearer():\n payload = \"nothing to see here\"\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n response = requests.post('https://idfs.gs.com/as/token.oauth2', data=payload, headers=headers)\n print (response)\n return response.json()['access_token']\n\n@app.route('/')\ndef index():\n return 'API working'\n\n@app.route(\"/gs/all/\")\ndef all():\n try:\n if Bearer is None:\n Bearer = getBearer()\n except UnboundLocalError:\n Bearer = getBearer()\n headers = {\n 'Authorization' : 'Bearer ' + Bearer\n }\n response = requests.get('https://api.marquee.gs.com/v1/data/USCANFPP_MINI/coverage', headers=headers)\n return response.text\n\n@app.route(\"/gs/\", methods=['POST'])\ndef gs(ticker):\n data = request.get_json()\n print (data, ticker)\n try:\n if Bearer is None:\n Bearer = getBearer()\n except UnboundLocalError:\n Bearer = getBearer()\n headers = {\n 'Authorization' : 'Bearer ' + Bearer,\n 'Content-Type': 'application/json'\n }\n payload = {\n \"startDate\": data['start'],\n \"endDate\": data['end'],\n \"where\": {\n \"ticker\" : [ticker]\n }\n }\n response = requests.post('https://api.marquee.gs.com/v1/data/USCANFPP_MINI/query', json=payload, headers=headers)\n \n return response.text\n\n@app.route(\"/predict\")\ndef pred():\n i = predict(prediction_set)\n results = []\n pred_results = []\n counter = 0\n for x in np.arange(len(df) - 137 - 15, len(df) - 15 - 1, 1):\n results = results + [prices[x].inverse_transform(i[counter: counter + 15]).reshape(-1,1)[0]]\n counter = counter + 1\n return json.dumps({\"data\":results}, cls=NumpyEncoder)\n\n@app.route(\"/stock/\")\ndef stock(ticker):\n global df\n response = requests.get('https://api.iextrading.com/1.0/stock/'+ ticker +'/chart/5y')\n df = pd.DataFrame(response.json())\n\n df = df[df['date'] < '2017-06-28']\n min_date, max_date = df['date'].min(), df['date'].max()\n\n start_new_thread(prep_prediction, (df, ticker, min_date, max_date))\n return df.to_json(orient='records')\n\napplication = app #gunicorn looks for application\nCORS(app)\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"618786297","text":"def funcion_lote(x, funcion_recursiva):\n \"\"\"Calcula la función funcion_recursiva según X\n\n Args:\n x (int): variable de desición\n funcion_recursiva (none): función a operar\n\n Returns:\n int: valor de la funcion funcion_recursiva evaluada en x\n \"\"\"\n if x == 0:\n return x + funcion_recursiva * (0.5**x)\n\n return 3 + x + funcion_recursiva * (0.5**x)\n\n\ndef min_valor(funcion_recursiva):\n \"\"\"Evalua la funcion_recursiva en busqueda del menor valor posible\n\n Args:\n funcion_recursiva (none): función recursiva de la etapa\n\n Returns:\n list: menor valor de la función y valor de X\n \"\"\"\n xn = []\n\n xn.append(funcion_lote(0, funcion_recursiva))\n\n # Se pone en dos condiciones para revisar el siguiente y el siguiente a este, pues se puede dar el caso...tal vez se podria optimizar\n while xn[len(xn) - 1] >= funcion_lote(len(xn), funcion_recursiva) or xn[len(xn) - 1] >= funcion_lote(len(xn) + 1, funcion_recursiva):\n xn.append(funcion_lote(len(xn), funcion_recursiva))\n\n return min(xn), xn.index(min(xn))\n\n\ndef iteracion(numero_iteraciones, valor_inial):\n \"\"\"Realiza las numero_iteraciones con 
la función valor_inicial\n\n Args:\n numero_iteraciones (int): Número de iteraciones a realizar\n valor_inial (float): Valor de la restricción inicial\n\n Returns:\n List[list]: Una lista de listas con la información del menor valor y valor de X por iteración\n \"\"\"\n sol_iteracion = []\n\n sol_iteracion.append(min_valor(valor_inial))\n\n for i in range(numero_iteraciones - 1):\n sol_iteracion.append(min_valor(sol_iteracion[i][0]))\n\n sol_iteracion.reverse()\n\n return sol_iteracion\n\n\nif __name__ == \"__main__\":\n # Valores del problema\n numero_iteraciones = 3\n funcion_inial = 16\n \n solucion = iteracion(numero_iteraciones, funcion_inial)\n\n for i in range(len(solucion)):\n print(\n f'En la ronda de producción {i+1}, la cantidad optima de productos a producir es de {solucion[i][1]} con un con un costo de {solucion[i][0]*100}$')\n","sub_path":"Programción Dinámica Probabilística/pdp_aplicacion1.py","file_name":"pdp_aplicacion1.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"252316524","text":"class UserObject(dict):\n def __init__(self):\n object = {\n \"type\": \"aggregator\",\n \"id\": \"$hour\",\n \"name\": \"average_per_hour\",\n \"function\": \"\"\"\n CREATE OR REPLACE FUNCTION average_per_hour(x text) RETURNS text AS $$\n\t return x[0:10] + \"0000000\"\n $$ language plpython3u;\n \"\"\"\n }\n super(UserObject, self).__init__(**object)\n","sub_path":"objects/aggregators/hour.py","file_name":"hour.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"335966189","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom mnist import build_network, build_network_32, calc_loss, evaluation\nfrom tensorflow.examples.tutorials.mnist import input_data as mnist_data\n\n# Importing MNIST datasets\nmnist = mnist_data.read_data_sets('MNIST_data', one_hot=True)\n\n# Tuned hyperparameters from mnist.py\ninput_dimension = 784\noutput_dimension = 10\nlearning_rate = 0.001\nbatch_size = 100\nl1 = 200\nl2 = 300\nl3 = 10\nepsilon = 0.2\n\ntf.reset_default_graph()\n\ndef accuracy_after_fgsm_attack(images, labels):\n \"\"\" Perform fast gradient sign method attack on a list of (image, label) pair to lead mnist classifier to misclassify\n return: new accuracy after perturbation\n \"\"\"\n #apply pertubation to images\n # Building the graph\n x = tf.placeholder(tf.float64, [None, input_dimension], name=\"input\")\n y = tf.placeholder(tf.float64, [None, output_dimension], name=\"labels\")\n z3, y_, _ = build_network(x, l1, l2, l3)\n loss = calc_loss(z3, y)\n\n pertubation = tf.sign(tf.gradients(loss, x))\n perturbed_op = tf.squeeze(epsilon * pertubation) + images\n \n sess=tf.Session() \n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=100)\n saver.restore(sess, \"./tmp_adam2_high_precision/mnist_model_epochs-26\")\n\n perturbed_images = sess.run(perturbed_op, feed_dict={x: images, y:labels})\n perturbed_accuracy = sess.run(accuracy, feed_dict={x: perturbed_images, y: labels})\n\n return perturbed_images, perturbed_accuracy\n\ndef create_perturbed_images():\n \"\"\" Demonstrate the FGSM attack result error rate vs epoch number by showing\n How the value of epoch number changes the error rate on the test set\n When epsilon is fixed to 0.1\n \"\"\"\n sample_images = mnist.test.images\n sample_labels = mnist.test.labels\n\n perturbed_images, _ 
= accuracy_after_fgsm_attack(sample_images, sample_labels)\n np.savetxt('perturbed_images_under_float64_epoch26_epsilon_' + str(epsilon) + '.txt', perturbed_images, delimiter = ',') \n\ndef demonstrate_attack_error_rate():\n perturbed_images = np.loadtxt(open('perturbed_images_under_float64_epoch26_epsilon_' + str(epsilon) + '.txt',\"rb\"),delimiter=\",\",skiprows=0)\n sample_labels = mnist.test.labels\n sample_images = mnist.test.images\n\n # Building the graph\n x = tf.placeholder(tf.float32, [None, input_dimension], name=\"input\")\n y = tf.placeholder(tf.float32, [None, output_dimension], name=\"labels\")\n z3, y_, _ = build_network_32(x, l1, l2, l3)\n loss = calc_loss(z3, y)\n accuracy = evaluation(y, y_)\n\n sess=tf.Session() \n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=100)\n saver.restore(sess, \"./tmp_adam1/mnist_model_epochs-26\")\n\n perturbed_accuracy = sess.run(accuracy, feed_dict={x: perturbed_images, y: sample_labels})\n #normal_accuracy = sess.run(accuracy, feed_dict={x: sample_images, y: sample_labels})\n print(\"epsilon:\" + str(epsilon))\n print(\"perturbed accuracy:\" + str(perturbed_accuracy))\n #print(\"Normal accuracy:\" + str(normal_accuracy))\n\ndef demonstrate_transfer_attack_vs_regular_attack():\n sample_labels = mnist.test.labels\n sample_images = mnist.test.images\n epsilons = [0.02, 0.05, 0.1, 0.15, 0.2]\n transfer_perturbed_accuracies = [0.962, 0.8853, 0.5813, 0.3507, 0.2258]\n regular_perturbed_accuracies = []\n\n x = tf.placeholder(tf.float32, [None, input_dimension], name=\"input\")\n y = tf.placeholder(tf.float32, [None, output_dimension], name=\"labels\")\n z3, y_, _ = build_network_32(x, l1, l2, l3)\n loss = calc_loss(z3, y)\n accuracy = evaluation(y, y_)\n\n for epsilon in epsilons:\n pertubation = tf.sign(tf.gradients(loss, x))\n perturbed_op = tf.squeeze(epsilon * pertubation) + sample_images\n \n sess=tf.Session() \n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=100)\n saver.restore(sess, \"./tmp_adam1/mnist_model_epochs-26\")\n\n perturbed_images = sess.run(perturbed_op, feed_dict={x: sample_images, y:sample_labels})\n regular_perturbed_accuracy = sess.run(accuracy, feed_dict={x: perturbed_images, y: sample_labels})\n\n regular_perturbed_accuracies.append(regular_perturbed_accuracy)\n\n plt.plot(epsilons, transfer_perturbed_accuracies)\n plt.plot(epsilons, regular_perturbed_accuracies)\n plt.title('FGSM Epoch 26 Regular Perturbed Accuracy VS Transferred Perturbed Accuracy from Accurate Model')\n plt.legend(['Transfer Pertubed Accuracy', 'Regular Perturbed Accuracy'], loc='upper left')\n plt.xlabel('Epsilon')\n plt.ylabel('Accuracy')\n plt.show()\n\n#create_perturbed_images()\n#demonstrate_attack_error_rate()\ndemonstrate_transfer_attack_vs_regular_attack()\n","sub_path":"structures_vs_robustness_v2_tuned_by_loss/epoch_number/transfer_attack.py","file_name":"transfer_attack.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433656751","text":"# 01 Matrix\nfrom collections import deque\nfrom typing import List\n\n\ndef updateMatrix(mat: List[List[int]]) -> List[List[int]]:\n if not mat or not mat[0]:\n return list()\n\n len_rows = len(mat)\n len_cols = len(mat[0])\n\n q = deque()\n max_len = len_rows * len_cols\n\n for i in range(len_rows):\n for j in range(len_cols):\n if mat[i][j] == 0:\n q.append((i, j))\n else:\n mat[i][j] = max_len\n\n directions = [\n (1, 0), # right\n (-1, 
0), # left\n (0, 1), # down\n (0, -1), # up\n ]\n\n while q:\n row, col = q.popleft()\n\n for dx, dy in directions:\n x = row + dx\n y = col + dy\n\n if (0 <= x < len_rows) \\\n and (0 <= y < len_cols) \\\n and (mat[x][y] > mat[row][col] + 1):\n q.append((x, y))\n mat[x][y] = mat[row][col] + 1\n\n return mat\n\n\nif __name__ == \"__main__\":\n result1 = updateMatrix([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n expected1 = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]\n assert result1 == expected1\n\n result2 = updateMatrix([[0, 0, 0], [0, 1, 0], [1, 1, 1]])\n expected2 = [[0, 0, 0], [0, 1, 0], [1, 2, 1]]\n assert result2 == expected2\n","sub_path":"0542_01_Matrix/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"412306226","text":"from datetime import date\n\nfrom questionnaire.forms.questionnaires import QuestionnaireFilterForm, PublishQuestionnaireForm\nfrom questionnaire.models import Questionnaire, Region, Organization\nfrom questionnaire.tests.base_test import BaseTest\n\n\nclass QuestionnaireFilterFormTest(BaseTest):\n def setUp(self):\n self.questionnaire = Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.FINALIZED,\n year=2013)\n\n self.form_data = {\n 'questionnaire': self.questionnaire.id,\n 'year': date.today().year + 1,\n 'name': 'New JRF'\n }\n\n def test_valid(self):\n questionnaire_filter = QuestionnaireFilterForm(self.form_data)\n self.assertTrue(questionnaire_filter.is_valid())\n\n def test_valid_with_published_questionnaire(self):\n questionnaire = Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.PUBLISHED,\n year=2013)\n form_data = {\n 'questionnaire': questionnaire.id,\n 'year': date.today().year + 1,\n 'name': 'New JRF'\n }\n questionnaire_filter = QuestionnaireFilterForm(form_data)\n self.assertTrue(questionnaire_filter.is_valid())\n\n def test_has_years_of_existing_questionnaires(self):\n questionnaire_filter = QuestionnaireFilterForm(self.form_data)\n self.assertIn(('', 'Choose a year'), questionnaire_filter.fields['year'].choices)\n for count in range(0, 10):\n year_option = date.today().year + count\n self.assertIn((year_option, year_option), questionnaire_filter.fields['year'].choices)\n\n def test_invalid_when_questionniare_is_blank(self):\n form_data = self.form_data.copy()\n form_data['questionnaire'] = ''\n questionnaire_filter = QuestionnaireFilterForm(form_data)\n self.assertFalse(questionnaire_filter.is_valid())\n self.assertIn(\"This field is required.\", questionnaire_filter.errors['questionnaire'])\n\n def test_invalid_when_year_is_blank(self):\n form_data = self.form_data.copy()\n form_data['year'] = ''\n questionnaire_filter = QuestionnaireFilterForm(form_data)\n self.assertFalse(questionnaire_filter.is_valid())\n self.assertIn(\"This field is required.\", questionnaire_filter.errors['year'])\n\n def test_valid_when_name_is_blank(self):\n form_data = self.form_data.copy()\n form_data['name'] = ''\n questionnaire_filter = QuestionnaireFilterForm(form_data)\n self.assertFalse(questionnaire_filter.is_valid())\n self.assertIn(\"This field is required.\", questionnaire_filter.errors['name'])\n\n def test_clean_year(self):\n questionnaire = Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.FINALIZED,\n year=date.today().year + 1)\n form_data = self.form_data.copy()\n form_data['year'] = questionnaire.year\n questionnaire_filter = 
QuestionnaireFilterForm(form_data)\n self.assertFalse(questionnaire_filter.is_valid())\n message = \"Select a valid choice. %d is not one of the available choices.\" % questionnaire.year\n self.assertIn(message, questionnaire_filter.errors['year'])\n\n def test_has_years_choices_exclude_existing_questionnaires_years(self):\n Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.FINALIZED,\n year=date.today().year + 1)\n questionnaire_filter = QuestionnaireFilterForm(self.form_data)\n self.assertIn(('', 'Choose a year'), questionnaire_filter.fields['year'].choices)\n for count in range(2, 9):\n year_option = date.today().year + count\n self.assertIn((year_option, year_option), questionnaire_filter.fields['year'].choices)\n self.assertNotIn((date.today().year + 1, date.today().year + 1), questionnaire_filter.fields['year'].choices)\n\n\nclass PublishQuestionnaireFormTest(BaseTest):\n def setUp(self):\n self.questionnaire = Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.FINALIZED,\n year=2013)\n self.who = Organization.objects.create(name=\"WHO\")\n self.afro = Region.objects.create(name=\"The Afro\", organization=self.who)\n self.paho = Region.objects.create(name=\"The Paho\", organization=self.who)\n\n self.form_data = {\n 'questionnaire': self.questionnaire.id,\n 'regions': [self.paho.id, self.afro.id]}\n\n def test_valid(self):\n publish_questionnaire_form = PublishQuestionnaireForm(initial={'questionnaire': self.questionnaire},\n data=self.form_data)\n self.assertTrue(publish_questionnaire_form.is_valid())\n self.assertIn((self.paho.id, self.paho.name), publish_questionnaire_form.fields['regions'].choices)\n self.assertIn((self.afro.id, self.afro.name), publish_questionnaire_form.fields['regions'].choices)\n\n def test_choices_only_has_regions_that_do_not_have_published_questionnaires(self):\n questionnaire = Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.PUBLISHED,\n year=2013, region=self.afro)\n data = {'questionnaire': self.questionnaire, 'regions': [self.paho.id]}\n publish_questionnaire_form = PublishQuestionnaireForm(initial={'questionnaire': self.questionnaire}, data=data)\n self.assertTrue(publish_questionnaire_form.is_valid())\n region_choices = [choice for choice in publish_questionnaire_form.fields['regions'].choices]\n self.assertIn((self.paho.id, self.paho.name), region_choices)\n self.assertNotIn((self.afro.id, self.afro.name), region_choices)\n\n def test_creates_copies_for_regions_on_save(self):\n Questionnaire.objects.create(name=\"JRF 2013 Core English\", status=Questionnaire.PUBLISHED, year=2013,\n region=self.afro)\n pacific = Region.objects.create(name=\"haha\", organization=self.who)\n asia = Region.objects.create(name=\"hehe\", organization=self.who)\n\n data = {'questionnaire': self.questionnaire, 'regions': [self.paho.id, pacific.id, asia.id]}\n\n publish_questionnaire_form = PublishQuestionnaireForm(initial={'questionnaire': self.questionnaire}, data=data)\n self.assertTrue(publish_questionnaire_form.is_valid())\n publish_questionnaire_form.save()\n questionnaires = Questionnaire.objects.filter(year=self.questionnaire.year)\n self.assertEqual(5, questionnaires.count())\n [self.assertEqual(1, region.questionnaire.all().count()) for region in [self.paho, pacific, asia]]\n self.assertEqual(1, self.afro.questionnaire.all().count())\n questionnaire = Questionnaire.objects.filter(id=self.questionnaire.id)[0]\n self.assertEqual(questionnaire.status, 
Questionnaire.PUBLISHED)","sub_path":"questionnaire/tests/forms/test_questionnaire_form.py","file_name":"test_questionnaire_form.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608107068","text":"import pandas\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn import feature_selection\nfrom sklearn .ensemble import ExtraTreesClassifier\nimport scipy as sp\nimport pickle\nnp.set_printoptions(threshold=np.nan)\nrepeat=20\n\n\ndf=pandas.read_csv('Text.csv')\nprint(df.shape)\ndfInq=pandas.read_csv('InqProf.csv')\ndf=df.join(dfInq, lsuffix='_left')\n#df=df[np.isfinite(df['NNP'])]\ndf.to_csv('temp.csv')\nprint(df['NNP'])\ndf=df.fillna(0)\n\n\narr=np.array(df)\n\n\nfast=pickle.load(open(\"FastTextPreTrained.pickle\", \"rb\"))\narr=np.append(arr, fast, axis=1)\n# print(np.shape(arr))\n\n\nY=arr[:, 1]\nYtest=arr[:, 1]\nX=arr[:, 5:]\nXtest=arr[400:, 5:]\n\nfinal=[]\nfor col in X.T:\n\tcor=sp.stats.pearsonr(col, Y)[0]\n\tfinal.append(cor)\nfinal=np.array(final)\nfeatures=np.argsort(final)\nfeatures+=5\n\nprint(features)\nresults=np.zeros((len(features), repeat))\nfor j in range(repeat):\n\tprint(\"repetition \", j)\n\n\tfor i in range(len(features)):\n\t\tdf=pandas.read_csv('Text.csv')\n\t\tdfInq=pandas.read_csv('InqProf.csv')\n\t\tdf=df.join(dfInq, lsuffix='_left')\n\t\tdf=df.fillna(0)\n\n\n\t\tarr=np.array(df)\n\n\t\tfast=pickle.load(open(\"FastTextPreTrained.pickle\", \"rb\"))\n\t\tarr=np.append(arr, fast, axis=1)\n\t\tnp.random.shuffle(arr)\n\n\t\tY=arr[:900, 1]\n\t\tYtest=arr[900:, 1]\n\t\tX=arr[:900, features[:i+1]]\n\t\tXtest=arr[900:, features[:i+1]]\n\t\tclf=ExtraTreesClassifier()\n\n\n\t\tclf.fit(X,Y)\n\n\t\t#error on train\n\t\tdomin=clf.predict(X)\n\t\terreurs=np.sum(np.where(domin==Y, 0, 1))\n\n\n\t\t#error on test\n\t\tdomin=clf.predict(Xtest)\n\t\terreurs=np.sum(np.where(domin==Ytest, 0, 1))\n\t\tresults[i,j]=1.0-float(erreurs)/len(Ytest)\n\tprint(results)\n\nprint(np.mean(results, axis=1))\n\n\n","sub_path":"Dominance/ArbreText.py","file_name":"ArbreText.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505210133","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /thrift/transport/THeaderTransport.py\n# Compiled at: 2018-09-11 21:54:05\n# Size of source mod 2**32: 12979 bytes\nimport struct, zlib\nfrom thrift.compat import BufferIO, byte_index\nfrom thrift.protocol.TBinaryProtocol import TBinaryProtocol\nfrom thrift.protocol.TCompactProtocol import TCompactProtocol, readVarint, writeVarint\nfrom thrift.Thrift import TApplicationException\nfrom thrift.transport.TTransport import CReadableTransport, TMemoryBuffer, TTransportBase, TTransportException\nU16 = struct.Struct('!H')\nI32 = struct.Struct('!i')\nHEADER_MAGIC = 4095\nHARD_MAX_FRAME_SIZE = 1073741823\n\nclass THeaderClientType(object):\n HEADERS = 0\n FRAMED_BINARY = 1\n UNFRAMED_BINARY = 2\n FRAMED_COMPACT = 3\n UNFRAMED_COMPACT = 4\n\n\nclass THeaderSubprotocolID(object):\n BINARY = 0\n COMPACT = 2\n\n\nclass TInfoHeaderType(object):\n KEY_VALUE = 1\n\n\nclass THeaderTransformID(object):\n ZLIB = 1\n\n\nREAD_TRANSFORMS_BY_ID = {THeaderTransformID.ZLIB: zlib.decompress}\nWRITE_TRANSFORMS_BY_ID = {THeaderTransformID.ZLIB: zlib.compress}\n\ndef _readString(trans):\n size = readVarint(trans)\n if size < 0:\n 
raise TTransportException(TTransportException.NEGATIVE_SIZE, 'Negative length')\n return trans.read(size)\n\n\ndef _writeString(trans, value):\n writeVarint(trans, len(value))\n trans.write(value)\n\n\nclass THeaderTransport(TTransportBase, CReadableTransport):\n\n def __init__(self, transport, allowed_client_types):\n self._transport = transport\n self._client_type = THeaderClientType.HEADERS\n self._allowed_client_types = allowed_client_types\n self._read_buffer = BufferIO(b'')\n self._read_headers = {}\n self._write_buffer = BufferIO()\n self._write_headers = {}\n self._write_transforms = []\n self.flags = 0\n self.sequence_id = 0\n self._protocol_id = THeaderSubprotocolID.BINARY\n self._max_frame_size = HARD_MAX_FRAME_SIZE\n\n def isOpen(self):\n return self._transport.isOpen()\n\n def open(self):\n return self._transport.open()\n\n def close(self):\n return self._transport.close()\n\n def get_headers(self):\n return self._read_headers\n\n def set_header(self, key, value):\n if not isinstance(key, bytes):\n raise ValueError('header names must be bytes')\n if not isinstance(value, bytes):\n raise ValueError('header values must be bytes')\n self._write_headers[key] = value\n\n def clear_headers(self):\n self._write_headers.clear()\n\n def add_transform(self, transform_id):\n if transform_id not in WRITE_TRANSFORMS_BY_ID:\n raise ValueError('unknown transform')\n self._write_transforms.append(transform_id)\n\n def set_max_frame_size(self, size):\n if not 0 < size < HARD_MAX_FRAME_SIZE:\n raise ValueError('maximum frame size should be < %d and > 0' % HARD_MAX_FRAME_SIZE)\n self._max_frame_size = size\n\n @property\n def protocol_id(self):\n if self._client_type == THeaderClientType.HEADERS:\n return self._protocol_id\n else:\n if self._client_type in (THeaderClientType.FRAMED_BINARY, THeaderClientType.UNFRAMED_BINARY):\n return THeaderSubprotocolID.BINARY\n if self._client_type in (THeaderClientType.FRAMED_COMPACT, THeaderClientType.UNFRAMED_COMPACT):\n return THeaderSubprotocolID.COMPACT\n raise TTransportException(TTransportException.INVALID_CLIENT_TYPE, 'Protocol ID not know for client type %d' % self._client_type)\n\n def read(self, sz):\n bytes_read = self._read_buffer.read(sz)\n bytes_left_to_read = sz - len(bytes_read)\n if bytes_left_to_read == 0:\n return bytes_read\n else:\n if self._client_type in (THeaderClientType.UNFRAMED_BINARY, THeaderClientType.UNFRAMED_COMPACT):\n return bytes_read + self._transport.read(bytes_left_to_read)\n self.readFrame(bytes_left_to_read)\n return bytes_read + self._read_buffer.read(bytes_left_to_read)\n\n def _set_client_type(self, client_type):\n if client_type not in self._allowed_client_types:\n raise TTransportException(TTransportException.INVALID_CLIENT_TYPE, 'Client type %d not allowed by server.' 
% client_type)\n self._client_type = client_type\n\n def readFrame(self, req_sz):\n first_word = self._transport.readAll(I32.size)\n frame_size, = I32.unpack(first_word)\n is_unframed = False\n if frame_size & TBinaryProtocol.VERSION_MASK == TBinaryProtocol.VERSION_1:\n self._set_client_type(THeaderClientType.UNFRAMED_BINARY)\n is_unframed = True\n else:\n if byte_index(first_word, 0) == TCompactProtocol.PROTOCOL_ID:\n if byte_index(first_word, 1) & TCompactProtocol.VERSION_MASK == TCompactProtocol.VERSION:\n self._set_client_type(THeaderClientType.UNFRAMED_COMPACT)\n is_unframed = True\n if is_unframed:\n bytes_left_to_read = req_sz - I32.size\n if bytes_left_to_read > 0:\n rest = self._transport.read(bytes_left_to_read)\n else:\n rest = b''\n self._read_buffer = BufferIO(first_word + rest)\n return\n elif frame_size > self._max_frame_size:\n raise TTransportException(TTransportException.SIZE_LIMIT, 'Frame was too large.')\n else:\n read_buffer = BufferIO(self._transport.readAll(frame_size))\n second_word = read_buffer.read(I32.size)\n version, = I32.unpack(second_word)\n read_buffer.seek(0)\n if version >> 16 == HEADER_MAGIC:\n self._set_client_type(THeaderClientType.HEADERS)\n self._read_buffer = self._parse_header_format(read_buffer)\n else:\n if version & TBinaryProtocol.VERSION_MASK == TBinaryProtocol.VERSION_1:\n self._set_client_type(THeaderClientType.FRAMED_BINARY)\n self._read_buffer = read_buffer\n elif byte_index(second_word, 0) == TCompactProtocol.PROTOCOL_ID:\n if byte_index(second_word, 1) & TCompactProtocol.VERSION_MASK == TCompactProtocol.VERSION:\n self._set_client_type(THeaderClientType.FRAMED_COMPACT)\n self._read_buffer = read_buffer\n else:\n raise TTransportException(TTransportException.INVALID_CLIENT_TYPE, 'Could not detect client transport type.')\n\n def _parse_header_format(self, buffer):\n buffer_transport = TMemoryBuffer()\n buffer_transport._buffer = buffer\n buffer.read(2)\n self.flags, = U16.unpack(buffer.read(U16.size))\n self.sequence_id, = I32.unpack(buffer.read(I32.size))\n header_length = U16.unpack(buffer.read(U16.size))[0] * 4\n end_of_headers = buffer.tell() + header_length\n if end_of_headers > len(buffer.getvalue()):\n raise TTransportException(TTransportException.SIZE_LIMIT, 'Header size is larger than whole frame.')\n self._protocol_id = readVarint(buffer_transport)\n transforms = []\n transform_count = readVarint(buffer_transport)\n for _ in range(transform_count):\n transform_id = readVarint(buffer_transport)\n if transform_id not in READ_TRANSFORMS_BY_ID:\n raise TApplicationException(TApplicationException.INVALID_TRANSFORM, 'Unknown transform: %d' % transform_id)\n transforms.append(transform_id)\n\n transforms.reverse()\n headers = {}\n while buffer.tell() < end_of_headers:\n header_type = readVarint(buffer_transport)\n if header_type == TInfoHeaderType.KEY_VALUE:\n count = readVarint(buffer_transport)\n for _ in range(count):\n key = _readString(buffer_transport)\n value = _readString(buffer_transport)\n headers[key] = value\n\n else:\n break\n\n self._read_headers = headers\n buffer.seek(end_of_headers)\n payload = buffer.read()\n for transform_id in transforms:\n transform_fn = READ_TRANSFORMS_BY_ID[transform_id]\n payload = transform_fn(payload)\n\n return BufferIO(payload)\n\n def write(self, buf):\n self._write_buffer.write(buf)\n\n def flush(self):\n payload = self._write_buffer.getvalue()\n self._write_buffer = BufferIO()\n buffer = BufferIO()\n if self._client_type == THeaderClientType.HEADERS:\n for transform_id in 
self._write_transforms:\n transform_fn = WRITE_TRANSFORMS_BY_ID[transform_id]\n payload = transform_fn(payload)\n\n headers = BufferIO()\n writeVarint(headers, self._protocol_id)\n writeVarint(headers, len(self._write_transforms))\n for transform_id in self._write_transforms:\n writeVarint(headers, transform_id)\n\n if self._write_headers:\n writeVarint(headers, TInfoHeaderType.KEY_VALUE)\n writeVarint(headers, len(self._write_headers))\n for key, value in self._write_headers.items():\n _writeString(headers, key)\n _writeString(headers, value)\n\n self._write_headers = {}\n padding_needed = (4 - len(headers.getvalue()) % 4) % 4\n headers.write(b'\\x00' * padding_needed)\n header_bytes = headers.getvalue()\n buffer.write(I32.pack(10 + len(header_bytes) + len(payload)))\n buffer.write(U16.pack(HEADER_MAGIC))\n buffer.write(U16.pack(self.flags))\n buffer.write(I32.pack(self.sequence_id))\n buffer.write(U16.pack(len(header_bytes) // 4))\n buffer.write(header_bytes)\n buffer.write(payload)\n else:\n if self._client_type in (THeaderClientType.FRAMED_BINARY, THeaderClientType.FRAMED_COMPACT):\n buffer.write(I32.pack(len(payload)))\n buffer.write(payload)\n else:\n if self._client_type in (THeaderClientType.UNFRAMED_BINARY, THeaderClientType.UNFRAMED_COMPACT):\n buffer.write(payload)\n else:\n raise TTransportException(TTransportException.INVALID_CLIENT_TYPE, 'Unknown client type.')\n frame_bytes = buffer.getvalue()\n frame_payload_size = len(frame_bytes) - 4\n if frame_payload_size > self._max_frame_size:\n raise TTransportException(TTransportException.SIZE_LIMIT, 'Attempting to send frame that is too large.')\n self._transport.write(frame_bytes)\n self._transport.flush()\n\n @property\n def cstringio_buf(self):\n return self._read_buffer\n\n def cstringio_refill(self, partialread, reqlen):\n result = bytearray(partialread)\n while len(result) < reqlen:\n result += self.read(reqlen - len(result))\n\n self._read_buffer = BufferIO(result)\n return self._read_buffer","sub_path":"pycfiles/thrift_adv-1.0.0.dev0-cp36-cp36m-macosx_10_13_x86_64/THeaderTransport.cpython-36.opt-1.py","file_name":"THeaderTransport.cpython-36.opt-1.py","file_ext":"py","file_size_in_byte":11275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"318261088","text":"\"\"\"\n=============================\nAuthor : lsw\nTime : 2019-10-16\nE-mail : 591271859@qq.com\n=============================\n\"\"\"\nimport os\nimport time\nimport unittest\nfrom comm.mylogger import log\nfrom HTMLTestRunnerNew import HTMLTestRunner\nfrom comm.constant import CASE_DIR, REPORT_DIR\n\n\nlog.info(\"--------------------测试用例开始执行--------------------\")\n# 创建测试套件\nsuit = unittest.TestSuite()\n# 将测试用例添加到套件\nloader = unittest.TestLoader()\nsuit.addTest(loader.discover(CASE_DIR))\n\n# 测试报告名称\nnow = time.strftime(\"%Y%m%d%H%M%S\")\nreport_name = now + 'test_report.html'\n\n# 执行测试用例并生成测试报告\nwith open(os.path.join(REPORT_DIR, report_name),'wb') as wo:\n runner = HTMLTestRunner(stream=wo,\n verbosity=2,\n title=\"python接口自动化项目报告\",\n description=\"python接口自动化项目模板\",\n tester=\"lsw\"\n )\n runner.run(suit)\n\nlog.info(\"--------------------测试用例执行完成--------------------\")\n\n\n\n\n\n\n\n\n\n","sub_path":"api_project_template/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"649208093","text":"from conv_net import ConvNet\nfrom utils.data_utils import 
load_all_image_paths_convnet, load_batch_of_data_convnet\nfrom utils.global_config import EPOCHS, BATCH_SIZE\n\nimport os\nfrom tqdm import trange\nimport tensorflow as tf\nimport logging\nimport argparse\nimport random\nimport sys\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger()\n\n\ndef train_model(train_data_dir: str, val_data_dir: str, save_model_path: str):\n \"\"\"\n\n Args:\n data_dir: Where the dataset is\n\n Returns:\n\n \"\"\"\n all_train_image_paths = load_all_image_paths_convnet(train_data_dir)\n all_val_image_paths = load_all_image_paths_convnet(val_data_dir)\n log.info(f\"{len(all_train_image_paths)} images belonging to the train set...\")\n log.info(f\"{len(all_val_image_paths)} images belonging to the validation set...\")\n model = ConvNet()\n log.info(\"Model built...\")\n BEST_VAL_LOSS = sys.maxsize\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(EPOCHS):\n train_loss_epoch = 0.0\n validation_loss_epoch = 0.0\n random.shuffle(all_train_image_paths)\n full_epoch_train = trange(0, len(all_train_image_paths), BATCH_SIZE)\n\n for step in full_epoch_train:\n train_image_batch, train_label_batch = load_batch_of_data_convnet(\n all_train_image_paths[step : step + BATCH_SIZE]\n )\n feed_dict_train = {\n model.input_images: train_image_batch,\n model.labels: train_label_batch,\n model.is_training: True,\n }\n _, train_loss = sess.run(\n [model.train_step, model.loss_fun], feed_dict_train\n )\n train_loss_epoch += train_loss\n full_epoch_train.set_description(\n f\"Loss for epoch {e+1}: %g\" % train_loss_epoch\n )\n\n for step in range(0, len(all_val_image_paths), BATCH_SIZE):\n val_image_batch, val_label_batch = load_batch_of_data_convnet(\n all_train_image_paths[step : step + BATCH_SIZE]\n )\n feed_dict_val = {\n model.input_images: val_image_batch,\n model.labels: val_label_batch,\n model.is_training: False,\n }\n val_loss = sess.run(model.loss_fun, feed_dict_val)\n validation_loss_epoch += val_loss\n\n print(f\"The validation loss for epoch {e+1} is: {validation_loss_epoch}\")\n if validation_loss_epoch < BEST_VAL_LOSS:\n print(\"===============================================\")\n print(f\"Found new best! 
Saving model on epoch {e+1}...\")\n print(\"===============================================\")\n saver.save(sess, f\"{save_model_path}\")\n BEST_VAL_LOSS = validation_loss_epoch\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"The script takes a directory where the train data is\"\n \"as well where the validation data is\"\n )\n parser.add_argument(\n \"--train_data_dir\",\n type=str,\n help=\"Location where the data is\",\n default=\"../data/train_data_conv\",\n )\n parser.add_argument(\n \"--val_data_dir\",\n type=str,\n help=\"Location where the data is\",\n default=\"../data/val_data_conv\",\n )\n parser.add_argument(\n \"--save_model_path\",\n type=str,\n help=\"Location where the model should be saved\",\n default=\"../logs/conv_net\",\n )\n\n args = parser.parse_args()\n train_model(args.train_data_dir, args.val_data_dir, args.save_model_path)\n","sub_path":"src/train_conv_net.py","file_name":"train_conv_net.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"171007078","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport platform\nimport os, sys\nDIR_UTILIDADES=\"..\" + os.sep + \"utilidades\" + os.sep + \"src\"\nsys.path.insert(0, DIR_UTILIDADES)\nfrom utilidades.ficheros.GestorFicheros import GestorFicheros\n\n\nFICHERO_CSV=\"centros_region.csv\"\nBD=\"..\" + os.sep + \"..\" + os.sep + \"docencia.db\"\ngf=GestorFicheros()\n\nCOMANDO_ECHO=\"echo\"\ncomandos=\"\"\"\n.mode csv\n.headers ON\n.separator :\nselect * from centros_region;\n.quit\n\"\"\"\n\n\nif platform.system()==\"Linux\":\n COMANDO_ECHO = \"echo \\\"{0}\\\"\".format(comandos)\nelse:\n COMANDO_ECHO = \"echo {0}\".format(comandos)\n\ngf.ejecutar_comando ( COMANDO_ECHO, \"|\", \"sqlite3 \" + BD, \">\", FICHERO_CSV)\n","sub_path":"descargador_html/generar_csv.py","file_name":"generar_csv.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"514871733","text":"import sys\nimport socket\nimport re\nimport os\nimport struct\nimport socket\nimport time\nimport sys\n\nfrom dnslib import *\nfrom ping import verbose_ping\n\n\n#--------------reading config ------------------\nclass Config:\n name = \"\"\n dest = \"\"\n protocol = \"\"\n port = 0\n send_pattern = \"\"\n response = \"\"\n interval = 0\n last_check = 0\n\n def parse(self,str):\n arr = str.split(',')\n self.name = arr[0]\n self.dest = arr[1]\n self.protocol = arr[2]\n if len(arr[3])>0:\n self.port = int(arr[3])\n self.send_pattern = arr[4]\n self.send_pattern = self.send_pattern.replace(\"\\\\r\",\"\\r\");\n self.send_pattern = self.send_pattern.replace(\"\\\\n\",\"\\n\");\n\n self.response = arr[5]\n self.interval = int(arr[6])\n\ndef load_config(path):\n configs = []\n filepath = path\n with open(filepath) as fp:\n line = fp.readline()\n while line:\n line = fp.readline()\n line = line.strip();\n if len(line)>5 and line.startswith('#') == 0:\n c = Config()\n c.parse(line)\n configs.append(c)\n return configs\n\ndef tcp_check_response(dest,port,content,response_contains):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(5)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.connect((dest, port))\n s.send(str.encode(content))\n data = (s.recv(1000000))\n s.shutdown(1)\n s.close()\n print('Received', repr(data))\n if data.find(str.encode(response_contains))==-1:\n return False\n else:\n return True\n 
except:\n return False\ndef tcp_check_connect(dest,port):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(0.30)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.connect((dest, port))\n return True\n except:\n return False\ndef dns_check(server,port,address):\n try:\n d = DNSRecord.question(address);\n # Sending the packet\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(('', 8888))\n sock.settimeout(2)\n sock.sendto(d.pack(), (server, port))\n #print(\"Packet Sent\")\n data, addr = sock.recvfrom(1024)\n sock.close()\n d = DNSRecord.parse(data);\n res = repr(d);\n #print(d)\n if res.find(\"RR: '\"+address+\".'\")==-1:\n return False;\n else:\n return True;\n except:\n return False;\ndef icmp_check(server):\n s = verbose_ping(server, 1000, 1,8)\n if not s:\n return False;\n if s.fracLoss==0:\n return True\n else:\n return False\n\ndef check(config):\n if config.protocol == 'ICMP':\n return icmp_check(config.dest)\n if config.protocol == 'DNS':\n return dns_check(config.dest,config.port,config.send_pattern)\n if config.protocol==\"TCP\":\n if len(config.send_pattern)>0 and len(config.response)>0:\n return tcp_check_response(config.dest,config.port,config.send_pattern,config.response)\n else:\n return tcp_check_connect(config.dest,config.port)\n\n\n\n\n\n\n#------------------------------------------\n\n\n\n#print(dns_check(\"8.8.8.8\",\"53\",\"google.com\"))\n\n#rep = tcp_check_response(\"honar8.com\",80,\"GET / HTTP/1.0\\r\\n\\r\\n\",\"200 OK\")\n#print(rep)\n\n#verbose_ping(\"honar8.com\", 1000, 1,8)\n#print(icmp_check(\"honar8.com\"))\n\n\n#rep = tcp_check_connect(\"imap.gmail.com\",993)\n#print(rep)\n\n#rep = tcp_check_connect(\"54.36.26.51\",33096)\n#print(rep)\n\n\n\nprint('using python version:' + sys.version)\nconfigs = load_config('config1.txt');\n\n\n#for c in configs:\n #print(c.name,c.interval)\n\nwhile 1:\n print('running...')\n tt = time.time();\n for c in configs:\n if c.last_check+c.interval /proc/sys/net/ipv4/ip_forward\")\n os.system(\"iptables -F\")\n os.system(\"iptables -t nat -F\")\n\n os.system(\"iptables -N internet -t mangle\")\n os.system(\"iptables -t mangle -A PREROUTING -j internet\")\n\n os.system(\"iptables -A FORWARD -i at0 -p tcp --dport 443 -j DROP\")\n os.system(\"iptables -A FORWARD -i wlan1 -p tcp --dport 443 -j DROP\")\n os.system(\"iptables -A FORWARD -i wlan0 -p tcp --dport 443 -j DROP\")\n\n #os.system(\"iptables -A INPUT -j LOG -log--level 4\")\n #os.system(\"iptables -A INPUT DROP\")\n\n os.system(\"iptables -t nat -D POSTROUTING 1\")\n os.system(\"iptables -P FORWARD ACCEPT\")\n os.system(\"iptables -t nat -A POSTROUTING -o \" + net_iface + \" -j MASQUERADE\")\n os.system(\"iptables --append FORWARD -j ACCEPT --in-interface at0\")\n\n os.system(\"iptables -A FORWARD -i wlan0 -p tcp --dport 22 -d 192.168.1.111 -j ACCEPT\")\n os.system(\"iptables -t nat -A PREROUTING -i at0 -d 0/0 -p tcp --dport 80 -j DNAT --to-destination 10.0.0.1:80\")\n os.system(\"iptables -A FORWARD -i wlan1 -j DROP\")\n\n #DHCP CONFIG\n dns_config = \"listen-address=127.0.0.1\\ninterface=at0\\ndomain-needed\\nbogus-priv\\nno-resolv\\nserver=8.8.8.8\\nserver=8.8.4.4\\ncache-size=4096\\nlocal=/home/\\nexpand-hosts\\ndomain=home\\ndhcp-range=10.0.0.2,10.0.0.99,255.255.255.0,14d\\ndhcp-option=option:router,10.0.0.1\\ndhcp-option=252\"\n print(\"[I] Backing up /etc/dnsmasq.conf...\")\n os.system(\"sudo cp /etc/dnsmasq.conf /etc/dnsmasq.conf.backup\")\n print(\"[I] Deleting old config file...\")\n os.system(\"sudo rm 
/etc/dnsmasq.conf > /dev/null 2>&1\")\n print(\"[I] Writing new config file...\")\n os.system(\"sudo echo -e '\" + dns_config + \"' > /etc/dnsmasq.conf\")\n\n #DNS CONFIG\n \n hosts_config = \"127.0.0.1\tlocalhost\\n10.0.0.1 connectivitycheck.android.com\\n10.0.0.1 connectivitycheck.gstatic.com\\n10.0.0.1 clients1.google.com\\n10.0.0.1\tclients3.google.com\\n10.0.0.1\tclients.l.google.com\\n10.0.0.1 captive.apple.com\\n10.0.0.1 1.1.1.1\\n\"\n print(\"[I] Backing up /etc/hosts...\")\n os.system(\"sudo cp /etc/hosts /etc/hosts.backup\")\n print(\"[I] Deleting old config file...\")\n os.system(\"sudo rm /etc/hosts > /dev/null 2>&1\")\n print(\"[I] Writing new config file...\")\n os.system(\"sudo echo -e '\" + hosts_config + \"' > /etc/hosts\")\n\n #DNSMASQ\n try:\n print(\"[I] Starting dnsmasq\")\n os.system(\"dnsmasq -C /etc/dnsmasq.conf -d &\")\n except:\n print(\"Error starting dnsmasq\")\n sys.exit()\n\n \n #DEAUTH\n print(\"[I] Bumping the neighbor Off...\")\n subprocess.Popen([\"aireplay-ng\", \"-0\", \"1\", \"-a\", bssid, ap_iface ])\n \n\n #CP\n print(\"[I] Starting captive portal\")\n os.system(\"python run.py\")\n\n try:\n while True:\n time.sleep(10)\n except KeyboardInterrupt:\n print(\"[!] Stopping... \")\n os.system(\"pkill airbase-ng\")\n os.system(\"pkill dnsmasq\")\n\nexcept KeyboardInterrupt:\n print(\"[!] Stopping... \")\n os.system(\"pkill airbase-ng\")\n os.system(\"pkill dnsmasq\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"203288516","text":"###################################################################################################\n### Author: Jai Fadia ###\n### This Python file is intended to display a basic implentation of the binary search algorithm ###\n### Commented as thoroughly as possible to explain each step ###\n###################################################################################################\n\nclass binSearch():\n def __init__(self, array):\n \"\"\"\n Constructor function to initialize the object.\n\n Arguments:\n - array: the array (list) to be searched (note: must be in ascending order)\n \"\"\"\n # note: binary search requires the input array to be sorted in ascending order\n self.array = array\n \n def find(self, value):\n \"\"\"\n Function that uses a binary search algorithm to find and return the index of a value from a list.\n\n Arguments:\n - value: the value that is being searched for\n\n Returns the index of the value that was entered.\n \"\"\"\n\n # the starting point of the algorithm is in the middle of the list\n # we will use the variable i to reference the current index\n i = int(len(self.array) / 2)\n\n # the upper and lower bounds of the search - these variables are updated as the algorithm progresses to narrow down the range between which we will be searching\n # upper initialized as the maximum index of the list\n upper = len(self.array)\n # lower initialized as the minimum index of the list\n lower = 0\n\n # initialize the output value of the list for each iteration of the algorithm\n out = None\n\n # initialize the while loop, keep looping through until 'value' equals the element in the list\n while out != value:\n # examine the element of index i\n out = self.array[i]\n\n if out == value:\n # return the index if out = value\n return f'Element {value} found at index {i}'\n break\n elif value > out:\n # update the lower bound and calculate the new midpoint between the 
upper and lower bounds for the next iteration\n lower = i\n i = int((upper + lower) / 2)\n elif value < out:\n # update the upper bound and calculate the new midpoint between the upper and lower bounds for the next iteration\n upper = i\n i = int((upper + lower) / 2)\n \n # break the loop if i = upper or i = lower - this scenario occurs when the value is not in the list\n if i == upper or i == lower:\n break\n \n return f'Element {value} not found in the list.'","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474493371","text":"import cv2\nimport numpy as np\nfrom math import ceil\nimport os\nfrom os import path\nfrom functools import cmp_to_key\nimport sys\nimport tempfile\nimport argparse\nimport qmg\n\nOUTPUT_DIR = 'E:\\\\Image\\\\Manga\\\\'\nMIN_WHITE = 245\n\nclass Rect(object):\n def __init__(self, center, size):\n self.width, self.height = size\n self.width += 2\n self.height += 2\n self.size = (self.width, self.height)\n w2, h2 = self.width/2, self.height/2\n self.cx, self.cy = center\n self.center = (self.cx, self.cy)\n self.sx, self.sy = self.cx-w2, self.cy-h2\n self.ex, self.ey = self.cx+w2, self.cy+h2\n self.s = (self.sx, self.sy)\n self.e = (self.ex, self.ey)\n\n\n# ref: https://gist.github.com/atarabi/6a230bc8b3f7983fe596\ndef threshold(image, radius=15, C=5):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 2 * radius + 1, C)\n\ndef find_external_contours(thresh):\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n external_num = hierarchy.shape[1] if hierarchy is not None else 0\n return contours[0:external_num]\n\ndef extract_rects_from_controus(contours, min_perimeter, max_perimeter):\n frames = []\n for contour in contours:\n frame = cv2.minAreaRect(contour)\n center, size, angle = frame\n # 縦・横が逆になっている場合、90度回転させる\n if angle < -45:\n size = tuple(reversed(size))\n angle = angle + 90\n w, h = size\n perimeter = 2 * (w + h)\n if min_perimeter < perimeter < max_perimeter and abs(angle) < 3.0 and 0.1 <= min(w, h) / max(w, h) <= 1.0:\n # frames.append((center, (w + 2, h + 2), angle)) # パディングを加える\n p = Rect(center, size)\n # p = (center, (center[0]-w2, center[1]-h2), (center[0]+w2, center[1]+h2), (w, h))\n frames.append(p)\n return frames\n\ndef cmp_frame(tolerance):\n def _cmp(lhs, rhs):\n return (lhs > rhs) - (lhs < rhs)\n\n def _cmp_frame(lhs, rhs):\n if lhs.center == rhs.center:\n return 0\n x1, y1 = lhs.center\n x2, y2 = rhs.center\n if abs(x1 - x2) < tolerance:\n return _cmp(y1, y2)\n else:\n return _cmp(x2, x1)\n\n return _cmp_frame\n\ndef imread(path):\n return cv2.imdecode(np.fromfile(path,dtype=np.uint8), cv2.IMREAD_COLOR)\n\ndef imwrite(img, path):\n cv2.imencode('.png',img)[1].tofile(path)\n\ndef cut_koma(img):\n # 剪开跨页\n height, width, _ = img.shape\n if width > height:\n center = int(width/2)\n average = np.average(img[0:height, center])\n if average > MIN_WHITE:\n return _cut_koma(img[:, center:]) + _cut_koma(img[:, :center])\n else:\n return [img]\n else:\n return _cut_koma(img)\n\nUP = 1\nDOWN = 2\nRIGHT = 4\nLEFT = 8\n\ndef split_image(img, center, direction):\n # 根据方向指示和中点分割图片\n center_x, center_y = np.int0(center)\n if not direction:\n frames = [img]\n elif direction == DOWN|LEFT|RIGHT:\n # 正品\n frames = [\n img[:center_y, :],\n img[center_y:, center_x:],\n 
img[center_y:, :center_x]\n ]\n elif direction == UP|RIGHT|LEFT:\n # 倒品\n frames = [\n img[:center_y, center_x:],\n img[:center_y, :center_x],\n img[center_y:, :]\n ]\n elif direction == UP|DOWN|RIGHT|LEFT:\n # 双四格\n frames = [\n img[:center_y, center_x:],\n img[center_y:, center_x:],\n img[:center_y, :center_x],\n img[center_y:, :center_x]\n ]\n elif direction == LEFT|UP|DOWN:\n # 左四格\n frames = [\n img[:, center_x:],\n img[:center_y, :center_x],\n img[center_y:, :center_x]\n ]\n elif direction == RIGHT|UP|DOWN:\n # 右四格\n frames = [\n img[:center_y, center_x:],\n img[center_y:, center_x:],\n img[:, :center_x]\n ]\n else:\n frames = [img]\n return frames\n\n\ndef _cut_koma(img):\n height, width, _ = img.shape\n thresh = threshold(img)\n contours = find_external_contours(thresh)\n min_perimeter, max_perimeter = (width + height) * 0.25, (width + height) * 1.5\n rects = extract_rects_from_controus(contours, min_perimeter, max_perimeter)\n\n min_perimeter, max_perimeter = (width + height) * 0.25, (width + height) * 1.5\n rects = extract_rects_from_controus(contours, min_perimeter, max_perimeter)\n\n # if len(rects) < 5:\n # return [img]\n \n tolerance = width / 3 if width < height else width / 6\n rects = sorted(rects, key=cmp_to_key(cmp_frame(tolerance)))\n\n left, center, right = [], [], []\n ch, cw = height/2, width/2\n center_x, center_y = np.int0((cw, ch))\n for rect in rects:\n if rect.width > width * 0.7:\n center.append(rect)\n elif rect.ex < cw*1.1:\n left.append(rect)\n elif rect.sx > cw*0.9:\n right.append(rect)\n lc = len(center)\n lr = len(right)\n ll = len(left)\n direction = 0\n cut_rects = []\n if lc==1 and ll==lr==2:\n center_x = int((max(left[0].ex, left[1].ex) + min(right[0].sx, right[1].sx))//2)\n center_y = ch\n if center[0].ey < height*0.55:\n # 正品\n center_y = int((center[0].ey + min(left[0].sy, right[0].sy))//2)\n direction = RIGHT|LEFT|DOWN\n elif center[0].sy > height*0.45:\n # 倒品\n center_y = int((center[0].sy + max(left[1].ey, right[1].ey))//2)\n direction = UP|RIGHT|LEFT\n elif ll==4 and lr==4 \\\n and max(map(lambda x: x.ey, left[:2]+right[:2])) < height*0.55 \\\n and min(map(lambda x: x.sy, left[2:]+right[2:])) > height*0.45:\n # 双四格\n center_x = int((max(map(lambda x: x.ex, left)) + min(map(lambda x: x.sx, right)))//2)\n center_y = int((max(left[1].ey, right[1].ey) + min(left[2].sy, left[2].sy))//2)\n direction = UP|DOWN|LEFT|RIGHT\n # elif ll==4 and max(map(lambda x: x.ey, left[:2])) < height*0.55 \\\n # and min(map(lambda x: x.sy, left[2:])) > height*0.45:\n # # 左四格\n # center_x = ceil(max(map(lambda x: x.ex, left))+1)\n # center_y = int((left[1].ey + left[2].sy)//2)\n # direction = LEFT|UP|DOWN\n # elif lr==4 \\\n # and max(map(lambda x: x.ey, right[:2])) < height*0.55 \\\n # and min(map(lambda x: x.sy, right[2:])) > height*0.45:\n # # 右四格\n # center_x = int(min(map(lambda x: x.sx, right))-1)\n # center_y = int((right[1].ey + right[2].sy)//2)\n # direction = RIGHT|UP|DOWN\n\n if not direction:\n # 尝试二次检测\n error_height = height*0.05\n error_width = width*0.05\n # 现在用的是图片中心,换成去白边后的中心可能会更\n min_center_x = max_center_x = t = cw\n for rect in rects:\n if abs(rect.sx-cw) < error_width:\n t = rect.sx\n elif abs(rect.ex-cw) < error_width:\n t = rect.ex\n elif abs(rect.cx-cw) < error_width:\n t = rect.cx\n else:\n continue\n if min_center_x > t:\n min_center_x = t\n elif max_center_x < t:\n max_center_x = t\n min_center_y = max_center_y = t = ch\n for rect in rects:\n if abs(rect.sy-ch) < error_height:\n t = rect.sy\n elif abs(rect.ey-ch) < error_height:\n t = 
rect.ey\n elif abs(rect.cy-ch) < error_height:\n t = rect.cy\n else:\n continue\n if min_center_y > t:\n min_center_y = t\n elif max_center_y < t:\n max_center_y = t\n im = np.mean(img, axis=2)\n min_center_x, min_center_y, max_center_x, max_center_y = \\\n np.int0((min_center_x, min_center_y, max_center_x, max_center_y))\n mean_up = np.mean(im[:min_center_y, min_center_x:max_center_x+1], axis=0)\n mean_down = np.mean(im[max_center_y:, min_center_x:max_center_x+1], axis=0)\n mean_updown = np.mean([mean_up, mean_down], axis=0)\n mean_left = np.mean(im[min_center_y:max_center_y+1, :min_center_x], axis=1)\n mean_right = np.mean(im[min_center_y:max_center_y+1, max_center_x:], axis=1)\n mean_lr = np.mean([mean_left, mean_right], axis=0)\n\n max_updown_index = np.argmax(mean_updown)\n max_updown = mean_updown[max_updown_index]\n max_lr_index = np.argmax(mean_lr)\n max_lr = mean_lr[max_lr_index]\n\n if max_lr > MIN_WHITE:\n center_y = min_center_y + max_lr_index\n direction |= RIGHT|LEFT\n else:\n max_right_index = np.argmax(mean_right)\n max_right = mean_right[max_right_index]\n max_left_index = np.argmax(mean_left)\n max_left = mean_left[max_left_index]\n if max_left > MIN_WHITE:\n center_y = min_center_y + max_left_index\n direction |= LEFT\n elif max_right > MIN_WHITE:\n center_y = min_center_y + max_right_index\n direction |= RIGHT\n if max_updown > MIN_WHITE:\n center_x = min_center_x + max_updown_index\n direction |= UP|DOWN\n else:\n max_up_index = np.argmax(mean_up)\n max_up = mean_up[max_up_index]\n max_down_index = np.argmax(mean_down)\n max_down = mean_down[max_down_index]\n if max_up > MIN_WHITE:\n center_x = min_center_x + max_up_index\n direction |= UP\n elif max_down > MIN_WHITE:\n center_x = min_center_x + max_down_index\n direction |= DOWN\n\n return split_image(img, (center_x, center_y), direction)\n\ndef cut_dir(src, dst):\n for filepath, _, filenames in os.walk(src):\n for filename in filenames:\n srcpath = path.join(filepath, filename)\n new_file_path = path.join(dst, path.relpath(filepath, src))\n os.makedirs(new_file_path, exist_ok=True)\n print('\\r\\x1b[K'+srcpath, end='')\n img = imread(srcpath)\n if img is not None:\n frames = cut_koma(img)\n name = path.splitext(filename)[0]\n l = len(str(len(frames)))\n f = f'{{}}-{{:0{l}}}.png'\n for i, frame in enumerate(frames):\n dstpath = path.join(new_file_path, f.format(name, i+1))\n imwrite(frame, dstpath)\n print()\n\n\nif __name__=='__main__':\n# for file in ['0010.jpg']:\n parser = argparse.ArgumentParser(description='4koma splitter')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('-t', '--format', choices=['mobi', 'epub', 'cbz', 'raw'], default='mobi', help='archive format, if choice \"raw\" will not make archive')\n parser.add_argument('-s', '--save-path', dest='save_path', default=OUTPUT_DIR, help='save path')\n args = parser.parse_args()\n\n if args.format == 'raw':\n save_path = args.save_path\n if not path.exists(save_path):\n os.mkdir(save_path)\n td = None\n else:\n td = tempfile.TemporaryDirectory()\n save_path = td.name\n saves = []\n for src in args.srcs:\n d1, d2 = path.split(src)\n save = path.join(save_path, d2 if d2 else d1)\n saves.append(save)\n cut_dir(src, save)\n if args.format != 'raw':\n qmg.main(saves, args.save_path, args.format)\n","sub_path":"4koma-splitter.py","file_name":"4koma-splitter.py","file_ext":"py","file_size_in_byte":11710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106080645","text":"from os import path\nfrom 
setuptools import find_packages, setup\n\n\nwith open(path.join(path.dirname(__file__), 'README.md')) as readme:\n LONG_DESCRIPTION = readme.read()\n\n\nsetup(\n name='fava-plugins',\n version='1.0',\n description='A collection of Beancount plugins.',\n long_description=LONG_DESCRIPTION,\n url='https://github.com/beancount/fava-plugins',\n author='Jakob Schnitzer',\n author_email='mail@jakobschnitzer.de',\n license='MIT',\n keywords='fava beancount accounting',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n install_requires=[\n 'beancount>=2.0rc1',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: JavaScript',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Office/Business :: Financial :: Accounting',\n 'Topic :: Office/Business :: Financial :: Investment',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"96894481","text":"class Solution(object):\n def buddyStrings(self, A, B):\n \"\"\"\n :type A: str\n :type B: str\n :rtype: bool\n \"\"\"\n diff=[]\n if len(A)!=len(B):\n return False\n \n if A==B and len(set(A))< len(A):\n return True\n \n for i in range(len(A)):\n if A[i]!=B[i]:\n diff.append((A[i],B[i]))\n \n return len(diff)==2 and diff[0]==diff[1][::-1] \n","sub_path":"OctoberChallenge/BuddyStrings.py","file_name":"BuddyStrings.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"328586457","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nimport sys\nimport optparse\nfrom benchmark_controller import Benchmark, nnnBenchmark\nimport time\n\nprint(\"------------ Importing environment packages --------------\")\n\nENV_SELECTION = {}\nsimbenchmark_dir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(simbenchmark_dir)\nsys.path.insert(0, os.getcwd())\n\n\ndef import_manager(sim_name):\n if sim_name == \"webots\":\n try:\n from environments.WeBots.controller import RobotEnv_webots\n from environments.WeBots.controller import nnnEnv_webots\n\n ENV_SELECTION.update({\"webots\": [RobotEnv_webots, nnnEnv_webots]})\n print(\"WeBots packages successfully imported\\n\")\n except Exception as e:\n print(\"{}. \\nWas not able to import WeBots!\\n\".format(e))\n\n if sim_name == \"pybullet\":\n try:\n from environments.PyBullet.controller import RobotEnv_PyBullet\n from environments.PyBullet.controller import nnnEnv_PyBullet\n\n ENV_SELECTION.update(\n {\"pybullet\": [RobotEnv_PyBullet, nnnEnv_PyBullet]})\n print(\"PyBullet packages successfully imported\\n\")\n except Exception as e:\n print(\"{}. 
\\nWas not able to import PyBullet!\\n\".format(e))\n\n if sim_name == \"mujoco\":\n try:\n from environments.MuJoCo.controller import RobotEnv_mujoco\n from environments.MuJoCo.controller import nnnEnv_mujoco\n\n ENV_SELECTION.update({\"mujoco\": [RobotEnv_mujoco, nnnEnv_mujoco]})\n print(\"MuJoCo packages successfully imported\\n\")\n except Exception as e:\n print(\"{}. \\nWas not able to import MuJoCo!\\n\".format(e))\n\n if sim_name == \"gazebo\":\n try:\n # correct import for python2 (ROS melodic)\n if sys.version_info <= (3, 4):\n simbenchmark_dir = os.path.dirname(os.path.abspath(__file__))\n sys.path.insert(0, simbenchmark_dir +\n \"/environments/Gazebo/controller\")\n from robot_env import RobotEnv_gazebo\n from nnn_env import nnnEnv_gazebo\n else:\n from environments.Gazebo.controller import RobotEnv_gazebo\n from environments.Gazebo.controller import nnnEnv_gazebo\n ENV_SELECTION.update({\"gazebo\": [RobotEnv_gazebo, nnnEnv_gazebo]})\n print(\"Gazebo packages successfully imported\\n\")\n except Exception as e:\n print(\"{}. \\nWas not able to import Gazebo!\\n\".format(e))\n\n\nif __name__ == \"__main__\":\n optParser = optparse.OptionParser(usage=\"usage: %prog [options]\")\n optParser.add_option(\n \"--path\",\n dest=\"path\",\n default=None,\n help=\"Specifies the result folder.\"\n )\n optParser.add_option(\n \"--sim\",\n dest=\"sim\",\n default=None,\n help=\"Specifies the sim_name\"\n )\n optParser.add_option(\n \"--sim_option\",\n dest=\"sim_option\",\n default=\"RobotSim\",\n help=\"RobotSim or nnnSim\"\n )\n optParser.add_option(\n \"--render\",\n dest=\"render\",\n default=0,\n help=\"headless mode [0, 1]\"\n )\n options, args = optParser.parse_args()\n path = options.path\n sim_name = options.sim\n sim_option = options.sim_option\n render = options.render\n render = int(render)\n render = bool(render)\n\n # --------- Activate if starting through python -------\n # path = \".\"\n # sim_name = 'pybullet'\n # sim_name = 'webots'\n # sim_name = 'mujoco'\n # sim_name = 'gazebo'\n\n # sim_option = 'nnnSim'\n # render = True\n\n import_manager(sim_name)\n\n # --------- Checking is simulation environemt is available --------\n try:\n assert sim_name is not None, \"Please start benchmark with shell script!\"\n assert sim_name in ENV_SELECTION.keys(), sim_name + \" is not available!\"\n except:\n print(\" \")\n print(sim_name + \" not available!\\n\")\n sys.exit()\n\n if sim_option == \"RobotSim\":\n sim_env = ENV_SELECTION[sim_name][0]\n # initialize benchmark with simulation specific robot controller\n benchmark = Benchmark(sim_env, render=render)\n elif sim_option == \"nnnSim\":\n sim_env = ENV_SELECTION[sim_name][1]\n # initialize benchmark with simulation specific robot controller\n benchmark = nnnBenchmark(sim_env, render=render)\n else:\n print(\"ERROR: Benchmark configuration failed!\")\n sys.exit()\n\n # number of runs per timestep\n runs_per_timestep = 1\n\n # Custom list of time steps to be applied in the benchmark run\n t_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n 12, 14, 16, 18, 20, 24, 28, 32, 48, 64]\n\n for timestep in t_list:\n for i in range(runs_per_timestep):\n # run the benchmark and get the logged data back\n t0 = time.time()\n print(\n \"\\nStarting {} {} with:\\ntimestep {}ms\\nrun {}\".format(\n sim_name, sim_option, timestep, i\n )\n )\n try:\n obs_recording = benchmark.run_benchmark(\n sim_name,\n timestep,\n os.getcwd()\n + \"/benchmark_controller/simulation_sequences/data.json\",\n )\n except Exception as e:\n print(e)\n print(\"Duration\", 
time.time() - t0)\n print(\"Saving observation to file...\\n\")\n # save obs_recording to file\n with open(path + \"/\" + sim_name + \"_\" + sim_option + \"_obs_\"\n + str(timestep) + \"ms_run_\" + str(i) + \".json\",\n \"w\",) as outfile:\n json.dump(obs_recording, outfile)\n\n time.sleep(5)\n time.sleep(15)\n","sub_path":"instanz_manager.py","file_name":"instanz_manager.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"572764219","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nfrom ret.loguru import logger\n\nfrom ret.database.pd_sql import pd_sql\n\nfrom ret.config.settings import (\n ENV,\n )\n\ndef cells_data(time_=None):\n logger.debug(f'ENV {ENV} time_ {time_}')\n\n if not time_:\n logger.info(f'time_ {time_}')\n return\n\n when_ = time_\n period = when_.strftime(\"%Y-%m-%d\")\n logger.debug(f'period {period}')\n\n query_ = f'''\n select distinct * from lcellreference as l\n where STR_TO_DATE(l.dateid, '%Y-%m-%d') = '{period}';\n '''\n\n return pd_sql(time_=time_, query_=query_)\n\ndef main():\n # when_ = datetime.datetime.now()\n # day_before = time_ - datetime.timedelta(days=1)\n time_ = datetime.datetime.now()\n df = cells_data(time_=time_)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ret/database/cells_data.py","file_name":"cells_data.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"492693951","text":"from beluga.visualization.renderers import BaseRenderer\nfrom matplotlib.pyplot import *\nfrom beluga.utils import keyboard\n\nclass MatPlotLib(BaseRenderer):\n \"\"\"\n A renderer class that implements matplotlib\n \"\"\"\n def __init__(self):\n self._figures = []\n\n def _get_figure(self,f):\n \"\"\"\n Returns Figure instance from internal list using index\n\n Raises:\n ValueError if invalid index is used\n \"\"\"\n try:\n fh = self._figures[f]\n if fh is None:\n raise ValueError('Invalid figure handle specified!')\n return fh\n except:\n raise ValueError('Invalid figure handle specified!')\n\n def create_figure(self):\n \"\"\"\n Creates a new figure and returns a handle\n \"\"\"\n self._figures.append(figure())\n return len(self._figures)-1\n\n def close_figure(self,f):\n \"\"\"\n Closes a specified figure\n \"\"\"\n close(self._get_figure(f))\n\n def show_figure(self,f,block=False):\n \"\"\"\n Shows a specified figure\n \"\"\"\n show(self._get_figure(f))\n\n def show_all(self):\n \"\"\"\n Show all rendered figures\n \"\"\"\n show()\n\n def render_plot(self,f,p):\n \"\"\"\n Adds a line plot using the given data to the specified figure\n \"\"\"\n fh = self._get_figure(f);\n fh.hold(True)\n has_legend = False\n\n for line in p.plot_data:\n has_legend = has_legend or (line['legend'] is not None)\n for dataset, ind in zip(line['data'], range(len(line['data']))):\n # Determine coloring\n if line['color'] is not None:\n _color = line['color']\n elif line['type'] == 'line_series':\n _color = cm.get_cmap('jet')(ind/(len(line['data'])-1))\n else: # use default color\n _color = 'b'\n\n plot(dataset['x_data'],dataset['y_data'],label=line['legend'],figure=fh,color=_color)\n\n if has_legend:\n fh.gca().legend()\n if p._xlabel is not None:\n xlabel(p._xlabel,figure=fh)\n if p._ylabel is not None:\n ylabel(p._ylabel,figure=fh)\n if p._title is not None:\n title(p._title,figure=fh)\n if p._grid_on:\n fh.gca().grid(p._grid_on)\n if p._xlim is not None:\n 
fh.gca().set_xlim(p._xlim)\n if p._ylim is not None:\n fh.gca().set_ylim(p._ylim)\n\n def render_subplot(self,f,index,plot):\n \"\"\"\n Adds a subplot to the specified figure\n \"\"\"\n pass\n\nif __name__ == '__main__':\n from beluga.visualization.elements import Plot\n import dill\n\n r = MatPlotLibRenderer()\n fig = r.create_figure()\n\n with open('/Users/tantony/dev/mjgrant-beluga/examples/data.dill','rb') as f:\n out = dill.load(f)\n\n p = Plot(0,-1)\n p.x('v/1000')\n p.y('h/1000')\n p.xlabel('v (km/s)')\n p.ylabel('h (km)')\n p.title('Altitude vs. Velocity')\n p.preprocess(out['solution'],out['problem_data'])\n\n r.render_plot(fig,p)\n r.show_figure(fig)\n","sub_path":"beluga/visualization/renderers/MatPlotLib.py","file_name":"MatPlotLib.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"11799739","text":"\"\"\"\n355. shuttleInBuildings\nhttps://www.lintcode.com/problem/shuttleinbuildings/description?_from=contest&&fromId=103\ndp \n\"\"\"\nfrom collections import deque\nclass Solution:\n \"\"\"\n @param heights: the heights of buildings.\n @param k: the vision.\n @param x: the energy to spend of the first action.\n @param y: the energy to spend of the second action.\n @return: the minimal energy to spend.\n \"\"\"\n def shuttleInBuildings(self, heights, k, x, y):\n # write your code here.\n stack = []\n n = len(heights)\n first_highest = [-1] * n\n for i in range(n):\n while stack and heights[stack[-1]] < heights[i]:\n idx = stack.pop()\n if i - idx <= k:\n first_highest[i] = idx\n stack.append(i)\n dp = [sys.maxsize] * (n)\n \n dp[0] = 0\n \n for i in range(1,n):\n dp[i] = min(dp[i],dp[i - 1] + y)\n if i >= 2:\n dp[i] = min(dp[i],dp[i - 2] + y)\n if first_highest[i] != -1:\n dp[i] = min(dp[i],dp[first_highest[i]] + x)\n \n return dp[n-1]","sub_path":"lintcode/355.py","file_name":"355.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"372819799","text":"\nfrom view import Art\nfrom model import Elevator\nimport os\n\nclass Elevator_control:\n\n def __init__(self):\n self.model = Elevator()\n self.view = Art()\n\n def level_control(self):\n\n print(\"Which floor do you want to go?\")\n button = int(input())\n print(\"How many people would get in the elevator?\")\n number_in = int(input())\n print(\"How many people got out of the elevator?\")\n number_out = int(input())\n\n self.model.add_people(number_in, number_out)\n\n os.system(\"clear\")\n\n self.model.set_level(button)\n self.view.print_building(self.model.current_level, self.model.building_levels, self.model.people)\n\n print(\"\\n \\n\")\n self.level_control()\n\nmain = Elevator_control()\nmain.level_control()\n","sub_path":"week-05/day_04/elevator/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"384446480","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2012, Dongsheng Cai\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the 
following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Dongsheng Cai nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL DONGSHENG CAI BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging.config\n\nfrom pymongo.connection import Connection\nfrom tornado.options import define\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\n\nfrom pushservices.apns import *\nfrom pushservices.gcm import GCMClient\nfrom pushservices.wns import WNSClient\nfrom pushservices.mpns import MPNSClient\nfrom pushservices.clickatell import *\nfrom uimodules import *\nfrom util import *\nfrom constants import DEVICE_TYPE_IOS, DEVICE_TYPE_ANDROID, DEVICE_TYPE_WNS, \\\n DEVICE_TYPE_MPNS\n\ndefine(\"port\", default=8801, help=\"Application server listen port\", type=int)\n\ndefine(\"pemdir\", default=\"pemdir\", help=\"Directory to store pems\")\ndefine(\"passwordsalt\", default=\"d2o0n1g2s0h3e1n1g\", help=\"Being used to make password hash\")\ndefine(\"cookiesecret\", default=\"airnotifiercookiesecret\", help=\"Cookie secret\")\ndefine(\"debug\", default=False, help=\"Debug mode\")\n\ndefine(\"https\", default=False, help=\"Enable HTTPS\")\ndefine(\"httpscertfile\", default=\"\", help=\"HTTPS cert file\")\ndefine(\"httpskeyfile\", default=\"\", help=\"HTTPS key file\")\n\ndefine(\"mongohost\", default=\"localhost\", help=\"MongoDB host name\")\ndefine(\"mongoport\", default=27017, help=\"MongoDB port\")\n\ndefine(\"masterdb\", default=\"airnotifier\", help=\"MongoDB DB to store information\")\ndefine(\"collectionprefix\", default=\"obj_\", help=\"Collection name prefix\")\ndefine(\"dbprefix\", default=\"app_\", help=\"DB name prefix\")\ndefine(\"appprefix\", default=\"\", help=\"DB name prefix\")\n\nloggingconfigfile='logging.ini'\nif os.path.isfile(loggingconfigfile):\n logging.config.fileConfig(loggingconfigfile)\n\n_logger = logging.getLogger('AirNotifierApp')\n\nclass AirNotifierApp(tornado.web.Application):\n\n def init_routes(self, dir):\n from routes import RouteLoader\n return RouteLoader.load(dir)\n\n def get_broadcast_status(self, appname):\n status = \"Notification sent!\"\n error = False\n\n try:\n apns = self.services['apns'][appname][0]\n except (IndexError, KeyError):\n apns = None\n\n if apns is not None and apns.hasError():\n status = apns.getError()\n error = True\n\n return {'msg':status, 'error':error}\n\n def send_broadcast(self, appname, appdb, **kwargs):\n channel = kwargs.get('channel', 'default')\n alert = kwargs.get('alert', None)\n sound = kwargs.get('sound', None)\n badge = kwargs.get('badge', None)\n device = kwargs.get('device', None)\n extra = kwargs.get('extra', {})\n try:\n apns = 
self.services['apns'][appname][0]\n except (IndexError, KeyError):\n apns = None\n try:\n wns = self.services['wns'][appname][0]\n except (IndexError, KeyError):\n wns = None\n try:\n mpns = self.services['mpns'][appname][0]\n except (IndexError, KeyError):\n mpns = None\n try:\n gcm = self.services['gcm'][appname][0]\n except (IndexError, KeyError):\n gcm = None\n\n conditions = []\n if channel == 'default':\n # channel is not set or channel is default\n conditions.append({'channel': {\"$exists\": False}})\n conditions.append({'channel': 'default'})\n else:\n conditions.append({'channel': channel})\n\n if device:\n conditions.append({'device': device})\n\n tokens = appdb.tokens.find({\"$or\": conditions})\n\n regids = []\n try:\n for token in tokens:\n t = token.get('token')\n if token['device'] == DEVICE_TYPE_IOS:\n if apns is not None:\n apns.process(token=t, alert=alert, extra=extra, apns=kwargs.get('apns', {}))\n elif token['device'] == DEVICE_TYPE_ANDROID:\n regids.append(t)\n elif token['device'] == DEVICE_TYPE_WNS:\n if wns is not None:\n wns.process(token=t, alert=alert, extra=extra, wns=kwargs.get('wns', {}))\n elif token['device'] == DEVICE_TYPE_MPNS:\n if mpns is not None:\n mpns.process(token=t, alert=alert, extra=extra, mpns=kwargs.get('mpns', {}))\n except Exception as ex:\n _logger.error(ex)\n\n # Now sending android notifications\n try:\n if (gcm is not None) and regids:\n response = gcm.process(token=regids, alert=alert, extra=extra, gcm=kwargs.get('gcm', {}))\n responsedata = response.json()\n except Exception as ex:\n _logger.error('GCM problem: ' + str(ex))\n\n def __init__(self, services):\n\n app_settings = dict(\n debug=True,\n # debug=options.debug,\n app_title=u'AirNotifier',\n ui_modules={\"AppSideBar\": AppSideBar, \"NavBar\": NavBar, \"TabBar\": TabBar},\n template_path=os.path.join(os.path.dirname(__file__), 'templates'),\n static_path=os.path.join(os.path.dirname(__file__), 'static'),\n cookie_secret=options.cookiesecret,\n login_url=r\"/auth/login\",\n autoescape=None,\n )\n self.services = services\n\n sitehandlers = self.init_routes('controllers')\n apihandlers = self.init_routes('api')\n\n tornado.web.Application.__init__(self, sitehandlers + apihandlers, **app_settings)\n\n mongodb = None\n while not mongodb:\n try:\n mongodb = Connection(options.mongohost, options.mongoport)\n except:\n error_log(\"Cannot not connect to MongoDB\")\n\n self.mongodb = mongodb\n\n self.masterdb = mongodb[options.masterdb]\n assert self.masterdb.connection == self.mongodb\n\n def main(self):\n _logger.info(\"Starting AirNotifier server\")\n if options.https:\n import ssl\n try:\n ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ssl_ctx.load_cert_chain(options.httpscertfile, options.httpskeyfile)\n except IOError:\n print(\"Invalid path to SSL certificate and private key\")\n raise\n http_server = tornado.httpserver.HTTPServer(self, ssl_options=ssl_ctx)\n else:\n http_server = tornado.httpserver.HTTPServer(self)\n http_server.listen(options.port)\n _logger.info(\"AirNotifier is ready\")\n try:\n tornado.ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n _logger.info(\"AirNotifier is quiting\")\n tornado.ioloop.IOLoop.instance().stop()\n\ndef init_messaging_agents():\n services = {\n 'gcm': {},\n 'wns': {},\n 'apns': {},\n 'mpns': {},\n 'sms': {},\n }\n mongodb = None\n while not mongodb:\n try:\n mongodb = Connection(options.mongohost, options.mongoport)\n except Exception as ex:\n _logger.error(ex)\n masterdb = mongodb[options.masterdb]\n apps = 
masterdb.applications.find()\n for app in apps:\n ''' APNs setup '''\n services['apns'][app['shortname']] = []\n conns = int(app['connections'])\n if conns < 1:\n conns = 1\n if 'environment' not in app:\n app['environment'] = 'sandbox'\n\n if file_exists(app.get('certfile', False)) and file_exists(app.get('keyfile', False)) and 'shortname' in app:\n if app.get('enableapns', False):\n for instanceid in range(0, conns):\n try:\n apn = APNClient(app['environment'], app['certfile'], app['keyfile'], app['shortname'], instanceid)\n except Exception as ex:\n _logger.error(ex)\n continue\n services['apns'][app['shortname']].append(apn)\n ''' GCMClient setup '''\n services['gcm'][app['shortname']] = []\n if 'gcmprojectnumber' in app and 'gcmapikey' in app and 'shortname' in app:\n try:\n http = GCMClient(app['gcmprojectnumber'], app['gcmapikey'], app['shortname'], 0)\n except Exception as ex:\n _logger.error(ex)\n continue\n services['gcm'][app['shortname']].append(http)\n ''' WNS setup '''\n services['wns'][app['shortname']] = []\n if 'wnsclientid' in app and 'wnsclientsecret' in app and 'shortname' in app:\n try:\n wns = WNSClient(masterdb, app, 0)\n except Exception as ex:\n _logger.error(ex)\n continue\n services['wns'][app['shortname']].append(wns)\n\n ''' MPNS setup '''\n services['mpns'][app['shortname']] = []\n try:\n mpns = MPNSClient(masterdb, app, 0)\n except Exception as ex:\n _logger.error(ex)\n continue\n services['mpns'][app['shortname']].append(mpns)\n ''' clickatell '''\n services['sms'][app['shortname']] = []\n try:\n sms = ClickatellClient(masterdb, app, 0)\n except Exception as ex:\n _logger.error(ex)\n continue\n services['sms'][app['shortname']].append(sms)\n mongodb.close()\n return services\n\nif __name__ == \"__main__\":\n tornado.options.parse_config_file(\"airnotifier.conf\")\n tornado.options.parse_command_line()\n services = init_messaging_agents()\n AirNotifierApp(services=services).main()\n","sub_path":"airnotifier.py","file_name":"airnotifier.py","file_ext":"py","file_size_in_byte":11039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"343841428","text":"n , a, x, b, y = map(int,input().split())\nif a==b:\n print('YES')\nelse:\n while a!=x and b!=y:\n a+=1\n b-=1\n if b==a+1:\n a=1\n if b==0:\n b=a\n if a==b:\n flag=1\n break\n if flag==1:\n print('YES')\n else:\n print('NO')\n","sub_path":"CircleMetro.py","file_name":"CircleMetro.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274483380","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0012_video'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='video',\n name='evento',\n field=models.DateTimeField(default=datetime.datetime(2015, 5, 31, 20, 58, 45, 299187, tzinfo=utc), verbose_name=b'Fecha del evento'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='video',\n name='created',\n field=models.DateTimeField(auto_now_add=True, verbose_name=b'Creado'),\n preserve_default=True,\n ),\n ]\n","sub_path":"blog/migrations/0013_auto_20150531_2058.py","file_name":"0013_auto_20150531_2058.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"299337995","text":"# author: Robin Petit\n\nimport numpy as np\nfrom instance import Instance\nfrom allocation import AllocationSubproblemSolver\n\nfrom abc import ABCMeta, abstractmethod\n\nclass LagrangeanRelaxationSolverAbstract(Instance, metaclass=ABCMeta):\n def __init__(self, path, mu0, rho):\n Instance.__init__(self, path)\n self.obj_dual = list()\n self.obj_primal = list()\n self.mu0 = mu0\n self.rho = rho\n self.u = None\n\n def objective(self):\n return self.primal_objective() + (self.u * self._subgradient()).sum()\n\n dual_objective = objective\n\n def primal_objective(self):\n return Instance.objective(self)\n\n @abstractmethod\n def reinit_multipliers(self):\n pass\n\n def solve(self, max_iter=10000, tol=1e-3):\n self.iteration = 0\n prev_primal = prev_dual = 0\n loop = True\n while loop:\n self._solve()\n self.obj_dual.append(self.objective())\n self.obj_primal.append(self.primal_objective())\n self._update_multipliers()\n self.iteration += 1\n loop = self.iteration < max_iter and abs(self.obj_dual[-1] - prev_dual) + abs(self.obj_primal[-1] - prev_primal) >= tol\n prev_primal = self.obj_primal[-1]\n prev_dual = self.obj_dual[-1]\n\n @abstractmethod\n def _solve(self):\n pass\n\n @abstractmethod\n def _subgradient(self):\n pass\n\n def _update_multipliers(self):\n self.u = np.maximum(0, self.u + self.current_step * self._subgradient())\n self.current_step *= self.rho\n\n def extract_admisible_solution(self):\n # Original paper, p. 7\n if self.y.sum() < self.m / self.H:\n cheapest_indices = np.argsort(self.f / (1-self.y), axis=None)\n self.y[np.unravel_index(cheapest_indices[:int(np.ceil(self.m/self.H - self.y.sum()))], self.f.shape)] = 1\n tp = AllocationSubproblemSolver(self.m, self.n, self.t, self.c, self.H, [np.where(self.y[:,t] == 1)[0] for t in range(self.t)])\n return tp.solve() + (self.f*self.y).sum()\n\n def upper_bound(self):\n return self.extract_admisible_solution()\n\nclass LagrangeanRelaxationSolver(LagrangeanRelaxationSolverAbstract):\n def __init__(self, path, mu0=1, rho=.95):\n LagrangeanRelaxationSolverAbstract.__init__(self, path, mu0, rho)\n self.reinit_multipliers()\n\n def reinit_multipliers(self):\n self.u = np.random.rand(self.n, self.t)\n self.current_step = self.mu0\n\n def _solve(self):\n self._solve_for_x()\n self._solve_for_y()\n\n def _solve_for_x(self):\n self.x = np.zeros(self.x.shape, dtype=np.int)\n new_weights = (self.c + self.u.reshape(1, self.n, self.t))\n # Performs the j^*(i), t^*(i) computation for every i simultaneously\n # Yes this is ugly but as efficient as can be\n xs_to_take = np.unravel_index(\n new_weights.reshape(new_weights.shape[0], -1).argmin(1)\n + np.arange(new_weights.shape[0])*np.prod(new_weights.shape[1:]),\n new_weights.shape\n )\n self.x[xs_to_take] = 1\n\n def _solve_for_y(self):\n self.y = np.zeros(self.f.shape, dtype=np.int)\n updated_costs = self.f - self.H*self.u\n ys_to_take = updated_costs <= 0\n sorted_indices = np.argsort(updated_costs, axis=0)\n for t in range(self.t):\n ys_to_take[sorted_indices[:self.p[t],t],t] = True\n self.y[ys_to_take] = True\n\n def _subgradient(self):\n return self.x.sum(axis=0) - self.H * self.y\n\nclass LagrangeanRelaxationSolver2(LagrangeanRelaxationSolverAbstract):\n def __init__(self, path, mu0=1, rho=.95):\n LagrangeanRelaxationSolverAbstract.__init__(self, path, mu0, rho)\n self.reinit_multipliers()\n\n def reinit_multipliers(self):\n self.u = np.random.rand(self.m)\n self.current_step = self.mu0\n\n def _solve(self):\n # A bit of Numpy magic (needed to vectorize 
efficiently)\n sstar = np.empty((self.n, self.t), dtype=np.int)\n w = np.empty((self.n, self.t))\n c_minus_u = self.c - self.u.reshape(-1, 1, 1)\n sorted_c_minus_u = np.sort(c_minus_u, axis=0)\n is_argsort = np.argsort(c_minus_u, axis=0)\n sstar = np.minimum(self.H, (c_minus_u < 0).sum(axis=0))\n w = self.f + np.cumsum(sorted_c_minus_u, axis=0).reshape(self.m, -1)[sstar.reshape(-1), np.arange(np.prod(sstar.shape))].reshape(*sstar.shape)\n self.x[:] = 0\n self.y[:] = 0\n jstar = np.maximum(self.p, (w < 0).sum(axis=0))\n _js = np.argsort(w, axis=0)\n mask = (jstar[:,None] > np.arange(self.n)).T\n self.y[_js[mask], np.where(mask)[1]] = 1\n for t in range(self.t):\n js = _js[:,t]\n is_ = is_argsort[:,js[:jstar[t]],t]\n mask = (sstar[js[:jstar[t]],t,None] > np.arange(self.m)).T\n self.x[is_[mask],js[np.where(mask)[1]],t] = 1\n\n def _subgradient(self):\n return 1 - self.x.sum(axis=(1, 2))\n","sub_path":"scripts/lagrangean.py","file_name":"lagrangean.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"641603492","text":"#\n# Copyright (C) 2017 Tignis, Inc.\n#\n\n\"\"\"\nImplement the Competition class.\npython version shares the similar idea with java version\n\"\"\"\nfrom decimal import Decimal\nimport operator\nimport testcases\n\n\n\ndef round_value(value, precision=Decimal('1.000')):\n \"\"\"\n Round float value to specified precision.\n\n :param value: Float value.\n :param precision: Precision as a Decimal() object.\n :return: Rounded float value.\n \"\"\"\n return float(Decimal(value).quantize(precision))\n\n\nclass Competition(object):\n \"\"\"\n Implement or extend this class to simulate the competition.\n \"\"\"\n\n\n def __init__(self, competitors, duration):\n \"\"\"\n Initialize the competition.\n\n :param competitors: A dictionary of {competitor name: hot dog function}.\n :param duration: Duration in seconds of the competition.\n\n \"\"\"\n self.eventList = []\n self.duration = duration\n self.competitors = competitors\n\n\n\n\n\n def run(self):\n \"\"\"\n Run a simulation of the competition.\n\n :return: List of (or iterator over) Events.\n \"\"\"\n JElapseTime = 0.000\n CElapseTime = 0.000\n JHotDogNum = 0.000\n CHotDogNum = 0.000\n\n\n\n\n while (JElapseTime < self.duration or CElapseTime < self.duration):\n if (JElapseTime < self.duration):\n JElapseTime += self.competitors['Joey Chestnut'](JHotDogNum)\n JHotDogNum +=1\n self.eventList.append(Event(JElapseTime, 'Joey Chestnut', JHotDogNum).rounded())\n if (CElapseTime < self.duration):\n CElapseTime += self.competitors['Carmen Cincotti'](CHotDogNum)\n CHotDogNum += 1\n self.eventList.append(Event(CElapseTime, 'Carmen Cincotti', CHotDogNum).rounded())\n\n\n\n eventListNew = sorted(self.eventList,key = lambda x:(x.elapsed_time, ord(x.name[0])))\n eventListNew = [obj for obj in eventListNew if obj.elapsed_time 0) for r in results]\n [self.assertTrue(len(r[DetectionAPI.CLASSES_KEY])>0) for r in results]\n\n classes_list = [r[DetectionAPI.CLASSES_KEY][0][DetectionAPI.CLASSE_KEY]\n for r in results]\n self.assertListEqual(correct_json_1, classes_list)\n\n def test_interface(self):\n detector_wrapper = DetectionAPI()\n response_dict = detector_wrapper.handle_input_request(request_dict_1)\n self.assert_correct_classes(response_dict, correct_json_1)\n\n def test_model(self):\n model = DectionModel(condifence_threshold=0.1)\n\n img_drake_url = request_dict_1['images'][0]\n img_path = DetectionAPI.get_single_image_from_url(img_drake_url)\n\n 
response = model.detect_objects(image_path=img_path, image_id=img_drake_url)\n self.assertTrue(len(response) > 0)\n self.assertTrue(isinstance(response[0], dict))\n self.assertTrue(DectionModel.CLASS_KEY in response[0].keys())\n self.assertTrue(response[0][DectionModel.CLASS_KEY] == correct_json_1[0])\n\n\n\n\n\n\n","sub_path":"tests/test_unittests.py","file_name":"test_unittests.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354332962","text":"# coding=utf8\n\n# Copyright 2018 JDCLOUD.COM\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This class is auto generated by the jdcloud code generator program.\n\n\nclass TableStorageInfo(object):\n\n def __init__(self, tableName=None, dbName=None, engine=None, totalSize=None, percentage=None, dataSize=None, idxSize=None, fragment=None, dataRows=None):\n \"\"\"\n :param tableName: (Optional) 表名\n :param dbName: (Optional) 数据库名\n :param engine: (Optional) 引擎\n :param totalSize: (Optional) 表空间大小\n :param percentage: (Optional) 表空间占比\n :param dataSize: (Optional) 数据空间\n :param idxSize: (Optional) 索引空间\n :param fragment: (Optional) 碎片率\n :param dataRows: (Optional) 表行数\n \"\"\"\n\n self.tableName = tableName\n self.dbName = dbName\n self.engine = engine\n self.totalSize = totalSize\n self.percentage = percentage\n self.dataSize = dataSize\n self.idxSize = idxSize\n self.fragment = fragment\n self.dataRows = dataRows\n","sub_path":"jdcloud_sdk/services/smartdba/models/TableStorageInfo.py","file_name":"TableStorageInfo.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"298747225","text":"# Jahrme Risner\n# Homework 4\n# 14 February 2018\n\nimport sys\nfrom socket import *\n\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nelse:\n sys.exit(\"Error: filename required.\")\n\n# Server (may) expect files to begin with forward-slash.\nif filename[0] != \"/\": filename = \"/\" + filename\n\n# Just in case, let user set the server and port at command-line.\nif len(sys.argv) == 4:\n serverName = sys.argv[2]\n serverPort = int(sys.argv[3])\nelse:\n serverName = \"localhost\"\n serverPort = 8080\n\n# Create request header.\nheader = \"GET %s HTTP/1.1\\nHost: %s:%d\\nCache-Control: no-cache\" \\\n% (filename, serverName, serverPort)\n\n# Create the connection.\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((serverName, serverPort))\n\n# Send the header.\nclientSocket.send(header.encode(\"utf-8\"))\n\n# Aggregate responses.\nresponse = \"\"\nwhile True:\n data = clientSocket.recv(1024).decode(\"utf-8\")\n response = response + data\n if not data: break\n\n# Show final response.\nprint(response)\n\n# Done.\nclientSocket.close()\n","sub_path":"hw-4/WebClient.py","file_name":"WebClient.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"466829913","text":"#!/usr/bin/python\n\nimport sys\nimport math\n\ncurrent_word = None\ncount_pool = []\nsum = 0\n\ndocs_cnt = 508\n\nfor line in sys.stdin:\n ss = line.strip().split(' ')\n if len(ss) != 2:\n continue\n\n word, val = ss\n\n if current_word == None:\n current_word = word\n\n if current_word != word:\n for count in count_pool:\n sum += count\n idf_score = math.log(float(docs_cnt) / (float(sum) + 1))\n print(\"%s\\t%s\" % (current_word, idf_score))\n\n current_word = word\n count_pool = []\n sum = 0\n\n count_pool.append(int(val))\n\nfor count in count_pool:\n sum += count\nidf_score = math.log(float(docs_cnt) / (float(sum) + 1))\nprint(\"%s\\t%s\" % (current_word, idf_score))\n","sub_path":"tf-idf/red.py","file_name":"red.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"531861967","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport angr\n# list all matched pair within functions in binary 1\ndef analyzer(binary_path_1, binary_path_2, outputDir, matched_pairs_with_addr):\n \n b1 = angr.Project(binary_path_1, load_options = {\"auto_load_libs\": False})\n b2 = angr.Project(binary_path_2, load_options = {\"auto_load_libs\": False})\n b1_cfg = b1.analyses.CFGFast()\n b2_cfg = b2.analyses.CFGFast()\n \n b1_matched_pairs_in_function_dic = {} # Only count once according to func1 \n b2_matched_pairs_in_function_dic = {}\n b1_all_blocks_in_function_dic = {}\n b2_all_blocks_in_function_dic = {}\n b1_matched_blocks_in_function_dic = {}\n b2_matched_blocks_in_function_dic = {}\n b1_functions = b1_cfg.kb.functions\n b2_functions = b2_cfg.kb.functions\n \n print(\"Converting addr to function_addr ...\")\n \n # Initilization for b1\n for b1_function in b1_functions:\n b1_matched_pairs_in_function_dic[b1_function] = []\n b1_all_blocks_in_function_dic[b1_function] = set(hex(addr) for addr in b1_functions[b1_function].block_addrs)\n b1_matched_blocks_in_function_dic[b1_function] = set()\n \n # Initilization for b2\n for b2_function in b2_functions:\n b2_matched_pairs_in_function_dic[b2_function] = []\n b2_all_blocks_in_function_dic[b2_function] = set(hex(addr) for addr in b2_functions[b2_function].block_addrs)\n b2_matched_blocks_in_function_dic[b2_function] = set()\n \n \n for matched_block_pair in matched_pairs_with_addr:\n # Get bb addrs\n bb_addr_1 = int(matched_block_pair[0], 16)\n bb_addr_2 = int(matched_block_pair[1], 16) \n node1 = b1_cfg.model.get_any_node(bb_addr_1)\n node2 = b2_cfg.model.get_any_node(bb_addr_2)\n if (node1 != None and node2 != None):\n # Get func addrs\n func_addr_1 = node1.function_address\n func_addr_2 = node2.function_address\n \n # Append to the pair list\n if func_addr_1 in b1_matched_pairs_in_function_dic and func_addr_2 in b2_matched_pairs_in_function_dic:\n #b1_matched_pairs_in_function_dic[func_addr_1] = []\n b1_matched_pairs_in_function_dic[func_addr_1].append([hex(bb_addr_1), hex(bb_addr_2)])\n #if func_addr_2 in b2_matched_pairs_in_function_dic:\n #b2_matched_pairs_in_function_dic[func_addr_2] = []\n b2_matched_pairs_in_function_dic[func_addr_2].append([hex(bb_addr_1), hex(bb_addr_2)])\n \n # Add to the b1_matched_blocks_in_function_dic set\n #if func_addr_1 in b1_matched_blocks_in_function_dic:\n #b1_matched_blocks_in_function_dic[func_addr_1] = set()\n b1_matched_blocks_in_function_dic[func_addr_1].add(hex(bb_addr_1))\n \n # Add to the b2_matched_blocks_in_function_dic set\n #if func_addr_2 in 
b2_matched_blocks_in_function_dic:\n #b2_matched_blocks_in_function_dic[func_addr_2] = set()\n b2_matched_blocks_in_function_dic[func_addr_2].add(hex(bb_addr_2))\n \n print(\"Writing result of function_addr ...\")\n # Print out the b1 result\n with open(outputDir + '/b1_matched_pairs_within_functions_deepbindiff', 'w') as f:\n for b1_function in b1_matched_pairs_in_function_dic:\n \n b1_matched_pairs_in_function_dic[b1_function].sort()\n print(hex(b1_function), b1_functions[b1_function].name, \"Matched pairs: \", b1_matched_pairs_in_function_dic[b1_function], file=f)\n #print(b1_matched_pairs_in_function_dic[b1_function], file=f)\n \n b1_unmatched_set = b1_all_blocks_in_function_dic[b1_function] - b1_matched_blocks_in_function_dic[b1_function]\n #print(hex(b1_function), b1_functions[b1_function].name)\n print(hex(b1_function), b1_functions[b1_function].name, \"Unmatched blocks:\", b1_unmatched_set, \"\\n\", file = f)\n \n # Print out b2 result\n with open(outputDir + '/b2_matched_pairs_within_functions_deepbindiff', 'w') as f:\n for b2_function in b2_matched_pairs_in_function_dic:\n \n b2_matched_pairs_in_function_dic[b2_function].sort()\n print(hex(b2_function), b2_functions[b2_function].name, \"Matched pairs: \", b2_matched_pairs_in_function_dic[b2_function], file=f)\n #print(b2_matched_pairs_in_function_dic[b2_function], file=f)\n \n b2_unmatched_set = b2_all_blocks_in_function_dic[b2_function] - b2_matched_blocks_in_function_dic[b2_function]\n #print(hex(b1_function), b1_functions[b1_function].name)\n print(hex(b2_function), b2_functions[b2_function].name, \"Unmatched blocks:\", b2_unmatched_set,\"\\n\", file = f)\n \n \n print(\"Done.\")\n \n \n \n '''\n p = angr.Project(binary_path, load_options={'auto_load_libs': False})\n cfg = p.analyses.CFGFast()\n functions = cfg.kb.functions\n matched_pairs_in_function_dic = {}\n \n # Initilize to a emplty list\n with open(outputDir + 'matched_pairs_within_functions', 'w') as f:\n for function in functions:\n #print(hex(function))\n #print(cfg.kb.functions[function].block_addrs)\n #block_addrs = cfg.kb.functions[function].block_addrs\n #for block_addr in block_addrs:\n # print(\"\\t\",hex(block_addr))\n matched_pairs_in_function_dic[function] = []\n \n \n for pair in matched_pair_with_addr:\n bb_addr = int(pair[0],16)\n node = cfg.model.get_any_node(bb_addr)\n if(node != None):\n func_addr = node.function_address\n matched_pairs_in_function_dic[func_addr].append(pair)\n #print(hex(bb_addr), hex(func_addr))\n \n \n for key in matched_pairs_in_function_dic:\n #print(hex(key),\":\",matched_pairs_in_function_dic[key],\"\\n\", file = f)\n matched_pairs_in_function_dic[key].sort()\n print(hex(key), cfg.kb.functions[key].name, file=f)\n print(matched_pairs_in_function_dic[key],\"\\n\", file=f)\n '''\n","sub_path":"src/matched_pairs_analyzer.py","file_name":"matched_pairs_analyzer.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"145579624","text":"from random import randint\n\nclass Fight:\n\tdef __init__(self, name1, name2, damage1, damage2, health1, health2):\n\t\tself.name1 = name1\n\t\tself.name2 = name2\n\t\tself.damage1 = damage1 * randint(0, 10)\n\t\tself.damage2 = damage2 * randint(0, 10)\n\t\tself.health1 = health1\n\t\tself.health2 = health2\n\n\tdef fight(self):\n\t\twhile(self.health1 < 0 or self.health2 < 0):\n\t\t\t# Attack\n\t\t\tself.health1 -= self.damage2\n\t\t\tprint(f\"{self.name2} нанёс {self.damage2} урона. 
У {self.name1} осталось {self.health1} прочности\")\n\n\t\t\t# Defense\n\t\t\tself.health2 -= self.damage1\n\t\t\tprint(f\"{self.name1} нанёс {self.damage1} урона. У {self.name2} осталось {self.health2} прочности\")\n\n\t\tif (self.health1 < 0):\n\t\t\tprint(f\"{self.name1} потоплен игроком {self.name2}.\")\n\n\t\telif (self.health2 < 0):\n\t\t\tprint(f\"{self.name2} потоплен игроком {self.name1}.\")","sub_path":"Fight.py","file_name":"Fight.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"260311475","text":"#coding=utf-8\nfrom gensim import corpora,models\nclass Lsi_Lda(object):\n\t\"\"\"提取基于lsi、lda的相似度作为特征值\"\"\"\n\n\tdef __init__(self,corpus_tfidf,dictionary):\n\t\tsuper(Lsi_Lda,self).__init__()\n\t\tself.corpus_tfidf = corpus_tfidf\n\t\tself.dictionary = dictionary\n\t\tself.calc()\n\t\t# self.lsi_fea = self.calc_lsi()\n\t\t# self.lsi_fea = self.calc_lda()\n\n\tdef calc(self):\n\t\tcorpus_tfidf = self.corpus_tfidf\n\t\tdictionary = self.dictionary\n\n\t\tlsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=5)\n\t\tcorpus_lsi = lsi[corpus_tfidf]\n\t\tself.lsi_fea = corpus_lsi\n\n\t\tlda = models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=5)\n\t\tcorpus_lda = lda[corpus_tfidf]\n\t\tself.lda_fea = corpus_lda\n\n\t# def calc_lsi(self):\n\t# \tlsi = models.LsiModel(self.corpus_tfidf, id2word=self.dictionary, num_topics=5)\n\t# \tcorpus_lsi = lsi[self.corpus_tfidf]\n\t# \tself.lsi_fea = corpus_lsi\n\n\t# def calc_lda(self):\n\t# \tlda = models.LdaModel(self.corpus_tfidf, id2word=self.dictionary, num_topics=5)\n\t# \tcorpus_lda = lda[self.corpus_tfidf]\n\t# \tself.lda_fea = corpus_lda\n\n\tdef get_lsi_fea(self):\n\t\treturn self.lsi_fea\n\n\tdef get_lda_fea(self):\n\t\treturn self.lda_fea\n","sub_path":"lsi_lda.py","file_name":"lsi_lda.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337095160","text":"import os\r\nimport boto3\r\n\r\n\r\ndef welcome():\r\n print('This script will help you to start or stop ec2 instances based on your required region and instance id')\r\n\r\n\r\ndef get_ec2_con_for_region(my_region):\r\n session = boto3.Session(aws_access_key_id='AKIAJ62FNY6IVYG5SVOQ',\r\n aws_secret_access_key='MXHjBSqaFLlf/bsOuB29cWgJws9EsDDRQK1SMCcu',\r\n region_name=my_region)\r\n ec2_con_re = session.resource('ec2')\r\n return ec2_con_re\r\n\r\n\r\ndef list_instances_on_my_region(ec2_con_re):\r\n for each in ec2_con_re.instances.all():\r\n print(each.id)\r\n\r\n\r\ndef get_instance_state(ec2_con_re, in_id):\r\n for each in ec2_con_re.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [in_id]}]):\r\n pr_st = each.state['Name']\r\n return pr_st\r\n\r\n\r\ndef start_instance(ec2_con_re, in_id):\r\n pr_st = get_instance_state(ec2_con_re, in_id)\r\n print(pr_st)\r\n if (pr_st == 'running'):\r\n print('Instance is already running')\r\n else:\r\n for each in ec2_con_re.instances.filter(Filters=[{'Name': 'instance-id', 'Values': [in_id]}]):\r\n each.start()\r\n print('please wait it is going to start, once if it is started then we will let you know')\r\n each.wait_until_running()\r\n print('now it is running')\r\n\r\n\r\ndef stop_instance(ec2_con_re, in_id):\r\n pr_st = get_instance_state(ec2_con_re, in_id)\r\n if (pr_st == 'stopped'):\r\n print('Instance is already stopped')\r\n else:\r\n for each in ec2_con_re.instances.filter(Filters=[{'Name': 
'instance-id', 'Values': [in_id]}]):\r\n each.stop()\r\n print('please wait it is going to stop, once if it is stopped then we will let you know')\r\n each.wait_until_stopped()\r\n print('now it is stopping')\r\n\r\n\r\ndef thank_you():\r\n print('\\n\\n *************Thank you for using this script***************')\r\n return None\r\n\r\n\r\ndef main():\r\n welcome()\r\n my_region = input('Enter the region_name: ')\r\n print(\"please wait....... connecting to your aws ec2 console.....\")\r\n\r\n ec2_con_re = get_ec2_con_for_region(my_region)\r\n print(f'please wait listing all instances ids in your region {my_region}')\r\n list_instances_on_my_region(ec2_con_re)\r\n\r\n in_id = input('Now choose your instance id to start or stop: ')\r\n start_stop = input('Enter either stop or start command for your ec2 instance ')\r\n while True:\r\n if start_stop not in ['start', 'stop']:\r\n start_stop = input('Enter only stop or start commands')\r\n continue\r\n else:\r\n break\r\n\r\n if start_stop == 'start':\r\n start_instance(ec2_con_re, in_id)\r\n else:\r\n stop_instance(ec2_con_re, in_id)\r\n\r\n thank_you()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n os.system('cls')\r\n main()\r\n","sub_path":"aws_modules/StartStopInstanceofEC2.py","file_name":"StartStopInstanceofEC2.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"19943866","text":"\"\"\"\r\nThis is a python based game which is a spoof of the King Arthur and Holy Grail stories.\r\nThere is decision making, randomness, and risk of death.\r\nThis game takes places in a fictional world containing geographic and topological features. (hills, mountains, rivers, lake, etc.)\r\nI hope you enjoy it. Good luck and God speed!\r\n\r\n(i am editing this line via github.)\r\n\"\"\"\r\n#\t\tWhat types of input? raw_data, random input taken from an integer range, random input taken from a list, ?\r\n#\t\tWhat types of output? 
console, standard output on screen.\r\n\r\n# Import modules\r\nimport random\r\n\r\n# dragon alive or dead status\r\n#dragon_alive = True\r\n\r\n# Define global variables\r\n# dice roll d6\r\n#d6 = random.randint(1, 6)\r\n#print d6\r\n# dice roll d20\r\n#d20 = random.randint(1, 20)\r\n#print d20\r\n\r\n\r\n# Define functions which will be used.\r\n# First start function\r\ndef start():\r\n\tprint (\"You are a brave adventurer, starting out in the world.\")\r\n\tprint (\"You want to travel around this land seeking fame and fortune.\")\r\n\tprint (\"It is time to claim what is yours!\")\r\n\tprint (\"Would you like to go: \\n'west' to the Fledd Mountains,\\n'east' to the Dragon's lair, or \\n'southwest' to the Badlands?\")\r\n\t\r\n\tchoice = input(\"> \")\r\n\tif choice == \"west\":\r\n\t\tfledd()\r\n\telif choice == \"east\":\r\n\t\tdragon()\r\n\telif choice == \"southwest\":\r\n\t\tbadlands()\r\n\telse:\r\n\t\tdead(\"You stumble around the area until you starve.\")\r\n\r\n# Second start function\r\ndef start2():\r\n\t#global dragon_alive\r\n\tprint (\"You are back to where you were before.\")\r\n\tprint (\"Would you like to go: \\n'east' to the Dragon's lair, 'west' to the Fledd mountains, or \\n'southwest' to the Badlands?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"east\": #and dragon_alive:\r\n\t\tdragon()\r\n\telif choice == \"southwest\":\r\n\t\tbadlands()\r\n\telif choice == \"west\":\r\n\t\tfledd()\r\n\telse:\r\n\t\tdead(\"You stumble around the area until you starve.\")\r\n\t\t\r\n# Third start function\t\t\r\ndef start3():\r\n\tprint (\"Would you like to go: \\n'east' to the High plains, or \\n'south' to the Land of Black Knights?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"east\":\r\n\t\thighplains()\r\n\telif choice == \"south\":\r\n\t\tknights()\r\n\telse:\r\n\t\tdead(\"You stumble around the area until you starve.\")\r\n\t\t\r\n# Fourth start function\t\t\r\ndef start4():\r\n\t#global dragon_alive\r\n\tprint (\"Would you like to go: \\n'northeast' to the Dragon's lair, \\n'south' to the Land of Black Knights or\\n'east' to Troll Town?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"northeast\": #and dragon_alive:\r\n\t\tdragon()\r\n\telif choice == \"south\":\r\n\t\tknights()\r\n\telif choice == \"east\":\r\n\t\ttrolltown()\r\n\telse:\r\n\t\tdead(\"You stumble around the area until you starve.\")\r\n\t\t\r\n# Fifth start function\r\ndef start5():\r\n\tprint (\"Would you like to go 'east' to the lake, or\\'south' to the Land of Black Knights?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"east\":\r\n\t\tlake()\r\n\telif choice == \"south\":\r\n\t\tknights()\r\n\telse:\r\n\t\tdead(\"You wear out your welcome with the trolls and they eat you.\")\r\n\r\n# Fledd Mountains Function\r\ndef fledd():\r\n\tprint (\"These mountains are impassable, you must turn around.\")\r\n\tprint (\"Would you like to go back? 'y' or 'n'.\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"y\":\r\n\t\tprint (\"You are now back where you started.\")\r\n\t\tstart2()\r\n\telse:\r\n\t\tdead(\"You remain here until you starve to death.\")\r\n\t\t\r\n# Dragon's Lair function\r\ndef dragon():\r\n\t#if dragon_alive:\r\n\tprint (\"This is the great mountain lair of the fearsome dragon!\")\r\n\tprint (\"Would you like to go back? 
y or n\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"y\":\r\n\t\tprint (\"You are now back where you started.\")\r\n\t\tstart2()\r\n\telse:\r\n\t\tprint (\"You have awakened the dragon, prepare to fight!\")\r\n\t\tdicerolldragon()\r\n\t#if not dragon_alive:\t\r\n\tprint (\"Would you like to head 'south' to the lake, or \\n'west' back to where you came from?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"west\":\r\n\t\tstart2()\r\n\telif choice == \"south\":\r\n\t\tlake()\r\n\telse:\r\n\t\tdead(\"You wander around the dragon's lair until you starve.\")\r\n\r\n# Make this into a function to be called.\r\n# Now do a d6 dice roll for battle.\r\ndef dicerolldragon():\r\n\t#global dragon_alive\r\n\td6dragon = random.randint(1, 6)\r\n\tprint (\"The dragon has rolled a: %d\") % d6dragon\r\n\td6you = random.randint(1, 6)\r\n\tprint (\"You have rolled a: %d\") % d6you\r\n\tif d6dragon > d6you:\r\n\t\tdead(\"The dragon got to you first and scorched you with fire.\")\r\n\telif d6dragon == d6you:\r\n\t\tprint (\"You have both attacked at the same time resulting in a draw.\")\r\n\t\tdicerolldragon()\r\n\telse:\r\n\t\tprint (\"You have slain the beast with one mighty blow!\")\r\n\t\t#return not dragon_alive\r\n\t\t\r\ndef badlands():\r\n\tprint (\"You have entered the badlands. Be wary of bandits!\")\r\n\trandencounter = random.randint(1, 6)\r\n\tif randencounter >= 3:\r\n\t\trandbandit()\r\n\t\tdicerollbandit()\r\n\telse:\r\n\t\tprint (\"The area seems clear of danger.\")\r\n\t\tprint (\"Would you like to continue on 'east' to the High plains, or \\n'south' to the Land of the Black Knights?\")\r\n\t\tchoice = input(\"> \")\r\n\t\tif choice == \"east\":\r\n\t\t\thighplains()\r\n\t\telif choice == \"south\":\r\n\t\t\tknights()\r\n\t\telse:\r\n\t\t\tdead(\"You wander around the badlands till you die of thirst.\")\r\n\t\t\t\r\n# random attack of creature.\t\t\t\r\ndef dicerollbandit():\r\n\td6bandit = random.randint(1, 6)\r\n\td6you = random.randint(1, 6)\r\n\tif d6bandit == d6you:\r\n\t\tprint (\"You continue to travel through this harsh and unforgiving terrain.\")\r\n\t\tstart3()\r\n\telif d6bandit > d6you:\r\n\t\tprint (\"A figure jumps out from behind a large rocky outcropping and surprises you.\")\r\n\t\tdicerollbandit2()\r\n\telse:\r\n\t\tprint (\"You spot the figure hiding behind a large rocky outcropping.\")\r\n\t\tprint (\"What do you want to do? 'attack' or 'flee' back the way you came.\")\r\n\t\tchoice = input(\"> \")\r\n\t\tif choice == \"attack\":\r\n\t\t\tdicerollbandit2()\r\n\t\telif choice == \"flee\":\r\n\t\t\tstart2()\r\n\t\telse:\r\n\t\t\tdead(\"Your hesitation has cost you your life.\")\r\n\t\t\t\r\n# Second dice roll interaction with bandit. 
There has got to be a better way to set up the functions so that I don't keep repeating myself.\r\ndef dicerollbandit2():\r\n\td6bandit = random.randint(1, 6)\r\n\tprint (\"The attacker has rolled a: %d\") % d6bandit\r\n\td6you = random.randint(1, 6)\r\n\tprint (\"You have rolled a: %d\") % d6you\r\n\tif d6bandit > d6you:\r\n\t\tdead(\"The attacker got to you first and cut you down.\")\r\n\telif d6bandit == d6you:\r\n\t\tprint (\"You have both attacked at the same time resulting in a draw.\")\r\n\t\tdicerollbandit2()\r\n\telse:\r\n\t\tprint (\"You have slain the attacker with one mighty blow!\") \r\n\t\tstart3()\r\n\t\t\r\ndef dicerollknight():\r\n\td6knight = random.randint(1, 6)\r\n\tprint (\"The attacker has rolled a: %d\") % d6knight\r\n\td6you = random.randint(1, 6)\r\n\tprint (\"You have rolled a: %d\") % d6you\r\n\tif d6knight > d6you:\r\n\t\tdead(\"The attacker got to you first and cut you down.\")\r\n\telif d6knight == d6you:\r\n\t\tprint (\"You have both attacked at the same time resulting in a draw.\")\r\n\t\tdicerollknight()\r\n\telse:\r\n\t\tprint (\"You have slain the attacker with one mighty blow!\") \r\n\t\tprint (\"Would you like to go 'north' to Troll Town, or \\n'east' to the lake?\")\r\n\t\tchoice = input(\"> \")\r\n\t\tif choice == \"north\":\r\n\t\t\ttrolltown()\r\n\t\telif choice == \"east\":\r\n\t\t\tlake()\r\n\t\telse:\r\n\t\t\tdead(\"You wander around until you starve.\")\r\n\t\t\t\r\ndef highplains():\r\n\tprint (\"You have reached the High plains.\")\r\n\tprint (\"This area is the headwaters of the rivers which flow into the lake.\")\r\n\tprint (\"You feel that there is a growing magical energy in this area.\")\r\n\t# try setting up a random spawn of a monster here.\r\n\t# something like: generate a random number, then use that as a key to query a list, then use that list item to call the monster's function.\r\n\trandmonster()\r\n\tprint (\"Would you like to 'attack' or 'flee'?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"attack\":\r\n\t\tdicerollmonster1()\r\n\telif choice == \"flee\":\r\n\t\tstart3()\r\n\telse:\r\n\t\tdead(\"Your hesitation has cost you your life.\")\r\n\r\ndef dicerollmonster1():\r\n\td6monster = random.randint(1, 6)\r\n\tprint (\"The monster has rolled a: %d\") % d6monster\r\n\td6you = random.randint(1, 6)\r\n\tprint (\"You have rolled a: %d\") % d6you\r\n\tif d6monster > d6you:\r\n\t\tdead(\"The monster got to you first and destroyed you.\")\r\n\telif d6monster == d6you:\r\n\t\tprint (\"You have both attacked at the same time resulting in a draw.\")\r\n\t\tdicerollmonster1()\r\n\telse:\r\n\t\tprint (\"You have slain the beast with one mighty blow!\")\r\n\t\tstart4()\r\n\r\ndef dicerollmonster2():\r\n\td6monster = random.randint(1, 6)\r\n\tprint (\"The monster has rolled a: %d\") % d6monster\r\n\td6you = random.randint(1, 6)\r\n\tprint (\"You have rolled a: %d\") % d6you\r\n\tif d6monster > d6you:\r\n\t\tdead(\"The monster got to you first and destroyed you.\")\r\n\telif d6monster == d6you:\r\n\t\tprint (\"You have both attacked at the same time resulting in a draw.\")\r\n\t\tdicerollmonster2()\r\n\telse:\r\n\t\tprint (\"You have slain the beast with one mighty blow!\")\r\n\t\tstart5()\t\r\n\t\t\r\ndef knights():\r\n\tprint (\"You have reached the land of the Black Knights.\")\r\n\tprint (\"Be wary, these knights are not friendly, they may attack at random.\")\r\n\tprint (\"You still feel a growing magical energy in this area.\")\r\n\tdiceroll = random.randint(1, 6)\r\n\t#print diceroll # for testing purposes\r\n\tif diceroll <= 
3:\r\n\t\tprint (\"The streets are quiet. You proceed with caution.\")\r\n\t\tprint (\"You continue on to the lake.\")\r\n\t\tlake()\r\n\telif diceroll == 4:\r\n\t\tprint (\"You have stepped into a teleportation trap. You return to the High plains.\")\r\n\t\thighplains()\r\n\telse:\r\n\t\trandknight()\r\n\t\tprint (\"Would you like to 'attack' or 'flee'?\")\r\n\t\tchoice = input(\"> \")\r\n\t\tif choice == \"attack\":\r\n\t\t\tdicerollknight()\r\n\t\telif choice == \"flee\":\r\n\t\t\tstart3()\r\n\t\telse:\r\n\t\t\tdead(\"Your hesitation has cost you your life.\")\r\n\r\ndef trolltown():\r\n\tprint (\"You have entered Troll Town.\")\r\n\tprint (\"The trolls here are eyeing you with suspicion, and a glint of hatred fills their eyes.\")\r\n\tprint (\"A well dressed and well armed troll approaches you.\")\r\n\ttrollchief()\r\n\r\n# The enchanted lake\r\ndef lake():\r\n\tprint (\"You have arrived at a large and beautiful lake.\")\r\n\tprint (\"You feel an intense magical energy all around you.\")\r\n\tprint (\"There is a strange feeling of danger, but it is not distinct.\")\r\n\twaterytart()\r\n\r\n# List of Monsters to be randomly spawned.\r\n# Monster 1, Monster 2, Monster 3\r\ndef randmonster():\r\n\tmonsters = ['orc', 'kobold', 'werewolf']\r\n\tprint (\"A random %r has crossed paths with you.\") % (random.choice(monsters)) \r\n\treturn(random.choice(monsters))\r\n\r\n# Random list of human enemies to be randomly spawned.\r\n# Bandit 1, Bandit 2, Bandit 3\r\ndef randbandit():\r\n\tbandits = ['thief', 'assassin', 'barbarian']\r\n\tprint (\"A random %r has been spotted by you.\") % (random.choice(bandits))\r\n\treturn (random.choice(bandits))\r\n\t\r\n# Knight 1, Knight 2, Knight 3\t\r\ndef randknight():\r\n\tknights = ['Black Knight captain', 'Black Knight corporal', 'Black Knight general']\r\n\tprint (\"A %r has challenged you.\") % (random.choice(knights))\r\n\treturn(random.choice(knights))\r\n\t\r\n# Maiden of the Lake\r\ndef waterytart():\r\n\tprint (\"Suddenly, woman's voice arises from the lake.\")\r\n\tprint (\"You have disturbed my sleep, mortal. What is it you desire?\\n'power'? \\n'wealth'? \\n'peace'? \\n'war'?\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"power\":\r\n\t\tdead(\"You are just another corrupt mortal. Die!\")\r\n\telif choice == \"wealth\":\r\n\t\tdead(\"You are just another greedy mortal. Die!\")\r\n\telif choice == \"war\":\r\n\t\tdead(\"You are just another vengeful and violent mortal. Die!\")\r\n\telse:\r\n\t\tprint (\"You are a worthy mortal. You have earned my favor.\")\r\n\t\tprint (\"---A watery tart throws a sword at you!---\")\r\n\t\twinner()\r\n\t\tprint (\"Would you like to play again? 'y' or 'n'.\")\r\n\t\tchoice = input(\"> \")\r\n\t\tif choice == \"y\":\r\n\t\t\tstart()\r\n\t\telse:\r\n\t\t\texit(0)\r\n\t\t\t\r\ndef trollchief():\r\n\tprint (\"The troll chief would like to challenge you to a guessing game.\")\r\n\tprint (\"Do you accept? 
'y' or 'n'.\")\r\n\tchoice = input(\"> \")\r\n\tif choice == 'y':\r\n\t\tnumguessgame()\r\n\telse:\r\n\t\tprint (\"Awww, you're no fun!\")\r\n\t\ttrolltown()\r\n\t\t\r\ndef numguessgame():\r\n\tguessestaken = 0\r\n\trandnum = random.randint(1, 100)\r\n\tprint (\"The troll king says, 'I'm thinking of a number between 1 and 100.'\")\r\n\tprint (\"'You must guess the number in 5 attempts in order to pass.'\")\r\n\tprint (\"'If you fail, you will fight a random creature.'\")\r\n\tprint (\"'If you pass you will be teleported to the lake.'\")\r\n\twhile guessestaken < 6:\r\n\t\tprint (\"What is your guess?\")\r\n\t\tguess = int(input())\r\n\t\tguessestaken += 1\r\n\t\t\r\n\t\tif guess < randnum:\r\n\t\t\tprint (\"Your guess is too low.\")\r\n\t\t\t\r\n\t\telif guess > randnum:\r\n\t\t\tprint (\"Your guess is too high.\")\r\n\t\t\t\r\n\t\telif guess == randnum:\r\n\t\t\tprint (\"You have guessed the number.\")\r\n\t\t\tprint (\"You may pass!\")\r\n\t\t\tlake()\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\trandmonster()\r\n\t\t\tprint (\"Would you like to 'attack' or 'flee'?\")\r\n\t\t\tchoice = input(\"> \")\r\n\t\t\tif choice == \"attack\":\r\n\t\t\t\tdicerollmonster2()\r\n\t\t\telif choice == \"flee\":\r\n\t\t\t\tstart4()\r\n\t\t\telse:\r\n\t\t\t\tdead(\"Your hesitation has cost you your life.\")\r\n\t\t\t\r\n\tif guessestaken >= 6:\r\n\t\trandmonster()\r\n\t\tprint (\"Would you like to 'attack' or 'flee'?\")\r\n\t\tchoice = input(\"> \")\r\n\t\tif choice == \"attack\":\r\n\t\t\tdicerollmonster2()\r\n\t\telif choice == \"flee\":\r\n\t\t\tstart4()\r\n\t\telse:\r\n\t\t\tdead(\"Your hesitation has cost you your life.\")\r\n\t\t\r\n# Dead function\r\ndef dead(reason):\r\n\tprint (reason, \"\\nYou have passed through the field of reeds, and are of this earth no more.\")\r\n\t# Insert an option to restart the game from the beginning, or just quit the game.\r\n\tprint (\"Would you like to play again? 
'y' or 'n'.\")\r\n\tchoice = input(\"> \")\r\n\tif choice == \"y\":\r\n\t\tstart()\r\n\telse:\r\n\t\texit(0)\r\n\r\n# Winner screen\r\ndef winner():\r\n\tprint (\"*****************************************************************\")\r\n\tprint (\"You have acquired the great sword \\'Rex Calibur\\'.\")\r\n\tprint (\"You have been crowned King of the Land for the remainder of your days.\")\r\n\tprint (\"You go on to do many great things in the world.\")\r\n\tprint (\"You are the Champion of the People!\")\r\n\tprint (\"*****************************************************************\")\r\n\t\r\nstart()\r\n# ","sub_path":"RexCaliburGamePy3.py","file_name":"RexCaliburGamePy3.py","file_ext":"py","file_size_in_byte":13781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"426992039","text":"ZP = []\nmin_zp = []\nmin_zp_name = []\nwith open(\"text_3.txt\", \"r\", encoding=\"utf-8\") as user_list:\n user_file = user_list.read().split('\\n')\n for i in user_file:\n i = i.split()\n ZP.append(float(i[1]))\n if float(i[1]) < 20000:\n min_zp.append(i[1])\n min_zp_name.append(i[0])\nres = zip(min_zp_name, min_zp)\nres_list = list(res)\nprint(\"Сотрудники у которых зп меньше 20 000\")\nfor i in res_list:\n print(f\" {i[0]} - {i[1]}\")\nprint(f\"Средняя зп по компании: {sum(map(float, ZP)) / len(ZP)}\")\n","sub_path":"hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"265661171","text":"import sys\nfrom model_test_utils import *\nfrom pylab import *\n\nfilename= sys.argv[1]\nfilename = str(filename)\ncls_init = np.load(filename)\ncls_init = cls_init[:,:2500]\ncl_fid = np.load('clfid.npy')[:2500]\nmodes = get_modes()\nplot(modes[0,1,:])\nplot(modes[0,13,:])\nplot(modes[1,13,:])\nshow()\ncls_rec = cls_init.copy()\nfor i in arange(cls_init[:,0].size - 1):\n\tcls_rec[i,:] = reconstruct_cl(cls_init[i,:], cl_fid, modes)\n\nnp.save(filename[:-7]+'reconstructedcls.npy', cls_rec)\n","sub_path":"model_test/mnuneff/get_reconstructed_cls.py","file_name":"get_reconstructed_cls.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"519839227","text":"#*****************************************************************************/\n# @file sty.py\n# @author Majid Nasiri 95340651\n# @version V1.0.0\n# @date 29 May 2017\n# @brief \n#*****************************************************************************/\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport os\n\n\nif (not os.path.exists('./results')):\n os.makedirs('./results')\n\ndef evaluate_function(xi):\n X = np.power(xi, 4) - 16 * np.power(xi, 2) + 5 * xi\n return np.sum(X, axis=1)/2\n\ndef map_index(n,i,j):\n if (n == 2):\n mat = np.array([[0,0],[0,0]])\n return mat[i,j]\n elif (n == 3):\n mat = np.array([[0,0,1],[0,0,2],[1,2,0]]) \n return mat[i,j]\n elif (n == 4):\n mat = np.array([[0,0,1,2],[0,0,3,4],[1,3,0,5],[2,4,5,0]]) \n return mat[i,j]\n \n\n\nclass evolutionary_computing():\n def __init__(self, rep='float', crossover='single', mutation='uniform'):\n \n \n self.representation_type = rep\n self.max_generation = 4\n self.population_size = 4*5\n self.gene_num = 2\n\n self.xover_type = crossover\n self.crossover_prob = 0.9 \n \n 
self.mutation_type = mutation\n self.mutation_prob = 0.5\n self.mutation_prob = 1/self.gene_num \n \n \n self.termination = 0\n self.termination_count = 0\n self.termination_max_unchage = 25\n \n self.best_fittness_array = []\n \n print('representation type : ', rep)\n print(' crossover type : ', crossover)\n print(' mutation type : ', mutation)\n \n def fittness(self):\n \"\"\"\n EVALUATE each candidate\n \"\"\"\n \n self.gen_evaluation = evaluate_function(self.generation)\n self.gen_fittness = -self.gen_evaluation\n gen_value = self.gen_fittness - np.min(self.gen_fittness)\n self.gen_probability = gen_value / np.sum(gen_value)\n \n # save best fittness\n self.best_fittness_arg = self.gen_fittness.argmax()\n self.best_fittness = self.gen_fittness.max()\n self.best_chromosome = self.generation[self.best_fittness_arg]\n self.best_fittness_array.append(self.best_fittness)\n \n def initialize(self):\n \"\"\"\n INITIALISE population with random candidate solutions\n \"\"\"\n \n if (self.representation_type == 'float'):\n self.generation = np.random.uniform(-5, 5, size=self.gene_num)\n for _ in range(self.population_size-1):\n self.generation = np.vstack([self.generation, np.random.uniform(-5, 5, size=self.gene_num)])\n \n if (self.mutation_type =='uncorr_one_sigma'):\n self.tao = 1 / np.sqrt(self.gene_num)\n self.eps = 0.1\n self.generation_sigma = np.random.uniform(-5, 5, size=1)\n for _ in range(self.population_size-1):\n self.generation_sigma = np.vstack([self.generation_sigma, np.random.uniform(-5, 5, size=1)])\n \n if (self.mutation_type =='uncorr_n_sigma'):\n self.tao = 1 / np.sqrt(2 * np.sqrt(self.gene_num))\n self.taoh = 1 / np.sqrt(2 * self.gene_num)\n self.eps = 0.1\n self.generation_sigma = np.random.uniform(-5, 5, size=self.gene_num)\n for _ in range(self.population_size-1):\n self.generation_sigma = np.vstack([self.generation_sigma, np.random.uniform(-5, 5, size=self.gene_num)])\n\n if (self.mutation_type =='corr'):\n self.tao = 1 / np.sqrt(2 * np.sqrt(self.gene_num))\n self.taoh = 1 / np.sqrt(2 * self.gene_num)\n self.eps = 0.1\n self.generation_sigma = np.random.uniform(-5, 5, size=self.gene_num)\n for _ in range(self.population_size-1):\n self.generation_sigma = np.vstack([self.generation_sigma, np.random.uniform(-5, 5, size=self.gene_num)])\n \n self.alpha_num = int((self.gene_num * (self.gene_num-1))/2)\n self.generation_alpha = np.random.uniform(-5, 5, size=self.alpha_num)\n for _ in range(self.population_size-1):\n self.generation_alpha = np.vstack([self.generation_alpha, np.random.uniform(-5, 5, size=self.alpha_num)])\n\n\n\n\n def display(self, num):\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n X = np.arange(-5, 5, 0.05)\n Y = np.arange(-5, 5, 0.05)\n X, Y = np.meshgrid(X, Y) \n R1 = np.power(X, 4) - 16 * np.power(X, 2) + 5 * X\n R2 = np.power(Y, 4) - 16 * np.power(Y, 2) + 5 * Y\n Z = 0.5 * (R1+R2)\n # Plot a basic wireframe.\n ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)\n ax.set_xlim(-6, 6)\n ax.set_ylim(-6, 6)\n #ax.set_zlim(-100, 100)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n ax.set_title('best fittness = '+ str(self.best_fittness))\n \n\n \n for i,cor in enumerate(self.generation):\n ax.scatter(cor[0], cor[1], self.gen_evaluation[i], c='r')\n \n fig.savefig('./results/'+self.xover_type+'_#_'+self.mutation_type+'_#_'+str(num)+'.png', dpi=fig.dpi)\n\n def run(self, display='off'):\n \"\"\"\n largest quality smaller than self.cost_max is desire solution\n \"\"\"\n \n #INITIALISE population with random 
candidate solutions\n self.initialize()\n \n \n #EVALUATE each candidate\n self.fittness()\n \n # show function and solutions \n if (self.gene_num == 2 and display == 'on'):\n self.display(num=0)\n\n #REPEAT UNTIL ( TERMINATION CONDITION is satisfied )\n gen = 0\n while(self.termination == 0):\n \n #SELECT parents\n self.offspring_num = int(self.population_size/2);\n pair_parents_num = int(self.offspring_num/2)\n random_parents = np.random.choice(np.arange(0, self.population_size), size=(pair_parents_num,5), p = self.gen_probability)\n arg_sort = np.argsort(self.gen_fittness[random_parents])\n best_parent = np.column_stack((arg_sort[:,-1], arg_sort[:,-2]))\n self.offspring = self.generation[best_parent]\n \n \n ################################################################### \n ###################################################################\n ###################################################################\n #RECOMBINE pairs of parents\n recombination_prob_array = np.random.choice(np.arange(0, 2) , size=pair_parents_num, p = [1-self.crossover_prob, self.crossover_prob])\n if (self.xover_type =='single'):\n xover_point = np.random.randint(self.gene_num, size=pair_parents_num)\n \n alpha = 0.5\n # relpace parent's gene with new gene\n for off in range(pair_parents_num):\n if (recombination_prob_array[off] == True):\n self.offspring[off, :, xover_point[off]] = (alpha * self.offspring[off, 0, xover_point[off]])+((1-alpha) * self.offspring[off, 1, xover_point[off]])\n \n self.offspring = np.reshape(self.offspring, (-1,self.gene_num))\n \n \n elif (self.xover_type =='simple'):\n xover_point = np.random.randint(self.gene_num, size=pair_parents_num)\n alpha = 0.5\n # relpace parent's gene with new gene\n for off in range(pair_parents_num):\n if (recombination_prob_array[off] == True):\n for xpi in range(xover_point[off], self.gene_num):\n self.offspring[off, :, xpi] = (alpha * self.offspring[off, 0, xpi])+((1-alpha) * self.offspring[off, 1, xpi])\n self.offspring = np.reshape(self.offspring, (-1,self.gene_num))\n\n\n elif (self.xover_type =='whole'):\n alpha = 0.5\n # relpace parent's gene with new gene\n for off in range(pair_parents_num):\n if (recombination_prob_array[off] == True):\n for xpi in range(self.gene_num):\n self.offspring[off, :, xpi] = (alpha * self.offspring[off, 0, xpi])+((1-alpha) * self.offspring[off, 1, xpi])\n self.offspring = np.reshape(self.offspring, (-1,self.gene_num))\n \n \n elif (self.xover_type =='blend'):\n #print(self.offspring)\n alpha = 0.5\n # relpace parent's gene with new gene\n for off in range(pair_parents_num):\n if (recombination_prob_array[off] == True):\n di = np.abs(self.offspring[off,0,:] - self.offspring[off,1,:])\n zxi1 = self.offspring[off,0,:] - alpha * di\n zxi2 = self.offspring[off,0,:] + alpha * di \n zyi1 = self.offspring[off,1,:] - alpha * di\n zyi2 = self.offspring[off,1,:] + alpha * di\n for g in range(self.gene_num):\n self.offspring[off,0,g] = np.random.uniform(zxi1[g], zxi2[g], size=1)\n self.offspring[off,1,g] = np.random.uniform(zyi1[g], zyi2[g], size=1)\n self.offspring = np.reshape(self.offspring, (-1,self.gene_num))\n \n \n \n ###################################################################\n ###################################################################\n ###################################################################\n #MUTATE the resulting offspring \n mutation_probs = np.random.choice(np.arange(0, 2) , size=self.offspring_num, p = [1-self.mutation_prob, self.mutation_prob])\n if (self.mutation_type 
=='uniform'):\n mutation_points = np.random.randint(self.gene_num, size=self.offspring_num) \n new_genes = np.random.uniform(-5, 5, size=self.offspring_num)\n \n for off in range(self.offspring_num):\n if (mutation_probs[off] == True):\n self.offspring[off, mutation_points[off]] = new_genes[off]\n \n \n elif (self.mutation_type =='non_uniform'):\n for off in range(self.offspring_num):\n N = np.random.normal()\n self.offspring[off, :] = self.offspring[off, :] + N\n \n #elif (self.mutation_type =='self_adaptive'): \n \n elif (self.mutation_type =='uncorr_one_sigma'):\n self.offspring_sigma = self.generation_sigma[best_parent]\n self.offspring_sigma = np.reshape(self.offspring_sigma, (-1,1))\n N = np.random.normal(size=self.offspring_num)\n for off in range(self.offspring_num):\n self.offspring_sigma[off] = self.offspring_sigma[off] * np.exp(self.tao * N[off])\n if (self.offspring_sigma[off] < self.eps): self.offspring_sigma[off] = self.eps\n \n for off in range(self.offspring_num):\n for g in range(self.gene_num):\n Ni = np.random.normal()\n self.offspring[off, g] = self.offspring[off, g] + self.offspring_sigma[off] * Ni\n \n elif (self.mutation_type =='uncorr_n_sigma'):\n self.offspring_sigma = self.generation_sigma[best_parent]\n self.offspring_sigma = np.reshape(self.offspring_sigma, (-1,self.gene_num))\n #print(self.offspring_sigma.shape)\n \n N = np.random.normal(size=self.offspring_num)\n Ni = np.random.normal(size=(self.offspring_num,self.gene_num))\n \n for off in range(self.offspring_num):\n for g in range(self.gene_num):\n self.offspring_sigma[off, g] = self.offspring_sigma[off, g] * np.exp(self.taoh * N[off] + self.tao * Ni[off, g])\n if (self.offspring_sigma[off, g] < self.eps): self.offspring_sigma[off, g] = self.eps\n \n for off in range(self.offspring_num):\n for g in range(self.gene_num):\n Ni = np.random.normal()\n self.offspring[off, g] = self.offspring[off, g] + self.offspring_sigma[off, g] * Ni\n\n\n elif (self.mutation_type =='corr'):\n self.offspring_sigma = self.generation_sigma[best_parent]\n self.offspring_sigma = np.reshape(self.offspring_sigma, (-1,self.gene_num))\n self.offspring_alpha = self.generation_alpha[best_parent]\n self.offspring_alpha = np.reshape(self.offspring_alpha, (-1,self.alpha_num))\n #print(self.offspring_sigma.shape)\n #print(self.offspring_alpha.shape)\n \n b = (np.pi/180)*5\n N = np.random.normal(size=self.offspring_num)\n Ni = np.random.normal(size=(self.offspring_num,self.gene_num))\n for off in range(self.offspring_num):\n for g in range(self.gene_num):\n self.offspring_sigma[off, g] = self.offspring_sigma[off, g] * np.exp(self.taoh * N[off] + self.tao * Ni[off, g])\n if (self.offspring_sigma[off, g] < self.eps): self.offspring_sigma[off, g] = self.eps\n \n N = np.random.normal(size=self.offspring_num)\n for off in range(self.offspring_num):\n for a in range(self.alpha_num):\n self.offspring_alpha[off, a] = self.offspring_alpha[off, a] + b * N[off]\n if (np.abs(self.offspring_sigma[off, g]) > np.pi): self.offspring_sigma[off, g] -= 2*np.pi* np.sign(self.offspring_sigma[off, g])\n \n for off in range(self.offspring_num):\n cov_matrix = np.zeros((self.gene_num, self.gene_num))\n #print(cov_matrix.shape)\n for i in range(self.gene_num):\n cov_matrix[i,i] = self.offspring_sigma[off, i]**2\n \n for i in range(self.gene_num):\n for j in range(self.gene_num):\n if (i != j):\n cov_matrix[i,j] = (self.offspring_sigma[off, i]**2 - self.offspring_sigma[off, j]**2) * np.tan(2*self.offspring_alpha[off,map_index(self.gene_num,i,j)]) / 2\n\n 
#print(cov_matrix)\n N = np.random.multivariate_normal([0,0], cov_matrix, 1)\n self.offspring[off, :] = self.offspring[off, :] + N \n\n\n \n \n # SELECT individuals for the next generation\n # best to worst\n gen_new_idxs = self.gen_probability.argsort()[::-1] \n # save indices of best chromosomes\n gen_new_idxs = gen_new_idxs[0:self.offspring_num] \n # 50% of old generation have been replaced with new offsprings \n self.generation = np.concatenate((self.generation[gen_new_idxs], self.offspring))\n \n \n #EVALUATE new candidates \n self.fittness()\n \n # show function and solutions \n if (self.gene_num == 2 and display == 'on'):\n self.display(gen+1)\n \n\n gen +=1\n if (gen == self.max_generation):\n self.termination = 1\n \n\n\n\n\n\n\nplt.close(\"all\")\n#xover_list = ['single']\n#mutation_list = ['uniform']\n\nxover_list = ['single', 'simple', 'whole', 'blend']\nmutation_list = ['uniform', 'non_uniform', 'uncorr_one_sigma', 'uncorr_n_sigma', 'corr']\n\n\nfor xover_type in xover_list:\n for mutation_type in mutation_list:\n stylinski_model = evolutionary_computing(rep='float', crossover=xover_type, mutation=mutation_type)\n stylinski_model.run(display='on')\n\n fig = plt.figure()\n plt.plot(stylinski_model.best_fittness_array)\n plt.xlabel('Generation')\n plt.ylabel('Best fittness')\n print(' best chromosome :', stylinski_model.best_chromosome)\n print(' best fittness : ', stylinski_model.best_fittness)\n print('--------------------:-------------------')\n fig.savefig('./results/'+stylinski_model.xover_type+'_#_'+stylinski_model.mutation_type+'_#_result.png', dpi=fig.dpi)\n\n\n \n \n#INITIALISE population with random candidate solutions\n#EVALUATE each candidate\n#REPEAT UNTIL ( TERMINATION CONDITION is satisfied )\n #SELECT parents\n #RECOMBINE pairs of parents\n #MUTATE the resulting offspring \n #EVALUATE new candidates\n #SELECT individuals for the next generation\n \n\n","sub_path":"evolutionary_computing/ECHW3_styblink-tang_xover_mutation/styblinktang_xover_mutation.py","file_name":"styblinktang_xover_mutation.py","file_ext":"py","file_size_in_byte":17301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"348248505","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom products.models import Product\nfrom datetimewidget.widgets import DateTimeWidget, DateWidget, TimeWidget \n\n\nclass PostForm(forms.ModelForm):\n\n class Meta:\n model = Product\n fields = ('title', 'description', 'active', 'quantity','address', 'zip_Code', 'expire_date', )\n\n\n\nclass DocumentForm(forms.Form):\n docfile = forms.FileField(\n label='Select image (Image can not be update in future)',\n )\n title = forms.CharField(\n label='title',\n )\n \n description = forms.CharField(\n label='description',\n ) \n \n active = forms.BooleanField(\n label='active',\n )\n quantity = forms.IntegerField(\n label='quantity',\n )\n zip_Code = forms.CharField(\n label='zip_Code',\n )\n address = forms.CharField(\n label='address',\n )\n expire_date = forms.DateTimeField(widget=DateTimeWidget(usel10n=True, bootstrap_version=3))\n \n \n \n","sub_path":"products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98448456","text":"from numba import cuda\nimport math\n\n@cuda.jit(device=True, inline=True)\ndef fair(w1,raf1,w2,raf2,random_number, f):\n p = 0.5 + f * math.fabs(w1 - w2) / (w1 + w2)\n dw = min((1.0 - raf1) 
* w1, (1.0 - raf2) * w2)\n return math.copysign(dw, (p - random_number) * (w2 - w1))\n\n@cuda.jit(device=True, inline=True)\ndef loser(w1,raf1,w2,raf2,random_number, f):\n p = 0.5 + f * math.fabs(w1 - w2) / (w1 + w2)\n a = math.copysign(1, (p -random_number) * (w2 - w1))\n dw = (a - 1) * (1.0 - raf1) * w1 + (1 + a) * (1.0 - raf2) * w2\n return 0.5 * dw\n","sub_path":"econophysics.py","file_name":"econophysics.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"538640508","text":"from base import kernels\nimport copy\nfrom simulation.Simulation_features import *\n\n\ndef LS_feature_fit(time_feature_list, T='None', beta_t=1, kernel_t='exp', beta_a=1, kernel_a='gaussian', NonNeg=False):\n # TODO: define an object of the input data\n\n def exp_lasting_time(time_list, T='None', beta=1, kernel='exp'):\n output = copy.deepcopy(time_list)\n if T == 'None':\n T = np.max(time_list)\n elif type(T) not in [int, float]:\n raise ValueError('T should be a number.')\n\n if kernel == 'exp':\n for seq in np.arange(time_list.__len__()):\n for timestamp in np.arange(time_list[seq].__len__()):\n output[seq][timestamp] = kernels.KernelExp((T - time_list[seq][timestamp]), beta) / beta\n return output\n\n def feature_kernels_sum_over_all_dimension(alpha, feature_dataframe, beta=1, kernel='gaussian'):\n # this function is to calculate \\dfrac1n \\sum_{\\alpha^i_k in feature_dataframe} \\kappa (\\alpha, \\alpha^i_k )\n # n is the length of feature_dataframe\n # feature_dataframe must be a pandas dataframe containing all the feature vectors of the events (all dimensions)\n output = 0\n if type(feature_dataframe) is pd.core.frame.DataFrame:\n for i in np.arange(feature_dataframe.shape[0]):\n output += kernels.kernelRBF(np.linalg.norm(alpha - feature_dataframe.iloc[i]), beta)\n else:\n raise ValueError('feature_dataframe should be a pandas dataframe.')\n return output / feature_dataframe.shape[0]\n\n def feature_kernels_sum_over_all_dimension_dual(alpha, alpha_prime, feature_dataframe, beta=1, kernel='gaussian'):\n # this function is to calculate \\dfrac1n \\sum_{\\alpha^i_k in feature_dataframe} \\kappa (\\alpha, \\alpha^i_k )\\kappa (\\alpha', \\alpha^i_k )\n # n is the length of feature_dataframe\n # feature_dataframe must be a pandas dataframe containing all the feature vectors of the events (all dimensions)\n output = 0\n if type(feature_dataframe) is pd.core.frame.DataFrame:\n for i in np.arange(feature_dataframe.shape[0]):\n output += kernels.kernelRBF(np.linalg.norm(alpha - feature_dataframe.iloc[i])) * kernels.kernelRBF(\n np.linalg.norm(alpha_prime - feature_dataframe.iloc[i]))\n else:\n raise ValueError('feature_dataframe should be a pandas dataframe.')\n return output / feature_dataframe.shape[0]\n\n def z_function(time_list, feature_list, feature_df, T='None', beta_t=1, kernel_t='exp', beta_a=1,\n kernel_a='gaussian'):\n exp_lasting_time_matrix = exp_lasting_time(time_list, T, beta_t, kernel_t)\n dimensionality = time_list.__len__()\n output = np.zeros((dimensionality + 1, dimensionality + 1))\n\n if T == 'None':\n T = np.max(time_list)\n elif type(T) not in [int, float, np.ndarray, np.float64]:\n raise ValueError('T should be a number.')\n\n output[0, 0] = T\n for i in np.arange(dimensionality):\n for k in np.arange(exp_lasting_time_matrix[i].__len__()):\n exp_time = exp_lasting_time_matrix[i][k]\n fea = feature_kernels_sum_over_all_dimension(feature_list[i][k], feature_df, beta_a, kernel_a)\n output[0, i + 
1] += (1. - exp_time)\n output[0, i + 1] *= fea\n output[i + 1, 0] += (1. - exp_time)\n output[i + 1, 0] *= fea\n\n for i in np.arange(dimensionality):\n for j in np.arange(dimensionality):\n for k in np.arange(exp_lasting_time_matrix[i].__len__()):\n for k_prime in np.arange(exp_lasting_time_matrix[j].__len__()):\n lower_bound = np.abs(time_list[i][k] - time_list[j][k_prime])\n output[i + 1, j + 1] += beta_t * (\n np.exp(-beta_t * lower_bound) - exp_lasting_time_matrix[i][k] *\n exp_lasting_time_matrix[j][k_prime]) / 2. * \\\n feature_kernels_sum_over_all_dimension_dual(feature_list[i][k],\n feature_list[j][k_prime],\n feature_df, beta_a,\n kernel_a)\n return output\n\n def y_function(time_list, feature_list, T='None', beta_t=1, kernel_t='exp', beta_a=1, kernel_a='gaussian'):\n dimensionality = time_list.__len__()\n output = np.zeros((dimensionality + 1, dimensionality))\n\n for i in np.arange(dimensionality):\n # output[0, i] = np.sum(time_list[i])\n output[0, i] = time_list[i].__len__()\n for i in np.arange(dimensionality):\n for j in np.arange(dimensionality):\n for k in np.arange(time_list[i].__len__()):\n k_prime_length = sum([t_k > time_list[i][k] for t_k in time_list[j]])\n for k_prime in np.arange(k_prime_length) + (time_list[j].__len__() - k_prime_length):\n output[i + 1, j] += kernels.KernelExp(time_list[j][k_prime] - time_list[i][k], beta_t) * \\\n kernels.kernelRBF(\n np.linalg.norm(feature_list[j][k_prime] - feature_list[i][k]),\n beta_a)\n return output\n\n time_list = []\n feature_list = []\n for i in np.arange(self.dimension):\n time_list.append(list(time_feature_list[time_feature_list['1dimension'] == i]['2timestamp']))\n feature_list.append(list(np.array(time_feature_list[time_feature_list['1dimension'] == i].iloc[:,3:])))\n feature_df = time_feature_list.iloc[:, 3:]\n\n z_mat = z_function(time_list, feature_list, feature_df, T, beta_t, kernel_t, beta_a, kernel_a)\n y_mat = y_function(time_list, feature_list, T, beta_t, kernel_t, beta_a, kernel_a)\n dimensiodnality = time_list.__len__()\n\n if NonNeg == False:\n out_mat = np.linalg.solve(z_mat, y_mat)\n return [out_mat[0, :]], [out_mat[1:, :].T]\n'''\n elif NonNeg == True:\n G = cvxopt.matrix(-np.identity(dimensiodnality + 1))\n h = cvxopt.matrix(np.zeros(dimensiodnality + 1))\n out_mat = np.zeros((dimensiodnality + 1, dimensiodnality))\n\n for i in np.arange(dimensiodnality):\n cvxopt.solvers.options['show_progress'] = False\n sol = cvxopt.solvers.qp(cvxopt.matrix(z_mat), cvxopt.matrix(-y_mat[:, i]), G, h)\n out_mat[:, i] = np.array(sol['x']).reshape((1, dimensiodnality + 1))[0]\n\n return [out_mat[0, :]], [out_mat[1:, :].T]\n else:\n raise ValueError('\\'NonNeg\\' is a boolean parameter')\n'''\n\n\nif __name__ == '__main__':\n d = 2\n T = 30\n# mu_true = np.random.rand(d)\n# mu_true = np.array([0.4, 0.6, 0.5])\n mu_true = np.random.rand(d)\n print(mu_true)\n alpha_true = 1./d*np.random.rand(d, d)\n print(alpha_true)\n beta_true = np.ones((d,d))\n\n self = HawkesProcessSimulation_feature(t = T, dimension = d, alpha = alpha_true, beta = beta_true, mu = mu_true, kernel_bandwidth=0.2)\n self.generate_CR()\n time_feature_list = self.generated_time\n\n result = LS_feature_fit(time_feature_list, T)\n print(result)\n\n\n","sub_path":"learning/hp_estimate_ls_features.py","file_name":"hp_estimate_ls_features.py","file_ext":"py","file_size_in_byte":7493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"416907925","text":"import maya.cmds as cmds\n\n\n# Detects empty 
resolutions in the scene\nclass ResolutionsCheck(object):\n name = \"Empty resolutions\" # Check's main title\n label = \"Checks asset for resolutions have any geometry under it\" # Short description\n failLabel = \"Delete resolution nul, or add a mesh to it.\" # Tip to show on fail\n failObjects = [] # List of object names causing fail. List will reset every time block runs.\n failType = 1 # 0=Error (must be fixed on fail), 1=Warning (is optional to fix on fail)\n \n # Un-comment out for auto-fix button to show up and run this\n def fix(self):\n if self.failObjects:\n cmds.delete(self.failObjects)\n \n # Return True if check passes, or False if it fails\n def run(self):\n scene_objs = cmds.ls(transforms=True, l=True)\n \n for obj in scene_objs:\n has_resolution_attr = cmds.attributeQuery(\"RESOLUTION_TYPE\", node=obj, exists=True)\n if not has_resolution_attr:\n continue\n \n children = cmds.listRelatives(obj, ad=True, f=True, type=\"mesh\") or []\n if children:\n continue\n \n self.failObjects.append(obj)\n \n if self.failObjects:\n return False\n return True\n","sub_path":"maya/general/tools/oa_shield/asset_export/resolutions_check.py","file_name":"resolutions_check.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"350340252","text":"import pytest\nimport time\n\nfrom tests.integration.aurorabridge_test.client import api\nfrom tests.integration.aurorabridge_test.util import (\n get_job_update_request,\n start_job_update,\n wait_for_rolled_back,\n)\n\n# disable auto rollback given its flaky behavior\npytestmark = [pytest.mark.aurorabridge,\n pytest.mark.random_order(disabled=True)]\n\n\ndef test__simple_auto_rolled_back(client):\n \"\"\"\n Create a job, then issue a bad config update and validate\n job is rolled back to previous version\n \"\"\"\n start_job_update(\n client,\n 'test_dc_labrat.yaml',\n 'start job update test/dc/labrat')\n\n # Add some wait time for lucene index to build\n time.sleep(10)\n\n res = client.start_job_update(\n get_job_update_request('test_dc_labrat_bad_config.yaml'),\n 'rollout bad config')\n wait_for_rolled_back(client, res.key)\n\n # validate job is rolled back to previous config\n res = client.get_tasks_without_configs(api.TaskQuery(\n jobKeys={res.key.job},\n statuses={api.ScheduleStatus.RUNNING}\n ))\n\n tasks = res.tasks\n assert len(tasks) == 3\n\n for t in tasks:\n for r in t.assignedTask.task.resources:\n if r.numCpus > 0:\n assert r.numCpus == 0.25\n elif r.ramMb > 0:\n assert r.ramMb == 128\n elif r.diskMb > 0:\n assert r.diskMb == 128\n else:\n assert False, 'unexpected resource {}'.format(r)\n","sub_path":"tests/integration/aurorabridge_test/test_rollback.py","file_name":"test_rollback.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"353419874","text":"\r\ndef euclids_algorithm(m: int, n: int) -> int:\r\n \"\"\"\r\n Given unsigned int m, n. 
Find the highest common denominator.\r\n :rtype: int\r\n :param m: first number in question\r\n :param n: second number in question\r\n :return: Highest common denominator\r\n \"\"\"\r\n # Check if m is greater than n to skip the first loop.\r\n if m < n:\r\n m, n = n, m\r\n\r\n # Initialize the remainder value.\r\n r = 1\r\n while r:\r\n r = m % n\r\n if r != 0:\r\n m = n\r\n n = r\r\n\r\n return n\r\n","sub_path":"euclids.py","file_name":"euclids.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425627420","text":"\"\"\"\nexcel文件批处理\njondou\ndongjinlong_china@yeah.net\n\"\"\"\nimport xlrd, xlwt\n\ndict01 = {}\n\n\ndef create():\n \"\"\"\n 这是一个存储格式\n \"\"\"\n workbook = xlwt.Workbook(encoding='utf-8') # 创建一个workbook 设置编码\n worksheet = workbook.add_sheet('shell2') # 创建一个worksheet\n row = 1 # 行等于0\n count = 1\n column = count # 列\n main_row = 0\n main_colimn = 0\n for k, v in dict01.items(): # 取字典的每一项\n line_label = (\"set\") # 行头名称组\n for item in line_label: # 取每一个值为行头名\n main_colimn += 1 # 每循环一次列加1\n worksheet.write(main_row, main_colimn, label=k + item) # 写行头的名称\n for j in range(len(v)): # 取键值对的值的长度\n for m in range(3):\n worksheet.write(row, column + main_colimn - 3, label=v[j][m]) # 写入数据\n column += count # 每写一次列加1\n column = count # 重置作用\n row += 1 # 行加1\n row = 1 # 重置作用\n # 保存\n workbook.save(\"%s\" % out_name) # 保存文件的命名\n\n\ndef get_table_info(nrows, table):\n \"\"\"\n 查询行的数据步长为三\n 把行数据存储到列表中\n 数据名为键数据为值存储到字典中\n \"\"\"\n for i in range(nrows):\n list02 = []\n for k in range(1, len(table.row_values(i)), 3):\n data1 = table.row_values(i)[k]\n data2 = table.row_values(i)[k + 1]\n data3 = table.row_values(i)[k + 2]\n list02.append([data1, data2, data3])\n dict01[table.row_values(i)[0]] = list02\n create()\n\n\ndef open_file(file):\n \"\"\"\n 打开一个文件,获取表格的行数\n \"\"\"\n data = xlrd.open_workbook(file) # 打开文件\n table = data.sheets()[0] # 打开第一张表格\n nrows = table.nrows # 获取表格的行数\n get_table_info(nrows, table)\n\n # 写入excel\n # 参数对应 行, 列, 值\n\n\nif __name__ == '__main__':\n file_name = input(\"请输入文件名,如果文件不在当前路径下,请附带路径:\")\n out_name = input(\"请输入编辑后的文件名,默认保存在当前路径:\")\n open_file(\"%s\" % file_name)\n","sub_path":"excel_format_conversion.py","file_name":"excel_format_conversion.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"429402619","text":"import math\ndef sieve(n):\n if n == 1:\n return[0]\n if n == 2:\n return[0,0]\n mark = [i % 2 for i in range(n)]\n mark[1] = 0\n mark[2] = 1\n for value in range(3,n,2):\n if mark[value] == 1:\n for i in range(value * 3, n, value * 2):\n mark[i] = 0\n return mark\n\ninpu1 = input().split()\ndef prime_factorization(num):\n primes = sieve(math.ceil(num ** .5)) # if primes are not prebuilt\n reduced = {}\n for i in range(len(primes)):\n if primes[i] == 1:\n prime = i\n if prime <= num and num % prime == 0:\n reduced[prime] = 0\n while num % prime == 0:\n reduced[prime] += 1\n num //= prime\n return reduced\ndef prime_factorization2(num):\n #primes = sieve(math.ceil(num ** .5))\n reduced = {}\n if num % 2 == 0:\n reduced[2] = 0\n while num % 2 == 0:\n reduced[2] += 1\n num //= 2\n for i in range(3, math.ceil(num ** .5)+1, 2):\n if num % i == 0:\n reduced[i] = 0\n while num % i == 0:\n reduced[i] += 1\n num //= i\n if num > 2:\n reduced[num] = 1\n return reduced\ndef factor1(a, k, n):\n counter = 1\n if k == counter:\n return 1\n if k == n:\n return a\n for i in 
range(2, int(a**0.5)+1):\n if a % i == 0:\n counter += 1\n if k == counter:\n return(i)\n if k == (n-counter+1):\n return int(a/i)\n return -1\n\nn = int(inpu1[0])\nk = int(inpu1[1])\na = prime_factorization2(n)\nsum =1\nfor i in a:\n sum *= (a[i]+1)\nprint(a)\nif k > sum:\n print(-1)\nelse:\n print(factor1(n, k, sum))\n\n","sub_path":"ACM/2017Winter/k-th divisor.py","file_name":"k-th divisor.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447820435","text":"from selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport time\nimport os\nimport math\nfrom icsd import queryer\nfrom selenium.webdriver.support.ui import Select\nfrom tqdm import tqdm\n\n\nclass CollectionCoder():\n def __init__(self, first_code, last_code):\n self.previous_code = 0\n self.code_range = \"{0}-{1}\".format(first_code, last_code)\n self.combined_csv_path = \"combined/comb_{}.csv\".format(self.code_range)\n\n def init_driver(self):\n self.q = queryer.Queryer(structure_source=\"A\")\n self.q.select_structure_source()\n textbox = self.q.driver.find_element_by_id(\n \"content_form:uiCodeCollection:input:input\")\n textbox.send_keys(self.code_range)\n self.q._run_query()\n self.q._check_list_view()\n\n def run(self):\n\n self.init_driver()\n\n select = Select(self.q.driver.find_element_by_id(\n \"display_form:listViewTable:j_id12\"))\n select.select_by_value('50')\n\n n_hits = self.q.hits\n n_pages = math.ceil(n_hits / 50)\n\n df_list = []\n\n for page in range(1, n_pages + 1):\n\n print(\"({0} of {1})\".format(page, n_pages))\n\n WebDriverWait(self.q.driver, 60).until(\n ec.text_to_be_present_in_element(\n (By.CLASS_NAME, 'ui-paginator-current'),\n \"({0} of {1})\".format(page, n_pages)\n )\n )\n\n self.q._wait_until_dialogue_disappears()\n self.q.wait_for_ajax()\n element = WebDriverWait(self.q.driver, 20).until(\n ec.presence_of_element_located((\n By.CSS_SELECTOR, \".ui-icon-seek-next\"\n )))\n _df = self._get_df()\n filename = \"each/{0}-p{1}outof{2}ps.csv\".format(\n self.code_range, page, n_pages)\n _df.to_csv(filename)\n\n df_list.append(_df)\n\n self.q.driver.execute_script(\"arguments[0].click();\", element)\n self.q._wait_until_dialogue_disappears()\n self.q.wait_for_ajax()\n\n combined_df = pd.concat(df_list)\n\n combined_df.to_csv(self.combined_csv_path)\n\n def _get_df(self):\n _df = self._get_current_df()\n while self.previous_code == _df['Coll. Code'].min():\n _df = self._get_current_df()\n time.sleep(0.1)\n\n self.previous_code = _df['Coll. 
Code'].min()\n print(_df)\n return(_df)\n\n def _get_current_df(self):\n table = self.q.get_html_table(idx=1)\n df = pd.read_html(table)[0]\n self.q.page_obatained = False # Refresh\n return(df)\n\n def quit(self):\n self.q.quit()\n\n\ndef main():\n for i in tqdm(range(100)):\n try:\n cc = CollectionCoder(i * 10000 + 1, i * 10000 + 10000)\n print(cc.combined_csv_path)\n if not os.path.exists(cc.combined_csv_path):\n cc.run()\n except queryer.QueryerError:\n with open(cc.combined_csv_path, \"w\") as f:\n f.write(\"\")\n\n print(\"No entry found in this step\")\n cc.quit()\n time.sleep(5)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"icsd/collection_coder.py","file_name":"collection_coder.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"473443270","text":"import json\nfrom collections import OrderedDict\nfrom unittest import skip\nfrom unittest.mock import MagicMock\n\nfrom django.test import TestCase\nfrom rest_framework.renderers import JSONRenderer\n\nfrom challenges.models import Challenge, MainCategory, ChallengeDescription, SubCategory, User, \\\n UserSubcategoryProficiency, Proficiency, Submission, Language\nfrom challenges.serializers import MainCategorySerializer, SubCategorySerializer, LimitedChallengeSerializer, \\\n LimitedSubCategorySerializer\nfrom challenges.tests.factories import ChallengeDescFactory, UserFactory, MainCategoryFactory\nfrom challenges.tests.base import TestHelperMixin\n\n\nclass CategoryModelTest(TestCase):\n def setUp(self):\n self.c1 = MainCategory.objects.create(name='Test')\n self.sub1 = SubCategory(name='Unit', meta_category=self.c1)\n self.sub2 = SubCategory(name='Mock', meta_category=self.c1)\n self.sub3 = SubCategory(name='Patch', meta_category=self.c1)\n self.sub1.save();self.sub2.save();self.sub3.save()\n\n def test_relationships(self):\n \"\"\" The categories should be connected\"\"\"\n self.assertIn(self.sub1, self.c1.sub_categories.all())\n self.assertEqual(self.sub1.meta_category, self.c1)\n\n def test_serialize(self):\n \"\"\" the Category should show all its subcategories \"\"\"\n expected_json = f'{{\"id\":{self.c1.id},\"name\":\"Test\",\"sub_categories\":[\"Unit\",\"Mock\",\"Patch\"]}}'\n received_data = JSONRenderer().render(MainCategorySerializer(self.c1).data)\n\n self.assertEqual(received_data.decode('utf-8'), expected_json)\n\n\nclass CategoryViewTest(TestCase):\n def setUp(self):\n self.c1 = MainCategoryFactory()\n self.c2 = MainCategoryFactory()\n self.c3 = MainCategoryFactory()\n self.c4 = MainCategoryFactory()\n self.c5 = MainCategoryFactory()\n\n def test_view_all_should_return_all_categories(self):\n response = self.client.get('/challenges/categories/all')\n self.assertEqual(response.data, MainCategorySerializer([self.c1, self.c2, self.c3, self.c4, self.c5],\n many=True).data)\n\n\nclass SubCategoryModelTest(TestCase, TestHelperMixin):\n def setUp(self):\n self.c1 = MainCategory.objects.create(name='Test')\n self.sub1 = SubCategory.objects.create(name='Unit', meta_category=self.c1)\n self.sub2 = SubCategory.objects.create(name='Mock', meta_category=self.c1)\n self.sub3 = SubCategory.objects.create(name='Patch', meta_category=self.c1)\n Proficiency.objects.create(name='starter', needed_percentage=0)\n self.create_user_and_auth_token()\n self.sample_desc = ChallengeDescFactory()\n\n # @skip # serialization does not currently work correctly as we want to return max score for challenge\n def test_serialize(self):\n \"\"\" Ths 
Subcategory should show all its challenges\"\"\"\n self.subcategory_progress = UserSubcategoryProficiency.objects.filter(subcategory=self.sub1,\n user=self.auth_user).first()\n c = Challenge.objects.create(name='TestThis', difficulty=5, score=10, description=self.sample_desc,\n test_case_count=5, category=self.sub1)\n c.save()\n Language.objects.create(name=\"Python\")\n self.subcategory_progress.user_score = 5\n self.subcategory_progress.save()\n self.sub1.max_score = c.score\n req_mock = MagicMock(user=self.auth_user)\n expected_data = {'name': 'Unit',\n 'challenges': LimitedChallengeSerializer(many=True).to_representation(self.sub1.challenges.all()),\n 'max_score': self.sub1.max_score,\n 'proficiency': {'name': self.subcategory_progress.proficiency.name,\n 'user_score': self.subcategory_progress.user_score},\n 'next_proficiency': {}\n }\n received_data = SubCategorySerializer(self.sub1, context={'request': req_mock}).data\n self.assertEqual(received_data, expected_data)\n\n def test_serialize_shows_next_proficiency(self):\n Proficiency.objects.create(name='starter3', needed_percentage=50)\n next_prof = Proficiency.objects.create(name='starter2', needed_percentage=30)\n req_mock = MagicMock(user=self.auth_user)\n received_data = SubCategorySerializer(self.sub1, context={'request': req_mock}).data\n\n expected_prof = {'name': next_prof.name, 'needed_percentage': next_prof.needed_percentage}\n self.assertEqual(received_data['next_proficiency'], expected_prof)\n\n def test_subcategory_max_score_is_updated(self):\n \"\"\"\n Test if the SubCategory's max score is updated on Challenge creation\n This is done to capture the fact that sometimes we'll have new challenges added or removed and\n it needs to reflex the max score in a subcategory\n \"\"\"\n c1 = Challenge(name='Sub1', difficulty=5, score=200, description=ChallengeDescFactory(),\n test_case_count=5, category=self.sub1)\n c2 = Challenge(name='Sub1_2', difficulty=5, score=200, description=ChallengeDescFactory(),\n test_case_count=5, category=self.sub1)\n c3 = Challenge(name='Sub2', difficulty=5, score=200, description=ChallengeDescFactory(),\n test_case_count=5, category=self.sub2)\n c1.save(); c2.save(); c3.save()\n\n self.sub1.refresh_from_db()\n self.sub2.refresh_from_db()\n self.sub3.refresh_from_db()\n self.assertEqual(self.sub1.max_score, 400)\n self.assertEqual(self.sub2.max_score, 200)\n self.assertEqual(self.sub3.max_score, 0)\n\n\nclass SubCategoryViewTest(TestCase, TestHelperMixin):\n def setUp(self):\n self.sample_desc = ChallengeDescription(content='What Up', input_format='Something',\n output_format='something', constraints='some',\n sample_input='input sample', sample_output='output sample',\n explanation='gotta push it to the limit')\n self.sample_desc.save()\n Proficiency.objects.create(name='starter', needed_percentage=0)\n self.c1 = MainCategory(name='Test')\n self.c1.save()\n self.sub1 = SubCategory(name='Unit Tests', meta_category=self.c1)\n self.sub1.save()\n self.create_user_and_auth_token()\n\n self.c = Challenge.objects.create(name='TestThis', difficulty=5, score=10, description=self.sample_desc, test_case_count=5, category=self.sub1)\n\n def test_view_subcategory_detail_should_show(self):\n self.c.user_max_score = 0\n req_mock = MagicMock(user=self.auth_user)\n expected_data = SubCategorySerializer(instance=self.sub1, context={'request': req_mock}).data\n\n response = self.client.get('/challenges/subcategories/{}'.format(self.sub1.name),\n HTTP_AUTHORIZATION=self.auth_token)\n\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, expected_data)\n\n def test_view_unauthorized_should_401(self):\n response = self.client.get('/challenges/subcategories/{}'.format(self.sub1.name))\n self.assertEqual(response.status_code, 401)\n\n def test_view_invalid_subcategory_should_404(self):\n response = self.client.get('/challenges/subcategories/{}'.format('\" OR 1=1;'),\n HTTP_AUTHORIZATION=self.auth_token)\n self.assertEqual(response.status_code, 404)\n\n\nclass CategorySubcategoriesListViewTests(TestCase, TestHelperMixin):\n def setUp(self):\n self.sample_desc = ChallengeDescription(content='What Up', input_format='Something',\n output_format='something', constraints='some',\n sample_input='input sample', sample_output='output sample',\n explanation='gotta push it to the limit')\n self.sample_desc.save()\n Proficiency.objects.create(name='starter', needed_percentage=0)\n Proficiency.objects.create(name='starter 2', needed_percentage=50)\n self.c1 = MainCategory.objects.create(name='Test bate')\n self.c2 = MainCategory.objects.create(name='TANK')\n self.sub1 = SubCategory.objects.create(name='Unit Tests', meta_category=self.c1)\n self.sub2 = SubCategory.objects.create(name='Unit Tests II', meta_category=self.c1)\n self.sub4 = SubCategory.objects.create(name='TANK 2', meta_category=self.c2)\n self.create_user_and_auth_token()\n self.c = Challenge.objects.create(name='TestThis', difficulty=5, score=10, description=self.sample_desc, test_case_count=5, category=self.sub1)\n\n def test_returns_serialized_data(self):\n req_mock = MagicMock(user=self.auth_user)\n response = self.client.get(f'/challenges/categories/{self.c1.id}/subcategories',\n HTTP_AUTHORIZATION=self.auth_token)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, LimitedSubCategorySerializer(many=True,\n instance=[self.sub1, self.sub2],\n context={'request': req_mock}).data)\n\n def test_works_with_name(self):\n req_mock = MagicMock(user=self.auth_user)\n response = self.client.get(f'/challenges/categories/{self.c1.name}/subcategories',\n HTTP_AUTHORIZATION=self.auth_token)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, LimitedSubCategorySerializer(many=True,\n instance=[self.sub1, self.sub2],\n context={'request': req_mock}).data)\n\n def test_invalid_category_id_returns_404(self):\n response = self.client.get(f'/challenges/categories/111/subcategories',\n HTTP_AUTHORIZATION=self.auth_token)\n self.assertEqual(response.status_code, 404)\n\n def test_returns_empty_data_if_no_subcategories(self):\n c3 = MainCategory.objects.create(name='TANK 3')\n response = self.client.get(f'/challenges/categories/{c3.id}/subcategories',\n HTTP_AUTHORIZATION=self.auth_token)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, [])\n\n def test_requires_authentication(self):\n response = self.client.get(f'/challenges/categories/{self.c1.id}/subcategories')\n self.assertEqual(response.status_code, 401)\n","sub_path":"deadline_/challenges/tests/test_categories.py","file_name":"test_categories.py","file_ext":"py","file_size_in_byte":10941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"533386090","text":"class Image(object):\n \"\"\" Class representing a RECONSTRUCT Image.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\" Apply given keyword arguments as instance attributes.\n \"\"\"\n self.src = kwargs.get(\"src\")\n self.mag = kwargs.get(\"mag\")\n self.contrast = 
kwargs.get(\"contrast\")\n self.brightness = kwargs.get(\"brightness\")\n self.red = kwargs.get(\"red\")\n self.green = kwargs.get(\"green\")\n self.blue = kwargs.get(\"blue\")\n self.transform = kwargs.get(\"transform\")\n\n # RECONSTRUCT has a Contour for Images\n self.name = kwargs.get(\"name\")\n self.hidden = kwargs.get(\"hidden\")\n self.closed = kwargs.get(\"closed\")\n self.simplified = kwargs.get(\"simplified\")\n self.border = kwargs.get(\"border\")\n self.fill = kwargs.get(\"fill\")\n self.mode = kwargs.get(\"mode\")\n self.points = list(kwargs.get(\"points\", []))\n\n # Metadata\n self._path = kwargs.get(\"_path\")\n\n def __eq__(self, other):\n \"\"\" Allow use of == operator.\n \"\"\"\n return (\n self.src == other.src and\n self.brightness == other.brightness and\n self.contrast == other.contrast and\n self.name == other.name and\n self.closed == other.closed and\n self.simplified == other.simplified and\n self.border == other.border and\n self.fill == other.fill and\n self.mode == other.mode and\n self.points == other.points\n )\n\n def __ne__(self, other):\n \"\"\" Allow use of != operator.\n \"\"\"\n return not self.__eq__(other)\n\n def attributes(self):\n \"\"\" Return relevent attributes as dict.\n \"\"\"\n return {\n \"src\": self.src,\n \"mag\": self.mag,\n \"contrast\": self.contrast,\n \"brightness\": self.brightness,\n \"path\": self._path or \"\" + self.src\n }\n","sub_path":"pyrecon/classes/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"65446407","text":"def writeFile(fname, title, author, tags, slides):\n order = []\n for tag in tags:\n if tag.lower() == \"solo\":\n order.append('s')\n else:\n tag_parts = tag.split(' ')\n order.append(tag_parts[0][0] + tag_parts[1])\n order = list(set(order)) #remove duplicates\n \n with open(fname, 'w') as f:\n f.write(\"\\n\")\n f.write(\"\" + title + \"\\n\")\n f.write(\"\" + author + \"\\n\")\n f.write(\"\\n\")\n f.write(\"\" + \", \".join(order) + \"\\n\")\n f.write(\"\\n\")\n for i in range(len(tags)):\n f.write(\"<\" + tags[i] + \"/>\\n\")\n f.write(str(slides[i]) + \"\\n\\n\")\n \n \n\n","sub_path":"LifeVerse.py","file_name":"LifeVerse.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273256446","text":"\n__all__ = [ 'Base_RM_Device' ]\n\nfrom collections import OrderedDict\n\nfrom .. interface import IRegMapDevice\nfrom .. 
common import Offline_AccessManager, \\\n RegisterMapInterface, \\\n RegMapAccessError, \\\n RegMapNameError, \\\n RegMapAddressError, \\\n RegMapValueError\nfrom register import Base_RM_Register\n\nclass Base_RM_Device(IRegMapDevice):\n\n def __init__(self, rmio, label, name, svd_info):\n self.__dict__['zz_frozen'] = False\n self.zz_name = name\n self.zz_label = label\n self.zz_svd_info = svd_info\n if rmio is None:\n self.offline = True\n accessMgr = Offline_AccessManager(self.zz_label)\n self.zz_rmio = RegisterMapInterface(accessMgr.ReadRegister,\n accessMgr.WriteRegister,\n simulated=True)\n else:\n self.offline = False\n self.zz_rmio = rmio\n self.zz_pdict = OrderedDict()\n self.zz_reg_addr_to_name = {}\n\n def __setattr__(self, name, value):\n if self.__dict__['zz_frozen']:\n if name not in self.zz_pdict:\n raise AttributeError(\"ERROR: Invalid peripheral '{}'\\n\".format(name))\n else:\n raise AttributeError(\"ERROR: Unable to set '{}' to '{}'\\n\".format(name, value))\n else:\n self.__dict__[name] = value\n\n def __repr__(self):\n out = \"{} ({} peripherals)\\n\".format(self.zz_name, len(self.zz_pdict))\n for key in sorted(self.zz_pdict.iterkeys()):\n out += \" {}\\n\".format(key)\n return out\n\n @property\n def svdInfo(self):\n return self.zz_svd_info\n\n def addressToName(self, address):\n if not isinstance(address, (int, long)):\n raise RegMapAddressError(\"Invalid address '{}'\".format(address))\n try:\n return self.zz_reg_addr_to_name[address]\n except KeyError:\n raise RegMapAddressError(\"No register found for address {:#010x}\".format(address))\n\n def nameToAddress(self, name):\n reg_or_field = self.getObjectByName(name)\n return reg_or_field.baseAddress + reg_or_field.addressOffset\n\n def writeByName(self, name, value):\n if value is None:\n raise RegMapValueError(\"None value for '{}'\".format(name))\n self.getObjectByName(name).io = value\n\n def readByName(self, name):\n return self.getObjectByName(name).io\n\n def getObjectByName(self, name):\n if len(name.split('.')) == 2:\n per_name, reg_name = name.split('.')\n try:\n return self.zz_pdict[per_name].zz_rdict[reg_name]\n except KeyError:\n raise RegMapNameError(\"Invalid register name '{}'\".format(name))\n elif len(name.split('.')) == 3:\n per_name, reg_name, field_name = name.split('.')\n try:\n return self.zz_pdict[per_name].zz_rdict[reg_name].zz_fdict[field_name]\n except KeyError:\n raise RegMapNameError(\"Invalid register field name '{}'\".format(name))\n else:\n raise RegMapNameError(\"Invalid name '{}', must be PER.REG or PER.REG.FIELD\".format(name))\n\n def isReadable(self, name):\n obj = self.getObjectByName(name)\n return obj.isReadable()\n\n def isWriteable(self, name):\n obj = self.getObjectByName(name)\n return obj.isWriteable()\n\n def getRegisterNameFromFieldName(self, name):\n periods = len(name.split('.'))\n if periods == 2 or periods == 3:\n return '.'.join(name.split('.')[:2])\n else:\n raise RegMapNameError(\"Invalid name '{}', must be PER.REG or PER.REG.FIELD\".format(name))\n\n def forceRegister(self, name, value):\n if not self.offline:\n raise RegMapAccessError(\"Cannot directly assign within a live connection\")\n reg = self.getObjectByName(name)\n if not isinstance(reg, Base_RM_Register):\n raise RegMapNameError(\"Name must be register name\")\n self.zz_rmio.forceRegister(reg, value)\n\n def clearAccessedFlags(self):\n for key in sorted(self.zz_pdict.iterkeys()):\n self.zz_pdict[key].clearAccessedFlags()\n\n def setAccessedFlags(self):\n for key in sorted(self.zz_pdict.iterkeys()):\n 
self.zz_pdict[key].setAccessedFlags()\n\n def getAccessedRegisterNames(self):\n nameList = []\n for key in sorted(self.zz_pdict.iterkeys()):\n nameList.extend(self.zz_pdict[key].getAccessedRegisterNames())\n return nameList\n\n def getAccessedFieldNames(self):\n nameList = []\n for key in sorted(self.zz_pdict.iterkeys()):\n nameList.extend(self.zz_pdict[key].getAccessedFieldNames())\n return nameList\n\n def writeData(self, dataDict):\n for key in dataDict:\n self.writeByName(key, dataDict[key])\n\n def readData(self, dataDict):\n for key in dataDict:\n dataDict[key] = self.readByName(key)\n\n def verifyData(self, expectedDict):\n diffDict = {}\n for key in expectedDict:\n actualValue= self.readByName(key)\n if actualValue != expectedDict[key]:\n diffDict[key] = actualValue\n return diffDict\n\n def readAccessedRegisters(self):\n valueDict = {}\n for key in self.getAccessedRegisterNames():\n valueDict[key] = self.readByName(key)\n return valueDict\n\n def readAccessedFields(self):\n valueDict = {}\n for key in self.getAccessedFieldNames():\n valueDict[key] = self.readByName(key)\n return valueDict\n\n def buildRegFilterList(self, filename, listname='regFilterList'):\n filterList = []\n with open(filename, 'w') as outFH:\n outFH.write(\"\\n# Register Map Register Name Filter List\\n\")\n outFH.write(\"\\n{} = [\\n\".format(listname))\n for key in sorted(self.zz_pdict.iterkeys()):\n self.zz_pdict[key].buildRegFilterList(outFH, filterList)\n outFH.write(\"]\\n\\n\")\n return filterList\n\n def dump(self, filename, regFilterList=None):\n valueDict = {}\n with open(filename, 'w') as outFH:\n outFH.write(\"\\nREGISTER_DUMP = {\\n\")\n if regFilterList:\n for name in sorted(regFilterList):\n obj = self.getObjectByName(name)\n if isinstance(obj, Base_RM_Register):\n obj.dump(outFH, valueDict)\n else:\n outFH.write(\" # Skipping invalid reg name '{}'\\n\".format(name))\n else:\n for key in sorted(self.zz_pdict.iterkeys()):\n self.zz_pdict[key].dump(outFH, valueDict)\n outFH.write(\"}\\n\\n\")\n return valueDict\n","sub_path":".closet/jython.configurator.efr32/1.0.0.201606231656-435/host_py_rm_studio_internal/host_py_rm_studio_internal_efr32xg2xfull/revA0/static/base/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"613037686","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# hourglass function\n#a b c\n# d\n#e f g\n\n\n\n# Complete the hourglassSum function below.\ndef hourglassSum(arr):\n max_sum = -9 * 7 # total 7 elements in hourglass function and minimum value each element can have is -9. 
\n # So to compare the minimum sum is -63.\n for i in range(len(arr)-2):\n for j in range(len(arr)-2):\n a = arr[i][j]\n b = arr[i][j+1]\n c = arr[i][j+2]\n d = arr[i+1][j+1]\n e = arr[i+2][j]\n f = arr[i+2][j+1]\n g = arr[i+2][j+2]\n hgs = a+b+c+d+e+f+g\n max_sum = max(max_sum,hgs)\n \n return max_sum\n \n\n\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n arr = []\n\n for i in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n\n result = hourglassSum(arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"2D_array_hourglass_function.py","file_name":"2D_array_hourglass_function.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"380867643","text":"import sqlite3\n\n###creating a connection to a database/creating new database if not exists\n\nconn = sqlite3.connect(\"new.db\")\n\n###setting a coursor object to execute queries\n\ncur = conn.cursor()\n\n\n###executing sql query through coursor\ncur.execute(\"INSERT INTO population VALUES('New York City','NY',8200000)\")\ncur.execute(\"INSERT INTO population VALUES('San Francisco','CA',800000)\")\n\n###commit changes to a database\n\nconn.commit()\n\n###always close a connection to a database\n\nconn.close()\n\t\t","sub_path":"sqlb.py","file_name":"sqlb.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362100117","text":"from healthid.apps.orders.models.suppliers import Suppliers\nfrom healthid.apps.products.models import (DispensingSize, Product,\n ProductCategory)\n\n\ndef map_quickbooks_data_helper(row, business, user, default_quickbox_supplier):\n product_name = row.get('Item Name')\n brief_description = row.get('Brief Description') or ''\n item_description = row.get('Item Description') or ''\n manufacturer = row.get('Manufacturer') or 'n/a'\n backup_supp = row.get('Vendor Name 2')\n price = row.get('Regular Price')\n base_unit_of_measure = row.get(\n 'Base Unit of Measure')\n attributes = row.get('Attributes')\n preferred_supp = row.get('Vendor Name')\n department_name = row.get('Department Name')\n dispensing_size = base_unit_of_measure or attributes\n description = (brief_description+' '+item_description) if (\n brief_description+item_description) else 'n/a'\n qty_1 = row.get('Qty 1')\n\n product_meta_args = {\n 'Alternate Lookup': row.get('Alternate Lookup'),\n 'Size': row.get('Size'),\n 'Average Unit Cost': row.get('Average Unit Cost'),\n 'MSRP': row.get('MSRP'),\n 'Custom Price 1': row.get('Custom Price 1'),\n 'Custom Price 2': row.get('Custom Price 2'),\n 'Custom Price 3': row.get('Custom Price 3'),\n 'Custom Price 4': row.get('Custom Price 4'),\n 'UPC ': row.get('UPC'),\n 'Order By Unit': row.get('Order By Unit'),\n 'Sell By Unit': row.get('Sell By Unit'),\n 'Item Type': row.get('Item Type'),\n 'Income Account': row.get('Income Account'),\n 'COGS Account': row.get('COGS Account'),\n 'Asset Account': row.get('Asset Account'),\n 'Print Tags': row.get('Print Tags'),\n 'Unorderable': row.get('Unorderable'),\n 'Serial Tracking': row.get('Serial Tracking'),\n 'Department Code': row.get('Department Code'),\n 'Vendor Code': row.get('Vendor Code'),\n 'Qty 2': row.get('Qty 2'),\n 'On Order Qty': row.get('On Order Qty'),\n 'unit_cost': row.get('Order Cost')\n }\n preferred_supplier = Suppliers.objects.filter(\n supplier_id=preferred_supp).first()\n backup_supplier = 
Suppliers.objects.filter(\n supplier_id=backup_supp).first()\n check_product_duplicates = Product.objects.filter(\n product_name=product_name,\n business_id=business.id\n )\n if not check_product_duplicates and product_name:\n get_product_category, create_product_category =\\\n ProductCategory.objects.get_or_create(\n name=department_name, business_id=business.id)\n product_category = get_product_category or create_product_category\n if dispensing_size:\n get_dispensing_size_id, create_dispensing_size_id =\\\n DispensingSize.objects.get_or_create(\n name=dispensing_size)\n dispensing_size = get_dispensing_size_id or create_dispensing_size_id\n product_instance = Product(\n product_name=product_name,\n description=description,\n brand='n/a',\n manufacturer=manufacturer,\n backup_supplier_id=backup_supplier.id if backup_supplier else None,\n dispensing_size_id=dispensing_size.id if dispensing_size else None,\n preferred_supplier_id=preferred_supplier.id if preferred_supplier else default_quickbox_supplier.id,\n product_category_id=product_category.id if product_category else None,\n loyalty_weight=2,\n sales_price=price,\n vat_status=True,\n is_approved=True,\n business_id=business.id,\n is_active=True,\n user_id=user.id if user else None\n )\n\n return {'product_instance': product_instance, 'product_meta_args': product_meta_args}\n\n return None\n","sub_path":"healthid/utils/product_utils/csv_upload_helper.py","file_name":"csv_upload_helper.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1188081","text":"import re\r\nimport sys\r\nimport MySQLdb\r\nfrom datetime import date\r\ndb= MySQLdb.connect(host=\"localhost\", # host name\r\n user=\"root\", # username\r\n passwd=\"hadoop123\", # password\r\n db=\"NCDCTemperatureDb\") # name of the data base\r\n\r\ncursor=db.cursor()\r\nf = open(\"1986.txt\",\"r\") #\r\n\r\nsql =\"CREATE TABLE NCDC_1986 (usWStnID INT, wbanWStnID INT, obsDate DATE,\" \r\nsql+=\"obsHour VARCHAR(10), lat VARCHAR(10), longi VARCHAR(10), elevation VARCHAR(10),\" \r\nsql+=\"wDirection VARCHAR(10), WDir_qCode VARCHAR(1),SkyCeliengHeight INT, sky_qcode varchar(1),\"\r\nsql+=\"vDist VARCHAR(6), vDist_qCode VARCHAR(1),airTemp INT, aTemp_qCode VARCHAR(1), dewPt VARCHAR(10),\"\r\nsql+=\"dewPt_qCode VARCHAR(1), atmPres INT, atmP_qCode VARCHAR(1))\"\r\nrows=cursor.execute(sql)\r\ndb.commit()\r\nMISSING = \"9999\"\r\nfor line in f:\r\n tempid= line[0:4]\r\n usWStnID = line[4:10]\r\n wbanWStnID= line[10:15]\r\n obsDate=line[15:23]\r\n obsHour=line[23:27]\r\n lat=line[28:34]\r\n longi=line[34:41]\r\n elevation=line[46:51]\r\n wDirection=line[60:63]\r\n WDir_qCode=line[63]\r\n SkyCeliengHeight= line[70:75]\r\n sky_qcode= line[76]\r\n vDist=line[78:84]\r\n vDist_qCode=line[84]\r\n if(line[87] == '+'):\r\n airTemp=line[88:92]\r\n else:\r\n airTemp=line[87:92]\r\n aTemp_qCode=line[92]\r\n dewPt=line[93:98]\r\n dewPt_qCode=line[99]\r\n atmPres=line[99:104]\r\n atmP_qCode=line[104]\r\n mydate = date(int(obsDate[0:4]),int(obsDate[4:6]),int(obsDate[6:]))\r\n\r\n sql= \"insert into NCDC_1986 VALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')\" %(usWStnID,wbanWStnID,\\\r\n mydate,obsHour,lat,longi,elevation,wDirection,WDir_qCode,SkyCeliengHeight,sky_qcode,vDist,vDist_qCode,airTemp,aTemp_qCode,dewPt,\\\r\n dewPt_qCode,atmPres,atmP_qCode)\r\n #print(sql)\r\n if (airTemp != MISSING and re.match(\"[01459]\",aTemp_qCode)):\r\n 
rows=cursor.execute(sql)\r\n db.commit()\r\nf.close()\r\ndb.close()\r\n","sub_path":"dataInsert1986.py","file_name":"dataInsert1986.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222911017","text":"from django.db import models\nimport uuid\n\n\nPRODUCT_CATEGORY_CHOICES = (\n ('bjj', 'BJJ'),\n ('mma', 'MMA'),\n ('muay thai', 'MUAY THAI'),\n)\n\nPRODUCT_TYPE_CHOICES = (\n ('shorts', 'SHORTS'),\n ('gloves', 'GLOVES'),\n ('shin guards', 'SHIN GUARDS'),\n ('mouth guards', 'MOUTH GUARDS'),\n ('handwraps', 'HANDWRAPS'),\n ('gi', 'GI'),\n ('belts', 'BELTS'),\n ('rash guards', 'RASH GUARDS'),\n ('spats', 'SPATS'),\n)\n\nGENDER_CHOICES = (\n ('men', 'MEN'),\n ('women', 'WOMEN'),\n ('unisex', 'UNISEX'),\n)\n\nALLOWED_SIZES = (\n ('xs', 'XS'),\n ('s', 'S'),\n ('m', 'M'),\n ('l', 'L'),\n ('xl', 'XL'),\n ('false', 'FALSE'),\n)\n\n\nclass Product(models.Model):\n product_id = models.UUIDField(\n primary_key=True, default=uuid.uuid4, editable=False)\n name = models.CharField(max_length=254, blank=False, null=False)\n description = models.TextField(null=False, blank=False)\n gender = models.CharField(\n max_length=20, choices=GENDER_CHOICES, blank=False, null=False)\n sale = models.BooleanField(null=False, blank=False, default=False)\n rrp = models.DecimalField(\n verbose_name='Recommended Retail Price',\n max_digits=6, decimal_places=2, blank=False, null=False)\n price = models.DecimalField(\n max_digits=6, decimal_places=2, blank=False, null=False)\n category = models.CharField(\n max_length=20, choices=PRODUCT_CATEGORY_CHOICES,\n blank=False, null=False)\n product_type = models.CharField(\n max_length=20, choices=PRODUCT_TYPE_CHOICES, blank=False, null=False)\n image = models.ImageField(upload_to='product_images/',\n blank=False, null=False,\n default='misc/image-not-found.jpg')\n\n def __str__(self):\n return self.name\n\n\nclass ProductSizesStock(models.Model):\n product_sizes_stock_id = models.AutoField(primary_key=True)\n product_id = models.ForeignKey(\n 'Product', on_delete=models.CASCADE, null=True)\n size = models.CharField(\n max_length=10, choices=ALLOWED_SIZES, null=True)\n stock = models.PositiveIntegerField()\n\n def __str__(self):\n my_tuple = (str(self.product_id), str(self.size))\n return ' | '.join(my_tuple)\n\n class Meta:\n verbose_name_plural = 'Product Sizes and Stock'\n","sub_path":"products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267515605","text":"# helper fn to partition L, pivot, R\ndef partitioner(arr):\n left = []\n right = []\n pivot = arr[0]\n\n # iterate over the arr\n ## if less than \n for num in arr[1:]:\n if num <= pivot:\n left.append(num)\n if num > pivot:\n right.append(num)\n return left, right, pivot\n\n\ndef quick_sort(arr):\n if len(arr) == 0:\n return arr\n\n # partition here into left, pivot, right\n #divide\n left, right, pivot = partitioner(arr)\n\n # and conquer!\n return quick_sort(left) + pivot+ quick_sort(right)\n","sub_path":"src/quick_sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"537989916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 6 14:26:56 2018\n\n@author: Ben Tomhave\n\"\"\"\n\nimport 
pandas\ncsvPath=r\"C:\\RCP_Data\\Output\\correctionsOutputData.csv\" #From output of query \ndf=pandas.read_csv(csvPath)\nimport collections\nfrom itertools import chain\n\n########\n# F1 #\n######## \ndef getExpandedClientDict(csvFilePath,keyIndexColumn,startRouteIndexColumns,endRouteIndexColumns): \n '''\n Creates a list of dictionaries comprised of one key (client) and the associated values (routes)\n but where client dictionaries are not unique \n IN: csvFilePath (String), column index number for the dictionary keys (int),\n column index numbers for the dictionary values for start and end of range (int)\n OUT: List of Dictionaries\n '''\n import csv\n dictPerStop=[]\n with open(csvFilePath) as f:\n reader = list(csv.reader(f))\n for row in reader[1:]:\n temp_dict = {}\n temp_dict[row[keyIndexColumn]] =row[startRouteIndexColumns:endRouteIndexColumns]\n dictPerStop.append(temp_dict)\n return(dictPerStop)\n \n########\n# F2 #\n######## \ndef getClientDict(dictList):\n '''\n Returns dictionary of all routes within threshold distance (0.25miles) of a client\n IN: List of Dictionaries with the key of each dictionary as the client ID\n Can be multiple dictionaries for the same client (same dict key)\n OUT: List of dictionaries with only one dict per client\n '''\n from collections import defaultdict\n finalDict=defaultdict(list)\n for d in dictList:\n for key, value in d.items():\n for i,val in enumerate(value):\n if val!='NA' and val not in finalDict[key]:\n finalDict[key].append(val)\n return(finalDict)\n \n#------------------------------------------------------------------------------------ \n#------------------------------------------------------------------------------------ \n \n#Create cxpanded list of all dictionaries (multiple per client because one per stop) \ndictPerStop=getExpandedClientDict(csvPath,2,10,30) \n \n#Create dictionary with client as key and routes within 0.25miles as values\nroutesPerClientDict=getClientDict(dictPerStop)\n\n#Check for specific clients' route access\nclientId='138299'\nprint(routesPerClientDict[clientId])\n\n#Create counter that prints a tuple with (route#, occurence of route w/in distance [0.25miles], of clients)\ncounter_dict = collections.Counter(chain(*routesPerClientDict.values())).most_common()\nprint(counter_dict)\n\n\n#PERHAPS IN FUTURE: Color code route shapes based off of this count","sub_path":"routesPerPerson.py","file_name":"routesPerPerson.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338397189","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 3 00:32:47 2019\n\n@author: Brandon\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nl=1\nn=2\nx=-l\nX=np.arange(-l,l,.001)\ndx=.001\npsi=np.sqrt(2/l)*np.sin(n*np.pi*x/l)\ndpsi=np.sqrt(2)*np.pi*n*np.cos(n*np.pi*x/l)/(l**1.5)\nd2psi=np.sqrt(2)*np.pi**2*n**2*np.sin(n*np.pi*x/l)/(l**2.5)\nPSI=[]\nwhile x<=(l):\n d2psi=np.sqrt(2)*np.pi**2*n**2*np.sin(n*np.pi*x/l)/(l**2.5)\n dpsi=dpsi+d2psi*dx\n psi=psi+dpsi*dx\n PSI.append(psi)\n x=x+dx\nplt.plot(X,PSI)\nplt.show() \n\npsii=np.sqrt(2/l)*np.sin(n*np.pi*X/l)\n\nplt.plot(X,psii)\nplt.show()\n\ne0=8.8541878128e-12\necharge=1.60217662e-19\nhbar=1.0545718e-34\nme=9.10938356e-31\na0=5.29177210903e-11\nc=299792458\nh=6.6260693e-34\n\ndef v(n):\n vn=hbar/(me*a0*n)\n return 
vn\n\nx0=a0*np.cos(45)\ny0=a0*np.sin(30)\nz0=0\nvx0=-1e-10\nvy0=1e-10\nvz0=0\n\nm=1e-5\nt=0\ndt=.0001\nx=x0\ny=y0\nz=z0\nvx=vx0\nvy=vy0\nvz=vz0\nX=[x]\nY=[y]\nZ=[z]\nT=[t]\nVx=[vx]\nVy=[vy]\nVz=[vz]\n\n\nwhile t<=10:\n Fx=-echarge**2*x/((x**2+y**2+z**2)**1.5)\n Fy=-echarge**2*y/((x**2+y**2+z**2)**1.5)\n Fz=-echarge**2*z/((x**2+y**2+z**2)**1.5)\n vx=vx+Fx/m*dt\n vy=vy+Fy/m*dt\n vz=vz+Fz/m*dt\n x=x+vx*dt\n y=y+vy*dt\n z=z+vz*dt\n X.append(x)\n Y.append(y)\n Z.append(z)\n T.append(t)\n t=t+dt\n \nplt.plot(X,Y)\nplt.show()\n\n\n\n\n\n","sub_path":"New folder/practice5.py","file_name":"practice5.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267699859","text":"# Variavel Global\nvar = 123\n\ndef funcao():\n # Variavel Local\n var_local = 456\n # criei uma variavel local com mesmo nome de uma global\n var = 789\n print(\"Var Local com mesmo nome da global\", var)\n print(\"Var Local\", var_local)\n print(\"Var Global dentro da funcao\", globals()['var'])\n\nif __name__ == \"__main__\":\n print(\"Var Global fora da funcao\", var)\n funcao()","sub_path":"functions/variavel global.py","file_name":"variavel global.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88046864","text":"from django.conf.urls import url, include\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^singin$', views.singin, name=\"singin\"),\n url(r'^singup$', views.singup, name=\"singup\"),\n url(r'^singinteacher$', views.singinteacher, name=\"singinteacher\"),\n url(r'^singupteacher$', views.singupteacher, name=\"singupteacher\"),\n]\n","sub_path":"sing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294573863","text":"# Copyright 2015 Huawei Technologies Co., Ltd.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# Copyright (c) 2017 Wind River Systems, Inc.\n#\n\nimport collections\n\nfrom keystoneauth1 import loading\nfrom keystoneauth1 import session\n\nfrom keystoneclient.v3 import client as keystone_client\n\nfrom oslo_config import cfg\n\n\nclass EndpointCache(object):\n def __init__(self):\n self.endpoint_map = collections.defaultdict(dict)\n self.admin_session = None\n self.keystone_client = None\n self._update_endpoints()\n\n @staticmethod\n def _get_endpoint_from_keystone(self):\n proj_domain_name = cfg.CONF.keystone_authtoken.project_domain_name\n loader = loading.get_plugin_loader(\n cfg.CONF.keystone_authtoken.auth_type)\n auth = loader.load_from_options(\n auth_url=cfg.CONF.keystone_authtoken.auth_uri,\n username=cfg.CONF.keystone_authtoken.username,\n user_domain_name=cfg.CONF.keystone_authtoken.user_domain_name,\n password=cfg.CONF.keystone_authtoken.password,\n project_name=cfg.CONF.keystone_authtoken.project_name,\n project_domain_name=proj_domain_name\n )\n self.admin_session = session.Session(auth=auth)\n cli = keystone_client.Client(session=self.admin_session)\n self.keystone_client = cli\n\n service_id_name_map = {}\n for service in cli.services.list():\n service_dict = service.to_dict()\n service_id_name_map[service_dict['id']] = service_dict['name']\n\n region_service_endpoint_map = {}\n for endpoint in cli.endpoints.list():\n endpoint_dict = endpoint.to_dict()\n if endpoint_dict['interface'] != 'internal':\n continue\n region_id = endpoint_dict['region']\n service_id = endpoint_dict['service_id']\n url = endpoint_dict['url']\n service_name = service_id_name_map[service_id]\n if region_id not in region_service_endpoint_map:\n region_service_endpoint_map[region_id] = {}\n region_service_endpoint_map[region_id][service_name] = url\n return region_service_endpoint_map\n\n def _get_endpoint(self, region, service, retry):\n if service not in self.endpoint_map[region]:\n if retry:\n self.update_endpoints()\n return self._get_endpoint(region, service, False)\n else:\n return ''\n else:\n return self.endpoint_map[region][service]\n\n def _update_endpoints(self):\n endpoint_map = EndpointCache._get_endpoint_from_keystone(self)\n\n for region in endpoint_map:\n for service in endpoint_map[region]:\n self.endpoint_map[region][\n service] = endpoint_map[region][service]\n\n def get_endpoint(self, region, service):\n \"\"\"Get service endpoint url.\n\n :param region: region the service belongs to\n :param service: service type\n :return: url of the service\n \"\"\"\n return self._get_endpoint(region, service, True)\n\n def update_endpoints(self):\n \"\"\"Update endpoint cache from Keystone.\n\n :return: None\n \"\"\"\n self._update_endpoints()\n\n def get_all_regions(self):\n \"\"\"Get region list.\n\n return: List of regions\n \"\"\"\n return self.endpoint_map.keys()\n\n def get_session_from_token(self, token, project_id):\n \"\"\"Get session based on token to communicate with openstack services.\n\n :param token: token with which the request is triggered.\n :param project_id: UUID of the project.\n\n :return: session object.\n \"\"\"\n loader = loading.get_plugin_loader('token')\n auth = loader.load_from_options(auth_url=cfg.CONF.cache.auth_uri,\n token=token, project_id=project_id)\n sess = session.Session(auth=auth)\n return 
sess\n","sub_path":"dcmanager/common/endpoint_cache.py","file_name":"endpoint_cache.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"116648779","text":"# -*- coding: utf-8 -*-\n# python3\n# Copyright (c) 2017 by Dr. Justin Klotz\n\nimport os\nimport pickle\nimport numpy as np\n\n# serial\nport_serial = '/dev/ttyAMA0'\nbaud_serial = 9600\nserial_timeout = .1\nmax_serial_bytes = 100\n\n# UDP sockets for commands, settings, updates\nudp_ip_self = '192.168.1.1'\nudp_ip_user = None\nudp_ip_monitor = '192.168.1.13'\nudp_port_command = 40000\nudp_port_settings = 40001\nudp_port_update = 50000\nudp_port_ranging = 50001 # for debug monitoring\nudp_timeout_duration = 0.001 # in seconds, max time to wait for UDP\nmax_udp_bytes = 1024 # maximum number of bytes to read from socket\n\n# range-finding anchors (x, y, z, sig_d are in cm)\nanchors = [{'id': 2, 'x': 12.75*.5*2.54, 'y': 0.0, 'z': 11.5*2.54, 'sig_r': 15},\n {'id': 3, 'x': -12.75*.5*2.54, 'y': 0.0, 'z': 11.5*2.54, 'sig_r': 15},\n {'id': 4, 'x': 0.0, 'y': -23.25*2.54, 'z': 11.5*2.54, 'sig_r': 50}]\n# load calibration coefficients from pickle file\ndir_pickle = os.path.dirname(os.path.realpath(__file__))\npath_pickle = os.path.join(dir_pickle, 'calibration.p')\ncalibration = pickle.load(open(path_pickle, 'rb'))\nids_anchors = [a['id'] for a in anchors]\nfor c in calibration:\n if c['id'] in ids_anchors:\n idx = ids_anchors.index(c['id'])\n anchors[idx]['coeffs'] = c['coeffs']\n\n# positioning\nz_tag = 36*2.54 # cm\ndt_old_ranging_data = 1 # s\nx_hat_0_pos = np.array([0, 200]).reshape((2, 1)) # cm\nupper_limit_range = 2000 # cm\nupper_limit_estimate = 1000 # cm\n# for ls\nunstable_range_ls = 100000 # cm\n# for ekf\nx_hat_0_pos_vel = np.array([0, 200, 0, 0]).reshape((4, 1)) # cm, cm, cm/s, cm/s\nP0 = np.diag([np.square(100), np.square(100), np.square(10), np.square(10)])\nsig_x_dd = 20 # cm/s^2\nsig_y_dd = 20 # cm/s^2\n\n# exponential smoothing filter for LS positioning\nalpha = .3 # .3\n\n# guidance\nr_lb_guidance = 100 # cm\nr_des_guidance = 300 # cm\nr_ub_guidance = 700 # cm\nmin_theta_diff_for_follow = np.pi/4\nspeed_coast = 50 # out of 255\nmax_speed = 100 # out of 255\nmax_turn_rate = 100 # out of 255\n\n# motors object & control\nmotors_kill_time_manual = .5 # in seconds, time since last socket read to kill motors\nmotors_kill_time_follow = 1\nturn_ramp_limit = 20\nspeed_ramp_limit = 20\n","sub_path":"Raspberry_Pi/coolerbot/settings_.py","file_name":"settings_.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"15176573","text":"#-*-coding:utf-8 -*-\n\"\"\"\n 121. Best Time to Buy and Sell Stock\n Directed by user zhongch4g\n current system date 2017/4/29\n\"\"\"\n\nimport sys\n\nclass Solution(object):\n def maxProfit(self, prices):\n\n pre = 0\n max_profit, min_profit = -sys.maxsize - 1, 0\n for i in range(1, len(prices)):\n pre += (prices[i] - prices[i-1])\n max_profit = max(max_profit, pre - min_profit)\n min_profit = min(min_profit, pre)\n if max_profit < 0:\n return 0\n return max_profit\n\n\ninstance = Solution()\nprint(instance.maxProfit([1, 6]))\n","sub_path":"LeetCode/121. Best Time to Buy and Sell Stock.py","file_name":"121. 
Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207750431","text":"class Solution(object):\n def anagramMappings(self, A, B):\n \"\"\"\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n \"\"\"\n dic={}\n for i,e in enumerate(B):\n if e in dic:\n dic[e].append(i)\n else:\n dic[e]=[i]\n ans=[]\n for e in A:\n ans.append(dic[e].pop())\n return ans","sub_path":"Python/760. Find Anagram Mappings.py","file_name":"760. Find Anagram Mappings.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327714759","text":"import C45\r\nimport treePlotter\r\nfr = open(r'C:\\Users\\Lenovo\\AppData\\Local\\Programs\\Python\\Python38\\Data.txt')\r\nlDataSet = [inst.strip().split('\\t') for inst in fr.readlines()]\r\nlabels = ['Job', 'Age', 'Credit score', 'House']\r\n# 样本特征类型,0为离散,1为连续\r\nlabelProperties = [0, 1, 1, 0]\r\n# 是否放贷\r\nclassList = ['Yes', 'No']\r\n# 验证集,用于剪枝\r\ndataSet_test = [['full-time job', '35', '85', 'have house', 1, 'Yes'], ['no job', '72', '90', 'have house', 1, 'No']]\r\n# 构建决策树\r\ntrees = C45.createTree(lDataSet, labels, labelProperties)\r\ntreePlotter.createPlot(trees)\r\n# 利用验证集对决策树剪枝\r\nC45.postPruningTree(trees, classList, lDataSet, dataSet_test, labels, labelProperties)\r\n# 绘制剪枝后的决策树\r\ntreePlotter.createPlot(trees)\r\n# 重新赋值类别标签和类型\r\nlabels = ['Job', 'Age', 'Credit score', 'House']\r\nlabelProperties = [0, 1, 1, 0]\r\n# 测试样本\r\ntestVec = ['part-time job', 50, 88, 'have house']\r\nclassLabel = C45.classify(trees, classList, labels, labelProperties, testVec)\r\n# 打印测试样本的分类结果\r\nprint(classLabel)\r\n\r\n\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"332950065","text":"from pycocotools.coco import COCO\r\nimport requests\r\nimport pickle\r\nfrom PIL import Image\r\nimport os\r\n\r\n# instantiate COCO specifying the annotations json path\r\ncoco = COCO('instances_val2014.json')\r\n# Specify a list of category names of interest\r\ncatIds = coco.getCatIds(catNms=['person'])\r\n# Get the corresponding image ids and images using loadImgs\r\nimgIds = coco.getImgIds(catIds=catIds)\r\nimages = coco.loadImgs(imgIds)\r\n\r\n# Save the images into a local folder\r\nnum=0\r\nimg_arr = []\r\n\r\nfor im in images:\r\n num+=1\r\n print(num)\r\n img_data = requests.get(im['coco_url']).content\r\n\r\n img = Image.open('../val2014/' + im['file_name'])\r\n file_type = img.format\r\n img.save(\"person\"+str(num).zfill(6)+\".png\", \"PNG\")\r\n \r\n if num is 1000:\r\n break\r\n","sub_path":"val_person.py","file_name":"val_person.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"63803582","text":"# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Multiply NOAA Atlas 14 data by deltas to get final precipitation estimates\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\nif __name__ == '__main__':\n import os, glob\n import re\n import xarray as xr\n import rasterio\n import numpy as np\n # import multiprocessing as mp\n import argparse\n\n # parse some args\n parser = argparse.ArgumentParser( description='Compute deltas for historical vs. projected data.' 
)\n parser.add_argument( \"-p\", \"--path\", action='store', dest='path', type=str, help=\"input directory storing the return interval data.\" )\n parser.add_argument( \"-a\", \"--atlas\", action='store', dest='atlas_path', type=str, help=\"input directory storing NOAA Atlas 14 data\")\n parser.add_argument( \"-o\", \"--out_path\", action='store', dest='out_path', type=str, help=\"output directory to write outputs\" )\n parser.add_argument( \"-d\", \"--data_group\", action='store', dest='data_group', type=str, help=\"name of the model to use: either 'NCAR-CCSM4' or 'GFDL-CM3'\" )\n \n # parse the args and unpack\n args = parser.parse_args()\n path = args.path\n atlas_path = args.atlas_path\n out_path = args.out_path\n data_group = args.data_group\n\n # names of the durations in the deltas files\n DURATIONS = ['60m','2h', '3h', '6h', '12h','24h', '2d', '3d','4d','7d','10d','20d','30d','45d','60d',]\n # Those same durations, but as they are named in the NOAA files \n DURATIONS_NOAA = ['01h','02h','03h','06h','12h','24h', '48h', '3d','4d','7d','10d','20d','30d','45d','60d',]\n # Intervals as they are named in NOAA files\n INTERVALS = ['2yr', '5yr', '10yr', '25yr', '50yr', '100yr', '200yr', '500yr', '1000yr']\n\n interval_regex = re.compile(r'^ak(\\d+yr)')\n # Get the index of the interval for a NOAA Atlas file.\n def interval_index(filename):\n base = os.path.basename(filename)\n interval = interval_regex.match(base).group(1)\n return INTERVALS.index(interval)\n\n TIMESLICES = [ ('2020','2049'), ('2050','2079'), ('2080','2099') ]\n\n for (d, d_noaa) in zip(DURATIONS, DURATIONS_NOAA):\n print(\" duration: {}\".format(d), flush=True)\n\n # Get NOAA Atlas files for this duration\n atlas_files = glob.glob(os.path.join(atlas_path,'ak*{}a_ams.tif'.format(d_noaa)))\n # Ana sort by interval\n atlas_files.sort(key=interval_index)\n\n for ts in [\"{}-{}\".format(x[0],x[1]) for x in TIMESLICES]:\n print(\" time period: {}\".format(ts), flush=True)\n\n # Find the (downscaled) deltas file. (There should only be one)\n deltas_file = glob.glob(os.path.join(path,'*_{}_*_{}_{}*_warp.nc'.format(data_group,d,ts)))[0]\n\n ds = xr.open_dataset(deltas_file)\n\n # Iterate through each return interval\n for i in range(len(ds.interval)):\n arr = ds['pf' ][i,...,...].values\n with rasterio.open(atlas_files[i]) as tmp:\n atlas_arr = tmp.read(1).astype(np.float32)\n\n # Multiply data\n multiplied = arr * atlas_arr\n below_threshold = multiplied < 0\n multiplied[below_threshold] = float('nan')\n ds['pf'][i,...,...] 
= multiplied\n\n # Save file\n out_fn = os.path.join(out_path,os.path.basename(deltas_file).replace('_warp.nc','_multiply.nc'))\n ds.to_netcdf(out_fn)\n\n ds.close()\n\n","sub_path":"pipeline/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"392332910","text":"import io\nimport os\nimport tempfile\n\nimport pdfrw\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.template import Context, Template\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView, ListView\nfrom weasyprint import HTML\n\nfrom documents.forms import DocumentForm, PhotosDocumentForm, SmallClaimsDocumentForm\nfrom documents.models import DocumentTemplate\nfrom lib.views import ProtectedView, get_next_page_from_request\n\nANNOT_KEY = \"/Annots\"\nANNOT_FIELD_KEY = \"/T\"\nANNOT_VAL_KEY = \"/V\"\nANNOT_RECT_KEY = \"/Rect\"\nSUBTYPE_KEY = \"/Subtype\"\nWIDGET_SUBTYPE_KEY = \"/Widget\"\n\n\nclass DocumentListView(ListView):\n model = DocumentTemplate\n context_object_name = \"document_list\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"model\"] = self.model\n return context\n\n def get_queryset(self):\n return DocumentTemplate.objects.all()\n\n\nclass DocumentFormView(FormView, ProtectedView):\n template_name = \"documents/document_form.html\"\n form_class = DocumentForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"form_name\"] = DocumentTemplate.objects.get(id=self.kwargs[\"id\"]).name\n context[\"next_page\"] = get_next_page_from_request(self.request, reverse_lazy(\"documents:document-list\"))\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs[\"document_template\"] = DocumentTemplate.objects.get(id=self.kwargs[\"id\"])\n form_kwargs[\"user\"] = self.request.user\n return form_kwargs\n\n def form_valid(self, form):\n document_template = DocumentTemplate.objects.get(id=self.kwargs[\"id\"])\n\n body = Template(document_template.body).render(Context(form.cleaned_data))\n context = {**form.cleaned_data, **{\"body\": body, \"user\": self.request.user}}\n pdf_html = render_to_string(\"basic_letter.html\", context)\n\n pdf = io.BytesIO()\n HTML(string=pdf_html).write_pdf(pdf)\n\n response = HttpResponse(pdf.getvalue(), content_type=\"application/pdf\")\n response[\"Content-Disposition\"] = f\"attachment; filename={document_template.file_name}.pdf\"\n messages.add_message(self.request, messages.SUCCESS, _(\"File downloaded.\"))\n return response\n\n\nclass PhotosDocumentFormView(FormView, ProtectedView):\n template_name = \"documents/document_form.html\"\n form_class = PhotosDocumentForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"form_name\"] = _(\"Date-verified Photo Report\")\n context[\"next_page\"] = get_next_page_from_request(self.request, reverse_lazy(\"documents:document-list\"))\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs[\"user\"] = self.request.user\n return form_kwargs\n\n def form_valid(self, form):\n context = {\n **form.cleaned_data,\n **{\"user\": self.request.user, \"site_name\": settings.SITE_NAME, 
\"site_url\": self.request.build_absolute_uri(\"/\")},\n }\n pdf_html = render_to_string(\"photo_report.html\", context)\n\n pdf = io.BytesIO()\n HTML(string=pdf_html).write_pdf(pdf)\n\n response = HttpResponse(pdf.getvalue(), content_type=\"application/pdf\")\n response[\"Content-Disposition\"] = \"attachment; filename=PhotoReport.pdf\"\n messages.add_message(self.request, messages.SUCCESS, _(\"File downloaded.\"))\n return response\n\n\nclass SmallClaimsDocumentFormView(FormView, ProtectedView):\n template_name = \"documents/document_form.html\"\n form_class = SmallClaimsDocumentForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"form_name\"] = _(\"Small Claims Court Form\")\n context[\"next_page\"] = get_next_page_from_request(self.request, reverse_lazy(\"documents:document-list\"))\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs[\"user\"] = self.request.user\n return form_kwargs\n\n def form_valid(self, form):\n template_pdf = pdfrw.PdfReader(os.path.abspath(os.path.join(os.path.dirname(__file__), \"templates/AOC-175.pdf\")))\n\n plaintiff_name = (\n f\"{self.request.user.first_name} {self.request.user.first_name}\"\n if (self.request.user.first_name and self.request.user.first_name)\n else f\"{form.cleaned_data['sender_first_name']} {form.cleaned_data['sender_last_name']}\"\n )\n\n data_dict = {\n \"county\": form.cleaned_data[\"county\"],\n \"plaintiff_full_name\": plaintiff_name,\n \"plaintiff_full_name_2\": plaintiff_name,\n \"plaintiff_address_1\": form.cleaned_data[\"sender_address_1\"],\n \"plaintiff_address_2\": form.cleaned_data[\"sender_address_2\"],\n \"plaintiff_city_state_zip\": f\"{form.cleaned_data['sender_city']}, {form.cleaned_data['sender_state']} {form.cleaned_data['sender_zip_code']}\",\n \"defendant_full_name\": form.cleaned_data[\"unit\"].landlord_name,\n \"defendant_address_1\": form.cleaned_data[\"unit\"].landlord_address_1,\n \"defendant_address_2\": form.cleaned_data[\"unit\"].landlord_address_2,\n \"defendant_city_state_zip\": f\"{form.cleaned_data['unit'].landlord_city}, {form.cleaned_data['unit'].landlord_state} {form.cleaned_data['unit'].landlord_zip_code}\",\n \"claims_sum\": \"${0:.2f}\".format(form.cleaned_data[\"claims_sum\"]),\n \"court_costs\": \"${0:.2f}\".format(form.cleaned_data[\"court_costs\"]),\n \"claims\": form.cleaned_data[\"claims\"],\n }\n\n if form.cleaned_data[\"is_landlord_company\"]:\n data_dict[\"defendant_company\"] = \"X\"\n else:\n data_dict[\"defendant_individual\"] = \"X\"\n\n for page in template_pdf.pages:\n annotations = page[ANNOT_KEY]\n for annotation in annotations:\n if annotation[SUBTYPE_KEY] == WIDGET_SUBTYPE_KEY:\n if annotation[ANNOT_FIELD_KEY]:\n key = annotation[ANNOT_FIELD_KEY][1:-1]\n if key in data_dict.keys():\n annotation.update(pdfrw.PdfDict(V=\"{}\".format(data_dict[key])))\n\n template_pdf.Root.AcroForm.update(pdfrw.PdfDict(NeedAppearances=pdfrw.PdfObject(\"true\")))\n with tempfile.TemporaryFile() as fp:\n pdfrw.PdfWriter().write(fp, template_pdf)\n\n fp.seek(0)\n\n response = HttpResponse(fp.read(), content_type=\"application/pdf\")\n response[\"Content-Disposition\"] = \"attachment; filename=SmallClaims.pdf\"\n messages.add_message(self.request, messages.SUCCESS, _(\"File downloaded.\"))\n return 
response\n","sub_path":"renters_rights/documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207759559","text":"\r\nListeD= [1,8,2,1,6,5,6,6,3,35,63,2,65,9,2,2]\r\nListeS= []\r\n\r\n\r\nfor i in range(len(ListeD)):\r\n if [i] in ListeD:\r\n ListeS.append[i]\r\n \r\nprint (ListeS)\r\n \r\n \r\n ","sub_path":"Doublon.py","file_name":"Doublon.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258576894","text":"import urllib.request\nurl=\"http://tieba.baidu.com\"\nheaders={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'}\nhandler=urllib.request.ProxyHandler(#通过ProxyHandler设置代理,模拟多个不同的客户端,欺骗网站,从而获得数据\n{\n 'http':'172.12.24.45:8080',\n 'https':'120.34.5.46:8080'\n})\nopener=urllib.request.build_opener(handler)\nurllib.request.install_opener(opener)\nrequest=urllib.request.Request(url=url,headers=headers)\nrrsponse=urllib.request.urlopen(request)\nprint(response.read().decode('utf-8'))","sub_path":"网络爬虫开发/设置代理模拟客户端欺骗网站.py","file_name":"设置代理模拟客户端欺骗网站.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389790210","text":"participants = [[\"leo\", \"kiki\", \"eden\"],\n ['marina', 'josipa', 'nikola', 'vinko', 'filipa'],\n ['mislav', 'stanko', 'mislav', 'ana']]\ncompletions = [['eden', 'kiki'],\n ['josipa', 'filipa', 'marina', 'nikola'],\n ['stanko', 'ana', 'mislav']]\n\npart = participants[2]\ncomp = completions[2]\n\npart\ncomp\n\nfor p in range(len(part) - 1, -1, -1):\n '============='\n p, part[p]\n match = False\n r = []\n i = len(comp) - 1\n '_____________'\n while i > -1:\n i, comp[i]\n if part[p] == comp[i]:\n 'match'\n match = True\n else: \n 'unmatch'\n r.append(comp[i])\n i = i - 1\n if not match :\n 'return {}'.format(part[p])\n comp = r\n\n","sub_path":"2019-2020_programmers/_01_/src/python/an_unfulfilled_player_double_loop.py","file_name":"an_unfulfilled_player_double_loop.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106296832","text":"# coding: utf8 \nimport json\nfrom datetime import datetime, timedelta\nfrom model.seller_model import SellerModel\nfrom model.bee_define import *\nfrom model.bee_errors import *\nfrom app_request_handler import AppRequestHandler\n\n\nclass SellerRequestHandler(AppRequestHandler):\n \"\"\"处理来自商家的请求\"\"\"\n def __init__(self, *args, **kwargs):\n super(SellerRequestHandler, self).__init__(*args, **kwargs)\n self.model = SellerModel()\n\n def _do(self, option):\n if option == '_get_latest_app':\n self._get_latest_app()\n elif option == '_modify_user':\n self._modify_user()\n elif option == '_get_categories':\n self._get_categories()\n elif option == '_get_items':\n self._get_items()\n elif option == '_update_item':\n self._update_item()\n elif option == '_delete_item':\n self._delete_item()\n elif option == '_get_xin_orders':\n self._get_xin_orders()\n elif option == '_accept_order':\n self._accept_order()\n elif option == '_reject_order':\n self._reject_order()\n elif option == '_get_daifahuo_orders':\n self._get_daifahuo_orders()\n elif option == '_get_yifahuo_orders':\n self._get_yifahuo_orders()\n elif option == '_get_refund_orders':\n 
self._get_refund_orders()\n elif option == '_accept_refund_orders':\n self._accept_refund_orders()\n elif option == '_get_settlements_between':\n self._get_settlements_between()\n elif option == '_get_settlements':\n self._get_settlements()\n elif option == '_get_orders_on_day':\n self._get_orders_on_day()\n else:\n self.write_fail(ERR_INVALID_OPTION)\n \n def _get_latest_app(self):\n \"\"\"获取最新版本的APP\"\"\"\n local_version = self.get_param('local_version', essential=True)\n self.write_success({'latest_version':local_version, \n 'download_url':''})\n \n def _modify_user(self):\n \"\"\"修改商家信息\"\"\"\n user_data = self.get_param('user_data')\n _user = json.loads(user_data) if user_data else {}\n user = self.model.modify_user(_user)\n user = user.jsonlike()\n user['session'] = self.get_param('session', essential=True)\n self.write_success({'user': user})\n\n def _get_categories(self): \n \"\"\"获取商家签订的商品类目\"\"\"\n categories = self.model.get_categories()\n self.write_success({'categories': categories}) \n \n def _get_items(self): \n \"\"\"获取类目下的商品\"\"\"\n category_id = self.get_param('category_id', essential=True, rtype=int)\n items = self.model.get_items(category_id)\n self.write_success({'items': \n [x.jsonlike() for x in items]})\n\n def _update_item(self):\n \"\"\"更新商品\"\"\"\n item = self.get_param('item', essential=True)\n item = json.loads(item)\n if not item['id']:\n err = self.model.add_item(item)\n else:\n err = self.model.modify_item(item)\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n \n def _delete_item(self):\n \"\"\"删除商品\"\"\"\n item_id = self.get_param('item_id', essential=True, rtype=int)\n err = self.model.delete_item(int(item_id))\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n \n def _get_xin_orders(self):\n \"\"\"获取新订单\"\"\"\n orders = self.model.get_xin_orders()\n self.write_success({'orders': [x.jsonlike() for x in orders]})\n\n def _accept_order(self):\n \"\"\"接收订单\"\"\"\n order_id = self.get_param('order_id', essential=True, rtype=int)\n err = self.model.accept_order(order_id)\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n \n def _reject_order(self):\n \"\"\"拒绝订单\"\"\"\n order_id = self.get_param('order_id', essential=True, rtype=int)\n err = self.model.reject_order(order_id)\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n \n def _get_daifahuo_orders(self):\n \"\"\"获取待发货的订单\"\"\"\n orders = self.model.get_daifahuo_orders()\n self.write_success({'orders': [x.jsonlike() for x in orders]})\n\n def _get_yifahuo_orders(self):\n \"\"\"获取已发货的订单\"\"\"\n orders = self.model.get_yifahuo_orders()\n self.write_success({'orders': [x.jsonlike() for x in orders]})\n\n def _get_refund_orders(self): \n \"\"\"获取退货订单\"\"\"\n courier_id = self.get_param('courier_id', essential=True, rtype=int)\n orders = self.model.get_refund_orders(courier_id)\n self.write_success({'orders': [x.jsonlike() for x in orders]})\n \n def _accept_refund_orders(self):\n \"\"\"接收退货订单\"\"\"\n orders = self.get_param('orders', essential=True)\n err = self.model.accept_refund_orders(orders)\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n \n def _get_settlements_between(self):\n \"\"\"获取日期内每天的结算信息\"\"\"\n from_day = self.get_param('from_day', essential=True, rtype=int)\n to_day = self.get_param('to_day', essential=True, rtype=int)\n ss = self.model.get_settlements_between(from_day, to_day)\n self.write_success({'settlements': [x.jsonlike() for x in ss]})\n\n def 
_get_settlements(self):\n \"\"\"获取最近days天内每天的结算信息\"\"\"\n now = datetime.now()\n days = self.get_param('days', essential=True, rtype=int)\n from_day = int((now - timedelta(days=days)).strftime('%Y%m%d'))\n to_day = int(now.strftime('%Y%m%d'))\n ss = self.model.get_settlements_between(from_day, to_day)\n self.write_success({'settlements': [x.jsonlike() for x in ss]})\n\n def _get_orders_on_day(self):\n \"\"\"获取某天的订单\"\"\"\n day = self.get_param('day', essential=True, rtype=int)\n orders = self.model.get_orders_on_day(day)\n self.write_success({'orders': [x.jsonlike() for x in orders]})\n\n","sub_path":"app/service/app_service/seller_request_handler.py","file_name":"seller_request_handler.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"462442773","text":"\nfrom flask import Flask\nfrom flask_pymongo import PyMongo\nfrom flask_cors import CORS\n\ndef create_app():\n\n # Flask Config\n app = Flask(__name__)\n CORS(app)\n\n app.config['MONGO_URI'] = 'mongodb+srv://admintest:admintest@marcoapicluster.ys7ce.mongodb.net/SE691?retryWrites=true&w=majority'\n\n app.mongo = PyMongo(app)\n\n print('connection successful')\n return app\n","sub_path":"backend/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"345269664","text":"def converting(source_num, source_hex, target_hex):\n # (2, 36)之间的进制转换\n if source_hex > 36 or source_hex < 2:\n return '2 <= source_hex <= 36'\n if target_hex > 36 or target_hex < 2:\n return '2 <= target_hex <= 36'\n str_36 = '0123456789abcdefghijklmnopqrstuvwxyz'\n dict_36 = {}\n for i in range(len(str_36)):\n dict_36[str_36[i]] = i\n str_b = str_36[:target_hex]\n result = ''\n source_str = str(source_num).lower()\n decimal_num = 0\n for i in range(len(source_str)):\n decimal_num += dict_36[source_str[-i-1]] * (source_hex ** i)\n quotient_int = decimal_num\n while quotient_int >= target_hex:\n remainder = quotient_int % target_hex\n quotient_int = quotient_int // target_hex\n result = str_b[remainder] + result\n if quotient_int < target_hex:\n result = str_b[quotient_int] + result\n break\n return result\n","sub_path":"conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"487509117","text":"#--------------------------------------------------------------------------\n# File and Version Information:\n# $Id$\n#\n# Description:\n# Module PlotG2Widget...\n#\n#------------------------------------------------------------------------\n\n\"\"\"Plot array as a graphic and as a histogram.\n\nThis software was developed for the SIT project. If you use all or \npart of it, please give an appropriate acknowledgment.\n\n@see RelatedModule\n\n@version $Id: \n\n@author Mikhail S. 
Dubrovin\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\n\n#------------------------------\n# Module's version from CVS --\n#------------------------------\n__version__ = \"$Revision$\"\n# $Source$\n\n#--------------------------------\n# Imports of standard modules --\n#--------------------------------\nimport sys\nimport os\nimport random\nimport numpy as np\nfrom math import log10\n\n# For self-run debugging:\nif __name__ == \"__main__\" :\n import matplotlib\n matplotlib.use('Qt4Agg') # forse Agg rendering to a Qt4 canvas (backend)\n\n#from matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n#import matplotlib.ticker as mtick\nfrom matplotlib.ticker import MaxNLocator, NullFormatter\n#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n#---------------------\n# Class definition --\n#---------------------\n\nclass PlotG2Widget (QtWidgets.QWidget) :\n \"\"\"Plot array as a graphic and as a histogram\"\"\"\n\n def __init__(self, parent=None, arrays=None, figsize=(10,10), title=''):\n QtWidgets.QWidget.__init__(self, parent)\n self.setWindowTitle('Matplotlib image embadded in Qt widget')\n self.setGeometry(10, 25, 1000, 700)\n \n self.arr_g2, self.arr_tau, self.arr_q = arrays\n # Expected shape arr_g2.shape = (Ntau, Nq)\n\n self.set_xarray(np.array(self.arr_tau))\n self.title = title\n self.parent = parent\n self.figsize = figsize\n self.nwin_max = 9\n\n self.fig = plt.figure(figsize=figsize, dpi=100, facecolor='w',edgecolor='w',frameon=True)\n\n #-----------------------------------\n self.canvas = self.fig.canvas\n self.vbox = QtWidgets.QVBoxLayout() # <=== Begin to combine layout \n self.vbox.addWidget(self.canvas) # <=== Add figure \n #self.vbox.addStretch(1)\n self.setLayout(self.vbox)\n #-----------------------------------\n\n self.canvas.mpl_connect('button_press_event', self.processMouseButtonPress) \n self.canvas.mpl_connect('button_release_event', self.processMouseButtonRelease) \n self.canvas.mpl_connect('motion_notify_event', self.processMouseMotion)\n self.canvas.mpl_connect('axes_leave_event', self.processAxesLeaveEvent)\n self.canvas.mpl_connect('axes_enter_event', self.processAxesEnterEvent)\n self.canvas.mpl_connect('figure_leave_event', self.processFigureLeaveEvent)\n\n self.setFrame()\n self.initParameters()\n self.on_draw()\n\n\n def setFrame(self):\n self.frame = QtWidgets.QFrame(self)\n self.frame.setFrameStyle( QtWidgets.QFrame.Box | QtWidgets.QFrame.Sunken )\n self.frame.setLineWidth(0)\n self.frame.setMidLineWidth(1)\n self.frame.setGeometry(self.rect())\n #self.frame.setVisible(False)\n\n\n def getCanvas(self):\n return self.canvas\n\n\n def resizeEvent(self, e):\n #print 'resizeEvent' \n self.frame.setGeometry(self.rect())\n\n\n def closeEvent(self, event): # is called for self.close() or when click on \"x\"\n #print 'PlotG2Widget: closeEvent'\n pass\n\n\n #def set_array(self, arr, title=''):\n # self.arry = arr\n # self.title = title\n # self.on_draw()\n # #self.on_draw_in_limits()\n\n\n def set_xarray(self,arr):\n if arr is None :\n self.arrx = np.arange(self.arr_g2.shape[0])\n else :\n self.arrx = arr\n\n\n def initParameters(self) :\n self.gr_xmin = None\n self.gr_xmax = None\n self.gr_ymin = None\n self.gr_ymax = None\n self.gridIsOn = False\n self.logIsOn = True\n self.iq_begin = 0\n\n\n def get_iq_list(self, iq_begin=0) :\n iq_min = 0\n iq_max = self.arr_q.shape[0]\n #return 
[0,1,2,3,4,5,6,7,8]\n\n if iq_max <= self.nwin_max : return list(range(iq_max))\n elif iq_max-iq_begin <= self.nwin_max : return list(range(iq_max-self.nwin_max, iq_max))\n else : return list(range(iq_begin, iq_begin+self.nwin_max))\n\n\n\n def on_draw_in_limits(self) :\n self.on_draw(self.gr_xmin, self.gr_xmax, self.gr_ymin, self.gr_ymax, self.iq_begin)\n\n\n\n def on_draw(self, gr_xmin=None, gr_xmax=None, gr_ymin=None, gr_ymax=None, iq_begin=0):\n \"\"\"Redraws the figure\"\"\"\n\n self.fig.clear()\n\n if gr_xmin is None : xmin = self.arrx[0]\n else : xmin = gr_xmin\n\n if gr_xmax is None : xmax = self.arrx[-1] # Last element\n else : xmax = gr_xmax\n\n if xmin==xmax : xmax=xmin+1 # protection against equal limits\n\n wwidth = 0.26 \n wheight = 0.24 \n\n self.list_of_axgr = []\n\n #iq_begin = 5\n iq_list = self.get_iq_list(iq_begin)\n #print 'iq_list:', iq_list, ' at self.iq_max =',self.arr_q.shape[0]\n\n for iwin, iq in enumerate(iq_list) :\n\n iwin_row = int(iwin/3)\n iwin_col = int(iwin%3)\n wx0 = 0.08 + iwin_col*0.32\n wy0 = 0.70 - iwin_row*0.3\n\n xarr = self.arrx\n yarr = self.arr_g2[:,iq]\n q_ave = self.arr_q[iq]\n q_str = 'q(%d)=%8.4f' % (iq, q_ave) \n\n if gr_ymin is None : ymin = min(yarr)\n else : ymin = gr_ymin\n\n if gr_ymax is None : ymax = max(yarr)\n else : ymax = gr_ymax\n\n axgr = self.fig.add_axes([wx0, wy0, wwidth, wheight])\n if self.logIsOn :\n axgr.set_xscale('log')\n else :\n axgr.xaxis.set_major_locator(MaxNLocator(5))\n\n axgr.plot(xarr, yarr, '-bo')# '-ro'\n\n axgr.set_xlim(xmin,xmax) \n axgr.set_ylim(ymin,ymax) \n axgr.set_title(q_str, fontsize=10, color='b')\n axgr.tick_params(axis='both', which='major', labelsize=8)\n axgr.yaxis.set_major_locator(MaxNLocator(5))\n axgr.grid(self.gridIsOn)\n\n\n if iwin_col == 0 :\n axgr.set_ylabel(r'$g_{2}$', fontsize=14)\n if iwin_row == 2 :\n axgr.set_xlabel(r'$\\tau$ (in number of frames)', fontsize=12)\n else :\n axgr.xaxis.set_major_formatter(NullFormatter())\n\n self.list_of_axgr.append(axgr)\n\n self.canvas.draw()\n\n\n def processAxesEnterEvent(self, event) :\n #print 'AxesEnterEvent'\n if self.event_is_in_axgr(event) :\n #QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))\n QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.SizeHorCursor))\n #QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.SizeAllCursor))\n\n\n def processAxesLeaveEvent(self, event) :\n #print 'AxesLeaveEvent'\n try : self.curstext.remove()\n except : pass\n QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n\n\n def processFigureLeaveEvent(self, event) :\n #print 'FigureLeaveEvent'\n QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\n\n\n def event_is_in_axgr(self, event) :\n for axgr in self.list_of_axgr :\n if event.inaxes == axgr : return True\n return False\n\n\n def processMouseMotion(self, event) :\n if self.event_is_in_axgr(event) :\n self.drawXCoordinateOfCoursor(event)\n self.drawVerticalLineThroughCoursor(event)\n else :\n pass\n\n\n def drawXCoordinateOfCoursor(self, event) :\n axes = event.inaxes\n #xmin, xmax = axes.get_xlim()\n #ymin, ymax = axes.get_ylim()\n x, y = event.xdata, event.ydata\n s = '%6.1f' % (event.xdata)\n try : self.curstext.remove()\n except : pass\n self.curstext = axes.text(x, y, s) #, ha='center')\n self.canvas.draw()\n\n\n def drawVerticalLineThroughCoursor(self, event) :\n axes = event.inaxes\n fb = self.canvas.figure.bbox\n bb = axes.bbox\n #print bb\n\n bbx0, bby0, bbh, bbw = bb.x0, bb.y0, bb.height, 
bb.width\n fbx0, fby0, fbh, fbw = fb.x0, fb.y0, fb.height, fb.width\n\n xd = event.xdata\n yd = event.ydata\n x = event.x\n y = event.y\n\n x0 = bbx0 \n y0 = fbh - bby0 - bbh # -1\n w = x - x0\n\n rect = [x0, y0, w, bbh]\n self.fig.canvas.drawRectangle( rect ) \n #self.fig.canvas.draw()\n\n\n def processMouseButtonPress(self, event) :\n #print 'MouseButtonPress'\n if self.event_is_in_axgr(event) : self.mousePressOnGraph(event)\n\n\n def mousePressOnGraph(self, event) :\n #print 'PressOnGraph'\n #print 'event.xdata, ydata, x, y =', event.xdata, event.ydata, event.x, event.y\n\n if event.button == 1 :\n self.gr_xmin = float(event.xdata)\n elif event.button == 3 :\n self.gr_xmax = float(event.xdata)\n else :\n self.gr_xmin = None\n self.gr_xmax = None\n\n self.on_draw_in_limits()\n\n\n def processMouseButtonRelease(self, event) :\n #print 'MouseButtonRelease'\n\n if event.button == 1 :\n pass\n\n elif event.button == 2 : # middle or right button\n if self.event_is_in_axgr(event) : \n self.gr_xmin = None\n self.gr_xmax = None\n\n #elif event.inaxes == self.axhi :\n # self.gr_ymin = None\n # self.gr_ymax = None\n\n self.on_draw_in_limits()\n #self.on_draw()\n\n elif event.button == 3 :\n pass\n\n\n def saveFigure(self, fname='fig.png'):\n self.fig.savefig(fname)\n\n#-----------------------------\n# Test\n#-----------------------------\n\ndef get_arrays_for_test() :\n rows, cols = 31, 20 # for q and tau\n mu, sigma = 1., 0.2\n arr_g2 = mu + sigma*np.random.standard_normal( size = rows*cols )\n arr_g2.shape = (rows,cols)\n arr_tau = np.arange(rows)\n arr_q = np.arange(cols)\n return arr_g2, arr_tau, arr_q\n\ndef print_array(arr, msg='') :\n print('\\n' + msg + ':\\n', arr)\n print('shape:', arr.shape)\n\n#-----------------------------\n\ndef main():\n\n app = QtWidgets.QApplication(sys.argv)\n w = PlotG2Widget(arrays=get_arrays_for_test())\n w.move(QtCore.QPoint(50,50))\n w.show() \n app.exec_()\n \n#-----------------------------\n# In case someone decides to run this module\n#\nif __name__ == \"__main__\" :\n main()\n\n#-----------------------------\n","sub_path":"src/PlotG2Widget.py","file_name":"PlotG2Widget.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262874999","text":"import numpy as np\nimport cv2\nimport glob\nfrom p4constant import *\nimport matplotlib.pyplot as plt\n\ndef calibrateCamera():\n nx = 9\n ny = 6\n objp = np.zeros((nx*ny, 3), np.float32)\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n\n objpoints = []\n imgpoints = []\n\n images = glob.glob('./camera_cal/calibration*.jpg')\n \n for idx, fname in enumerate(images):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if idx == 1:\n img_size = gray.shape[::-1]\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n\n return ret, mtx, dist, rvecs, tvecs\n\ndef cal_undistort(img, mtx, dist):\n undist_img = cv2.undistort(img, mtx, dist, None, mtx)\n if debug_on == True:\n cv2.imwrite(\"output_images/undistor.jpg\", undist_img)\n return undist_img\n\ndef grayscale(img):\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if debug_on == True:\n cv2.imwrite(\"output_images/grayscale.jpg\", gray_img)\n #cv2.imshow('gray_img', gray_img)\n #cv2.waitKey(0)\n return gray_img\n\ndef hls(img, thresh=(0,255)):\n hls_img = 
cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n s = hls_img[:,:,2]\n # Threshold color channel\n s_binary = np.zeros_like(s)\n s_binary[(s >= thresh[0]) & (s <= thresh[1])] = 1 \n if debug_on == True:\n cv2.imwrite(\"output_images/s_channel.jpg\", s_binary*255)\n #cv2.imshow('saturation', s_binary*255)\n #cv2.waitKey(0)\n return s_binary\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel= 3, abs_thresh=(0,255)):\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel))\n else:\n abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel))\n\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= abs_thresh[0]) & (scaled_sobel <= abs_thresh[1])] = 1\n if debug_on == True:\n file_name = orient + \"sobel.jpg\"\n cv2.imwrite(\"output_images/\" + file_name, binary_output*255)\n #cv2.imshow(orient, binary_output*255)\n #cv2.waitKey(0)\n return binary_output\n\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=(0,255)):\n sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)\n sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)\n\n abs_sobelxy = np.sqrt(sobelx**2 + sobely**2)\n scaled_sobelxy = np.uint8(255*abs_sobelxy/np.max(abs_sobelxy))\n binary_output = np.zeros_like(scaled_sobelxy)\n binary_output[(scaled_sobelxy >= mag_thresh[0]) & (scaled_sobelxy <= mag_thresh[1])] = 1\n if debug_on == True:\n binary_output = binary_output * 255\n cv2.imshow(\"mag\", binary_output)\n cv2.waitKey(0)\n return binary_output\n\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n \n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)\n sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)\n # 3) Take the absolute value of the x and y gradients\n abs_sobelx = np.absolute(sobelx)\n abs_sobely = np.absolute(sobely)\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient \n dir_gredient = np.arctan2(abs_sobely, abs_sobelx)\n # 5) Create a binary mask where direction thresholds are met\n binary_output = np.zeros_like(dir_gredient)\n binary_output[(dir_gredient >= thresh[0]) & (dir_gredient <= thresh[1])] = 1\n if debug_on == True:\n binary_output = binary_output * 255\n cv2.imshow(\"direction\", binary_output)\n cv2.waitKey(0)\n\n # 6) Return this mask as your binary_output image\n \n return binary_output\n\ndef combined(img):\n gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=sobelx_kernel_size, abs_thresh=sobelx_threshold)\n grady = abs_sobel_thresh(img, orient='y', sobel_kernel=sobely_kernel_size, abs_thresh=sobely_threshold)\n mag_binary = mag_thresh(img, sobel_kernel= mag_kernel_size, mag_thresh=mag_threshold)\n dir_binary = dir_threshold(img, sobel_kernel=dir_kernel_size, thresh=direction_threshold)\n combined = np.zeros_like(gradx)\n combined[((gradx == 1)&(grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1\n if debug_on == True:\n combined[((gradx == 255)&(grady == 255)) | ((mag_binary == 255) & (dir_binary == 255))] = 255\n cv2.imshow('combined', combined)\n cv2.waitKey(0)\n return combined\n\ndef combined_binary(s_channel, edge_image):\n combined_binary = np.zeros_like(s_channel)\n combined_binary[(s_channel == 1) |( edge_image == 1)] = 1\n if debug_on == True:\n cv2.imwrite(\"output_images/combined_binary.jpg\", combined_binary*255)\n return combined_binary\n\ndef region_of_interest(img, vertices):\n mask = 
np.zeros_like(img)\n\n if len(img.shape) > 2:\n channel_count = img.shape[2]\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n masked_image = cv2.bitwise_and(img, mask)\n\n if debug_on == True:\n cv2.imwrite(\"output_images/masked_image.jpg\", masked_image*255)\n\n return masked_image\n\ndef warpPerspective(img, M, size, saved_name):\n # calculate warped image\n binary_warped_img = cv2.warpPerspective(img, M, size)\n if debug_on == True:\n cv2.imwrite(\"output_images/\"+ saved_name, binary_warped_img*255)\n return binary_warped_img\n\n\n\ndef calculate_radius(left_fitx, right_fitx, ploty):\n left_fitx = left_fitx[::-1] # Reverse to match top-to-bottom in y\n right_fitx = right_fitx[::-1] # Reverse to match top-to-bottom in y\n \n y_eval = np.max(ploty)\n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)\n right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])\n right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])\n center_l_r = (left_fitx[0] + right_fitx[0])/2\n \n return left_curverad, right_curverad, center_l_r\n\nimport p4constant\n\ndef sanity_check(left_fitx, right_fitx, left_curverad, right_curverad):\n look_good = True\n diff = np.subtract(right_fitx, left_fitx)\n abs_diff = np.absolute(diff)\n indx = np.argmin(diff)\n min_distance = diff[indx]\n mean_distance = np.mean(diff)\n \n curved_distance = len(abs_diff[(abs_diff < (mean_distance - 100)) | (abs_diff > (mean_distance + 100)) ])\n if curved_distance > 5:\n look_good = False\n return look_good\n\ndef p4reset():\n p4constant.leftLine = Line()\n p4constant.rightLine = Line()\n p4constant.frame_counter = 0\n \ndef sliding_search(img, margin):\n histogram = np.sum(img[int(img.shape[0]/2):,:], axis=0)\n # Create an output image to draw on and visualize the result\n out_img = np.dstack((img, img, img))*255\n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram.shape[0]/2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n # Choose the number of sliding windows\n nwindows = 9\n # Set height of windows\n window_height = np.int(img.shape[0]/nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n # Set the width of the windows +/- margin\n #margin = 100\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = img.shape[0] - (window+1)*window_height\n win_y_high = img.shape[0] - window*window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n # Draw the windows on 
the visualization image\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) \n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2) \n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\\\n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\\\n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n # Extract left and right line pixel positions\n p4constant.leftLine.allx = nonzerox[left_lane_inds]\n p4constant.leftLine.ally = nonzeroy[left_lane_inds] \n p4constant.rightLine.allx = nonzerox[right_lane_inds]\n p4constant.rightLine.ally = nonzeroy[right_lane_inds]\n\n # Fit a second order polynomial to each (get back coefficients\n left_current_fit = np.polyfit(p4constant.leftLine.ally, p4constant.leftLine.allx, 2)\n right_current_fit = np.polyfit(p4constant.rightLine.ally, p4constant.rightLine.allx, 2)\n \n # Generate x and y values for plotting\n ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )\n left_fitx = left_current_fit[0]*ploty**2 + left_current_fit[1]*ploty + left_current_fit[2]\n right_fitx = right_current_fit[0]*ploty**2 + right_current_fit[1]*ploty + right_current_fit[2]\n\n left_curvature, right_curvature, center_l_r = calculate_radius(left_fitx, right_fitx, ploty)\n\n look_good = sanity_check(left_fitx, right_fitx, left_curvature, right_curvature)\n\n if look_good == True:\n if p4constant.frame_counter < p4constant.n_threshold: \n p4constant.leftLine.current_fit.append(left_current_fit)\n p4constant.rightLine.current_fit.append(right_current_fit)\n\n p4constant.leftLine.recent_xfitted.append(left_fitx)\n p4constant.rightLine.recent_xfitted.append(right_fitx)\n else:\n indx = p4constant.frame_counter % p4constant.n_threshold\n p4constant.leftLine.current_fit[indx] = left_current_fit\n p4constant.rightLine.current_fit[indx] = right_current_fit\n\n p4constant.leftLine.recent_xfitted[indx] = left_fitx\n p4constant.rightLine.recent_xfitted[indx] = right_fitx\n \n p4constant.leftLine.bestx = np.divide(np.sum(p4constant.leftLine.recent_xfitted, axis=0), len(p4constant.leftLine.recent_xfitted))\n p4constant.rightLine.bestx = np.divide(np.sum(p4constant.rightLine.recent_xfitted, axis=0), len(p4constant.rightLine.recent_xfitted))\n \n p4constant.leftLine.best_fit = np.divide(np.sum(p4constant.leftLine.current_fit, axis=0), len(p4constant.leftLine.current_fit))\n p4constant.rightLine.best_fit = np.divide(np.sum(p4constant.rightLine.current_fit, axis=0),len(p4constant.rightLine.current_fit))\n\n p4constant.leftLine.radius_of_curvature = left_curvature\n p4constant.rightLine.radius_of_curvature = right_curvature\n \n p4constant.frame_counter += 1\n p4constant.leftLine.detected = True\n p4constant.rightLine.detected = True\n else:\n if len(p4constant.leftLine.recent_xfitted) == 0:\n left_fitx 
= []\n right_fitx = []\n else:\n left_fitx = p4constant.leftLine.bestx\n right_fitx = p4constant.rightLine.bestx\n p4constant.leftLine.detected = False\n p4constant.rightLine.detected = False \n\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n return out_img, left_fitx, right_fitx, left_curvature, right_curvature, center_l_r\n\ndef fillPoly(img, left_fitx, right_fitx, margin, ploty):\n # Generate a polygon to illustrate the search window area\n # And recast the x and y points into usable format for cv2.fillPoly()\n left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])\n left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])\n left_line_pts = np.hstack((left_line_window1, left_line_window2))\n right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])\n right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])\n right_line_pts = np.hstack((right_line_window1, right_line_window2))\n \n # Draw the lane onto the warped blank image\n #cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))\n #cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))\n #result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(img).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n return color_warp\n\n\n\ndef search_previous_data(img, margin, ploty):\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n \n left_lane_inds = ((nonzerox > (p4constant.leftLine.best_fit[0]*(nonzeroy**2) + p4constant.leftLine.best_fit[1]*nonzeroy + p4constant.leftLine.best_fit[2] - margin))\\\n & (nonzerox < (p4constant.leftLine.best_fit[0]*(nonzeroy**2) + p4constant.leftLine.best_fit[1]*nonzeroy + p4constant.leftLine.best_fit[2] + margin))) \n right_lane_inds = ((nonzerox > (p4constant.rightLine.best_fit[0]*(nonzeroy**2) + p4constant.rightLine.best_fit[1]*nonzeroy + p4constant.rightLine.best_fit[2] - margin))\\\n & (nonzerox < (p4constant.rightLine.best_fit[0]*(nonzeroy**2) + p4constant.rightLine.best_fit[1]*nonzeroy + p4constant.rightLine.best_fit[2] + margin))) \n\n\n # Again, extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n # Fit a second order polynomial to each\n left_current_fit = np.polyfit(lefty, leftx, 2)\n right_current_fit = np.polyfit(righty, rightx, 2)\n \n # Generate x and y values for plotting\n left_fitx = left_current_fit[0]*ploty**2 + left_current_fit[1]*ploty + left_current_fit[2]\n right_fitx = right_current_fit[0]*ploty**2 + right_current_fit[1]*ploty + right_current_fit[2]\n\n left_curvature, right_curvature, center_l_r = calculate_radius(left_fitx, right_fitx, ploty)\n\n look_good = sanity_check(left_fitx, right_fitx, left_curvature, right_curvature)\n\n if look_good == True:\n if p4constant.frame_counter < 
p4constant.n_threshold: \n p4constant.leftLine.current_fit.append(left_current_fit)\n p4constant.rightLine.current_fit.append(right_current_fit)\n\n p4constant.leftLine.recent_xfitted.append(left_fitx)\n p4constant.rightLine.recent_xfitted.append(right_fitx)\n else:\n indx = p4constant.frame_counter % p4constant.n_threshold\n p4constant.leftLine.current_fit[indx] = left_current_fit\n p4constant.rightLine.current_fit[indx] = right_current_fit\n\n p4constant.leftLine.recent_xfitted[indx] = left_fitx\n p4constant.rightLine.recent_xfitted[indx] = right_fitx\n \n p4constant.leftLine.bestx = np.divide(np.sum(p4constant.leftLine.recent_xfitted, axis=0), len(p4constant.leftLine.recent_xfitted))\n p4constant.rightLine.bestx = np.divide(np.sum(p4constant.rightLine.recent_xfitted, axis=0), len(p4constant.rightLine.recent_xfitted))\n \n p4constant.leftLine.best_fit = np.divide(np.sum(p4constant.leftLine.current_fit, axis=0), len(p4constant.leftLine.current_fit))\n p4constant.rightLine.best_fit = np.divide(np.sum(p4constant.rightLine.current_fit, axis=0),len(p4constant.rightLine.current_fit))\n\n p4constant.leftLine.radius_of_curvature = left_curvature\n p4constant.rightLine.radius_of_curvature = right_curvature\n \n p4constant.frame_counter += 1\n p4constant.leftLine.detected = True\n p4constant.rightLine.detected = True\n\n else:\n if len(p4constant.leftLine.recent_xfitted) == 0:\n left_fitx = []\n right_fitx = []\n else:\n left_fitx = p4constant.leftLine.bestx\n right_fitx = p4constant.rightLine.bestx\n\n p4constant.leftLine.detected = False\n p4constant.rightLine.detected = False\n\n return left_fitx, right_fitx,left_curvature, right_curvature, center_l_r\n\ndef search_from_scratch(img, margin, ploty):\n window_img, left_fitx, right_fitx, left_curvature, right_curvature, center_l_r = sliding_search(img, margin)\n color_warp = fillPoly(img, left_fitx, right_fitx, margin, ploty)\n \n return color_warp, left_curvature, right_curvature, center_l_r\n\n\ndef find_lines(img):\n margin = 20\n ploty = np.linspace(0, img.shape[0]-1, img.shape[0])\n if p4constant.leftLine.detected == False and p4constant.rightLine.detected == False:\n color_warp, left_curverad, right_curverad, center_l_r = search_from_scratch(img, margin, ploty)\n else:\n left_fitx, right_fitx, left_curverad, right_curverad, center_l_r = search_previous_data(img, margin, ploty) \n color_warp = fillPoly(img, left_fitx, right_fitx, margin, ploty)\n \n return color_warp, left_curverad, right_curverad, center_l_r\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n result = cv2.addWeighted(initial_img, α, img, β, λ)\n return result\n\ndef putText(img, left_curverad, right_curverad, center_l_r):\n cv2.putText(img, \"Radius of Curvature = \" + \"{0:.2f}\".format(left_curverad) + \"(m)\", (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))\n vehicle_center = img.shape[1]/2\n vehicle_pos = (center_l_r - vehicle_center) * xm_per_pix\n abs_pos = np.absolute(vehicle_pos)\n if vehicle_pos < 0:\n cv2.putText(img, \"Vehicle is \" + \"{0:.2f}\".format(abs_pos) +\"m left of the center\", (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))\n elif 
vehicle_pos > 0:\n cv2.putText(img, \"Vehicle is \" + \"{0:.2f}\".format(abs_pos) +\"m right of the center\", (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))\n else:\n cv2.putText(img, \"Vehicle is in the center of the lane\", (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))\n if debug_on == True:\n cv2.imwrite(\"output_images/result.jpg\", img)\n return img\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":20724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438260776","text":"import re; import time\n\nclass Word:\n\n def __init__(self, text):\n self.text = text\n self.corresponding = []\n\n def getCorresponding(self):\n return self.corresponding\n\n\n def addToCorresponding(self, word):\n self.corresponding.append(word)\n \n def getText(self):\n return self.text\n\nf = open(\"textToRead.txt\", encoding='utf-8')\ntext = f.read()\ntext = text.replace(\"\\n\", \" \")\n\n\nf2 = open(\"blacklist.txt\", encoding='utf-8')\nblacklist = f2.read()\nblacklist = blacklist.split(\"\\n\")\n\nsentences = re.split(r'(? len(sentence):\n lastIndex = len(sentence)\n\n for subWord in sentence[firstIndex : 1+lastIndex]:\n newWord.addToCorresponding(subWord.lower())\n \n words.append(newWord)\n return words\n\ndef initialize():\n global words; global bestPairs; global bestSentences; global sentences\n words = createWords()\n bestPairs = getBestWords(0.01)\n bestPairs = removeDuplicates(bestPairs)\n bestSentences = []\n\n sentences = removeDuplicates(sentences)\n\ndef printBestPairs():\n getBestWords(0.02)\n return(bestPairs)\n\ndef findBestSentences():\n global words; global bestPairs; global bestSentences; global sentences\n for sentence in sentences:\n currScore = 0\n for pair in bestPairs:\n if sentence.lower().find(pair[0].lower()) != -1:\n currScore += 1\n if currScore > 0:\n bestSentences.append((sentence,currScore))\n \n bestSentences = removeDuplicates(bestSentences)\n bestSentences.sort()\n bestSentences.sort(key = lambda x: -x[1])\n\n\n return(bestSentences)\n\ninitialize()\n","sub_path":"Independent Study/importantWords.py","file_name":"importantWords.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"304291611","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ntry: from torch.hub import load_state_dict_from_url\nexcept ImportError: from torch.utils.model_zoo import load_url as load_state_dict_from_url\n\ntorch.manual_seed(0)\n\n\n# Model\n# - ImageNet_resnet18, ImageNet_resnet34, ImageNet_resnet50, ImageNet_resnet101, ImageNet_resnet152\n# - CIFAR10_resnet20, CIFAR10_resnet32, CIFAR10_resnet44, CIFAR10_resnet56, CIFAR10_resnet110\n\n# Pretrained model weights url (pretrained on ImageNet)\npretrained_model_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\nclass BasicBlock_A(nn.Module):\n expansion = 1\n def __init__(self, inplanes, stride=1, first=False):\n super(BasicBlock_A, self).__init__()\n self.stride = stride\n self.inplanes = inplanes\n if self.stride == 1:\n self.conv1 = nn.Conv2d(inplanes, inplanes, 
kernel_size=3, stride=self.stride, padding=1, bias=False)\n else:\n self.conv1 = nn.Conv2d(inplanes//2, inplanes, kernel_size=3, stride=self.stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv2 = nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(inplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.stride == 1:\n identity = x\n else:\n identity = F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, self.inplanes//4, self.inplanes//4), \"constant\", 0)\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass BasicBlock_B(nn.Module):\n expansion = 1\n def __init__(self, inplanes, stride=1, first=False):\n super(BasicBlock_B, self).__init__()\n self.stride = stride\n if self.stride == 1:\n self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=self.stride, padding=1, bias=False)\n else:\n self.conv1 = nn.Conv2d(inplanes//2, inplanes, kernel_size=3, stride=self.stride, padding=1, bias=False)\n self.downsample = nn.Sequential(\n nn.Conv2d(inplanes//2, inplanes, kernel_size=1, stride=self.stride, bias=False),\n nn.BatchNorm2d(inplanes)\n )\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv2 = nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(inplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.stride == 1:\n identity = x\n else:\n identity = self.downsample(x)\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, stride=1, first=False):\n super(Bottleneck, self).__init__()\n self.stride = stride\n if self.stride != 1:\n self.conv1 = nn.Conv2d(2*inplanes, inplanes, kernel_size=1, stride=self.stride, bias=False)\n self.downsample = nn.Sequential(\n nn.Conv2d(2*inplanes, 4*inplanes, kernel_size=1, stride=self.stride, bias=False),\n nn.BatchNorm2d(4*inplanes)\n )\n elif first == True:\n self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size=1, stride=self.stride, bias=False)\n self.downsample = nn.Sequential(\n nn.Conv2d(inplanes, 4*inplanes, kernel_size=1, stride=self.stride, bias=False),\n nn.BatchNorm2d(4*inplanes)\n )\n else:\n self.conv1 = nn.Conv2d(4*inplanes, inplanes, kernel_size=1, stride=self.stride, bias=False)\n\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv2 = nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(inplanes)\n self.conv3 = nn.Conv2d(inplanes, 4*inplanes, kernel_size=1, stride=1, bias=False)\n self.bn3 = nn.BatchNorm2d(4*inplanes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.stride == 1:\n identity = x\n else:\n identity = self.downsample(x)\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass _resnet(nn.Module):\n\n def __init__(self, mode, block, layers, num_classes=1000):\n super(_resnet, self).__init__()\n self.mode = mode\n\n if self.mode == 'ImageNet':\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu 
= nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(block.expansion*512, num_classes)\n\n elif self.mode == 'CIFAR10':\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, layers[0])\n self.layer2 = self._make_layer(block, 32, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 64, layers[2], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(block.expansion*64, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n layers = [block(planes, stride=stride, first=True)]\n for _ in range(1, blocks):\n layers.append(block(planes))\n\n return nn.Sequential(*layers)\n\n def _forward_ImageNet(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def _forward_CIFAR10(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def forward(self, x):\n if self.mode == 'ImageNet':\n return self._forward_ImageNet(x)\n\n elif self.mode == 'CIFAR10':\n return self._forward_CIFAR10(x)\n\n\n# Model info\ncfgs = {\n # ImageNet Model\n 18 : ['ImageNet', BasicBlock_B, [2, 2, 2, 2]],\n 34 : ['ImageNet', BasicBlock_B, [3, 4, 6, 3]],\n 50 : ['ImageNet', Bottleneck, [3, 4, 6, 3]],\n 101 : ['ImageNet', Bottleneck, [3, 4, 23, 3]],\n 152 : ['ImageNet', Bottleneck, [3, 8, 36, 3]],\n\n # CIFAR-10 Model\n 20 : ['CIFAR10', BasicBlock_A, [3, 3, 3]],\n 32 : ['CIFAR10', BasicBlock_A, [5, 5, 5]],\n 44 : ['CIFAR10', BasicBlock_A, [7, 7, 7]],\n 56 : ['CIFAR10', BasicBlock_A, [9, 9, 9]],\n 110 : ['CIFAR10', BasicBlock_A, [18, 18, 18]]\n}\n\n\ndef resnet(depth, num_classes, pretrained):\n\n model = _resnet(mode=cfgs[depth][0], block=cfgs[depth][1], layers=cfgs[depth][2], num_classes=num_classes)\n arch = 'resnet'+str(depth)\n\n if pretrained and (num_classes == 1000) and (arch in pretrained_model_urls):\n state_dict = load_state_dict_from_url(pretrained_model_urls[arch], progress=True)\n model.load_state_dict(state_dict)\n elif pretrained:\n raise ValueError('No pretrained model in resnet {} model with class number {}'.format(depth, num_classes))\n\n return model\n","sub_path":"Implementation/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"84006917","text":"from flask import Blueprint, make_response, current_app\nfrom flask_wtf import csrf\n\nweb_html = Blueprint('web_html', __name__)\n\n@web_html.route(\"/\")\ndef get_html(html_name):\n if not html_name:\n # :5000/\n 
html_name = 'index.html'\n\n # 如果资源名不是favicon.ico\n if html_name != 'favicon.ico':\n html_name = 'html/' + html_name\n\n # 创建一个csrf_token的值\n csrf_token = csrf.generate_csrf()\n\n # flask提供的返回静态文件的方法\n resp = make_response(current_app.send_static_file(html_name))\n\n # 设置csrf_token的cookie\n resp.set_cookie('csrf_token', csrf_token)\n return resp\n","sub_path":"ihome/web_html.py","file_name":"web_html.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"376703568","text":"from keras.models import Sequential\n\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D,ZeroPadding2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.noise import GaussianDropout,GaussianNoise\nfrom keras.optimizers import SGD,RMSprop\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport pandas as pd\nimport numpy as np\n# from tqdm import tqdm\nfrom keras.utils import np_utils\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import roc_auc_score,accuracy_score\nfrom sklearn import svm\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier,RandomForestRegressor,GradientBoostingClassifier\nfrom keras.constraints import maxnorm\n# import cv2\nimport os\nfrom PIL import Image\nimport pandas as pd\nfrom pandas import DataFrame\nfrom collections import OrderedDict\n# from tqdm import tqdm\nimport pickle\nimport datetime\nimport dependent_initialization as DI\n#\n# def rotateImage(image, angle):\n# image_center = tuple(np.array(image[:,:,0].shape)/2)\n# rot_mat = cv2.getRotationMatrix2D(image_center,angle,1.0)\n# result = cv2.warpAffine(image, rot_mat, image[:,:,0].shape,flags=cv2.INTER_LINEAR)\n# return result\n# '''\n\n########################################### Label Conversion to One Hot Encoding ###############################################\ntrainPath = '/home/ankit/Desktop/dataset/'\npath = os.path.join(trainPath,'trainLabelsCIFAR_Kaggle.csv')\ntrain_df = pd.read_csv(path)\ntrain_labels = pd.get_dummies(train_df['label'])\n\nY_train = train_labels\nprint(\"Read and converted the Training Labels\")\n################################################################################################################################\n\n\n\n\ndef get_array(fname):\n img = load_img(fname,target_size=(128,128));\n x = img_to_array(img); \n x = x.transpose(2,1,0)\n x = x.reshape((1,3,128,128))\n x = x/255;\n return x;\n\n\ndr = dict()\npath = os.path.join('/media/sai/New Volume1/Practice/statefarm/data', 'driver_imgs_list.csv')\nprint('Read drivers data')\nf = open(path, 'r')\nline = f.readline()\nwhile (1):\n line = f.readline()\n if line == '':\n break\n arr = line.strip().split(',')\n dr[arr[2]] = arr[0];\nf.close()\n#print dr;\nunique_drivers = sorted(list(set(dr.values())))\nprint('Unique drivers: {}'.format(len(unique_drivers)))\nprint(unique_drivers)\nunique_list_train = ['p002', 'p012', 'p014', 'p015', 'p016', 'p021', 'p022', 'p024', 'p026', 'p035', 'p041', 'p042', \n 'p045', 'p047', 'p049', 'p050', 'p051', 'p052', 'p061', 'p064', 'p066', 'p072', 'p075', 'p081']\nunique_list_valid = ['p056','p039']\nprint('Train drivers: ', unique_list_train)\nprint('Test drivers: ', unique_list_valid) \npath = \"/media/sai/New 
Volume1/Practice/statefarm/images/train/c{0}/\"\n'''\nX_train = np.zeros((20925,3,128,128),dtype='float32');\nY_train = np.zeros((20925,1),dtype='int8');\nX_valid = np.zeros((1499,3,128,128),dtype='float32');\nY_valid = np.zeros((1499,1),dtype='int8');\nj = 0;\nk = 0;\n'''\nfor i in tqdm(range(10)):\n newPath = path.format(i);\n if os.path.exists(newPath): \n #j = j + len(os.listdir(newPath))\n for l in tqdm(os.listdir(newPath)):\n fname = newPath + l;\n Xtrain = get_array(fname);\n if dr[l] in unique_list_train:\n X_train[k,:,:,:] = Xtrain;\n Y_train[k] = i;\n k = k +1;\n elif dr[l] in unique_list_valid:\n X_valid[j,:,:,:] = Xtrain;\n Y_valid[j] = i;\n j = j +1;\n\nprint(j,k);\n#X_train = np.asarray(X_train);\n#Y_train = np.asarray(Y_train);\n#X_valid = np.asarray(X_valid);\n#Y_valid = np.asarray(Y_valid);\n'''\n#X_train = np.load(\"/media/sai/New Volume1/Practice/statefarm/trainx.npy\")\n#Y_train = np.load(\"/media/sai/New Volume1/Practice/statefarm/trainy.npy\")\n#X_valid = np.load(\"/media/sai/New Volume1/Practice/statefarm/validx.npy\")\n#Y_valid = np.load(\"/media/sai/New Volume1/Practice/statefarm/validy.npy\")\nprint \"done extracting data\"\nprint X_train.shape\nprint X_valid.shape\n#'''\nY_train = Y_train.reshape(len(Y_train),1);\nY_valid = Y_valid.reshape(len(Y_valid),1);\nY_train = np_utils.to_categorical(Y_train, 10);\nY_valid = np_utils.to_categorical(Y_valid, 10);\nact = 'relu'\n# print \"creating model\"\nmodel = Sequential()\n\nmodel.add(ZeroPadding2D((1,1),input_shape=(3,128,128)))\nmodel.add(Convolution2D(32, 3, 3, W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(32, 3, 3, W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(64, 3, 3, W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(128, 3, 3, W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(ZeroPadding2D((1,1)))\nmodel.add(Convolution2D(128, 3, 3, W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\nmodel.add(Flatten());\nmodel.add(Dense(4096,W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(4096,W_constraint = maxnorm(2)))\nmodel.add(Activation(act))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nsgd = SGD(lr=0.001, decay=1e-4, momentum=0.5, nesterov=True);\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd,metrics =[\"accuracy\"]);\nmodel = DI.initialize(model,\"/media/sai/New Volume1/Practice/statefarm/images/train/\")\ncheckpointer = ModelCheckpoint(filepath=\"/media/sai/New Volume1/Practice/statefarm/model_best/model-{epoch:02d}-{val_loss:.2f}.model\", verbose=1, save_best_only=True)\nmodel.fit(X_train,Y_train, batch_size=32, nb_epoch=100,validation_data=(X_valid,Y_valid),shuffle=True,callbacks=[checkpointer]);\nmodel.save_weights('/media/sai/New Volume1/Practice/statefarm/second_try.h5')\n#'''\n\n\n\n","sub_path":"batch_training.py","file_name":"batch_training.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"555994507","text":"import scrapy\n\nclass IntroSpider(scrapy.Spider):\n name = \"introduccion_spider_fybeca\"\n\n urls = [\n 
'https://www.fybeca.com/FybecaWeb/pages/search-results.jsf?cat=639&s=0&pp=25'\n ]\n\n def start_requests(self):\n for url in self.urls:\n yield scrapy.Request(url=url)\n\n def parse(self,response):\n etiqueta_contenedora = response.css(\n 'article.product_pod'\n )\n precio = response.xpath(\n '//*[@id=\"container-result\"]/div/ul/li[16]/div/div[1]/div/div[2]'\n )\n \n print(precio)\n\n\n\n","sub_path":"04 - scrapy/03-intro-spider/arania_basica/arania_basica/spiders/arania_fybeca.py","file_name":"arania_fybeca.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582647865","text":"import numpy as np\nfrom Preprocess_utils import camera_raw_data\nfrom Preprocess_utils import RFID_raw_data\nfrom Preprocess_utils import readHW\nfrom Preprocess_utils import Gradient\nimport CONFIG\nimport matplotlib.pyplot as plt\n\nclass Input():\n def __init__(self):\n self.rgb_dic = {}\n self.XYZ_dist_dic = {}\n self.rss_dic = {}\n self.phase_dist_dic = {}\n self.camera_ts = {}\n self.rfid_ts = {}\n self.rfid_velocity_ts = {}\n self.num_of_scen = 0\n self.rfid_velocity = {}\n return\n\n def add_from_npy(self, rgb_src, XYZ_dist_src, phase_dist_src, rss_src, camera_ts_src, rfid_ts_src,\n rfid_velocity_ts_src, rfid_velocity_src, id):\n self.rgb_dic[id] = np.load(rgb_src)\n self.XYZ_dist_dic[id] = np.load(XYZ_dist_src)\n self.phase_dist_dic[id] = np.load(phase_dist_src)\n self.rss_dic[id] = np.load(rss_src)\n self.camera_ts[id] = np.load(camera_ts_src)\n self.rfid_ts[id] = np.load(rfid_ts_src)\n self.rfid_velocity_ts[id] = np.load(rfid_velocity_ts_src)\n self.rfid_velocity[id] = np.load(rfid_velocity_src)\n self.num_of_scen += 1\n return\n\n def plots(self,id):\n camera_dis_list = np.array([])\n\n xy_list = readHW('labeled_gnome_nose')\n\n dis_list_head = []\n for i in range(len(xy_list)):\n dis_list_head.append(self.XYZ_dist_dic[id][i, xy_list[i][1], xy_list[i][0]])\n dis_list_head = np.array(dis_list_head)\n # print(dis_list_head)\n\n camera_dis_list = np.concatenate((camera_dis_list, dis_list_head))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][277:285, 200, 235]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][285:291, 234, 235]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][291:298, 270, 235]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][298:307, 305, 230]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][307:311, 340, 225]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][311:326, 320, 240]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][326:332, 290, 246]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][332:337, 250, 246]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][337:351, 230, 246]))\n camera_dis_list = np.concatenate((camera_dis_list, self.XYZ_dist_dic[id][351:, 220, 253]))\n\n t1 = self.camera_ts[id][0: 179]\n t2 = self.camera_ts[id][277:]\n t_XYZ = np.concatenate((t1, t2))\n plt.figure('distance')\n plt.plot(self.rfid_ts[id], self.phase_dist_dic[id])\n plt.plot(t_XYZ, camera_dis_list, 'orangered')\n\n plt.figure('velocity')\n plt.plot(self.rfid_velocity_ts[id], self.rfid_velocity[id])\n camera_velocity = Gradient(camera_dis_list, t_XYZ)\n plt.plot(t_XYZ[1:], camera_velocity, 'orangered')\n zero = np.zeros(len(self.rfid_velocity_ts[id]))\n 
plt.plot(self.rfid_velocity_ts[id], zero, 'black')\n plt.show()\n\ndef RawData_Save(color_subdir, XYZ_subdir, rfid_src, Tag_ID, save_dir):\n #load RFID raw data\n rfid_dist_smooth, rfid_velocity, rfid_dist_ts, velocity_ts, timestamp_bias, rss = RFID_raw_data(rfid_src, Tag_ID)\n\n\n #load camera data\n color_matrix, dismatrix, camera_ts = camera_raw_data(color_subdir, XYZ_subdir, timestamp_bias)\n\n np.save(save_dir + Tag_ID + '/' +'rfid_dist_smooth.npy',rfid_dist_smooth)\n np.save(save_dir + Tag_ID + '/' + 'rfid_velocity.npy',rfid_velocity)\n np.save(save_dir + Tag_ID + '/' + 'rfid_dist_ts.npy',rfid_dist_ts)\n np.save(save_dir + Tag_ID + '/' + 'velocity_ts.npy', velocity_ts)\n np.save(save_dir + Tag_ID + '/' + 'rss.npy', rss)\n np.save(save_dir + Tag_ID + '/' + 'color_matrix.npy', color_matrix)\n np.save(save_dir + Tag_ID + '/' + 'dismatrix.npy',dismatrix)\n np.save(save_dir + Tag_ID + '/' + 'camera_ts.npy',camera_ts)\n\nif __name__ == '__main__':\n #RawData_Save('./kinect/color/', './kinect/XYZmatrix/', './kinect/7.25.14.00.csv', CONFIG.TAG_1, './train_data/')\n\n input = Input()\n save_dir = './train_data/'\n Tag_ID = CONFIG.TAG_1\n rgb_src = save_dir + Tag_ID + '/' + 'color_matrix.npy'\n XYZ_dist_src = save_dir + Tag_ID + '/' + 'dismatrix.npy'\n phase_dist_src = save_dir + Tag_ID + '/' + 'rfid_dist_smooth.npy'\n rss_src = save_dir + Tag_ID + '/' + 'rss.npy'\n camera_ts_src = save_dir + Tag_ID + '/' + 'camera_ts.npy'\n rfid_ts_src = save_dir + Tag_ID + '/' + 'rfid_dist_ts.npy'\n rfid_velocity_ts_src = save_dir + Tag_ID + '/' + 'velocity_ts.npy'\n rfid_velocity_src = save_dir + Tag_ID + '/' + 'rfid_velocity.npy'\n input.add_from_npy(rgb_src, XYZ_dist_src, phase_dist_src, rss_src, camera_ts_src, rfid_ts_src,\n rfid_velocity_ts_src, rfid_velocity_src, 0)\n input.plots(0)\n","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"607040453","text":"import numpy as np\nimport cProfile\n\nfrom graph_storage.storage import GraphStorage\n\nNODES = 10000\n\n\ndef put():\n global adj, graph, mat_to_graph\n empty_props = {'props': {}}\n mat_to_graph = np.zeros(NODES, dtype=int)\n for i in range(NODES):\n mat_to_graph[i] = graph.create_node(empty_props)\n\n for i in range(NODES):\n for j in range(NODES):\n if adj[i][j] == 1:\n edge_prop = graph.create_node(empty_props)\n graph.create_edge(mat_to_graph[i], mat_to_graph[j], edge_prop)\n\n\ndef get():\n global graph, mat_to_graph\n for i in range(NODES):\n u = mat_to_graph[i]\n graph.get_node(u)\n graph.edges_from(u)\n\n\ndef remove():\n global graph, mat_to_graph\n for i in range(NODES):\n u = mat_to_graph[i]\n edges = graph.edges_from(u)\n for e in edges:\n graph.remove_edge(e)\n graph.delete_node(u)\n\n\ndef get_ids():\n global graph\n node_ids = 0\n for _ in graph.get_node_ids():\n node_ids += 1\n print(node_ids, 'node_ids')\n edge_ids = 0\n for _ in graph.get_edge_ids():\n edge_ids += 1\n print(edge_ids, 'edge_ids')\n\ngraph = None\nadj = None\nmat_to_graph = None\n\n\ndef setup():\n global adj, graph\n edge_prob = 0.005\n adj = np.random.rand(NODES, NODES)\n adj[adj > 1 - edge_prob] = 1\n adj[adj <= 1 - edge_prob] = 0\n print(len(adj[adj == 1]), 'edges,', NODES, 'nodes')\n graph = GraphStorage(True, 'test')\n\n\nif __name__ == '__main__':\n setup()\n print('Profiling put()...')\n cProfile.run('put()', sort='cumtime')\n print('Profiling get_ids()...')\n cProfile.run('get_ids()', sort='cumtime')\n 
print('Profiling get()...')\n cProfile.run('get()', sort='cumtime')\n print('Profiling remove()...')\n cProfile.run('remove()', sort='cumtime')\n","sub_path":"graph_storage/graphstorage_times.py","file_name":"graphstorage_times.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"396443323","text":"from flask_restful import Resource, reqparse\nfrom db import query, connectToHost, encode\nimport base64\nimport pymysql\nfrom flask_jwt_extended import jwt_required\n\nclass ViewMessages(Resource):\n \n @jwt_required\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('roll', type=str, required=True, help=\"roll cannot be left blank!\")\n data = parser.parse_args()\n\n # a transaction is made, so not using query function from db module\n # we use connectToHost function from db module and commit explicitly\n # the query function from db module commits for each query which is not desirable in \n # a transaction sequence as follows.\n # here we execute several queries then commit.\n try:\n connection = connectToHost()\n\n #start connection, create cursor and execute query from cursor\n connection.begin()\n cursor = connection.cursor()\n\n \n\n qstr = f\"\"\"\n select Message.roll, message, Message.cid, place, time_c\n from Message join Complaints\n on Message.cid = Complaints.cid\n where Message.roll = '{data['roll']}'\n order by cid desc;\n \"\"\"\n\n cursor.execute(qstr) \n result = encode(cursor.fetchall())\n\n qstr = f\"\"\"\n delete from Unopened\n where roll = '{data['roll']}';\n \"\"\"\n\n cursor.execute(qstr) \n \n connection.commit() #commit the changes made\n \n #close the cursor and connection\n cursor.close()\n connection.close() \n\n except (pymysql.err.InternalError, pymysql.err.ProgrammingError, pymysql.err.IntegrityError) as e:\n return {\n \"message\" : \"MySQL error: \" + str(e)\n }, 500\n except Exception as e:\n return {\n \"message\" : \"There was an error connecting.\" + str(e)\n }, 500\n \n return result, 200","sub_path":"antirag-api/resources/view_messages.py","file_name":"view_messages.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"107765030","text":"from set_mark.start import start\n\nimport re\nimport html\nimport sqlite3\nfrom urllib import parse\nimport time\nimport threading\n\ndef load_conn2(data):\n global conn\n global curs\n\n conn = data\n curs = conn.cursor()\n\ndef send_parser(data):\n data = html.escape(data)\n \n javascript = re.compile('javascript:', re.I)\n \n data = javascript.sub('', data)\n data = re.sub('<a href=\"(?:[^\"]*)\">(?P(?:(?!<).)*)<\\/a>', '').replace('/','%2F') + '\">\\g', data) \n \n return data\n \ndef plusing(name, link, backtype):\n curs.execute(\"select title from back where title = ? and link = ? 
and type = ?\", [link, name, backtype])\n if not curs.fetchall():\n curs.execute(\"insert into back (title, link, type) values (?, ?, ?)\", [link, name, backtype])\n\ndef namumark(title, data, num, lang):\n data = start(conn, data, title, lang)\n if num == 1:\n i = 0\n while 1:\n try:\n _ = data[2][i][0]\n except:\n break\n\n thread_start = threading.Thread(target = plusing, args = [data[2][i][0], data[2][i][1], data[2][i][2]])\n thread_start.start()\n thread_start.join()\n\n i += 1\n\n conn.commit()\n \n return data[0] + data[1]","sub_path":"mark.py","file_name":"mark.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401191768","text":"#!/usr/bin/python\n# coding=utf-8\n\nfrom flask.json import JSONEncoder\nfrom .models import User, Pet, Question, Vote\nfrom .exceptions import *\n\nclass PoapiJSONEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, User):\n return{\n 'id':obj.id,\n 'username':obj.username,\n 'email':obj.email,\n 'job':obj.job\n }\n\n if isinstance(obj, Exception):\n return {\n 'message':obj.message\n }\n\n if isinstance(obj, Question):\n return {\n 'id':obj.id,\n 'content':obj.content,\n 'chapter_id':obj.chapter_id,\n 'votes': obj.votes\n }\n\n if isinstance(obj, Vote):\n return {\n 'question_id':obj.question_id,\n 'user_id':obj.user_id\n }\n\n return super(PoapiJSONEncoder, self).default(obj)\n","sub_path":"app/json_encoder.py","file_name":"json_encoder.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622428436","text":"\"\"\"doc\"\"\"\n\n\ndef utf_len(string: bytes) -> int:\n \"\"\"convert\"\"\"\n count = 0\n for byte in string:\n # ascii chars\n if byte & 0b10000000 == 0b000:\n count += 1\n # others\n elif byte & 0b01000000 == 0b01000000:\n count += 1\n\n return count\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504008233","text":"from sys import stdin\nfrom collections import deque\nn, m = map(int, stdin.readline().split())\ncheck = [False for _ in range(n+1)]\narr = deque()\n\ndef find(count):\n if count == m:\n print(' '.join(map(str, arr)))\n return\n for i in range(1, n+1):\n if check[i] == False:\n check[i] = True\n arr.append(i)\n find(count + 1)\n check[i] = False\n arr.pop()\nfind(0)","sub_path":"baekjoon/6주차/15649-N과-M-1/우섭/15649.py","file_name":"15649.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46418251","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2013 DNAnexus, Inc.\n#\n# This file is part of reads_fastq_exporter.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os, sys, tempfile\nimport dxpy\n\n@dxpy.entry_point('main')\ndef main(**kwargs):\n from dxpy.scripts import dx_reads_to_fastq\n\n kwargs['output'] = tempfile.NamedTemporaryFile(prefix='reads_to_fastq_1_', suffix='.fastq', delete=False).name\n kwargs['output2'] = tempfile.NamedTemporaryFile(prefix='reads_to_fastq_2_', suffix='.fastq', delete=False).name\n os.unlink(kwargs['output2']) # dx_reads_to_fastq will create it anew, we detect paired reads by its presence\n\n dx_reads_to_fastq.main(**kwargs)\n\n if kwargs['output_FASTA']:\n ext = '.fa'\n else:\n ext = '.fq'\n\n if 'name' in kwargs:\n out_name = kwargs['name']\n else:\n out_name = table = dxpy.DXGTable(kwargs['reads_table']).describe()['name']\n\n try:\n fq_file = dxpy.upload_local_file(filename = kwargs['output'], keep_open=True)\n fq_file.rename( out_name + \"_export\" + ext )\n details = fq_file.get_details()\n details['original_readstable'] = kwargs['reads_table']\n fq_file.set_details( details )\n fq_file.close()\n except Exception as e:\n raise\n raise dxpy.AppError(\"Error uploading exported file to system: \" + str(e))\n outs = [dxpy.dxlink(fq_file.get_id())]\n\n if os.path.exists(kwargs['output2']):\n try:\n fq_file2 = dxpy.upload_local_file(filename = kwargs['output2'], keep_open=True)\n fq_file2.rename( out_name + \"_export_2\" + ext )\n details = fq_file2.get_details()\n details['original_readstable'] = kwargs['reads_table']\n fq_file2.set_details( details )\n fq_file2.close()\n except:\n raise\n raise dxpy.AppError(\"Error uploading exported file to system: \" + str(e))\n\n outs.append(dxpy.dxlink(fq_file2.get_id()))\n\n return {'fastq': outs}\n\ndxpy.run()\n","sub_path":"readsToFASTQ.py","file_name":"readsToFASTQ.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525988152","text":"\nimport os\nimport json\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.tensorrt as trt ## WARNNING: must load when do prediction!!!!\n\n\"\"\"\n@time: 2018-10-07 19:27\n@platform: vim\n@author: YunYang1994\n@email: yyang@nullmax.ai\n\"\"\"\n################################################################################\n# Run this script\n# => Sub-Graph Optimizations within TensorFlow\n# Requirement: tensorrt version ==> '4.0.1.6'\n################################################################################\n\n_RESIZE_MIN = 256\n_R_MEAN = 123.68 # ADJUST\n_G_MEAN = 116.78 # ADJUST\n_B_MEAN = 103.94 # ADJUST\n_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]\nmeans = np.expand_dims(np.expand_dims(_CHANNEL_MEANS, 0), 0)\ntf.logging.set_verbosity(tf.logging.INFO)\n\nclass Trtmodel(object):\n \"\"\"\n ==> apply TensorRT optimizations to the frozen graph\n \"\"\"\n\n def __init__(self, pb_file, labels_file, input_node, output_node):\n \"\"\"\n Args:\n pb_file: The location of a Frozen Graph.\n labels_file: json file, {id: label}\n input_node: The name of the graph input node.\n output_node: The names of the graph output node.\n \"\"\"\n\n assert pb_file.endswith(\".pb\")\n self.pb_file = pb_file\n self.pb_name = os.path.basename(self.pb_file)[:-3]\n self.INPUT_NODE = input_node\n self.OUTPUT_NODE = output_node\n\n with open(labels_file, 'r') as labels_file:\n self.labels = json.load(labels_file)\n\n # with tf.gfile.FastGFile(self.pb_file, 'rb') as f:\n # self.frozen_graph_def = tf.GraphDef()\n # 
self.frozen_graph_def.ParseFromString(f.read())\n self.frozen_graph_def = self.get_frozen_graph(self.pb_file)\n\n def get_frozen_graph(self, pb_file):\n \"\"\"Read Frozen Graph file from disk.\"\"\"\n with tf.gfile.FastGFile(pb_file, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n return graph_def\n\n def optimize(self, output_dir, mode):\n\n \"\"\"\n Args:\n output_dir: The location of a optimized graph directory\n mode: benchmark the model with TensorRT\n \"\"\"\n\n assert mode in [\"FP16\", \"FP32\", \"INT8\"]\n graph_def = trt.create_inference_graph(input_graph_def=self.frozen_graph_def,\n outputs=[self.OUTPUT_NODE],\n max_batch_size=1,\n max_workspace_size_bytes=1 << 32,\n precision_mode=mode)\n\n output_path = os.path.join(output_dir, self.pb_name+\"-{}.pb\".format(mode.lower()))\n\n with tf.gfile.GFile(output_path, 'wb') as f:\n f.write(graph_def.SerializeToString())\n return graph_def\n\n def _smallest_size_at_least(self, height, width, resize_min):\n resize_min = tf.cast(resize_min, tf.float32)\n height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)\n\n smaller_dim = tf.minimum(height, width)\n scale_ratio = resize_min / smaller_dim\n\n new_height = tf.cast(height * scale_ratio, tf.int32)\n new_width = tf.cast(width * scale_ratio, tf.int32)\n\n return new_height, new_width\n\n def _resize_image(self, image, height, width):\n return tf.image.resize_images(\n image, [height, width], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)\n\n def _aspect_preserving_resize(self, image, resize_min):\n shape = tf.shape(image)\n height, width = shape[0], shape[1]\n\n new_height, new_width = self._smallest_size_at_least(height, width, resize_min)\n\n return self._resize_image(image, new_height, new_width)\n\n def _central_crop(self, image, crop_height, crop_width):\n shape = tf.shape(image)\n height, width = shape[0], shape[1]\n\n amount_to_be_cropped_h = (height - crop_height)\n crop_top = amount_to_be_cropped_h // 2\n amount_to_be_cropped_w = (width - crop_width)\n crop_left = amount_to_be_cropped_w // 2\n return tf.slice(image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])\n\n def preprocess_image(self, image_file, num_channels, output_height, output_width):\n\n image_buffer = tf.read_file(image_file)\n image = tf.image.decode_jpeg(image_buffer, channels=num_channels)\n image = self._aspect_preserving_resize(image, _RESIZE_MIN)\n print(image, output_height, output_width)\n image = self._central_crop(image, output_height, output_width)\n\n image.set_shape([output_height, output_width, num_channels])\n image = image - means\n\n with tf.Session() as sess:\n return sess.run([image])[0]\n\n def predict(self, image_file, verbose=True, num_loops=100):\n\n import time\n\n image_data = self.preprocess_image(image_file, 3, 224, 224)\n image_data = np.tile(image_data, [1, 1, 1, 1])\n\n tf.reset_default_graph()\n graph = tf.Graph()\n\n # tf.logging.info(\"Starting execution\")\n with graph.as_default():\n images = tf.placeholder(\"float\", [1, 224, 224, 3])\n return_tensors = tf.import_graph_def(graph_def=self.frozen_graph_def,\n input_map={self.INPUT_NODE: images},\n return_elements=[self.OUTPUT_NODE])\n\n # Unwrap the returned output node. 
For now, we assume we only\n # want the tensor with index `:0`, which is the 0th element of the\n # `.outputs` list.\n output = return_tensors[0].outputs[0]\n\n with tf.Session(graph=graph) as sess:\n tf.logging.info(\"Starting Warmup cycle\")\n for _ in range(10): sess.run([output], feed_dict={images: image_data})\n\n tf.logging.info(\"Starting timing.\")\n timing = []\n for _ in range(num_loops):\n start = time.time()\n embeddings = sess.run([output], feed_dict={images:image_data})\n result = [self.labels[str(np.argmax(embedding[0]))] for embedding in embeddings]\n timing.append(time.time()-start)\n\n tf.logging.info(\"Timing loop done!\")\n speed = 1/np.array(timing) # BATCH_SIZE = 1\n t_max, t_min, t_mean, t_std = max(speed), min(speed), speed.mean(), speed.std()\n print(\"=> prediction: {}\".format(result))\n print(\"=> Frame Per Second info: max {:.2f} fps, min {:.2f} fps, mean {:.2f} fps, std {:.2f} fps\"\\\n .format(t_max, t_min, t_mean, t_std))\n\n return result\n\nif __name__ == \"__main__\":\n pass\n # from scipy import misc\n ### TODO\n # #----------------------------- Op type registered -------------------------#\n # ### => contrib ops are lazily registered when the module is first accessed.\n # ### => first registered process!\n # ### => Sub-Graph Optimizations within TensorFlow\n\n # mod = \"FP16\"\n # model = Trtmodel(\"./model/resnetv2.pb\", \"./data/labellist.json\", \"input_tensor\", \"softmax_tensor\")\n # model.optimize(\"./model\", mod)\n # model = Trtmodel(\"./model/resnetv2-fp16.pb\", \"./data/labellist.json\", \"input_tensor\", \"softmax_tensor\")\n # image_data = misc.imread(\"./data/image.jpg\")\n # image_data = np.tile(image_data, [1, 1, 1, 1])\n # model.predict(image_data)\n # #----------------------------- Op type registered -------------------------#\n","sub_path":"tftensort.py","file_name":"tftensort.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66788525","text":"\"\"\" Defines the User repository \"\"\"\n\nfrom models import Notation\nfrom models import Film\nfrom models import db\n\nfrom sqlalchemy import func\n\nclass RechercheRepository:\n \"\"\"The repository for the notation average truc\"\"\"\n @staticmethod\n def get(research):\n films=db.session.query(Film.title).all()\n recherche=[]\n for film in films :\n if research in film[0]:\n avg=db.session.query(func.avg(Notation.note)).filter_by(movie_title=film[0]).group_by(Notation.movie_title).one()\n recherche.append([(film,avg)])\n L=[]\n for i in recherche :\n l=[i[0][0][0],float(i[0][1][0])]\n L.append(l)\n return L\n","sub_path":"server/src/repositories/recherche.py","file_name":"recherche.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483556521","text":"from application import *\r\nfrom vec2d import vec2d\r\n\r\nSimulationContextId=\"simulation\"\r\nDebugPaneContextId=\"debugpane\"\r\n\r\nclass AddObjectNotif:\r\n def __init__(self,object):\r\n self.object=object\r\nclass AddPlayerNotif(AddObjectNotif): \r\n pass \r\nclass RemoveObjectNotif:\r\n def __init__(self,object):\r\n self.object=object \r\n \r\nclass ModelPrototypeV(AbstractAnimatedView):\r\n def __init__(self,model,contextId):\r\n AbstractAnimatedView.__init__(self, model, contextId)\r\n try:\r\n self.pos=self.model.pos \r\n except AttributeError:\r\n self.pos=(0,0)\r\n \r\n def Build(self): \r\n AbstractAnimatedView.Build(self)\r\n 
#remove animation\r\n self.ClearFrames()\r\n #create resources \r\n surfaces= self.model.prototype.LoadRessources() \r\n #animation enabled\r\n for s in surfaces: \r\n bb=s.get_rect()\r\n pos=(self.pos[0]-bb.width/2, self.pos[1]-bb.height/2)\r\n self.pick=s.get_rect()\r\n self.pick.topleft=pos\r\n self.DrawPrimitive(pygame.Surface.blit, (s, pos))\r\n #self.DrawPrimitive(pygame.draw.rect, ((255,0,255), self.pick,1))\r\n self.PushFrame()\r\n\r\nclass SwitchContextIdNotif(AddObjectNotif):\r\n def __init__(self,contextId):\r\n self.contextId=contextId\r\n\r\nclass GameV(AbstractView):\r\n def __init__(self,model,contextId):\r\n AbstractView.__init__(self,model,contextId)\r\n \r\n def Build(self): \r\n self.teamchilds=[t.BuildView(DebugPaneContextId) for t in self.model.teams]\r\n self.otherchilds=[self.model.map.BuildView(self.contextId)] \r\n self.otherchilds+=[t.BuildView(self.contextId) for t in self.model.toolpalettes if self.contextId in t.prototype.interfaces] \r\n self.childs=self.teamchilds+self.otherchilds\r\n \r\n def ProcessModelNotif(self, notif):\r\n if isinstance(notif,ModelUpdateNotif):\r\n self.Build()\r\n elif isinstance(notif,SwitchContextIdNotif):\r\n self.contextId=notif.contextId\r\n self.Build()\r\n def Draw(self,surface):\r\n AbstractView.Draw(self,surface)\r\n if self.model.DelayedTasksQueue: \r\n sw,sh=surface.get_size()\r\n rw=.8\r\n rh=.1\r\n #white\r\n percent=1.-float(len(self.model.DelayedTasksQueue))/self.model.NbDelayedTasks\r\n r=pygame.Rect(sw*(.5-rw/2.),sh*(.5-rh/2.),sw*rw*percent,sh*rh)\r\n pygame.draw.rect(surface,(150,150,150),r,0)\r\n #black\r\n r=pygame.Rect(sw*(.5-rw/2.)+sw*rw*percent,sh*(.5-rh/2.),sw*rw*(1.-percent),sh*rh)\r\n pygame.draw.rect(surface,(0,0,0),r,0)\r\n #border\r\n r=pygame.Rect(sw*(.5-rw/2.),sh*(.5-rh/2.),sw*rw,sh*rh)\r\n pygame.draw.rect(surface,(255,255,255),r,3)\r\n \r\nclass PlayerV(AbstractSpriteView):\r\n def __init__(self,model,contextId):\r\n AbstractSpriteView.__init__(self,model,contextId)\r\n self.surface=self.model.prototype.LoadRessources()[0]\r\n def GetPosition(self):\r\n return self.model.fcoord*64\r\n def GetAngle(self):\r\n return -self.model.angle\r\n def Draw(self,surface):\r\n AbstractSpriteView.Draw(self,surface)\r\n self.model.iaplayer.Draw(surface)\r\n def GetSurface(self):\r\n return self.surface\r\n \r\n\r\n\r\nclass TransientPrototypeSpriteV(AbstractSpriteView):\r\n def __init__(self, model, contextId):\r\n AbstractSpriteView.__init__(self, model, contextId)\r\n self.surface=self.model.prototype.LoadRessources()\r\n def GetPosition(self):\r\n return self.model.pos\r\n def GetAngle(self):\r\n return self.model.angle\r\n def GetSurface(self):\r\n return self.surface[self.GetFrame()] \r\n def GetFrame(self):\r\n if self.model.elapsed>=self.model.total: return len(self.surface)-1\r\n return int(self.model.elapsed*len(self.surface)/float(self.model.total))\r\n\r\nclass RemoveAllPlayersNotif:\r\n pass\r\n\r\nclass MapV(AbstractCompiledView):\r\n def Build(self):\r\n AbstractCompiledView.Build(self)\r\n self.childs=[c.BuildView(self.contextId) for r in self.model.map for c in r]\r\n if self.contextId==DefaultContextId:\r\n self.childs+=[o.BuildView(self.contextId) for o in self.model.objects]\r\n self.childs+=[o.BuildView(self.contextId) for o in self.model.transientobjects] \r\n self.area=pygame.Rect(10,40,64*self.model.size[0],64*self.model.size[1]) \r\n self.pick=pygame.Rect(0, 0, self.area.width,self.area.height)\r\n \r\n \r\n def ProcessModelNotif(self, notif):\r\n if notif.__class__ is ModelUpdateNotif:\r\n 
self.Build()\r\n elif notif.__class__ is AddObjectNotif:\r\n self.childs.append(notif.object.BuildView(self.contextId)) \r\n elif notif.__class__ is RemoveObjectNotif:\r\n self.childs=filter(lambda c:c.model!=notif.object,self.childs)\r\n \r\n \r\nclass CellSimulationV(ModelPrototypeV):\r\n def __init__(self, model,contextId): \r\n ModelPrototypeV.__init__(self, model,contextId) \r\n def Build(self): \r\n m=self.model \r\n #self.area=pygame.Rect(self.model.pos[0]-32,self.model.pos[1]-32, 64, 64) \r\n ModelPrototypeV.Build(self)\r\n l=[((-1, 0),((255,255,255),(m.pos[0]-32+ 0,m.pos[1]-32+ 0),(m.pos[0]-32+ 0,m.pos[1]-32+64),2)),\r\n (( 1, 0),((255,255,255),(m.pos[0]-32+62,m.pos[1]-32+ 0),(m.pos[0]-32+62,m.pos[1]-32+64),2)),\r\n (( 0, 1),((255,255,255),(m.pos[0]-32+ 0,m.pos[1]-32+62),(m.pos[0]-32+64,m.pos[1]-32+62),2)),\r\n (( 0,-1),((255,255,255),(m.pos[0]-32+ 0,m.pos[1]-32+ 0),(m.pos[0]-32+64,m.pos[1]-32+ 0),2))]\r\n map=self.model.map\r\n if not self.model.prototype.walk:\r\n for d,arg in l: \r\n if map.IsWalk(self.model.coord+d):\r\n self.DrawPrimitiveOnAllFrames(pygame.draw.line,arg) \r\n\r\nclass CellEditionV(ModelPrototypeV):\r\n def __init__(self, model,contextId): \r\n ModelPrototypeV.__init__(self, model,contextId) \r\n self.so=GetApplication().game.prototypes['celldebug-so'].LoadRessources()\r\n def Build(self):\r\n ModelPrototypeV.Build(self)\r\n m=self.model \r\n if m.prototype.solid==1: \r\n self.DrawPrimitiveOnAllFrames(pygame.Surface.blit, (self.so[0], (m.pos[0]-10,m.pos[1]))) \r\n if m.prototype.transparent==0: \r\n self.DrawPrimitiveOnAllFrames(pygame.Surface.blit, (self.so[1], (m.pos[0]+10,m.pos[1])))\r\n\r\nclass PlayerCornerV(AbstractCompiledView):\r\n def __init__(self,model,contextId):\r\n AbstractCompiledView.__init__(self,model,contextId)\r\n self.pos=0\r\n self.font=GetApplication().game.prototypes['teamconrner-font'].LoadRessources()[0]\r\n\r\n def Build(self):\r\n AbstractCompiledView.Build(self) \r\n self.ClearPrimitives() \r\n if self.pos: \r\n weapon_name=[\"Pistol\",\"Shotgun\",\"Chaingun\",\"Shrink\",\"Rpg\"]\r\n color=[(120,120,120),(255,255,255)]\r\n y=self.pos[1]\r\n y=self.DrawText((self.pos[0],y),\"Health:%d%% Armour:%d%%\"%(self.model.hp,self.model.armour),self.font,(255,255,255))\r\n for i in range(0,len(weapon_name)):\r\n self.DrawText((self.pos[0],y),\"%s:\"%(weapon_name[i]),self.font,color[self.model.weapons[i]])\r\n y=self.DrawText((self.pos[0]+65,y),\"%d/%d\"%(self.model.ammo[i],self.model.maxammo[i]),self.font,color[self.model.weapons[i]])\r\n text=\"Pistol:%d/%d\"\r\n \r\n\r\n \r\nclass StatUpdateNotif: \r\n pass\r\n \r\nclass TeamCornerV(AbstractCompiledView): \r\n def __init__(self,model,contextId):\r\n AbstractCompiledView.__init__(self,model,contextId) \r\n i = self.model.game.teams.index(self.model)\r\n n = len(self.model.game.teams)\r\n pad=5\r\n xsize=1200\r\n ysize=900\r\n ypos=50+64*10\r\n self.area=pygame.Rect(xsize/n*i+pad,ypos,xsize/n-2*pad,ysize-pad-ypos)\r\n self.pick=pygame.Rect(self.area)\r\n self.pick.topleft=(0,0) \r\n self.childs_uptodate=False\r\n self.stat_uptodate=False\r\n self.font=GetApplication().game.prototypes['teamconrner-font'].LoadRessources()[0]\r\n def ProcessModelNotif(self, notif):\r\n if isinstance(notif, ModelUpdateNotif):\r\n self.childs_uptodate=False\r\n elif isinstance(notif, StatUpdateNotif):\r\n self.stat_uptodate=False\r\n \r\n def Build(self):\r\n AbstractCompiledView.Build(self) \r\n if self.childs_uptodate==False or self.stat_uptodate==False:\r\n self.ClearPrimitives() \r\n 
self.DrawPrimitive(pygame.draw.rect, ((255,0,255), self.pick,1)) \r\n \r\n text=\"\"\"Spawn count:%(nbspawn)d\\nFrag count:%(nbfrag)d\\nShot:%(shot)d\\nPrecision: %(precision)2.2d%%\"\"\"%self.model.__dict__ \r\n y = self.DrawText((10,10),text,self.font)\r\n if self.childs_uptodate==False:\r\n x=10 \r\n self.childs=[self.model.face.BuildView(self.contextId)]\r\n for c in self.model.players:\r\n v=c.BuildView(self.contextId)\r\n v.pos=(x,y+10)\r\n v.Build()\r\n self.childs.append(v)\r\n x+=100\r\n self.childs_uptodate=True\r\n self.stat_uptodate=True\r\n \r\n \r\n \r\n def Draw(self,surface):\r\n if self.childs_uptodate==False or self.stat_uptodate==False: self.Build()\r\n AbstractCompiledView.Draw(self, surface)\r\n \"\"\"if self.area: surface=surface.subsurface(self.area)\r\n surface.fill((0,0,0))\r\n for fn, arg in self.primitives:\r\n fn(surface,*arg) \r\n for c in self.childs:\r\n c.Draw(surface) \"\"\" \r\n \r\n \r\n \r\n \r\n \r\n\r\nclass TransientPrototypeV(AbstractMultiframeView):\r\n def Build(self):\r\n AbstractMultiframeView.Build(self)\r\n #remove animation\r\n self.ClearFrames()\r\n #create resources \r\n surfaces= self.model.prototype.LoadRessources()\r\n if(self.model.angle):\r\n surfaces=[pygame.transform.rotate(c, self.model.angle) for c in surfaces]\r\n #animation enabled\r\n for s in surfaces: \r\n p0=self.model.pos-vec2d(s.get_rect().size)*.5 \r\n p0=(int(p0[0]),int(p0[1]))\r\n self.pick=s.get_rect()\r\n self.pick.topleft=p0\r\n self.DrawPrimitive(pygame.Surface.blit,(s,p0)) \r\n self.PushFrame() \r\n def GetFrame(self):\r\n return int(self.model.elapsed*(len(self.frames)-1)/float(self.model.total))\r\n \r\nclass BulletImpactV(TransientPrototypeV):\r\n def Build(self):\r\n TransientPrototypeV.Build(self) \r\n self.GotoFrame(0)\r\n self.DrawPrimitive(pygame.draw.line,((200,200,200),self.model.pos,self.model.p1,2))\r\n \r\nclass GoodV(ModelPrototypeV): \r\n def Draw(self,surface):\r\n if not self.model.remain:\r\n ModelPrototypeV.Draw(self, surface)\r\n \r\n \r\n \r\n ","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"130492568","text":"import threading\nimport logging\nimport psycopg2\nimport dejimautils\nimport os\nimport json\nimport requests\n\n# params_dict for execution thread for base table\n# \"sql_statements\" : sql statements for base table\n#\n# params_dict for execution thread for dejima view\n# \"update_json\" : update contents for dejima view. 
json format str.\n\nclass ExecutionThread(threading.Thread):\n def __init__(self, conn, lock):\n threading.Thread.__init__(self)\n self.conn = conn\n self.lock = lock\n\n def run(self):\n logging.info(\"ExecutionThread : start\")\n req = self.conn.recv(1024).decode()\n logging.info(\"request : {}\".format(req))\n header, message_body = req.split(\"\\r\\n\\r\\n\")\n url = header.split()[1]\n params_dict = json.loads(message_body)\n\n my_peer_name = os.environ['PEER_NAME']\n\n # switch statements for url\n if url == \"/lock\":\n if self.lock[\"lock\"] == False:\n self.lock[\"lock\"] = True\n self.lock[\"holder\"] = params_dict[\"holder\"]\n logging.info(\"Accept Lock Request.\")\n self.conn.send(\"HTTP/1.1 200 OK\".encode())\n else:\n logging.info(\"Request Blocked.\")\n self.conn.send(\"HTTP/1.1 423 Locked\".encode())\n\n self.conn.close()\n\n elif url == \"/unlock\":\n if self.lock[\"holder\"] == params_dict[\"holder\"]:\n self.lock[\"lock\"] = False\n self.lock[\"holder\"] = None\n logging.info(\"Unlocked.\")\n self.conn.send(\"HTTP/1.1 200 OK\".encode())\n self.conn.close()\n\n elif url == \"/exec_transaction\":\n if self.lock[\"lock\"] == True:\n self.conn.send(\"HTTP/1.1 423 Locked\".encode())\n self.conn.close()\n exit()\n else:\n self.lock[\"lock\"] = True\n self.lock[\"holder\"] = my_peer_name\n\n result = dejimautils.global_locking()\n if result == False:\n logging.info(\"couldn't get all locks. Release all locks and end this thread.\")\n dejimautils.global_unlocking()\n self.conn.send(\"HTTP/1.1 423 Locked\".encode())\n self.conn.close()\n self.lock[\"lock\"] = False\n self.lock[\"holder\"] = None\n exit()\n\n logging.info(\"execute update for dejima view ...\")\n dejima_setting = {}\n with open(\"/proxy/dejima_setting.json\") as f:\n dejima_setting = json.load(f)\n\n child_result = [] \n child_conns = []\n\n db_conn = psycopg2.connect(\"dbname=postgres user=dejima password=barfoo host={}-postgres port=5432\".format(my_peer_name))\n with db_conn.cursor() as cur:\n # note : in psycopg2, transaction is valid as default, so no need to exec \"BEGIN;\"\n try:\n # phase1 : execute update for base table.\n cur.execute(params_dict[\"sql_statements\"])\n\n # phase 1' : take a ticket\n cur.execute(\"UPDATE ticket set value=0 WHERE value=0\")\n\n # phase2 : detect update for other dejima view and member of the view.\n dv_set_for_propagate = set(dejima_setting[\"dejima_view\"][my_peer_name])\n\n # phase 3 : propagate update for child peer\n if dv_set_for_propagate:\n # phase3-2 : propagate dejima view update\n thread_list = []\n for dv_name in dv_set_for_propagate:\n cur.execute(\"SELECT non_trigger_{}_detect_update();\".format(dv_name))\n update_json, *_ = cur.fetchone()\n for peer_name in dejima_setting[\"peer_member\"][dv_name]:\n if peer_name != my_peer_name:\n t = threading.Thread(target=dejimautils.send_json_for_child, args=(update_json, peer_name, child_result, child_conns))\n t.start()\n thread_list.append(t)\n\n logging.info(\"wait ack from child\")\n for thread in thread_list:\n thread.join()\n ack = True\n except psycopg2.Error as e:\n logging.info(\"error: {}\".format(e))\n logging.info(\"Execption occurs. 
Abort start.\")\n ack = False\n\n # check ack/nak from children.\n commit_or_abort = \"commit\"\n for result in child_result:\n if result != \"200\" :\n commit_or_abort = \"abort\"\n if ack == False:\n commit_or_abort = \"abort\"\n\n # phase 7 : commit or abort\n if commit_or_abort == \"commit\":\n db_conn.commit()\n for s in child_conns:\n s.sendall(\"commit\".encode())\n s.close()\n logging.info(\"execution thread finished : commit\")\n elif commit_or_abort == \"abort\":\n db_conn.rollback()\n for s in child_conns:\n s.sendall(\"abort\".encode())\n s.close()\n logging.info(\"execution thread finished : abort\")\n\n self.conn.send(\"HTTP/1.1 200 OK\".encode())\n db_conn.close()\n self.conn.close()\n\n self.lock[\"lock\"] = False\n self.lock[\"holder\"] = None\n\n elif url == \"/update_dejima_view\":\n\n logging.info(\"execute update for base table...\")\n dejima_setting = {}\n with open(\"/proxy/dejima_setting.json\") as f:\n dejima_setting = json.load(f)\n view_name, sql_for_dejima_view = dejimautils.convert_to_sql_from_json(params_dict[\"view_update\"])\n view_name = view_name.replace(\"public.\", \"\")\n\n child_result = [] \n child_conns = []\n\n db_conn = psycopg2.connect(\"dbname=postgres user=dejima password=barfoo host={}-postgres port=5432\".format(my_peer_name))\n with db_conn.cursor() as cur:\n try:\n # phase1 : execute update for certain dejima view\n cur.execute(sql_for_dejima_view)\n\n # phase2 : detect update for other dejima view and member of the view.\n dv_set_for_propagate = set(dejima_setting[\"dejima_view\"][my_peer_name])\n dv_set_for_propagate = dv_set_for_propagate - { view_name }\n\n # phase 3 : propagate update for child peer\n if dv_set_for_propagate:\n # phase3-2 : propagate dejima view update\n thread_list = []\n for dv_name in dv_set_for_propagate:\n cur.execute(\"SELECT non_trigger_{}_detect_update();\".format(dv_name))\n update_json, *_ = cur.fetchone()\n for peer_name in dejima_setting[\"peer_member\"][dv_name]:\n if peer_name != my_peer_name:\n t = threading.Thread(target=dejimautils.send_json_for_child, args=(update_json, peer_name, child_result, child_conns))\n t.start()\n thread_list.append(t)\n logging.info(\"wait ack from child\")\n for thread in thread_list:\n thread.join()\n ack = True\n except psycopg2.Error as e:\n logging.info(\"error: {}\".format(e))\n logging.info(\"Execption occurs. 
Abort start.\")\n ack = False\n\n # check ack/nak from children.\n for result in child_result:\n if result != \"200\" :\n ack = False\n break\n\n if ack:\n self.conn.send(\"HTTP/1.1 200 OK\".encode())\n else:\n self.conn.send(\"HTTP/1.1 500 Internal Server Error\".encode())\n\n logging.info(\"wait commit/abort\")\n commit_or_abort = self.conn.recv(1024).decode()\n\n # phase 7 : commit or abort\n if commit_or_abort == \"commit\":\n db_conn.commit()\n for s in child_conns:\n s.sendall(\"commit\".encode())\n s.close()\n logging.info(\"execution thread finished : commit\")\n elif commit_or_abort == \"abort\":\n db_conn.rollback()\n for s in child_conns:\n s.sendall(\"abort\".encode())\n s.close()\n logging.info(\"execution thread finished : abort\")\n\n db_conn.close()\n self.conn.close()\n\n self.lock[\"lock\"] = False\n self.lock[\"holder\"] = None\n\n else:\n self.conn.send(\"HTTP/1.1 404 Not Found\".encode())\n self.conn.close()\n exit()","sub_path":"proxy/global_lock_thread.py","file_name":"global_lock_thread.py","file_ext":"py","file_size_in_byte":9182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"648631576","text":"#!/usr/bin/env python\nimport numpy as np\nimport rospy\nimport sys\nfrom nav_msgs.msg import Odometry\n\n'''\nslows the odometry informaion rate, needs to be called with desired rate\nand name of topic to contrict, will publish to robotic_games/Odometry\n'''\n\ndef position_callback(data):\n global fast_odo\n fast_odo=data\n\ndef constrictor():\n global fast_odo\n rospy.init_node(\"limited_odometry\")\n frequency=rospy.Rate(float(sys.argv[1]))\n rospy.Subscriber(sys.argv[2],Odometry,position_callback)\n pub=rospy.Publisher(\"robotic_games/Odometry\",Odometry,queue_size=1)\n while not rospy.is_shutdown():\n pub.publish(fast_odo)\n frequency.sleep()\n\nif __name__==\"__main__\":\n global fast_odo\n fast_odo=Odometry()\n try:\n constrictor()\n except rospy.ROSInterruptException:\n pass\n\n","sub_path":"scripts/position_info.py","file_name":"position_info.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"473925416","text":"# -*- coding:utf-8 -*-\n\"\"\"\nCreated on Jan 1, 2016\n\n@author: Wasim\n\"\"\"\n\nimport errno\nimport glob\nimport os\nimport shutil\n\n\ndef main():\n zf = r'zurb-foundation-sites/'\n df = r'foundation/static/foundation/'\n\n pathes = (\n (glob.glob(zf + 'scss'), df + 'scss'),\n (glob.glob(zf + '_vendor'), df + '_vendor'),\n\n (glob.glob(zf + 'js'), df + 'js/foundation-es6'),\n (zf + 'dist/css/foundation.css', df + 'css'),\n (zf + 'dist/js/foundation.js', df + 'js'),\n )\n for src, dst in pathes:\n print('copy %s to %s' % (src, dst))\n\n if isinstance(src, list):\n try:\n shutil.rmtree(dst)\n except OSError as e:\n if e.errno == 2:\n pass\n else:\n raise\n\n for s in src:\n cp(s, dst)\n else:\n try:\n os.makedirs(dst)\n except OSError:\n pass\n shutil.copy(src, dst)\n\n\ndef cp(src, dst):\n try:\n shutil.copytree(src, dst)\n\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"copy_statics.py","file_name":"copy_statics.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112836889","text":"import config\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import 
explained_variance_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree\n\n'''\n#####################################\n Preparing data set form CSV\n#####################################\n'''\n\n# Read csv file\nraw_data = pd.read_csv(config.processed_data_path + \"Car_data_cleaned.csv\")\n\n# Get x and y values\ny_values = raw_data[\"MSRP\"]\nx_values = raw_data.loc[:, raw_data.columns != \"MSRP\"]\n\n# Split set to training and testing\nx_train, x_test, y_train, y_test = train_test_split(x_values, y_values, test_size=0.003)\n\n'''\n#####################################\n Create and train and test the model\n#####################################\n'''\n\n# Create the model\nclf = tree.DecisionTreeRegressor()\n\n# Train the model\nclf.fit(x_train, y_train)\n\n# Test the model\npredicted = clf.predict(x_test)\n\n# Get the variance score (1 is best, lower is worse)\ntest_score = explained_variance_score(y_test, predicted)\n\n'''\n#####################################\n Show outcome in graph\n#####################################\n'''\n\n# Create a large window for easy viewing\nplt.rcParams[\"figure.figsize\"] = [16, 9]\n\n# Set labels\nplt.title(\"DECISION TREE [Testing set size: %d] [Test score: %f]\" % (y_test.size, test_score))\nplt.xlabel(\"Counter\")\nplt.ylabel(\"MSRP\")\n\n# Create lines\nplt.plot(y_test.values, color=\"Green\")\nplt.plot(predicted, color=\"Red\")\n\n# Create dots\nplt.plot(y_test.values, \"ro\", color=\"Green\", label=\"Actual values\")\nplt.plot(predicted, \"ro\", color=\"Red\", label=\"Predicted\")\n\n# Prepare and show\nplt.legend()\nplt.show()\n","sub_path":"Supervised/Regression/CarPricePrediction/DecisionTreeRegressor.py","file_name":"DecisionTreeRegressor.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"181722932","text":"\"\"\"\nFUEL TEMPERATURE COEFFICIENT (MCNP)\n\nWritten by Patrick Park (RO, Physics '22)\nppark@reed.edu\n\nThis project should be available at\nhttps://github.com/patrickpark910/pntc/\n\nFirst written Feb. 16, 2021\nLast updated Feb. 
16, 2021\n\n__________________\nDefault MCNP units\n\nLength: cm\nMass: g\nEnergy & Temp.: MeV\nPositive density (+x): atoms/barn-cm\nNegative density (-x): g/cm3\nTime: shakes\n(1 barn = 10e-24 cm2, 1 sh = 10e-8 sec)\n\n_______________\nTechnical Notes\n\nTo fully calculate fuel temp coef, you need to change the TMP= value on each cell with a fuel mat,\nAND switch out the S(a,b) cross section library (e.g., 92235.80c <-- ) for the right TMP.\nLook up the right S(a,b) library names in LA-UR-13-21822.\n\n\"\"\"\n\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\nfrom mcnp_funcs import *\n\nFILEPATH = os.path.dirname(os.path.abspath(__file__))\nWATER_MAT_CARD = '102'\nFUEL_TEMPS = [200,250,300,350,400,450,500,650,700,750,800,850,900,950,1000,1050,1100,1150,1200,1250,1300,1350,1400]\n# Prefer hardcoded lists rather than np.arange, which produces imprecise floating points, e.g., 0.7000000...003\n# Select temperature range that covers all study ranges:\n# https://mcnp.lanl.gov/pdf_files/la-ur-12-20338.pdf (slide 9)\nINPUTS_FOLDER_NAME = 'inputs'\nOUTPUTS_FOLDER_NAME = 'outputs'\nMODULE_NAME = 'pntc'\nKEFF_CSV_NAME = f'{MODULE_NAME}_keff.csv'\nRHO_CSV_NAME = f'{MODULE_NAME}_rho.csv'\nPARAMS_CSV_NAME = f'{MODULE_NAME}_parameters.csv'\nFIGURE_NAME = f'{MODULE_NAME}_results.png'\n\n\ndef main():\n initialize_rane()\n \"\"\"\n BASE_INPUT_NAME = 'pntc-a100-h100-r100.i' # find_base_file(FILEPATH)\n check_kcode(FILEPATH, BASE_INPUT_NAME)\n\n num_inputs_created = 0\n num_inputs_skipped = 0\n for i in range(0, len(FUEL_TEMPS)):\n cell_temps_dict = {}\n for fe_id in list(FE_ID.values()): cell_temps_dict[fe_id] = FUEL_TEMPS[i]\n input_created = change_cell_temps(FILEPATH, MODULE_NAME, cell_temps_dict, BASE_INPUT_NAME, INPUTS_FOLDER_NAME)\n if input_created: num_inputs_created += 1\n if not input_created: num_inputs_skipped += 1\n\n print(f\"Created {num_inputs_created} new input decks.\\n\"\n f\"--Skipped {num_inputs_skipped} input decks because they already exist.\")\n\n if not check_run_mcnp(): sys.exit()\n\n # Run MCNP for all .i files in f\".\\{inputs_folder_name}\".\n tasks = get_tasks()\n for file in os.listdir(f\"{FILEPATH}/{INPUTS_FOLDER_NAME}\"):\n run_mcnp(FILEPATH,f\"{FILEPATH}/{INPUTS_FOLDER_NAME}/{file}\",OUTPUTS_FOLDER_NAME,tasks)\n\n # Deletes MCNP runtape and source dist files.\n delete_files(f\"{FILEPATH}/{OUTPUTS_FOLDER_NAME}\",r=True,s=True)\n\n # Setup a dataframe to collect keff values\n keff_df = pd.DataFrame(columns=[\"x\", \"keff\", \"keff unc\"]) # use lower cases to match 'rods' def above\n keff_df[\"x\"] = FUEL_TEMPS\n keff_df.set_index(\"x\",inplace=True)\n\n for fuel_temp in FUEL_TEMPS:\n keff, keff_unc = extract_keff(f\"{FILEPATH}/{OUTPUTS_FOLDER_NAME}/o_{MODULE_NAME}-fuel-{str(fuel_temp)}.o\")\n keff_df.loc[fuel_temp, 'keff'] = keff\n keff_df.loc[fuel_temp, 'keff unc'] = keff_unc\n \n print(f\"\\nDataframe of keff values and their uncertainties:\\n{keff_df}\\n\")\n keff_df.to_csv(KEFF_CSV_NAME)\n \"\"\"\n convert_keff_to_rho_coef(300, KEFF_CSV_NAME, RHO_CSV_NAME)\n calc_params_coef(RHO_CSV_NAME, PARAMS_CSV_NAME, MODULE_NAME)\n for rho_or_dollars in ['rho','dollars']: plot_data_void(KEFF_CSV_NAME, RHO_CSV_NAME, PARAMS_CSV_NAME, FIGURE_NAME, rho_or_dollars)\n\n print(f\"\\n************************ PROGRAM COMPLETE ************************\\n\")\n\n\n\n\n'''\nPlots integral and differential worths given a CSV of rho and uncertainties.\n\nrho_csv_name: str, name of CSV of 
rho and uncertainties, e.g. \"rho.csv\"\nfigure_name: str, desired name of resulting figure, e.g. \"figure.png\"\n\nDoes not return anything. Only produces a figure.\n\nNB: Major plot settings have been organized into variables for your personal convenience.\n'''\ndef plot_data_void(keff_csv_name, rho_csv_name, params_csv_name, figure_name, rho_or_dollars, for_fun=False):\n if rho_or_dollars.lower() in ['r','p','rho']: rho_or_dollars = 'rho'\n elif rho_or_dollars.lower() in ['d','dollar','dollars']: rho_or_dollars = 'dollars'\n\n keff_df = pd.read_csv(keff_csv_name, index_col=0)\n rho_df = pd.read_csv(rho_csv_name, index_col=0)\n params_df = pd.read_csv(params_csv_name, index_col=0)\n water_densities = rho_df.index.values.tolist()\n\n # Personal parameters, to be used in plot settings below.\n label_fontsize = 16\n legend_fontsize = \"x-large\"\n # fontsize: int or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}\n my_dpi = 320\n x_label = r\"Water density \"# (g/cm$^3$)\"\n y_label_keff, y_label_rho, y_label_void = r\"Effective multiplication factor ($k_{eff}$)\", \\\n r\"Reactivity ($\\%\\Delta k/k$)\", \\\n r\"Void coefficient ((%$\\Delta k/k$)/%)\"\n if rho_or_dollars == 'dollars':\n y_label_rho, y_label_void= r\"Reactivity ($\\Delta$\\$)\", r\"Void coefficient (\\$/%)\"\n\n plot_color = [\"tab:red\",\"tab:blue\",\"tab:green\"]\n\n ax_x_min, ax_x_max = 0.05, 1.05\n ax_x_major_ticks_interval, ax_x_minor_ticks_interval = 0.1, 0.025\n if for_fun:\n ax_x_min, ax_x_max = 0, 2\n ax_x_major_ticks_interval, ax_x_minor_ticks_interval = 0.1, 0.05\n\n ax_keff_y_min, ax_keff_y_max = 0.8, 1.15\n ax_keff_y_major_ticks_interval, ax_keff_y_minor_ticks_interval = 0.05, 0.025\n\n ax_rho_y_min, ax_rho_y_max = -16, 1\n ax_rho_y_major_ticks_interval, ax_rho_y_minor_ticks_interval = 2, 1\n if rho_or_dollars == 'dollars':\n ax_rho_y_min, ax_rho_y_max = -21, 1.0\n ax_rho_y_major_ticks_interval, ax_rho_y_minor_ticks_interval = 2, 1\n\n ax_void_y_min, ax_void_y_max = -0.4, 0.1\n ax_void_y_major_ticks_interval, ax_void_y_minor_ticks_interval = 0.1, 0.025\n if rho_or_dollars == 'dollars':\n ax_void_y_min, ax_void_y_max = -0.5, 0.1\n ax_void_y_major_ticks_interval, ax_void_y_minor_ticks_interval = 0.1, 0.025\n\n fig, axs = plt.subplots(3, 1, figsize=(1636 / 96, 3 * 673 / 96), dpi=my_dpi, facecolor='w', edgecolor='k')\n ax_keff, ax_rho, ax_void = axs[0], axs[1], axs[2] # integral, differential worth on top, bottom, resp.\n\n # Plot data for keff.\n x = [water_density for water_density in water_densities if water_density <= 1]\n if for_fun: x = water_densities\n x_fit = np.linspace(min(x), max(x), len(water_densities))\n y_keff, y_keff_unc = [], []\n for water_density in x:\n y_keff.append(keff_df.loc[water_density,'keff']), y_keff_unc.append(keff_df.loc[water_density,'keff unc'])\n\n ax_keff.errorbar(x, y_keff, yerr=y_keff_unc,\n marker=\"o\", ls=\"none\",\n color=plot_color[0], elinewidth=2, capsize=3, capthick=2)\n\n eq_keff = find_poly_reg(x, y_keff, 2)['polynomial'] # n=2 order fit\n r2_keff = find_poly_reg(x, y_keff, 2)['r-squared']\n sd_keff = np.average(np.abs(np.polyval(np.polyfit(x, y_keff, 2), x) - y_keff))\n y_fit_keff = np.polyval(eq_keff, x)\n\n ax_keff.plot(x, y_fit_keff, color=plot_color[0],\n label=r'y=-{:.3f}$x^2$+{:.2f}$x$+{:.2f}, $R^2$={:.2f}, $\\sigma$={:.4f}'.format(\n np.abs(eq_keff[0]),eq_keff[1], eq_keff[2], r2_keff, sd_keff))\n\n # Plot data for reactivity\n y_rho, y_rho_unc = [], []\n for water_density in x:\n if rho_or_dollars == 'rho': 
y_rho.append(rho_df.loc[water_density,'rho']), y_rho_unc.append(rho_df.loc[water_density,'rho unc'])\n if rho_or_dollars == 'dollars': y_rho.append(rho_df.loc[water_density, 'dollars']), y_rho_unc.append(rho_df.loc[water_density, 'dollars unc'])\n\n ax_rho.errorbar(x, y_rho, yerr=y_rho_unc,\n marker=\"o\", ls=\"none\",\n color=plot_color[1], elinewidth=2, capsize=3, capthick=2)\n\n eq_rho = find_poly_reg(x, y_rho, 2)['polynomial'] # n=2 order fit\n r2_rho = find_poly_reg(x, y_rho, 2)['r-squared']\n sd_rho = np.average(np.abs(np.polyval(np.polyfit(x, y_rho, 2), x) - y_rho))\n y_fit_rho = np.polyval(eq_rho, x_fit)\n\n ax_rho.plot(x_fit, y_fit_rho, color=plot_color[1],\n label=r'y=-{:.1f}$x^2$+{:.0f}$x${:.0f}, $R^2$={:.2f}, $\\sigma$={:.2f}'.format(\n np.abs(eq_rho[0]), eq_rho[1], eq_rho[2], r2_rho, sd_rho))\n\n # Plot data for coef_void\n y_void, y_void_unc = [], []\n for water_density in x:\n if rho_or_dollars == 'rho': y_void.append(params_df.loc[water_density,'coef rho']), y_void_unc.append(params_df.loc[water_density, 'coef rho unc'])\n else: y_void.append(params_df.loc[water_density, 'coef dollars']), y_void_unc.append(params_df.loc[water_density, 'coef dollars unc'])\n\n ax_void.errorbar(x, y_void, yerr=y_void_unc,\n marker=\"o\", ls=\"none\",\n color=plot_color[2], elinewidth=2, capsize=3, capthick=2)\n\n eq_void = find_poly_reg(x, y_void, 1)['polynomial']\n r2_void = find_poly_reg(x, y_void, 1)['r-squared']\n sd_void = np.average(np.abs(np.polyval(np.polyfit(x, y_void, 1), x) - y_void))\n y_fit_void = np.polyval(eq_void, x_fit)\n\n ax_void.plot(x_fit, y_fit_void, color=plot_color[2],\n label=r'y={:.2f}$x${:.2f}, $R^2$={:.2f}, $\\bar x$$\\pm\\sigma$={:.3f}$\\pm${:.3f}'.format(\n np.abs(eq_void[0]), eq_void[1], r2_void, np.mean(y_fit_void), sd_void))\n\n eq_void_der = -1*np.polyder(eq_rho)/100 # n=2 order fit\n y_fit_void_der = np.polyval(eq_void_der, x_fit)\n\n ax_void.plot(x_fit, y_fit_void_der, color=plot_color[2], linestyle='dashed',\n label=r'y={:.2f}$x${:.2f}, $\\bar x$={:.3f}'.format(\n np.abs(eq_void_der[0]), eq_void_der[1], np.mean(y_fit_void_der)))\n\n\n\n\n # Keff plot settings\n ax_keff.set_xlim([ax_x_min, ax_x_max])\n ax_keff.set_ylim([ax_keff_y_min, ax_keff_y_max])\n ax_keff.xaxis.set_major_locator(MultipleLocator(ax_x_major_ticks_interval))\n ax_keff.yaxis.set_major_locator(MultipleLocator(ax_keff_y_major_ticks_interval))\n ax_keff.minorticks_on()\n ax_keff.xaxis.set_minor_locator(MultipleLocator(ax_x_minor_ticks_interval))\n ax_keff.yaxis.set_minor_locator(MultipleLocator(ax_keff_y_minor_ticks_interval))\n\n ax_keff.tick_params(axis='both', which='major', labelsize=label_fontsize)\n ax_keff.grid(b=True, which='major', color='#999999', linestyle='-', linewidth='1')\n ax_keff.grid(which='minor', linestyle=':', linewidth='1', color='gray')\n\n ax_keff.set_xlabel(x_label, fontsize=label_fontsize)\n ax_keff.set_ylabel(y_label_keff, fontsize=label_fontsize)\n ax_keff.legend(title=f'Key', title_fontsize=legend_fontsize, ncol=1, fontsize=legend_fontsize, loc='lower right')\n\n\n # Reactivity worth plot settings\n ax_rho.set_xlim([ax_x_min, ax_x_max])\n ax_rho.set_ylim([ax_rho_y_min, ax_rho_y_max])\n ax_rho.xaxis.set_major_locator(MultipleLocator(ax_x_major_ticks_interval))\n ax_rho.yaxis.set_major_locator(MultipleLocator(ax_rho_y_major_ticks_interval))\n ax_rho.minorticks_on()\n ax_rho.xaxis.set_minor_locator(MultipleLocator(ax_x_minor_ticks_interval))\n ax_rho.yaxis.set_minor_locator(MultipleLocator(ax_rho_y_minor_ticks_interval))\n\n # Use for 2 decimal places after 0. 
for dollars units\n if rho_or_dollars == \"dollars\": ax_rho.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n\n ax_rho.tick_params(axis='both', which='major', labelsize=label_fontsize)\n ax_rho.grid(b=True, which='major', color='#999999', linestyle='-', linewidth='1')\n ax_rho.grid(which='minor', linestyle=':', linewidth='1', color='gray')\n\n ax_rho.set_xlabel(x_label, fontsize=label_fontsize)\n ax_rho.set_ylabel(y_label_rho, fontsize=label_fontsize)\n ax_rho.legend(title=f'Key', title_fontsize=legend_fontsize, ncol=1, fontsize=legend_fontsize, loc='lower right')\n\n\n # Void worth plot settings\n ax_void.set_xlim([ax_x_min, ax_x_max])\n ax_void.set_ylim([ax_void_y_min, ax_void_y_max])\n ax_void.xaxis.set_major_locator(MultipleLocator(ax_x_major_ticks_interval))\n ax_void.yaxis.set_major_locator(MultipleLocator(ax_void_y_major_ticks_interval))\n ax_void.minorticks_on()\n ax_void.xaxis.set_minor_locator(MultipleLocator(ax_x_minor_ticks_interval))\n ax_void.yaxis.set_minor_locator(MultipleLocator(ax_void_y_minor_ticks_interval))\n\n # Use for 2 decimal places after 0. for dollars units\n if rho_or_dollars == \"dollars\": ax_void.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n\n ax_void.tick_params(axis='both', which='major', labelsize=label_fontsize)\n ax_void.grid(b=True, which='major', color='#999999', linestyle='-', linewidth='1')\n ax_void.grid(which='minor', linestyle=':', linewidth='1', color='gray')\n\n ax_void.set_xlabel(x_label, fontsize=label_fontsize)\n ax_void.set_ylabel(y_label_void, fontsize=label_fontsize)\n ax_void.legend(title=f'Key', title_fontsize=legend_fontsize, ncol=1, fontsize=legend_fontsize, loc='lower right')\n\n\n plt.savefig(f\"{figure_name.split('.')[0]}_{rho_or_dollars}.{figure_name.split('.')[-1]}\", bbox_inches='tight',\n pad_inches=0.1, dpi=my_dpi)\n print(\n f\"\\nFigure '{figure_name.split('.')[0]}_{rho_or_dollars}.{figure_name.split('.')[-1]}' saved!\\n\") # no space near \\\n\n\nif __name__ == '__main__':\n main()","sub_path":"without changing xs libraries/pntc.py","file_name":"pntc.py","file_ext":"py","file_size_in_byte":13405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461726091","text":"import os\nimport json\n\nimport regex\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.utils.data\nfrom torch.utils.data import DataLoader, BatchSampler, SequentialSampler, RandomSampler, Sampler\n\nfrom src.audio_utils import open_audio\n\n\nclass AudioDataset(torch.utils.data.Dataset):\n @staticmethod\n def load_constraint_dataset(path, min_duration, max_duration):\n min_duration = 0.0 if min_duration is None else min_duration\n max_duration = 1e10 if max_duration is None else max_duration\n\n dataset = pd.read_csv(path, header=None, names=['audio_path', 'text', 'duration'])\n dataset['duration'] = dataset['duration'].astype(float)\n dataset = dataset[dataset['duration'] > min_duration]\n dataset = dataset[dataset['duration'] < max_duration]\n return dataset\n\n def __init__(\n self, dataset_path, vocab, sample_rate=8000,\n audio_transforms=None, min_duration=None, max_duration=None, evaluate_stats=False\n ):\n self._epoch = 0\n\n self.vocab = vocab\n self.sample_rate = sample_rate\n self.min_duration = min_duration\n self.max_duration = max_duration\n self.audio_transforms = audio_transforms\n\n if isinstance(dataset_path, list):\n data = pd.DataFrame()\n self.min_duration = (\n self.min_duration if isinstance(self.min_duration, list) else [self.min_duration] * 
len(dataset_path)\n )\n self.max_duration = (\n self.max_duration if isinstance(self.max_duration, list) else [self.max_duration] * len(dataset_path)\n )\n for min_duration, max_duration, path in zip(self.min_duration, self.max_duration, dataset_path):\n dataset = self.load_constraint_dataset(path, min_duration, max_duration)\n data = data.append(dataset)\n else:\n data = self.load_constraint_dataset(dataset_path, self.min_duration, self.max_duration)\n\n self.data = data.sort_values(by='duration')\n\n self.idx_to_text_len = dict()\n self.idx_to_audio_len = dict()\n if evaluate_stats:\n for idx in range(self.data.shape[0]):\n self.idx_to_text_len[idx] = len(self.data.iloc[idx]['text'])\n self.idx_to_audio_len[idx] = self.data.iloc[idx]['duration']\n\n def __len__(self):\n return self.data.shape[0]\n\n def __getitem__(self, idx, supress_effects=False):\n apply_transforms = (self.audio_transforms is not None) and (not supress_effects)\n\n text = self.data.iloc[idx]['text']\n audio_path = self.data.iloc[idx]['audio_path']\n\n # write your code here\n text_len = len(text)\n tokens = torch.tensor(self.vocab.lookup_indices(text))\n audio, audio_len = open_audio(\n audio_path, self.sample_rate,\n effects=self.audio_transforms.sample() if apply_transforms else None\n )\n\n # Note: Fix for dataleak\n return (\n audio, audio_len, np.array(text.encode('utf-8'), dtype=np.bytes_), text_len, tokens\n )\n\n\ndef convert_libri_manifest_to_common_voice(manifest_path):\n cv_manifest_path = manifest_path.replace('.json', '.common_voice.csv')\n with open(manifest_path, 'r') as in_file:\n with open(cv_manifest_path, 'w') as out_file:\n for line in in_file:\n sample = json.loads(line, parse_float=lambda x: x)\n audio_filepath = os.path.join(\n os.path.dirname(os.path.abspath(manifest_path)), sample['audio_filepath']\n )\n out_file.write(','.join([audio_filepath, sample['text'], sample['duration']]) + '\\n')\n\n return cv_manifest_path\n\n\ndef convert_open_stt_manifest_to_common_voice(manifest_path, min_duration=2.0):\n cv_manifest_path = manifest_path.replace('.csv', '.common_voice.csv')\n\n with open(manifest_path, 'r') as in_file:\n with open(cv_manifest_path, 'w') as out_file:\n for line in in_file:\n audio_filepath, test_filepath, duration = line.strip().split(',')\n if float(duration) < min_duration:\n continue\n\n audio_filepath = os.path.join(\n os.path.dirname(os.path.abspath(manifest_path)), './..', audio_filepath\n )\n test_filepath = os.path.join(\n os.path.dirname(os.path.abspath(manifest_path)), './..', test_filepath\n )\n text = ' '.join(map(str.strip, open(test_filepath, 'r').readlines()))\n text = regex.sub(r'\\P{Cyrillic}', ' ', text)\n text = regex.sub(' +', ' ', text)\n\n out_file.write(','.join([audio_filepath, text, duration]) + '\\n')\n return cv_manifest_path\n\n\ndef manifest_train_test_split(manifest_path, ratio=0.3, seed=42):\n test_manifest_path = manifest_path.replace('.csv', '_test.csv')\n train_manifest_path = manifest_path.replace('.csv', '_train.csv')\n\n data = pd.read_csv(manifest_path)\n permutation = np.random.RandomState(seed=seed).permutation(data.shape[0])\n data = data.iloc[permutation]\n\n test_size = int(ratio * data.shape[0])\n test_data = data[:test_size]\n train_data = data[test_size:]\n test_data.to_csv(test_manifest_path, index=False)\n train_data.to_csv(train_manifest_path, index=False)\n\n return test_manifest_path, train_manifest_path\n\n\ndef collate_fn(batch):\n \"\"\"\n Inputs:\n batch: list of elements with length=batch_ize\n Returns:\n dict\n \"\"\"\n # 
Note: Fix for dataleak\n batch = [{\n \"audio\": audio, # torch tensor, (num_timesteps)\n \"audio_len\": audio_len, # int\n \"text\": text, # np.ndarray(dtype=np.bytes_)\n \"text_len\": text_len, # int\n 'tokens': tokens, # torch tensor, (text_len)\n } for audio, audio_len, text, text_len, tokens in batch\n ]\n\n # write your code here\n audios = torch.nn.utils.rnn.pad_sequence(\n [obj['audio'] for obj in batch], batch_first=True, padding_value=0.0\n )\n audio_lens = torch.tensor([obj['audio'].shape[0] for obj in batch])\n\n # Note: Fix for dataleak\n # texts = [obj['text'] for obj in batch]\n texts = np.array([obj['text'] for obj in batch]).astype(np.bytes_)\n\n # Note: Fix for dataleak\n # text_lens = torch.tensor([len(obj['text']) for obj in batch])\n text_lens = torch.tensor([obj['text_len'] for obj in batch])\n tokens = torch.nn.utils.rnn.pad_sequence(\n [obj['tokens'] for obj in batch], batch_first=True, padding_value=0.0\n )\n\n # Note: Fix for dataleak\n return audios, audio_lens, texts, text_lens, tokens\n\n\nclass AudioDataloaderWrapper:\n def __init__(self, dataloader):\n self._iterator = None\n self.dataloader = dataloader\n self.dataset = self.dataloader.dataset\n\n def __iter__(self):\n self._iterator = iter(self.dataloader)\n return self\n\n def __next__(self):\n audios, audio_lens, texts, text_lens, tokens, *_ = next(self._iterator)\n return {\n 'audios': audios,\n 'audio_lens': audio_lens,\n 'texts': texts,\n 'text_lens': text_lens,\n 'tokens': tokens,\n }\n\n def __len__(self):\n return len(self.dataloader)\n\n\nclass AudioDatasetSampler(Sampler):\n def __init__(self, dataset, batch_size):\n super().__init__(None)\n self.epoch = 0\n self.dataset = dataset\n self.batch_size = batch_size\n\n # Assume that data in dataset is sorted w.r. 
to duration\n self.batches = list(BatchSampler(SequentialSampler(self.dataset), batch_size=self.batch_size, drop_last=False))\n\n def __iter__(self):\n if self.epoch == 0:\n for batch_idx in SequentialSampler(self.batches):\n for idx in self.batches[batch_idx]:\n yield idx\n else:\n for batch_idx in RandomSampler(self.batches):\n for idx in self.batches[batch_idx]:\n yield idx\n self.epoch += 1\n\n def __len__(self):\n return len(self.dataset)\n","sub_path":"src/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"351703204","text":"import matplotlib\nmatplotlib.use('Agg') # Can change to 'Agg' for non-interactive mode\nimport matplotlib.pyplot as plt\nimport argparse\nimport os\nfrom baselines.bench.monitor import load_results\nimport numpy as np\n\ndef ts2xy(ts):\n x = np.cumsum(ts.l.values)\n y = ts.r.values\n return x, y\n\ndef rolling_window(a, window):\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\ndef window_func(x, y, window, func):\n yw_func = []\n for i in range(len(y)):\n yw = rolling_window(y[i], window)\n yw_func.append(func(yw, axis=-1))\n return x[window-1:], yw_func\n\ndef plot_single_directory (env_id, directory, method_name, num_folds, EPISODES_WINDOW=100):\n directory_name = directory + '/' + 'logs_' + env_id + '_'\n if not os.path.isdir(directory_name+'0'):\n print ('Warning: directory ' + directory_name + '0' + 'does not exist, skipping...')\n return\n results_x = []\n results_y = []\n results_x_all = []\n for i in range (0, num_folds):\n directory_name_i = directory_name + str(i)\n current_results = load_results(directory_name_i)\n current_results_x, current_results_y = ts2xy (current_results)\n results_x.append (current_results_x)\n results_y.append (current_results_y)\n results_x_all.extend (current_results_x)\n #plt.plot (current_results_x, current_results_y)\n results_x_all = np.sort (results_x_all)\n results_y_all = []\n for i in range (num_folds):\n np.append(results_x[i], results_x_all[-1])\n np.append(results_y[i], results_y[i][-1])\n results_y_all.append (np.interp(results_x_all, results_x[i], results_y[i]))\n results_x_all, results_y_all = window_func(results_x_all, results_y_all, EPISODES_WINDOW, np.mean) \n plt.plot (results_x_all, np.mean (results_y_all, 0), label=method_name)\n plt.fill_between (results_x_all, np.mean(results_y_all, 0) - np.std (results_y_all, 0), np.mean(results_y_all, 0) + np.std (results_y_all, 0), alpha = 0.3)\n\ndef plot_results (env_id, directories, method_names, num_folds, postfix=''):\n plt.clf()\n for i in range(len(directories)):\n directory = directories[i]\n method_name = method_names[i]\n plot_single_directory (env_id, directory, method_name, num_folds)\n plt.gca().set_xlabel('Steps')\n plt.gca().set_ylabel('Rewards')\n plt.legend()\n #plt.show ()\n plt.savefig (env_id + postfix + '.png', bbox_inches='tight', pad_inches=0)\n\ndef main():\n #directories = ['experimental_gradient/log_bak_01_05', 'acktr']\n #directories = ['experimental_gradient', 'ddpg/log_10_05', 'acktr/log_results', 'experimental_gradient_09_08_buffer_3', 'trpo_mpi']\n #method_names = ['Proposed method', 'DDPG', 'ACKTR', 'Proposed_3', 'TRPO_MPI']\n #directories = ['experimental_gradient_09_08_buffer_3', 'experimental_gradient_12_08_buffer_5', 'trpo_mpi/log_bak', 'ppo1', 'acktr/log_results']\n 
#method_names = ['Proposed, Buffer Size=3', 'Proposed, Buffer Size=5', 'TRPO_MPI', 'PPO', 'ACKTR']\n #env_ids = {'HumanoidStandup-v2', 'Striker-v2', 'Thrower-v2', 'Pusher-v2', 'Reacher-v2', 'HalfCheetah-v2', 'Swimmer-v2', 'Ant-v2', 'Humanoid-v2', 'Hopper-v2', 'Walker2d-v2', 'InvertedPendulum-v2', 'InvertedDoublePendulum-v2'}\n directories = ['trpo_replay']\n method_names = ['Proposed']\n env_ids = {'Ant-v2'}\n #env_ids = {'Reacher-v2', 'HalfCheetah-v2', 'Swimmer-v2', 'Ant-v2', 'Humanoid-v2', 'Hopper-v2', 'Walker2d-v2', 'InvertedPendulum-v2', 'InvertedDoublePendulum-v2', 'swimmer_swimmer6', 'fish_swim', 'walker_stand', 'ball_in_cup_catch', 'humanoid_stand', 'fish_upright', 'finger_spin', 'cheetah_run', 'walker_walk', 'walker_run'}\n num_folds = 1\n for env_id in env_ids:\n plot_results (env_id, directories, method_names, num_folds, postfix='_3_minus_cov_acktr')\n \n #directories = ['experimental_gradient'] \n #method_names = ['Proposed method']\n #env_ids = {'Reacher-v2', 'HalfCheetah-v2', 'Swimmer-v2', 'Ant-v2', 'Humanoid-v2', 'Hopper-v2', 'Walker2d-v2', 'InvertedPendulum-v2', 'InvertedDoublePendulum-v2'} \n #num_folds = 1\n #postfix = '_tr_perf'\n #for env_id in env_ids:\n # plot_results (env_id, directories, method_names, num_folds, postfix)\nif __name__ == '__main__':\n main()\n\n","sub_path":"baselines/customised_plotter.py","file_name":"customised_plotter.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"334679265","text":"import os\nimport logging\nfrom code.util import register\nfrom code.util.db import Submission, Problem, User\nimport time\nimport shutil\nimport re\nfrom uuid import uuid4\nfrom zipfile import ZipFile\nimport base64\nimport json\nimport io\nimport traceback\n\ndef addSubmission(probId, lang, code, user, type, custominput):\n sub = Submission()\n sub.problem = Problem.get(probId)\n sub.language = lang\n sub.code = code\n sub.result = \"pending\"\n sub.custominput = custominput\n sub.user = user\n sub.timestamp = time.time() * 1000\n sub.type = type\n sub.status = \"Review\"\n \n if type == Submission.TYPE_SUBMIT:\n sub.save()\n else:\n sub.id = str(uuid4())\n\n return sub\n\nexts = {\n \"c\": \"c\",\n \"cpp\": \"cpp\",\n \"cs\": \"cs\",\n \"java\": \"java\",\n \"python2\": \"py\",\n \"python3\": \"py\",\n \"ruby\": \"rb\",\n \"vb\": \"vb\"\n}\n\ndef readFile(path):\n \"\"\"Reads file at `path` and returns string of at most Submission.MAX_OUTPUT_LEN\"\"\"\n try:\n with open(path, \"rb\") as f:\n data = bytearray(f.read(Submission.MAX_OUTPUT_LEN))\n\n for i, value in enumerate(data):\n if value == 10 or (value <= 127 and chr(value).isprintable()):\n pass \n elif value == '\\r':\n data[i] = 32 # ignore carriage returns\n else:\n data[i] = 63 # other characters map to ?\n\n result = data.decode('ascii')\n if f.read(1):\n result += \"... 
additional data truncated ...\"\n return result\n except Exception:\n traceback.print_exc()\n return ''\n\ndef writeFile(path: str, data: str):\n with open(path, \"w\") as f:\n if data != None:\n f.write(data)\n\n# Saves and truncates \ndef saveData(id: str, data: list, fileType: str):\n for i in range(len(data)):\n writeFile(f\"/db/submissions/{id}/{fileType}{i}.txt\", data[i])\n data[i] = Submission.truncateForDisplay(data[i])\n\n# Remove trailing whitespace\ndef strip(text):\n return re.sub(\"[ \\t\\r]*\\n\", \"\\n\", text or \"\").rstrip()\n\n# Checks if contains only lines from in order\n# Can be missing some lines in the middle or at the end\ndef compareStrings(incomplete: list, full: list) -> bool:\n lineNumOfFull = 0\n for line in incomplete:\n while lineNumOfFull < len(full):\n if line == full[lineNumOfFull]:\n break\n lineNumOfFull += 1\n else:\n return False\n lineNumOfFull += 1\n return True\n\ndef runCode(sub: Submission, user: User) -> list:\n \"\"\"Executes submission `sub` and returns lists of data files\"\"\"\n extension = exts[sub.language]\n\n try:\n shutil.rmtree(f\"/tmp/{id}\", ignore_errors=True)\n os.makedirs(f\"/tmp/{sub.id}\", exist_ok=True)\n\n # Copy the code over to the runner /tmp folder\n writeFile(f\"/tmp/{sub.id}/code.{extension}\", sub.code)\n \n prob = sub.problem\n \n if sub.type == Submission.TYPE_TEST and not user.isAdmin():\n numTests = prob.samples \n elif sub.type == Submission.TYPE_CUSTOM:\n numTests = 1\n else:\n numTests = prob.tests \n\n # Copy the input over to the tmp folder for the runner\n if sub.type == Submission.TYPE_CUSTOM:\n writeFile(f\"/tmp/{sub.id}/in0.txt\", sub.custominput) \n else:\n for i in range(numTests):\n shutil.copyfile(f\"/db/problems/{prob.id}/input/in{i}.txt\", f\"/tmp/{sub.id}/in{i}.txt\") \n\n\n # Output files will go here\n os.makedirs(f\"/tmp/{sub.id}/out\", exist_ok=True)\n\n # Run the runner\n cmd = f\"docker run --rm --network=none -m 256MB -v /tmp/{sub.id}/:/source nathantheinventor/open-contest-dev-{sub.language}-runner {numTests} {prob.timelimit} > /tmp/{sub.id}/result.txt\"\n logging.debug(cmd) \n if os.system(cmd) != 0:\n raise Exception(\"Problem testing submission with Docker: Review log\")\n\n # Check for compile error\n if readFile(f\"/tmp/{sub.id}/result.txt\") == \"compile_error\\n\":\n sub.results = \"compile_error\"\n sub.delete()\n sub.compile = readFile(f\"/tmp/{sub.id}/out/compile_error.txt\")\n return None, None, None, None\n\n # Submission ran; process test results\n\n inputs = []\n outputs = []\n answers = []\n errors = []\n results = []\n result = \"ok\"\n\n for i in range(numTests):\n if sub.type == Submission.TYPE_CUSTOM:\n inputs.append(sub.custominput)\n answers.append(\"\")\n else:\n inputs.append(sub.problem.testData[i].input)\n answers.append(sub.problem.testData[i].output)\n\n errors.append(readFile(f\"/tmp/{sub.id}/out/err{i}.txt\"))\n outputs.append(readFile(f\"/tmp/{sub.id}/out/out{i}.txt\"))\n\n anstrip = strip(answers[-1])\n outstrip = strip(outputs[-1])\n answerLines = anstrip.split('\\n')\n outLines = outstrip.split('\\n')\n\n print(answerLines, outLines)\n\n res = readFile(f\"/tmp/{sub.id}/out/result{i}.txt\")\n if res == None:\n res = \"tle\"\n elif res == \"ok\" and anstrip != outstrip:\n if sub.type == Submission.TYPE_CUSTOM:\n pass # custom input cannot produce incorrect result\n elif compareStrings(outLines, answerLines):\n res = \"incomplete_output\"\n elif compareStrings(answerLines, outLines):\n res = \"extra_output\"\n else:\n res = \"wrong_answer\"\n \n 
results.append(res)\n\n # Make result the first incorrect result\n if res != \"ok\" and result == \"ok\":\n result = res\n\n\n sub.result = result\n if sub.result in [\"ok\", \"runtime_error\", \"tle\"] or user.isAdmin():\n sub.status = \"Judged\"\n \n sub.results = results\n \n if sub.type == Submission.TYPE_SUBMIT:\n saveData(sub.id, inputs, 'in')\n saveData(sub.id, outputs, 'out')\n saveData(sub.id, answers, 'answer')\n saveData(sub.id, errors, 'error')\n sub.save()\n\n return inputs, outputs, answers, errors\n\n finally:\n shutil.rmtree(f\"/tmp/{sub.id}\", ignore_errors=True)\n\n# Process contestant test or submission\ndef submit(params, setHeader, user):\n probId = params[\"problem\"]\n lang = params[\"language\"]\n code = params[\"code\"]\n type = params[\"type\"] # Submission.TYPE_*\n custominput = params.get(\"input\")\n submission = addSubmission(probId, lang, code, user, type, custominput)\n inputs, outputs, answers, errors = runCode(submission, user)\n response = submission.toJSON()\n if (submission.type == Submission.TYPE_SUBMIT or\n (submission.type == Submission.TYPE_TEST and user.isAdmin())):\n response[\"result\"] = submission.getContestantResult()\n response[\"results\"] = submission.getContestantIndividualResults()\n\n response[\"inputs\"] = inputs\n response[\"outputs\"] = outputs\n response[\"answers\"] = answers\n response[\"errors\"] = errors\n return response\n\ndef changeResult(params, setHeader, user):\n version = int(params[\"version\"])\n id = params[\"id\"]\n sub = Submission.get(id)\n if not sub:\n return \"Error: no such submission\"\n elif sub.version != version:\n return \"The submission has been changed by another judge since you loaded it. Please reload the sumbission to modify it.\"\n sub.result = params[\"result\"]\n sub.status = params[\"status\"]\n sub.version += 1\n sub.checkout = None\n sub.save()\n return \"ok\"\n\ndef rejudge(params, setHeader, user):\n \"\"\"Ajax method: Rejudge a single submission `id`\"\"\"\n\n id = params[\"id\"]\n submission = Submission.get(id)\n runCode(submission, user)\n return submission.result\n\ndef rejudgeAll(params, setHeader, user):\n \"\"\"Ajax method: Rejudge all submissions for problem `id`\"\"\"\n\n ctime = time.time() * 1000\n id = params[\"id\"]\n numSubmissions = 0\n for sub in Submission.all():\n if sub.problem.id == id and sub.timestamp < ctime and sub.result != 'reject':\n runCode(sub, user)\n numSubmissions += 1\n return f\"Rejudged {numSubmissions} submissions\"\n\n# Create and return zip of submission data\ndef download(params, setHeader, user):\n id = params[\"id\"]\n submission = Submission.get(id)\n\n buf = io.BytesIO()\n with ZipFile(buf,'w') as zip:\n sourceFile = f\"source.{exts[submission.language]}\"\n zip.writestr(sourceFile, submission.code)\n\n for index in range(submission.problem.tests):\n for fileType in [\"in\", \"out\", \"answer\", \"error\"]:\n filename = f\"/db/submissions/{id}/{fileType}{index}.txt\"\n output_dest_filename = f\"{fileType}{index}.txt\"\n if os.path.exists(filename):\n zip.write(filename, output_dest_filename)\n \n data = {\"download.zip\": base64.b64encode(buf.getvalue()).decode('ascii')}\n \n return json.dumps(data)\n \n\nregister.post(\"/submit\", \"loggedin\", submit)\nregister.post(\"/changeResult\", \"admin\", changeResult)\nregister.post(\"/rejudge\", \"admin\", rejudge)\nregister.post(\"/download\", \"admin\", download)\nregister.post(\"/rejudgeAll\", \"admin\", 
rejudgeAll)","sub_path":"src/main/web/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29974223","text":"# Map Lesson!\n\n# Most of the times we want to pass all the list elements to a function one-by-one and then collect the output.\n\n# eg.\nitems = [1, 2, 3, 4, 5]\nsquared = []\nfor i in items:\n squared.append(i**2)\nprint(squared)\n\n# It works well but there's a much simpler and nicer way!\n\n# map is the solution\n\nitems = [1, 2, 3, 4, 5]\nsquared2 = list(map(lambda x: x**2, items))\nprint(squared2)\n\n# Both achieve the same result! You can use map to perform a function on all items in a list easily!\n\n# Here's a blueprint: map(function_to_apply, list_of_inputs)\n","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"199785612","text":"import requests\n\nparams = {\n \"wd\":\"美女a\",\n}\n\nurl = \"https://www.baidu.com/s?\"\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36\",\n }\n\nresponse = requests.get(url, headers=headers,params=params)\ndata = response.content.decode()\n\nwith open(\"baidu.html\", \"w\", encoding='utf-8') as f:\n f.write(data)\n\n# 发送post 和添加参数","sub_path":"day03-requests/reques_use.py","file_name":"reques_use.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"285761719","text":"from wofs.util import config\nfrom os.path import exists, join \n\ndef _determine_base_path( date):\n '''\n Find the correct base directory path; based on the date.\n '''\n if exists( join( '/work3/JTTI/HMT_FFaIR/FCST', date)):\n return join( '/work3/JTTI/HMT_FFaIR/FCST', date)\n else:\n return join( join( '/work3/wof/realtime/FCST', date[:4]), date) \n\n\ndatetimes = config.datetimes_ml\nfor date in datetimes.keys():\n for time in datetimes[date]:\n #print( date, _determine_base_path( str(date) ), exists(_determine_base_path(str(date))) )\n base_path = join(_determine_base_path( str(date) ), time)\n if not exists(base_path):\n base_path = join( join( _determine_base_path(str(date)), 'RLT'), time)\n if not exists(base_path):\n print ( date, time, exists(base_path)) \n \n\n\n\n","sub_path":"forecasts/check_file_paths_test.py","file_name":"check_file_paths_test.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"467476982","text":"from google.cloud import pubsub_v1 as pubsub\nfrom google.cloud import storage\nfrom google.cloud import datastore\nimport time\nimport multiprocessing\nimport pandas as pd\nimport tensorflow as tf\nfrom histcnn import (choose_input_list,\n handle_tfrecords,\n handle_google_cloud_apis,\n util,\n choose_input_list,\n run_classification,\n plotting_cnn)\nimport os\nimport sys\nimport re\nimport glob\nimport pickle\n\nfrom histcnn import inception_multitasklearning_retrain as incret\n\nproject_id = PROJECT_ID\nsubscription_name = SUBSCRIPTION_NAME\ninput_bucket = INPUT_BUCKET\ntask_kind = TASK_KIND\nannotations_path = ANNOTATIONS_PATH\nresults_path = RESULTS_PATH\npancancer_tfrecords_path = PANCANCER_TFRECORDS_PATH\n\n\n\nsubscriber = pubsub.SubscriberClient()\nsubscription_path = subscriber.subscription_path(\n 
project_id, subscription_name)\n\nNUM_MESSAGES = 1\nACK_DEADLINE = 60\nSLEEP_TIME = 30\n\ndef mark_done(client, task_id, completed_time, elapsed_time_s):\n with client.transaction():\n key = client.key(task_kind, task_id)\n task = client.get(key)\n\n if not task:\n raise ValueError('{} {} does not exist.'.format(task_kind, task_id))\n\n task['status'] = 'Done'\n task['completed_time'] = completed_time\n task['elapsed_time_s'] = elapsed_time_s\n client.put(task)\n\ndef mark_in_progress(client, task_id):\n with client.transaction():\n key = client.key(task_kind, task_id)\n task = client.get(key)\n\n if not task:\n raise ValueError(\n '{} {} does not exist.'.format(task_kind, task_id))\n\n task['status'] = 'InProgress'\n client.put(task)\n\n\n\ndef run_tumor_normal_classification(cancertype, how_many_training_steps = 2000, dropout_keep_prob = 0.8, label_names = ['is_tumor'],\n optimizer = 'adam', is_weighted = 0, nClass = 2, treat_validation_as_test=True,\n do_not_train=True, avoid_gpu_for_testing=True, train_test_percentage = [70, 30]):\n# annotations_path='data/pancancer_annotations/tn_frozen_cache_anns'\n# results_path = 'data/run-results/frozen_undersampled/'\n# pancancer_tfrecords_path='tfrecords/frozen/tn'\n\n image_file_metadata_filename = '{:s}/{:s}/caches_basic_annotations.txt'.format(annotations_path, cancertype)\n# tfrecords_path = os.path.join(pancancer_tfrecords_path, cancertype, 'caches_512x512/')\n tfrecords_path = os.path.join(pancancer_tfrecords_path, cancertype, '')\n print('copying files from GCS')\n input_bucket_path = 'gs://'+input_bucket+'/'\n util.gsutil_cp(os.path.join(input_bucket_path, tfrecords_path, 'tfrecord*'), '/sdata/'+ tfrecords_path, make_dir=True)\n util.gsutil_cp(os.path.join(input_bucket_path, image_file_metadata_filename), '/sdata/'+ image_file_metadata_filename, make_dir=False)\n \n # output paths\n trecords_prefix = '/sdata/'+ tfrecords_path + 'tfrecord'\n saved_model_path = os.path.join(results_path, 'saved_models/{:s}'.format(cancertype))\n tensorboard_path = os.path.join(results_path, 'tensorboard_logs/{:s}'.format(cancertype))\n pickle_path = os.path.join(results_path, \n 'pickles/pickles_train{:d}_test{:d}/run_cnn_output_{:s}.pkl'.format(*train_test_percentage, cancertype))\n\n tfrecordfiles = glob.glob('{:s}*'.format(trecords_prefix))\n assert len(tfrecordfiles)>0\n num_tfrecords = int(len(tfrecordfiles)/3)\n\n tfrecordfiles_dict = {s: ['{:s}{:d}.{:s}'.format(trecords_prefix, n, s) for n in range(num_tfrecords)] for s in ['training', 'testing', 'validation']}\n image_files_metadata = pd.read_csv('/sdata/' + image_file_metadata_filename, index_col=0)\n\n if treat_validation_as_test:\n image_files_metadata['crossval_group'].replace('validation', 'testing', inplace=True)\n tfrecordfiles_dict['testing'] = tfrecordfiles_dict['testing']+tfrecordfiles_dict.pop('validation')\n\n test_batch_size = (image_files_metadata['crossval_group'] == 'testing').sum()\n\n if is_weighted:\n label_ratio = image_files_metadata[label_names].mean()\n pos_weight = (1/label_ratio - 1).tolist()\n else:\n pos_weight = 1\n\n class_probs = image_files_metadata.loc[image_files_metadata['crossval_group'] == 'training', label_names[0]].value_counts(normalize=True, sort=False).sort_index().values\n\n test_accuracies_list, predictions_list, confusion_matrices_list, imagefilenames, final_softmax_outputs_list = \\\n run_classification.run_multilabel_classification_with_inception_CNN(label_names, tfrecordfiles_dict, test_batch_size=test_batch_size, nClass=nClass,\n train_batch_size = 
512, how_many_training_steps=how_many_training_steps, avoid_gpu_for_testing=avoid_gpu_for_testing,\n do_not_train = do_not_train, pos_weight = pos_weight, dropout_keep_prob = dropout_keep_prob,\n saved_model_path = os.path.join('/sdata', saved_model_path, 'mychckpt'),\n summaries_dir = '/sdata/'+ tensorboard_path, optimizer = optimizer, \n class_probs=class_probs)\n\n util.mkdir_if_not_exist(os.path.dirname('/sdata/' + pickle_path))\n\n pickle.dump([image_files_metadata, test_accuracies_list, predictions_list, \n confusion_matrices_list, imagefilenames, final_softmax_outputs_list], \n open('/sdata/' + pickle_path, 'wb'))\n\n util.gsutil_cp(os.path.join('/sdata', saved_model_path), os.path.join(input_bucket_path, saved_model_path))\n util.gsutil_cp(os.path.join('/sdata', tensorboard_path), os.path.join(input_bucket_path, tensorboard_path))\n util.gsutil_cp(os.path.join('/sdata', pickle_path), os.path.join(input_bucket_path, pickle_path))\n\n\ndef worker(msg):\n start_time = time.time()\n print(msg.message.data)\n\n task_id = int(msg.message.data)\n client = datastore.Client(project_id)\n key = client.key(task_kind, task_id)\n params = client.get(key)\n\n # Setting the status to 'InProgress'\n mark_in_progress(client, task_id)\n\n cancertype = params['cancertype']\n\n label_names = ['is_tumor']\n \n run_tumor_normal_classification(cancertype, label_names = label_names, treat_validation_as_test=True, do_not_train=False)\n\n elapsed_time_s = round((time.time() - start_time), 1) # in seconds\n completed_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n # We now can comfirm the job\n client = datastore.Client(project_id)\n mark_done(client=client, task_id=task_id, completed_time=completed_time,\n elapsed_time_s=elapsed_time_s)\n\n print('Finish Timestamp: {} - Time elapsed: {} seconds.'.format(completed_time, elapsed_time_s))\n\n subscriber = pubsub.SubscriberClient()\n subscription_path = subscriber.subscription_path(project_id, subscription_name)\n\n # Acknowledging the message\n subscriber.acknowledge(subscription_path, [msg.ack_id])\n print(\"{}: Acknowledged {}\".format(\n time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), msg.message.data))\n\n\n# The subscriber pulls a specific number of messages.\nresponse = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)\n\n# `processes` stores process as key and ack id and message as values.\nprocesses = dict()\nfor message in response.received_messages:\n process = multiprocessing.Process(target=worker, args=(message,))\n processes[process] = (message.ack_id, message.message.data)\n process.start()\n\nwhile processes:\n for process in list(processes):\n ack_id, msg_data = processes[process]\n # If the process is still running, reset the ack deadline as\n # specified by ACK_DEADLINE once every while as specified\n # by SLEEP_TIME.\n if process.is_alive():\n # `ack_deadline_seconds` must be between 10 to 600.\n subscriber.modify_ack_deadline(\n subscription_path,\n [ack_id],\n ack_deadline_seconds=ACK_DEADLINE)\n print('{}: Reset ack deadline for {} for {}s'.format(\n time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\n msg_data, ACK_DEADLINE))\n\n # If the processs is finished, acknowledges using `ack_id`.\n else:\n #subscriber.acknowledge(subscription_path, [ack_id])\n processes.pop(process)\n\n # If there are still processes running, sleeps the thread.\n if processes:\n 
time.sleep(SLEEP_TIME)\n\n\n","sub_path":"k8s/k8s-app-runcnn-tn/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":8756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"233258141","text":"\"\"\"\n// Time Complexity : \n put - O(1)\n get - O(1)\n remove - O(1)\n\n// Space Complexity : \n O(n) for using array\n// Did this code successfully run on Leetcode : N/A\n// Any problem you faced while coding this : None\n// Your code here along with comments explaining your approach\nAlgorithm explanation\n- Initialize hashtable with size 1000, Each index holds a pointer to head of doubly linked list\n- Define a hash function using prime number modulo for given key\n- put\n Get the hash value of the key to be inserted(index)\n add the value at the obtained index and append the element at tail of the linked list\n- get\n - Get the hash value of the key to be fetched\n - Iterate along the linked list till the element is found\n - else return -1\n- remove\n - Get the hash value of the key to be removed\n - Iterate along the linked list till the value is found, delete the node using prev and next pointers\n - else return -1\n\"\"\"\n\nclass Node:\n def __init__(self,key,value):\n self.prev = None\n self.next = None\n self.key = key\n self.value = value\n\nclass MyHashMap:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.hashtable = [None]*13\n self.prime_mod = 13\n \n def get_hash_value(self,key):\n return key % self.prime_mod\n \n def print_list(self,head):\n curr = head\n while curr:\n #print((curr.key,curr.value),end = \" \")\n curr = curr.next\n\n def put(self, key: int, value: int) -> None:\n \"\"\"\n value will always be non-negative.\n \"\"\"\n hv = self.get_hash_value(key)\n newnode = Node(key,value)\n if not self.hashtable[hv]:\n self.hashtable[hv] = newnode\n self.hashtable[hv].next = None\n self.hashtable[hv].prev = None\n else:\n curr = self.hashtable[hv]\n #find for the key \n update = False\n while curr:\n if curr.key == key:\n curr.value = value\n update = True\n curr = curr.next\n if not update:\n newnode.next = self.hashtable[hv].next\n if self.hashtable[hv].next:\n self.hashtable[hv].next.prev = newnode\n newnode.prev = self.hashtable[hv]\n self.hashtable[hv].next = newnode\n \n # curr.next = newnode\n # newnode.prev = curr\n #print(\"Printing for hv\",hv,key,value)\n #self.print_list(self.hashtable[hv])\n #print()\n\n def get(self, key: int) -> int:\n \"\"\"\n Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key\n \"\"\"\n hv = self.get_hash_value(key)\n curr = self.hashtable[hv]\n value = -1\n #print(\"GET ME THE KEY\",key)\n while curr:\n #print(\"Keys in the get for the key\",curr.key)\n if curr.key == key:\n value = curr.value\n #print(\"KEy to fetch found for value\",key,value)\n curr = curr.next\n return value\n\n def remove(self, key: int) -> None:\n \"\"\"\n Removes the mapping of the specified value key if this map contains a mapping for the key\n \"\"\"\n hv = self.get_hash_value(key)\n curr = self.hashtable[hv]\n while curr:\n if curr.key == key:\n #print(\"KEy to remove found\",key)\n break\n curr = curr.next\n \n if curr:\n #print(\"curr value\",curr.key,curr.value)\n if not curr.prev:\n self.hashtable[hv] = self.hashtable[hv].next\n if self.hashtable[hv]:\n self.hashtable[hv].prev = None\n else:\n currprev = curr.prev\n print(currprev.key,currprev.value)\n currnext = curr.next\n currprev.next = currnext\n 
if currnext:\n print(currnext.key,currnext.value)\n currnext.prev = currprev\n #currnext.prev = currprev\n # curr.next.prev = curr.prev\n # currnext = curr.next\n \n #curr.prev.next = currnext\n \n #print(\"Removing the key\",key,hv)\n #self.print_list(self.hashtable[hv])\n \n# Your MyHashMap object will be instantiated and called as such:\n# obj = MyHashMap()\n# obj.put(key,value)\n# param_2 = obj.get(key)\n# obj.remove(key)","sub_path":"hashmap.py","file_name":"hashmap.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372278842","text":"\"\"\"\r\nCreated on Wed Sep 29 09:30:01 2021\r\n\r\n@author: Andres Ducaud / Marcela Linconao\r\n\"\"\"\r\n#%% \r\nimport socket\r\nimport random\r\nfrom Crypto.Cipher import DES\r\n#%% Funcion para eliminar el padding\r\ndef quitarPadding(texto):\r\n contador = 0\r\n for i in range(1,len(texto)+2):\r\n if texto[-i] == 32: # b' ' es 32, que es el byte utilizado para padding\r\n contador +=1\r\n else:\r\n break\r\n return texto[0:len(texto)-contador]\r\n\r\n#%% Se inicia la conexion con el host\r\nprint(\"Cliente\") # A veces no sabiamos que consola era que\r\nHost = \"LocalHost\"\r\nPuerto = 8000\r\n\r\nMi_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nMi_Socket.connect((Host, Puerto))\r\n#%% Llaves\r\nkey1 = b'chiritos' # Llave para des\r\nkey2 = b'cachupin' # Llave para 3des\r\nkey3 = b'apodador' # Llave para 3des\r\n\r\n#%% DH\r\n\r\nRecibir = Mi_Socket.recv(1024)\r\nRecibir = Recibir.decode(encoding = \"ascii\", errors = \"ignore\")\r\nP,G,B = Recibir.split(\",\") # El numero primo, G y el numero de bob\r\n\r\na = random.randint(1,int(P)-1) # Se determina el numero secreto de Alice\r\nA = (int(G)**a)%int(P) # Se calcula el numero a intercambiar con bob\r\n\r\nEnviar = str(A)\r\nMi_Socket.send(Enviar.encode(encoding = \"ascii\", errors = \"ignore\"))\r\n\r\nK = (int(B)**a)%int(P)\r\n\r\n#%% Autenticar la llave para la recepcion del mensaje\r\nEnviar = str(K)\r\nMi_Socket.send(Enviar.encode(encoding = \"ascii\", errors = \"ignore\"))\r\n\r\ntry: # en caso de que no exista respuesta del servidor se finaliza\r\n Recibir = Mi_Socket.recv(1024)\r\n \r\n #%% DES\r\n #'''\r\n '''\r\n print(\"Desencriptando mensaje con DES...\\n\") # usado para testear el codigo\r\n des = DES.new(key1, DES.MODE_ECB) # Se crea un objeto para usar DES\r\n \r\n textoplano = des.decrypt(Recibir) # Se desencrypta el mensaje\r\n \r\n '''\r\n #%% 3DES\r\n print(\"Desencriptando mensaje con 3DES...\\n\") # usado para testear el codigo\r\n des1 = DES.new(key1, DES.MODE_ECB)\r\n des2 = DES.new(key2, DES.MODE_ECB)\r\n des3 = DES.new(key3, DES.MODE_ECB)\r\n \r\n textoplano = des1.decrypt(Recibir)\r\n textoplano = des2.encrypt(Recibir)\r\n textoplano = des3.decrypt(Recibir)\r\n \r\n \r\n #%%\r\n #print(Recibir)\r\n #print(textoplano)\r\n print(\"Escribiendo el resultado...\\n\") # Los puntos son para mas tension owo\r\n textoplano = quitarPadding(textoplano) \r\n \r\n archivo = open('mensajerecibido.txt','w+') # Se escribe el mensaje traducido\r\n archivo.writelines(textoplano.decode('ascii'))\r\n archivo.close()\r\n \r\nexcept:\r\n pass\r\n\r\nMi_Socket.close()\r\nprint(\"DONE\") # Se ve lindo este print al final del codigo :D\r\n\r\n","sub_path":"Lab5SI/ClienteDESy3DES.py","file_name":"ClienteDESy3DES.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"129465791","text":"import 
datetime\nimport pytz\n\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\n\nfrom rest_framework.authtoken.models import Token\n\nfrom app.portfolio.models import PortFolio\nfrom app.exchange.models import Exchange\nfrom app.assets.models import Asset\n\ndef create_user(**params):\n \"\"\" Helper function créer un nouvel user \"\"\"\n\n user = get_user_model().objects.create_user(**params)\n token = Token.objects.create(user=user)\n\n return user\n\ndef create_portfolio(**params):\n \"\"\" creer un portfolio \"\"\"\n\n defaults = {\n 'name': 'default folio'\n }\n defaults.update(**params)\n return PortFolio.objects.create(**defaults)\n\ndef create_exchange(**params):\n \"\"\" creer un exhange \"\"\"\n\n defaults = {\n 'name': 'Binance'\n }\n defaults.update(params)\n return Exchange.objects.create(**defaults)\n\ndef create_asset(**params):\n \"\"\" creer un asset \"\"\"\n\n defaults = {\n 'date': datetime.datetime.now(tz=timezone.utc),\n 'amount': 2,\n 'paire': 'USD',\n 'price': 10,\n 'type': 'buy',\n 'name': 'bitcoin'\n }\n defaults.update(params)\n return Asset.objects.create(**defaults)\n","sub_path":"holderFolioBack/holderFolioBack/tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"104514705","text":"# -*- coding: utf-8 -*-\n__author__ = 'yusongmin'\n\nfrom A_Initialization.Base_para import system_para_s, conn_sensitivity\nfrom B_Global_functions.Result_read.Para_scenario_average import para_scenario_result\nfrom B_Global_functions.Figure.Para_scenario import para_scenario_average_plot\nfrom D_Papers.Sensitivity.Para_ScenarioSetting import ParaRange_dict\n\n\"\"\"\n设置分析参数\n\n1. \"interest_rate\"\n2. \"energy_price\"\n3. \"output_price_initial\"\n4. \"elasticity\"\n5. \"payback\"\n6. \"technical_weight\"\n7. \"memory_length\"\n8. \"eye\"\n9. \"GFA_target\"\n10. \"OBA_target\"\n\"\"\"\npara = \"OBA_target\"\nallocation_method = \"OBA\"\nscenario_length = len(ParaRange_dict[para])\n\n\"\"\"\n设置结果类别\n\n1. market_result_average\n2. tech_adoption_average\n3. firm_result_average\n\"\"\"\nresult_category = \"tech_adoption_average\"\n\n\"\"\"\n设置分析变量\n\n1. market_result\n(1) output_market_price\n(2) output_market_quantity\n(3) carbon_market_price\n(4) carbon_market_volume\n\n2. tech_result\n(1) technology_adoption\n\n3. 
firm_result\n(1) energy_intensity_final\n(2) fine\n\"\"\"\nresult_variable = \"technology_adoption\"\n\n\"\"\"\n返回结果\n\n注:返回结果矩阵为3行,分别为该市场结果在各情景下的均值、最大值、最小值\n\"\"\"\nresult_matrix = para_scenario_result(system_para_s,\n para,\n scenario_length,\n result_category,\n allocation_method,\n result_variable,\n conn_sensitivity)\n\nif result_variable == \"output_market_quantity\":\n result_matrix = result_matrix/10000\nelse:\n pass\n\n\"\"\"\n输出图形\n\"\"\"\nresult_mark_dict = {\"output_market_price\": [\"Output price (Yuan / t)\", para, (3220, 3270)],\n \"output_market_quantity\": [\"Output ($10^4$t)\", para, (188, 192)],\n \"carbon_market_price\": [\"Average allowance price (Yuan / t)\", para, (0, 200)],\n \"carbon_market_volume\": [\"Allowance trading volume (t)\", para, (0, 60000)],\n \"technology_adoption\": [\"Technology adoption\", para, (200, 800)],\n \"energy_intensity_final\": [\"Energy intensity (tce / t)\", para, (0.64, 0.67)],\n \"fine\": [\"Fine (Million Yuan)\", para, (0, 450)]}\n\nx_ticks = ParaRange_dict[para]\npara_scenario_average_plot(result_matrix, x_ticks, result_mark_dict[result_variable])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Carbon_Market_ABM/D_Papers/Sensitivity/Main_ResultAnalysis_average.py","file_name":"Main_ResultAnalysis_average.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"477379697","text":"\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom scipy.stats import expon\nfrom scipy.stats import gamma\nfrom scipy.stats import poisson\nimport time\nimport math\nimport sys\n\n# We implement the Panjer recursion\n\nsys.setrecursionlimit(3000)\n#print(sys.getrecursionlimit())\n\ndef PanjerPS(PS0, Lambda, m, a, b):\n '''\n\n Parameters\n ----------\n PS0 : POSITIVE REAL NUMBER\n P(S = 0) IS THE PNG OF THE NUMBER OF CLAIMS R.V EVALUATED AT P(Y = 0), Y BEING THE INDIVIDUAL CLAIM SIZE.\n fY : POSITIVE REAL NUMBERS\n PMF OF Y.\n m : INTEGER\n THE POINT AT WHICH WE EVALUATE THE PMF OF S.\n a : INTEGER\n a OF THE (a, b, 0) CLASS.\n b : INTEGER\n b OF THE (a, b, 0) CLASS.\n\n Returns\n -------\n P(S = m)\n\n '''\n if m == 0:\n print(\"m = \", m)\n return PS0\n else:\n A = 1/(1- a * poisson.pmf(0, Lambda))\n somme = 0\n vec = [PS0]\n print(\"m = \", m)\n for k in range(1, m+1):\n print(\"k = \", k) #in order to keep track of iterations.\n B = (a + b * k / m)\n C = PanjerPS(PS0, Lambda, m - k, a, b)\n somme += poisson.pmf(k, Lambda) * C * B\n return A * somme\n \nif __name__ == \"__main__\":\n \n # In the discrete, we'll pick a negative bionomial rv for the number of claims, and poisson random variables for the claim sizes. \n\n #For the negative binomial we'll chose p equal to 0.5 and m = 80, so in average we have 80 claims per year \n #For the claim sizes, we'll take poisson random variable of parameter 60, so our expected claims are 60 each. \n \n m = 80 \n p = 0.5\n Lambda = 60\n \n # Here we choose the number n such that the probability that our negative binomial is equal to this n is too small\n # we take n equal to 160, nbinom.pmf(160, 80, 0.5) = 2.2743878591369746e-08\n n = 160\n # We'll try to answer the following questions: What is the probability that the aggregate claim size will be smaller than some value s0 ? 
\n # And what is the probability that the aggregate claim size will be larger than s1 ?\n \n s0 = 2500\n s1 = 7000\n \n values = np.arange(0, 8000, 1)\n \n \n fY_0 = poisson.pmf(0, Lambda)\n print(\"P Y = \", 0, \" = \", fY_0)\n PS0 = (p/ (1 - (1-p)* fY_0)) ** m\n # findinf a, b of the class (a, b, 0) that the Negative binomial (m,p) is belonging to\n a = (1 - p)\n b = (1 - p) * (m - 1)\n \n t0 = time.time()\n AggDist = PanjerPS(PS0, Lambda, s0, a, b)\n t1 = time.time() \n \n print(\"Using the Panjer recursion (recursive version), the probability that S is equal to \", s0, \" is : \", AggDist, \". Found in \", t1 - t0, \" seconds.\")\n \n print(\"PS\",3,\" = \",PanjerPS(PS0, Lambda, 3, a, b) )\n #plotting\n #fig, ax = plt.subplots( nrows=1, ncols=1 ) # create figure & 1 axis\n #ax.plot(values, AggDist)\n #ax.set_ylabel('P(S<=s)')\n #ax.set_xlabel('s')\n #ax.set_title(\"Exact method plot\", loc='center')\n #fig.savefig('ExactMethodplot.png') # save the figure to file\n #plt.close(fig) # close the figure window\n #plt.savefig(sys.stdout.buffer)\n \n\n\n","sub_path":"final_project3/Discrete_case/Panjer_Recursive.py","file_name":"Panjer_Recursive.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552955886","text":"import json\r\nimport logging\r\nimport os\r\nimport pickle\r\nimport re\r\nimport shutil\r\nimport socket\r\nfrom multiprocessing import Process\r\nfrom pathlib import Path\r\nfrom time import sleep\r\n\r\nfrom pika.exceptions import AMQPConnectionError\r\n\r\nfrom tp2_utils.blocking_socket_transferer import BlockingSocketTransferer\r\nfrom tp2_utils.interfaces.dummy_state_commiter import DummyStateCommiter\r\nfrom tp2_utils.leader_election.ack_process import AckProcess\r\nfrom tp2_utils.message_pipeline.message_pipeline import WINDOW_END_MESSAGE, message_is_end\r\nfrom tp2_utils.rabbit_utils.rabbit_consumer_producer import RabbitQueueConsumerProducer\r\nfrom tp2_utils.rabbit_utils.special_messages import BroadcastMessage\r\n\r\nBUSINESSES_QUEUE = 'yelp_businesses_news'\r\nBUSINESS_NOTIFY_END = 'notify_business_load_end'\r\nPATH_TO_SAVE_BUSINESSES = \"%s/businesses.pickle\"\r\nPATH_TO_SAVE_CLIENTS_ENDED = \"%s/clients_ended.pickle\"\r\nPATH_TO_SAVE_LOGFILE = '%s/logfile'\r\nBUSINESSES_READY = '%s/BUSINESSES_READY'\r\nSAFE_BACKUP_END = \".copy\"\r\nACK_LISTENING_PORT = 8000\r\nEND_REGEX_MATCH = \"END_(.+)\"\r\n\r\nlogger = logging.getLogger()\r\n\r\n\r\nclass SocketDataDownloader():\r\n def _safe_pickle_dump(self, obj, path):\r\n if os.path.exists(path):\r\n shutil.copy2(path, path + SAFE_BACKUP_END)\r\n with open(path, \"wb\") as dumpfile:\r\n pickle.dump(obj, dumpfile)\r\n\r\n def _safe_pickle_load(self, path):\r\n result = None\r\n try:\r\n if os.path.exists(path):\r\n with open(path, \"rb\") as dumpfile:\r\n result = pickle.load(dumpfile)\r\n except Exception:\r\n try:\r\n if os.path.exists(path + SAFE_BACKUP_END):\r\n shutil.copy2(path + SAFE_BACKUP_END, path)\r\n with open(path, \"rb\") as dumpfile:\r\n result = pickle.load(dumpfile)\r\n except Exception:\r\n pass\r\n if result != None:\r\n return True, result\r\n else:\r\n return False, None\r\n\r\n def __init__(self, port, listen_backlog, clients, data_path):\r\n self.port = port\r\n self.listen_backlog = listen_backlog\r\n self.clients_to_end = clients\r\n if os.path.exists(PATH_TO_SAVE_CLIENTS_ENDED % data_path):\r\n success, data = self._safe_pickle_load(PATH_TO_SAVE_CLIENTS_ENDED % data_path)\r\n if success:\r\n 
self.clients_ended, self.ends_seen = data\r\n else:\r\n self.clients_ended, self.ends_seen = 0, set()\r\n else:\r\n self.clients_ended, self.ends_seen = 0, set()\r\n self.data_path = data_path\r\n self.process_list = []\r\n self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._server_socket.bind(('', port))\r\n self._server_socket.listen(listen_backlog)\r\n\r\n def start_download_listening(self):\r\n while self.clients_ended < self.clients_to_end:\r\n client_sock = self.__accept_new_connection()\r\n self.__handle_client_connection(client_sock)\r\n self.clients_ended, self.ends_seen = 0, set()\r\n if os.path.exists(PATH_TO_SAVE_CLIENTS_ENDED % self.data_path):\r\n os.remove(PATH_TO_SAVE_CLIENTS_ENDED % self.data_path)\r\n\r\n def close(self):\r\n self._server_socket.close()\r\n\r\n def __handle_client_connection(self, client_sock):\r\n \"\"\"\r\n Read message from a specific client socket and closes the socket\r\n\r\n If a problem arises in the communication with the client, the\r\n client socket will also be closed\r\n \"\"\"\r\n socket_transferer = BlockingSocketTransferer(client_sock)\r\n try:\r\n msg = socket_transferer.receive_plain_text()\r\n if re.match(END_REGEX_MATCH, msg):\r\n if msg not in self.ends_seen:\r\n self.clients_ended += 1\r\n self.ends_seen.add(msg)\r\n self._safe_pickle_dump((self.clients_ended, self.ends_seen),\r\n PATH_TO_SAVE_CLIENTS_ENDED % self.data_path)\r\n socket_transferer.send_plain_text(\"REGISTERED\")\r\n socket_transferer.close()\r\n return\r\n if msg != \"SEND FILE\":\r\n socket_transferer.close()\r\n return\r\n except (OSError, TimeoutError) as e:\r\n socket_transferer.abort()\r\n return\r\n socket_transferer.send_file(PATH_TO_SAVE_BUSINESSES % self.data_path)\r\n socket_transferer.send_plain_text(\"ALL SENT\")\r\n socket_transferer.close()\r\n return\r\n\r\n def __accept_new_connection(self):\r\n \"\"\"\r\n Accept new connections\r\n\r\n Function blocks until a connection to a client is made.\r\n Then connection created is printed and returned\r\n \"\"\"\r\n c, addr = self._server_socket.accept()\r\n return c\r\n\r\n\r\nclass DataGatherer:\r\n def __init__(self, data_path, clients):\r\n self.business_locations = {}\r\n self.logfile = None\r\n self.data_path = data_path\r\n self.clients = clients\r\n self._restore_and_open_logfile()\r\n\r\n def _restore_and_open_logfile(self):\r\n if os.path.exists(PATH_TO_SAVE_LOGFILE % self.data_path):\r\n logfile = open(PATH_TO_SAVE_LOGFILE % self.data_path, \"r\")\r\n line = logfile.readline()\r\n while line:\r\n try:\r\n item = json.loads(line)\r\n self.business_locations[item['business_id']] = item['city']\r\n except Exception:\r\n pass\r\n line = logfile.readline()\r\n logfile.close()\r\n self.logfile = open(PATH_TO_SAVE_LOGFILE % self.data_path, \"w\")\r\n\r\n def gather_business_locations(self, item):\r\n if message_is_end(item):\r\n with open(PATH_TO_SAVE_BUSINESSES % self.data_path, 'wb') as business_file:\r\n pickle.dump(self.business_locations, business_file)\r\n Path(BUSINESSES_READY % self.data_path).touch()\r\n self.logfile.close()\r\n os.remove(PATH_TO_SAVE_LOGFILE % self.data_path)\r\n logging.info(\"Business gathering ended\")\r\n return [BroadcastMessage(WINDOW_END_MESSAGE) for _ in range(self.clients)], True\r\n else:\r\n self.logfile.write(\"%s\\n\" % json.dumps(item))\r\n self.logfile.flush()\r\n self.business_locations[item['business_id']] = item['city']\r\n return [], False\r\n\r\n\r\ndef notify_data_available(item):\r\n if message_is_end(item):\r\n return 
[BroadcastMessage(WINDOW_END_MESSAGE)], False\r\n return [], False\r\n\r\n\r\ndef empty_queue(item):\r\n if item == WINDOW_END_MESSAGE:\r\n return [], True\r\n return [], False\r\n\r\n\r\ndef run_process(port, listen_backlog, rabbit_host, clients,\r\n data_path=\"data\"):\r\n # simple fail if file is not accesible\r\n open(PATH_TO_SAVE_BUSINESSES % data_path, 'wb').close()\r\n socket_downloader = SocketDataDownloader(port, listen_backlog, clients, data_path)\r\n while True:\r\n if not os.path.exists(BUSINESSES_READY % data_path):\r\n logger.info(\"Consuming businesses\")\r\n data_gatherer = DataGatherer(data_path, clients)\r\n cp = RabbitQueueConsumerProducer(rabbit_host, BUSINESSES_QUEUE,\r\n [BUSINESS_NOTIFY_END],\r\n DummyStateCommiter(data_gatherer.gather_business_locations),\r\n messages_to_group=1, logger=logger)\r\n try:\r\n cp()\r\n except Exception as e:\r\n logger.exception(\"Error while consuming businesses\")\r\n raise e\r\n\r\n try:\r\n logger.info(\"Starting download service\")\r\n socket_downloader.start_download_listening()\r\n except Exception as e:\r\n logger.exception(\"Error accepting connections for downloading\")\r\n raise e\r\n logger.info(\"Stoping downloader service\")\r\n os.remove(BUSINESSES_READY % data_path)\r\n if os.path.exists(PATH_TO_SAVE_CLIENTS_ENDED % data_path):\r\n os.remove(PATH_TO_SAVE_CLIENTS_ENDED % data_path)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(\r\n level=logging.INFO,\r\n format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n )\r\n\r\n ack_process = AckProcess(ACK_LISTENING_PORT, os.getpid())\r\n ack_process_aux = Process(target=ack_process.run)\r\n ack_process_aux.start()\r\n logger.info(\"Starting business downloader\")\r\n port = int(os.getenv('PORT'))\r\n listen_backlog = int(os.getenv('LISTEN_BACKLOG'))\r\n rabbit_host = os.getenv('RABBIT_HOST')\r\n clients = int(os.getenv('CLIENTS'))\r\n while True:\r\n try:\r\n run_process(port, listen_backlog, rabbit_host, clients)\r\n except AMQPConnectionError:\r\n sleep(2)\r\n logger.info(\"Retrying connection to rabbit...\")\r\n except Exception as e:\r\n logger.exception(\"Fatal error in consumer\")\r\n ack_process_aux.terminate()\r\n raise e\r\n","sub_path":"business_download_service/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"524240345","text":"#coding: utf-8\nfrom datetime import datetime, timedelta, timezone\nimport MySQLdb\nimport sys\nimport copy\nimport numpy\nimport pandas\nimport talib\nimport matplotlib.pyplot as plt\n\n#データは全てnumpy配列であること前提\n#まあnumpyでなくpy3標準のリストでも動くとは思うけど\n\ndef plotrawdata(rang, isgrid, raw):\n fig = plt.figure()\n ax = plt.subplot()\n plt.grid(isgrid)\n ax.plot(rang, raw, color='black')\n plt.show()\n\n#原系列と移動平均線を任意の数引く\ndef plot_withMA(rang, isgrid, raw, malist, macolors):\n fig = plt.figure()\n ax = plt.subplot()\n plt.grid(isgrid)\n ax.plot(rang, raw, color='black')\n for i in range(len(malist)):\n ax.plot(rang, malist[i], color=macolors[i])\n plt.show()\n\n#とりあえず指定したデータ全部重ねて描く\ndef plot_overlap(rang, isgrid, datas, colors):\n fig = plt.figure()\n ax = plt.subplot()\n plt.grid(isgrid)\n for i in range(len(datas)):\n ax.plot(rang, datas[i], color=colors[i])\n plt.show()\n\n#インジケータも描画\ndef plot_with_indicator(rang, isgrid, raw, datas, colors):\n fig = plt.figure()\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax2 = 
plt.subplot2grid((2,2), (1,0), colspan=2)\n plt.grid(isgrid)\n ax1.plot(rang, raw, color='black')\n for i in range(len(datas)):\n ax2.plot(rang, datas[i], color=colors[i])\n plt.show()\n\n#短期長期2つのMA線からGC,DCのインデックスを検出\ndef MAcross(mashr, malng):\n cross = {\"GC\":None, \"DC\":None}\n crosses = []\n for i in range(len(mashr) - 2):\n if (mashr[i] - malng[i] < 0) and (mashr[i+1] - malng[i+1] > 0):\n cross[\"GC\"] = i + 1\n elif (mashr[i] - malng[i]) - (mashr[i+1] - malng[i+1]) == 0:\n if (mashr[i] - malng[i] < 0) and (mashr[i+2] - malng[i+2] > 0):\n cross[\"GC\"] = i + 1\n\n elif (mashr[i] - malng[i] > 0) and (mashr[i+1] - malng[i+1] < 0):\n cross[\"DC\"] = i + 1\n elif (mashr[i] - malng[i]) - (mashr[i+1] - malng[i+1]) == 0:\n if (mashr[i] - malng[i] > 0) and (mashr[i+2] - malng[i+2] < 0):\n cross[\"DC\"] = i + 1\n \n if (cross[\"GC\"] != None) and (cross[\"DC\"] != None):\n crosses.append(copy.deepcopy(cross))#Important!!!!!\n cross[\"GC\"] = None\n cross[\"DC\"] = None\n\n return crosses\n\n#たまにDCが最初に来るので最初のDCを飛ばしてGCからのリストにする\ndef XInverter(crosslist):\n newcrosslist = []\n cross = {\"GC\":None, \"DC\":None}\n if crosslist[0][\"GC\"] > crosslist[0][\"DC\"]:\n for i in range(len(crosslist) - 1):\n cross[\"GC\"] = crosslist[i][\"GC\"]\n cross[\"DC\"] = crosslist[i+1][\"DC\"]\n newcrosslist.append(copy.deepcopy(cross))\n #print(\"そうですね.\")\n return newcrosslist\n else:\n return crosslist\n\n#MA系の買い・売りシグナル(交差点)より売買結果を返す\ndef MAcross_trading(crosslist, raw):\n result = []\n for i in range(len(crosslist)):\n amount = int(raw[crosslist[i][\"DC\"]] - raw[crosslist[i][\"GC\"]]) #損益額\n rate = raw[crosslist[i][\"DC\"]] / raw[crosslist[i][\"GC\"]] #損益率\n iswon = amount > 0\n res = {\"amount\":amount, \"rate\":rate, \"iswon\":iswon}\n result.append(copy.deepcopy(res))\n #print(\"まあ頑張れや.\")\n return result\n\n#売買結果をそれなりに分析\ndef analize_trading_result(result, isprint=True):\n tradenum = len(result)\n sumamount = 0\n rate = 0\n wonnum = 0\n for i in range(tradenum):\n sumamount += result[i][\"amount\"]\n rate += result[i][\"rate\"]\n wonnum += result[i][\"iswon\"]\n totalrate = rate / tradenum\n wonrate = wonnum / tradenum\n if isprint:\n print(\"売買回数: \" + str(tradenum) + \"\\n損益合計: \" + str(sumamount) + \"\\n合計収益率: \" + str(totalrate) + \"\\n勝利回数: \" + str(wonnum) + \"\\n勝率: \" + str(wonrate))\n return {\"tradenum\":tradenum, \"total\":sumamount, \"totalrate\":totalrate, \"wonnum\":wonnum, \"wonrate\":wonrate}\n","sub_path":"BTC/technical_neo/technicalkit.py","file_name":"technicalkit.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"561062192","text":"import random\nfrom poodle import Object\nfrom kalc.model.system.base import ModularKind\nfrom kalc.model.system.primitives import TypePolicy\nfrom kalc.misc.const import *\nfrom kalc.misc.util import convertPriorityValue\n\n\nclass PriorityClass(ModularKind):\n metadata_name: str\n\n priority: int\n preemptionPolicy: TypePolicy\n\n def __init__(self, *args, **kwargs):\n super().__init__( *args, **kwargs)\n self.metadata_name = \"modelPriorityClass\"+str(random.randint(100000000, 999999999))\n # self.metadata_name = \"model-default-name\"\n self.preemptionPolicy = POLICY[\"PreemptLowerPriority\"]\n self.priority = 0\n\n @property\n def value(self):\n pass\n @value.setter \n def value(self, value):\n norm_pri = convertPriorityValue(value)\n if norm_pri > 1000: norm_pri = 1000\n self.priority = norm_pri\n \n def __str__(self): return 
str(self._get_value())\n\nzeroPriorityClass = PriorityClass(\"ZERO\")\nzeroPriorityClass.metadata_name = \"Normal-zero\"\n","sub_path":"kalc/model/kinds/PriorityClass.py","file_name":"PriorityClass.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"79267456","text":"# try to use the customized version of the kernel in SVR\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom sklearn.svm import SVR\n\nfrom mykernel import *\n\na = lambda_kernel(kernel='lk')\nb = lambda_kernel(kernel='rk', gamma=0.1)\nc = half_combined_kernel(degree=3, gamma=0.1)\nd = three_combined_kernel()\n\nmat = scipy.io.loadmat('data/02-solar.mat')\nX = mat['X']\ny = mat['y'].ravel()\n\n#svr_rbf = SVR(kernel=b)\n#y_rbf = svr_rbf.fit(X, y).predict(X)\n\nsvr_comb = SVR(kernel=c)\ny_comb = svr_comb.fit(X, y).predict(X)\n\nplt.scatter(X, y, c='k', label='data')\nplt.hold('on')\nplt.plot(X, y_comb, c='g', label='half model')\nplt.xlabel('data')\nplt.ylabel('target')\nplt.xticks()\nplt.yticks()\nplt.title('customized rbf')\nplt.legend()\n\nplt.show()\n","sub_path":"code/test_code/kernel/svr_solar.py","file_name":"svr_solar.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505099909","text":"#!/usr/bin/env python3\n\nimport datetime\nimport json\nimport logging\nfrom googleapiclient.discovery import build\nfrom google_calendar_event import GoogleCalendarEvent\n\nlogger = logging.getLogger(__name__)\n\n\nclass GoogleCalendarHandler(object):\n\n def __init__(self, credentials, classroom_calendar_id, registration_calendar_id, streaming_keywords=[\"[streaming]\"], private_keywords=[\"[private]\"]):\n self.credentials = credentials\n self.service = build('calendar', 'v3', credentials=self.credentials)\n self.classroom_calendar_id = classroom_calendar_id\n self.registration_calendar_id = registration_calendar_id\n self.streaming_keywords = streaming_keywords\n self.private_keywords = private_keywords\n self.registered_events = None\n\n def get_classroom_events(self, previous_days=0, future_days=30, max_results=100):\n time_min = (datetime.datetime.utcnow() - datetime.timedelta(days=previous_days)).isoformat() + 'Z'\n time_max = (datetime.datetime.utcnow() + datetime.timedelta(days=future_days)).isoformat() + 'Z'\n\n events_result = self.service.events().list(calendarId=self.classroom_calendar_id, \n timeMin=time_min, timeMax=time_max,\n maxResults=max_results, singleEvents=True,\n orderBy='startTime').execute()\n __events = events_result.get('items', [])\n events = [GoogleCalendarEvent(__event, self.streaming_keywords, self.private_keywords) for __event in __events]\n\n logger.debug(\"Number of events found: {}\".format(len(events)))\n return events\n\n def get_classroom_streaming_events(self, previous_days=0, future_days=30, max_results=100):\n events = self.get_classroom_events(previous_days, future_days)\n streaming_events = list(filter(lambda e: e.is_streaming(), events))\n streaming_events_accepted = streaming_events[:max_results]\n logger.debug(\"Number of streaming events accepted: {} ({})\".format(len(streaming_events_accepted), len(streaming_events)))\n return streaming_events_accepted\n\n def get_classroom_next_streaming_event(self):\n events = self.get_classroom_streaming_events(0, 7, 1)\n\n if not events:\n logger.info(\"No events for the next week\")\n return\n elif len(events) == 1:\n event = 
events[0]\n logger.debug(\"One event for the next week: '{}'\".format(event.title))\n return event\n else:\n logger.error(\"Something really wrong has happened with Google Calendar API\")\n return","sub_path":"src/google_calendar_handler.py","file_name":"google_calendar_handler.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"458528920","text":"class Solution:\n def largeGroupPositions(self, S):\n \"\"\"\n :type S: str\n :rtype: List[List[int]]\n \"\"\"\n start, i, n=0, 0, len(S)\n res=[]\n while i=3: res.append((start, i-1))\n start=i\n \n return res\n","sub_path":"python/positions-of-large-groups.py","file_name":"positions-of-large-groups.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"77820503","text":"import itertools as it\n\nimport gym\nfrom gym.agents.yumi import YumiReachAgent\nfrom gym.utils.mjviewer import add_selection_logger\n\n\n# ENV = 'YumiReachTwoArms-v1'\n# ENV = 'YumiReachRightArm-v1'\nENV = 'YumiReachLeftArm-v1'\n\n\ndef main():\n\n env = gym.make(ENV)\n raw_env = env.unwrapped\n sim = raw_env.sim\n env.render()\n add_selection_logger(raw_env.viewer, sim)\n\n # env.reset()\n # sim.data.qpos[:] = 0.0\n # sim.data.qvel[:] = 0.0\n # sim.step()\n\n agent = YumiReachAgent(env)\n done = True\n obs = None\n\n for _ in it.count():\n if done:\n obs = env.reset()\n u = agent.predict(obs)\n\n # print(u)\n obs, rew, done, _ = env.step(u)\n env.render()\n print(rew)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"playground/yumi/yumi_with_agent.py","file_name":"yumi_with_agent.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"280092756","text":"import pygame\nimport classes.dice\nfrom deck import *\nfrom classes.screen import *\nfrom classes.functions import *\n\npygame.mixer.init()\n\nclass Fight:\n def __init__(self, dice, turn, roll, rolled, p1choice, p2choice):\n self.roll = roll #Local boolean to keep function active in main loop while roll variable is lost.\n self.rolled = rolled #Remove dice if you have rolled to determine attack / defend choices\n self.turn = 1\n self.p1choice = p1choice\n self.p2choice = p2choice\n\n def reset(self):\n self.roll = False\n current_player.done = False\n self.rolled = False\n self.turn = 1\n self.p1choice = False\n self.p2choice = False\n\nreadytoroll = False\nfight = Fight(False, 1, False, False, False, False)\n\ndef checkfight():\n if current_player.fight == True:\n if current_player.fighttype == 'super':\n superfight(current_player.fighter, False, fight.rolled)\n elif current_player.fighttype == 'hoek':\n hoekfight(current_player.fighter, current_player.fightp2, False, fight.rolled)\n\n\ndef superfight(p, roll=False, superfightrolled=False):\n option1p = False\n option2p = False\n option3p = False\n color1 = 0\n color2 = 0\n color3 = 0\n global current_player\n current_player.fight = True\n\n superlijst[0].draw(background)\n if not superfightrolled:\n dicebutton = recttext(\"Fight\", 25, 1050, 220, 100, 50, white, playerhighlight, playerhighlight2)\n\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n if dicebutton.collidepoint(x, y):\n dice.roll('superfight')\n fight.rolled = True\n fight.roll = True\n\n if roll or fight.roll:\n fight.roll = True #Set the storage to True, so it will keep 
running untill shut down\n stats = p.stats #Stats = stats variable of the player\n draw_card_highlight_pos = [(500, 230), (500, 260), (500, 290), (500, 320), (500, 350), (500, 380)]\n pygame.draw.rect(background, (red), (draw_card_highlight_pos[dice.value-1][0], draw_card_highlight_pos[dice.value-1][1], 200, 28), 3)\n\n text(\"Defend:\", white, 26, 890, 100)\n\n option4 = roundrect(background, playerhighlight, pygame.Rect(890, 135, 140, 65), 0, 10, 10)\n text(\"Damage: 0\", white, 32, 900, 140)\n text(\"Condition: 0\", white, 32, 900, 170)\n\n if p.condition < stats[dice.value-1][0][1]:\n option1p = False\n color1 = red3\n else:\n option1p = True\n color1 = playerhighlight\n option1 = roundrect(background, color1, pygame.Rect(890, 135, 140, 65), 0, 10, 10)\n text((\"Damage: \" + str(stats[dice.value - 1][0][0])), white, 32, 900, 140)\n text((\"Condition: \" + str(stats[dice.value - 1][0][1])), white, 32, 900, 170)\n if p.condition < stats[dice.value-1][1][1]:\n option2p = False\n color2 = red3\n else:\n option2p = True\n color2 = playerhighlight\n\n option2 = roundrect(background, color2, pygame.Rect(890, 235, 140, 65), 0, 10, 10)\n text((\"Damage: \" + str(stats[dice.value - 1][1][0])), white, 32, 900, 240)\n text((\"Condition: \" + str(stats[dice.value - 1][1][1])), white, 32, 900, 270)\n\n if p.condition < stats[dice.value-1][2][1]:\n option3p = False\n color3 = red3\n else:\n option3p = True\n color3 = playerhighlight\n option3 = roundrect(background, color3, pygame.Rect(890, 335, 140, 65), 0, 10, 10)\n text((\"Damage: \" + str(stats[dice.value - 1][2][0])), white, 32, 900, 340)\n text((\"Condition: \" + str(stats[dice.value - 1][2][1])), white, 32, 900, 370)\n\n option4 = roundrect(background, playerhighlight, pygame.Rect(890, 435, 140, 65), 0, 10, 10)\n text(\"Damage: 0\", white, 32, 900, 440)\n text(\"Condition: 0\", white, 32, 900, 470)\n\n result = False\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n if (option1.collidepoint(x, y)) and (option1):\n result = p.superfight(0, dice.value, p) #Check if clicked on 1rst attack button\n elif (option2.collidepoint(x, y)) and (option2p):\n result = p.superfight(1, dice.value, p) #Check if clicked on 2rst attack button\n elif (option3.collidepoint(x, y)) and (option3p):\n result = p.superfight(2, dice.value, p) #Check if clicked on 3rst attack button\n elif option4.collidepoint(x, y):\n result = p.superfight(3, dice.value, p) #Check if clicked on 3rst attack button\n\n\n if current_player.done: #If done, allow ending turn\n current_player.endturn = True\n current_player.fight = False\n fight.rolled = False\n fight.reset()\n\n pygame.display.flip()\n\ndef hoekfight(p1, p2, hoekroll=False, hoekrolled=False):\n option1p = False\n option2p = False\n option3p = False\n color1 = 0\n color2 = 0\n color3 = 0\n if not hoekrolled:\n dicebutton = recttext(\"Fight\", 25, 1050, 220, 100, 50, white, playerhighlight, playerhighlight2)\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n if dicebutton.collidepoint(x, y):\n dice.roll('hoek')\n current_player.fighttype = 'hoek'\n fight.rolled = True\n fight.roll = True\n\n if current_player.done: #If done, allow ending turn\n current_player.endturn = True\n current_player.fight = False\n fight.reset()\n\n if fight.turn == 1:\n p = p1\n else:\n p = p2\n\n if fight.roll is True:\n stats = p.stats #Stats = stats variable of the player\n\n if fight.turn == 1:\n text(\"Attack:\", white, 26, 890, 100)\n else:\n 
text(\"Defend:\", white, 26, 890, 100)\n\n if p.condition < stats[dice.value-1][0][1]:\n option1p = False\n color1 = red3\n else:\n option1p = True\n color1 = playerhighlight\n option1 = roundrect(background, color1, pygame.Rect(890, 135, 140, 65), 0, 10, 10)\n text((\"Damage: \" + str(stats[dice.value - 1][0][0])), white, 32, 900, 140)\n text((\"Condition: \" + str(stats[dice.value - 1][0][1])), white, 32, 900, 170)\n if p.condition < stats[dice.value-1][1][1]:\n option2p = False\n color2 = red3\n else:\n option2p = True\n color2 = playerhighlight\n\n option2 = roundrect(background, color2, pygame.Rect(890, 235, 140, 65), 0, 10, 10)\n text((\"Damage: \" + str(stats[dice.value - 1][1][0])), white, 32, 900, 240)\n text((\"Condition: \" + str(stats[dice.value - 1][1][1])), white, 32, 900, 270)\n\n if p.condition < stats[dice.value-1][2][1]:\n option3p = False\n color3 = red3\n else:\n option3p = True\n color3 = playerhighlight\n option3 = roundrect(background, color3, pygame.Rect(890, 335, 140, 65), 0, 10, 10)\n text((\"Damage: \" + str(stats[dice.value - 1][2][0])), white, 32, 900, 340)\n text((\"Condition: \" + str(stats[dice.value - 1][2][1])), white, 32, 900, 370)\n\n option4 = roundrect(background, playerhighlight, pygame.Rect(890, 435, 140, 65), 0, 10, 10)\n text(\"Damage: 0\", white, 32, 900, 440)\n text(\"Condition: 0 \", white, 32, 900, 470)\n\n both_choice = False\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n if option1.collidepoint(x, y) and option1p:\n if fight.turn == 1:\n fight.p1choice = 0\n fight.turn = 2\n elif fight.turn == 2:\n fight.p2choice = 0\n both_choice = True\n elif option2.collidepoint(x, y) and option2p:\n if fight.turn == 1:\n fight.p1choice = 1\n fight.turn = 2\n elif fight.turn == 2:\n fight.p2choice = 1\n both_choice = True\n elif option3.collidepoint(x, y) and option3p:\n if fight.turn == 1:\n fight.p1choice = 2\n fight.turn = 2\n elif fight.turn == 2:\n fight.p2choice = 2\n both_choice = True\n elif option4.collidepoint(x, y):\n if fight.turn == 1:\n fight.p1choice = 3\n fight.turn = 2\n elif fight.turn == 2:\n fight.p2choice = 3\n both_choice = True\n\n if both_choice:\n p1.fight(dice.value, p2, fight.p1choice, fight.p2choice)\n","sub_path":"classes/fight.py","file_name":"fight.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25908986","text":"import numpy as np\nimport cclib\n\n\nclass Molecule:\n \"\"\"\n Class to store molecule information\n\n Attributes\n ----------\n n_atom : int\n number of atoms\n xyz : float\n xyz coordinates. Size: (n_atom,3)\n sym : string\n List of atomic symbol. Size: (n_atom,1)\n at_num :\n List of atomic numbers. 
Size: (n_atom,1)\n \"\"\"\n __nuc = {'H': 1, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9,\n 'P': 15, 'S': 16, 'Cl': 17, 'Se': 34, 'Br': 35, 'I': 53}\n __accepted_file_formats = ['xyz', 'sdf', 'mol']\n\n def __init__(self, fname=None):\n if fname is not None:\n self.import_file(fname)\n return None\n\n def sym2num(self, sym):\n \"\"\"\n Given a chemical symbol, returns the atomic number defined within the class\n\n Parameters\n -----------\n sym : string\n chemical symbol\n\n Returns\n --------\n at_num : int\n atomic number for symbol argument\n \"\"\"\n try:\n atomic_number = self.__nuc['{}'.format(sym)]\n return atomic_number\n except:\n print('{} is not defined.'.format(sym))\n\n def import_file(self, fname):\n filetype = fname.split('.')[1]\n if filetype not in Molecule.__accepted_file_formats:\n parsed_properly = self.import_cclib(fname)\n if not parsed_properly:\n formatted_aff = str(\n Molecule.__accepted_file_formats).strip('[]')\n raise NotImplementedError(\n 'file type \\'{}\\' is unsupported. Accepted formats: {} or any cclib suported format.'.format(filetype, formatted_aff))\n if filetype == 'xyz':\n self.import_xyz(fname)\n elif filetype == 'sdf' or filetype == 'mol':\n self.import_sdf(fname)\n\n def import_xyz(self, fname):\n \"\"\"\n Imports xyz file as a Molecule class instance\n\n Parameters\n ----------\n fname : string\n xyz filename\n \"\"\"\n self.ftype = 'xyz'\n with open(fname) as f:\n lines = f.readlines()\n self.n_atom = int(lines[0].split()[0])\n\n # reading lines to build up class data\n self.sym = []\n self.at_num = []\n self.xyz = np.zeros((self.n_atom, 3))\n for i, line in enumerate(lines[2:]):\n tmp = line.split()\n self.sym.append(tmp[0])\n self.at_num.append(self.sym2num(tmp[0]))\n self.xyz[i, 0] = float(tmp[1])\n self.xyz[i, 1] = float(tmp[2])\n self.xyz[i, 2] = float(tmp[3])\n\n def import_sdf(self, fname):\n \"\"\"\n Imports xyz file as a Molecule class instance\n\n Parameters\n ----------\n fname : string\n sdf or mol file name\n \"\"\"\n self.ftype = 'sdf'\n with open(fname) as f:\n lines = f.readlines()\n self.n_atom = int(lines[3].split()[0])\n self.n_connect = int(lines[3].split()[1])\n self.sym = []\n self.at_num = []\n self.n_place = []\n self.xyz = np.zeros((self.n_atom, 3))\n for i, line in enumerate(lines[4:4+self.n_atom]):\n tmp = line.split()\n self.sym.append(tmp[3])\n self.at_num.append(self.sym2num(tmp[3]))\n self.xyz[i, 0] = float(tmp[0])\n self.xyz[i, 1] = float(tmp[1])\n self.xyz[i, 2] = float(tmp[2])\n self.n_place.append(i)\n self.connect = np.zeros((self.n_connect, 2))\n for i, line in enumerate(lines[4+self.n_atom:4+self.n_atom+self.n_connect]):\n tmp = line.split()\n self.connect[i, 0] = tmp[0]\n self.connect[i, 1] = tmp[1]\n\n def import_cclib(self, fname):\n \"\"\"\n Imports any cclib parsable file as a Molecule class instance\n\n Parameters\n -----------\n fname : string\n cclib parsable output file name\n \"\"\"\n try:\n self.ftype = 'cclib'\n data = cclib.io.ccread(fname)\n self.n_atom = data.natom\n self.at_num = data.atomnos\n # This gets the atomic symbols by looking up the keys of the\n # atomic numbers. It looks somewhat crazy but it is looking\n # through a list of the values stored in the dictionary,\n # matching the value to the atomic number and returning\n # the key that corresponds to that atomic number. 
It works\n # with this dictionary because the keys to values are 1 to 1.\n self.sym = []\n for i in data.atomnos:\n self.sym.append(list(self.__nuc.keys())[\n list(self.__nuc.values()).index(i)])\n # cclib stores the atomic coordinates in a array of shape\n # [molecule, num atoms, 3 for xyz] because I think they might\n # have many \"molecules\" from each step of an optimization or\n # something. Here we are taking just the last one.\n self.xyz = data.atomcoords[-1]\n return True\n except:\n return False\n","sub_path":"chemreps/utils/molecule.py","file_name":"molecule.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"583330925","text":"\"\"\"Create your own implementation of a built-in function enumerate, named `with_index`,\nwhich takes two parameters: `iterable` and `start`, default is 0.\nTips: see the documentation for the enumerate function\"\"\"\n\n\ndef with_index(iterable, start=0):\n i = start\n for n in iterable:\n yield i, n\n i += 1\n\n\ndict = {'a': 1, 'b': 2, 'c': 3}\nfor j in with_index(dict):\n print(j, type(j))\n","sub_path":"homework/topic_16_iterators_and_generators/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288547915","text":"from flask import Flask, jsonify\nfrom flask_cors import CORS, cross_origin\nimport os\nimport numpy as np\n\nstatic_folder = \"../results/tables/\"\nstatic_name = \"LSTM_from_pretrained\"\nn = 20\n\ndef write_to_memory(current_length):\n file = open(\"./store/memory.txt\", \"w\")\n file.write(str(current_length))\n file.close()\n\n\ndef read_from_memory():\n file = open(\"./store/memory.txt\", \"r\")\n lines = file.read()\n file.close()\n if len(lines) > 0:\n return int(lines.strip())\n else:\n return 0\n\ndef process(arr):\n processed_array = np.asarray(arr, dtype=np.float32)\n\n # Averaging every Nth element:\n shortening_index = len(processed_array) % n\n\n shortened_array = processed_array[: len(processed_array) - shortening_index]\n averaged_array = np.mean(np.array(shortened_array).reshape(-1, n), axis=1)\n\n return averaged_array.tolist()\n\n\ndef get_best(arr):\n processed_array = np.asarray(arr, dtype=np.float32)\n print(\"processed: \", processed_array)\n print(\"BEST: \", np.argmax(processed_array))\n return int(np.argmax(processed_array))\n\n\ndef get_results_from_filename(filename):\n directory = os.path.join(static_folder, filename)\n\n result_array = []\n\n for dirName, subdir, fileList in os.walk(directory):\n for i, file in enumerate(fileList):\n with open(os.path.join(dirName, file)) as f:\n lines = f.readlines()\n if i == 0:\n write_to_memory(len(lines))\n result_array.append(process(list(map(str.strip, lines))))\n\n return result_array\n\n\ndef get_last_results_from_filename(filename):\n directory = os.path.join(static_folder, filename)\n\n result_array = []\n current_line = 0\n\n for dirName, subdir, fileList in os.walk(directory):\n for i, file in enumerate(fileList):\n with open(os.path.join(dirName, file)) as f:\n lines = f.readlines()\n if i == 0:\n current_line = read_from_memory()\n if current_line + n < len(lines):\n write_to_memory(len(lines))\n else:\n return\n result_array.append(process(list(map(str.strip, lines[current_line:]))))\n\n return result_array\n\n\ndef get_best_results_from_filename(filename):\n directory = os.path.join(static_folder, filename)\n\n result_array = []\n\n 
with open(os.path.join(directory, \"reward.txt\")) as f:\n best_result_index = get_best(f.readlines())\n\n for dirName, subdir, fileList in os.walk(directory):\n for i, file in enumerate(fileList):\n with open(os.path.join(dirName, file)) as f:\n result_array.append(f.readlines()[best_result_index].strip())\n return result_array\n\n\napp = Flask(__name__)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\n@app.route(\"/\")\n@cross_origin()\ndef index():\n return \"Index!\"\n\n\n@app.route(\"/hello\")\n@cross_origin()\ndef hello():\n return jsonify(\"Hello CORS World!\")\n\n\n@app.route(\"/result\")\n@cross_origin()\ndef get_result():\n return jsonify(get_results_from_filename(static_name))\n\n\n@app.route(\"/result/update\")\n@cross_origin()\ndef update_result():\n return jsonify(get_last_results_from_filename(static_name))\n\n\n@app.route(\"/result/best\")\n@cross_origin()\ndef get_best_result():\n return jsonify(get_best_results_from_filename(static_name))\n\n\n@app.route(\"/members//\")\n@cross_origin()\ndef getMember(name):\n return name\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"dev/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"153161649","text":"from __future__ import division\n\nimport onmt\nimport onmt.Markdown\nimport onmt.modules\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom torch import cuda\nfrom torch.autograd import Variable\nimport math\nimport time, datetime\nfrom onmt.train_utils.trainer import XETrainer\nfrom onmt.train_utils.multiGPUtrainer import MultiGPUXETrainer\nfrom onmt.modules.Loss import NMTLossFunc\nfrom onmt.ModelConstructor import build_model, init_model_parameters\n\nparser = argparse.ArgumentParser(description='train.py')\nonmt.Markdown.add_md_help_argument(parser)\n\n# Data options\n\nparser.add_argument('-data', required=True,\n help='Path to the *-train.pt file from preprocess.py')\nparser.add_argument('-save_model', default='model',\n help=\"\"\"Model filename (the model will be saved as\n _epochN_PPL.pt where PPL is the\n validation perplexity\"\"\")\nparser.add_argument('-load_from', default='', type=str,\n help=\"\"\"If training from a checkpoint then this is the\n path to the pretrained model.\"\"\")\nparser.add_argument('-model', default='recurrent',\n help=\"Optimization method. 
[recurrent|transformer|stochastic_transformer]\")\nparser.add_argument('-layers', type=int, default=2,\n help='Number of layers in the LSTM encoder/decoder') \n# Recurrent Model options\nparser.add_argument('-rnn_size', type=int, default=512,\n help='Size of LSTM hidden states')\nparser.add_argument('-word_vec_size', type=int, default=512,\n help='Word embedding sizes')\nparser.add_argument('-input_feed', type=int, default=1,\n help=\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\")\nparser.add_argument('-brnn', action='store_true',\n help='Use a bidirectional encoder')\nparser.add_argument('-brnn_merge', default='concat',\n help=\"\"\"Merge action for the bidirectional hidden states:\n [concat|sum]\"\"\")\n\n# Transforer Model options\nparser.add_argument('-model_size', type=int, default=512,\n help='Size of embedding / transformer hidden') \nparser.add_argument('-inner_size', type=int, default=2048,\n help='Size of inner feed forward layer') \nparser.add_argument('-n_heads', type=int, default=8,\n help='Number of heads for multi-head attention') \nparser.add_argument('-checkpointing', type=int, default=0,\n help='Number of checkpointed layers in the Transformer') \nparser.add_argument('-attn_dropout', type=float, default=0.1,\n help='Dropout probability; applied on multi-head attention.') \nparser.add_argument('-emb_dropout', type=float, default=0.1,\n help='Dropout probability; applied on top of embedding.') \nparser.add_argument('-weight_norm', action='store_true',\n help='Apply weight normalization on linear modules')\nparser.add_argument('-layer_norm', default='fast',\n help='Layer normalization type')\nparser.add_argument('-death_rate', type=float, default=0.5,\n help='Stochastic layer death rate') \nparser.add_argument('-activation_layer', default='linear_relu_linear', type=str,\n help='The activation layer in each transformer block') \nparser.add_argument('-time', default='positional_encoding', type=str,\n help='Type of time representation positional_encoding|gru|lstm') \nparser.add_argument('-version', type=float, default=1.0,\n help='Transformer version. 1.0 = Google type | 2.0 is different') \nparser.add_argument('-attention_out', default='default',\n help='Type of attention out. default|combine')\nparser.add_argument('-residual_type', default='regular',\n help='Type of residual type. regular|gated')\n# Optimization options\nparser.add_argument('-encoder_type', default='text',\n help=\"Type of encoder to use. Options are [text|img].\")\nparser.add_argument('-batch_size_words', type=int, default=2048,\n help='Maximum batch size in word dimension')\nparser.add_argument('-batch_size_sents', type=int, default=128,\n help='Maximum number of sentences in a batch')\nparser.add_argument('-max_generator_batches', type=int, default=32,\n help=\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. 
Higher is faster, but uses\n more memory.\"\"\")\nparser.add_argument('-batch_size_update', type=int, default=2048,\n help='Maximum number of words per update') \n\nparser.add_argument('-epochs', type=int, default=13,\n help='Number of training epochs')\nparser.add_argument('-start_epoch', type=int, default=1,\n help='The epoch from which to start')\nparser.add_argument('-param_init', type=float, default=0.1,\n help=\"\"\"Parameters are initialized over uniform distribution\n with support (-param_init, param_init)\"\"\")\nparser.add_argument('-optim', default='adam',\n help=\"Optimization method. [sgd|adagrad|adadelta|adam]\")\nparser.add_argument('-max_grad_norm', type=float, default=0,\n help=\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to max_grad_norm\"\"\")\nparser.add_argument('-dropout', type=float, default=0.3,\n help='Dropout probability; applied between LSTM stacks.')\nparser.add_argument('-word_dropout', type=float, default=0.0,\n help='Dropout probability; applied on embedding indices.')\nparser.add_argument('-label_smoothing', type=float, default=0.0,\n help='Label smoothing value for loss functions.')\nparser.add_argument('-scheduled_sampling_rate', type=float, default=0.0,\n help='Scheduled sampling rate.')\nparser.add_argument('-curriculum', type=int, default=-1,\n help=\"\"\"For this many epochs, order the minibatches based\n on source sequence length. Sometimes setting this to 1 will\n increase convergence speed.\"\"\")\nparser.add_argument('-extra_shuffle', action=\"store_true\",\n help=\"\"\"By default only shuffle mini-batch order; when true,\n shuffle and re-assign mini-batches\"\"\")\nparser.add_argument('-normalize_gradient', action=\"store_true\",\n help=\"\"\"Normalize the gradients by number of tokens before updates\"\"\")\nparser.add_argument('-virtual_gpu', type=int, default=1,\n help='Number of virtual gpus. The trainer will try to mimic asynchronous multi-gpu training')\n# learning rate\nparser.add_argument('-learning_rate', type=float, default=1.0,\n help=\"\"\"Starting learning rate. If adagrad/adadelta/adam is\n used, then this is the global learning rate. Recommended\n settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\")\nparser.add_argument('-learning_rate_decay', type=float, default=1,\n help=\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) epoch has gone past\n start_decay_at\"\"\")\nparser.add_argument('-start_decay_at', type=int, default=99999,\n help=\"\"\"Start decaying every epoch after and including this\n epoch\"\"\")\nparser.add_argument('-warmup_steps', type=int, default=4096,\n help=\"\"\"Number of steps to increase the lr in noam\"\"\")\nparser.add_argument('-noam_step_interval', type=int, default=1,\n help=\"\"\"How many steps before updating the parameters\"\"\")\n\nparser.add_argument('-reset_optim', action='store_true',\n help='Reset the optimizer running variables')\nparser.add_argument('-beta1', type=float, default=0.9,\n help=\"\"\"beta_1 value for adam\"\"\")\nparser.add_argument('-beta2', type=float, default=0.98,\n help=\"\"\"beta_2 value for adam\"\"\")\nparser.add_argument('-weight_decay', type=float, default=0.0,\n help=\"\"\"weight decay (L2 penalty)\"\"\")\nparser.add_argument('-amsgrad', action='store_true',\n help='Using AMSGRad for adam') \nparser.add_argument('-update_method', default='regular',\n help=\"Type of update rule to use. 
Options are [regular|noam].\") \n# pretrained word vectors\nparser.add_argument('-tie_weights', action='store_true',\n help='Tie the weights of the encoder and decoder layer')\nparser.add_argument('-join_embedding', action='store_true',\n help='Jointly train the embedding of encoder and decoder in one weight')\nparser.add_argument('-pre_word_vecs_enc',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\")\nparser.add_argument('-pre_word_vecs_dec',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\")\n\n# GPU\nparser.add_argument('-gpus', default=[], nargs='+', type=int,\n help=\"Use CUDA on the listed devices.\")\nparser.add_argument('-seed', default=9999, type=int,\n help=\"Seed for deterministic runs.\")\n\nparser.add_argument('-log_interval', type=int, default=100,\n help=\"Print stats at this interval.\")\nparser.add_argument('-save_every', type=int, default=-1,\n help=\"Save every this interval.\")\n\nopt = parser.parse_args()\n\nprint(opt)\n\n# An ugly hack to have weight norm on / off\nonmt.Constants.weight_norm = opt.weight_norm\nonmt.Constants.checkpointing = opt.checkpointing\n\n# Use static dropout if checkpointing > 0\nif opt.checkpointing > 0:\n onmt.Constants.static = True\n\nif torch.cuda.is_available() and not opt.gpus:\n print(\"WARNING: You have a CUDA device, should run with -gpus 0\")\n\n\n\ntorch.manual_seed(opt.seed)\n\n\ndef main():\n \n \n \n start = time.time()\n print(\"Loading data from '%s'\" % opt.data)\n dataset = torch.load(opt.data)\n elapse = str(datetime.timedelta(seconds=int(time.time() - start)))\n print(\"Done after %s\" % elapse )\n \n #~ dict_checkpoint = opt.load_from \n #~ if dict_checkpoint:\n #~ print('Loading dicts from checkpoint at %s' % dict_checkpoint)\n #~ checkpoint = torch.load(dict_checkpoint, map_location=lambda storage, loc: storage)\n #~ dataset['dicts'] = checkpoint['dicts']\n #~ else:\n #~ checkpoint = None\n \n\n trainData = onmt.Dataset(dataset['train']['src'],\n dataset['train']['tgt'], opt.batch_size_words, opt.gpus,\n data_type=dataset.get(\"type\", \"text\"), max_seq_num=opt.batch_size_sents)\n validData = onmt.Dataset(dataset['valid']['src'],\n dataset['valid']['tgt'], opt.batch_size_words, opt.gpus,\n volatile=True,\n data_type=dataset.get(\"type\", \"text\"), max_seq_num=opt.batch_size_sents)\n\n dicts = dataset['dicts']\n print(' * vocabulary size. source = %d; target = %d' %\n (dicts['src'].size(), dicts['tgt'].size()))\n print(' * number of training sentences. %d' %\n len(dataset['train']['src']))\n print(' * maximum batch size (words per batch). %d' % opt.batch_size_words)\n\n print('Building model...')\n model = build_model(opt, dicts)\n \n \n \"\"\" Building the loss function \"\"\"\n loss_function = NMTLossFunc(dataset['dicts']['tgt'].size(), \n label_smoothing=opt.label_smoothing,\n shard_size=opt.max_generator_batches)\n \n\n #~ print(model)\n #~ print(loss_function)\n\n nParams = sum([p.nelement() for p in model.parameters()])\n print('* number of parameters: %d' % nParams)\n \n optim = None\n \n if len(opt.gpus) > 1 or opt.virtual_gpu > 1:\n trainer = MultiGPUXETrainer(model, loss_function, trainData, validData, dataset, opt)\n print(\"Warning! Multi-GPU training is used. 
Not fully tested and potential bugs can happen.\")\n else:\n trainer = XETrainer(model, loss_function, trainData, validData, dataset, opt)\n\n \n trainer.run(save_file=opt.load_from)\n \n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"468935900","text":"import sys\nimport operator\n\ninput_dir = '/home/detectivelyw/Documents/projects/tracks/linuxkit-auto-experiment/gcov-parsed-data/' \n\ncontainer_name = sys.argv[1]\nnum_iterations = sys.argv[2]\n\ngcov_data_sets = dict()\ntotal_set = set()\n\narray = [[]]\n\ncommon_kernel_areas_set = set()\ncommon_kernel_areas_filename = \"common_kernel_areas.txt\"\nwith open(common_kernel_areas_filename) as common_kernel_areas_file: \n kernel_line = common_kernel_areas_file.readline()\n while kernel_line:\n if kernel_line != \"\\n\": \n common_kernel_areas_set.add(kernel_line) \n kernel_line = common_kernel_areas_file.readline()\ncommon_kernel_areas_file.close()\n\nfor i in range(1, int(num_iterations)+1):\n inputfile_set = set()\n input_file_name = input_dir + \"gcov-data-\" + container_name + \"-parsed-\" + str(i) + \".txt\"\n with open(input_file_name) as input_file: \n line = input_file.readline()\n while line:\n inputfile_set.add(line) \n line = input_file.readline()\n input_file.close()\n gcov_data_sets[str(i)] = inputfile_set - common_kernel_areas_set\n total_set = total_set | gcov_data_sets[str(i)]\n item = []\n item.append(i)\n item.append(len(inputfile_set))\n array.append(item)\n\ndel array[0]\n\nfor j in range(1, int(num_iterations)+1):\n others_set = set()\n for k in range(1, int(num_iterations)+1):\n if (k != j):\n others_set = others_set | gcov_data_sets[str(k)]\n unique_set = set()\n unique_set = total_set - others_set\n print(str(j) + \": \" + str(len(unique_set))) \n item2 = []\n item2.append(j)\n item2.append(len(unique_set))\n for item3 in array:\n if item2[0] == item3[0]:\n item3.append(item2[1])\n\narray.sort(key = operator.itemgetter(2, 1))\n\ntotal_num_lines = len(total_set)\ncurrent_union_set = set()\n\noutput_file_name = 'cdf-revised-output-' + container_name + num_iterations + '.txt'\noutput_file = open(output_file_name, 'w+')\noutput_file.write(\"iterations \" + container_name + \"\\n\")\n\ncounter = 0\nfor item4 in array:\n counter += 1\n current_union_set = current_union_set | gcov_data_sets[str(item4[0])]\n current_num_lines = len(current_union_set)\n current_ratio = float(current_num_lines) / float(total_num_lines)\n if ((counter % 10) == 0):\n output_file.write(str(counter) + \" \" + str(current_ratio) + \"\\n\")\n else:\n output_file.write(\". 
\" + str(current_ratio) + \"\\n\")\noutput_file.close() \n\n","sub_path":"tools/generate_cdf_results_remove_8_areas.py","file_name":"generate_cdf_results_remove_8_areas.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407389558","text":"#\n# Copyright (c) 2019 Analog Devices Inc.\n#\n# This file is part of libm2k\n# (see http://www.github.com/analogdevicesinc/libm2k).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 2.1 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see .\n#\n\n# Python code to talk to LT3099 trim circuit via ADALM2000\n# Gets address, data and then pushes Morse code out to LT3099\n#\n# Code to LT3099 is 0.5usec pulse for zero, 1.5usec pulse for one\n# pulses are in a 5usec window\n# base level of code is 1V to 3V\n\n\n\nn_bits=4\n\nsr = 2000000 # 2Msps gives us 0.5us resolution\nzero = [3,2,2,2,2,2,2,2,2,2]\none = [3,3,3,2,2,2,2,2,2,2]\nnul = [2,2,2,2,2,2,2,2,2,2]\noff = [0,0,0,0,0,0,0,0,0,0]\nfrm = one\n\n\ndef lt3099_write(ctxin=None, regs=[]):\n import libm2k\n if ctxin==None:\n ctx=libm2k.m2kOpen()\n else:\n ctx=ctxin\n\n if ctx is None:\n \tprint(\"Connection Error: No ADALM2000 device available/connected to your PC.\")\n \texit(1)\n\n dig=ctx.getDigital()\n\n dig.setSampleRateIn(sr)\n dig.setSampleRateOut(sr)\n\n for i in range(4):\n dig.setDirection(i,libm2k.DIO_OUTPUT)\n dig.enableChannel(i,True)\n\n buff = nul + nul + nul # Set enable line high\n\n for reg in regs:\n buff += (frm) # Frame bit\n addr = reg[0] # Extract address\n data = reg[1] # Extract data bit\n bitcnt = 0 # Counter for parity\n for bit in range(6): # Parse through address bits\n if (addr & 0b100000 >> bit) != 0:\n buff += (one)\n bitcnt += 1\n else:\n buff += (zero)\n if data == 1: # Set data bit accordingly\n buff += (one)\n bitcnt += 1\n else:\n buff += (zero)\n if bitcnt % 2 == 0: # detect even parity\n buff += (one)\n buff += (frm) # Final frame bit\n buff += (nul) # Null for good luck\n\n buff += (nul) # Couple of extra nulls for good luck\n buff += (nul)\n buff += (off) # Disable D1 (programming bit.)\n\n# UN-comment to print out buffer.\n# print(\"Buffer:\")\n# print(buff)\n\n dig.setCyclic(False)\n dig.push(buff)\n\n if ctxin==None: # Clean up context if not passed from main program\n libm2k.contextClose(ctx)\n\n\n# Test program. If you run this module by itself, it will run this code.\n# If imported from a higher level program, you get the one and only\n# lt3099_write() function. 
Leave ctxin undefined to open / close the M2K with\n# each call, this should be fine for now.\n\n#NOTE that there's still a bug in closing down the context - for some reason\n# the M2K is not \"released\", you have to cycle USB after each call.\n\nif __name__ == '__main__':\n x = [[0,0],[1,1],[2,0],[3,1]]\n lt3099_write(regs=x)\n print(\"Done!\")\n","sub_path":"m2k_experiments/morse_chip_code/LT3099_write.py","file_name":"LT3099_write.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100110826","text":"import os\r\nimport time\r\nimport re\r\nfrom slackclient import SlackClient\r\n\r\n\r\n# instantiate Slack client\r\nslack_client = SlackClient(os.environ['SLACK_TOKEN'])\r\n# starterbot's user ID in Slack: value is assigned after the bot starts up\r\nstarterbot_id = None\r\n\r\n# constants\r\nRTM_READ_DELAY = 1 # 1 second delay between reading from RTM\r\nEXAMPLE_COMMAND = \"do\"\r\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\r\nMAIN_CHANNEL = \"#general\"\r\n\r\ndef parse_bot_commands(slack_events):\r\n \"\"\"\r\n Parses a list of events coming from the Slack RTM API to find bot commands.\r\n If a bot command is found, this function returns a tuple of command and channel.\r\n If its not found, then this function returns None, None.\r\n \"\"\"\r\n for event in slack_events:\r\n #print(event)\r\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\r\n user_id, message = parse_direct_mention(event[\"text\"])\r\n if user_id == starterbot_id:\r\n return message, event[\"channel\"]\r\n \r\n if event[\"type\"] == \"team_join\":\r\n user_name = event[\"user\"][\"name\"]\r\n user_id = event[\"user\"][\"id\"]\r\n commmand = event[\"type\"] + \"|\" + user_name + \"|\" + user_id\r\n return commmand, MAIN_CHANNEL\r\n \r\n return None, None\r\n\r\ndef parse_direct_mention(message_text):\r\n \"\"\"\r\n Finds a direct mention (a mention that is at the beginning) in message text\r\n and returns the user ID which was mentioned. If there is no direct mention, returns None\r\n \"\"\"\r\n matches = re.search(MENTION_REGEX, message_text)\r\n # the first group contains the username, the second group contains the remaining message\r\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\r\n\r\ndef handle_command(command, channel):\r\n \"\"\"\r\n Executes bot command if the command is known\r\n \"\"\"\r\n\t\r\n # Default response is help text for the user\r\n default_response = \"Bonjour, voici les commandes à ma disposition :\\n`babyfoot` pour afficher le champion actuel de babyfoot\\n`birthday` pour lister les 3 prochains anniversaires\"\r\n attach=\"\"\r\n\r\n # Finds and executes the given command, filling in response\r\n response = None\r\n attach = \"\"\r\n\r\n # This is where you start to implement more commands!\r\n if \"babyfoot\" in command:\r\n response = \"Voici les résultats du *babyfoot* :soccer:\"\r\n attach=[{\"color\": \"#36a64f\", \"fields\": [{\"value\": \"*Alan* avec *10 victoires*\"},{\"value\": \"*François* avec *5 victoires*\"}],\"footer\": \"starterbot\", \"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\", \"ts\": 123456789}]\r\n\r\n if command.startswith(\"team_join\"):\r\n a = command.split(\"|\")\r\n response = \":tada: Bienvenue parmi nous <@\" + a[1] + \"> ! 
:tada:\"\r\n\r\n #Send IM message to user\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=a[2],\r\n text=\"Bienvenue chez nous, voici quelques règles générales pour bien utiliser les channels :\\n- vos articles et liens sur divers sujets iront très bien sur #veille\\n- les infos relatives aux mises à jour du wikilyo peuvent aller sur #wiki\\n- tout ce qui est relatif à un événement interne à Datalyo ou externe sur #events\\n- les infos et tips techniques sur #tech\",\r\n as_user = 'true'\r\n )\r\n\r\n # Sends the response back to the channel\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=response or default_response,\r\n attachments=attach\r\n )\r\n\r\nif __name__ == \"__main__\":\r\n if slack_client.rtm_connect(with_team_state=False):\r\n print(\"Starter Bot connected and running!\")\r\n # Read bot's user ID by calling Web API method `auth.test`\r\n starterbot_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\r\n while True:\r\n command, channel = parse_bot_commands(slack_client.rtm_read())\r\n if command:\r\n handle_command(command, channel)\r\n time.sleep(RTM_READ_DELAY)\r\n else:\r\n print(\"Connection failed. Exception traceback printed above.\")","sub_path":"starterbot.py","file_name":"starterbot.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195970451","text":"\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/pypa/sampleproject\n\"\"\"\nfrom setuptools import setup, find_packages\nfrom os import path\nfrom io import open\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nwith open(path.join(here, 'requirements.txt')) as f:\n requirements = f.read().splitlines()\n\nsetup(\n name='nbburndetectionzillow',\n\n version='0.1.0',\n\n description='A set of helper functions that provide the backend mechanics of the Digital Globe notebook',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/GeoBigData/nbburndetectionzillow',\n\n author='Rachel Wegener',\n author_email='rachel.wegener@digitalglobe.com',\n\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n ],\n # keywords\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n install_requires=requirements\n # extras require, package data, data files, entry points, project urls\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"6498247","text":"# Copyright 2018 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas 
as pd\n\nfrom conftest import here\nfrom v3io_frames import pbutils\nimport v3io_frames.frames_pb2 as fpb\n\n\ndef test_encode_df():\n labels = {\n 'int': 7,\n 'str': 'wassup?',\n }\n\n df = pd.read_csv('{}/weather.csv'.format(here))\n msg = pbutils.df2msg(df, labels)\n\n names = [col.name for col in msg.columns]\n assert set(names) == set(df.columns), 'columns mismatch'\n assert not msg.indices, 'has index'\n assert pbutils.pb2py(msg.labels) == labels, 'lables mismatch'\n\n # Now with index\n index_name = 'DATE'\n df.index = df.pop(index_name)\n msg = pbutils.df2msg(df, None)\n\n names = [col.name for col in msg.columns]\n assert set(names) == set(df.columns), 'columns mismatch'\n assert msg.indices, 'no index'\n assert msg.indices[0].name == index_name, 'bad index name'\n\n\ndef test_multi_index():\n tuples = [\n ('bar', 'one'),\n ('bar', 'two'),\n ('baz', 'one'),\n ('baz', 'two'),\n ('foo', 'one'),\n ('foo', 'two'),\n ('qux', 'one'),\n ('qux', 'two')]\n index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])\n df = pd.DataFrame(index=index)\n df['x'] = range(len(df))\n\n data = pbutils.df2msg(df).SerializeToString()\n msg = fpb.Frame.FromString(data)\n\n for col in msg.indices:\n values = col.strings\n assert len(values) == len(df), 'bad index length'\n","sub_path":"clients/py/tests/test_pbutils.py","file_name":"test_pbutils.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"364347088","text":"#-*- coding:utf8 -*-\n\nimport requests\nimport MySQLdb\nfrom bs4 import BeautifulSoup\nimport re\nimport time\n\ndef get_url(url):\n\theaders = {\n 'Host': 'www.amazon.cn',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0',\n 'Accept': '*/*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': 'https://www.amazon.cn/',\n 'Connection': 'keep-alive',\n 'Cookie': '''session-id=459-0382294-8045805; session-id-time=2082729601l; ubid-acbcn=461-0072912-2527735; session-token=dFkVXRW7Ow39hcEkzlnaV0Wv6q0YVZYh3yXHinndOskS0BiGNcZ0XpNJ5BXTuAPnFE0cD18ft2Zb2vaAoYcv3EE5r3XkFs5ydWn9PMhCboO7befXNBGJ+tzMSzuZWtAjQptOoHJSCnWzuV2L98K0GTufP5GxY+2zYpMLpceL9bFnGfYGRI8KaQGageRaqeFw; csm-hit=s-923RXQ83V02BA5DA66R4|1519261746537; x-wl-uid=1jFFSZNo53sOykp3zxT1/FXm4aQQ2q4xsezsrDwkquYt/0EEVZHxZYqEo+KwIBMVVz82Z3TfP3k4=; s_nr=1518579715331-New; s_vnum=1950579693547%26vn%3D1; s_dslv=1518579715332'''\n };\n\tr = requests.get(url,headers=headers);\n\tprint(\"Getting url:\",url);\n\tc = r.text;\n\tprint(\"Status code:\",r.status_code);\n#\twith open('index.html','w') as fd:\n#\t\tfd.write(c);\n\treturn c;\n\ndef parse_zbook():\n\t#c = get_url(\"http://t2.bookdna.cn:8088\");\n\tc = get_url(\"https://www.amazon.cn/b/ref=sa_menu_kindle_l3_b1875254071?ie=UTF8&node=1875254071\");\n\tsoup = BeautifulSoup(c,'lxml');\n\ta = soup.body.find_all(href=re.compile('^/dp/*'));\n\tm = [];\n\tfor i in a:\n\t\tm.append(i['href']);\n\tm = sorted(set(m),key=m.index);\n\treturn m;\n\ndef parse_weekly():\n\tc = get_url('''https://www.amazon.cn/s/ref=s9_acsd_hps_bw_clnk_r?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&sort=popularity-rank&search-alias=digital-text&node=1852543071&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=merchandised-search-8&pf_rd_r=7X3PC84AJ9GMNZ12DVC0&pf_rd_t=101&pf_rd_p=2d85e30d-2e9e-416c-8325-cd9f23dc0653&pf_rd_i=1875254071''');\n\tsoup = BeautifulSoup(c, 'lxml');\n\tb = soup.body.find_all(href=re.compile('^https://www.amazon.cn/dp/*'));\n\tw 
= [];\n\tif len(b) == 0:\n\t\treturn w;\n\tfor i in b:\n\t\tw.append(i['href'][21:35]);\n\tw = sorted(set(w),key=w.index);\n\treturn w;\n\n\ndef parse_one(l):\n\tbook = {};\n\tbook['dp'] = l;\n\tprint(\"Parse book:\",l);\n\tc = get_url(\"https://www.amazon.cn\"+book['dp']);\n\tsoup = BeautifulSoup(c, 'lxml');\n\tbook['image'] = soup.find('img',id='ebooksImgBlkFront')['src'];\n\tbook['title'] = soup.find('span',id='ebooksProductTitle').text;\n\tbook['author']= soup.find('span',class_='author notFaded').text.split('\\n')[1];\n\tbook['oprice']= re.findall(r\"\\d+\\.?\\d*\",soup.find('span', \\\n\t\tclass_='a-color-base a-text-strike').text)[0];\n\tbook['price'] = re.findall(r\"\\d+\\.?\\d*\",soup.find('span', \\\n\t\tclass_='a-size-base a-color-price a-color-price').text)[0];\n\tbook['date'] = time.strftime(\"%Y-%m-%d\", time.localtime());\n\tbook['score'] = 3.0;\n\ttry:\n\t\tscor = re.findall(r\"\\d+\\.?\\d*\",soup.find('span', \\\n\t\tid='acrPopover')['title'])[0];\n\t\tbook['score'] = float(scor);\n\texcept Exception as e:\n\t\tprint(e);\n\t\tprint(\"Book:\",l,\" does not have score.\")\n\tprint(book);\n\treturn book;\n\ndef init_db():\n\tdb = MySQLdb.connect(host=\"localhost\",user=\"root\",passwd=\"password\",db=\"bookworm\",charset=\"utf8\");\n\tcs = db.cursor();\n\ttry:\n\t\tcs.execute(\"SELECT VERSION()\");\n\t\tdata = cs.fetchone();\n\t\tprint(\"Database Version: %s \" % data);\n\texcept Exception as e:\n\t\tprint(e);\n\t\tprint(\"Unable to connect to the MySQL Server.\");\n\t\treturn None;\n\treturn db;\n\ndef check_book(book, db):\n\tcs = db.cursor();\n\tsql = \"SELECT COUNT(1) FROM zbook WHERE BOOK_KID='\"+book['dp']+\"'\";\n\tprint(\"SQL: \",sql);\n\ttry:\n\t\tcs.execute(sql);\n\t\tcount = int(cs.fetchone());\n\t\tprint(\"BOOK: \",book['dp'],\" nums: \",count);\n\t\treturn count;\n\texcept Exception as e:\n\t\tprint(e);\n\t\treturn 0;\n\t\t\ndef insert_book(book, db):\n\tcs = db.cursor();\n\tsql = \"INSERT INTO zbook(book_kid, book_name, book_covr, author, hisl_price, hisl_date, curr_price, curr_date, score, oprice)\"+\\\n\t\" VALUES('\"+book['dp']+\"','\"+book['title']+\"','\"+book['image']+\"','\"+book['author']+\"',\"+book['price']+\",'\"+\\\n\tbook['date']+\"',\"+book['price']+\",'\"+book['date']+\"',\"+str(book['score'])+\",\"+book['oprice']+\")\";\n\tprint(sql);\n\ttry:\n\t\tcs.execute(sql);\n\t\tdb.commit();\n\t\tprint(\"INSERT SUCCESSFULLY.\");\n\texcept Exception as e:\n\t\tprint(e);\n\t\tdb.rollback();\n\ndef merge_book(book, db):\n\tcs = db.cursor();\n\tsql1 = \"SELECT hisl_price, hisl_date FROM zbook WHERE book_kid='\"+book['dp']+\"'\";\n\thisl_price = 10000.0;\n\thisl_date = \"2000-01-01\";\n\tprint(sql1);\n\ttry:\n\t\tcs.execute(sql1);\n\t\tresults = cs.fetchall();\n\t\tfor row in results:\n\t\t\thisl_price = float(row[0]);\n\t\t\thisl_date = str(row[1]);\n\t\t\tprint(hisl_date+\" : \"+str(hisl_price));\n\t\t\tbreak;\n\n\texcept Exception as e:\n\t\tprint(e);\n\t\tprint(\"Unable to fetch \"+book['dp']+\" data from ZBOOK.\");\n\n\tif hisl_price > float(book['price']):\n\t\thisl_price = float(book['price']);\n\t\thisl_date = book['date'];\n\t\n\tsql2 = \"UPDATE zbook SET book_name='%s', book_covr='%s', author='%s', oprice=%s, curr_price=%s, curr_date='%s', hisl_price=%s, hisl_date='%s', score=%.1f WHERE book_kid='%s' \"%(\\\n\t\tbook['title'], book['image'], book['author'], book['oprice'], book['price'], book['date'], hisl_price, hisl_date, book['score'], book['dp']);\n\tprint(sql2);\n\ttry:\n\t\t#print(hisl_date+\" : 
\"+str(hisl_price));\n\t\tcs.execute(sql2);\n\t\tdb.commit();\n\texcept Exception as e:\n\t\tprint(e);\n\t\tprint(\"Unable to update \"+book['dp']+\" date into ZBOOK.\");\n\tprint(\"UPDATE SUCCESSFULLY.\");\n\ndef sel_daily(db):\n\tbooks = [];\n\tcs = db.cursor();\n\ttoday = time.strftime(\"%Y-%m-%d\", time.localtime());\n\tsql = \"SELECT book_kid, book_name, book_covr, author, oprice, hisl_price, hisl_date, curr_price, score, FORMAT(curr_price/oprice*10,1) AS discount FROM zbook WHERE curr_date='%s'\"%(today);\n\tprint(sql);\n\tbook = {};\n\ttry:\n\t\tcs.execute(sql);\n\t\tresults = cs.fetchall();\n\t\tfor row in results:\n\t\t\tbook = {};\n\t\t\tbook['dp'] = row[0];\n\t\t\tbook['title'] = row[1];\n\t\t\tbook['image'] = row[2];\n\t\t\tbook['author']= row[3];\n\t\t\tbook['oprice']= row[4];\n\t\t\tbook['lprice']= row[5];\n\t\t\tbook['ldate'] = row[6];\n\t\t\tbook['price'] = row[7];\n\t\t\tbook['score'] = row[8];\n\t\t\tbook['disct'] = row[9];\n\t\t\tbooks.append(book);\n\texcept Exception as e:\n\t\tprint(e);\n\t\tprint(\"Could not select books from DB.\");\n\treturn books;\n\ndef reset_db(db):\n\tdb.close();\n\ndef parse_amazon():\n\tl = parse_zbook();\n\tl = l+parse_weekly();\n\tprint(\"There are \",len(l),\" books to parse\");\n\tbooks = [];\n\tfor x in l:\n\t\tbooks.append(parse_one(x[0:14]));\n\n\t#parse_one(\"/dp/B076ZKV2MZ\");\t\n\tprint(books);\n\n\tdb = init_db();\n\tfor book in books:\n\t\ttmp = check_book(book,db);\n\t\tif tmp == 0:\n\t\t\tinsert_book(book, db);\n\t\telse:\n\t\t\tmerge_book(book, db);\n\n\treset_db();\n\nif __name__ == '__main__':\n\tdb = init_db();\n\tbooks = sel_daily(db);\n\tprint(len(books));","sub_path":"parse_zbook.py","file_name":"parse_zbook.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"630167959","text":"import nltk\nimport heapq\nimport pickle\nfrom urllib import urlopen\nfrom reviews import Reviews\nfrom bs4 import BeautifulSoup\nfrom yelp import API, YelpPage\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\napp.config.from_envvar('YELPADS_SETTINGS')\n\nclassifier = pickle.load(open('classifier.pickle'))\nstopwords = nltk.corpus.stopwords.words('english')\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\napi = API(app.config['YELP_CONSUMER_KEY'],\n app.config['YELP_CONSUMER_SECRET'],\n app.config['YELP_TOKEN'],\n app.config['YELP_TOKEN_SECRET'])\n\n@app.route('/', methods=['GET'])\ndef search():\n query = request.args.get('q', '')\n location = request.args.get('location', '')\n\n if (query and location):\n res = api.search(query, location)\n #There's no results page, so just take best result\n business = res['businesses'][0]\n\n phone = business['display_phone']\n name = business['name']\n yelp_id = business['id']\n return reviews(yelp_id, name)\n return render_template('search.html')\n\ndef reviews(yelp_id, name):\n page = YelpPage(yelp_id)\n imgs = page.images()\n comments = page.comments()\n\n scores = []\n\n #Split comments by sentences and score them\n for u, c in comments:\n for s in tokenizer.tokenize(c):\n encoded = s.encode('utf8')\n score = classifier.prob_classify(features(encoded)).prob('pos')\n scores.append( (u, encoded, score) )\n\n top = heapq.nlargest(5, scores, lambda x: x[2])\n users = [i[0]for i in top]\n reviews = [i[1] for i in top]\n return render_template('advert.html', reviews=reviews,\n images=imgs, users=users, name=name)\n\ndef features(rev):\n features = {}\n for w in [w for w 
in nltk.word_tokenize(rev) if w.lower() not in stopwords]:\n features['contains(%s)' % w.lower()] = True\n return features\n\nif __name__ == '__main__':\n import logging\n logger = app.logger\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n logger.addHandler(ch)\n\n app.run()\n","sub_path":"site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383259447","text":"# encoding: utf-8\n\"\"\"\ngravatar.py\n\nCreated by gavin on 2010-03-12.\nCopyright (c) 2010 __MyCompanyName__. All rights reserved.\n\"\"\"\nfrom django import template\n\nimport urllib, hashlib\n\nregister = template.Library()\n\ndef gravatar(email, size=80):\n gravatar_url = \"http://www.gravatar.com/avatar.php?\"\n gravatar_url += urllib.urlencode({\n 'gravatar_id':hashlib.md5(email).hexdigest(),\n 'size':str(size)})\n return \"\"\"\"gravatar\"\"\" % (gravatar_url, email)\n\nregister.simple_tag(gravatar)","sub_path":"blog/templatetags/gravatar.py","file_name":"gravatar.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"23982028","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.6/dist-packages/sella/eigensolvers.py\n# Compiled at: 2020-01-27 12:54:08\n# Size of source mod 2**32: 5139 bytes\nimport numpy as np\nfrom scipy.linalg import eigh, solve\nfrom sella.utilities.math import modified_gram_schmidt\nfrom .hessian_update import symmetrize_Y\n\ndef exact(A, gamma=None, P=None):\n if isinstance(A, np.ndarray):\n lams, vecs = eigh(A)\n else:\n n, _ = A.shape\n if P is None:\n P = np.eye(n)\n vecs_P = np.eye(n)\n else:\n _, vecs_P, _ = exact(P)\n B = np.zeros((n, n))\n for i in range(n):\n v = vecs_P[i]\n B += np.outer(v, A.dot(v))\n\n B = 0.5 * (B + B.T)\n lams, vecs = eigh(B)\n return (\n lams, vecs, lams[np.newaxis, :] * vecs)\n\n\ndef rayleigh_ritz(A, gamma, P, B=None, v0=None, vref=None, vreftol=0.99, method='jd0', maxiter=None):\n n, _ = A.shape\n if B is None:\n B = np.eye(n)\n if maxiter is None:\n maxiter = 2 * n + 1\n else:\n if gamma <= 0:\n return exact(A, gamma, P)\n if v0 is not None:\n V = modified_gram_schmidt(v0.reshape((-1, 1)))\n else:\n P_lams, P_vecs, _ = exact(P, 0)\n nneg = max(1, np.sum(P_lams < 0))\n V = modified_gram_schmidt(P_vecs[:, :nneg])\n v0 = V[:, 0]\n AV = A.dot(V)\n symm = 2\n seeking = 0\n while True:\n Atilde = V.T @ symmetrize_Y(V, AV, symm=symm)\n lams, vecs = eigh(Atilde, V.T @ B @ V)\n nneg = max(1, np.sum(lams < 0))\n AV = AV @ vecs\n V = V @ vecs\n vecs = np.eye(V.shape[1])\n if V.shape[1] >= maxiter:\n return (\n lams, V, AV)\n Ytilde = symmetrize_Y(V, AV, symm=symm)\n R = Ytilde @ vecs[:, :nneg] - B @ V @ vecs[:, :nneg] * lams[np.newaxis, :nneg]\n Rnorm = np.linalg.norm(R, axis=0)\n print(Rnorm, lams[:nneg], Rnorm / lams[:nneg], seeking)\n if vref is not None:\n x0 = V @ vecs[:, 0]\n print(np.abs(x0 @ vref))\n if np.abs(x0 @ vref) > vreftol:\n print('Dot product between your v0 and the final answer:', np.abs(v0 @ x0) / np.linalg.norm(v0))\n return (\n lams, V, AV)\n for seeking, (rinorm, thetai) in enumerate(zip(Rnorm, lams)):\n if V.shape[1] == 1 or rinorm >= gamma * np.abs(thetai):\n ri = R[:, seeking]\n thetai = 
lams[seeking]\n break\n else:\n return (\n lams, V, AV)\n\n t = expand(V, Ytilde, P, B, lams, vecs, thetai, method, seeking)\n t /= np.linalg.norm(t)\n if np.linalg.norm(t - V @ V.T @ t) < 0.01:\n t = ri / np.linalg.norm(ri)\n t = modified_gram_schmidt(t[:, np.newaxis], V)\n if t.shape[1] == 0:\n for rj in R.T:\n t = modified_gram_schmidt(rj[:, np.newaxis], V)\n if t.shape[1] == 1:\n break\n else:\n t = modified_gram_schmidt(np.random.normal(size=(n, 1)), V)\n if t.shape[1] == 0:\n return (lams, V, AV)\n\n V = np.hstack([V, t])\n AV = np.hstack([AV, A.dot(t)])\n\n\ndef expand(V, Y, P, B, lams, vecs, shift, method='jd0', seeking=0):\n d, n = V.shape\n R = Y @ vecs - B @ V @ vecs * lams[np.newaxis, :]\n Pshift = P - shift * B\n if method == 'lanczos':\n return R[:, seeking]\n if method == 'gd':\n return np.linalg.solve(Pshift, R[:, seeking])\n if method == 'jd0_alt':\n vi = V @ vecs[:, seeking]\n Pprojr = solve(Pshift, R[:, seeking])\n Pprojv = solve(Pshift, vi)\n alpha = vi.T @ Pprojr / (vi.T @ Pprojv)\n return Pprojv * alpha - Pprojr\n if method == 'jd0':\n vi = V @ vecs[:, seeking]\n Aaug = np.block([[Pshift, vi[:, np.newaxis]], [vi, 0]])\n raug = np.zeros(d + 1)\n raug[:d] = R[:, seeking]\n z = solve(Aaug, -raug)\n return z[:d]\n if method == 'mjd0_alt':\n Pprojr = solve(Pshift, R[:, seeking])\n PprojV = solve(Pshift, V @ vecs)\n alpha = solve((V @ vecs).T @ PprojV, (V @ vecs).T @ Pprojr)\n return solve(Pshift, V @ vecs @ alpha - R[:, seeking])\n if method == 'mjd0':\n Vrot = V @ vecs\n Aaug = np.block([[Pshift, Vrot], [Vrot.T, np.zeros((n, n))]])\n raug = np.zeros(d + n)\n raug[:d] = R[:, seeking]\n z = solve(Aaug, -raug)\n return z[:d]\n raise ValueError('Unknown diagonalization method {}'.format(method))","sub_path":"pycfiles/Sella-1.0.3.linux-x86_64.tar/eigensolvers.cpython-36.py","file_name":"eigensolvers.cpython-36.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"385884067","text":"import sys\nfrom firebase import firebase\nimport Adafruit_DHT\nfrom smbus import SMBus\nfrom itertools import cycle\nfrom time import sleep\nimport RPi.GPIO as GPIO\n\n\nGPIO.setmode(GPIO.BCM)\nPIR = 21\nGPIO.setup(PIR, GPIO.IN)\n\nled_alert = 0xFE\nled_machine = 0xBF\n\ntotal = 0\nsensor = 11\npin = 4\nhumidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\nbus = SMBus(1)\nfb = firebase.FirebaseApplication('https://smartindus-80239.firebaseio.com', None)\nwhile True:\n reslt = fb.get('/control', None)\n\n alert = reslt[\"alert\"]\n machine = reslt[\"machine\"]\n\n output = 0xff\n if alert:\n output = output & led_alert\n if machine:\n output = output & led_machine\n\n bus.write_byte(0x38, output)\n\n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n if GPIO.input(PIR):\n total += 1\n\n data = {\n 'humidity': humidity,\n 'temp': temperature,\n 'piece': total,\n }\n ab3ath = fb.patch('/sensors', data)\n","sub_path":"smartindus/smartindus.py","file_name":"smartindus.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646832375","text":"import random as r\nimport math as m\nimport time\nimport multiprocessing as mp\n\nstart_time = time.time()\n# Number of darts that land inside.\ninside = 0\n# Total number of darts to throw.\ntotal = 10000000\ncounter = mp.Queue()\n\n\ndef rand_Point(counter):\n global inside\n # Generate random x, y in [0, 1].\n x2 = r.random()**2\n y2 = r.random()**2\n # 
Increment if inside unit circle.\n if m.sqrt(x2 + y2) < 1.0:\n counter.put(0)\n\n\nprocesses = [mp.Process(target=rand_Point, args=(counter)) for x in range(total)]\n\n# Run processes\nfor p in processes:\n p.start()\n\n# Exit completed processes\nfor p in processes:\n p.join()\n\noutput = [counter.get() for p in processes]\n\n# inside / total = pi / 4\npi = (float(len(output)) / total) * 4\n\n# It works!\nprint(pi)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"MonCar_Pi_Par.py","file_name":"MonCar_Pi_Par.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"501997648","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 17 15:00:00 2018\r\n\r\n@author: samsung\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport data_augment as da\r\nimport os.path as op\r\nimport csv\r\nimport cv2\r\n\r\nclass DataLoader:\r\n def __init__(self, csv_path='', dataset_root=''):\r\n self.minibatch_=10\r\n self.net_input_size_=256\r\n self.csv_name_=csv_path\r\n self.dataset_root_=dataset_root\r\n self.dataset_=[]\r\n with open(self.csv_name_,'r') as f:\r\n reader=csv.reader(f)\r\n for row in reader:\r\n self.dataset_.append(list(row))\r\n self.header_=self.dataset_[0]\r\n self.dataset_=self.dataset_[1:]\r\n self.num_samples_=len(self.dataset_)\r\n self.cur_index_=0\r\n self.center_perterb_max_=40\r\n self.angle_max_=20\r\n self.target_scale_=0.7\r\n self.scale_range_=[0.8,1.2]\r\n self.stride_=8\r\n self.num_parts_=24\r\n self.label_channels_=25\r\n self.sigma_=7.0\r\n self.visualize_=False\r\n self.savedir='./visualize'\r\n self.image_index_=0\r\n self.shuffle()\r\n \r\n def shuffle(self):\r\n self.random_order_=np.random.permutation(np.arange(self.num_samples_))\r\n self.cur_index_=0\r\n \r\n def get_shape(self):\r\n return [self.minibatch_,self.net_input_size_, self.net_input_size_,3],[self.minibatch_,self.net_input_size_/self.stride_,self.net_input_size_/self.stride_,self.num_parts_+1]\r\n \r\n def load_minibatch(self): #probs for scale, rotation, flip, crop\r\n label_side=int(self.net_input_size_/self.stride_)\r\n imagedata=np.zeros((self.minibatch_,self.net_input_size_, self.net_input_size_,3), dtype=np.float32)\r\n imagelabel=np.zeros((self.minibatch_,label_side,label_side,self.label_channels_),dtype=np.float32)\r\n image_names=['']*self.minibatch_\r\n keypoints_gt=np.zeros((self.minibatch_,24,3), dtype=np.float32)\r\n \r\n for i in range(self.cur_index_,self.cur_index_+self.minibatch_):\r\n row=self.dataset_[self.random_order_[i]]\r\n image_path=op.join(self.dataset_root_,row[0])\r\n image_names[i-self.cur_index_]=image_path\r\n kpstrs=row[2:]\r\n keypoints=np.zeros((24,3),dtype=np.float32)\r\n for k, kpstr in enumerate(kpstrs):\r\n kps=list(map(float, kpstr.split('_')))\r\n keypoints[k]=np.asarray(kps)\r\n keypoints_gt[i-self.cur_index_]=keypoints\r\n for i,image_path in enumerate(image_names):\r\n image=cv2.imread(image_path)\r\n base_scale=1.0*self.target_scale_/(1.0*image.shape[0]/self.net_input_size_)\r\n image=da.aug_scale(image, base_scale, self.scale_range_, keypoints_gt[i])\r\n image=da.aug_rotate(image, self.angle_max_, keypoints_gt[i])\r\n labeled_index=keypoints_gt[i,:,-1]!=-1 \r\n center=np.mean(keypoints_gt[i,labeled_index,:2],axis=0) \r\n image=da.aug_crop(image, center, self.net_input_size_, self.center_perterb_max_, keypoints_gt[i])\r\n imagedata[i,:,:,:]=(image-128)/256.0\r\n\r\n for i in range(self.minibatch_):\r\n self.putGaussianMap(imagelabel[i], 
keypoints_gt[i])\r\n if self.visualize_:\r\n for i in range(self.minibatch_):\r\n g_map=imagelabel[i,:,:,-1]\r\n g_map=cv2.resize(g_map, (0,0), fx=self.stride_,fy=self.stride_,interpolation=cv2.INTER_CUBIC)\r\n raw_image=(imagedata[i]*256.0+128).astype(np.uint8)\r\n vis_img=self.visualize(raw_image,g_map)\r\n if self.image_index_<100:\r\n cv2.imwrite(op.join(self.savedir,'sample_%d.jpg'%self.image_index_),vis_img)\r\n self.image_index_+=1\r\n self.cur_index_+=self.minibatch_\r\n if self.cur_index_+self.minibatch_>self.num_samples_:\r\n self.shuffle()\r\n #print('batch loaded')\r\n return imagedata,imagelabel\r\n \r\n def putGaussianMap(self, label, keypoints, sigma=7.0):\r\n assert(label.shape[2]==keypoints.shape[0]+1)\r\n start = self.stride_ / 2.0 - 0.5\r\n for i in range(label.shape[2]-1): #[h,w,c]\r\n kp=keypoints[i]\r\n if kp[-1]!=-1:\r\n for y in range(label.shape[0]):\r\n for x in range(label.shape[1]):\r\n yy = start + y * self.stride_\r\n xx = start + x * self.stride_\r\n dis = ((xx - kp[0]) * (xx - kp[0]) + (yy - kp[1]) * (yy - kp[1])) / 2.0 / sigma / sigma\r\n if dis > 4.6052:\r\n continue\r\n label[y,x,i] += np.exp(-dis)\r\n label[y,x,i]=min(1,label[y,x,i])\r\n label[:,:,-1]=np.max(label[:,:,:-1],axis=2)\r\n \r\n def visualize(self,image, g_map):\r\n heatmap_bgr=np.zeros(image.shape, dtype=np.uint8)\r\n for i in range(heatmap_bgr.shape[0]):\r\n for j in range(heatmap_bgr.shape[1]):\r\n heatmap_bgr[i,j,[2,1,0]]=self.getJetColor(1-g_map[i,j],0,1)\r\n out_image=cv2.addWeighted(image, 0.7, heatmap_bgr, 0.3, 0).astype(np.uint8)\r\n return out_image\r\n \r\n def getJetColor(self, v, vmin, vmax):\r\n c = np.zeros((3))\r\n if (v < vmin):\r\n v = vmin\r\n if (v > vmax):\r\n v = vmax\r\n dv = vmax - vmin\r\n if (v < (vmin + 0.125 * dv)): \r\n c[0] = 256 * (0.5 + (v * 4)) #B: 0.5 ~ 1\r\n elif (v < (vmin + 0.375 * dv)):\r\n c[0] = 255\r\n c[1] = 256 * (v - 0.125) * 4 #G: 0 ~ 1\r\n elif (v < (vmin + 0.625 * dv)):\r\n c[0] = 256 * (-4 * v + 2.5) #B: 1 ~ 0\r\n c[1] = 255\r\n c[2] = 256 * (4 * (v - 0.375)) #R: 0 ~ 1\r\n elif (v < (vmin + 0.875 * dv)):\r\n c[1] = 256 * (-4 * v + 3.5) #G: 1 ~ 0\r\n c[2] = 255\r\n else:\r\n c[2] = 256 * (-4 * v + 4.5) #R: 1 ~ 0.5 \r\n return c\r\n \r\n","sub_path":"fashionai/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"80339582","text":"#!/usr/bin/env python\n\n'''\nGOAL:\n- create a super list of all galaxies in hyperleda, agc, NSAv1, NSAv0\n\nUSAGE:\n- example will print out when you run\n\n- to create new mastertable\n s = sample()\n s.get_smart()\n\n- to create new cutouts, run from supersample directory\n- make sure there is a cutouts and plots subdirectory\n t = fulltable()\n t.plot_all()\n \n\n\n\n'''\nimport numpy as np\nimport os\nimport sys\n\nfrom astropy.io import fits, ascii\nfrom astropy.table import Table, join, hstack, Column, MaskedColumn\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom astropy.wcs import WCS\nfrom astropy.visualization import simple_norm\n\nfrom astroquery.skyview import SkyView\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom PIL import Image\n\nfrom urllib.parse import urlencode\nfrom urllib.request import urlretrieve\n\nimport pandas as pd\n\nhomedir = os.getenv('HOME')\nsys.path.append(homedir+'/github/APPSS/')\nfrom join_catalogs import make_new_cats, join_cats\n\n\nimport argparse\nparser = 
argparse.ArgumentParser(description ='Create a crazy big catalog from HL, AGC, NSA')\nparser.add_argument('--version',dest = 'version', default='v1',help='version of tables. default is v1')\nparser.add_argument('--evcc',dest = 'evcc', default=False,action='store_true',help='run for evcc catalog containing galaxies not in our original table')\n \nargs = parser.parse_args()\n\nif args.evcc:\n outfile_suffix = '_'+args.version+'_evcc'\nelse:\n outfile_suffix = '_'+args.version\n\n\n ## CATALOG VERSION NUMBER\n## V1 = USE NEWER VERSION OF NSA (this is what we used for all of visual classifications)\n## V2 = USE ORIGINAL VERSION OF NSA\nVERSION = 2\n# max offset in arcsec b/w sources from different catalogs\n# and still be considered the same source\nmax_match_offset = 5.\n\n\n\n## BOUNDARIES OF SURVEY REGION\ndecmin = -1.2\ndecmin = -35\ndecmax = 75 \nramax = 280.\nramin = 100. \nvmax = 3300.\nvmin = 500.\n\n## LEGACY SURVEY\nlegacy_pixel_scale = 0.262 # arcsec/pixel, don't actually use this\nimage_size = 60 # image size to download from legacy survey, in pixels\ndefault_image_size = image_size\ndef duplicates(table,column,flag=None):\n if flag is None:\n unique, counts = np.unique(table[column], return_counts=True)\n elif flag is not None:\n unique, counts = np.unique(table[column][flag], return_counts=True)\n print('number of duplicates = ',sum(counts > 1))\n #print('duplicates = ',unique[counts > 1])\n return unique, counts\n\ndef getlegacyimages(ra,dec):\n '''\n new function to download images in one fell swoop\n\n doing this so we can make cutouts of entire catalog, and then check each galaxy by hand\n\n This will need to be re-run each time the matching is altered, and the kitchen-sink catalog changes.\n \n '''\n for i in range(len(ra)):\n # name image files by ra and dec of galaxy\n gra = '%.5f'%(ra[i]) # accuracy is of order .1\"\n gdec = '%.5f'%(dec[i])\n galnumber = gra+'-'+gdec\n rootname = 'cutouts/legacy-im-'+str(galnumber)+'-'+str(image_size)\n jpeg_name = rootname+'.jpg'\n\n fits_name = rootname+'.fits'\n # check if images already exist\n # if not download images\n if not(os.path.exists(jpeg_name)):\n print('retrieving ',jpeg_name)\n url='http://legacysurvey.org/viewer/jpeg-cutout?ra='+str(ra[i])+'&dec='+str(dec[i])+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n urlretrieve(url, jpeg_name)\n else:\n print('previously downloaded ',jpeg_name)\n if not(os.path.exists(fits_name)):\n print('retrieving ',fits_name)\n url='http://legacysurvey.org/viewer/cutout.fits?ra='+str(ra[i])+'&dec='+str(dec[i])+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n urlretrieve(url, fits_name)\n else:\n print('previously downloaded ',fits_name)\n pass\n\ndef getlegacy(ra1,dec1,ra2=None,dec2=None, ra3=None,dec3=None,agcflag=False,onlyflag=False,jpeg=True,imsize=None):\n '''\n imsize is size of desired cutout in arcmin\n '''\n gra = '%.5f'%(ra1) # accuracy is of order .1\"\n gdec = '%.5f'%(dec1)\n galnumber = gra+'-'+gdec\n if imsize is not None:\n image_size=imsize\n else:\n image_size=default_image_size\n rootname = 'cutouts/legacy-im-'+str(galnumber)+'-'+str(image_size)\n jpeg_name = rootname+'.jpg'\n\n fits_name = rootname+'.fits'\n\n # check if images already exist\n # if not download images\n if not(os.path.exists(jpeg_name)):\n print('downloading image ',jpeg_name)\n url='http://legacysurvey.org/viewer/jpeg-cutout?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n urlretrieve(url, jpeg_name)\n #else:\n # pass\n # print('found image ',jpeg_name)\n if 
not(os.path.exists(fits_name)):\n print('downloading image ',fits_name)\n url='http://legacysurvey.org/viewer/cutout.fits?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n urlretrieve(url, fits_name)\n #else:\n # pass\n # print('found image ',fits_name)\n \n try:\n t,h = fits.getdata(fits_name,header=True)\n except IndexError:\n print('problem accessing image')\n print(fits_name)\n url='http://legacysurvey.org/viewer/cutout.fits?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n print(url)\n return None\n '''\n try: # try redownloading it\n print('downloading image ',jpeg_name)\n url='http://legacysurvey.org/viewer/jpeg-cutout?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n urlretrieve(url, jpeg_name)\n print('downloading image ',fits_name)\n url='http://legacysurvey.org/viewer/cutout.fits?ra='+str(ra1)+'&dec='+str(dec1)+'&layer=dr8&size='+str(image_size)+'&pixscale=1.00'\n urlretrieve(url, fits_name)\n t,h = fits.getdata(fits_name,header=True)\n except IndexError:\n '''\n \n # write out r-band image\n # nevermind - John M figured out how to use MEF with WCS\n #fits.writeto('r-test.fits',t[1],header=h,overwrite=True)\n if np.mean(t[1]) == 0:\n return None\n norm = simple_norm(t[1],stretch='asinh',percent=99.5)\n if jpeg:\n t = Image.open(jpeg_name)\n plt.imshow(t,origin='lower')\n else:\n plt.imshow(t[1],origin='upper',cmap='gray_r', norm=norm)\n\n # update this to plot the jpeg color image\n \n dx=20\n if agcflag:\n colors = ['cyan','blue','red']\n else:\n colors = ['red','blue','cyan']\n if onlyflag:\n colors = ['k','k','k']\n w = WCS(fits_name,naxis=2) \n if (ra2 is not(None)) & (ra3 is not(None)):\n ra = np.array([ra1,ra2,ra3])\n dec = np.array([dec1,dec2, dec3]) \n #w = WCS('r-test.fits')\n #px,py = w.wcs_world2pix(ra,dec)\n\n px,py = w.wcs_world2pix(ra,dec,1)\n #print(px,py)\n r1 = Rectangle((px[0]-dx/2, py[0]-dx/2), dx, dx, edgecolor=colors[0], facecolor='none')\n dx=17.5\n #r2 = Rectangle((px[1]-dx/2, py[1]-dx/2), dx, dx, edgecolor=colors[1], facecolor='none')\n #dx=15\n #r3 = Rectangle((px[2]-dx/2, py[2]-dx/2), dx, dx, edgecolor=colors[2], facecolor='none')\n plt.gca().add_patch(r1)\n #plt.gca().add_patch(r2)\n #plt.gca().add_patch(r3)\n \n return w\n\n\nclass sample:\n def __init__(self, max_match_offset=7.5):\n \n ## read in my HL catalog\n '''\n Hyperleda query: http://leda.univ-lyon1.fr/fullsql.html\n\n parameters described here: http://leda.univ-lyon1.fr/leda/meandata.html\n\n SQL QUERY:\n \n select\n objname,objtype,de2000,al2000,v,e_v,vopt,e_vopt,vrad,e_vrad,bt,e_bt,type,bar,ring,multiple,compactness,t,e_t,logd25,e_logd25,logr25,e_logr25,pa,incl,logdc,btc,itc,ubtc,bvtc,m21c,hic,mabs,agnclass,kt,e_kt,it,e_it,ut,vt,mfir,e_ut,e_vt, modz, e_modz, mod0, e_mod0,vmaxg, e_vmaxg, vmaxs,e_vmaxs,vdis,e_vdis\n \n where\n\n de2000 > -35 and de2000 < 75 and al2000 < 280./360.*24. and al2000 > 100./360.*24. 
and v < 3300 and v > 500 and objtype='G'\n\n - output as csv, separator is other, ,\n\n delete header lines at beginning and end\n\n 2020-10-25: downloaded again but removed the vr > 500 km/s cut.\n we are adding galaxies with redshift-independent distances > 500/H0, and \n they should be in this updated catalog\n\n - will add this functionality in a separate function\n\n\n \n not using Gialuca's catalog b/c I'm not sure if there were other cuts made already\n gl = fits.getdata('/Users/rfinn/research/VirgoFilaments/Gianluca/nsa_HyperLeda_NED_Steer2017dist_Virgo_field_sources_extension_H0_74_0_final_Kim2016corr_inclCOsample.fits')\n \n '''\n self.max_match_offset = max_match_offset\n\n ################################################################\n ## READ IN HYPERLEDA CATALOG\n ################################################################\n if not(args.evcc):\n hlfile = homedir+'/github/Virgo/tables/hyperleda-finn-09dec2019-full.csv'\n hlfile = homedir+'/github/Virgo/tables/hyperleda-finn-05Feb20.csv'\n hlfile = homedir+'/github/Virgo/tables/hyperleda-finn-24Feb20.csv' \n self.hl = ascii.read(hlfile)\n self.hl = Table(self.hl)\n self.cull_hl()\n else:\n hlfile = homedir+'/research/Virgo/ancil-tables/Steer_EVCC_toadd_HL_cols.fits'\n self.hl = Table.read(hlfile)\n c1 = Column(self.hl['al2000']*15,name='RAdeg')\n self.hl.add_column(c1)\n \n self.hl.write('temp.fits',format='fits',overwrite=True)\n\n self.hl = fits.getdata('temp.fits')\n\n os.remove('temp.fits')\n\n \n\n ################################################################\n ## READ IN NED CATALOG\n ################################################################ \n # downloaded in 10-dec-2019\n '''\n Search by by Parameters\n decmin = -1.2\n decmax = 75\n ramax = 280.\n ramin = 100.\n vmax = 3300.\n vmin = 500.\n object = Galaxy\n \n RA has to be in hours\n ramin = 6.6666666667\n ramax = 18.666666667\n\n output = text, ascii, bar separated\n velocity lower limit = -99\n\n Dowloaded text file - there was garbage at top of file (unnecessary info) that I deleted.\n\n Saved as\n\n /Users/rfinn/github/Virgo/tables/ned-noprolog-10dec2019.txt\n '''\n\n '''\n nedfile = homedir+'/github/Virgo/tables/ned-noprolog-10dec2019.txt'\n self.ned = ascii.read(nedfile,delimiter='|')\n\n # having issues with ned, maybe because of masked array?\n # going to write out fits and read it back in\n #\n self.ned.write('temp.fits',format='fits',overwrite=True)\n self.ned = fits.getdata('temp.fits')\n os.remove('temp.fits')\n self.cull_ned()\n\n '''\n ################################################################\n ## read in NSA catalog\n # using newest version of NSA\n ################################################################\n '''\n using new NSA catalog\n\n 2020-04-18\n switching back to v0 because found some issues with redshifts\n | NEDname | NOTES |\n |-------------------------|--------|\n | NGC 2793| NSA 135797 (not in version 2 of NSA!, in version 1), PG 026189|\n | SHOC 206b| NSA 015877 (in v2, but ZDIST = .13!; v1 ZDIST=.0077) | \n | UGC 08656 NOTES01| LEDA 214137 |\n | UGC 09348| in NSA 002473 (NSA version 2 vr = 27000!), SDSS, PGC 051957; should be in cat|\n |WISEA J150535.77+590537.2| NSA 019809 (in v2, ZDIST=.0459; v1, ZDIST=.008867)|\n '''\n\n nfile = homedir+'/research/NSA/nsa_v1_0_1.fits'\n\n\n self.nsa = fits.getdata(nfile)\n self.cull_nsa()\n #self.nsa = Table(self.nsa)\n\n ################################################################ \n ## read in AGC catalog\n 
################################################################\n # got a new version from Martha on 11/19/19\n #agcfile = '/Users/rfinn/research/AGC/agcm1.sh191118.fits'\n #self.agc = fits.getdata(agcfile)\n #self.cull_agc()\n\n # using full agc so I can remove HIonly\n agcfile = '/Users/rfinn/research/AGC/agcnorthminus1.2019Sep24.fits'\n agcfile = homedir+'/research/AGC/agcnorthminus1.fits' \n self.agc = fits.getdata(agcfile)\n self.agc = Table(self.agc)\n self.cull_agc_full()\n #self.cull_agc()\n #self.agc = Table(self.agc)\n\n \n ################################################################\n ## read in NSA catalog\n # using original version of NSA\n ################################################################\n '''\n 2020-04-18\n switching back to v0 because found some issues with redshifts\n | NEDname | NOTES |\n |-------------------------|--------|\n | NGC 2793| NSA 135797 (not in version 2 of NSA!, in version 1), PG 026189|\n | SHOC 206b| NSA 015877 (in v2, but ZDIST = .13!; v1 ZDIST=.0077) | \n | UGC 08656 NOTES01| LEDA 214137 |\n | UGC 09348| in NSA 002473 (NSA version 2 vr = 27000!), SDSS, PGC 051957; should be in cat|\n |WISEA J150535.77+590537.2| NSA 019809 (in v2, ZDIST=.0459; v1, ZDIST=.008867)|\n '''\n\n nfile = homedir+'/research/NSA/nsa_v0_1_2.fits'\n\n\n self.nsa2 = fits.getdata(nfile)\n self.cull_nsa2()\n #self.nsa = Table(self.nsa)\n\n\n ########################################################################\n ## SET UP FLAGS FOR MATCHING BETWEEN CATALOGS\n ########################################################################\n \n \n # flags to track nsa matches to HL and AGC\n self.hl_2_nsa_matchflag = np.zeros(len(self.hl['al2000']),'bool')\n self.hl_2_agc_matchflag = np.zeros(len(self.hl['de2000']),'bool')\n self.hl_2_nsa2_matchflag = np.zeros(len(self.hl['al2000']),'bool')\n # flags to track nsa matches to HL and AGC\n self.nsa_2_hl_matchflag = np.zeros(len(self.nsa['RA']),'bool')\n self.nsa_2_agc_matchflag = np.zeros(len(self.nsa['RA']),'bool')\n self.nsa_2_nsa2_matchflag = np.zeros(len(self.nsa['RA']),'bool')\n\n # flags to track AGC matches to HL and NSA\n self.agc_2_hl_matchflag = np.zeros(len(self.agc[self.agc_ra_key]),'bool')\n self.agc_2_nsa_matchflag = np.zeros(len(self.agc[self.agc_dec_key]),'bool')\n self.agc_2_nsa2_matchflag = np.zeros(len(self.agc[self.agc_dec_key]),'bool') \n # flags to track nsa matches to HL and AGC\n self.nsa2_2_hl_matchflag = np.zeros(len(self.nsa2['RA']),'bool')\n self.nsa2_2_agc_matchflag = np.zeros(len(self.nsa2['RA']),'bool')\n self.nsa2_2_nsa_matchflag = np.zeros(len(self.nsa2['RA']),'bool')\n\n self.hcoord = SkyCoord(self.hl['al2000']*u.hr,self.hl['de2000']*u.deg,frame='icrs')\n self.ncoord = SkyCoord(self.nsa['RA']*u.deg,self.nsa['DEC']*u.deg,frame='icrs')\n self.acoord = SkyCoord(self.agc[self.agc_ra_key]*u.deg,self.agc[self.agc_dec_key]*u.deg, frame='icrs')\n self.n2coord = SkyCoord(self.nsa2['RA']*u.deg,self.nsa2['DEC']*u.deg,frame='icrs') \n #self.nedcoord = SkyCoord(self.ned['RA']*u.deg,self.ned['DEC']*u.deg, frame='icrs')\n\n self.hvel = self.hl['v'] # mean of optical and radio velocities\n self.nvel = self.nsa['Z']*3.e5\n self.avel = self.agc_vbest\n self.n2vel = self.nsa2['Z']*3.e5 \n #self.nedvel = self.ned['Velocity']\n \n def run_it(self, maxoffset=10):\n self.max_match_offset = maxoffset\n self.match_nsa_2_hl()\n self.match_agc_2_hl()\n self.match_nsa_2_agc()\n self.count_sample()\n \n def cull_hl(self):\n vbest = self.hl['v']\n vflag = (vbest > vmin) & (vbest < vmax)\n raflag = (self.hl['al2000']*15 
> ramin) & (self.hl['al2000']*15 < ramax) \n decflag = (self.hl['de2000'] < decmax) & (self.hl['de2000']> decmin)\n overlap = raflag & decflag\n self.hl = self.hl[overlap]\n\n def cull_nsa(self):\n vbest = self.nsa['Z']*3.e5\n vflag = (vbest > vmin) & (vbest < vmax)\n raflag = (self.nsa['RA'] > ramin) & (self.nsa['RA'] < ramax) \n decflag = (self.nsa['DEC'] < decmax) & (self.nsa['DEC'] > decmin)\n overlap = vflag & raflag & decflag\n self.nsaOverlagFlag = overlap\n self.nsa = self.nsa[overlap]\n \n def cull_nsa2(self):\n vbest = self.nsa2['Z']*3.e5\n vflag = (vbest > vmin) & (vbest < vmax)\n raflag = (self.nsa2['RA'] > ramin) & (self.nsa2['RA'] < ramax) \n decflag = (self.nsa2['DEC'] < decmax) & (self.nsa2['DEC'] > decmin)\n overlap = vflag & raflag & decflag\n self.nsa2 = self.nsa2[overlap]\n\n def cull_ned(self):\n vbest = self.ned['Velocity']\n vflag = (vbest > vmin) & (vbest < vmax)\n raflag = (self.ned['RA'] > ramin) & (self.ned['RA'] < ramax) \n decflag = (self.ned['DEC'] < decmax) & (self.ned['DEC'] > decmin)\n # only keep objects with spectroscopic redshifts\n # https://ned.ipac.caltech.edu/help/faq5.html#5f\n \n speczflag = (self.ned['Redshift Flag'] == 'SPEC') | ((self.ned['Redshift Flag'] == 'N/A') & (self.ned['Redshift Points'] > 2.1))\n print('ned speczflag = ',sum(speczflag))\n #speczflag = (self.ned['Redshift Flag'] == 'N/A') \n overlap = vflag & raflag & decflag & speczflag\n self.ned = self.ned[overlap]\n\n def cull_agc(self):\n # create velocity that is V21 if present, and VOPT otherwise\n flag = self.agc['V21'] > 1.\n vbest = ~flag*self.agc['VOPT'] + flag*self.agc['V21']\n\n # or create velocit that is vopt if present, and V21 otherwise\n flag = self.agc['OPT'] > 1.\n vbest = flag*self.agc['VOPT'] + ~flag*self.agc['V21']\n\n # newest version of agc has vhelagc\n # using this as velocity\n vbest = self.agc['vhelagc']\n #avflag1 = (agc['VOPT'] > vmin) & (agc['VOPT'] < vmax)\n #avflag2 = (agc['V21'] > vmin) & (agc['V21'] < vmax)\n avflag = (vbest > vmin) & (vbest < vmax)\n raflag = (self.agc['radeg'] > ramin) & (self.agc['radeg'] < ramax) \n decflag = (self.agc['decdeg'] < decmax) & (self.agc['decdeg'] > decmin)\n\n # cut based on iposition\n # keep iposition > 7\n\n # cut based on description - remove HIonly sources\n overlap = avflag & raflag & decflag\n self.agc = self.agc[overlap]\n self.agc_vbest = vbest[overlap]\n self.agc_ra_key = 'radeg'\n self.agc_dec_key = 'decdeg'\n \n def cull_agc_full(self):\n # create velocity that is V21 if present, and VOPT otherwise\n v21flag = self.agc['v21'] > 1.\n vbest = ~v21flag*self.agc['vopt'] + v21flag*self.agc['v21']\n\n # newest version of agc has vhelagc\n # using this as velocity\n #vbest = self.agc['vhelagc']\n #avflag1 = (agc['VOPT'] > vmin) & (agc['VOPT'] < vmax)\n #avflag2 = (agc['V21'] > vmin) & (agc['V21'] < vmax)\n avflag = (vbest > vmin) & (vbest < vmax)\n #raflag = (self.agc['RA'] > ramin) & (self.agc['RA'] < ramax) \n #decflag = (self.agc['DEC'] < decmax) & (self.agc['DEC'] > decmin)\n self.agc_ra_key = 'RA'\n self.agc_dec_key = 'DEC'\n raflag = (self.agc['radeg'] > ramin) & (self.agc['radeg'] < ramax) \n decflag = (self.agc['decdeg'] < decmax) & (self.agc['decdeg'] > decmin)\n self.agc_ra_key = 'radeg'\n self.agc_dec_key = 'decdeg'\n\n # cut based on iposition\n # keep iposition > 7\n ipflag = self.agc['iposition'] > 7\n # cut based on description - remove HIonly sources\n description_flag = self.agc['description'] != 'HIonly'\n overlap = avflag & raflag & decflag & ipflag & description_flag\n self.agc = 
self.agc[overlap]\n self.agc_vbest = vbest[overlap]\n c = Column(self.agc_vbest,name='vhelagc')\n self.agc.add_column(c)\n c1 = Column(self.agc['radeg'],name='RA')\n c2 = Column(self.agc['decdeg'],name='DEC')\n self.agc.add_columns([c1,c2])\n def match_nsa_2_hl(self):\n '''\n HL catalog is the start of the super sample\n \n identify HL galaxies with AGC or NSA w/in 5\"\n - track HL, NSA, and AGC name\n\n '''\n # match NSA to Hlleda\n self.insa, d2d, d3d = self.hcoord.match_to_catalog_sky(self.ncoord)\n # first look at number with match w/in 10\"\n self.n2h_matchflag = d2d < self.max_match_offset/3600*u.deg\n print('MATCHING NSA TO HYPERLEDA')\n print('number of matches w/in ',str(self.max_match_offset),' arcsec = ',sum(self.n2h_matchflag),'/',len(self.n2h_matchflag))\n\n # need to also keep track of which AGC galaxy was matched to HL source\n # and remove these from further searches\n\n self.nsa_matched2_hl = np.zeros(len(self.ncoord.ra),'bool')\n self.nsa_matched2_hl[self.insa[self.n2h_matchflag]] = np.ones(sum(self.n2h_matchflag),'bool') \n\n def match_agc_2_hl(self):\n '''\n HL catalog is the start of the super sample\n \n identify HL galaxies with AGC or NSA w/in 5\"\n - track HL, NSA, and AGC name\n\n '''\n # match NSA to Hlleda\n self.iagc, agcd2d, agcd3d = self.hcoord.match_to_catalog_sky(self.acoord)\n # first look at number with match w/in 10\"\n self.a2h_matchflag = agcd2d < self.max_match_offset/3600*u.deg\n print('MATCHING AGC TO HYPERLEDA')\n print('number of matches w/in ',str(self.max_match_offset),' arcsec = ',sum(self.a2h_matchflag),'/',len(self.a2h_matchflag))\n\n # need to also keep track of which AGC galaxy was matched to HL source\n # and remove these from further searches\n\n self.agc_matched2_hl = np.zeros(len(self.acoord.ra),'bool')\n self.agc_matched2_hl[self.iagc[self.a2h_matchflag]] = np.ones(sum(self.a2h_matchflag),'bool') \n\n def match_nsa_2_agc(self):\n '''\n for NSA and AGC galaxies NOT already matched to HL, match NSA to AGC\n\n if offset is < 5\", add galaxy to catalog\n\n track NSA and AGC names\n \n '''\n # match NSA to AGC\n self.insa_n2a, d2d_n2a, d3d_n2a = self.acoord.match_to_catalog_sky(self.ncoord)\n # first look at number with match w/in 10\"\n self.n2a_matchflag = d2d_n2a < self.max_match_offset/3600*u.deg\n print('MATCHING NSA TO AGC')\n print('number of matches w/in ',str(self.max_match_offset),' arcsec = ',sum(self.n2a_matchflag),'/',len(self.n2a_matchflag))\n print('')\n print('number of AGC sources matched to either HL or AGC = ',sum(self.n2a_matchflag | self.agc_matched2_hl))\n # keep track of NSA galaxies that are not matched to HL or AGC\n\n self.nsa_matched2_agc = np.zeros(len(self.ncoord.ra),'bool')\n self.nsa_matched2_agc[self.insa_n2a[self.n2a_matchflag]] = np.ones(sum(self.n2a_matchflag),'bool') \n \n def count_sample(self):\n # start with HL catalog\n n1 = len(self.hl)\n\n # add nsa to agc galaxy matches\n # that weren't matched to HL\n n2 = sum(self.n2a_matchflag & ~self.agc_matched2_hl)\n\n # add agc that weren't matched to either\n n3 = sum(~self.n2a_matchflag & ~self.agc_matched2_hl)\n\n # add nsa that weren't matched to agc or hl\n\n n4 = sum(~self.nsa_matched2_hl & ~self.nsa_matched2_agc)\n print(n1,n2,n3,n4)\n print('total number of galaxies in sample = ',n1+n2+n3+n4)\n ntotal = n1+n2+n3+n4\n\n # BUILD SAMPLE\n \n hlname = np.zeros(ntotal, dtype=self.hl['objname'].dtype)\n hlra = np.zeros(ntotal,dtype=self.hl['al2000'].dtype)\n hldec = np.zeros(ntotal,dtype=self.hl['de2000'].dtype)\n hvel = 
np.zeros(ntotal,dtype=self.hvel.dtype)\n\n aname = np.zeros(ntotal, dtype=self.agc['AGCnr'].dtype)\n ara = np.zeros(ntotal,dtype=self.agc[self.agc_ra_key].dtype)\n adec = np.zeros(ntotal,dtype=self.agc[self.agc_dec_key].dtype)\n avel = np.zeros(ntotal,dtype=self.avel.dtype)\n\n nname = np.zeros(ntotal, dtype=self.nsa['NSAID'].dtype)\n nra = np.zeros(ntotal,dtype=self.nsa['RA'].dtype)\n ndec = np.zeros(ntotal,dtype=self.nsa['DEC'].dtype)\n nvel = np.zeros(ntotal,dtype=self.nvel.dtype)\n\n hflag = np.zeros(ntotal,'bool')\n aflag = np.zeros(ntotal,'bool')\n nflag = np.zeros(ntotal,'bool')\n\n \n # first section includes HL with matches to AGC and NSA\n\n out_columns = [hlname,hlra,hldec,hvel,\\\n aname,ara,adec,avel,\\\n nname,nra,ndec,nvel]\n data_columns = [self.hl['objname'],15.*self.hl['al2000'],self.hl['de2000'],self.hvel,\\\n self.agc['AGCnr'],self.agc[self.agc_ra_key],self.agc[self.agc_dec_key],self.avel,\\\n self.nsa['NSAID'],self.nsa['RA'],self.nsa['DEC'],self.nvel]\n\n for i in range(4):\n out_columns[i][0:n1] = data_columns[i][0:n1]\n\n for i in range(4,8):\n out_columns[i][0:n1][self.a2h_matchflag] = data_columns[i][self.iagc[self.a2h_matchflag]]\n \n for i in range(8,12):\n out_columns[i][0:n1][self.n2h_matchflag] = data_columns[i][self.insa[self.n2h_matchflag]]\n \n hflag[0:n1] = np.ones(n1,'bool')\n aflag[0:n1][self.a2h_matchflag] = np.ones(sum(self.a2h_matchflag),'bool')\n nflag[0:n1][self.n2h_matchflag] = np.ones(sum(self.n2h_matchflag),'bool')\n\n # add in additional NSA matches to AGC\n \n iagc = np.arange(len(self.agc))[(self.n2a_matchflag & ~self.agc_matched2_hl)]\n insa = self.insa_n2a[(self.n2a_matchflag & ~self.agc_matched2_hl)]\n\n for i in range(4,8):\n out_columns[i][n1:n1+n2] = data_columns[i][iagc]\n \n for i in range(8,12):\n out_columns[i][n1:n1+n2] = data_columns[i][insa]\n \n aflag[n1:n1+n2] = np.ones(len(iagc),'bool')\n nflag[n1:n1+n2] = np.ones(len(insa),'bool')\n\n # add remainder of AGC\n iagc = np.arange(len(self.agc))[(~self.n2a_matchflag & ~self.agc_matched2_hl)]\n for i in range(4,8):\n out_columns[i][n1+n2:n1+n2+n3] = data_columns[i][iagc]\n\n aflag[n1+n2:n1+n2+n3] = np.ones(len(iagc),'bool')\n\n\n # add remainder of NSA\n insa = np.arange(len(self.nsa))[(~self.nsa_matched2_hl & ~self.nsa_matched2_agc)]\n for i in range(8,12):\n out_columns[i][n1+n2+n3:n1+n2+n3+n4] = data_columns[i][insa]\n nflag[n1+n2+n3:n1+n2+n3+n4] = np.ones(len(insa),'bool') \n \n out_column_names = ['HL_name','hlra','hldec','hvel','HLflag',\\\n 'AGC_name','ara','adec','avel','AGCflag',\\\n 'NSA_name','nra','ndec','nvel','NSAflag']\n\n self.sample_table = Table([hlname,hlra,hldec,hvel,hflag,aname,ara,adec,avel,aflag,nname,nra,ndec,nvel,nflag],\\\n names = out_column_names)\n self.sample_table.write('kitchen_sink.fits',format='fits',overwrite=True)\n self.check_duplicates_table1()\n \n def check_duplicates_table1(self):\n print('METHOD 1')\n fields = ['HL','AGC','NSA']\n for n in fields:\n print('checking HL ',n,' name')\n duplicates(self.sample_table,n+'_name',flag=self.sample_table[n+'flag'])\n \n \n def get_smart(self,maxoffset=10,veloffset=300.,matchThirdFlag=True):\n '''\n matching offset in arcsec\n\n veloffset in km/s\n\n set matchThirdFlag to false for backward compatability maybe? 
\n actually, I'm not sure why this is there...\n '''\n # use code I already wrote to match catalogs for A100+SDSS!!!\n self.max_match_offset = maxoffset\n\n ###############################################\n ## FIRST MATCH AGC AND HYPERLEDA\n ###############################################\n velocity1 = self.hvel\n velocity2 = self.avel\n\n # don't use velocity matching for now \n hl_2, hl_matchflag, agc_2, agc_matchflag = make_new_cats(self.hl, self.agc,RAkey1='RAdeg',DECkey1='de2000',RAkey2=self.agc_ra_key,DECkey2=self.agc_dec_key, velocity1=velocity1, velocity2=velocity2, maxveloffset = veloffset,maxoffset=maxoffset)\n\n # getting data coercion error\n # testing by matching agc and nsa - match\n # still get the same problem - odd\n\n # now trying without converting fits tables to Table\n # this worked fine!!!\n # so need to convert Hyperleda table to fits table\n # will write this out and read it back in in the __init__ function...\n # hl_2, hl_matchflag, agc_2, agc__matchflag = make_new_cats(self.nsa, self.agc,RAkey1='RA',DECkey1='DEC',RAkey2='radeg',DECkey2='decdeg', velocity1=None, velocity2=None, maxveloffset = voffset,maxoffset=max_match_offset)\n \n ###############################################\n # join HL and AGC into one table\n ###############################################\n joined_table = hstack([hl_2,agc_2])\n \n ###############################################\n # add columns that track if galaxy is in agc and in nsa\n ###############################################\n c1 = Column(hl_matchflag,name='HLflag')\n c2 = Column(agc_matchflag,name='AGCflag')\n ra = np.zeros(len(hl_matchflag),'f')\n dec = np.zeros(len(hl_matchflag),'f')\n ra = hl_matchflag*joined_table['RAdeg'] + ~hl_matchflag*joined_table[self.agc_ra_key]\n dec = hl_matchflag*joined_table['de2000'] + ~hl_matchflag*joined_table[self.agc_dec_key]\n\n c3 = Column(ra,name='RA-HL-AGC',dtype='f')\n c4 = Column(dec,name='DEC-HL-AGC',dtype='f')\n\n # adding another column to track velocity\n # uses HL velocity for all objects in HL catalog\n # uses AGC velocity for any objects in AGC but NOT in HL\n vel = hl_matchflag*joined_table['v'] + ~hl_matchflag*joined_table['vhelagc']\n c5 = Column(vel,name='HL-AGC-VEL',dtype='f')\n joined_table.add_columns([c1,c2,c3,c4,c5])\n\n #print('HLflag after first join = ',joined_table['HLflag'][0:10])\n #joined_table.write('table1.fits',format='fits',overwrite=True)\n #joined_table = Table(fits.getdata('table1.fits'))\n #print('HLflag after writing/reading temp.fits = ',joined_table['HLflag'][0:10])\n self.table1 = joined_table\n print('METHOD 2: AFTER FIRST MERGE')\n columns=['objname','AGCnr']\n fields = ['HL','AGC']\n for i,n in enumerate(fields):\n print('checking HL ',n,' name')\n \n ###############################################\n ## FIX BOOLEAN COLUMNS\n ###############################################\n # boolean columns are getting converted weird\n # when I write and then read the fits table\n\n #try:\n # joined_table['AGCflag'] = (joined_table['AGCflag'] == 84)\n # joined_table['HLflag'] = (joined_table['HLflag'] == 84)\n #except KeyError:\n # print('trouble in paradise')\n #print('HLflag after trying to fix boolean columns = ',joined_table['HLflag'][0:10])\n self.table1 = joined_table\n \n ###############################################\n ## SECOND MATCH NSA TO AGC+HYPERLEDA\n ############################################### \n # now repeat - join NSA to HL+AGC table\n print('MATCH HL+AGC to NSA')\n v1 = joined_table['HL-AGC-VEL']\n v2 = self.nsa['Z']*3.e5\n hlagc_2, 
hlagc_matchflag, nsa_2, nsa_matchflag = make_new_cats(joined_table, self.nsa, RAkey1='RA-HL-AGC',DECkey1='DEC-HL-AGC',RAkey2='RA',DECkey2='DEC', velocity1=v1, velocity2=v2, maxveloffset = veloffset,maxoffset=maxoffset)\n # write out joined a100-sdss-nsa catalog\n joined_table2 = hstack([hlagc_2,nsa_2])\n c1 = Column(nsa_matchflag,name='NSAflag')\n joined_table2.add_column(c1)\n\n ra = hlagc_matchflag*joined_table2['RA-HL-AGC'] + ~hlagc_matchflag*joined_table2['RA_2']\n dec = hlagc_matchflag*joined_table2['DEC-HL-AGC'] + ~hlagc_matchflag*joined_table2['DEC_2']\n\n # redo this as\n # HL if it exists, then NSA, then AGC\n ra = np.zeros(len(joined_table2),'f')\n dec = np.zeros(len(joined_table2),'f')\n\n HLflag = joined_table2['HLflag'] == 1\n AGCflag = joined_table2['AGCflag'] == 1\n NSAflag = joined_table2['NSAflag'] == 1\n print('sum of HL, AGC, and NSA flags = ',sum(HLflag), sum(AGCflag), sum(NSAflag))\n #print('merged ra at 1 : ',ra[0:10],HLflag[0:10])\n flag = HLflag\n ra[flag] = joined_table2['RAdeg'][flag] #HL RA in deg, instead of hrs\n dec[flag] = joined_table2['de2000'][flag]\n #print('merged ra at 2 : ',ra[0:10],HLflag[0:10])\n\n flag = ~HLflag & NSAflag\n ra[flag] = joined_table2['RA_2'][flag]\n dec[flag] = joined_table2['DEC_2'][flag]\n #print('merged ra at 3 : ',ra[0:10],flag[0:10],HLflag[0:10],NSAflag[0:10])\n flag =~HLflag & ~NSAflag & AGCflag \n ra[flag] = joined_table2['radeg'][flag]\n dec[flag] = joined_table2['decdeg'][flag]\n #print('merged ra at 4 : ',ra[0:10]) \n c3 = Column(ra,name='RA-HL-AGC-NSA',dtype='f')\n c4 = Column(dec,name='DEC-HL-AGC-NSA',dtype='f')\n #c3 = Column(ra,name='RA-COMBINED',dtype='f')\n #c4 = Column(dec,name='DEC-COMBINED',dtype='f')\n\n # adding another column to track velocity\n # uses HL velocity for all objects in HL catalog\n # uses AGC velocity for any objects in AGC but NOT in HL\n vel = hlagc_matchflag*joined_table2['HL-AGC-VEL'] + ~hlagc_matchflag*joined_table2['Z']*3.e5\n c5 = Column(vel,name='HL-AGC-NSA-VEL',dtype='f')\n joined_table2.add_columns([c3,c4,c5])\n #print('HLflag after matching to NSA = ',joined_table2['HLflag'][0:10])\n joined_table2.write('temp.fits',format='fits',overwrite=True)\n joined_table2 = Table(fits.getdata('temp.fits'))\n #print('HLflag after reading/writing temp.fits for joined_table2 = ',joined_table2['HLflag'][0:10])\n self.table2 = joined_table2\n if matchThirdFlag:\n ###############################################\n ## THIRD MATCH NSA2 (v_0_1_2_ TO AGC+HYPERLEDA\n ############################################### \n # now repeat - join NSA to HL+AGC table\n print('MATCH HL+AGC+NSA to NSA2') \n v1 = joined_table2['HL-AGC-NSA-VEL']\n v2 = self.nsa2['Z']*3.e5\n hlagc_2, hlnsa2_matchflag, nsa2_2, nsa2_matchflag = make_new_cats(joined_table2, self.nsa2, RAkey1='RA-HL-AGC-NSA',DECkey1='DEC-HL-AGC-NSA',RAkey2='RA',DECkey2='DEC', velocity1=v1, velocity2=v2, maxveloffset = veloffset,maxoffset=maxoffset)\n # write out joined a100-sdss-nsa catalog\n joined_table3 = hstack([hlagc_2,nsa2_2])\n c1 = Column(nsa2_matchflag,name='NSA0flag')\n joined_table3.add_column(c1)\n print('writing temp file')\n joined_table3.write('hello.fits',overwrite=True)\n HLflag = joined_table3['HLflag'] == 1\n AGCflag = joined_table3['AGCflag'] == 1\n NSAflag = joined_table3['NSAflag'] == 1\n NSA2flag = joined_table3['NSA0flag'] == 1\n \n # HL if it exists, then NSAv1, then NSAv2, then AGC\n ra = np.zeros(len(joined_table3),'f')\n dec = np.zeros(len(joined_table3),'f')\n \n ra[HLflag] = joined_table3['RAdeg'][HLflag]\n dec[HLflag] = 
joined_table3['de2000'][HLflag]\n # NSA coordinates (v1)\n ra[~HLflag & NSAflag] = joined_table3['RA_2'][~HLflag & NSAflag]\n dec[~HLflag & NSAflag] = joined_table3['DEC_2'][~HLflag & NSAflag]\n # NSA2 coordinates (v0)\n self.tab3 = joined_table3\n ra[~HLflag & ~NSAflag & NSA2flag] = joined_table3['RA'][~HLflag & ~NSAflag & NSA2flag]\n dec[~HLflag & ~NSAflag & NSA2flag] = joined_table3['DEC'][~HLflag & ~NSAflag & NSA2flag]\n # AGC coordinates\n ra[~HLflag & ~NSAflag & ~NSA2flag & AGCflag] = joined_table3['radeg'][~HLflag & ~NSAflag & ~NSA2flag & AGCflag]\n dec[~HLflag & ~NSAflag & ~NSA2flag & AGCflag] = joined_table3['decdeg'][~HLflag & ~NSAflag & ~NSA2flag & AGCflag]\n #c3 = Column(ra,name='RA-HL-AGC-NSA',dtype='f')\n #c4 = Column(dec,name='DEC-HL-AGC-NSA',dtype='f')\n c3 = Column(ra,name='RA-COMBINED',dtype='f')\n c4 = Column(dec,name='DEC-COMBINED',dtype='f')\n \n # adding another column to track velocity\n # uses HL velocity for all objects in HL catalog\n # uses AGC velocity for any objects in AGC but NOT in HL\n # HL if it exists, then NSAv1, then NSAv2, then AGC\n \n vel = np.zeros(len(joined_table3),'f')\n \n vel[HLflag] = joined_table3['v'][HLflag]\n # NSA coordinates (v1)\n vel[~HLflag & NSAflag] = joined_table3['Z_1'][~HLflag & NSAflag]*3.e5\n \n # NSA2 coordinates (v0)\n vel[~HLflag & ~NSAflag & NSA2flag] = joined_table3['Z_2'][~HLflag & ~NSAflag & NSA2flag]*3.e5\n \n # AGC coordinates\n vel[~HLflag & ~NSAflag & ~NSA2flag & AGCflag] = joined_table3['vhelagc'][~HLflag & ~NSAflag & ~NSA2flag & AGCflag]\n \n c5 = Column(vel,name='VEL-COMBINED',dtype='f')\n joined_table3.add_columns([c3,c4,c5])\n \n \n \n ###############################################\n ## FIX BOOLEAN COLUMNS\n ###############################################\n # boolean columns are getting converted weird\n # when I write and then read the fits table\n \n #try:\n # joined_table3['AGCflag'] = (joined_table3['AGCflag'] == 84)\n # joined_table3['HLflag'] = (joined_table3['HLflag'] == 84)\n # joined_table3['NSAflag'] = (joined_table3['NSAflag'] == 84) \n #except KeyError:\n # print('trouble in paradise')\n\n # fix column names for NSA v1 columns\n colnames = ['IAUNAME','SUBDIR','ISDSS','INED','ISIXDF','IALFALFA',\\\n 'IZCAT','ITWODF','MAG','Z','ZSRC','SIZE','RUN','CAMCOL',\\\n 'FIELD','RERUN','XPOS','YPOS','NSAID','ZDIST','EXTINCTION',\\\n 'XCEN','YCEN','NPROF','SERSIC_N','SERSIC_BA','SERSIC_PHI',\\\n 'ASYMMETRY','CLUMPY','DFLAGS','SERSIC_TH50','PLATE']\n for c in colnames:\n joined_table3.rename_column(c+'_1',c)\n colnames = ['RA','DEC'] # from NSA v2\n for c in colnames:\n joined_table3.rename_column(c,c+'_NSA0')\n if VERSION == 1:\n outfile = 'smart_kitchen_sink'+outfile_suffix+'.fits'\n elif VERSION == 2:\n outfile = 'smart_kitchen_sink_v2'+outfile_suffix+'.fits'\n if args.evcc:\n flag = joined_table3['HLflag']\n joined_table3[flag].write(outfile,format='fits',overwrite=True)\n else:\n joined_table3.write(outfile,format='fits',overwrite=True)\n self.table3 = joined_table3\n else:\n if VERSION == 1:\n outfile = 'smart_kitchen_sink'+outfile_suffix+'.fits'\n elif VERSION == 2:\n outfile = 'smart_kitchen_sink_v2'+outfile_suffix+'.fits'\n if args.evcc:\n flag = joined_table2['HLflag']\n joined_table2[flag].write(outfile,format='fits',overwrite=True)\n else:\n joined_table2.write(outfile,format='fits',overwrite=True)\n self.table2 = joined_table2\n \n #self.check_duplicates_t1()\n #joined_table2.write('temp.fits',format='fits',overwrite=True)\n #joined_table2 = fits.getdata('temp.fits')\n\n ##### SKIPPING NED MATCH FOR 
NOW ##############\n\n ## REDO THIS TO MATCH TO GIANLUCA'S CATALOG\n \n ## ###########################################\n ## ## THIRD MATCH NED TO AGC+HYPERLEDA+NSA\n ## ###############################################\n ## self.test = joined_table2\n ## v1 = joined_table2['HL-AGC-NSA-VEL']\n ## v2 = self.ned['Velocity']\n\n ## hlagc_3, hlagc_matchflag3, ned_2, ned_matchflag = make_new_cats(joined_table2, self.ned, RAkey1='RA_1',DECkey1='DEC_1',RAkey2='RA',DECkey2='DEC', velocity1=v1, velocity2=v2, maxveloffset = veloffset,maxoffset=maxoffset)\n ## # write out joined a100-sdss-nsa catalog\n ## joined_table3 = hstack([hlagc_3,ned_2])\n ## c1 = Column(ned_matchflag,name='NEDflag')\n ## joined_table3.add_column(c1)\n\n ## ra = hlagc_matchflag3*joined_table3['RA-HL-AGC-NSA'] + ~hlagc_matchflag3*joined_table3['RA']\n ## dec = hlagc_matchflag3*joined_table3['DEC-HL-AGC-NSA'] + ~hlagc_matchflag3*joined_table3['DEC']\n\n ## c3 = Column(ra,name='RA-COMBINED',dtype='f')\n ## c4 = Column(dec,name='DEC-COMBINED',dtype='f')\n\n ## # adding another column to track velocity\n ## # uses HL velocity for all objects in HL catalog\n ## # uses AGC velocity for any objects in AGC but NOT in HL\n ## vel = hlagc_matchflag3*joined_table3['HL-AGC-NSA-VEL'] + ~hlagc_matchflag3*joined_table3['Velocity']\n ## c5 = Column(vel,name='VEL-COMBINED',dtype='f')\n ## joined_table3.add_columns([c3,c4,c5])\n \n ## ###############################################\n ## ## FIX BOOLEAN COLUMNS...AGAIN\n ## ###############################################\n ## # boolean columns are getting converted weird\n ## # when I write and then read the fits table\n ## try:\n ## joined_table3['AGCflag'] = (joined_table3['AGCflag'] == 84)\n ## joined_table3['HLflag'] = (joined_table3['HLflag'] == 84)\n ## joined_table3['NSAflag'] = (joined_table3['NSAflag'] == 84)\n ## except KeyError:\n ## print('trouble in paradise')\n \n ## joined_table3.write('smart_kitchen_sink.fits',format='fits',overwrite=True)\n ## self.table2 = joined_table3\n ## self.check_duplicates_t2()\n def check_duplicates_t2(self):\n print('METHOD 2')\n columns=['objname','AGCnr','NSAID','Object Name']\n fields = ['HL','AGC','NSA','NED']\n for i,n in enumerate(fields):\n print('checking HL ',n,' name')\n duplicates(self.table2,columns[i],flag=self.table2[n+'flag'])\n def check_vel(self, table2flag=False):\n # compare recession velocities of \"matches\"\n plt.figure(figsize=(8,6))\n if table2flag:\n mytable = self.table2\n fields=['v','vhelagc','Z']\n else:\n mytable = self.sample_table\n fields=['hvel','avel','nvel']\n plt.plot(mytable[fields[0]],mytable[fields[1]],'bo',label='AGC',alpha=.5)\n if table2flag:\n plt.plot(mytable[fields[0]],mytable[fields[2]]*3.e5,'ro',label='NSA',alpha=.5)\n else:\n plt.plot(mytable[fields[0]],mytable[fields[2]],'ro',label='NSA',alpha=.5)\n xmin,xmax = plt.xlim()\n xl = np.linspace(xmin,xmax,100)\n plt.plot(xl,xl,'k-')\n plt.plot(xl,xl-300,'k--')\n plt.plot(xl,xl+300,'k--')\n plt.xlabel('Hyperleda v_r')\n plt.ylabel('v_r of matched galaxy')\n plt.legend()\n plt.savefig('dv_of_matches.pdf')\n\nclass panel_plots:\n def plotimages(self,flag, outfile_string='test',agcflag=False,nsaflag=False,nedflag=False,onlyflag=False,startindex=0):\n nsaindex = self.t.NSAID[flag]\n hra1 = self.hcoord.ra.deg[flag]\n hdec1 = self.hcoord.dec.deg[flag]\n nra2 = self.ncoord.ra.deg[flag]\n ndec2 = self.ncoord.dec.deg[flag]\n ara3 = self.acoord.ra.deg[flag]\n adec3 = self.acoord.dec.deg[flag]\n #nedra = self.nedcoord.ra.deg[flag]\n #neddec = self.nedcoord.dec.deg[flag]\n 
w21 = self.t.width[flag]\n hlname = self.t.objname[flag]\n nsaid = self.t.NSAID[flag]\n agcnumber = self.t.AGCnr[flag]\n #nedname = self.t['Object Name'][flag]\n galnumber = np.arange(len(self.t.NSAID))[flag]\n plt.figure(figsize=(12,10))\n plt.subplots_adjust(bottom=.05,left=.05,top=.9,right=.95,hspace=.5)\n # plots suddenly stopped working for AGC\n # tryting to see if starting at a different index will help\n if agcflag:\n startindex=10\n\n i=0 + startindex\n nsubplot = 1\n nrow=4\n ncol=5\n if sum(flag) == 0:\n print('no duplicates to plot')\n return\n while nsubplot < nrow*ncol+1:#for i in range(9):\n plt.subplot(nrow,ncol,nsubplot)\n #print('flag index = ',i)\n #try:\n if agcflag:\n print('agcflag is set',i,nsubplot)\n w = getlegacy(ara3[i],adec3[i],ra2=nra2[i],dec2=ndec2[i],ra3=hra1[i],dec3=hdec1[i],agcflag=agcflag,onlyflag=onlyflag)\n elif nsaflag:\n w = getlegacy(nra2[i], ndec2[i],ra2=hra1[i],dec2=hdec1[i],ra3=ara3[i],dec3=adec3[i],agcflag=agcflag,onlyflag=onlyflag)\n elif nedflag:\n w = getlegacy(nedra[i], neddec[i],ra2=hra1[i],dec2=hdec1[i],ra3=ara3[i],dec3=adec3[i],agcflag=agcflag,onlyflag=onlyflag)\n else:\n w = getlegacy(hra1[i], hdec1[i],ra2=nra2[i],dec2=ndec2[i],ra3=ara3[i],dec3=adec3[i],agcflag=agcflag,onlyflag=onlyflag)\n '''\n except:\n i = i + 1\n print('trouble in paradise',i)\n print('maybe coords are outside Legacy Survey?')\n if agcflag:\n print(ara3[i],adec3[i])\n elif nsaflag:\n print(nra2[i],ndec2[i])\n else:\n print(hra1[i],hdec1[i])\n continue\n '''\n #plt.axis([50,200,50,200])\n #plt.axis([75,175,75,175])\n #plt.title(str(hlname[i])+'\\n'+nedname[i]+'\\n NSA '+str(nsaid[i])+' / AGC '+str(agcnumber[i]),fontsize=8)\n plt.title(str(hlname[i])+'\\n'+'\\n NSA '+str(nsaid[i])+' / AGC '+str(agcnumber[i]),fontsize=8)\n if nsubplot == 1:\n plt.text(80, 205,str(outfile_string),fontsize=16,horizontalalignment='left')\n self.add_allgals(w, agcflag=agcflag)\n if w21[i] > .1:\n plt.text(.05, .05,'W21='+str(w21[i]),fontsize=8,c='.7', transform=plt.gca().transAxes)\n plt.text(.05,.85,'Gal '+str(galnumber[i]),fontsize=8,c='.7', transform=plt.gca().transAxes)\n if self.HLflag[i]:\n gname = self.t['objname'][i]\n elif self.AGCflag[i]:\n gname = 'AGC'+self.t['AGCnr'][i]\n elif self.NSAflag[i]:\n gname = 'NSAID'+self.t['NSAID'][i]\n plt.text(.05,.1,'Gal '+str(gname),fontsize=8,c='.7', transform=plt.gca().transAxes)\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8)\n i = i + 1\n nsubplot += 1\n plt.savefig('../plots/AGC-HL-NSA-'+outfile_string+'.png')\n def densearray(self,flag, outfile_string='test',agcflag=False,nsaflag=False,nedflag=False,onlyflag=False,startindex=0,endindex=None):\n nsaindex = self.t.NSAID[flag]\n galnumber = np.arange(len(self.t))[flag]\n hra1 = self.hcoord.ra.deg[flag]\n hdec1 = self.hcoord.dec.deg[flag]\n nra2 = self.ncoord.ra.deg[flag]\n ndec2 = self.ncoord.dec.deg[flag]\n ara3 = self.acoord.ra.deg[flag]\n adec3 = self.acoord.dec.deg[flag]\n #nedra = self.nedcoord.ra.deg[flag]\n #neddec = self.nedcoord.dec.deg[flag]\n super_ra = self.t['RA-COMBINED']\n super_dec = self.t['DEC-COMBINED']\n \n w21 = self.t.width[flag]\n hlname = self.t.objname[flag]\n nsaid = self.t.NSAID[flag]\n agcnumber = self.t.AGCnr[flag]\n #nedname = self.t['Object Name'][flag]\n\n plt.figure(figsize=(12,7))\n plt.subplots_adjust(bottom=.05,left=.05,top=.9,right=.95,hspace=.01,wspace=.01)\n # plots suddenly stopped working for AGC\n # tryting to see if starting at a different index will help\n if agcflag:\n startindex=10\n\n i=0 + startindex\n nsubplot = 1\n nrow=5\n ncol=10\n 
galids_in_fov = []\n if sum(flag) == 0:\n print('no duplicates to plot')\n return\n if endindex is not None:\n maxcount = endindex-startindex+1\n else:\n maxcount = nrow*ncol+1\n while nsubplot < maxcount:\n #print(i,nsubplot,maxcount)\n plt.subplot(nrow,ncol,nsubplot)\n #print('flag index = ',i)\n #try:\n massflag=False\n w = getlegacy(super_ra[i], super_dec[i],ra2=nra2[i],dec2=ndec2[i],ra3=ara3[i],dec3=adec3[i],agcflag=agcflag,onlyflag=onlyflag)\n jpegflag=True\n if w is None:\n jpegflag=False\n print('trouble in paradise',i)\n print('maybe coords are outside Legacy Survey?')\n print(super_ra[i],super_dec[i])\n # try to get 2MASS J image\n # check to see if 2MASS image exists\n gra = '%.5f'%(super_ra[i]) # accuracy is of order .1\"\n gdec = '%.5f'%(super_dec[i])\n galpos = gra+'-'+gdec\n rootname = 'cutouts/2MASS-J-'+str(galpos)\n rootname = 'cutouts/DSS2-'+str(galpos)+'-'+str(image_size)+'-1arcsecpix' \n\n fits_name = rootname+'.fits'\n if not(os.path.exists(fits_name)):\n #print('downloading 2MASS J image ')\n print('downloading DSS2 Image ') \n #\n c = SkyCoord(ra=super_ra[i]*u.deg,dec=super_dec[i]*u.deg)\n x = SkyView.get_images(position=c,survey=['DSS2 Red'],pixels=[60,60])\n # save fits image\n fits.writeto(fits_name, x[0][0].data, header=x[0][0].header)\n else:\n print('using 2mass image ',fits_name)\n im, h = fits.getdata(fits_name,header=True)\n w = WCS(h)\n norm = simple_norm(im,stretch='asinh',percent=99.5)\n plt.imshow(im,origin='upper',cmap='gray_r', norm=norm)\n # pixel scale is 1 arcsec\n # therefore, to show a 60x60 arcsec image, want to set boundary to center-30:center+30\n im_nrow,im_ncol=im.shape\n\n massflag=True\n #plt.axis([50,200,50,200])\n #plt.axis([75,175,75,175])\n\n #plt.title(str(hlname[i])+'\\n'+nedname[i]+'\\n NSA '+str(nsaid[i])+' / AGC '+str(agcnumber[i]),fontsize=8)\n #if nsubplot == 1:\n # plt.text(10, 205,str(outfile_string), dtype='i'),fontsize=16,horizontalalignment='left')\n ids = self.add_allgals(w, agcflag=agcflag,jpegflag=jpegflag)\n galids_in_fov.append(ids)\n if massflag:\n text_color='k'\n else:\n text_color='0.7'\n if w21[i] > .1:\n plt.text(.05, .05,'W21='+str(w21[i]),fontsize=8,c=text_color, transform=plt.gca().transAxes)\n plt.text(.05,.85,'Gal '+str(galnumber[i]),fontsize=8,c=text_color, transform=plt.gca().transAxes)\n # remove ticks for internal images\n #print(nsubplot,np.mod(nsubplot,ncol))\n # adjust ticksize of outer left and bottom images\n if massflag:\n plt.axis([int(im_nrow/2-image_size/2),int(im_nrow/2+image_size/2),int(im_ncol/2-image_size/2),int(im_ncol/2+image_size/2)])\n else:\n plt.xticks(np.arange(0,image_size,20),fontsize=8)\n plt.yticks(np.arange(0,image_size,20),fontsize=8)\n\n #plt.axis([20,80,20,80])\n if (nsubplot < (nrow-1)*(ncol)):\n plt.xticks([],[])\n if (np.mod(nsubplot,ncol) > 1) | (np.mod(nsubplot,ncol) == 0) :\n #print('no y labels')\n plt.yticks([],[])\n i = i + 1\n nsubplot += 1\n #plt.savefig('../plots/densearray-'+outfile_string+'.png')\n return galids_in_fov\n\n def one_gal(self,i,dssflag=False,imsize=None,plotsingle=True):\n if plotsingle:\n plt.figure(figsize=(4,4))\n flag = np.ones_like(self.AGCflag, dtype='bool')\n agcflag=False\n onlyflag=False\n nsaindex = self.t.NSAID[flag]\n hra1 = self.hcoord.ra.deg[flag]\n hdec1 = self.hcoord.dec.deg[flag]\n nra2 = self.ncoord.ra.deg[flag]\n ndec2 = self.ncoord.dec.deg[flag]\n ara3 = self.acoord.ra.deg[flag]\n adec3 = self.acoord.dec.deg[flag]\n #nedra = self.nedcoord.ra.deg[flag]\n #neddec = self.nedcoord.dec.deg[flag]\n super_ra = self.t['RA-COMBINED']\n 
super_dec = self.t['DEC-COMBINED']\n w21 = self.t.width[flag]\n if dssflag:\n w = None\n jpegflag = False\n else:\n w = getlegacy(super_ra[i], super_dec[i],ra2=nra2[i],dec2=ndec2[i],ra3=ara3[i],dec3=adec3[i],agcflag=agcflag,onlyflag=onlyflag,imsize=imsize)\n jpegflag = True\n if w is None:\n jpegflag = False\n if imsize is not None:\n image_size=imsize\n else:\n image_size = default_image_size\n print('trouble in paradise',i)\n print('maybe coords are outside Legacy Survey?')\n print(super_ra[i],super_dec[i])\n # try to get 2MASS J image\n # check to see if 2MASS image exists\n gra = '%.5f'%(super_ra[i]) # accuracy is of order .1\"\n gdec = '%.5f'%(super_dec[i])\n galpos = gra+'-'+gdec\n rootname = 'cutouts/DSS2-'+str(galpos)+'-'+str(image_size)+'-1arcsecpix' \n\n fits_name = rootname+'.fits'\n if not(os.path.exists(fits_name)):\n #print('downloading 2MASS J image ')\n print('downloading DSS2 Image ') \n #\n c = SkyCoord(ra=super_ra[i]*u.deg,dec=super_dec[i]*u.deg)\n x = SkyView.get_images(position=c,survey=['DSS2 Red'],pixels=[60,60])\n # save fits image\n fits.writeto(fits_name, x[0][0].data, header=x[0][0].header)\n else:\n print('using DSS2 image ',fits_name)\n im, h = fits.getdata(fits_name,header=True)\n w = WCS(h)\n norm = simple_norm(im,stretch='asinh',percent=99.5)\n plt.imshow(im,origin='upper',cmap='gray_r', norm=norm)\n\n ids = self.add_allgals(w, agcflag=agcflag,jpegflag = jpegflag,imsize=imsize)\n text_color='0.7'\n if plotsingle:\n if w21[i] > .1:\n plt.text(.05, .05,'W21='+str(w21[i]),fontsize=8,c=text_color, transform=plt.gca().transAxes)\n plt.text(.05,.85,'Gal '+str(i),fontsize=8,c=text_color, transform=plt.gca().transAxes)\n # remove ticks for internal images\n #print(nsubplot,np.mod(nsubplot,ncol))\n # adjust ticksize of outer left and bottom images\n return ids\n\n def add_allgals(self,w,agcflag=False,twomass_flag=False,jpegflag=False,imsize=None):\n\n if imsize is None:\n image_size = default_image_size\n else:\n image_size = imsize\n print('cutout image size is ',image_size)\n cats = [self.acoord, self.ncoord, self.hcoord,self.n2coord,self.glcoord]\n #symbols=['co','b*','r+']\n #edgecolors = ['c','w','r']\n # AGC, NSA, HL, NSA2, GL\n symbols=['co','r^','yD','k+','gs']\n edgecolors = ['c','b','r','xkcd:goldenrod', 'g']\n edgecolors = ['c','r','y','k', 'g',]\n #edgecolors = ['c0','c1','c2','c3','c4']\n if agcflag:\n facecolors = ['None','None','None','None','None']\n else:\n facecolors = ['None','None','None','None','None']#['c','b','r','None','g']\n sizes = [14,14,14,16,18]\n text_offsets = [(10,14),(10,7),(10,0),(10,-7),(10,-14)]\n gals_in_fov = []\n #w = WCS('hyper-nsa-test.fits',naxis=2)\n for i,c in enumerate(cats):\n px,py = w.wcs_world2pix(c.ra.deg,c.dec.deg,1)\n galnumber = np.arange(len(c.ra.deg))\n #print('number of galaxies in catalog = ',len(c.ra.deg))\n # only keep objects on image\n if twomass_flag:\n #assume image is 300 pixels(default), pix\n keepflag = (px > 0) & (py > 0) & (px < image_size) & (py < image_size)\n else:\n keepflag = (px > 0) & (py > 0) & (px < image_size) & (py < image_size)\n if jpegflag:\n plt.plot(px[keepflag],image_size - py[keepflag],symbols[i],mec=edgecolors[i],mfc=facecolors[i],markersize=sizes[i])\n else:\n plt.plot(px[keepflag],py[keepflag],symbols[i],mec=edgecolors[i],mfc=facecolors[i],markersize=sizes[i])\n # label points\n #print('number of galaxies in FOV = ',sum(keepflag))\n gnumbers = galnumber[keepflag]\n j = 0\n if i < (len(edgecolors)-1): # by stopping at 3, I am not labeling Gianluca's catalog id\n 
gals_in_fov.append(gnumbers.tolist())\n for x,y in zip(px[keepflag],py[keepflag]):\n if jpegflag:\n y = image_size - y\n label = str(gnumbers[j])\n\n plt.annotate(label, # this is the text\n (x,y), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=text_offsets[i], # distance from text to points (x,y)\n ha='center', # horizontal alignment can be left, right or center\n color=edgecolors[i],fontsize=8)\n j += 1\n flattened_gnumbers = [val for sublist in gals_in_fov for val in sublist]\n gals_in_fov = list(sorted(set(flattened_gnumbers)))\n return gals_in_fov\n\nclass fulltable(panel_plots):\n def __init__(self,nedflag=False):\n # this file contains all columns from HL, AGC and NSA\n self.t = fits.getdata('smart_kitchen_sink_v2.fits')\n #self.t = fits.getdata('smart_kitchen_sink_05feb2020.fits')\n self.hcoord = SkyCoord(self.t['al2000']*u.hr,self.t['de2000']*u.deg,frame='icrs')\n self.ncoord = SkyCoord(self.t['RA_2']*u.deg,self.t['DEC_2']*u.deg,frame='icrs')\n self.acoord = SkyCoord(self.t['RA_1']*u.deg,self.t['DEC_1']*u.deg, frame='icrs')\n self.n2coord = SkyCoord(self.t['RA']*u.deg,self.t['DEC']*u.deg,frame='icrs')\n\n self.hvel = self.t['v'] # mean of optical and radio velocities\n try:\n self.nvel = self.t['Z']*3.e5\n except KeyError:\n self.nvel = self.t['Z_1']*3.e5\n self.avel = self.t['vhelagc']\n\n \n\n # Boolean columns are not preserved correctly\n # topcat recognizes them ok, but they come in as integers (84=True, 70-something= False)\n # resetting flags here to boolean arrays\n self.AGCflag = self.t.AGCflag == 84\n self.HLflag = self.t.HLflag == 84\n self.NSAflag = self.t.NSAflag == 84\n\n \n\n self.hl_only_flag = ~self.AGCflag & self.HLflag & ~self.NSAflag \n self.agc_only_flag = self.AGCflag & ~self.HLflag & ~self.NSAflag\n self.nsa_only_flag = ~self.AGCflag & ~self.HLflag & self.NSAflag\n\n\n if nedflag:\n self.nedcoord = SkyCoord(self.t['RA']*u.deg,self.t['DEC']*u.deg, frame='icrs')\n self.NEDflag = self.t.NEDflag == 84\n self.nedvel = self.t['Velocity']\n self.ned_only_flag = ~self.AGCflag & ~self.HLflag & ~self.NSAflag & self.NEDflag\n self.hl_only_flag = ~self.AGCflag & self.HLflag & ~self.NSAflag & ~self.NEDflag\n self.agc_only_flag = self.AGCflag & ~self.HLflag & ~self.NSAflag & ~self.NEDflag\n self.nsa_only_flag = ~self.AGCflag & ~self.HLflag & self.NSAflag & ~self.NEDflag\n\n self.nedflag = nedflag\n self.fields = ['HL','AGC','NSA','NED']\n self.fields = ['HL','AGC','NSA','GL']\n #self.temp_fix_for_ned()\n self.gl = fits.getdata(homedir+'/research/VirgoFilaments/Gianluca/nsa_HyperLeda_NED_Steer2017dist_Virgo_field_sources_extension_H0_74_0_final_Kim2016corr_inclCOsample.18Dec2020.fits')\n keepflag = self.gl['v_HL'] > 500.\n self.gl = self.gl[keepflag]\n self.glcoord = SkyCoord(self.gl['RA']*u.deg,self.gl['DEC']*u.deg, frame='icrs')\n self.gvel = self.gl['v_HL']\n self.galids_in_fov = len(self.AGCflag)*[None]\n print('LENGTH GALIDS_IN_FOV = ',len(self.galids_in_fov))\n def summary_statistics(self):\n print('number in combined mastertable = ',len(self.AGCflag))\n if self.nedflag:\n print('number in all cats = ',sum(self.AGCflag & self.HLflag & self.NSAflag & self.NEDflag))\n print('number in all cats except NED = ',sum(self.AGCflag & self.HLflag & self.NSAflag & ~self.NEDflag))\n totalflag = np.array(self.AGCflag,'i') + np.array(self.HLflag,'i') + np.array(self.NSAflag,'i') + np.array(self.NEDflag,'i')\n print('number in 3 cats = ',sum(totalflag > 2))\n print('number in 2 cats = ',sum(totalflag > 1))\n print('number in HL only 
= ',sum(~self.AGCflag & self.HLflag & ~self.NSAflag & ~self.NEDflag))\n print('number in AGC only = ',sum(self.AGCflag & ~self.HLflag & ~self.NSAflag & ~self.NEDflag))\n print('number in NSA only = ',sum(~self.AGCflag & ~self.HLflag & self.NSAflag & ~self.NEDflag))\n print('number in NED only = ',sum(~self.AGCflag & ~self.HLflag & ~self.NSAflag & self.NEDflag)) \n else:\n print('number in all cats = ',sum(self.AGCflag & self.HLflag & self.NSAflag ))\n totalflag = np.array(self.AGCflag,'i') + np.array(self.HLflag,'i') + np.array(self.NSAflag,'i') \n print('number in 3 cats = ',sum(totalflag > 2))\n print('number in 2 cats = ',sum(totalflag > 1))\n print('number in HL only = ',sum(~self.AGCflag & self.HLflag & ~self.NSAflag ))\n print('number in AGC only = ',sum(self.AGCflag & ~self.HLflag & ~self.NSAflag))\n print('number in NSA only = ',sum(~self.AGCflag & ~self.HLflag & self.NSAflag))\n \n def temp_fix_for_ned(self):\n # as a first test, I want to plot positions of NED sources on image cutouts\n # I haven't encorporated the NED columns into smart_kitchen_sink.fits\n # so just going to read in catalog separately so that it's available to plot in plotimage\n \n nedfile = homedir+'/github/Virgo/tables/ned-noprolog-10dec2019.txt'\n self.ned = ascii.read(nedfile,delimiter='|')\n self.nedcoord = SkyCoord(self.ned['RA']*u.deg,self.ned['DEC']*u.deg, frame='icrs')\n # updating allgals to include NED\n def download_images(self):\n getlegacyimages(self.t['RA-COMBINED'],self.t['DEC-COMBINED'])\n def all_cats(self):\n # keep galaxies that are in AGC and NOT in (HL and NSA)\n flag = self.AGCflag & self.HLflag & self.NSAflag & self.NEDflag\n self.plotimages(flag,outfile_string='All Catalogs',agcflag=False,onlyflag=False)\n #self.plotimages(flag,outfile_string='AGConly',onlyflag=True)\n plt.savefig('All-catalogs.png')\n pass\n def agc_only(self):\n # keep galaxies that are in AGC and NOT in (HL and NSA)\n if self.nedflag:\n flag = self.AGCflag & ~self.HLflag & ~self.NSAflag & ~self.NEDflag\n else:\n flag = self.AGCflag & ~self.HLflag & ~self.NSAflag \n self.plotimages(flag,outfile_string='AGConly',agcflag=True,onlyflag=True)\n #self.plotimages(flag,outfile_string='AGConly',onlyflag=True)\n self.agc_only_flag = flag\n plt.savefig('AGConly.png')\n pass\n def hl_only(self):\n if self.nedflag:\n flag = ~self.AGCflag & self.HLflag & ~self.NSAflag & ~self.NEDflag\n else:\n flag = ~self.AGCflag & self.HLflag & ~self.NSAflag \n self.plotimages(flag,outfile_string='HLonly',onlyflag=True)\n self.hl_only_flag = flag\n plt.savefig('HLonly.png')\n pass\n def nsa_only(self):\n if self.nedflag:\n flag = ~self.AGCflag & ~self.HLflag & self.NSAflag & ~self.NEDflag\n else:\n flag = ~self.AGCflag & ~self.HLflag & self.NSAflag \n self.plotimages(flag,outfile_string='NSAonly',nsaflag=True,onlyflag=True)\n self.nsa_only_flag = flag\n plt.savefig('NSAonly.png')\n pass\n def ned_only(self):\n flag = ~self.AGCflag & ~self.HLflag & ~self.NSAflag & self.NEDflag\n self.plotimages(flag,outfile_string='NEDonly',nedflag=True,onlyflag=True)\n self.ned_only_flag = flag\n plt.savefig('NEDonly.png')\n pass\n def plot_only(self):\n self.agc_only()\n self.hl_only()\n self.nsa_only()\n if not(self.nedflag):\n self.ned_only()\n def plot_duplicates(self):\n\n columns=['objname','AGCnr','NSAID','Object Name']\n if self.nedflag:\n fields = ['HL','AGC','NSA','NED']\n flags = [self.HLflag,self.AGCflag,self.NSAflag,self.NEDflag]\n else:\n fields = ['HL','AGC','NSA']\n flags = [self.HLflag,self.AGCflag,self.NSAflag]\n for i,n in enumerate(fields):\n 
print('checking HL ',n,' name')\n unique, counts = duplicates(self.t,columns[i],flag=flags[i])\n plotids = unique[counts > 1]\n plotflag = np.zeros(len(flags[i]),'bool')\n for id in plotids:\n matchflag = id == self.t[columns[i]]\n plotflag[matchflag] = np.ones(sum(matchflag),'bool')\n\n\n if i == 1:\n agcflag=True\n else:\n agcflag=False\n if i == 2:\n nsaflag=True\n else:\n nsaflag=False\n if i == 3:\n nedflag=True\n else:\n nedflag=False\n self.plotimages(plotflag,outfile_string=n+'-duplicates',nsaflag=nsaflag,agcflag=agcflag,nedflag=nedflag,onlyflag=True)\n print(i)\n plt.savefig(n+'-duplicates.png')\n def plot_special_cases(self):\n self.plot_only()\n self.plot_duplicates()\n def plot_all(self,startgal=None):\n plt.close('all')\n flag = np.ones_like(self.AGCflag, dtype='bool')\n #print('LENGTH OF GALIDS IN FOV = ',len(self.galids_in_fov))\n #self.plotimages(flag,outfile_string='All Galaxies',agcflag=False,onlyflag=True)\n ngal = len(self.AGCflag)\n ngalperplot = 50\n nplots = np.floor(ngal/ngalperplot)\n #galids_in_fov = []\n if (ngal/ngalperplot - nplots) > 0:\n nplots += 1\n nplots = int(nplots)\n endindex = None\n if startgal is None:\n allplots = [i for i in range(nplots)]\n else:\n first_plot = int(np.floor(startgal/ngalperplot))\n allplots = [i for i in range(first_plot,nplots)]\n for i in allplots:\n #for i in range(1):\n plt.close('all')\n startindex = i*ngalperplot\n s1 = '%04d'%(startindex)\n n2 = startindex+49\n if n2 > (ngal-1):\n n2 = ngal-1\n endindex=n2\n print('MAKING LAST PLOT')\n s2 = '%04d'%(n2)\n print(s1,s2)\n\n galids = self.densearray(flag,outfile_string='All-Galaxies',agcflag=False,onlyflag=True,startindex = startindex, endindex=endindex)\n\n #self.plotimages(flag,outfile_string='AGConly',onlyflag=True)\n #print('LENGTH OF GALIDS IN FOV = ',len(self.galids_in_fov))\n #print('startindex, n2+1 = ',startindex,n2+1)\n if endindex is None: \n self.galids_in_fov[startindex:n2+1] = galids\n else:\n self.galids_in_fov[startindex:n2] = galids\n #print('LENGTH OF GALIDS IN FOV = ',len(self.galids_in_fov))\n # include range of galaxy ids in name of pdf file\n plt.savefig('plots/gcutouts-'+s1+'-'+s2+'.pdf')\n\n self.write_spreadsheet()\n pass\n\n def write_spreadsheet(self):\n '''\n spreadsheet to use in reviewing each galaxy in the sample.\n\n * col 1: the galaxy id (row number in catalog)\n * col 2: our classification, where:\n - 1 = galaxy is fine (this will be the default setting)\n - 0 = galaxy should be removed (nothing there in the image)\n - 2 = galaxy should be merged (provide id of parent galaxy in col 3)\n - 3 = can't tell, more detailed followup required\n * col 3: parent id, if galaxy should be merged with another source (default is blank)\n * col 4: list of other galaxy ids that appear within the FOV of the cutout (sometimes the numbers are hard to see)\n * col 6-9: list of galaxy name in HL, NED, NSA, AGC surveys\n\n using pandas, based on this tutorial\n\n https://xlsxwriter.readthedocs.io/example_pandas_multiple.html\n '''\n galnumber = np.arange(len(self.AGCflag))\n flag = np.ones_like(self.AGCflag, dtype='i')\n parent = np.full(len(self.AGCflag),'')\n #other_gals = np.full(len(self.AGCflag),'')\n other_gals = self.galids_in_fov\n\n # with NED\n #t = Table([galnumber,flag,parent,other_gals,\\\n # self.t['objname'],self.t['Object Name'],self.t['NSAID'],self.t['AGCnr'],self.t['RA-COMBINED'],self.t['DEC-COMBINED'],\\\n # self.t['v'],self.t['vhelagc'],self.t['Z']*3.e5,self.t['Velocity'],self.t['Redshift Flag'],self.t['Redshift Points']],\n # 
names=['galnumber','class','parent','objid_in_fov','HL','NED','NSAID','AGC','RA','DEC',\\\n # 'vHL','vAGC','vNSA','vNED','NEDzflag','NEDzpoints'])\n t = Table([galnumber,flag,parent,other_gals,\\\n self.t['objname'],self.t['NSAID'],self.t['AGCnr'],self.t['RA-COMBINED'],self.t['DEC-COMBINED'],\\\n self.t['v'],self.t['vhelagc'],self.t['Z']*3.e5,self.t['Z_2']*3.e5],\n names=['galnumber','class','parent','objid_in_fov','HL','NSAID','AGC','RA','DEC',\\\n 'vHL','vAGC','vNSA','vNSA0'])\n #cols = [galnumber,flag,parent,other_gals,self.t['objname'],self.t['Object Name'],self.t['NSAID'],self.t['AGCnr'],self.t['RA-COMBINED'],self.t['DEC-COMBINED']]\n #for a in cols:\n # print(len(a))\n #t = Table([galnumber,flag,parent,other_gals,\\\n # self.t['objname'],self.t['Object Name'],self.t['NSAID'],self.t['AGCnr'],self.t['RA-COMBINED'],self.t['DEC-COMBINED']])\n #names=['galnumber','class','parent','objid_in_fov','HL','NED','NSAID','AGC','RA','DEC'])\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter('virgo_check_sample_by_eye_v2.xlsx', engine='xlsxwriter')\n gals_per_sheet=1000\n ngal = len(galnumber)\n nsheets = int(ngal/gals_per_sheet)\n if (ngal/gals_per_sheet - nsheets) > 0:\n nsheets += 1\n for i in range(nsheets):\n start_index = i*1000\n end_index = start_index + 1000\n if end_index > len(self.AGCflag):\n pdt = t[start_index:len(self.AGCflag)].to_pandas()\n else:\n pdt = t[start_index:end_index].to_pandas()\n\n sheet_name = 'Sheet'+str(i)\n # Write each dataframe to a different worksheet.\n pdt.to_excel(writer, sheet_name=sheet_name)\n\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n def plot_stars(self,startgal=None):\n plt.close('all')\n\n # load finished sample\n byeye = ascii.read('virgo_check_sample_by_eye.csv',delimiter=',')\n\n \n self.starflag = byeye['class'] == 3\n star_indices = np.arange(len(self.starflag))[self.starflag]\n \n #print('LENGTH OF GALIDS IN FOV = ',len(self.galids_in_fov))\n #self.plotimages(flag,outfile_string='All Galaxies',agcflag=False,onlyflag=True)\n ngal = sum(self.starflag)\n ngalperplot = 20\n nplots = np.floor(ngal/ngalperplot)\n #galids_in_fov = []\n if (ngal/ngalperplot - nplots) > 0:\n nplots += 1\n nplots = int(nplots)\n endindex = None\n if startgal is None:\n allplots = [i for i in range(nplots)]\n else:\n first_plot = int(np.floor(startgal/ngalperplot))\n allplots = [i for i in range(first_plot,nplots)]\n ncol=5\n nrow=4\n for i in allplots:\n #for i in range(1):\n #plt.close('all')\n startindex = i*ngalperplot\n s1 = '%04d'%(startindex)\n n2 = startindex+19\n if n2 > (ngal-1):\n n2 = ngal-1\n endindex=n2\n print('MAKING LAST PLOT')\n s2 = '%04d'%(n2)\n print(s1,s2)\n plt.figure(figsize=(6,4))\n plt.subplots_adjust(hspace=.3,wspace=.3)\n for j in range(nrow*ncol):\n if (j+nrow*ncol*i) > (ngal-1):\n break\n plt.subplot(nrow,ncol,j+1)\n self.one_gal(star_indices[j + nrow*ncol*i],plotsingle=False)\n plt.title('gal '+str(star_indices[j+nrow*ncol*i]),fontsize=8)\n plt.xticks([])\n plt.yticks([])\n \n plt.savefig('plots/stars-'+s1+'-'+s2+'.pdf')\n\n def stars_only(self):\n # load finished sample\n byeye = ascii.read('virgo_check_sample_by_eye.csv',delimiter=',')\n\n \n self.starflag = byeye['class'] == 3\n for i in range(4):\n self.plotimages(self.starflag,outfile_string='stars',nsaflag=False,onlyflag=False)\n plt.savefig('stars-'+str(i)+'.png')\n\n\n def velhist(self):\n plt.figure()\n if self.nedflag:\n arrays = [self.hvel,self.avel,self.nvel,self.nedvel]\n names = ['HL','AGC','NSA','NED']\n 
flags = [self.HLflag,self.AGCflag,self.NSAflag,self.NEDflag]\n else:\n arrays = [self.hvel,self.avel,self.nvel]\n names = ['HL','AGC','NSA']\n flags = [self.HLflag,self.AGCflag,self.NSAflag]\n\n colors=['r','c','b','#ff7f0e']#'xkcd:goldenrod']\n mybins = np.linspace(450,3500,100)\n for i in range(len(names)):\n t = plt.hist(arrays[i][flags[i]],label=names[i],histtype='step',bins=mybins,color=colors[i])\n plt.legend(loc='upper right')\n plt.xlabel('Recession Velocity')\n plt.savefig('velhist.png')\n def positions_only(self):\n plt.figure(figsize=(10,8))\n self.nedonly = False\n if self.nedonly:\n ra = [self.hcoord.ra,self.acoord.ra,self.ncoord.ra,self.nedcoord.ra]\n dec = [self.hcoord.dec,self.acoord.dec,self.ncoord.dec,self.nedcoord.dec]\n vel = [self.hvel,self.avel,self.nvel,self.nedvel]\n flags = [self.hl_only_flag,self.agc_only_flag,self.nsa_only_flag,self.ned_only_flag]\n colors=['r','c','b','xkcd:goldenrod']\n symbols=['r+','co','b*','kD']\n edgecolors = ['r','c','w','xkcd:goldenrod']\n facecolors = ['r','c','b','None']\n sizes = [14,10,14,8]\n else:\n ra = [self.glcoord.ra,self.hcoord.ra,self.acoord.ra,self.ncoord.ra]\n dec = [self.glcoord.dec,self.hcoord.dec,self.acoord.dec,self.ncoord.dec]\n vel = [self.gvel,self.hvel,self.avel,self.nvel]\n flags = [np.ones(len(self.glcoord.ra),'bool'),self.hl_only_flag,self.agc_only_flag,self.nsa_only_flag]\n #colors=['r','c','b','xkcd:goldenrod']\n #symbols=['r+','co','b*','kD']\n #edgecolors = ['r','c','w','xkcd:goldenrod']\n #facecolors = ['r','c','b','None']\n symbols=['gs','yD','co','r^']\n facecolors = ['None','None','None','None']\n edgecolors = ['g','y','c','r']\n sizes = [2,4,4,3]\n \n \n for i,r in enumerate(ra):\n #plt.scatter(r,dec[i],c=colors[i],label=self.fields[i]+' only')\n plt.plot(r[flags[i]],dec[i][flags[i]],symbols[i],mec=edgecolors[i],mfc=facecolors[i],markersize=sizes[i],label=self.fields[i]+' only')\n plt.xlabel('RA (deg)')\n plt.ylabel('DEC (deg)')\n plt.legend()\n plt.savefig('positions-only.png')\n def positions(self):\n plt.figure(figsize=(10,8))\n self.nedonly = False\n ra = [self.glcoord.ra,self.hcoord.ra,self.acoord.ra,self.ncoord.ra]\n dec = [self.glcoord.dec,self.hcoord.dec,self.acoord.dec,self.ncoord.dec]\n vel = [self.gvel,self.hvel,self.avel,self.nvel]\n flags = [np.ones(len(self.glcoord.ra),'bool'),self.HLflag,self.AGCflag,self.NSAflag]\n symbols=['k.','yD','co','ro']\n facecolors = ['None','None','None','None']\n edgecolors = ['k','y','c','r']\n sizes = [1,4,6,7]\n fields=['GL','HL','AGC','NSA']\n \n \n for i in np.array([3,2,1,0],'i'):\n #plt.scatter(r,dec[i],c=colors[i],label=self.fields[i]+' only')\n plt.plot(ra[i][flags[i]],dec[i][flags[i]],symbols[i],mec=edgecolors[i],mfc=facecolors[i],markersize=sizes[i],label=fields[i])\n plt.xlabel('RA (deg)')\n plt.ylabel('DEC (deg)')\n plt.legend()\n plt.savefig('positions-only.png')\n \n pass\nif __name__ == '__main__':\n \n ## start with HL - match to NSA and AGC\n ## consider all matches with offsets less than 5\" to be the same galaxy\n \n\n #s = sample()\n #s = sample(max_match_offset=7.)\n print('Welcome!')\n print('')\n print('To build catalogs, try: \\n\\n \\t s=sample()\\n \\t s.get_smart() \\n')# \\n OR\\n\\t s.run_it()')\n print('\\n\\nTo read table and plot images, try: \\n\\n \\t t=fulltable()\\n \\t t.agc_only() )')\n ## track NSA and AGC names of matches\n\n ## add HL objects with closest match > 5\"\n\n ## match remaining AGC and NSA\n\n ## consider all matches with offsets less than 5\" to be the same galaxy\n\n ## list NSA name, track AGC name of 
match\n\n ## add remaining NSA galaxies with no HL and no AGC within 5\"\n\n ## add remaining AGC galaxies with no HL and no NSA within 5\"\n\n #t = fulltable()\n #t.plot_all(startgal=6850)\n #t.plot_all(startgal=8950)\n #t.plot_all()\n \n\n\n\n\n\n","sub_path":"programs/mksupersample.py","file_name":"mksupersample.py","file_ext":"py","file_size_in_byte":79717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"639696507","text":"import random\nfrom random import randint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport itertools\n\nshares = 3\n#yes_vote = shares-1\n#no_vote = shares-2\nyes_vote = 2\nno_vote = 1\n\n#list_num_users = [10,100,1000, 10000, 100000]\n#num_traits = 3\nruns = 20\n\n\n##############################################################################\n\ndeltas = {}\nratios = {}\n\ndef random_user(num_traits):\n user = {}\n for j in range(num_traits):\n user[j] = random.getrandbits(1)\n return user\n\n\ndef random_dataset(num_users):\n records = {}\n for i in range(num_users):\n records[i] = {}\n for j in range(num_traits):\n records[i][j] = random.getrandbits(1)\n return records\n\n\n# Generates a new dataset where no one has all traits specified\ndef pseudorandom_dataset(num_users, lst_trait_indces):\n records = {}\n for i in range(num_users):\n records[i] = {}\n for j in range(num_traits):\n records[i][j] = random.getrandbits(1)\n\n #Fix users who have all these traits\n while is_user_positive(records[i], lst_trait_indces):\n records[i] = random_user(num_traits)\n\n return records\n\n\n\ndef empty_dataset(num_users):\n records = {}\n for i in range(num_users):\n records[i] = {}\n for j in range(num_traits):\n records[i][j] = 0\n return records\n\n\ndef dataset_with_trait(negative_num, positive_num, lst_trait_indces):\n records = {}\n for k in range(positive_num):\n i = negative_num+k\n records[i] = {}\n for j in range(num_traits):\n records[i][j] = random.getrandbits(1)\n\n for j in lst_trait_indces: #Set specific traits to True\n records[i][j] = 1\n\n return records\n\n\ndef split_dataset(dataset):\n split_dataset = []\n for i in range(len(dataset)):\n # tmp_user_share_block = shares * [traits*[0]]\n #tmp_user_share_block = [[0 for i in range(num_traits)] for j in range(shares)]\n tmp_user_share_block = []\n for s in range(shares):\n tmp_user_share_block.append({})\n\n #Spliting loop\n for j in range(len(dataset[0])):\n for s in range(shares): #Initialization with 0's\n tmp_user_share_block[s][j] = 0\n\n trait = dataset[i][j]\n if trait == 0:\n share_indces = random.sample(range(shares), no_vote)\n # share_indces = [0]\n elif trait == 1:\n share_indces = random.sample(range(shares), yes_vote)\n # share_indces = [0,1]\n else:\n print(\"Warning: Trait not defined.\")\n\n for si in share_indces:\n tmp_user_share_block[si][j] = 1\n\n split_dataset += tmp_user_share_block.copy()\n #print(tmp_user_share_block)\n\n # print(split_users)\n random.shuffle(split_dataset) # Shuffle\n # print(split_users)\n return split_dataset\n\n\n##############################################################################\n\n\ndef is_user_positive(user, lst_trait_indces):\n for j in lst_trait_indces:\n if user[j] == 0:\n return False\n return True\n\n\ndef conditional_stat(dataset, lst_trait_indces=[]):\n count_A = 0.0\n count_B = 0.0\n\n all_but = lst_trait_indces[:-1]\n examined = lst_trait_indces[-1]\n\n for i in range(len(dataset)):\n has_all_traits = True\n for t in all_but:\n if dataset[i][t] != 1:\n 
has_all_traits = False\n\n if has_all_traits == True:\n if dataset[i][examined] == 1:\n count_A += 1\n else:\n count_B += 1\n\n print(\"A: \" + str(count_A))\n print(\"B: \" + str(count_B))\n return count_A / (count_A + count_B)\n\n\n\ndef trait_percent(dataset, lst_traits, num_shares=1.0):\n trait_ratios = []\n for t in lst_traits:\n tmp_sum = 0.0\n for i in range(len(dataset)): #For each user/ballot/split\n tmp_sum += dataset[i][t]\n\n #Find actual percentage\n if num_shares != 1:\n trait_ratios.append((tmp_sum - len(dataset)/num_shares)/(len(dataset)/num_shares)) #Remove the extra counts\n #trait_ratios.append(tmp_sum * num_shares / len(dataset) - 1.0)\n else:\n trait_ratios.append(tmp_sum/len(dataset))\n\n return trait_ratios\n\n\ndef correlation_stat(dataset, lst_traits):\n vectorA=[]\n vectorB=[]\n from scipy import stats\n import numpy\n\n for i in range(len(dataset)): #For each user/ballot/split\n vectorA.append(dataset[i][0])\n vectorB.append(dataset[i][1])\n\n corr = stats.pearsonr(vectorA,vectorB)\n #corr = stats.matthews_corrcoef(vectorA,vectorB)\n #corr = numpy.corrcoef(vectorA, vectorB)\n #print(stats.ttest_ind(vectorA, vectorB))\n\n print(numpy.cov(vectorA, vectorB))\n\n\n print(corr)\n print(corr[0]*corr[0])\n return corr\n\n\ndef association_rules(observation_vecs, candidate_vec, num_shares=1, num_X_and_Y=-1, num_X_and_not_Y=-1):\n ##Support for X\n if num_shares == 1:\n supportX = 0\n for i in range(len(observation_vecs[0])):\n all_match = True\n for vec in observation_vecs:\n if vec[i]!=1:\n all_match=False\n if all_match:\n supportX += 1\n supportX /= len(observation_vecs[0])\n else:\n supportX = (num_X_and_not_Y + num_X_and_Y)/ (len(observation_vecs[0])/num_shares)\n\n\n\n ##Support for Y\n if num_shares == 1:\n supportY = np.sum(candidate_vec)/len(candidate_vec)\n else:\n supportY = (3*np.sum(candidate_vec))/len(candidate_vec)-1\n\n\n # Support for (X and Y)\n if num_shares!=1:\n supportIntersect = num_X_and_Y/(len(observation_vecs[0])/num_shares)\n else:\n supportIntersect = 0\n for i in range(len(observation_vecs[0])):\n all_match = True\n for vec in observation_vecs:\n if vec[i]!=1:\n all_match=False\n\n if (all_match == True) and (candidate_vec[i] == 1):\n supportIntersect += 1\n supportIntersect /= len(candidate_vec)\n\n confidence = supportIntersect / supportX\n lift = (supportIntersect) / (supportX * supportY)\n conviction = 1\n #conviction = (1 - supportY) / (1 - confidence)\n\n return (supportX, supportY, supportIntersect, confidence, lift, conviction)\n\n\n'''\ndef association_rules_split(observation_vecs, candidate_vec, intersection_counts):\n supports = []\n for t in observation_vecs:\n #print(t)\n supports.append((np.sum(t)-len(t)/3)/(len(t)/3))\n\n support_cand = np.sum(candidate_vec)/(len(candidate_vec))\n\n supportIntersect = intersection_counts\n supportIntersect /= (len(candidate_vec)/3)\n confidence = supportIntersect/((np.prod(supports)))\n lift = (supportIntersect)/((np.prod(supports)*support_cand))\n conviction = (1-support_cand)/(1-confidence)\n\n return(supports, supportIntersect, confidence, lift, conviction)\n'''\n\n\ndef shift(seq, n):\n n = n % len(seq)\n return seq[n:] + seq[:n]\n\n\ndef find_patterns(shares):\n patterns = zip(*shares)\n #print(shares)\n #print(list(patterns))\n return patterns\n\ndef compute_expected_values(shares, num_traits):\n possible_combinations = list(itertools.product([0, 1], repeat=num_traits)) #AnB, AnnotB, notAnB, notAnnotB\n counts_patterns_encountered = dict.fromkeys(possible_combinations, 0) #For two traits 
this would be: 00,01,10,11 etc\n\n permutations = []\n #permutations.append([shares[0]])\n #print(shares)\n #for r in shares[1:]:\n for r in shares:\n tmp_perms = []\n for i in range(len(r)):\n tmp_perms.append(shift(r,-i))\n permutations.append(tmp_perms)\n #print(permutations)\n combinations = list(itertools.product(*permutations))\n #print(combinations)\n\n for l in combinations:\n patterns_founds=find_patterns(l)\n for p in patterns_founds:\n counts_patterns_encountered[p]+=1\n\n coefficients = np.array([])\n for key, value in counts_patterns_encountered.items():\n counts_patterns_encountered[key] = value/(3**num_traits) #Divide by the number of shares to get the expected values\n #print(counts_combinations_encountered[key])\n coefficients = np.append(coefficients, counts_patterns_encountered[key]) #Convert to numpy array\n return(coefficients)\n\n\ndef compute_T_inv(num_shares, num_traits):\n from numpy.linalg import inv\n T = []\n\n possible_variables_combinations = list(itertools.product([0, 1], repeat=num_traits)) #AnB, AnnotB, notAnB, notAnnotB\n\n for c in possible_variables_combinations:\n #print(c)\n tmp_shares = []\n for e in c:\n if e == 1:\n tmp_shares.append([1,1,0])\n elif e == 0:\n tmp_shares.append([1,0,0])\n #print(tmp_shares)\n tmp_exp = compute_expected_values(tmp_shares, num_traits)\n T.append(tmp_exp)\n\n T = np.matrix(T)\n #print(T)\n T_inv = inv(T)\n #print(T_inv)\n return T_inv\n\n\ndef all_patterns_count(dataset):\n possible_patterns = list(itertools.product([0, 1], repeat=len(dataset[0]))) #AnB, AnnotB, notAnB, notAnnotB\n counts = []\n for p in possible_patterns:\n counts.append(pattern_count(dataset,p))\n return(counts)\n\n\ndef pattern_count(dataset, pattern=[]):\n count = 0\n for i in range(len(dataset)):\n match = True\n for ind in range(len(dataset[0])):\n if dataset[i][ind]!=pattern[ind]:\n match=False\n if match:\n count+=1\n\n return count\n\n\ndef split_and_compute(ds):\n ds_split = split_dataset(ds)\n # print(ds_split)\n\n # conv_matrix = 1/3 * np.matrix([ [4.0, -2, -2, 1],\n # [-2, 4, 1, -2],\n # [-2, 1, 4, -2],\n # [1, -2, -2, 4]])\n # print(conv_matrix)\n\n\n # count_original_11 = pattern_count(ds, lst_traits, [1,1])\n # count_original_10 = pattern_count(ds, lst_traits, [1,0])\n # count_original_01 = pattern_count(ds, lst_traits, [0,1])\n # count_original_00 = pattern_count(ds, lst_traits, [0,0])\n\n\n # ratio_10 = count_original_10/nu\n # ratio_01 = count_original_01/nu\n # ratio_00 = count_original_00/nu\n\n # count_split_11 = pattern_count(ds_split, lst_traits, [1,1])\n # count_split_10 = pattern_count(ds_split, lst_traits, [1,0])\n # count_split_01 = pattern_count(ds_split, lst_traits, [0,1])\n # count_split_00 = pattern_count(ds_split, lst_traits, [0,0])\n\n\n counts_original = all_patterns_count(ds)\n counts_split = all_patterns_count(ds_split)\n return (ds_split, counts_original, counts_split)\n\n\n'''\ndef full_col_count(dataset, lst_trait_indces=[], lowerlimit = -1, upperlimit=-1):\n\n if lowerlimit == -1:\n lowerlimit = 0\n\n if upperlimit == -1:\n upperlimit = len(dataset)\n\n count = 0\n for i in range(lowerlimit, upperlimit):\n if is_user_positive(dataset[i],lst_trait_indces):\n count += 1\n\n return count# / len(dataset)\n # print(ratio)\n\n\n\ndef prob_no_column(traits):\n if traits == 1:\n print(\"Error: Traits <2.\")\n elif traits == 2:\n return (2.0/3.0)*(1.0/3.0)\n else:\n sum = 0\n for i in range(traits-2):\n sum += (((2.0/3.0)**(traits-2))/(2**i))\n\n return (prob_no_column(traits-1) + (sum * 
(2.0/3.0)*(1.0/3.0)))\n'''\n\n\n########################################################################\n\nnum_shares = shares\ngen_dataset = 1\n\n\nlst_of_proportions = ['0.1','0.3','0.5','0.7']\n\nlst_lst_traits = []\nlst_lst_traits.append(['F0', 'F2', 'F9'])\nlst_lst_traits.append(['F0','F2', 'F4'])\nlst_lst_traits.append(['F2','F4', 'F9'])\nlst_lst_traits.append(['F2','F4', 'F5'])\n\n\nfor index,lst_traits in enumerate(lst_lst_traits):\n items = lst_traits\n print(items)\n\n ds = {}\n raw_data = open('../datasets/synthetic/varying_traits/0.99_tsz10_tct100.0k.txt')\n for i, line in enumerate(raw_data):\n tmp_record = {}\n\n #Initialize\n for item in items:\n tmp_record[items.index(item)] = 0\n\n #Add elements\n for item in line.split(\" \"):\n if item in items:\n tmp_record[items.index(item)] = 1\n\n ds[i] = tmp_record\n nu = len(ds)\n num_traits = len(ds[0])\n #print(nu)\n raw_data.close()\n\n #print(ds)\n\n\n print(\"\\n\\n====================== Experiment Params ==================================\")\n print(\"Shares : \" + str(shares))\n print(\"Users : \" + str(nu))\n # print(\"Positive Users : \" + str(positive_nu))\n # print(\"Negative Users : \" + str(negative_nu))\n # print(\"Ratio : \" + str(ratio))\n print(\"Traits : \" + str(lst_traits))\n print(\"===========================================================================\\n\")\n\n # print(\"====================== Dataset Details ====================================\")\n # print(\"Original Dataset Size : \" + str(len(ds)))\n # print(\"Split Dataset Size : \" + str(len(ds_split)))\n # print(\"===========================================================================\\n\\n\")\n\n\n for iter in range(100): #Experiment repeat\n (ds_split, counts_original, counts_split) = split_and_compute(ds)\n\n\n counts_split_vec = np.array(counts_split).reshape((-1, 1)) #Transpose\n #print(counts_split_vec)\n #print(counts_original)\n #print(counts_split)\n #counts_split = list(reversed(counts_split))\n #print(counts_split)\n\n conv_matrix = compute_T_inv(num_shares, num_traits)\n #print(\"---------\")\n #print(conv_matrix)\n estimates = (conv_matrix*counts_split_vec).tolist()\n #print(\"---------\")\n #print(estimates)\n #print(\"---------\")\n estimate_num_X_and_Y = estimates[2**num_traits-1][0]\n estimate_ratio_X_and_Y = estimates[2**num_traits-1][0]/nu\n\n estimate_num_X_and_not_Y = estimates[2**num_traits-2][0]\n estimate_ratio_X_and_not_Y = estimates[2**num_traits-2][0]/nu\n\n\n ################## GroundTruth\n observations = [[] for t in range(num_traits-1)]\n for i in range(nu): # For each user\n for t in range(num_traits-1):\n observations[t].append(ds[i][t])\n\n cand_vect =[]\n for i in range(nu):\n cand_vect.append(ds[i][num_traits-1])\n\n rules_orig = association_rules(observations, cand_vect, num_shares=1)\n\n\n ################## Evaluation\n observations = [[] for t in range(num_traits-1)]\n for i in range(nu*num_shares): # For each split\n for t in range(num_traits-1):\n observations[t].append(ds_split[i][t])\n\n cand_vect =[]\n for i in range(nu*num_shares):\n cand_vect.append(ds_split[i][num_traits-1])\n\n rules_split = association_rules(observations, cand_vect, num_shares=num_shares, num_X_and_not_Y=estimate_num_X_and_not_Y , num_X_and_Y=estimate_num_X_and_Y)\n\n support_err = ((abs(rules_split[2]-rules_orig[2])/float(rules_orig[2]))*100)\n confidence_err = ((abs(rules_split[3]-rules_orig[3])/float(rules_orig[3]))*100)\n lift_err = ((abs(rules_split[4]-rules_orig[4])/float(rules_orig[4]))*100)\n\n print(\"Iteration: \" + 
str(iter))\n print(\"Support Inters.(Percent Error) : \" + str(support_err) + \"%\")\n print(\"Confidence (Percent Error) : \" + str(confidence_err) + \"%\")\n print(\"Lift (Percent Error) : \" + str(lift_err) + \"%\")\n with open(\"../results//num_of_traits//\" + '-'.join(lst_traits)+\"_3.csv\", \"a+\") as results:\n print(','.join([str(counts_original[-1]), str(iter), str(support_err), str(confidence_err), str(lift_err)]))\n results.write(','.join([str(counts_original[-1]), lst_of_proportions[index], '-'.join(lst_traits), str(iter), str(support_err), str(confidence_err), str(lift_err)])+\"\\n\")\n\n ##################\n #trait_percent_original = trait_percent(ds, lst_traits, 1)\n #trait_percent_split = trait_percent(ds_split, lst_traits, num_shares)\n #strait_corr = correlation_stat(ds, lst_traits)\n #trait_corr = correlation_stat(ds_split, lst_traits)\n\n '''\n print(\"========================= Intermediate Values =============================\")\n #print(\"Ratio : \" + str(ratio))\n print(\"Ratio Estimate : \" + str(estimate_ratio_X_and_Y))\n print(\"Groundtruth : \" + str(counts_original[-1]))\n print(\"Estimate : \" + str(estimate_num_X_and_Y))\n print(\"Percent ratio ) : \" + str((float((abs(estimate_num_X_and_Y-counts_original[-1]))/counts_original[-1])*100)) + \"%\")\n print(\" \")\n \n \n \n print(\"========================= Results =========================================\")\n print(\"Original Dataset\")\n print(\"SupportX : \" + str(rules_orig[0]))\n print(\"SupportY : \" + str(rules_orig[1]))\n print(\"Support Intersection : \" + str(rules_orig[2]))\n print(\"Confidence : \" + str(rules_orig[3]))\n print(\"Lift : \" + str(rules_orig[4]))\n #print(\"Conviction : \" + str(rules_orig[5]))\n print(\"------\")\n print(\"Split Dataset\")\n print(\"SupportX : \" + str(rules_split[0]))\n print(\"SupportY : \" + str(rules_split[1]))\n print(\"Support Intersection : \" + str(rules_split[2]))\n print(\"Confidence : \" + str(rules_split[3]))\n print(\"Lift : \" + str(rules_split[4]))\n #print(\"Conviction : \" + str(rules_split[5]))\n print(\"------\")\n print(\"SupportX (Percent Error) : \" + str((abs(rules_split[0]-rules_orig[0])/rules_orig[0])*100) + \"%\")\n print(\"SupportY (Percent Error) : \" + str((abs(rules_split[1]-rules_orig[1])/rules_orig[1])*100) + \"%\")\n print(\"Support Inters.(Percent Error) : \" + str((abs(rules_split[2]-rules_orig[2])/rules_orig[2])*100) + \"%\")\n print(\"Confidence (Percent Error) : \" + str((abs(rules_split[3]-rules_orig[3])/rules_orig[3])*100) + \"%\")\n print(\"Lift (Percent Error) : \" + str((abs(rules_split[4]-rules_orig[4])/rules_orig[4])*100) + \"%\")\n #print(\"Conviction (Percent Error) : \" + str((abs(rules_split[5]-rules_orig[5])/rules_orig[5])*100) + \"%\")\n \n #print(\"Estimated Positives : \" + str(expected_PI_positives))\n #print(\"Estimated Negatives : \" + str(expected_PI_negatives))\n #print(\"Percent ratio (True Est.) : \" + str(math.ceil(float((abs(estimated_PI_ratio-ratio))/ratio)*100)) + \"%\")\n #print(\"Error Percent (True Est.) 
: \" + str(math.ceil(float((abs(estimated_PI_ratio-ratio)))*100)) + \"%\")\n print(\"===========================================================================\\n\\n\")\n '''\n\n\n\n '''\n print(\"10 Ratio : \" + str(ratio_10))\n print(\"10 Ratio Estimate : \" + str(estimate_ratio_10))\n print(\"Groundtruth #10 : \" + str(ratio_10*nu))\n print(\"Estimate 10 : \" + str(estimate_num_10))\n print(\"Percent ratio (10) : \" + str((float((abs(estimate_ratio_10-ratio_10))/ratio_10)*100)) + \"%\")\n print(\" \")\n print(\"01 Ratio : \" + str(ratio_01))\n print(\"01 Ratio Estimate : \" + str(estimate_ratio_01))\n print(\"Groundtruth #01 : \" + str(ratio_01*nu))\n print(\"Estimate 01 : \" + str(estimate_num_01))\n print(\"Percent ratio (01) : \" + str((float((abs(estimate_ratio_01-ratio_01))/ratio_01)*100)) + \"%\")\n print(\" \")\n print(\"00 Ratio : \" + str(ratio_00))\n print(\"00 Ratio Estimate : \" + str(estimate_ratio_00))\n print(\"Groundtruth #00 : \" + str(ratio_00*nu))\n print(\"Estimate 00 : \" + str(estimate_num_00))\n print(\"Percent ratio (00) : \" + str((float((abs(estimate_ratio_00-ratio_00))/ratio_00)*100)) + \"%\")\n print(\" \")\n #print(\"If independent uniformly distr. events: \" + str(0.5**len(lst_traits)))\n #print(\"Positives Orig : \" + str(count_original))\n #print(\"Total Columns in split dataset : \" + str(count_split))\n #print(\"Positives Estimated : \" + str(estimate_positive_nu_PI))\n #print(\"Percent ratio (Total) : \" + str(math.ceil(float(abs(count_split-expected_count_PI))/float(count_split)*100)) + \"%\")\n \n print(\"\")\n #print(\"Count Total : \" + str(count_positives+count_negatives))\n #print(\"Count Positives : \" + str(count_positives))\n #print(\"Expected Count Positives : \" + str(expected_count_PI_positives))\n #print(\"Percent ratio (Positives) : \" + str(math.ceil(float(abs(count_positives-expected_count_PI_positives))/float(count_positives)*100)) + \"%\")\n print(\"\")\n #print(\"Count Negatives : \" + str(count_negatives))\n #print(\"Expected Count Negatives : \" + str(expected_count_PI_negatives))\n #print(\"Percent ratio (Negatives) : \" + str(math.ceil(float(abs(count_negatives-expected_count_PI_negatives))/float(count_negatives)*100)) + \"%\")\n \n print(\"\")\n #print(\"Pr[ 1 column | negative] : \" + str(pr_negatives_one_column))\n #print(\"Pr[zero columns | positive] : \" + str(pr_positive_zero_columns))\n #print(\"Pr[only 1 column | positive] : \" + str(pr_positive_only_one_column))\n #print(\"Pr[two columns | positive] : \" + str(pr_positive_two_columns))\n #print(\"Pr[one or two columns | positive] : \" + str(pr_positive_two_columns+pr_positive_only_one_column))\n #print(\"Pr[one or (2X) two columns | positive] : \" + str(2*pr_positive_two_columns+pr_positive_only_one_column))\n #print(\"Pr[positive at least 1 column] old : \" + str(pr_positive_1st_column+pr_positive_2nd_column*pr_positive_1st_column))\n #print(\"Pr[negative 1 column] : \" + str(pr_negatives_one_column))\n #print(\"\\# of negative patients forming col.] 
: \" + str(pr_negative_column*nu)\n '''\n\n print(\"===========================================================================\\n\\n\")\n\n\n\n","sub_path":"ThreeBallot/exp_accuracy_numtraits/accuracy_numtraits_3.py","file_name":"accuracy_numtraits_3.py","file_ext":"py","file_size_in_byte":22890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"73110471","text":"\"\"\"\n\nPerform basic benchmarking to see how changes\nimpact performance.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ftperiodogram.modeler import FastTemplatePeriodogram\nfrom ftperiodogram.template import Template\nfrom ftperiodogram.utils import weights \n\nimport ftperiodogram.core as pdg\nimport ftperiodogram.pseudo_poly as ppol\nimport ftperiodogram.summations as tsums\n\nimport pstats\nfrom time import time as time\nimport cProfile\n\nrand = np.random.RandomState(42)\n\ndef get_data(n=30, timebase=1):\n t = timebase * np.sort(rand.rand(n))\n y = rand.randn(n)\n y_err = np.ones_like(y)\n\n return t, y, y_err\n\ndef get_template(nh=3):\n cn = 2 * rand.rand(nh) - 1\n sn = 2 * rand.rand(nh) - 1\n\n return Template(cn, sn)\n\ndef get_modeler(ndata, nh, precompute=True):\n\n t, y, yerr = get_data(n=ndata)\n template = get_template(nh=nh)\n if precompute:\n template.precompute()\n\n modeler = FastTemplatePeriodogram(template=template)\n modeler.fit(t, y, yerr)\n\n return modeler\n\ndef wrap_timer(func, nfreqs=None, name=None):\n if name is None:\n name = func.__name__\n def wfunc(*args, **kwargs):\n t0 = time()\n rvals = func(*args, **kwargs)\n dt = time() - t0\n ftext = \"\" if nfreqs is None else \"(%.3e s / freq)\"%(dt/nfreqs)\n print(\" %-30s: %.3e s %s\"%(name, dt, ftext))\n return rvals\n return wfunc\n\ndef wrap_timer_avg(func, name=None):\n def wfunc(agen, kwgen, ngen, name=name):\n dts = []\n rvals = []\n name = func.__name__ if name is None else name\n for i in range(ngen):\n args = agen(i)\n kwargs = kwgen(i)\n\n t0 = time()\n rvals.append(func(*args, **kwargs))\n dts.append(time() - t0)\n\n print(\" %-50s: %.3e s (%.3e +/- %.3e avg)\"%(name, sum(dts), np.mean(dts), np.std(dts)))\n return rvals\n return wfunc\n\n\ndef detailed_timing(mod):\n w = weights(mod.dy)\n freqs = mod.autofrequency()\n nh = len(mod.template.c_n)\n ptensors = mod.template.ptensors\n\n direct_summations = wrap_timer(tsums.direct_summations, nfreqs=len(freqs))\n fast_summations = wrap_timer(tsums.fast_summations, nfreqs=len(freqs))\n get_final_ppoly = wrap_timer_avg(ppol.get_final_ppoly)\n get_final_ppoly_components = wrap_timer_avg(ppol.get_final_ppoly_components)\n get_final_roots_faster = wrap_timer_avg(ppol.get_final_roots_faster)\n compute_zeros = wrap_timer_avg(ppol.compute_zeros)\n compute_zeros_multifrequency = wrap_timer(ppol.compute_zeros_multifrequency, nfreqs=len(freqs))\n autopower = wrap_timer(mod.autopower, nfreqs=len(freqs))\n\n\n # direct_summations\n #dsums = direct_summations(mod.t, mod.y, w, freqs, nh)\n\n # fast summations\n fsums = fast_summations(mod.t, mod.y, w, freqs, nh)\n\n # compute_zeros\n agen = lambda i, fsums=fsums, ptensors=ptensors : [ ptensors, fsums[i] ]\n kgen = lambda i : { }\n zeros = compute_zeros(agen, kgen, len(freqs))\n zeros = compute_zeros_multifrequency(ptensors, fsums)\n\n # get_final_ppoly (coefficients)\n pps = get_final_ppoly(agen, kgen, len(freqs))\n\n # real_roots_pm (finding roots from coefficients)\n func = lambda pp : pp.real_roots_pm()\n \n real_roots_pm = wrap_timer_avg(func, name='real_roots_pm')\n \n agenrr 
= lambda i, pps=pps : [ pps[i] ]\n kgenrr = lambda i : {}\n\n real_roots_pm(agenrr, kgenrr, len(freqs))\n\n print(\"alternative root finding (bypasses PseudoPolynomial)\")\n print(\"----------------------------------------------------\")\n # get_final_ppoly_components (coefficients, no PP)\n ps_and_qs = get_final_ppoly_components(agen, kgen, len(freqs))\n agenrf = lambda i, paq=ps_and_qs : paq[i]\n\n # get final roots faster (bypasses the PseudoPolynomial module)\n roots = get_final_roots_faster(agenrf, kgen, len(freqs))\n\n print(\"----------------------------------------------------\")\n # modeler.autopower()\n frqs, powers = autopower(maximum_frequency=1000)\n\n\ndef autopower_chooser(nfreqs, max_frequency=None, name_prefix=\"\"):\n name_suffix = \"\" if max_frequency is None else \" (max_freq=%.1f)\"%(max_frequency)\n name = \"%s : autopower%s\"%(name_prefix, name_suffix)\n\n func = mod.autopower if max_frequency is None \\\n else lambda : mod.autopower(maximum_frequency=max_frequency)\n\n return wrap_timer(func, nfreqs=nfreqs, name=name)\n\n\nif __name__ == '__main__':\n for max_freq in [ None, 3000 ]:\n for nh in [ 1, 5 ]:\n print(\"Nharmonics = %d\"%(nh))\n print(\" Ndata\")\n print(\" -----\")\n for ndata in [ 50, 100, 200 ]:\n mod = get_modeler(ndata, nh)\n\n nfreqs = len(mod.autofrequency(maximum_frequency=max_freq))\n autopower = autopower_chooser(nfreqs, max_freq, \" %-3d\"%(ndata)) \n frqs, powers = autopower()\n\n","sub_path":"timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"527833422","text":"# --- Color Operations --------------------------------------\n\n# Return the median between A and B\ndef meanCol(A, B):\n C = []\n for i in range(len(A)):\n C.append(0)\n if A[i] > B[i]:\n C[i] = B[i] + ((A[i] - B[i]) / 2)\n elif A[i] < B[i]:\n C[i] = A[i] + ((B[i] - A[i]) / 2)\n else:\n C[i] = A[i]\n return (C)\n\n\n# Return A*B\ndef multCol(A, B):\n C = []\n for i in range(len(A)):\n C.append(0)\n C[i] = A[i] * B[i]\n return (C)\n\n\n# Return A+B\ndef addCol(A, B):\n C = []\n for i in range(len(A)):\n C.append(0)\n C[i] = A[i] + B[i]\n return (C)\n\n\n# Return A minus B\ndef minusCol(A, B):\n C = []\n for i in range(len(A)):\n C.append(0)\n C[i] = A[i] - B[i]\n return (C)\n\n\n# Return a color value blend from A to B\n# C is the percentage of the blending, 1=100%\ndef linearCol(A, B, C):\n D = []\n for i in range(len(A)):\n D.append(0)\n\n D[i] = A[i] + ((B[i] - A[i]) * C[i])\n\n return (D)\n\n\n# Transfer hue and saturation from B to A\ndef hueSatCol(A, B):\n import colorsys\n\n hsvA = colorsys.rgb_to_hsv(A[0], A[1], A[2])\n hsvB = colorsys.rgb_to_hsv(B[0], B[1], B[2])\n\n rgbB = colorsys.hsv_to_rgb(hsvB[0], hsvB[1], hsvA[2])\n\n return (rgbB)\n\n\n# Return the median color of a list of colors (only works in RGB)\ndef contrastCol(list):\n R = []\n G = []\n B = []\n\n for col in list:\n R.append(col[0])\n G.append(col[1])\n B.append(col[2])\n\n contrastPointR = sum(R) / float(len(R))\n contrastPointG = sum(G) / float(len(G))\n contrastPointB = sum(B) / float(len(B))\n\n valRGB = [contrastPointR, contrastPointG, contrastPointB]\n return (valRGB)\n\n\n# Transfer Brightness from B to A\ndef brightnessCol(A, b):\n import colorsys\n\n hsvA = colorsys.rgb_to_hsv(A[0], A[1], A[2])\n rgbOut = colorsys.hsv_to_rgb(hsvA[0], hsvA[1], (hsvA[2] + b))\n return (rgbOut)\n\n\n# Add b in saturation to A color\ndef saturationCol(A, b):\n import colorsys\n\n hsvA = 
colorsys.rgb_to_hsv(A[0], A[1], A[2])\n rgbOut = colorsys.hsv_to_rgb(hsvA[0], (hsvA[1] + b), hsvA[2])\n return (rgbOut)","sub_path":"Maya/Snippets/ColorSnippets.py","file_name":"ColorSnippets.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"13080644","text":"from math import radians, cos, sin, asin, sqrt\nfrom prioqueue import PriorityQueue\n\ndef haversine(node1, node2):\n # convert decimal degrees to radians \n R = 6372.8 # Dalam km\n\n dLat = radians(node2.latitude - node1.latitude)\n dLon = radians(node2.longitude - node1.longitude)\n lat1 = radians(node1.latitude)\n lat2 = radians(node2.latitude)\n\n a = sin(dLat/2)**2 + cos(lat1)*cos(lat2)*sin(dLon/2)**2\n c = 2*asin(sqrt(a))\n\n return R * c\nclass Astar:\n def __init__(self, graph, start, goal):\n self.graph = graph\n self.start = start\n self.goal = goal\n self.frontier = PriorityQueue()\n self.came_from = {self.start: None}\n self.total_cost = {self.start: 0}\n\n def solve(self):\n self.start.f = 0 + haversine(self.start, self.goal)\n self.frontier.insert(self.start)\n while not self.frontier.empty():\n current = self.frontier.pop()\n if current == self.goal:\n break\n \n for neighbour in current.neighbour:\n new_cost = self.total_cost[current] + haversine(current, neighbour)\n if neighbour not in self.total_cost or new_cost < self.total_cost[neighbour]:\n self.total_cost[neighbour] = new_cost\n neighbour.f = new_cost + haversine(neighbour, self.goal)\n self.frontier.insert(neighbour)\n self.came_from[neighbour] = current\n\n if (self.goal not in self.came_from):\n self.came_from = {}\n\n return self.came_from, self.total_cost\n \n def get_path(self):\n current = self.goal\n path = [current]\n while current != self.start:\n current = self.came_from[current]\n path.append(current)\n\n path.reverse()\n return path\n","sub_path":"src/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"343636141","text":"# -*- coding: utf-8 -*-\nimport re\nfrom lxml import etree\n\nfrom osf.models import Guid, OSFUser\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nNAMESPACES = {\n 'default': 'http://www.tei-c.org/ns/1.0',\n 'xml': 'http://www.w3.org/XML/1998/namespace',\n 'xi': 'http://www.w3.org/2001/XInclude',\n}\n\nclass NotModifiedException(Exception):\n pass\n\n\nclass Annotator:\n def __init__(self):\n self.__xml = \"\"\n self.__json = {}\n self.__annotator_xml_id = \"\"\n\n self.__start = 0\n self.__end = 0\n self.__fragment_to_annotate = \"\"\n self.__tags = {}\n self.__annotators_xml_ids = []\n self.__first_free_certainty_number = 0\n\n self.__fragment_annotated = \"\"\n self.__certainty_to_add = None\n self.__annotator_to_add = None\n\n self.__xml_annotated = \"\"\n\n def add_annotation(self, xml, json, annotator_guid):\n self.__xml = xml\n self.__annotator_xml_id = 'person' + annotator_guid\n\n self.__json = self.__validate_request(json)\n\n self.__get_data_from_xml()\n self.__prepare_xml_parts()\n self.__check_if_new_elements_already_exist()\n self.__create_new_xml()\n\n return self.__xml_annotated\n\n def __validate_request(self, json):\n position_params_v1 = [\n 'start_row',\n 'start_col',\n 'end_row',\n 'end_col',\n ]\n\n position_params_v2 = [\n 'start_pos',\n 'end_pos',\n ]\n\n optional_params = [\n 'category',\n 'locus',\n 'certainty',\n 'asserted_value',\n 'description',\n 'tag',\n 
'attribute_name',\n ]\n\n position_v1 = all(elements in json.keys() for elements in position_params_v1)\n position_v2 = all(elements in json.keys() for elements in position_params_v2)\n\n if not (position_v1 or position_v2):\n raise ValueError(\"No position arguments in request.\")\n\n positions_to_check = position_params_v1 if position_v1 else position_params_v2\n\n for position in positions_to_check:\n if not isinstance(json[position], (int, long)):\n raise TypeError(\"Value of '{}' is not a integer.\".format(position))\n\n if json[position] <= 0:\n raise ValueError(\"Value of '{}' must be a positive number.\".format(position))\n\n validated_json = {}\n\n if position_v1:\n start, end = self.__get_fragment_position(self.__xml, json)\n\n validated_json.update({'start_pos': start, 'end_pos': end})\n else:\n validated_json.update({'start_pos': json['start_pos'], 'end_pos': json['end_pos']})\n\n if validated_json['start_pos'] >= validated_json['end_pos']:\n raise ValueError(\"Start position of annotating fragment is greater or equal to end position.\")\n\n for param in optional_params:\n if param in json and json[param] is not None:\n validated_json.update({param: json[param]})\n else:\n validated_json.update({param: ''})\n\n return validated_json\n\n def __get_data_from_xml(self):\n self.__start, self.__end = self.__get_fragment_position(self.__xml, self.__json)\n self.__start, self.__end = self.__get_fragment_position_without_adhering_tags(self.__xml, self.__start, self.__end)\n\n self.__start, self.__end = self.__get_fragment_position_with_adhering_tags(self.__xml, self.__start, self.__end)\n self.__fragment_to_annotate = self.__xml[self.__start: self.__end]\n\n self.__tags = self.__get_adhering_tags_from_annotated_fragment(self.__fragment_to_annotate)\n self.__annotators_xml_ids = self.__get_annotators_xml_ids_from_file(self.__xml)\n certainties = self.__get_certainties_from_file(self.__xml)\n self.__first_free_certainty_number = self.__get_first_free_certainty_number(certainties, self.__json[\"tag\"])\n\n def __get_fragment_position(self, xml, json):\n if 'start_pos' in json and json['start_pos'] is not None and 'end_pos' in json and json['end_pos'] is not None:\n start = json['start_pos']\n end = json['end_pos']\n\n else:\n start, end = self.__convert_rows_and_cols_to_start_and_end(xml, json[\"start_row\"], json[\"start_col\"],\n json[\"end_row\"], json[\"end_col\"])\n\n return start, end\n\n def __convert_rows_and_cols_to_start_and_end(self, text, start_row, start_col, end_row, end_col):\n text_in_lines = text.splitlines(True)\n\n chars_to_start = 0\n chars_to_end = 0\n\n i = 0\n while i + 1 < start_row:\n chars_to_start += len(text_in_lines[i])\n i += 1\n\n chars_to_start += start_col - 1\n\n j = 0\n while j + 1 < end_row:\n chars_to_end += len(text_in_lines[j])\n j += 1\n\n chars_to_end += end_col\n\n return chars_to_start, chars_to_end\n\n def __get_fragment_position_without_adhering_tags(self, string, start, end):\n found_tag = True\n\n while found_tag:\n found_tag = False\n\n marked_fragment = string[start:end]\n\n match = re.search(r'^\\s*<[^<>]*?>\\s*', marked_fragment)\n if match is not None:\n tag_open = match.group()\n start += len(tag_open)\n found_tag = True\n\n match = re.search(r'\\s*<[^<>]*?>\\s*$', marked_fragment)\n if match is not None:\n tag_close = match.group()\n end -= len(tag_close)\n found_tag = True\n\n return start, end\n\n def __get_fragment_position_with_adhering_tags(self, string, start, end):\n found_tag = True\n\n while found_tag:\n found_tag = False\n\n 
text_before = string[:start]\n text_after = string[end:]\n\n match = re.search(r'<[^<>]*?>\\s*?$', text_before)\n if match is not None:\n tag_open = match.group()\n start -= len(tag_open)\n found_tag = True\n\n match = re.search(r'^\\s*?<[^<>]*?>', text_after)\n if match is not None:\n tag_close = match.group()\n end += len(tag_close)\n found_tag = True\n\n return start, end\n\n def __get_adhering_tags_from_annotated_fragment(self, fragment):\n tags = {}\n\n while re.search(r'^\\s*?<[^<>]*?>', fragment):\n match = re.search(r'^\\s*?<[^<>]*?>', fragment)\n\n tag_raw = match.group()\n tag = tag_raw.strip()\n tag_name = tag\n\n marks_to_remove = ['', '>']\n\n for mark in marks_to_remove:\n tag_name = tag_name.replace(mark, '')\n\n tag_name = tag_name.split(' ')[0]\n\n tag_to_add = {tag_name: {}}\n\n arguments = re.findall(r'[\\w:]+=\".*?\"', tag)\n\n for argument in arguments:\n arg_name = re.search(r'[\\w:]+=\"', argument)\n arg_name = arg_name.group()\n arg_name = arg_name.replace('=\"', '')\n\n arg_value = re.search(r'\".*?\"', argument)\n arg_value = arg_value.group()\n arg_value = arg_value.replace('\"', '')\n\n tag_to_add[tag_name].update({arg_name: arg_value})\n\n tags.update(tag_to_add)\n fragment = fragment[len(tag_raw):]\n\n return tags\n\n def __get_certainties_from_file(self, text):\n text_in_lines = text.splitlines()\n\n if 'encoding=' in text_in_lines[0]:\n text_to_parse = '\\n'.join(text_in_lines[1:])\n else:\n text_to_parse = text\n\n tree = etree.fromstring(text_to_parse)\n\n certainties = tree.xpath('//default:teiHeader'\n '//default:classCode[@scheme=\"http://providedh.eu/uncertainty/ns/1.0\"]'\n '/default:certainty', namespaces=NAMESPACES)\n\n return certainties\n\n def __get_annotators_xml_ids_from_file(self, text):\n text_in_lines = text.splitlines()\n\n if 'encoding=' in text_in_lines[0]:\n text_to_parse = '\\n'.join(text_in_lines[1:])\n else:\n text_to_parse = text\n\n tree = etree.fromstring(text_to_parse)\n\n annotators = tree.xpath('//default:teiHeader'\n '//default:listPerson[@type=\"PROVIDEDH Annotators\"]'\n '/default:person', namespaces=NAMESPACES)\n\n xml_ids = []\n for annotator in annotators:\n prefix = '{%s}' % NAMESPACES['xml']\n xml_id = annotator.get(prefix + 'id')\n\n xml_ids.append(xml_id)\n\n return xml_ids\n\n def __get_first_free_certainty_number(self, certainties, tag):\n if not tag:\n tag = 'ab'\n\n biggest_number = 0\n\n for certainty in certainties:\n id_value = certainty.attrib['target']\n\n if tag not in id_value:\n continue\n\n id_value = id_value.strip()\n\n split_values = id_value.split(' ')\n for value in split_values:\n number = value.replace('#' + tag, '')\n number = int(number)\n\n if number > biggest_number:\n biggest_number = number\n\n return biggest_number + 1\n\n def __prepare_xml_parts(self):\n # 1.Add tag to text\n if self.__json['locus'] == '' and self.__json['tag'] != '' and self.__json['attribute_name'] == '':\n self.__fragment_annotated, _ = self.__add_tag(self.__fragment_to_annotate, self.__json[\"tag\"])\n\n # 2.Add certainty without tag to text\n elif self.__json['locus'] == 'value' and self.__json['tag'] == '' and self.__json['attribute_name'] == '':\n self.__fragment_annotated, annotation_ids = self.__add_tag(self.__fragment_to_annotate, 'ab',\n uncertainty=True)\n self.__certainty_to_add = self.__create_certainty_description_for_value_or_name(self.__json, annotation_ids,\n self.__annotator_xml_id)\n self.__annotator_to_add = self.__create_annotator(self.__annotator_xml_id)\n\n # 3.Add certainty with tag to text\n elif 
self.__json['locus'] == 'value' and self.__json['tag'] != '' and self.__json['attribute_name'] == '':\n self.__fragment_annotated, annotation_ids = self.__add_tag(self.__fragment_to_annotate, self.__json[\"tag\"],\n uncertainty=True)\n self.__certainty_to_add = self.__create_certainty_description_for_value_or_name(self.__json, annotation_ids,\n self.__annotator_xml_id)\n self.__annotator_to_add = self.__create_annotator(self.__annotator_xml_id)\n\n # 4.Add certainty to tag\n elif self.__json['locus'] == 'name' and self.__json['tag'] != '' and self.__json['attribute_name'] == '':\n self.__fragment_annotated, annotation_ids = self.__add_tag(self.__fragment_to_annotate, self.__json[\"tag\"],\n uncertainty=True)\n self.__certainty_to_add = self.__create_certainty_description_for_value_or_name(self.__json, annotation_ids,\n self.__annotator_xml_id)\n self.__annotator_to_add = self.__create_annotator(self.__annotator_xml_id)\n\n if self.__json['tag'] not in self.__tags and self.__json['asserted_value'] != '':\n raise ValueError(\"You can't add asserted value for tag name when you creating new tag.\")\n\n # 5.Add reference to tag\n elif self.__json['locus'] == 'attribute' and self.__json['tag'] != '' and \\\n self.__json['attribute_name'] == 'sameAs' and self.__json['asserted_value'] != '':\n self.__fragment_annotated, annotation_ids = self.__add_tag(self.__fragment_to_annotate, self.__json[\"tag\"],\n uncertainty=True)\n self.__certainty_to_add = self.__create_certainty_description_for_attribute(self.__json, annotation_ids,\n self.__annotator_xml_id)\n self.__annotator_to_add = self.__create_annotator(self.__annotator_xml_id)\n\n # 6.Add attribute to tag\n elif self.__json['locus'] == 'attribute' and self.__json['tag'] != '' and \\\n self.__json['attribute_name'] != '' and self.__json['asserted_value'] != '':\n self.__fragment_annotated, annotation_ids = self.__add_tag(self.__fragment_to_annotate, self.__json[\"tag\"],\n uncertainty=True)\n self.__certainty_to_add = self.__create_certainty_description_for_attribute(self.__json, annotation_ids,\n self.__annotator_xml_id)\n self.__annotator_to_add = self.__create_annotator(self.__annotator_xml_id)\n\n else:\n raise ValueError(\"There is no method to modify xml according to given parameters.\")\n\n def __add_tag(self, annotated_fragment, tag, uncertainty=False):\n new_annotated_fragment = ''\n annotation_ids = []\n\n while len(annotated_fragment) > 0:\n # handle xml tag\n if re.search(r'^\\s*?<[^<>]*?>', annotated_fragment):\n match = re.search(r'^\\s*?<[^<>]*?>', annotated_fragment)\n tag_to_move = match.group()\n\n end_tag = '\\s]+', tag_to_move)\n tag_begin = match.group()\n\n if 'xml:id=\"' not in tag_to_move:\n id = '{0}{1:06d}'.format(tag, self.__first_free_certainty_number)\n attribute = ' xml:id=\"{0}\"'.format(id)\n\n annotation_ids.append('#' + id)\n\n new_tag_to_move = \"{0}{1}{2}\".format(tag_to_move[:len(tag_begin)], attribute,\n tag_to_move[len(tag_begin):])\n\n self.__first_free_certainty_number += 1\n else:\n match = re.search(r'xml:id=\".*?\"', tag_to_move)\n existing_id = match.group()\n existing_id = existing_id.replace('xml:id=\"', '')\n existing_id = existing_id.replace('\"', '')\n\n annotation_ids.append('#' + existing_id)\n new_tag_to_move = tag_to_move\n\n new_annotated_fragment += new_tag_to_move\n else:\n new_annotated_fragment += tag_to_move\n\n annotated_fragment = annotated_fragment[len(tag_to_move):]\n\n # handle text\n else:\n match = re.search(r'^\\s*[^<>]+', annotated_fragment)\n text_to_move = match.group()\n\n if 
tag in self.__tags:\n new_annotated_fragment += text_to_move\n\n annotated_fragment = annotated_fragment[len(text_to_move):]\n\n else:\n attribute = \"\"\n\n if uncertainty:\n id = '{0}{1:06d}'.format(tag, self.__first_free_certainty_number)\n attribute = ' xml:id=\"{0}\"'.format(id)\n\n annotation_ids.append('#' + id)\n\n tag_open = '<{0}{1}>'.format(tag, attribute)\n tag_close = ''.format(tag)\n\n new_annotated_fragment += tag_open + text_to_move + tag_close\n\n annotated_fragment = annotated_fragment[len(text_to_move):]\n\n if uncertainty:\n self.__first_free_certainty_number += 1\n\n return new_annotated_fragment, annotation_ids\n\n def __create_certainty_description_for_value_or_name(self, json, annotation_ids, user_uuid):\n target = u\" \".join(annotation_ids)\n\n certainty = u''.format(json['category'], json['locus'], json['certainty'], user_uuid, target)\n\n new_element = etree.fromstring(certainty)\n\n if json[\"asserted_value\"]:\n new_element.set('assertedValue', json[\"asserted_value\"])\n\n if json[\"description\"]:\n description = etree.Element(\"desc\")\n description.text = json[\"description\"]\n\n new_element.append(description)\n\n return new_element\n\n def __create_certainty_description_for_attribute(self, json, annotation_ids, user_uuid):\n target = u\" \".join(annotation_ids)\n\n certainty = u''.format(json['category'], json['attribute_name'], json['certainty'],\n user_uuid, target, json['asserted_value'])\n\n new_element = etree.fromstring(certainty)\n\n if json[\"description\"]:\n description = etree.Element(\"desc\")\n description.text = json[\"description\"]\n\n new_element.append(description)\n\n return new_element\n\n def __create_annotator(self, user_xml_id):\n user_guid = user_xml_id.replace('person', '')\n\n annotator_data = self.__get_user_data_from_db(user_guid)\n\n annotator = u\"\"\"\n \n \n {1}\n {2}\n {3}\n \n {4}\n \n \"\"\".format(user_xml_id, annotator_data['forename'], annotator_data['surname'], annotator_data['email'],\n annotator_data['link'])\n\n annotator_xml = etree.fromstring(annotator)\n\n return annotator_xml\n\n def __get_user_data_from_db(self, user_guid):\n guid = Guid.objects.get(_id=user_guid)\n osf_user = OSFUser.objects.get(id=guid.object_id)\n\n data = {\n 'forename': osf_user.given_name,\n 'surname': osf_user.family_name,\n 'email': osf_user.username,\n 'link': 'https://providedh.ehum.psnc.pl/' + user_guid + '/',\n }\n\n return data\n\n def __check_if_new_elements_already_exist(self):\n if self.__json['locus'] == '' and self.__json['tag'] in self.__tags:\n raise NotModifiedException('This tag already exist.')\n\n if self.__certainty_to_add is not None:\n xml = self.__xml\n\n xml_in_lines = xml.splitlines()\n if 'encoding=' in xml_in_lines[0]:\n xml = '\\n'.join(xml_in_lines[1:])\n\n tree = etree.fromstring(xml)\n xpath = '//default:teiHeader' \\\n '//default:classCode[' \\\n '@scheme=\"http://providedh.eu/uncertainty/ns/1.0\"]' \\\n '//default:certainty[' \\\n '@category=\"{0}\" and ' \\\n '@locus=\"{1}\" and ' \\\n '@cert=\"{2}\" and ' \\\n '@target=\"{3}\"'.format(self.__certainty_to_add.attrib['category'],\n self.__certainty_to_add.attrib['locus'],\n self.__certainty_to_add.attrib['cert'],\n self.__certainty_to_add.attrib['target'])\n\n if self.__json['asserted_value']:\n xpath += ' and @assertedValue=\"{0}\"'.format(self.__json['asserted_value'])\n\n xpath += ']'\n\n existing_certainties = tree.xpath(xpath, namespaces=NAMESPACES)\n\n if existing_certainties and self.__json['description']:\n descriptions = tree.xpath(xpath + 
'/default:desc', namespaces=NAMESPACES)\n\n for desc in descriptions:\n if desc.text == self.__json['description']:\n raise NotModifiedException('This certainty already exist.')\n\n elif existing_certainties and not self.__json['description']:\n raise NotModifiedException('This certainty already exist.')\n\n def __create_new_xml(self):\n xml_annotated = self.__add_tagged_string(self.__xml, self.__fragment_annotated)\n\n xml_annotated_in_lines = xml_annotated.splitlines()\n if 'encoding=' in xml_annotated_in_lines[0]:\n xml_annotated = '\\n'.join(xml_annotated_in_lines[1:])\n\n if self.__annotator_xml_id not in self.__annotators_xml_ids and self.__annotator_to_add is not None:\n xml_annotated = self.__add_annotator(xml_annotated, self.__annotator_to_add)\n\n if self.__certainty_to_add is not None:\n xml_annotated = self.__add_certainty(xml_annotated, self.__certainty_to_add)\n\n xml_annotated = self.__reformat_xml(xml_annotated)\n\n if 'encoding=' in xml_annotated_in_lines[0]:\n xml_annotated = '\\n'.join((xml_annotated_in_lines[0], xml_annotated))\n\n if 'xml version=\"' not in xml_annotated:\n xml_annotated = '\\n'.join((u'', xml_annotated))\n\n self.__xml_annotated = xml_annotated\n\n def __add_tagged_string(self, xml, new_fragment):\n new_xml = xml[:self.__start] + new_fragment + xml[self.__end:]\n\n return new_xml\n\n def __add_annotator(self, text, annotator):\n tree = etree.fromstring(text)\n\n list_person = tree.xpath('//default:teiHeader'\n '//default:listPerson[@type=\"PROVIDEDH Annotators\"]', namespaces=NAMESPACES)\n\n if not list_person:\n tree = self.__create_list_person(tree)\n list_person = tree.xpath('//default:teiHeader'\n '//default:listPerson[@type=\"PROVIDEDH Annotators\"]', namespaces=NAMESPACES)\n\n list_person[0].append(annotator)\n\n text = etree.tounicode(tree)\n\n return text\n\n def __create_list_person(self, tree):\n prefix = \"{%s}\" % NAMESPACES['default']\n\n ns_map = {\n None: NAMESPACES['default']\n }\n\n profile_desc = tree.xpath('//default:teiHeader/default:profileDesc', namespaces=NAMESPACES)\n\n if not profile_desc:\n tei_header = tree.xpath('//default:teiHeader', namespaces=NAMESPACES)\n profile_desc = etree.Element(prefix + 'profileDesc', nsmap=ns_map)\n tei_header[0].append(profile_desc)\n\n partic_desc = tree.xpath('//default:teiHeader/default:profileDesc/default:particDesc', namespaces=NAMESPACES)\n\n if not partic_desc:\n profile_desc = tree.xpath('//default:teiHeader/default:profileDesc', namespaces=NAMESPACES)\n partic_desc = etree.Element(prefix + 'particDesc', nsmap=ns_map)\n profile_desc[0].append(partic_desc)\n\n list_person = tree.xpath(\n '//default:teiHeader/default:profileDesc/default:particDesc/default:listPerson[@type=\"PROVIDEDH Annotators\"]',\n namespaces=NAMESPACES)\n\n if not list_person:\n partic_desc = tree.xpath('//default:teiHeader/default:profileDesc/default:particDesc',\n namespaces=NAMESPACES)\n list_person = etree.Element(prefix + 'listPerson', type=\"PROVIDEDH Annotators\", nsmap=ns_map)\n partic_desc[0].append(list_person)\n\n return tree\n\n def __add_certainty(self, text, certainty):\n tree = etree.fromstring(text)\n\n certainties = tree.xpath('//default:teiHeader'\n '//default:classCode[@scheme=\"http://providedh.eu/uncertainty/ns/1.0\"]',\n namespaces=NAMESPACES)\n\n if not certainties:\n tree = self.__create_annotation_list(tree)\n certainties = tree.xpath('//default:teiHeader'\n '//default:classCode[@scheme=\"http://providedh.eu/uncertainty/ns/1.0\"]',\n namespaces=NAMESPACES)\n\n 
certainties[0].append(certainty)\n\n text = etree.tounicode(tree)\n\n return text\n\n def __create_annotation_list(self, tree):\n default_namespace = NAMESPACES['default']\n default = \"{%s}\" % default_namespace\n\n ns_map = {\n None: default_namespace\n }\n\n profile_desc = tree.xpath('//default:teiHeader/default:profileDesc', namespaces=NAMESPACES)\n\n if not profile_desc:\n tei_header = tree.xpath('//default:teiHeader', namespaces=NAMESPACES)\n profile_desc = etree.Element(default + 'profileDesc', nsmap=ns_map)\n tei_header[0].append(profile_desc)\n\n text_class = tree.xpath('//default:teiHeader/default:profileDesc/default:textClass', namespaces=NAMESPACES)\n\n if not text_class:\n profile_desc = tree.xpath('//default:teiHeader/default:profileDesc', namespaces=NAMESPACES)\n text_class = etree.Element(default + 'textClass', nsmap=ns_map)\n profile_desc[0].append(text_class)\n\n class_code = tree.xpath(\n '//default:teiHeader/default:profileDesc/default:textClass/default:classCode[@scheme=\"http://providedh.eu/uncertainty/ns/1.0\"]',\n namespaces=NAMESPACES)\n\n if not class_code:\n text_class = tree.xpath('//default:teiHeader/default:profileDesc/default:textClass', namespaces=NAMESPACES)\n class_code = etree.Element(default + 'classCode', scheme=\"http://providedh.eu/uncertainty/ns/1.0\",\n nsmap=ns_map)\n text_class[0].append(class_code)\n\n return tree\n\n def __reformat_xml(self, text):\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.fromstring(text, parser=parser)\n pretty_xml = etree.tounicode(tree, pretty_print=True)\n\n return pretty_xml\n","sub_path":"addons/teiclose/annotator.py","file_name":"annotator.py","file_ext":"py","file_size_in_byte":26509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447777869","text":"# -*- coding: utf-8 -*-\nfrom postprocess.control_vars import *\nfrom postprocess import limit_state_data as lsd\nfrom postprocess.xcVtk import vtk_graphic_base\nfrom postprocess import output_handler\n\nmodel_path=\"../\"\n#Project directory structure\nexec(open(model_path+'env_config.py').read())\n\nmodelDataInputFile=model_path+\"model_data.py\" #data for FE model generation\nexec(open(modelDataInputFile).read())\n\n\n#Load properties to display:\nfName= cfg.projectDirTree.getVerifShearFile()\nexec(open(fName).read())\n\n\n\nlimitStateLabel= lsd.shearResistance.label\n#attributeName= limitStateLabel + 'Sect1' #Shear limit state direction 1.\n#attributeName= limitStateLabel + 'Sect2' #Shear limit state direction 2\n\n\n#Possible arguments: 'CF', 'N', 'My', 'Mz', 'Mu', 'Vy', 'Vz', 'theta', 'Vcu', 'Vsu', 'Vu'\nargument= 'CF'\n\n\n# #Flatten values.\n# if( \"FCCP\" in attributeName):\n# extrapolate_elem_attr.flatten_attribute(elemSet,attributeName,1,2)\n\n\nsetDisp= allShells\noh= output_handler.OutputHandler(modelSpace)\noh.outputStyle.cameraParameters= cameraParameters\noh.displayFieldDirs1and2(limitStateLabel,argument,setToDisplay=setDisp,component=None, fileName= None,defFScale=0.0)\n\n\n\n","sub_path":"ave_SR/OD_PF_103_17/display/display_shearULS.py","file_name":"display_shearULS.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644810616","text":"\"\"\"\nBuild the jacobian for system of odes from metabolic models.\n\"\"\"\n\nfrom sympy import Matrix\nfrom sympy import Symbol\nimport numpy as np\n\nfrom bcodes.ratevector import subs_id_by_value\n\ndef odes_rhs2str(id_sp, id_rs, rates, 
mass_balances):\n \"\"\"\n Creates a string representing the rhs of a system of odes\n\n ACCEPTS\n id_sp [list of str] species ids\n id_rs [list of str] reacion ids\n rates [dict] {rxn_id: rxn_equation}\n mass_balances [dict] {species id: {rxn_id: stoic coefficient}}\n\n RETURNS\n rhs [str] rhs of the system of odes\n \"\"\"\n rhs = '' # rhs of the mass balances\n for sp in id_sp:\n sp_bal = ''\n for rxn in mass_balances[sp]:\n if mass_balances[sp][rxn] > 0:\n sign = '+'\n else:\n sign = '-'\n sp_bal += '{0} {1} * {2}'.format(\n sign, abs(mass_balances[sp][rxn]), rates[rxn])\n sp_bal += ',\\n'\n rhs += sp_bal\n return rhs\n\n\ndef odes_rhs2mat(id_sp, id_rs, rates, mass_balances, param_list):\n \"\"\"\n Build the code to construct a sympy Matrix for the rhs of a\n system of odes.\n\n ACCEPTS\n id_sp [list of str] species ids\n id_rs [list of str] reacion ids\n rates [dict] {rxn_id: rxn_equation}\n mass_balances [dict] {species id: {rxn_id: stoic coefficient}}\n param_list[list of str] parameter ids\n\n RETURNS\n rhs [str] code for a sympy Matrix with the rhs of the system of odes\n\n \"\"\"\n rhs = odes_rhs2str(id_sp, id_rs, rates, mass_balances)\n rhs_mat = ''\n for sp in id_sp:\n rhs_mat += \"{0} = Symbol('{0}')\\n\".format(sp)\n for p in param_list:\n rhs_mat += \"{0} = Symbol('{0}')\\n\".format(p)\n rhs_mat += 'rhs = Matrix([{0}])'.format(rhs)\n return rhs_mat\n\n\ndef odes_lhs2mat(id_sp):\n \"\"\"\n Build the code to construct a sympy martix for the lhs of a system of odes.\n\n ACCEPTS\n id_sp [list of str] species ids\n id_rs [list of str] reacion ids\n rates [dict] {rxn_id: rxn_equation}\n mass_balances [dict] {species id: {rxn_id: stoic coefficient}}\n param_list[list of str] parameter ids\n\n RETURNS\n sp_mat [str] code for a sympy Matrix with the rhs of the system of odes.\n \"\"\"\n sp_str = ''\n for sp in id_sp:\n sp_str += '{0}, '.format(sp)\n sp_str = sp_str[:-1] # remove last comma\n sp_mat = 'lhs = Matrix([{0}])'.format(sp_str)\n return sp_mat\n\n\ndef create_sym_jac(id_sp, id_rs, rates, mass_balances, param_list):\n \"\"\"\n Compute symbolically the Jacobian of a system of odes from a metabolic\n model.\n\n ACCEPTS\n id_sp [list of str] species ids\n id_rs [list of str] reacion ids\n rates [dict] {rxn_id: rxn_equation}\n mass_balances [dict] {species id: {rxn_id: stoic coefficient}}\n param_list[list of str] parameter ids\n\n RETURNS\n jac [Sympy Matrix] symbolic Jacobian for the system of odes.\n\n \"\"\"\n # Getting the necessary sympy classes within the scope of the exec\n # statement.\n d = {'Symbol': Symbol, 'Matrix': Matrix}\n\n rhs = odes_rhs2mat(id_sp, id_rs, rates, mass_balances, param_list)\n lhs = odes_lhs2mat(id_sp)\n exec(rhs, d)\n exec(lhs, d)\n # Use sympy to calculate the jacobian\n jac = d['rhs'].jacobian(d['lhs'])\n return jac\n\ndef func_from_str(func_str):\n return lambda t, y, p: eval(func_str)\n\ndef sym_jac2jac_func(sym_jac, trans_dict):\n \"\"\"\n Convert a symbolic jacobian (sympy.Matrix) into a numpy array function.\n\n ACCEPTS\n sym_jac [Sympy Matrix] symbolic Jacobian\n trans_dict [dict] dictionary to translate parameters and species into\n vector elements.\n\n RETURNS\n jac [2d array of lambdas] jacobian function\n \"\"\"\n j_str = np.array(sym_jac).astype(str).ravel()\n j_str_subs = [\n subs_id_by_value(expression, trans_dict) for expression in j_str]\n jac = lambda t, y, p: np.array([i(t, y, p) for i in\n [\n func_from_str(j_elem)\n for j_elem in j_str_subs\n ]\n ]).reshape(*sym_jac.shape)\n return jac\n\ndef build_jacobian(id_sp, id_rs, 
rates, mass_balances, param_list, trans_dict):\n \"\"\"\n Build a jacobian function for a metabolic model.\n\n ACCEPTS\n id_sp [list of str] species ids\n id_rs [list of str] reacion ids\n rates [dict] {rxn_id: rxn_equation}\n mass_balances [dict] {species id: {rxn_id: stoic coefficient}}\n param_list[list of str] parameter ids\n trans_dict [dict] dictionary to translate parameters and species into\n vector elements.\n\n RETURNS\n jac [2d array of lambdas] jacobian function\n \"\"\"\n symbolic_jacobian = create_sym_jac(\n id_sp, id_rs, rates, mass_balances, param_list\n )\n jacobian_function = sym_jac2jac_func(symbolic_jacobian, trans_dict)\n return jacobian_function\n\n","sub_path":"jacobian.py","file_name":"jacobian.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153067143","text":"# -*- coding:utf-8 -*-\nimport MainRegressor as MR\nfrom transfunc import *\nfrom Dependencies import REALDB_COLUMNS,POINT_SUPERSHORT,POINT_SHORT\nfrom Dependencies import parser,time,threading,timedelta,pd\nimport optparse\n\ndef scheduled_gf(points,fake_dict,start_time,exam_file):\n # print('Current Thread is',threading.current_thread().name)\n fake_data,fake_index=generate_FakeData(points,start_time,exam_file)\n fake_dict['data']=fake_data\n fake_dict['index']=fake_index\n # print(threading.current_thread().name,'is Done.')\n # time.sleep(15)\n\ndef scheduled_pred(fake_dict,model):\n # print('Current Thread is',threading.current_thread().name)\n fake_data,fake_index=fake_dict['data'],fake_dict['index']\n model._check_error(POINT_SHORT)\n model.plot(POINT_SHORT)\n pred=model.predict(fake_data[:,:-1],True) \n model.save_model('./saved_model')\n pred=pd.DataFrame({'predicted':pred},index=fake_index)\n update_PredDB(pred)\n # print(threading.current_thread().name,'is Done.')\n # time.sleep(15)\n\ndef scheduled_push(fake_dict):\n # print('Current Thread is',threading.current_thread().name)\n fake_data,fake_index=fake_dict['data'],fake_dict['index']\n fake=pd.DataFrame(fake_data,columns=REALDB_COLUMNS,index=fake_index)\n update_RealDB(fake)\n # print(threading.current_thread().name,'is Done.')\n\ndef train():\n file=pd.read_csv('./data/test.csv',index_col=0)\n fake_data,fake_index=generate_FakeData(5000,file.index[0],file)\n fake=pd.DataFrame(fake_data,columns=REALDB_COLUMNS,index=fake_index)\n update_RealDB(fake)\n model=MR.MainRegressor('./config.json')\n model.auto_fit()\n model.save_model('./saved_model')\n\ndef run():\n file=pd.read_csv('./data/test.csv',index_col=0)\n start_time=parser.parse(get_LatestDate())+timedelta(0,15*60)\n model=MR.MainRegressor(None)\n model.load_model('./saved_model')\n\n fake_dict={\n 'data':None,\n 'index':None\n }\n\n scheduled_gf(POINT_SHORT,fake_dict,start_time,file)\n scheduled_pred(fake_dict,model)\n scheduled_push(fake_dict)\n\noptp=optparse.OptionParser()\noptp.add_option('-t','--type',default='test')\nif __name__=='__main__':\n options,_=optp.parse_args()\n if options.type=='train':\n train()\n elif options.type=='test':\n run()\n else:\n print('[ ERROR ] U should give correct -t/--type like \"test\" or \"train\"')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"459696102","text":"from imports import *\nimport objects as obj\nimport helpers.helpers as helper\nfrom exprtree import Expr\n\nclass Obj:\n def isLeaf(self):\n return 
isinstance(self, Leaf)\n\n def __add__(self, other):\n from exprtree import Expr\n return Expr(self, \"+\", other)\n\n def __sub__(self, other):\n from exprtree import Expr\n return Expr(self, \"-\", other)\n\n def __mul__(self, other):\n from exprtree import Expr\n return Expr(self, \"*\", other)\n\n def __truediv__(self, other):\n from exprtree import Expr\n return Expr(self, \"/\", other)\n\n def __pow__(self, other):\n from exprtree import Expr\n return Expr(self, \"^\", other)\n\n def distribute(self):\n if self.isLeaf():\n return self\n\n node1 = self.node1\n oper = self.oper\n node2 = self.node2\n\n # if oper == Oper[\"^\"]:\n # if isinstance(node2.Const) and isinstance(node2.num, int):\n # node1 = node1.distribute()\n # return \n \n if oper == Oper[\"+\"]:\n return node1.distribute() + node2.distribute()\n\n if oper == Oper[\"*\"]:\n if isinstance(node2, ExprTree) and node2.oper == Oper[\"+\"]:\n return (node1 * node2.node1).distribute() + (node1 * node2.node2).distribute()\n\n\n elif isinstance(node1, ExprTree) and node1.oper == Oper[\"+\"]:\n return (node1.node1 * node2).distribute() + (node1.node2 * node2).distribute()\n\n\n return self\n\n # Distribute multiplication and recurse through adds\n # Expression matrix\n def matrix(self): \n terms = helper.breaks(self.distribute(), \"+\")\n return [helper.breaks(x, \"*\") for x in terms]\n\nclass Leaf(Obj):\n def eval(self, wrt, expr):\n return self\n\n\nclass ExprTree(Obj):\n def __init__(self, node1, oper, node2):\n self.node1 = node1\n self.oper = Oper[oper]\n self.node2 = node2\n \n # TODO: More general with equality\n def eval(self, wrt, expr):\n node1 = self.node1\n oper = self.oper\n node2 = self.node2\n \n return Expr(node1.eval(wrt, expr), oper.name, node2.eval(wrt, expr))\n \n\n # Take derivative\n def deriv(self, wrt):\n node1 = self.node1\n oper = self.oper\n node2 = self.node2\n\n ############\n # ADDITION #\n ############\n\n if oper is Oper[\"+\"]:\n return node1.deriv(wrt) + node2.deriv(wrt)\n\n ##################\n # MULTIPLICATION #\n ##################\n \n if oper is Oper[\"*\"]:\n d1 = node1.deriv(wrt)\n d2 = node2.deriv(wrt)\n \n return d1*node2 + d2*node1\n\n ##################\n # EXPONENTIATION #\n ##################\n\n if oper is Oper[\"^\"]:\n if isinstance(node2, obj.Const):\n newCoeff = node2\n newDegree = node2 - obj.Const(1)\n \n return newCoeff * node1.deriv(wrt) * node1 ** newDegree\n \n def __str__(self):\n oper = self.oper.name\n return f\"({self.node1} {oper} {self.node2})\"\n","sub_path":"masterclass.py","file_name":"masterclass.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"107999847","text":"import csv, glob\n\nfor filename in sorted(glob.glob(\"metadata/*.csv\")):\n file_a = filename[:-4]\n file = file_a[9:]\n\n csvFile = 'metadata/' + file + '.csv'\n xmlFile = file + '.xml'\n\n csvData = csv.reader(open(csvFile, encoding='latin-1'))\n xmlData = open(xmlFile, 'w', encoding='latin-1')\n xmlData.write('' + \"\\n\")\n\n xmlData.write('' + \"\\n\")\n\n rowNum = 0\n for row in csvData:\n if rowNum == 0:\n tags = row\n\n for i in range(len(tags)):\n tags[i].replace(' ', '_')\n else:\n xmlData.write('' + \"\\n\")\n for i in range(len(tags)):\n xmlData.write(' ' + '<' + tags[i] + '>'\\\n + row[i] + '' + \"\\n\")\n xmlData.write('' + \"\\n\")\n rowNum +=1\n xmlData.write('' + \"\\n\")\n 
xmlData.close()","sub_path":"python/csv-to-xml.py","file_name":"csv-to-xml.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"100836358","text":"# (№ 2842) Назовём нетривиальным делителем натурального числа его делитель,\n# не равный единице и самому числу. Найдите все натуральные числа, принадлежащие\n# отрезку [12034679; 23175821] и имеющие ровно три нетривиальных делителя. Для каждого\n# найденного числа запишите в ответе само число и его наибольший нетривиальный делитель.\n# Найденные числа расположите в порядке возрастания.\n\n\ndef divizors(n):\n divizors=[1,n]\n for i in range(2,int(n**0.5)+1):\n if n%i==0:\n divizors+=[i,n//i]\n return sorted(set(divizors))\n\nfor i in range(12034679, 23175821+1):\n if len(divizors(i))==5:\n print(*divizors(i)[1:-1])\n","sub_path":"25/25_.py","file_name":"25_.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"527646235","text":"class Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: None Do not return anything, modify nums1 in-place instead.\n \"\"\"\n nums1 = nums1[:m]\n nums1.extend(nums2)\n nums1.sort()\n print(nums1)\n\n\nif __name__ == \"__main__\":\n x = [1, 2, 3, 0, 0, 0]\n y = [2, 5, 6]\n Solution().merge(x, 3, y, 3)\n","sub_path":"0088合并两个有序数组/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"307883212","text":"# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport glob\nimport os\nimport xml.dom.minidom as DOM\nfrom textwrap import dedent\n\nimport coverage\nfrom mock import patch\n\nfrom pants.backend.python.tasks.pytest_run import PytestRun\nfrom pants.base.exceptions import TestFailedTaskError\nfrom pants.util.contextutil import pushd\nfrom pants.util.timeout import TimeoutReached\nfrom pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase\n\n\nclass PythonTestBuilderTestBase(PythonTaskTestBase):\n @classmethod\n def task_type(cls):\n return PytestRun\n\n def run_tests(self, targets, **options):\n test_options = {\n 'colors': False,\n 'level': 'info' # When debugging a test failure it may be helpful to set this to 'debug'.\n }\n test_options.update(options)\n self.set_options(**test_options)\n context = self.context(target_roots=targets)\n pytest_run_task = self.create_task(context)\n with pushd(self.build_root):\n pytest_run_task.execute()\n\n def run_failing_tests(self, targets, failed_targets, **options):\n with self.assertRaises(TestFailedTaskError) as cm:\n self.run_tests(targets=targets, **options)\n self.assertEqual(set(failed_targets), set(cm.exception.failed_targets))\n\n\nclass PythonTestBuilderTestEmpty(PythonTestBuilderTestBase):\n def test_empty(self):\n self.run_tests(targets=[])\n\n\nclass PythonTestBuilderTest(PythonTestBuilderTestBase):\n def setUp(self):\n super(PythonTestBuilderTest, self).setUp()\n self.create_file(\n 'lib/core.py',\n dedent(\"\"\"\n def one(): # line 1\n return 1 # line 2\n # line 3\n # line 4\n def two(): # 
line 5\n return 2 # line 6\n \"\"\").strip())\n self.add_to_build_file(\n 'lib',\n dedent(\"\"\"\n python_library(\n name='core',\n sources=[\n 'core.py'\n ]\n )\n \"\"\"))\n\n self.create_file(\n 'tests/test_core_green.py',\n dedent(\"\"\"\n import unittest2 as unittest\n\n import core\n\n class CoreGreenTest(unittest.TestCase):\n def test_one(self):\n self.assertEqual(1, core.one())\n \"\"\"))\n self.create_file(\n 'tests/test_core_red.py',\n dedent(\"\"\"\n import core\n\n def test_two():\n assert 1 == core.two()\n \"\"\"))\n self.create_file(\n 'tests/test_core_red_in_class.py',\n dedent(\"\"\"\n import unittest2 as unittest\n\n import core\n\n class CoreRedClassTest(unittest.TestCase):\n def test_one_in_class(self):\n self.assertEqual(1, core.two())\n \"\"\"))\n self.create_file(\n 'tests/test_core_sleep.py',\n dedent(\"\"\"\n import core\n\n def test_three():\n assert 1 == core.one()\n \"\"\"))\n self.create_file(\n 'tests/test_error.py',\n dedent(\"\"\"\n def test_error(bad_fixture):\n pass\n \"\"\")\n )\n self.create_file(\n 'tests/test_failure_outside_function.py',\n dedent(\"\"\"\n def null():\n pass\n\n assert(False)\n \"\"\"\n )\n )\n self.add_to_build_file(\n 'tests',\n dedent(\"\"\"\n python_tests(\n name='error',\n sources=[\n 'test_error.py'\n ],\n )\n\n python_tests(\n name='failure_outside_function',\n sources=[\n 'test_failure_outside_function.py',\n ],\n )\n\n python_tests(\n name='green',\n sources=[\n 'test_core_green.py'\n ],\n dependencies=[\n 'lib:core'\n ],\n coverage=[\n 'core'\n ]\n )\n\n python_tests(\n name='red',\n sources=[\n 'test_core_red.py',\n ],\n dependencies=[\n 'lib:core'\n ],\n coverage=[\n 'core'\n ]\n )\n\n python_tests(\n name='red_in_class',\n sources=[\n 'test_core_red_in_class.py',\n ],\n dependencies=[\n 'lib:core'\n ],\n coverage=[\n 'core'\n ]\n )\n\n python_tests(\n name='sleep_no_timeout',\n sources=[\n 'test_core_sleep.py',\n ],\n timeout = 0,\n dependencies=[\n 'lib:core'\n ],\n coverage=[\n 'core'\n ]\n )\n\n python_tests(\n name='sleep_timeout',\n sources=[\n 'test_core_sleep.py',\n ],\n timeout = 1,\n dependencies=[\n 'lib:core'\n ],\n coverage=[\n 'core'\n ]\n )\n\n python_tests(\n name='all',\n sources=[\n 'test_core_green.py',\n 'test_core_red.py',\n ],\n dependencies=[\n 'lib:core'\n ]\n )\n\n python_tests(\n name='all-with-coverage',\n sources=[\n 'test_core_green.py',\n 'test_core_red.py'\n ],\n dependencies=[\n 'lib:core'\n ],\n coverage=[\n 'core'\n ]\n )\n \"\"\"))\n self.green = self.target('tests:green')\n\n self.red = self.target('tests:red')\n self.red_in_class = self.target('tests:red_in_class')\n self.sleep_no_timeout = self.target('tests:sleep_no_timeout')\n self.sleep_timeout = self.target('tests:sleep_timeout')\n self.error = self.target('tests:error')\n self.failure_outside_function = self.target('tests:failure_outside_function')\n\n self.all = self.target('tests:all')\n self.all_with_coverage = self.target('tests:all-with-coverage')\n\n def test_error(self):\n \"\"\"Test that a test that errors rather than fails shows up in TestFailedTaskError.\"\"\"\n\n self.run_failing_tests(targets=[self.red, self.green, self.error],\n failed_targets=[self.red, self.error])\n\n def test_error_outside_function(self):\n # If the test is outside a class or function, the failure line is in the following format:\n # F testprojects/tests/python/pants/constants_only/test_fail.py\n self.run_failing_tests(targets=[self.red, self.green, self.failure_outside_function],\n failed_targets=[self.red, self.failure_outside_function])\n\n def 
test_green(self):\n self.run_tests(targets=[self.green])\n\n def test_red(self):\n self.run_failing_tests(targets=[self.red], failed_targets=[self.red])\n\n def test_fail_fast_skips_second_red_test_with_single_chroot(self):\n self.run_failing_tests(targets=[self.red, self.red_in_class], failed_targets=[self.red],\n fail_fast=True,\n fast=False)\n\n def test_fail_fast_skips_second_red_test_with_isolated_chroot(self):\n self.run_failing_tests(targets=[self.red, self.red_in_class], failed_targets=[self.red],\n fail_fast=True,\n fast=True)\n\n def test_red_test_in_class(self):\n # for test in a class, the failure line is in the following format\n # F testprojects/tests/python/pants/constants_only/test_fail.py::TestClassName::test_boom\n self.run_failing_tests(targets=[self.red_in_class], failed_targets=[self.red_in_class])\n\n def test_mixed(self):\n self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])\n\n def test_one_timeout(self):\n # When we have two targets, any of them doesn't have a timeout, and we have no default,\n # then no timeout is set.\n\n with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:\n self.run_tests(targets=[self.sleep_no_timeout, self.sleep_timeout])\n\n # Ensures that Timeout is instantiated with no timeout.\n args, kwargs = mock_timeout.call_args\n self.assertEqual(args, (None,))\n\n def test_timeout(self):\n # Check that a failed timeout returns the right results.\n\n with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:\n mock_timeout().__exit__.side_effect = TimeoutReached(1)\n self.run_failing_tests(targets=[self.sleep_timeout],\n failed_targets=[self.sleep_timeout])\n\n # Ensures that Timeout is instantiated with a 1 second timeout.\n args, kwargs = mock_timeout.call_args\n self.assertEqual(args, (1,))\n\n def test_junit_xml_option(self):\n # We expect xml of the following form:\n # \n # \n # \n # ...\n # \n # \n report_basedir = os.path.join(self.build_root, 'dist', 'junit_option')\n self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],\n junit_xml_dir=report_basedir)\n\n files = glob.glob(os.path.join(report_basedir, '*.xml'))\n self.assertEqual(1, len(files), 'Expected 1 file, found: {}'.format(files))\n junit_xml = files[0]\n root = DOM.parse(junit_xml).documentElement\n\n self.assertEqual(2, len(root.childNodes))\n self.assertEqual(2, int(root.getAttribute('tests')))\n self.assertEqual(1, int(root.getAttribute('failures')))\n self.assertEqual(0, int(root.getAttribute('errors')))\n self.assertEqual(0, int(root.getAttribute('skips')))\n\n children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)\n self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))\n self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))\n self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)\n\n def coverage_data_file(self):\n return os.path.join(self.build_root, '.coverage')\n\n def load_coverage_data(self, path):\n data_file = self.coverage_data_file()\n self.assertTrue(os.path.isfile(data_file))\n coverage_data = coverage.coverage(data_file=data_file)\n coverage_data.load()\n _, all_statements, not_run_statements, _ = coverage_data.analysis(path)\n return all_statements, not_run_statements\n\n def test_coverage_simple_option(self):\n # TODO(John Sirois): Consider eliminating support for \"simple\" coverage or at least formalizing\n # the coverage option value that turns this on to \"1\" or \"all\" or 
\"simple\" = anything formal.\n simple_coverage_kwargs = {'coverage': '1'}\n\n self.assertFalse(os.path.isfile(self.coverage_data_file()))\n covered_file = os.path.join(self.build_root, 'lib', 'core.py')\n\n self.run_tests(targets=[self.green], **simple_coverage_kwargs)\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([6], not_run_statements)\n\n self.run_failing_tests(targets=[self.red], failed_targets=[self.red], **simple_coverage_kwargs)\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([2], not_run_statements)\n\n self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],\n **simple_coverage_kwargs)\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([], not_run_statements)\n\n # The all target has no coverage attribute and the code under test does not follow the\n # auto-discover pattern so we should get no coverage.\n self.run_failing_tests(targets=[self.all], failed_targets=[self.all], **simple_coverage_kwargs)\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([1, 2, 5, 6], not_run_statements)\n\n self.run_failing_tests(targets=[self.all_with_coverage],\n failed_targets=[self.all_with_coverage],\n **simple_coverage_kwargs)\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([], not_run_statements)\n\n def test_coverage_modules_dne_option(self):\n self.assertFalse(os.path.isfile(self.coverage_data_file()))\n covered_file = os.path.join(self.build_root, 'lib', 'core.py')\n\n # modules: should trump .coverage\n self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],\n coverage='modules:does_not_exist,nor_does_this')\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([1, 2, 5, 6], not_run_statements)\n\n def test_coverage_modules_option(self):\n self.assertFalse(os.path.isfile(self.coverage_data_file()))\n covered_file = os.path.join(self.build_root, 'lib', 'core.py')\n\n self.run_failing_tests(targets=[self.all], failed_targets=[self.all], coverage='modules:core')\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([], not_run_statements)\n\n def test_coverage_paths_dne_option(self):\n self.assertFalse(os.path.isfile(self.coverage_data_file()))\n covered_file = os.path.join(self.build_root, 'lib', 'core.py')\n\n # paths: should trump .coverage\n self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],\n coverage='paths:does_not_exist/,nor_does_this/')\n all_statements, not_run_statements = self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([1, 2, 5, 6], not_run_statements)\n\n def test_coverage_paths_option(self):\n self.assertFalse(os.path.isfile(self.coverage_data_file()))\n covered_file = os.path.join(self.build_root, 'lib', 'core.py')\n\n self.run_failing_tests(targets=[self.all], failed_targets=[self.all], coverage='paths:core.py')\n all_statements, not_run_statements = 
self.load_coverage_data(covered_file)\n self.assertEqual([1, 2, 5, 6], all_statements)\n self.assertEqual([], not_run_statements)\n\n def test_sharding(self):\n self.run_failing_tests(targets=[self.red, self.green], failed_targets=[self.red],\n test_shard='0/2')\n self.run_tests(targets=[self.red, self.green], test_shard='1/2')\n\n def test_sharding_single(self):\n self.run_failing_tests(targets=[self.red], failed_targets=[self.red], test_shard='0/1')\n\n def test_sharding_invalid_shard_too_small(self):\n with self.assertRaises(PytestRun.InvalidShardSpecification):\n self.run_tests(targets=[self.green], test_shard='-1/1')\n\n def test_sharding_invalid_shard_too_big(self):\n with self.assertRaises(PytestRun.InvalidShardSpecification):\n self.run_tests(targets=[self.green], test_shard='1/1')\n\n def test_sharding_invalid_shard_bad_format(self):\n with self.assertRaises(PytestRun.InvalidShardSpecification):\n self.run_tests(targets=[self.green], test_shard='1')\n\n with self.assertRaises(PytestRun.InvalidShardSpecification):\n self.run_tests(targets=[self.green], test_shard='1/2/3')\n\n with self.assertRaises(PytestRun.InvalidShardSpecification):\n self.run_tests(targets=[self.green], test_shard='1/a')\n\n def test_resultlog_regex(self):\n regex = PytestRun.RESULTLOG_FAILED_PATTERN\n for error_failure in ['E', 'F']:\n self.assertEqual(regex.match('%s filename::class::method' % error_failure).group('file'),\n 'filename')\n self.assertEqual(regex.match('%s filename::method' % error_failure).group('file'),\n 'filename')\n self.assertEqual(regex.match('%s filename' % error_failure).group('file'), 'filename')\n self.assertIsNone(regex.match(' %s filename'))\n self.assertIsNone(regex.match(' filename'))\n self.assertIsNone(regex.match('filename'))\n self.assertIsNone(regex.match('%sfilename'))\n self.assertEqual(regex.match('%s filename::class:method' % error_failure).group('file'),\n 'filename')\n self.assertEqual(regex.match('%s file/name.py::class:method' % error_failure).group('file'),\n 'file/name.py')\n self.assertEqual(regex.match('%s file:colons::class::method' % error_failure).group('file'),\n 'file:colons')\n","sub_path":"tests/python/pants_test/backend/python/tasks/test_pytest_run.py","file_name":"test_pytest_run.py","file_ext":"py","file_size_in_byte":16964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"653747639","text":"import sys\nimport os\nimport csv\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass viewExperiment(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n self.init_ui(self)\n\n def init_ui(self, *args):\n self.setWindowTitle(\"P.O.D.\")\n\n # far down on page, far left on page, width, height\n self.setGeometry(325, 100, 650, 400)\n\n self.setStyleSheet(\"background-color:#FFFFFF\");\n #98FB98\n\n # Create new experiment labels\n self.createNewLabel = QtWidgets.QLabel(self)\n self.createNewLabel.setText('View Experiments')\n self.createNewLabel.move(300, 15)\n self.createFont = QtGui.QFont(\"Times\", 24, QtGui.QFont.Bold)\n self.createNewLabel.setFont(self.createFont)\n self.createNewLabel.setStyleSheet(\"background-color:#FFFFFF\")\n\n # labels\n # Background label\n self.menuLabel = QtWidgets.QLabel(self)\n self.menuLabel.setGeometry(QtCore.QRect(0, 0, 175, 70))\n self.menuLabel.setStyleSheet(\"background-color:#98FB98\")\n # Image logo\n self.podLogo = QtWidgets.QLabel(self)\n self.originalpixmap = QtGui.QPixmap('podLogo1.png')\n self.adjustedPixmap = self.originalpixmap.scaled(150, 150, 
QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation)\n self.podLogo.setPixmap(self.adjustedPixmap)\n self.podLogo.setStyleSheet(\"background-color:#98FB98\")\n self.podLogo.move(15, 10)\n\n # Button font\n self.buttonFont = QtGui.QFont(\"Helvetica\", 12)\n\n self.exitButton = QtWidgets.QPushButton(self)\n self.exitButton.setText('Exit')\n self.exitButton.move(70, 300)\n self.exitButton.resize(100, 40);\n self.exitButton.setStyleSheet(\"background-color:#FFFFFF\")\n self.exitButton.setFont(self.buttonFont)\n self.exitButton.clicked.connect(self.close)\n\n self.currDir = 0\n # print(os.getcwd() + \"/currentExperiments\")\n self.directory = os.fsencode(os.getcwd() + \"/currentExperiments\")\n # print(self.directory)\n self.dirlist = os.listdir(self.directory) # dir is your directory path\n self.number_files = len(self.dirlist)\n # print(self.number_files)\n\n self.tableWidget = QtWidgets.QTableWidget(self)\n # set row count\n self.tableWidget.setRowCount(self.number_files)\n # set column count\n self.tableWidget.setColumnCount(13)\n self.tableWidget.setHorizontalHeaderLabels(('Experiment Name', 'Start Date', 'Start Time', 'End Date', 'End Time', 'Water Delay', 'Water Duration', 'Light Delay', 'Light Error', 'Temp Delay', 'Temp Error', 'Photo Delay', 'CSV File'))\n self.tableWidget.move(10, 75)\n self.tableWidget.resize(625, 275)\n self.tableWidget.setColumnWidth(0, 150)\n # # simple version for working with CWD\n # print(len([self.name for self.name in os.listdir('.') if os.path.isfile(self.name)]))\n\n\n for self.file in os.listdir(self.directory):\n self.filename = os.fsdecode(self.file)\n # print(self.filename)\n with open(\"currentExperiments/\" + self.filename, \"r\") as self.fileInput:\n self.reader = csv.reader(self.fileInput)\n self.fileContents = list(self.reader)\n # print(str(self.fileContents[0]).strip('[]'))\n self.tableWidget.setItem(self.currDir, 5, QtWidgets.QTableWidgetItem(str(self.fileContents[3]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 6, QtWidgets.QTableWidgetItem(str(self.fileContents[4]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 7, QtWidgets.QTableWidgetItem(str(self.fileContents[5]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 8, QtWidgets.QTableWidgetItem(str(self.fileContents[6]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 9, QtWidgets.QTableWidgetItem(str(self.fileContents[7]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 10, QtWidgets.QTableWidgetItem(str(self.fileContents[8]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 11, QtWidgets.QTableWidgetItem(str(self.fileContents[9]).strip(\"[]\").strip(\"''\")))\n # self.tableWidget.setItem(self.currDir, 12, QtWidgets.QTableWidgetItem(str(self.fileContents[11]).strip(\"[]\").strip(\"''\")))\n self.tableWidget.setItem(self.currDir, 12, QtWidgets.QTableWidgetItem(str(self.fileContents[2]).strip(\"[]\").strip(\"''\")))\n\n self.currDir = self.currDir + 1\n\n self.show()\n","sub_path":"PODui/PODViewExperiment.py","file_name":"PODViewExperiment.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391284016","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (BalanceViewSet, RefillViewSet, TransferViewSet,\n UserViewSet, WithdrawViewSet)\n\nrouter_v1 = 
DefaultRouter(trailing_slash='optional')\nrouter_v1.register('', UserViewSet, basename='users')\nrouter_v1.register(r'(?P\\d+)/refill', RefillViewSet,\n basename='refill')\nrouter_v1.register(r'(?P\\d+)/withdraw', WithdrawViewSet,\n basename='withdraw')\nrouter_v1.register(r'(?P\\d+)/transfer', TransferViewSet,\n basename='transfer')\nrouter_v1.register(r'(?P\\d+)/balance', BalanceViewSet,\n basename='balance')\n\nurlpatterns = [\n path('users/', include(router_v1.urls)),\n]\n","sub_path":"api_avito/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447255897","text":"import sys\nimport bs4\nimport json\nimport psycopg2\nfrom urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\n\n# open, read, close, parse\ndef oRCP(url):\n\tuClient = uReq(url)\n\tpage_html = uClient.read()\n\tuClient.close()\n\tpage_soup = soup(page_html, \"lxml\")\n\treturn page_soup\n\n# scrape posts\ndef getPosts(parsedMain):\n\t# find first post and parse it\n\tfirstPostLink = parsedMain.find(\"a\", {\"class\":\"read-more\"}).get('href')\n\tfirstPostPage = oRCP(base_url + firstPostLink)\n\tfirstPostText = firstPostPage.find(\"section\", {\"class\":\"post-content\"}).get_text().split()\n\tfirstPostAuthor = firstPostPage.find(\"span\", {\"class\":\"author-content\"}).find(\"h4\").get_text().strip()\n\t# filter data and store it in variable\n\tunwanted = ',.\"-”“:;?!\"”()[]„\\/|<>#+-*•{}–='\n\tfor word in firstPostText:\n\t\t# remove characters from beginning and ends of words\n\t\tword = word.strip(unwanted).lower()\n\t\tpostData = (word, firstPostAuthor)\n\t\tdata.append(postData)\n\t# look for previous post link\n\tprevPost = firstPostPage.select(\".pull-left a\")\n\t# if there is previous post link go to next post\n\tif prevPost:\n\t\t# keep going until there are more posts\n\t\twhile(prevPost):\n\t\t\tnextPostLink = base_url + prevPost[0]['href']\n\t\t\tnextPostPage = oRCP(nextPostLink)\n\t\t\tnextPostText = nextPostPage.find(\"section\", {\"class\":\"post-content\"}).get_text().split()\n\t\t\tnextPostAuthor = nextPostPage.find(\"span\", {\"class\":\"author-content\"}).find(\"h4\").get_text().strip()\n\t\t\tfor word in nextPostText:\n\t\t\t\tword.strip(unwanted).lower()\n\t\t\t\tpostData = (word, nextPostAuthor)\n\t\t\t\tdata.append(postData)\n\t\t\tprevPost = nextPostPage.select(\".pull-left a\")\n\telse:\n\t\treturn print(\"Nothing found\")\n# connect to database\ncon = None\n\ntry:\n\tcon = psycopg2.connect(database='teonite', user='scraper', host='127.0.0.1', password='password')\n\tcur = con.cursor()\n\tcur.execute(\"SELECT EXISTS(SELECT * FROM information_schema.tables WHERE table_name=%s)\", (\"posts\",))\n\t# if table exists just return data\n\tif cur.fetchone()[0]:\n\t\tif len(sys.argv) > 1:\n\t\t\tcur.execute(\"SELECT word, COUNT(*) FROM posts WHERE author=%s GROUP BY word ORDER BY count DESC LIMIT 10\", (sys.argv[1],))\n\t\t\tcolumns = ('word', 'count')\n\t\t\tresults = []\n\t\t\tfor row in cur.fetchall():\n\t\t\t\tresults.append(dict(zip(columns,row)))\n\t\t\tprint(json.dumps(results, indent=2, ensure_ascii=False))\n\t\telse:\n\t\t\tcur.execute(\"SELECT word, COUNT(*) FROM posts GROUP BY word, author ORDER BY count DESC LIMIT 10\")\n\t\t\tcolumns = ('word', 'count')\n\t\t\tresults = []\n\t\t\tfor row in cur.fetchall():\n\t\t\t\tresults.append(dict(zip(columns,row)))\n\t\t\tprint(json.dumps(results, indent=2, ensure_ascii=False))\n\t# else create table 
and insert scraped data\n\telse:\n\t\tprint(\"Table created!\")\n\t\tcur.execute(\"CREATE TABLE posts(ID SERIAL PRIMARY KEY, word TEXT, author VARCHAR(30))\")\n\t\t# scrape Teonite blog\n\t\tprint(\"Started scraping...\")\n\t\tbase_url = 'http://build.sh'\n\t\tdata = []\n\t\tmainPage = oRCP(base_url)\n\t\tscrapePosts = getPosts(mainPage)\n\t\t# insert data\n\t\tfor word in data:\n\t\t\tcur.execute(\"INSERT into posts(word, author) VALUES (%s, %s)\", word)\n\t\t# save changes to table\n\t\tcon.commit()\n\t\tprint(\"Scraping complete!\")\n\t\tcur.execute(\"SELECT word, author, COUNT(*) FROM posts GROUP BY word, author ORDER BY count DESC LIMIT 10\")\n\t\tcolumns = ('word', 'author', 'count')\n\t\tresults = []\n\t\tfor row in cur.fetchall():\n\t\t\tresults.append(dict(zip(columns,row)))\n\t\tprint(json.dumps(results, indent=2, ensure_ascii=False))\n\nexcept psycopg2.DatabaseError as exception:\n\tprint(exception)\n\tsys.exit(1)\n\nfinally:\n\tif con:\n\t\tcon.close()\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"346172759","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 25 14:29:10 2019\nThis script needs more work, mainly on updating new data\n@author: ziskin\nimplement a mode arg which will retain the long term update behaviour but will also\nadd a real-time mode, which with a timestamp and window takes a parameter (TD)\nsnapshot of all the stations togather and saves it to disk\nwrite another script with click!\n\"\"\"\n\n\n#def load_saved_station(path, station_id, channel):\n# from aux_gps import path_glob\n# import xarray as xr\n# files = path_glob(path, '*_{}_{}_10mins.nc'.format(station_id, channel))\n# if len(files) == 0:\n# return False\n# elif len(files) == 1:\n# return xr.load_dataset(files[0])\n# elif len(files) > 1:\n# raise ValueError('too many files with the same glob str')\n#\n#\n##def parse_filename(file_path):\n## filename = file_path.as_posix().split('/')[-1].split('.')[0]\n## station_name = filename.split('_')[0]\n## station_id = filename.split('_')[1]\n## channel = filename.split('_')[2]\n## return station_name, station_id, channel\n#\n#\ndef check_ds_last_datetime(ds, fmt=None):\n \"\"\"return the last datetime of the ds\"\"\"\n import pandas as pd\n import xarray as xr\n if isinstance(ds, xr.DataArray):\n ds = ds.to_dataset(name=ds.name)\n # assume time series with one time dim:\n time_dim = list(set(ds.dims))[0]\n dvars = [x for x in ds.data_vars]\n if dvars:\n dt = ds[dvars[0]].dropna(time_dim)[time_dim][-1].values\n dt = pd.to_datetime(dt)\n if fmt is None:\n return dt\n else:\n return dt.strftime(fmt)\n else:\n raise KeyError(\"dataset is empty ( no data vars )\")\n\n\ndef check_path(path):\n import os\n from pathlib import Path\n path = str(path)\n if not os.path.exists(path):\n raise argparse.ArgumentTypeError(path + ' does not exist...')\n return Path(path)\n\n\ndef generate_delete(savepath, channel_name):\n from aux_gps import query_yes_no\n from aux_gps import path_glob\n try:\n glob = '*_{}_10mins.nc'.format(channel_name)\n files_to_delete = path_glob(savepath, glob)\n except FileNotFoundError:\n print('skipping {} , because its empty or not existant..'.format(savepath))\n return\n print('WARNING for channel {}, ALL nc files in {} WILL BE DELETED!'.format(channel_name, savepath))\n to_delete = query_yes_no('ARE YOU SURE ?')\n if not to_delete:\n print('files NOT deleted...')\n 
else:\n [x.unlink() for x in files_to_delete]\n print('FILES DELETED!')\n return\n\n\ndef download_ims_single_station(stationid, savepath=None,\n channel_name='TD', update=None):\n \"\"\"download single station with channel_name from earliest to latest.\n if chanel_name is None, download all channels\"\"\"\n import requests\n import pandas as pd\n import logging\n from requests.exceptions import SSLError\n\n def parse_ims_to_df(raw_data, ch_name):\n \"\"\"gets ims station raw data, i.e., r.json()['data'] and returns\n a pandas dataframe\"\"\"\n import pandas as pd\n from pytz import timezone\n if ch_name is not None:\n datetimes = [x['datetime'] for x in raw_data]\n # Local datetimes:\n dts = [x.split('+')[0] for x in datetimes]\n # bool mask for DST:\n dts_dst = [x.split('+')[-1] for x in datetimes]\n dst_bool = [True if x == '03:00' else False for x in dts_dst]\n jer = timezone('Asia/Jerusalem')\n data = [x['channels'][0] for x in raw_data]\n df = pd.DataFrame.from_records(data, index=pd.to_datetime(dts))\n df.drop(['alias', 'description'], axis=1, inplace=True)\n cols = [ch_name + '_' + x for x in df.columns]\n df.columns = cols\n df = df.tz_localize(jer, ambiguous=dst_bool, nonexistent='shift_forward')\n df = df.tz_convert('UTC')\n elif ch_name is None:\n # add all channels d/l here:\n datetimes = [x['datetime'] for x in raw_data]\n names = [x['name'] for x in data['channels']]\n keys = [*data['channels'][0].keys()]\n return df\n\n def to_dataarray(df, meta):\n # add all channels d/l here:\n import pandas as pd\n ds = df.to_xarray()\n ds['time'] = pd.to_datetime(ds.time.values)\n channel_name = [*ds.data_vars.keys()][0].split('_')[0]\n channel_id = ds[channel_name + '_id'].isel(time=0).values.item()\n to_drop = [x for x in ds.data_vars.keys() if 'value' not in x]\n ds = ds.drop(to_drop)\n da = ds[channel_name + '_value'].reset_coords(drop=True)\n da.name = meta['name']\n da.attrs['channel_id'] = int(channel_id)\n da.attrs['channel_name'] = channel_name\n da.attrs['station_name'] = meta['name']\n da.attrs['station_id'] = meta['id']\n da.attrs['active'] = meta['active']\n da.attrs['station_lat'] = str(meta['loc']['latitude'])\n da.attrs['station_lon'] = str(meta['loc']['longitude'])\n for key, value in da.attrs.items():\n print(key, value)\n return da\n\n def get_dates_list(start_date, end_date):\n \"\"\"divide the date span into full 1 years and a remainder, tolist\"\"\"\n import numpy as np\n import pandas as pd\n end_date = pd.to_datetime(end_date)\n start_date = pd.to_datetime(start_date)\n s_year = start_date.year\n e_year = end_date.year\n years = np.arange(s_year, e_year + 1)\n dates = [start_date.replace(year=x) for x in years]\n if (end_date - dates[-1]).days > 0:\n dates.append(end_date)\n return dates\n\n logger = logging.getLogger('ims_downloader')\n myToken = 'f058958a-d8bd-47cc-95d7-7ecf98610e47'\n headers = {'Authorization': 'ApiToken ' + myToken}\n r = requests.get('https://api.ims.gov.il/v1/envista/stations/',\n headers=headers)\n stations_10mins = pd.DataFrame(r.json())\n meta = {}\n st_name = stations_10mins['name'].where(\n stations_10mins['stationId'] == stationid).dropna()\n location = stations_10mins['location'].where(\n stations_10mins['stationId'] == stationid).dropna()\n active = stations_10mins['active'].where(\n stations_10mins['stationId'] == stationid).dropna()\n meta['name'] = '-'.join(st_name.iloc[0].split())\n meta['id'] = stationid\n meta['loc'] = location.iloc[0]\n meta['active'] = active.iloc[0]\n r_early = 
requests.get('https://api.ims.gov.il/v1/envista/stations/' +\n str(stationid) + '/data/earliest', headers=headers)\n r_late = requests.get('https://api.ims.gov.il/v1/envista/stations/' +\n str(stationid) + '/data/latest', headers=headers)\n data = r_early.json()['data'][0]\n if update is not None:\n earliest = update + pd.Timedelta(10, unit='m')\n else:\n earliest = pd.to_datetime(data['datetime']).strftime('%Y-%m-%d')\n data = r_late.json()['data'][0]\n latest = pd.to_datetime(data['datetime']).strftime('%Y-%m-%d')\n # check if trying to update stations in the same day:\n if earliest == latest:\n logger.error('Wait for at least one day before trying to update...')\n logger.info(\n 'Downloading station {} with id: {}, from {} to {}'.format(\n st_name.values[0],\n stationid,\n earliest,\n latest))\n # one channel download:\n if channel_name is not None:\n channel_id = [x['id'] for x in data['channels']\n if x['name'] == channel_name]\n if channel_id:\n logger.info('getting just {} channel with id: {}'.format(channel_name,\n channel_id[0]))\n ch_id = channel_id[0]\n dates = get_dates_list(earliest, latest)\n df_list = []\n for i in range(len(dates) - 1):\n first_date = dates[i].strftime('%Y/%m/%d')\n last_date = dates[i + 1].strftime('%Y/%m/%d')\n logger.info('proccesing dates: {} to {}'.format(first_date,\n last_date))\n dl_command = ('https://api.ims.gov.il/v1/envista/stations/' +\n str(stationid) + '/data/' + str(ch_id) +\n '?from=' + first_date + '&to=' + last_date)\n try:\n r = requests.get(dl_command, headers=headers)\n except SSLError:\n logger.warning('SSLError')\n r = requests.get(dl_command, headers=headers)\n if r.status_code == 204: # i.e., no content:\n logger.warning('no content for this search, skipping...')\n continue\n logger.info('parsing to dataframe...')\n df_list.append(parse_ims_to_df(r.json()['data'], channel_name))\n logger.info('concatanating df and transforming to xarray...')\n try:\n df_all = pd.concat(df_list)\n except ValueError:\n logger.warning('no new data on station {}.'.format(stationid))\n return None\n # only valid results:\n # df_valid = df_all[df_all['valid']]\n df_all.index.name = 'time'\n # remove duplicated index values:\n df_all = df_all[~df_all.index.duplicated()]\n first = df_all.index[0]\n last = df_all.index[-1]\n new_index = pd.date_range(first, last, freq='10min')\n df_all = df_all.reindex(new_index)\n valid_name = channel_name + '_valid'\n value_name = channel_name + '_value'\n df_all[valid_name].fillna(False, inplace=True)\n # replace non valid measurments with nans\n new_vals = df_all[value_name].where(df_all[valid_name])\n df_all[value_name] = new_vals\n df_all.index.name = 'time'\n da = to_dataarray(df_all, meta)\n if update is not None:\n return da\n else:\n filename = '_'.join(['-'.join(meta['name'].split(' ')), str(meta['id']), channel_name,\n '10mins']) + '.nc'\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in da.to_dataset().data_vars}\n logger.info('saving to {} to {}'.format(filename, savepath))\n da.to_netcdf(savepath / filename, 'w', encoding=encoding)\n # print('done!')\n # all channels download add support here:\n elif channel_name is None:\n logger.info('getting all channels...')\n dates = get_dates_list(earliest, latest)\n df_list = []\n for i in range(len(dates) - 1):\n first_date = dates[i].strftime('%Y/%m/%d')\n last_date = dates[i + 1].strftime('%Y/%m/%d')\n logger.info('proccesing dates: {} to {}'.format(first_date,\n last_date))\n dl_command = 
('https://api.ims.gov.il/v1/envista/stations/' +\n str(stationid) + '/data?from=' + first_date +\n '&to=' + last_date)\n r = requests.get(dl_command, headers=headers)\n if r.status_code == 204: # i.e., no content:\n logger.warning('no content for this search, skipping...')\n break\n logger.info('parsing to dataframe...')\n df_list.append(parse_ims_to_df(r.json()['data'], None))\n return\n\n\ndef download_all_10mins_ims(savepath, channel_name='TD'):\n \"\"\"download all 10mins stations per specified channel, updateing fields is\n automatic\"\"\"\n from aux_gps import path_glob\n import xarray as xr\n import logging\n logger = logging.getLogger('ims_downloader')\n glob = '*_{}_10mins.nc'.format(channel_name)\n files = sorted(path_glob(savepath, glob, return_empty_list=True))\n files = [x for x in files if x.is_file()]\n if files:\n time_dim = list(set(xr.open_dataarray(files[0]).dims))[0]\n last_dates = [check_ds_last_datetime(xr.open_dataarray(x)) for x in files]\n st_id_downloaded = [int(x.as_posix().split('/')[-1].split('_')[1]) for x in files]\n d = dict(zip(st_id_downloaded, last_dates))\n stations = ims_api_get_meta(active_only=True, channel_name=channel_name)\n for index, row in stations.iterrows():\n st_id = row['stationId']\n if st_id not in d.keys():\n download_ims_single_station(savepath=savepath,\n channel_name=channel_name,\n stationid=st_id, update=None)\n elif st_id in d.keys():\n logger.info('updating station {}...'.format(st_id))\n da = download_ims_single_station(savepath=savepath,\n channel_name=channel_name,\n stationid=st_id, update=d[st_id])\n if da is not None:\n file = path_glob(savepath, '*_{}_{}_10mins.nc'.format(st_id, channel_name))[0]\n da_old = xr.load_dataarray(file)\n da = xr.concat([da, da_old], time_dim)\n filename = '_'.join(['-'.join(row['name'].split(' ')), str(st_id), channel_name,\n '10mins']) + '.nc'\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in da.to_dataset().data_vars}\n logger.info('saving to {} to {}'.format(filename, savepath))\n try:\n da.to_netcdf(savepath / filename, 'w', encoding=encoding)\n except PermissionError:\n (savepath / filename).unlink()\n da.to_netcdf(savepath / filename, 'w', encoding=encoding)\n # print('done!')\n else:\n logger.warning('station {} is already in {}, skipping...'.format(st_id,\n savepath))\n return\n\n\nif __name__ == '__main__':\n import argparse\n import sys\n from ims_procedures import ims_api_get_meta\n from pathlib import Path\n from aux_gps import configure_logger\n logger = configure_logger('ims_downloader')\n channels = ['BP', 'DiffR', 'Grad', 'NIP', 'Rain', 'RH', 'STDwd', 'TD',\n 'TDmax', 'TDmin', 'TG', 'Time', 'WD', 'WDmax', 'WS', 'WS10mm',\n 'WS1mm', 'WSmax']\n savepath = Path('/home/ziskin/Work_Files/PW_yuval/IMS_T/10mins')\n parser = argparse.ArgumentParser(description='a command line tool for downloading all 10mins stations from the IMS with specific variable')\n optional = parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n # remove this line: optional = parser...\n required.add_argument('--savepath', help=\"a full path to download the files, e.g., /home/ziskin/Work_Files/PW_yuval/IMS_T/10mins\", type=check_path)\n required.add_argument('--channel', help=\"10 mins channel name , e.g., TD, BP or RH\",\n choices=channels)\n required.add_argument('--delete', action='store_true') # its False\n #optional.add_argument('--station', nargs='+',\n # help='GPS station name, 4 UPPERCASE letters',\n # type=check_station_name)\n# 
metavar=str(cds.start_year) + ' to ' + str(cds.end_year))\n# optional.add_argument('--half', help='a spescific six months to download,\\\n# e.g, 1 or 2', type=int, choices=[1, 2],\n# metavar='1 or 2')\n parser._action_groups.append(optional) # added this line\n args = parser.parse_args()\n # print(parser.format_help())\n# # print(vars(args))\n if args.savepath is None:\n print('savepath is a required argument, run with -h...')\n sys.exit()\n# elif args.field is None:\n# print('field is a required argument, run with -h...')\n# sys.exit()\n if args.channel is not None and not args.delete:\n download_all_10mins_ims(args.savepath, channel_name=args.channel)\n logger.info('Done!')\n elif args.delete:\n generate_delete(args.savepath, args.channel)\n else:\n raise ValueError('need to specify channel name!')\n","sub_path":"ims_download_all_script.py","file_name":"ims_download_all_script.py","file_ext":"py","file_size_in_byte":16538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66454336","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Han Xiao \n\n# NOTE: First install bert-as-service via\n# $\n# $ pip install bert-serving-server\n# $ pip install bert-serving-client\n# $\n\n# simple similarity search on FAQ\n\n# from MulticoreTSNE import MulticoreTSNE as TSNE\n\nwith open('README.md') as fp:\n data = [v for v in fp if v.strip() and v.startswith('#####')]\n","sub_path":"example/example8.py","file_name":"example8.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"435954934","text":"\"\"\"\n동적 속성과 프로퍼티\n- 프로퍼티를 통해 공개 데이터 속성을 클래스의 공개 인터페이스로 노출시키는 것은 완전히 안전하며,\n 실제로 그렇게 하도록 권장할 수 있다는 것이 프로퍼티의 가장 중요한 점이다\n\"\"\"\n\"\"\"\n- 파이썬에서는 데이터 속성과 메서드를 통틀어 속성이라고 한다\n- 메서드는 단지 호출할 수 있는 속성일 뿐이다\n- 데이터 속성과 메스드 외에도 프로퍼티를 정의할 수 있따\n- 프로퍼터리를 사용하면 클래스 인터페이스를 변경하지 않고도 공개 데이터 속성을 접근자 메서드(게터, 세터)로 대체할 수 있다\n - 통일된 접근 원칙에도 부합한다 \n - 모듈이 제공하는 모든 서비스는 통일된 표기법을 이용해서 접근할 수 있어야 한다. 통일된 표기법은 저장소를 이용해서 구현하거나\n 계산을 통해 구현하는 경우에도 모두 동일하게 적용된다(통일된 접근 원칙) \n \n- 프로퍼티 외에도 파이썬은 속성에 대한 접근을 제어하고 동적 속성을 구현할 수 있는 풍부한 API를 제공한다\n- 파이썬 인터프리터는 obj.attr과 같은 점 표기법으로 표현된 속성에 대한 접근을\n __getattr__과 __setattr__등 특별 메서드를 호출해서 평가한다. 
\n\"\"\"\n\n\"\"\"\n동적 속성을 이용한 데이터 랭글링\n\"\"\"\nfrom urllib.request import urlopen\nimport warnings\nimport os\nimport json\n\nURL = 'http://www.oreilly.com/pub/sc/osconfeed'\nJSON = '/Users/rayleigh/Desktop/Professional_Python/19_Meta_Programing/data/osconfeed.json'\n\n\ndef load():\n if not os.path.exists(JSON):\n msg = 'downloading {} to {}'.format(URL, JSON)\n warnings.warn(msg)\n\n with urlopen(URL) as remote, open(JSON, 'wb') as local:\n local.write(remote.read())\n\n with open(JSON) as fp:\n return json.load(fp)\n\n\nfeed = load()\nsorted(feed['Schedule'].keys())\nfor key, value in sorted(feed['Schedule'].items()):\n print('{:3} {}'.format(len(value), key))\n\nfrom collections import abc\n\n\nclass FrozenJSON:\n \"\"\"\n 점 표기법을 이용해서 JSON과 유사한 객체를 순회하는 읽기전용 퍼사드 클래스\n \"\"\"\n\n def __int__(self, mapping):\n self.__data = dict(mapping)\n\n def __getattr__(self, name):\n if hasattr(self.__data, name):\n return getattr(self.__data, name)\n else:\n return FrozenJSON.build(self.__data[name])\n\n @classmethod\n def build(cls, obj):\n if isinstance(obj, abc.Mapping):\n return cls(obj)\n elif isinstance(obj, abc.MutableSequence):\n return [cls.build(item) for item in obj]\n else:\n return obj\n\n\nraw_feed = load()\nfeed = FrozenJSON(raw_feed)\nsorted(feed.Schedule.keys()) # __getattr__때문에 가능하\nfor key, value in sorted(feed.Schedule.items()):\n print('{:3} {}'.format(len(value), key))\n\n\"\"\"\n- __getattr__ 특별 메소드는 속성을 가져오기 위한 일반적인 과정이 실패 할 때(즉, 지명한 속성을 객체, 클래스, 슈퍼클래스에서\n 찾지 못할때)만 인터프리터에서 호출한다\n\"\"\"","sub_path":"19_Meta_Programing/01_Getattr.py","file_name":"01_Getattr.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"220097386","text":"import json\nfrom functools import partial\nfrom datetime import datetime\n\nfrom flask_login import (\n LoginManager,\n login_required,\n current_user,\n)\nfrom flask import (\n Blueprint,\n jsonify,\n request,\n current_app,\n redirect,\n url_for,\n abort,\n)\nfrom oauthlib.oauth2 import WebApplicationClient\nimport jwt\n\nfrom .oauth2 import (\n GoogleOAuth2Client,\n FacebookOAuth2Client,\n)\nfrom .models import User\n\n\nlogin_manager = LoginManager()\nredis = None\n\ndef init_app(app):\n login_manager.init_app(app)\n\n global redis\n if app.testing:\n from fakeredis import FakeRedis\n redis = FakeRedis()\n else:\n from redis import Redis\n redis = Redis.from_url(app.config['REDIS_URL'])\n\n\n@login_manager.request_loader\ndef load_user_from_request(request):\n token = request.args.get('access_token', default='', type=str).strip()\n if not token:\n token = request.headers.get('authorization', '').replace('Bearer', '').strip()\n\n if not (token and redis.sismember('alive_token', token)):\n return None\n\n try:\n payload = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms='HS256')\n except jwt.DecodeError:\n return None\n\n user = User.query.get(payload['id'])\n return user\n\n\ndef validate_state(state):\n supported_providers = ('google', 'facebook')\n supported_actions = ('login', 'register')\n\n if state.get('action') not in supported_actions:\n return False\n if state.get('provider') not in supported_providers:\n return False\n\n return True\n\n\ndef handle_login(provider, userinfo, token_generator):\n user = User.query.filter(\n User.email == userinfo['email']\n ).first()\n if not user:\n abort(400, f'User {userinfo[\"email\"]} is not exist')\n\n dont_link_to_facebook = user.link_to_google and (not user.link_to_facebook and provider == 
'facebook')\n dont_link_to_google = user.link_to_facebook and (not user.link_to_google and provider == 'google')\n if dont_link_to_facebook or dont_link_to_google:\n abort(400, f'User {userinfo[\"email\"]} is not linked to { provider}')\n\n token = token_generator({\n 'id': user.id,\n 'name': user.name,\n 'email': user.email,\n 'iss': datetime.now().timestamp(),\n 'iat': 1000 * 60 * 60 * 24,\n })\n\n redis.sadd('alive_token', token)\n\n return {\n 'access_token': token,\n }\n\n\ndef handle_register(provider, userinfo, token_generator):\n user = User.query.filter(\n User.email == userinfo['email']\n ).first()\n\n if user:\n dont_link_to_facebook = user.link_to_google and (not user.link_to_facebook and provider == 'facebook')\n dont_link_to_google = user.link_to_facebook and (not user.link_to_google and provider == 'google')\n if dont_link_to_facebook or dont_link_to_google:\n # need user confirm by sending to processing link\n token = token_generator({\n 'id': user.id,\n 'name': user.name,\n 'email': user.email,\n 'iss': datetime.now().timestamp(),\n 'iat': 1000 * 60 * 5,\n })\n redis.sadd('alive_token', token)\n\n return {\n 'message': f'Email {userinfo[\"email\"]} was used by {user.name}. Do you link to the {provider} account',\n 'data': {\n 'link': url_for('auth.link_account', access_token=token, provider=provider, _external=True),\n }\n }\n\n abort(400, f'User {userinfo[\"email\"]} existed')\n\n new_user = User(\n email=userinfo['email'],\n name=userinfo['name'],\n )\n if provider == 'google':\n new_user.link_to_google = True\n elif provider == 'facebook':\n new_user.link_to_facebook = True\n new_user.save()\n return {'message': f'Create {userinfo[\"email\"]} successful'}\n\n\nbp = Blueprint('auth', __name__)\n\n\n@bp.route('/')\ndef handle_auth():\n redirect_uri = url_for('auth.oauth_callback', _external=True)\n state = request.args.to_dict()\n if not validate_state(state):\n abort(400, 'Action or Provider is invalid')\n\n provider = state.get('provider')\n if provider == 'google':\n return redirect(GoogleOAuth2Client.get_grant_request_url(\n current_app.config['GOOGLE_CLIENT_ID'],\n redirect_uri,\n ['openid', 'email', 'profile'],\n state,\n ))\n\n if provider == 'facebook':\n return redirect(FacebookOAuth2Client.get_grant_request_url(\n current_app.config['FACEBOOK_CLIENT_ID'],\n redirect_uri,\n ['email'],\n state,\n ))\n\n\n@bp.route('/callback')\ndef oauth_callback():\n state = json.loads(request.args.get('state', type=str))\n if not validate_state(state):\n abort(400, 'Action or Provider is invalid')\n\n provider = state['provider']\n code = request.args.get('code', type=str)\n if provider == 'google':\n userinfo = GoogleOAuth2Client.get_userinfo(\n current_app.config['GOOGLE_CLIENT_ID'],\n current_app.config['GOOGLE_CLIENT_SECRET'],\n code,\n request.url,\n )\n\n elif provider == 'facebook':\n userinfo = FacebookOAuth2Client.get_userinfo(\n current_app.config['FACEBOOK_CLIENT_ID'],\n current_app.config['FACEBOOK_CLIENT_SECRET'],\n code,\n request.url,\n )\n\n action = state.get('action')\n if action == 'login':\n handle = handle_login\n elif action == 'register':\n handle = handle_register\n\n token_generator = partial(jwt.encode, key=current_app.config['SECRET_KEY'],\n algorithm='HS256')\n return handle(provider, userinfo, token_generator)\n\n\n@bp.route('/link_account')\n@login_required\ndef link_account():\n provider = request.args.get('provider')\n if provider not in ('facebook', 'google'):\n abort(400, 'Provider was not supported')\n\n if provider == 'google' and 
current_user.link_to_google:\n abort(400, 'This account was linked to Google account')\n\n if provider == 'facebook' and current_user.link_to_facebook:\n abort(400, 'This access_token was linked to Facebook account')\n\n current_user.link_to_facebook = True\n current_app.link_to_google = True\n current_user.save()\n return {\n 'message': f'This account is linked to {provider}'\n }\n\n\n@bp.route('/logout')\n@login_required\ndef logout():\n token = request.headers.get('authorization').replace('Bearer', '').strip()\n redis.srem('alive_token', token)\n return {\n 'message': 'Logout successful',\n }\n","sub_path":"app/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":6649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"178889191","text":"\"\"\"\n Copyright (c) 2022 https://github.com/SpaceLearner/SessionRec-pytorch(MIT LISENCE), \n Intel made modification based on original MSGIFSR project, \n reserve partial copyright for all modifications.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of\n this software and associated documentation files (the \"Software\"), to deal in\n the Software without restriction, including without limitation the rights to\n use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n the Software, and to permit persons to whom the Software is furnished to do so,\n subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\nfrom concurrent.futures import process\nimport pandas as pd\nimport pickle as pkl\nimport logging\nimport timeit\nimport numpy as np\nfrom tqdm import tqdm\nimport scipy.stats as ss\nimport os\nimport datetime\n\nclass Timer:\n level = 1\n\n def __init__(self, name, level = 'INFO'):\n self.name = name\n self.level = 2\n if level == \"DEBUG\":\n self.level = 2\n if level == \"INFO\":\n self.level = 1\n if level == \"WARN\":\n self.level = 0\n\n def __enter__(self):\n self.start = timeit.default_timer()\n Timer.level += 1\n\n def __exit__(self, *a, **kw):\n Timer.level -= 1\n if self.level == 0:\n logging.warn(f'{\" \" * Timer.level}{self.name} took {timeit.default_timer() - self.start} sec')\n if self.level == 1:\n logging.info(f'{\" \" * Timer.level}{self.name} took {timeit.default_timer() - self.start} sec')\n if self.level == 2:\n logging.debug(f'{\" \" * Timer.level}{self.name} took {timeit.default_timer() - self.start} sec')\n\ndef load_file(file_path):\n if file_path.endswith(\".csv\"):\n data = pd.read_csv(file_path)\n elif file_path.endswith(\".parquet\"):\n data = pd.read_parquet(file_path)\n elif file_path.endswith(\".pkl\") or file_path.endswith(\".txt\"):\n with open(file_path, 'rb') as f:\n data = pkl.load(f)\n else:\n raise NotImplementedError(f\"Unable to load {file_path}\") \n return data\n\ndef convert_to_sparse_table(pdf, row_idx = 0, col_idx = 1, val_idx = 2):\n assert(isinstance(pdf, pd.DataFrame))\n assert(pdf.shape[1] == 3)\n keys = pdf.keys().tolist()\n num_rows = pdf[keys[row_idx]].max() + 1\n num_cols = pdf[keys[col_idx]].max() + 1\n sparse_table = [[0] * num_cols for i in range(num_rows)]\n for _, row in pdf.iterrows():\n sparse_table[row[keys[row_idx]]][row[keys[col_idx]]] = row[keys[val_idx]]\n res = np.array(sparse_table)\n return res\n\ndef preprocess(train_session, kg_df, item_features_extra_df, train_target, recent_n_month = -1, candidate_list = None, add_features = False, save_path = None, enable_weighted_loss = False, sort = False, train_click = False, return_df = False, get_session_under_5 = False, get_session_above_5 = False, extra_feat_key = [], predict = False, exclude_feat_ids = []):\n train_session = load_file(train_session)\n train_target = load_file(train_target) if train_target is not None else None\n if train_target is not None:\n train_target = train_target.rename(columns={'item_id': 'y'})\n train_target = train_target.rename(columns={'date': 'purchase_date'})\n train_target['purchase_date'] = pd.to_datetime(train_target[\"purchase_date\"])\n train_session['date'] = pd.to_datetime(train_session[\"date\"])\n \n divider = None\n if recent_n_month != -1 and train_target is not None:\n # get time divider\n max_time = train_target['purchase_date'].max()\n divider = max_time - pd.to_timedelta(int(31 * recent_n_month), unit='d')\n\n train_target = train_target[train_target['purchase_date'] > divider]\n train_session = train_session[train_session['date'] > divider]\n\n logging.info(\"Start to sort input data by session_id and date\")\n with Timer(\"took \"):\n train_session.sort_values([\"session_id\", \"date\"], inplace=True)\n\n logging.info(\"Start to add elapse to start time and end time feature\")\n with Timer(\"Took \"):\n grouped = 
train_session.groupby('session_id').agg(start_time=('date','min'), end_time=('date','max'))\n train_session = train_session.merge(grouped, on='session_id', how='left')\n train_session['elapse_to_start'] = ((train_session['date'] - train_session['start_time']).dt.seconds/60).astype(int)\n train_session['elapse_to_end'] = ((train_session['end_time'] - train_session['date']).dt.seconds/60).astype(int)\n train_session['binned_elapse_to_start'] = pd.cut(train_session['elapse_to_start'], [-1, 0, 3, 15, 1434]).cat.codes\n train_session['binned_elapse_to_end'] = pd.cut(train_session['elapse_to_end'], [-1, 0, 3, 16, 1434]).cat.codes\n\n logging.info(\"Start to combine same session as one record\")\n with Timer(\"took \"):\n processed = train_session.groupby(\"session_id\", as_index = False).agg({'item_id':lambda x: list(x), 'binned_elapse_to_start':lambda x: list(x), 'binned_elapse_to_end':lambda x: list(x),})\n if train_target is not None:\n processed = train_target.merge(processed, how=\"inner\", on=\"session_id\")\n print(f\"merged to target, length is {len(processed)}\")\n\n # add feature column\n if add_features:\n logging.info(\"Start to add features to each item\")\n with Timer(\"took \"):\n kg_df['feature_category_id'] = kg_df['feature_category_id'].astype(\"string\")\n kg_df['feature_value_id'] = kg_df['feature_value_id'].astype(\"string\")\n kg_df[\"feature_merge\"] = \"f_\" + kg_df['feature_category_id'] + \"=\" + kg_df['feature_value_id']\n codes, uniques = pd.factorize(kg_df[\"feature_merge\"])\n # categorify all features in item_features\n kg_df[\"feature\"] = pd.Categorical(codes, categories=range(len(uniques)))\n num_unique_features = len(uniques)\n print(f\"num_unique_features is {num_unique_features}\")\n kg_feat_dict = dict()\n kg_feat_cat_dict = dict()\n for row in kg_df.to_dict('records'):\n if row['item_id'] not in kg_feat_dict:\n kg_feat_dict[row['item_id']] = []\n kg_feat_dict[row['item_id']].append(row['feature'])\n if row['item_id'] not in kg_feat_cat_dict:\n kg_feat_cat_dict[row['item_id']] = []\n kg_feat_cat_dict[row['item_id']].append(int(row['feature_category_id']))\n\n # map features to processed\n feature_list_series = []\n feature_cat_list_series = []\n for idx, item_id_list in tqdm(processed[\"item_id\"].items(), total = len(processed[\"item_id\"])):\n item_feature_list = []\n item_feature_cat_list = []\n for item_id in item_id_list:\n # we need to add item feature and other created features\n item_feature_list.append(kg_feat_dict[item_id])\n item_feature_cat_list.append(kg_feat_cat_dict[item_id])\n feature_list_series.append(item_feature_list)\n feature_cat_list_series.append(item_feature_cat_list)\n processed[\"feature\"] = pd.Series(feature_list_series)\n processed[\"feature_cat\"] = pd.Series(feature_cat_list_series)\n\n else:\n num_unique_features = -1\n\n # add weighted factor for session based on ts\n if train_target is not None and enable_weighted_loss:\n logging.info(\"Start to get the weighted factor for session based on ts\")\n total_duration = pd.to_datetime(\"2021/06/30\") - pd.to_datetime(\"2020/01/01\")\n start_ts = pd.to_datetime(\"2020/01/01\")\n with Timer(\"took \"):\n weighted_factor_list_series = []\n for _, ts in tqdm(processed[\"purchase_date\"].items(), total=len(processed[\"purchase_date\"])):\n weighted_factor_list_series.append((ts - start_ts) / (2 * total_duration) + 0.5)\n processed[\"wf\"] = pd.Series(weighted_factor_list_series)\n\n if not add_features:\n processed[\"feature\"] = pd.Series([None] * len(processed))\n 
processed[\"feature_cat\"] = pd.Series([None] * len(processed))\n\n if train_target is None or not enable_weighted_loss:\n processed[\"wf\"] = pd.Series([0] * len(processed))\n\n if candidate_list and train_target is not None:\n processed = processed[processed[\"y\"].isin(candidate_list)]\n\n if train_target is None:\n processed['y'] = pd.Series([None] * len(processed))\n processed['purchase_date'] = pd.Series([None] * len(processed))\n \n processed = processed[[\"item_id\", \"y\", \"session_id\", \"feature\", \"feature_cat\", 'binned_elapse_to_start', 'binned_elapse_to_end', \"purchase_date\", \"wf\"]]\n if save_path and train_target is not None:\n processed.to_parquet(save_path, compression = None)\n if sort:\n processed = processed.sort_values('purchase_date')\n\n # exclude some feature if configured\n extra_feat_key_1 = [key for key in extra_feat_key if key not in ['binned_elapse_to_end', 'binned_elapse_to_start']]\n extra_feat_key_2 = [key for key in extra_feat_key if key in ['binned_elapse_to_end', 'binned_elapse_to_start']]\n processed, num_unique_features = add_extra(processed, num_unique_features, extra_feat_key_1, item_features_extra_df, divider = divider)\n processed, num_unique_features = add_sesstime(processed, num_unique_features, extra_feat_key_2)\n processed = exclude_feat(processed, exclude_feat_ids)\n \n if train_click:\n pretrain_file = f\"{save_path[:-8]}.click.parquet\"\n processed = add_clicks(processed)\n\n if return_df:\n return processed\n\n if get_session_under_5:\n processed['len_session'] = processed['item_id'].apply(lambda x: len(x))\n processed = processed[processed['len_session'] <= 5]\n\n if get_session_above_5:\n processed['len_session'] = processed['item_id'].apply(lambda x: len(x))\n processed = processed[processed['len_session'] > 5]\n\n pd.set_option('display.max_columns', None)\n print(processed)\n\n return processed[[\"item_id\", \"y\", \"session_id\", \"feature\", \"feature_cat\", \"wf\"]].to_numpy().tolist(), num_unique_features\n\n \ndef load_preprocessed(file_path, item_features_extra_df_orig, recent_n_month = -1, candidate_list = None, add_features = False, sort = False, train_click = False, get_session_under_5 = False, get_session_above_5 = False, extra_feat_key = [], predict = False, exclude_feat_ids = []):\n item_features_extra_df = item_features_extra_df_orig\n pd.set_option('display.max_columns', None)\n process_pretrain = train_click\n print(f\"Loading {file_path}\")\n with Timer(\"\"):\n processed = load_file(file_path)\n divider = None\n if recent_n_month != -1:\n # get time divider\n max_time = processed['purchase_date'].max()\n divider = max_time - pd.to_timedelta(int(31 * recent_n_month), unit='d')\n processed = processed[processed['purchase_date'] > divider]\n\n if candidate_list:\n processed = processed[processed[\"y\"].isin(candidate_list)]\n if not add_features:\n print(processed.shape)\n processed[\"feature\"] = pd.Series([None] * len(processed[\"feature\"]))\n processed[\"feature_cat\"] = pd.Series([None] * len(processed[\"feature\"]))\n num_unique_features = -1\n else:\n num_unique_features = 904\n extra_feat_key_1 = [key for key in extra_feat_key if key not in ['binned_elapse_to_end', 'binned_elapse_to_start']]\n extra_feat_key_2 = [key for key in extra_feat_key if key in ['binned_elapse_to_end', 'binned_elapse_to_start']]\n processed, num_unique_features = add_extra(processed, num_unique_features, extra_feat_key_1, item_features_extra_df, divider = divider)\n processed, num_unique_features = add_sesstime(processed, 
num_unique_features, extra_feat_key_2)\n processed = exclude_feat(processed, exclude_feat_ids)\n\n if train_click and process_pretrain:\n processed = add_clicks(processed)\n\n if sort:\n processed['index'] = processed.index\n processed = processed.sort_values(['purchase_date', 'index']).drop(columns=['index'])\n\n if get_session_under_5:\n processed['len_session'] = processed['item_id'].apply(lambda x: len(x))\n processed = processed[processed['len_session'] <= 5]\n\n if get_session_above_5:\n processed['len_session'] = processed['item_id'].apply(lambda x: len(x))\n processed = processed[processed['len_session'] > 5]\n\n #pd.set_option('display.max_colwidth', 200)\n print(processed)\n #processed[[\"item_id\", \"y\", \"session_id\", \"feature\", \"feature_cat\", \"wf\"]].to_parquet('tmp_processed.parquet')\n return processed[[\"item_id\", \"y\", \"session_id\", \"feature\", \"feature_cat\", \"wf\"]].to_numpy().tolist(), num_unique_features\n\ndef add_clicks(processed):\n print(\"Start to expand current data with clicks, may take couple of minutes ...\")\n with Timer(\"\"):\n concat_list = [processed]\n aug_processed = pd.concat(concat_list).reset_index(drop=True)\n to_zip = [aug_processed['item_id'].to_list(), aug_processed['y'].to_list()]\n\n item_id_list = []\n for item_id, y in tqdm(zip(*to_zip), total = len(aug_processed['item_id'])):\n if not isinstance(item_id, list):\n item_id = item_id.tolist()\n item_id_new = item_id + [y]\n len_item_id_new = len(item_id_new)\n item_id_new = [item_id_new[:num_item] for num_item in range(2, len_item_id_new + 1)]\n item_id_list.append(item_id_new)\n aug_processed['item_id'] = pd.Series(item_id_list)\n aug_processed = aug_processed.explode('item_id').dropna(subset=['item_id']).reset_index(drop=True)\n\n item_id_list = []\n y_list = []\n feature_list = []\n feature_cat_list = []\n to_zip = [aug_processed['item_id'].to_list(), aug_processed['feature'].to_list(), aug_processed['feature_cat'].to_list()]\n for item_id_new, feature, feature_cat in tqdm(zip(*to_zip), total = len(aug_processed['item_id'])):\n if len(item_id_new) >= 2:\n item_id_list.append(item_id_new[:-1])\n y_list.append(item_id_new[-1])\n else:\n item_id_list.append(None)\n y_list.append(None)\n feature_list.append(feature[:len(item_id_new[:-1])])\n feature_cat_list.append(feature_cat[:len(item_id_new[:-1])])\n\n aug_processed['item_id'] = pd.Series(item_id_list)\n aug_processed['y'] = pd.Series(y_list)\n aug_processed['feature'] = pd.Series(feature_list)\n aug_processed['feature_cat'] = pd.Series(feature_cat_list)\n\n processed = aug_processed[[\"item_id\", \"y\", \"session_id\", \"feature\", \"feature_cat\", \"purchase_date\", \"wf\"]].reset_index(drop=True)\n #processed.to_parquet(pretrain_file)\n return processed\n\ndef add_extra(processed, num_unique_features, extra_feat_key, item_features_extra_df_orig, divider = None):\n if extra_feat_key is None or len(extra_feat_key) == 0:\n return processed, num_unique_features\n print(f\"Start to add {extra_feat_key} to original 904 features\")\n processed.reset_index(drop=True, inplace=True)\n item_features_extra_df = item_features_extra_df_orig.copy()\n num_feats = num_unique_features\n for feat_name in extra_feat_key:\n # because minimun value for one feature can be -1, add num_feats with 1 firstly\n num_feats += 1 #905\n # [ ...... 
]904 + [-1, 0, 1, 2, 3] 909\n item_features_extra_df[feat_name] = item_features_extra_df[feat_name] + num_feats\n default = num_feats - 1\n print(f\"num_feats is {num_feats}\")\n num_feats = (item_features_extra_df[feat_name].max() + 1)\n print(f\"after add extra, num_feats is {num_feats}\")\n item_feat_dict = dict((iid, fid) for iid, fid in zip(item_features_extra_df['item_id'].to_list(), item_features_extra_df[feat_name].to_list()))\n new_feature = []\n for k, x in tqdm(zip(processed['item_id'].to_list(), processed['feature'].to_list()), total = len(processed[\"feature\"])):\n k = format_list(k)\n x = format_list(x)\n assert(len(k) == len(x))\n new_feature.append([format_list(fl) + [item_feat_dict[iid] if iid in item_feat_dict else default] for iid, fl in zip(k, x)])\n processed['feature'] = pd.Series(new_feature)\n num_unique_features = num_feats\n return processed, num_unique_features\n\ndef add_sesstime(processed, num_unique_features, extra_feat_key):\n if extra_feat_key is None or len(extra_feat_key) == 0:\n return processed, num_unique_features\n print(f\"Start to add {extra_feat_key} to original 904 features\")\n processed.reset_index(drop=True, inplace=True)\n num_feats = num_unique_features\n for feat_name in extra_feat_key:\n new_max = num_feats + 4\n # because minimun value for one feature can be -1, add num_feats with 1 firstly\n # [ ...... ]904 + [0, 1, 2, 3] 909\n print(f\"num_feats is {num_feats}\")\n new_feature = []\n for x, fl in tqdm(zip(processed['feature'].to_list(), processed[feat_name].to_list()), total = len(processed[\"feature\"])):\n x = format_list(x)\n fl = [f + num_feats for f in fl]\n new_feature.append([format_list(orig_fl) + [f] for orig_fl, f in zip(x, fl)])\n processed['feature'] = pd.Series(new_feature)\n num_feats = new_max\n print(f\"after add extra, num_feats is {num_feats}\")\n num_unique_features = num_feats\n return processed, num_unique_features\n\ndef exclude_feat(processed, exclude_feat_ids):\n if exclude_feat_ids is None or len(exclude_feat_ids) == 0:\n return processed\n if len(exclude_feat_ids) == 0:\n return processed\n print(f\"Start to exclude feature {exclude_feat_ids} in original 904 features\")\n processed.reset_index(drop=True, inplace=True)\n new_feature = []\n new_feature_cat = []\n for k, x in tqdm(zip(processed['feature'].to_list(), processed['feature_cat'].to_list()), total = len(processed[\"feature\"])):\n k = format_list(k)\n x = format_list(x)\n # k is [[feat0, feat8, feat1], [feat2, feat1, feat3], ... 
]\n # x is [[cat0, cat1, cat3], [cat3, cat3, cat8], ...]\n assert(len(k) == len(x))\n new_fl = []\n new_cl = []\n for fl, cl in zip(k, x):\n # fl is [feat0, feat8, feat1]\n # cl is [cat0, cat1, cat3]\n fl = format_list(fl)\n cl = format_list(cl)\n new_fl.append([f for f in fl if f not in exclude_feat_ids])\n new_cl.append([c for f, c in zip(fl, cl) if f not in exclude_feat_ids])\n new_feature.append(new_fl)\n new_feature_cat.append(new_cl)\n processed['feature'] = pd.Series(new_feature)\n processed['feature_cat'] = pd.Series(new_feature_cat)\n return processed\n\ndef format_list(l):\n return l.tolist() if not isinstance(l, list) else l\n\ndef get_exclude_feat_list(categorical_item_features_df, exclude_feat_id):\n if exclude_feat_id is None:\n return []\n #exclude_col_name = \"binned_feat_count\"\n exclude_col_name = \"feature_category_id\"\n grouped = categorical_item_features_df.groupby(exclude_col_name, as_index = False).agg({'feature': lambda x: list(x)})\n tmp_dict = dict((cnt, fl) for cnt, fl in zip(grouped[exclude_col_name].to_list(), grouped['feature'].to_list()))\n exclude_feat_ids = []\n [exclude_feat_ids.extend(tmp_dict[int(i)]) for i in exclude_feat_id]\n return exclude_feat_ids\n\ndef bin_item_category(item_features_extra_df, num_cate = 100):\n if num_cate == -1:\n codes, uniques = pd.factorize(item_features_extra_df['itemcat_fp'])\n item_features_extra_df['itemcat_fp'] = pd.Series(codes, index=item_features_extra_df.index)\n return item_features_extra_df\n from sklearn.cluster import AgglomerativeClustering\n X = np.array(item_features_extra_df['itemcat_fp'].apply(lambda x: eval(x)).to_list())\n clustering = AgglomerativeClustering(n_clusters=num_cate).fit(X)\n item_features_extra_df['itemcat_fp'] = pd.Series(clustering.labels_, index=item_features_extra_df.index)\n return item_features_extra_df\n","sub_path":"v3_enhanced/src/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"403284041","text":"import yaml\n\nfrom st2tests.base import BaseActionTestCase\n\nfrom expand_repo_name import ExpandRepoName\n\n__all__ = [\n 'ExpandRepoNameTestCase'\n]\n\nMOCK_CONFIG_BLANK = \"\"\n\nMOCK_CONFIG_BLANK_REPOSITORIES = \"repositories:\"\n\nMOCK_CONFIG_FULL= \"\"\"\nrepositories:\n st2contrib:\n repo: \"https://github.com/StackStorm/st2contrib.git\"\n subtree: true\n auto_deployment:\n branch: \"master\"\n notify_channel: \"community\"\n\n st2incubator:\n repo: \"https://github.com/StackStorm/st2incubator.git\"\n subtree: true\n auto_deployment:\n branch: \"master\"\n notify_channel: \"community\"\n\"\"\"\n\nclass ExpandRepoNameTestCase(BaseActionTestCase):\n action_cls = ExpandRepoName\n\n def test_run_config_blank(self):\n config = yaml.safe_load(MOCK_CONFIG_BLANK)\n action = self.get_action_instance(config=config)\n\n self.assertRaises(Exception, action.run,\n repo_name=\"st2contrib\")\n\n def test_run_repositories_blank(self):\n config = yaml.safe_load(MOCK_CONFIG_BLANK_REPOSITORIES)\n action = self.get_action_instance(config=config)\n\n self.assertRaises(Exception, action.run,\n repo_name=\"st2contrib\")\n\n def test_run_st2contrib_expands(self):\n config = yaml.safe_load(MOCK_CONFIG_FULL)\n action = self.get_action_instance(config=config)\n\n expected = {'repo_url': 'https://github.com/StackStorm/st2contrib.git', 'subtree': True}\n\n result = action.run(repo_name=\"st2contrib\")\n self.assertEqual(result, expected)\n\n def 
test_run_st2incubator_expands(self):\n config = yaml.safe_load(MOCK_CONFIG_FULL)\n action = self.get_action_instance(config=config)\n\n expected = {'repo_url': 'https://github.com/StackStorm/st2incubator.git', 'subtree': True}\n\n result = action.run(repo_name=\"st2incubator\")\n self.assertEqual(result, expected)\n","sub_path":"contrib/packs/tests/test_action_expand_repo_name.py","file_name":"test_action_expand_repo_name.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"639771241","text":"from django import forms\nfrom movie.models import Movies, Wants\nfrom movie.utils import *\n\n\nclass AddMovieForm(forms.Form):\n kinopoisk_ref = forms.CharField(label='Ссылка на Кинопоиск', widget=forms.TextInput)\n want = forms.IntegerField(label='Ожидание', widget=forms.NumberInput, min_value=1, max_value=10)\n\n def __init__(self, user, *args, **kwargs):\n self._user = user\n super(AddMovieForm, self).__init__(*args, **kwargs)\n\n def clean_kinopoisk_ref(self):\n url = self.cleaned_data['kinopoisk_ref']\n if not is_correct_url(url):\n raise forms.ValidationError(u'Введённая ссылка некорректна', code=12)\n return url\n\n def save(self):\n offer = self.cleaned_data\n\n kinopoisk_parser = KinopoiskParser(self.cleaned_data['kinopoisk_ref'])\n movie_name = kinopoisk_parser.get_movie_title()\n movie_name_en = kinopoisk_parser.get_movie_title_en()\n rating_kinopoisk = kinopoisk_parser.get_movie_rating()\n\n rating_imdb = imdb_parser(movie_name_en)\n\n Movies.objects.create(movie_name=movie_name, movie_name_en=movie_name_en, rating_kinopoisk=rating_kinopoisk,\n rating_imdb=rating_imdb, user=self._user)\n Wants.objects.create(movie_name=movie_name, user=self._user, want=self.cleaned_data['want'])\n rating_recalculation(movie_name)\n\n return offer\n\n\nclass AddWantForm(forms.Form):\n movie_name = forms.ModelChoiceField(queryset=Movies.objects.all().order_by('movie_name'))\n want = forms.IntegerField(label='Ожидание', widget=forms.NumberInput, min_value=1, max_value=10)\n\n def __init__(self, user, *args, **kwargs):\n self._user = user\n super(AddWantForm, self).__init__(*args, **kwargs)\n\n def clean_want(self):\n want = self.cleaned_data['want']\n double_expectation = Wants.objects.filter(user=self._user,\n movie_name=self.cleaned_data['movie_name'])\n if double_expectation:\n raise forms.ValidationError(u'Попытка повторного голосования', code=12)\n return want\n\n def save(self):\n rating_recalculation(self.cleaned_data['movie_name'])\n Wants.objects.create(movie_name=self.cleaned_data['movie_name'],\n user=self._user, want=self.cleaned_data['want'])\n return True\n","sub_path":"movie/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"579964392","text":"# -*- coding: utf8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom .Cache.Cache import Cache\n\n\nclass Crawler(object):\n\n def __init__(self, base_url, filtering_policy):\n\n self._headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'\n }\n\n self._session = requests.session()\n self._cache = Cache()\n self._filtered_cache = Cache()\n self._base_url = base_url\n self._filtering_policy = 
filtering_policy\n\n def get_content(self, url):\n \"\"\"\n\n :param url:\n :return:\n \"\"\"\n beautified_response = self._cache.get(url)\n if beautified_response is None:\n response = self._session.get(url, headers=self._headers)\n beautified_response = BeautifulSoup(response.text)\n self._cache.put(url, beautified_response)\n\n return beautified_response\n\n def get_filtered_content(self, url):\n \"\"\"\n\n :param url:\n :return:\n \"\"\"\n filtered_response = self._filtered_cache.get(url)\n if filtered_response is None:\n beautified_response = self.get_content(url)\n filtered_response = self._filtering_policy.filter(beautified_response)\n self._filtered_cache.put(url, filtered_response)\n return filtered_response\n","sub_path":"Crawler/UrlCrawler.py","file_name":"UrlCrawler.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"279775290","text":"import requests\nfrom datetime import date\n\n\n# Задание №3 - Получение вопросов с форума StackOverflow\nclass StackOverflow():\n def __init__(self, fromdate, todate, tagged, site):\n self.fromdate = fromdate\n self.todate = todate\n self.tagged = tagged\n self.site = site\n\n def get_questions(self):\n url = 'https://api.stackexchange.com/2.3/questions'\n params = {\n 'fromdate': self.fromdate,\n 'todate': self.todate,\n 'tagged': self.tagged,\n 'site': self.site\n }\n response = requests.get(url=url, params=params)\n if response.status_code == 200:\n return response.json()\n else:\n return 'Bad request'\n","sub_path":"stackoverflow.py","file_name":"stackoverflow.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582153022","text":"import requests\nimport json\n\n\ndef retr_itemkeyvalues(key_name, fileout=False):\n keyvaluelist = [] # for storing the values of provide key\n # for numbering keyvalue list\n for data in git_starred_py_dict['items']:\n # storing key value in a variable and appending to the list\n # print(type(data))\n # print(type(data[key_name]))\n value_str = data[key_name]\n keyvaluelist.append(value_str)\n\n # write in file\n if fileout:\n\n with open(str(key_name + '.txt'), 'w+') as outfile:\n file_num = 0\n for i in keyvaluelist:\n try:\n outfile.write(f\"{file_num}. {str(i)} \\n\")\n except:\n outfile.write(f\"{file_num}. 
BLANK BLANK \\n\")\n file_num += 1\n return keyvaluelist\n\n\ngit_starred_py_apirequrl = 'https://api.github.com/search/repositories?q=language:python&sort=s'\n\nv3_header = {'Accept': 'application/vnd.github.v3+json'}\n\n# request api\napireq = requests.get(git_starred_py_apirequrl, headers=v3_header)\n\nprint(f\"Status Code: {apireq.status_code}\")\n\n# retrieve info from json\ngit_starred_py_dict = apireq.json()\n\n# process results\nv = retr_itemkeyvalues('name', fileout=True)\n# for i in v:\n# print(i)\n","sub_path":"github_python_repos.py","file_name":"github_python_repos.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"33100701","text":"# -*- coding: utf-8 -*\r\n\r\nimport numpy as np\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.pylab as pl\r\nimport random\r\nfrom sklearn import *\r\nimport self_Logistic_Regression\r\n\r\n'''two ways to import data'''\r\ndef import_data(file_path):\r\n\tdataset = np.loadtxt(file_path,delimiter = ',')\r\n\tX = dataset[:,1:3]\r\n\ty = dataset[:,3]\r\n\t#print(X)\r\n\t#print(y)\r\n\tlist_file = []\r\n\twith open(file_path,'r') as csv_file:\r\n\t\tall_lines = csv.reader(csv_file)\r\n\t\tfor one_line in all_lines:\r\n\t\t\tlist_file.append(one_line)\r\n\tarr_file = np.array(list_file,dtype = np.float64)\r\n\tlabel = arr_file[:,3]\r\n\tdata = arr_file[:,1:-1]\r\n\r\n\t'''here you should randomize the index of data&label'''\r\n\t#numpy.nparray is not allowed to use median_variable to swap two rows or columns;you should use its special grammar\r\n\tdata_dim = len(data)\r\n\tfor data_iter in range(data_dim):\r\n\t\ttemp_swap = random.randint(data_iter,data_dim-1)\r\n\t\tdata[[data_iter,temp_swap],:] = data[[temp_swap,data_iter],:]\r\n\t\tlabel[[data_iter,temp_swap]] = label[[temp_swap,data_iter]]\r\n\r\n\t#print(data)\r\n\t#print(label)\r\n\r\n\t'''draw scatter_pic'''\r\n\tf1 = plt.figure(1)\r\n\tplt.title('logistic_regression')\r\n\tplt.xlabel('density')\r\n\tplt.ylabel('ratio_sugar')\r\n\tplt.scatter(data[label == 0,0],data[label == 0,1],marker = 'o',color = 'r',s = 100,label = 'bad')\r\n\tplt.scatter(data[label == 1,0],data[label == 1,1],marker = 'o',color = 'g',s = 100,label = 'good')\r\n\tplt.legend(loc = 'upper left') \r\n\tplt.show()\r\n\r\n\treturn data,label\r\n\r\n'''use sklearn lib for logistic regression'''\r\ndef sklearn_logistic_regression(X,y):\r\n\tX_train,X_test,y_train,y_test = model_selection.train_test_split(X,y,test_size = 0.5,random_state = 0)\r\n\r\n\tlog_model = linear_model.LogisticRegression()\r\n\tlog_model.fit(X_train,y_train)\r\n\r\n\ty_pred = log_model.predict(X_test)\r\n\r\n\tprint(metrics.confusion_matrix(y_test,y_pred))\r\n\tprint(metrics.classification_report(y_test,y_pred))\r\n\r\n\tprecision,recall,thresholds = metrics.precision_recall_curve(y_test,y_pred)\r\n\r\n\tf2 = plt.figure(2)\r\n\th = 0.001\r\n\tdelta = 0.1#used to restrict the space for pic\r\n\tx0_min,x0_max = X[:,0].min()-delta,X[:,0].max()+delta\r\n\tx1_min,x1_max = X[:,1].min()-delta,X[:,1].max()+delta\r\n\tx0,x1 = np.meshgrid(np.arange(x0_min,x0_max,h),np.arange(x1_min,x1_max,h))\r\n\r\n\tz = log_model.predict(np.c_[x0.ravel(),x1.ravel()])\r\n\r\n\tz = z.reshape(x0.shape)\r\n\tplt.contourf(x0,x1,z,cmap = 'viridis')# pl.cm.Paired\r\n\r\n\tplt.title('watermelon_3a')\r\n\tplt.xlabel('density')\r\n\tplt.ylabel('ratio_sugar')\r\n\tplt.scatter(X[y == 0,0], X[y == 0,1], marker = 'o', color = 'k', s=100, label = 'bad')\r\n\tplt.scatter(X[y == 
1,0], X[y == 1,1], marker = 'o', color = 'g', s=100, label = 'good')\r\n\tplt.show()\r\n\r\ndef self_LR(X,y):\r\n\tm,n = np.shape(X)\r\n\tX_ex = np.c_[X,np.ones(m)]#extend the variable matrix to x^0\r\n\tX_train,X_test,y_train,y_test = model_selection.train_test_split(X_ex,y,test_size = 0.5,random_state = 0)\r\n\r\n\t#using gradDescent to get the optimal param beta = [w,b]\r\n\tbeta = self_Logistic_Regression.gradDscent_1(X_train,y_train)\r\n\r\n\t#prediction,beta mapping to the model\r\n\ty_pred = self_Logistic_Regression.predict(X_test,beta)\r\n\r\n\tm_test = np.shape(X_test)[0]\r\n\t#calculation of confusion_matrix and prediction accuracy\r\n\tcfmat = np.zeros((2,2))\r\n\tfor i in range(m_test):\r\n\t\tif y_pred[i] == y_test[i] and y_test[i] == 0:\r\n\t\t\tcfmat[0,0] += 1\r\n\t\telif y_pred[i] == y_test[i] and y_test[i] == 1:\r\n\t\t\tcfmat[1,1] += 1\r\n\t\telif y_pred[i] == 0:\r\n\t\t\tcfmat[1,0] += 1\r\n\t\telif y_pred[i] == 1:\r\n\t\t\tcfmat[0,1] += 1\r\n\tprint(cfmat)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tdataset = import_data('D:\\\\FDU\\Template\\\\CS\\\\Machine Learning\\\\周志华西瓜书编程练习\\\\ch3线性模型\\\\3.3_logistic_regression\\\\watermelon_3a.csv')\r\n\tself_LR(dataset[0],dataset[1])\r\n\tsklearn_logistic_regression(dataset[0],dataset[1])","sub_path":"logistic_regression/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"348544953","text":"# The MIT License (MIT)\n\n# Copyright (c) 2020 HTW Dresden\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom machine import Pin\nimport time\nimport dlv_ps_ext as dlv_ps\n\nfrom machine import I2C\ni2c=I2C(-1, scl=Pin(25), sda=Pin(26), freq=400000) ## ESP32 Soft\n# i2c=I2C(1, scl=Pin(25), sda=Pin(26), freq=400000) ## ESP32 Hard\n\n# i2c=I2C(scl=Pin(5), sda=Pin(4), freq=400000) ## ESP8266 Soft\n\n# i2c=I2C(0, scl=Pin(Pin.PB_13), sda=Pin(Pin.PB_14), freq=400000) ## W600 Hard\n# i2c=I2C(-1, scl=Pin(Pin.PB_13), sda=Pin(Pin.PB_14), freq=400000) ## W600 Hard\n\n# i2c=I2C(scl=\"X1\", sda=\"X2\", freq=400000) ## PyBoard Soft\n# i2c=I2C(\"X\", freq=400000) ## PyBoard Hard\n# Pin(\"EN_3V3\")(1) # enable 3.3V Output on Pyboard D\n\n# i2c=I2C(0, pins=('P9','P10'), baudrate=400000) # Pycom Hard I2C\n# i2c=I2C(2, pins=('P9','P10'), baudrate=400000) # Pycom Soft I2C\n\n# import busio # Circuitpython\n# import board # Circuitpython\n# i2c = busio.I2C(board.SCL, board.SDA, frequency=400000) # Circuitpython Hard I2C\n# i2c.try_lock() # Circuitpython\n\n# i2c=I2C(1, freq=400000) # XBEE 3 Hard I2C\n\ndlv=dlv_ps.DLV_I2C(i2c, model=\"030g\", offset=1638)\n\n# from machine import SPI\n# spi =SPI(-1, baudrate=10000, sck=Pin(12, Pin.OUT), miso=Pin(13, Pin.IN), mosi=Pin(4), polarity=0, phase=0)\n# dlv=dlv_ps.DLV_SPI((spi, Pin(27, Pin.OUT),), model=\"015D\") # esp32 Soft SPI\n\n# dlv=dlv_ps.DLV_SPI((Pin(25, Pin.OUT), Pin(26, Pin.IN), Pin(27, Pin.OUT),),\n# model=\"015D\") # esp32 Pin BB\n\n# dlv=dlv_ps.DLV_SPI((Pin(\"D1\", Pin.OUT), Pin(\"D3\", PIN.IN), Pin(\"D11\", Pin.OUT),),\n# model=\"015D\") # XBEE Pin BB\n# XBEE Pin BB is practically useless. Freq 200 Hz, total time 160ms.\n# Fastest Pin toggle time 2.5 ms. 
Other ports are in the range 1-4 µs.\n\ndef run(readall=True):\n\n last_p = None\n last_t = None\n\n for i in range(1, 1000001):\n try:\n if readall:\n p, t, s = dlv.measure(all=True, cooked=False)\n if last_p is None:\n print(i, p, 0, t, 0, dlv.psi(p), dlv.celsius(t), s)\n else:\n print(i, p, last_p - p, t, last_t - t, dlv.psi(p), dlv.celsius(t), s)\n last_t = t\n else:\n p, s = dlv.measure(all=False, cooked=False)\n if last_p is None:\n print(i, p, 0, dlv.psi(p), s)\n else:\n print(i, p, last_p - p, dlv.psi(p), s)\n last_p = p\n time.sleep(0.1)\n except KeyboardInterrupt:\n break\n except Exception as err:\n print(\"Error: \", err)\n time.sleep(0.1)\n # if input(\"Next: \") == \"q\":\n # break\n\nrun()\n","sub_path":"dlvtest_ext.py","file_name":"dlvtest_ext.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240552137","text":"from django.forms import forms, ModelForm\nfrom .models import Mensaje,MensajeAnonymous\nfrom construct_star.users.models import User\nfrom django.utils import timezone\nfrom django import forms\nfrom django.core.mail import EmailMessage\n\n\n\nclass FormularioMensaje(ModelForm):\n\n class Meta:\n model = Mensaje\n fields = ('texto',)\n\n def __init__(self, *args, **kwargs):\n super(FormularioMensaje, self).__init__(*args, **kwargs)\n self.fields['texto'].initial = \\\n 'Hola, quiciera un cambio al presupuesto enviado.'\n\n def save(self,trabajo,recipiente,enviador, msg_orig):\n data = self.data\n msg = Mensaje()\n if msg_orig is None:\n msg.autor = enviador\n else:\n msg.autor = msg_orig.autor\n msg.enviador = enviador\n msg.trabajo = trabajo\n msg.recipiente = recipiente\n msg.texto = data.get('texto')\n if msg_orig is not None:\n if msg_orig.status is 2:\n msg.status = 1\n else:\n msg.status = 2\n else:\n msg.status = 2\n msg.save()\n return msg\n\n\nclass FormMensajeEmailRespuesta(forms.Form):\n Asunto = forms.CharField(label='Asunto:',max_length=200)\n Mensaje = forms.CharField(widget=forms.Textarea,label='Mensaje:')\n\n def __init__(self, *args, **kwargs):\n super(FormMensajeEmailRespuesta, self).__init__(*args, **kwargs)\n self.fields['Asunto'].initial = \\\n 'Construct star: Respuesta desde administor'\n\n def enviar_email(self, admin):\n data = self.cleaned_data\n email = EmailMessage(\n subject=data['Asunto'],\n body='Respuesta desde administrador a su mensaje de contacto: '\n + admin.first_name + \" \" + admin.last_name + \"\\n\" + \"E-mail: \" + admin.email + \"\\nMensaje:\\n\" + data['Mensaje'],\n to=[admin.email]\n )\n email.send()\n","sub_path":"construct_star/mensajes/formularios.py","file_name":"formularios.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124390854","text":"import numpy as np\nimport copy\n\ndef cut_grid(grid):\n live_cells = np.nonzero(grid)\n left = min(live_cells[0])\n right = max(live_cells[0])\n top = min(live_cells[1])\n bottom = max(live_cells[1])\n result = grid[left:right+1, top:bottom+1]\n return result\n\ndef new_cells(cells):\n grid = np.array(cells)\n grid_pad = np.pad(grid, 2)\n new_grid = copy.deepcopy(grid_pad) #make a deep cppy of the grid_pad\n \n print(grid_pad, '\\n')\n for x in range(1, grid_pad.shape[0]-1): \n for y in range(1, grid_pad.shape[1]-1): #run over all cells, exept outer ones\n cell = grid_pad[x][y]\n naighbours = [grid_pad[x-1][y-1], grid_pad[x-1][y], \n grid_pad[x-1][y+1], grid_pad[x][y-1],\n 
grid_pad[x][y+1], grid_pad[x+1][y-1],\n grid_pad[x+1][y], grid_pad[x+1][y+1]]\n naighbour_count = sum(naighbours)\n if cell == 1: \n if naighbour_count == 2 or naighbour_count == 3:\n new_grid[x][y] = 1\n else:\n new_grid[x][y] = 0\n else:\n if naighbour_count == 3:\n new_grid[x][y] = 1\n \n new_cells = cut_grid(new_grid)\n new_cells.tolist()\n return new_cells\n\ndef get_generation(cells, generations):\n print('generations = ', generations, '\\n')\n while generations:\n cells = new_cells(cells)\n generations-=1\n return cells\n","sub_path":"Game_of_Life.py","file_name":"Game_of_Life.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"155270979","text":"import os\nimport boto3\nimport requests\nfrom bs4 import BeautifulSoup\nfrom botocore.exceptions import NoCredentialsError\n\n\ndef url_scraper(page_soup):\n '''\n Scrapes City of Phoenix Open Data page and gathers csv links for each month for City Checkbook and returns results in a dictionary\n '''\n \n # grabs each checkbook heading which is descriptive of month and year\n headings = page_soup.findAll(\"a\", {\"class\":\"heading\"})\n # grabs each csv url link to data related to each checkbook heading\n resource_csv_urls = page_soup.findAll(\"a\", {\"class\":\"resource-url-analytics\"})\n \n # creates a list of headings\n checkbook_headings_list = []\n dump = ['City', 'Checkbook']\n\n for idx, heading in enumerate(headings):\n\n head = headings[idx]['title']\n new_head = [word for word in head.split() if word not in dump]\n checkbook_headings_list.append(\" \".join(new_head))\n \n # creates a list of urls \n csv_hrefs_list = []\n\n for idx, url in enumerate(resource_csv_urls):\n csv_hrefs_list.append(resource_csv_urls[idx]['href'])\n \n # creates a dictionary of header:url key, values\n heading_csv_url_dict = {key:value for key, value in zip(checkbook_headings_list, csv_hrefs_list)}\n \n return heading_csv_url_dict\n\n\ndef scrape_to_soup(url):\n '''Combines url_scraper with api call and returns heading csv url dictionary\n '''\n \n # establishing API connection\n response = requests.get(url)\n\n # checking connection status\n if response.status_code == 200:\n print('Connection established')\n else:\n print('There is a problem with the connection')\n\n # scraping with BeautifulSoup \n page_html = response.text\n\n page_soup = BeautifulSoup(page_html, 'html.parser')\n\n web_scrapings = url_scraper(page_soup)\n \n return web_scrapings\n\n\ndef writer(key, value):\n '''Input: Takes the key, value pair in a for loop supplied by the dict of csv links scraped earlier\n Output: Downloads csv data and saves to data directory and outupts file path of downloaded csv\n '''\n \n # gets http response from City Checkbook csv link\n r = requests.get(value)\n \n # creates desired filepath for download\n file_path = 'data/{}.csv'.format(key)\n \n # writes csv into filepath\n open(file_path, 'wb').write(r.content)\n \n return file_path\n\n\ndef upload_to_aws(local_file, bucket, s3_file):\n \n '''\n Uploads file to AWS S3 bucket\n '''\n \n # aws_access_key_id and aws_secret_access_key are stored in local .aws config file\n s3 = boto3.client('s3')\n \n # 3 parameters necessary for upload: (file to upload to s3, s3 bucket name, \n # the name by which we will save the file in s3)\n try:\n s3.upload_file(local_file, bucket, s3_file)\n print('Upload Successful')\n return True\n \n except FileNotFoundError:\n print('The file was not found')\n return False\n \n 
except NoCredentialsError:\n print('Credentials not available')\n return False\n \n \ndef bulk_upload_to_aws(dict):\n \n '''Input: Heading, Link (key, value) dictionary of scraped CSV links and headings\n \n Output: Downloads the CSV's to local data directory and uploads the csv to S3 instance\n '''\n \n # S3 bucket that I am uploading to\n bucket = 'city-of-phx-open-data-engineering-raw-db'\n\n # loop through each Header, Link in dictionary\n for key, value in dict.items():\n \n # download csv and create file path \n file_path = writer(key, value)\n \n # create s3 file name\n name = file_path[5:]\n s3_file = name\n \n # upload csv to s3\n upload_to_aws(file_path, bucket, s3_file)\n \n # deletes files from local data directory\n os.remove(file_path)\n \n \ndef clean_up(files):\n '''Takes in a list of files to remove from local data directory\n '''\n \n # loops through the list of files and removes them\n for file in files:\n\n os.remove(file)","sub_path":"dags/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"463908536","text":"import pickle\nfrom sklearn.metrics import recall_score, precision_score\nimport pathlib\nimport time\nfrom sklearn.model_selection import cross_val_score\nimport pandas as pd\n\n\nclass Validation:\n\n def __init__(self, model_dir, dataset_dir):\n # paths\n self.model_dir = model_dir\n self.dataset_dir = dataset_dir\n\n # dataset\n self.dataset = {'train': [], 'test': []}\n\n # model\n self.model = None\n\n # model_parameters\n self.metrics = {'train': {'accuracy': 0, 'recall': 0, 'precision': 0},\n 'test': {'accuracy': 0, 'recall': 0, 'precision': 0}}\n\n def model_load(self):\n with open(self.model_dir / 'model.pkl', 'rb') as file:\n self.model = pickle.load(file)\n\n def dataset_load(self):\n with open(self.dataset_dir / 'X_train.pkl', 'rb') as file:\n self.dataset['train'].insert(0, pickle.load(file))\n with open(self.dataset_dir / 'X_test.pkl', 'rb') as file:\n self.dataset['test'].insert(0, pickle.load(file))\n with open(self.dataset_dir / 'y_train.pkl', 'rb') as file:\n self.dataset['train'].insert(1, pickle.load(file))\n with open(self.dataset_dir / 'y_test.pkl', 'rb') as file:\n self.dataset['test'].insert(1, pickle.load(file))\n\n def dataset_validation(self, y, label):\n unique_values = {label: 0 for label in y.unique()}\n print('_________________________________________________________________')\n print('{0} set contain {1} unique labels'.format(label, len(unique_values)))\n\n for value in unique_values:\n unique_values[value] = y[y == value].count()\n\n for value in unique_values.keys():\n print('{0} - {1} record'.format(value, unique_values[value]))\n print('_________________________________________________________________')\n\n def model_validation(self):\n\n for data in self.dataset.keys():\n y_pred = self.model.predict(self.dataset[data][0])\n y_true = self.dataset[data][1]\n\n accuracy = self.k_fold_validation(X=self.dataset[data][0], y=self.dataset[data][1])\n precision = round(precision_score(y_true, y_pred) * 100, ndigits=2)\n recall = round(recall_score(y_true, y_pred) * 100, ndigits=2)\n\n self.metrics[data]['accuracy'] = accuracy\n self.metrics[data]['precision'] = precision\n self.metrics[data]['recall'] = recall\n print('_________________________________________________________________')\n print('For {0} data, accuracy = {1}%, precall = {2}%, recall = {3}%'.format(data, accuracy,\n 
precision, recall))\n print('_________________________________________________________________')\n\n def k_fold_validation(self, X, y):\n accuracy = cross_val_score(estimator=self.model, X=X, y=y, cv=10, scoring='accuracy', n_jobs=-1)\n accuracy = round(accuracy.mean() * 100, ndigits=2)\n return accuracy\n\n def start_validation(self):\n start = time.time()\n self.model_load()\n self.dataset_load()\n self.dataset_validation(y=pd.Series(self.dataset['train'][1]), label='Train')\n self.dataset_validation(y=pd.Series(self.dataset['test'][1]), label='Test')\n self.model_validation()\n stop = time.time()\n total_time = stop - start\n print('Validation process took {0} s'.format(round(total_time, ndigits=3)))\n\n\n","sub_path":"scripts/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"421881523","text":"\"\"\"\n\nLine length\n====================\n\nValidator name: ``linelength``\n\nCheck that that text file lines are not too long.\n\nPrerequisites\n----------------\n\nBuilt-in - no external software needed.\n\nSupported files\n----------------\n\n* All text files.\n\nOptions\n-----------\n\nlength\n+++++++\n\nSet maximum allowed line length. Defaults to ``80`` columns.\n\nExample ``validator-options.yaml``::\n\n # Allow long text lines\n linelength:\n length: 250\n\nMore info\n------------\n\n* http://www.kernel.org/doc/Documentation/CodingStyle\n\n\"\"\"\n\nimport logging\n\nfrom vvv.plugin import Plugin\n\nclass LineLengthPlugin(Plugin):\n \"\"\"\n Line length driver.\n \"\"\"\n\n def __init__(self):\n\n Plugin.__init__(self)\n \n #: Configuration file option\n self.line_length = None\n \n\n def get_default_matchlist(self):\n return [\"*\"]\n\n def setup_local_options(self):\n\n self.line_length = self.options.get_int_option(self.id, \"length\", 80)\n\n if not self.hint:\n self.hint = \"Text file line length must not exceed %d characteres per line\" % self.line_length\n\n def validate(self, fname):\n \"\"\"\n Tabs validator code runs in-line.\n \"\"\"\n\n errors = False\n\n i = 0\n f = open(fname, \"rt\", encoding=\"ascii\")\n try:\n for line in f:\n i += 1\n if len(line) > self.line_length:\n errors = True\n self.reporter.report_detailed(self.id, logging.ERROR, fname, i, None, None, \"Line is too long, %d characters\" % len(line), excerpt=line)\n except UnicodeDecodeError:\n # UnicodeDecodeError: 'utf8' codec can't decode byte 0xa5 in position 2: invalid start byte\n # For now, how to handle?\n pass\n\n f.close()\n\n return not errors","sub_path":"vvv/validators/linelength.py","file_name":"linelength.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274535799","text":"import tensorflow as tf\nimport numpy as np\nimport os\nfrom .layers import DecoderRNNCellJointCopy\n\nclass EncoderDecoderContentSelection(tf.keras.Model):\n def __init__( self\n , encoder_content_selection\n , encoder_content_planner\n , encoder_from_cp\n , text_decoder):\n super(EncoderDecoderContentSelection, self).__init__()\n self._encoder_content_selection = encoder_content_selection\n self._encoder_content_planner = encoder_content_planner\n self._encoder_from_cp = encoder_from_cp\n self._text_decoder = text_decoder\n\n def compile( self\n , optimizer_cp\n , optimizer_txt\n , loss_fn_cp\n , loss_fn_decoder\n , scheduled_sampling_rate\n , truncation_size\n , truncation_skip_step):\n 
super(EncoderDecoderContentSelection, self).compile(run_eagerly=True)\n self._optimizer_cp = optimizer_cp\n self._optimizer_txt = optimizer_txt\n self._loss_fn_cp = loss_fn_cp\n self._loss_fn_decoder = loss_fn_decoder\n self._train_metrics = { \"accuracy_decoder\" : tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy_decoder')\n , \"accuracy_cp\" :tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy_cp')\n , \"loss_decoder\" :tf.keras.metrics.SparseCategoricalCrossentropy(name='loss_decoder')\n , \"loss_cp\": tf.keras.metrics.SparseCategoricalCrossentropy( name='loss_cp'\n , from_logits=True)}\n self._val_metrics = { \"val_accuracy_decoder\" : tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy_decoder')\n , \"val_accuracy_cp\" : tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy_cp')\n , \"val_loss_decoder\" : tf.keras.metrics.SparseCategoricalCrossentropy(name='loss_decoder')\n , \"val_loss_cp\" : tf.keras.metrics.SparseCategoricalCrossentropy(name='loss_cp'\n , from_logits=True)}\n self._scheduled_sampling_rate = scheduled_sampling_rate\n self._truncation_skip_step = truncation_skip_step\n self._truncation_size = truncation_size\n self._generator = tf.random.Generator.from_non_deterministic_state()\n\n \n def _calc_loss( self, x, y, loss_object, selected_metrics):\n \"\"\" use only the non-pad values \"\"\"\n mask = tf.math.logical_not(tf.math.equal(y, 0))\n loss_ = loss_object(y, x)\n mask = tf.cast(mask, loss_.dtype)\n loss_ *= mask\n for metric in self._train_metrics.values():\n if metric.name in selected_metrics:\n metric.update_state(y, x, sample_weight=mask)\n return tf.reduce_mean(loss_)\n \n @tf.function\n def bppt_step( self\n , batch_data\n , last_out\n , initial_state=None):\n loss_cp = 0\n loss_txt = 0\n dec_in, targets, gen_or_teach, cp_in, cp_targets, *tables = batch_data\n batch_size = cp_in.shape[0]\n final_state = None\n final_last_out = None\n cp_enc_outs = tf.TensorArray(tf.float32, size=cp_targets.shape[1])\n cp_enc_ins = tf.TensorArray(tf.int16, size=cp_targets.shape[1])\n with tf.GradientTape() as tape:\n enc_outs, avg = self._encoder_content_selection(tables)\n states = (avg, avg)\n next_input = enc_outs[:, 0, :]\n # create content plan, evaluate the loss from the \n # gold content plan\n for t in range(cp_in.shape[1]):\n (_, alignment), states = self._encoder_content_planner( (next_input, enc_outs)\n , states=states\n , training=True)\n # content_plan generation is updated only once per batch to not be affected\n # by the truncated BPTT\n if initial_state is None:\n # the neural network is taught to predict\n # indices shifted by 1\n loss_cp += self._calc_loss( alignment\n , cp_targets[:, t]\n , self._loss_fn_cp\n , [\"loss_cp\", \"accuracy_cp\"])\n \n # prepare inputs for encoder\n # indices are shifted by 1\n # enc_outs[:, enc_outs.shape[1], :] is either\n # encoded <> record or <> record\n ic = tf.where(cp_targets[:, t] != 0, cp_targets[:, t] - 1, enc_outs.shape[1] - 1)\n indices = tf.stack([tf.range(batch_size), tf.cast(ic, tf.int32)], axis=1)\n next_input = tf.gather_nd(enc_outs, indices)\n\n # the next input should be zeroed out if the indices point to the end of the table - <> or <> tokens\n # then the encoder_from_cp wouldn't take them into acount\n enc_outs_zeroed = tf.where(tf.expand_dims(indices[:, 1] == (enc_outs.shape[1] - 1), 1), tf.zeros(next_input.shape), next_input)\n vals = tf.gather_nd(tables[2], indices)\n cp_enc_outs = cp_enc_outs.write(t, enc_outs_zeroed)\n cp_enc_ins = cp_enc_ins.write(t, vals)\n\n cp_enc_outs = 
tf.transpose(cp_enc_outs.stack(), [1, 0, 2])\n cp_enc_ins = tf.transpose(cp_enc_ins.stack(), [1, 0])\n\n # encode generated content plan\n cp_enc_outs, *last_hidden_rnn = self._encoder_from_cp(cp_enc_outs, training=True)\n\n # prepare states and inputs for the text decoder\n if initial_state is None:\n initial_state = [ last_hidden_rnn[-1]\n , *last_hidden_rnn ]\n\n if isinstance(self._text_decoder, DecoderRNNCellJointCopy):\n print(\"using joint copy mechanism !\")\n enc_ins = tf.one_hot(tf.cast(cp_enc_ins, tf.int32), self._text_decoder._word_vocab_size) # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n aux_inputs = (cp_enc_outs, enc_ins) # value portion of the record needs to be copied\n else:\n print(\"using vanilla attention\")\n aux_inputs = (cp_enc_outs,)\n \n # decode text from the encoded content plan\n states = initial_state\n for t in range(dec_in.shape[1]):\n if (self._truncation_skip_step is not None) and (t == self._truncation_skip_step):\n final_state = states\n final_last_out = last_out\n if gen_or_teach[t] > self._scheduled_sampling_rate:\n _input = last_out\n else:\n _input = dec_in[:, t, :]\n last_out, states = self._text_decoder( (_input, *aux_inputs)\n , states=states\n , training=True)\n loss_txt += self._calc_loss( last_out\n , targets[:, t]\n , self._loss_fn_decoder\n , [\"loss_decoder\", \"accuracy_decoder\"])\n last_out = tf.expand_dims(tf.cast(tf.argmax(last_out, axis=1), tf.int16), -1)\n loss = loss_cp + loss_txt\n\n variables_cp = []\n for var in self._encoder_content_planner.trainable_variables + \\\n self._encoder_content_selection.trainable_variables:\n if (initial_state is None) or (var.name != 'encoder/linear_transform/kernel:0'):\n variables_cp.append(var)\n\n variables_txt = []\n for var in self._text_decoder.trainable_variables + \\\n self._encoder_from_cp.trainable_variables:\n if (initial_state is None) or (var.name != 'encoder/linear_transform/kernel:0'):\n variables_txt.append(var)\n \n variables = variables_cp + variables_txt\n gradients = tape.gradient(loss, variables)\n self._optimizer_txt.apply_gradients(zip(gradients, variables))\n\n if (self._truncation_skip_step is None) or (self._truncation_skip_step == dec_in.shape[1]):\n final_state = states\n final_last_out = last_out\n\n return final_state, final_last_out\n\n\n @property\n def metrics(self):\n return list(self._train_metrics.values()) + list(self._val_metrics.values())\n\n\n def train_step(self, batch_data):\n summaries, content_plan, *tables = batch_data\n sums = tf.expand_dims(summaries, axis=-1)\n last_out = sums[:, 0]\n start = 0\n length = summaries.shape[1]\n cp_length = content_plan.shape[1]\n state = None\n for end in range(self._truncation_size, length-1, self._truncation_skip_step):\n gen_or_teach = np.zeros(shape=(end-start))\n for i in range(len(gen_or_teach)):\n gen_or_teach[i] = self._generator.uniform(shape=(), maxval=1.0)\n # create data for teacher forcing\n truncated_data = ( sums[:, start:end, :]\n , summaries[:, start+1:end+1]\n , tf.convert_to_tensor(gen_or_teach)\n , content_plan[:, :cp_length - 1]\n , content_plan[:, 1:cp_length]\n , *tables)\n state, last_out = self.bppt_step( truncated_data\n , last_out\n , initial_state=state)\n start += self._truncation_skip_step\n # finish the truncated bppt if the truncation_size cannot divide properly\n # the length of sequence\n if (length - self._truncation_size) % self._truncation_skip_step != 0:\n gen_or_teach = np.zeros(shape=(length-1-start))\n for i in range(len(gen_or_teach)):\n gen_or_teach[i] = 
self._generator.uniform(shape=(), maxval=1.0)\n truncated_data = ( sums[:, start:length-1, :]\n , summaries[:, start+1:length]\n , tf.convert_to_tensor(gen_or_teach)\n , content_plan[:, :cp_length - 1]\n , content_plan[:, 1:cp_length]\n , *tables)\n state, last_out = self.bppt_step( truncated_data\n , last_out\n , initial_state=state)\n return dict([(metric.name, metric.result()) for metric in self._train_metrics.values()])\n\n def test_step(self, batch_data):\n summaries, content_plan, *tables = batch_data\n # prepare summaries\n max_sum_size = summaries.shape[1] - 1\n dec_in = tf.expand_dims(summaries, axis=-1)[:, :max_sum_size, :]\n targets = summaries[:, 1:max_sum_size+1]\n\n # prepare content plans\n cp_length = content_plan.shape[1]\n cp_in = content_plan[:, :cp_length-1]\n cp_targets = content_plan[:, 1:cp_length]\n\n batch_size = cp_in.shape[0]\n cp_enc_outs = tf.TensorArray(tf.float32, size=cp_targets.shape[1])\n cp_enc_ins = tf.TensorArray(tf.int16, size=cp_targets.shape[1])\n enc_outs, avg = self._encoder_content_selection(tables)\n states = (avg, avg)\n\n next_input = enc_outs[:, 0, :]\n # create content plan, evaluate the loss from the \n # gold content plan\n for t in range(cp_in.shape[1]):\n (_, alignment), states = self._encoder_content_planner( (next_input, enc_outs)\n , states=states\n , training=False)\n \n mask = tf.math.logical_not(tf.math.equal(cp_targets[:, t], 0))\n for metric in self._val_metrics.values():\n if metric.name in [\"accuracy_cp\", \"loss_cp\"]:\n metric.update_state( cp_targets[:, t]\n , alignment\n , sample_weight=mask )\n \n # prepare inputs for encoder\n # indices are shifted by 1\n # enc_outs[:, enc_outs.shape[1], :] is either\n # encoded <> record or <> record\n ic = tf.where(cp_targets[:, t] != 0, cp_targets[:, t] - 1, enc_outs.shape[1] - 1)\n indices = tf.stack([tf.range(batch_size), tf.cast(ic, tf.int32)], axis=1)\n next_input = tf.gather_nd(enc_outs, indices)\n\n # the next input should be zeroed out if the indices point to the end of the table - <> or <> tokens\n # then the encoder_from_cp wouldn't take them into acount\n enc_outs_zeroed = tf.where(tf.expand_dims(indices[:, 1] == (enc_outs.shape[1] - 1), 1), tf.zeros(next_input.shape), next_input)\n vals = tf.gather_nd(tables[2], indices)\n cp_enc_outs = cp_enc_outs.write(t, enc_outs_zeroed)\n cp_enc_ins = cp_enc_ins.write(t, vals)\n\n cp_enc_outs = tf.transpose(cp_enc_outs.stack(), [1, 0, 2])\n cp_enc_ins = tf.transpose(cp_enc_ins.stack(), [1, 0])\n\n # encode generated content plan\n cp_enc_outs, *last_hidden_rnn = self._encoder_from_cp(cp_enc_outs, training=False)\n\n # prepare states and inputs for the text decoder\n if isinstance(self._text_decoder, DecoderRNNCellJointCopy):\n enc_ins = tf.one_hot(tf.cast(cp_enc_ins, tf.int32), self._text_decoder._word_vocab_size) # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n aux_inputs = (cp_enc_outs, enc_ins) # value portion of the record needs to be copied\n else:\n aux_inputs = (cp_enc_outs,)\n \n # decode text from the encoded content plan\n states = [ last_hidden_rnn[-1], *last_hidden_rnn ]\n for t in range(dec_in.shape[1]):\n _input = dec_in[:, t, :]\n last_out, states = self._text_decoder( (_input, *aux_inputs)\n , states=states\n , training=False)\n\n mask = tf.math.logical_not(tf.math.equal(targets[:, t], 0))\n for metric in self._val_metrics.values():\n if metric.name in [\"accuracy_decoder\", \"loss_decoder\"]:\n metric.update_state( targets[:, t]\n , last_out\n , sample_weight=mask )\n return dict([(metric.name, 
metric.result()) for metric in self._val_metrics.values()])\n\n def predict_step(self, batch_data):\n summaries, content_plan, *tables = batch_data\n # prepare summaries\n max_sum_size = summaries.shape[1] - 1\n dec_in = tf.expand_dims(summaries, axis=-1)[:, :max_sum_size, :]\n\n batch_size = content_plan.shape[0]\n cp_enc_outs = tf.TensorArray(tf.float32, size=content_plan.shape[1])\n cp_enc_ins = tf.TensorArray(tf.int16, size=content_plan.shape[1])\n cp_cp_ix = tf.TensorArray(tf.int32, size=content_plan.shape[1])\n enc_outs, avg = self._encoder_content_selection(tables)\n states = (avg, avg)\n\n # the first input to the encoder_content_planner is 0th record\n # zeroth record is the <> record\n next_input = enc_outs[:, 0, :]\n\n # create content plan\n # next input of the encoder_content_planner is its last output\n for t in range(content_plan.shape[1] - 1):\n # indices are shifted by 1\n # enc_outs[:, enc_outs.shape[1], :] is either\n # encoded <> record or <> record\n (_, alignment), states = self._encoder_content_planner( (next_input, enc_outs)\n , states=states\n , training=False)\n \n # prepare next_input and gather inputs for the encoder\n\n # get max indices\n max_alignment = tf.argmax(alignment, axis=-1, output_type=tf.dtypes.int32)\n ic = tf.where(max_alignment != 0, max_alignment - 1, enc_outs.shape[1] - 1)\n indices = tf.stack([tf.range(batch_size), tf.cast(ic, tf.int32)], axis=1)\n\n # get correct values from tables\n vals = tf.gather_nd(tables[2], indices)\n next_input = tf.gather_nd(enc_outs, indices)\n\n # save for decoder\n cp_cp_ix = cp_cp_ix.write(t, ic)\n enc_outs_zeroed = tf.where(tf.expand_dims(indices[:, 1] == (enc_outs.shape[1] - 1), 1), tf.zeros(next_input.shape), next_input)\n cp_enc_outs = cp_enc_outs.write(t, enc_outs_zeroed)\n cp_enc_ins = cp_enc_ins.write(t, vals)\n\n cp_enc_outs = tf.transpose(cp_enc_outs.stack(), [1, 0, 2])\n cp_enc_ins = tf.transpose(cp_enc_ins.stack(), [1, 0])\n cp_cp_ix = tf.transpose(cp_cp_ix.stack(), [1, 0])\n\n # encode generated content plan\n cp_enc_outs, *last_hidden_rnn = self._encoder_from_cp(cp_enc_outs, training=False)\n\n # prepare states and inputs for the text decoder\n if isinstance(self._text_decoder, DecoderRNNCellJointCopy):\n enc_ins = tf.one_hot(tf.cast(cp_enc_ins, tf.int32), self._text_decoder._word_vocab_size) # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n aux_inputs = (cp_enc_outs, enc_ins) # value portion of the record needs to be copied\n else:\n aux_inputs = (cp_enc_outs,)\n\n # decode text from the encoded content plan\n states = [ last_hidden_rnn[-1], *last_hidden_rnn ]\n # zeroth token is the <> token\n _input = dec_in[:, 0, :]\n result_preds = np.zeros(summaries.shape, dtype=np.int)\n for t in range(dec_in.shape[1]):\n last_out, states = self._text_decoder( (_input, *aux_inputs)\n , states=states\n , training=False)\n\n predicted = tf.argmax(last_out, axis=1).numpy()\n result_preds[:, t] = predicted\n _input = tf.expand_dims(predicted, axis=1)\n self.last_content_plan = cp_cp_ix\n return result_preds","sub_path":"rotowire/baseline_model/cp_model.py","file_name":"cp_model.py","file_ext":"py","file_size_in_byte":18735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"42660702","text":"# map types in args files to socket types\n__RMAN_SOCKET_MAP__ = {\n 'float': 'RendermanNodeSocketFloat',\n 'color': 'RendermanNodeSocketColor',\n 'string': 'RendermanNodeSocketString',\n 'int': 'RendermanNodeSocketInt',\n 'integer': 
'RendermanNodeSocketInt',\n 'struct': 'RendermanNodeSocketStruct',\n 'normal': 'RendermanNodeSocketVector',\n 'vector': 'RendermanNodeSocketVector',\n 'point': 'RendermanNodeSocketVector',\n 'void': 'RendermanNodeSocketStruct',\n 'vstruct': 'RendermanNodeSocketStruct',\n}\n\ndef update_inputs(node):\n if node.bl_idname == 'PxrMeshLightLightNode':\n return\n for page_name in node.prop_names:\n if node.prop_meta[page_name]['renderman_type'] == 'page':\n for prop_name in getattr(node, page_name):\n if prop_name.startswith('enable'):\n recursive_enable_inputs(node, getattr(\n node, page_name), getattr(node, prop_name))\n break\n\n\ndef recursive_enable_inputs(node, prop_names, enable=True):\n for prop_name in prop_names:\n if type(prop_name) == str and node.prop_meta[prop_name]['renderman_type'] == 'page':\n recursive_enable_inputs(node, getattr(node, prop_name), enable)\n elif hasattr(node, 'inputs') and prop_name in node.inputs:\n node.inputs[prop_name].hide = not enable\n else:\n continue\n\ndef find_enable_param(params):\n for prop_name in params:\n if prop_name.startswith('enable'):\n return prop_name\n\n# add input sockets\ndef node_add_inputs(node, node_name, prop_names, first_level=True, label_prefix='', remove=False):\n for name in prop_names:\n meta = node.prop_meta[name]\n param_type = meta['renderman_type']\n\n if name in node.inputs.keys() and remove:\n node.inputs.remove(node.inputs[name])\n continue\n elif name in node.inputs.keys():\n continue\n\n # if this is a page recursively add inputs\n if 'renderman_type' in meta and meta['renderman_type'] == 'page':\n if first_level and node.bl_idname in ['PxrLayerPatternNode', 'PxrSurfaceBxdfNode'] and name != 'Globals':\n # add these\n enable_param = find_enable_param(getattr(node, name))\n if enable_param and getattr(node, enable_param):\n node_add_inputs(node, node_name, getattr(node, name),\n label_prefix=name + ' ',\n first_level=False)\n else:\n node_add_inputs(node, node_name, getattr(node, name),\n label_prefix=name + ' ',\n first_level=False, remove=True)\n continue\n\n else:\n node_add_inputs(node, node_name, getattr(node, name),\n first_level=first_level,\n label_prefix=label_prefix, remove=remove)\n continue\n\n if remove:\n continue\n # # if this is not connectable don't add socket\n if param_type not in __RMAN_SOCKET_MAP__:\n continue\n if '__noconnection' in meta and meta['__noconnection']:\n continue\n\n param_name = name\n\n param_label = label_prefix + meta.get('label', param_name)\n\n socket = node.inputs.new(\n __RMAN_SOCKET_MAP__[param_type], param_name, identifier=param_label)\n socket.link_limit = 1\n\n if param_type in ['struct', 'normal', 'vector', 'vstruct', 'void']:\n socket.hide_value = True\n\n update_inputs(node)\n\n\n# add output sockets\ndef node_add_outputs(node):\n for name, meta in node.output_meta.items():\n rman_type = meta['renderman_type']\n if rman_type in __RMAN_SOCKET_MAP__ and 'vstructmember' not in meta:\n socket = node.outputs.new(__RMAN_SOCKET_MAP__[rman_type], name)\n socket.label = name\n","sub_path":"rman_bl_nodes/rman_socket_utils.py","file_name":"rman_socket_utils.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"250990923","text":"available_exits=['north','south','east','west']\r\n\r\nchosen_exit=\"\"\r\nwhile chosen_exit not in available_exits:\r\n chosen_exit=input('please choose a direction: ')\r\n if chosen_exit.casefold()== \"quit\":\r\n print('game over')\r\n 
break\r\n\r\nprint('arent you glad you are out of there')\r\n\r\nfor i in range(0, 100, 7):\r\n print(i)\r\n if i > 0 and i % 11 == 0:\r\n break\r\n\r\n\r\nfor x in range(21):\r\n if x % 3 == 0 or x % 5 == 0:\r\n continue\r\n print(x)\r\n","sub_path":"ADVENTURE.py","file_name":"ADVENTURE.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"301367346","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom .models import Customer\nfrom .serializers import jsontopython, pytojson\nimport concurrent.futures\n\n@api_view(['GET'])\ndef get_customer_by_id(req,id):\n try:\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future = executor.submit(getCustomerById, (id))\n data = future.result()\n return Response(data, status=status.HTTP_201_CREATED)\n except:\n return Response('Error', status=400)\n\n@api_view(['GET'])\ndef get_all_customers(req):\n try:\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future = executor.submit(getCustomers, )\n data = future.result()\n return Response(data, status=status.HTTP_201_CREATED)\n except:\n return Response('Error', status=400)\n \ndef getCustomers():\n try:\n Cust = Customer.objects.values_list()\n data = pytojson(Cust)\n return data\n except:\n print('Error in Multithreading')\n\ndef getCustomerById(id):\n try:\n Cust = Customer.objects.filter(id=id).values()\n data = pytojson(Cust)\n return data\n except:\n print('Error in Multithreading')\n \n # https://www.googleapis.com/geolocation/v1/geolocate?key=YOUR_API_KEY\n \n \n\n","sub_path":"oowlish/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"6657723","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport logging\n\n# 日志设置\nlogging.basicConfig(level = logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')\nlogger = logging.getLogger(__name__)\n\nclass CNN:\n\n # 初始化\n # 参数为: epoch: 训练次数\n # learning_rate: 使用GD优化时的学习率\n # save_model_path: 模型保存的绝对路径\n def __init__(self, epoch, learning_rate, save_model_path):\n\n self.epoch = epoch\n self.learning_rate = learning_rate\n self.save_model_path = save_model_path\n\n \"\"\"\n 第一层 卷积层和池化层\n x_image(batch, 16, 20, 1) -> h_pool1(batch, 8, 10, 10)\n \"\"\"\n x = tf.placeholder(tf.float32, [None, 320])\n self.x = x\n x_image = tf.reshape(x, [-1, 16, 20, 1]) # 最后一维代表通道数目,如果是rgb则为3\n W_conv1 = self.weight_variable([3, 3, 1, 10])\n b_conv1 = self.bias_variable([10])\n\n h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = self.max_pool_2x2(h_conv1)\n\n \"\"\"\n 第二层 卷积层和池化层\n h_pool1(batch, 8, 10, 10) -> h_pool2(batch, 4, 5, 20)\n \"\"\"\n W_conv2 = self.weight_variable([3, 3, 10, 20])\n b_conv2 = self.bias_variable([20])\n\n h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = self.max_pool_2x2(h_conv2)\n\n \"\"\"\n 第三层 全连接层\n h_pool2(batch, 4, 5, 20) -> h_fc1(1, 100)\n \"\"\"\n W_fc1 = self.weight_variable([4 * 5 * 20, 200])\n b_fc1 = self.bias_variable([200])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 5 * 20])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n \"\"\"\n 第四层 Dropout层\n h_fc1 -> h_fc1_drop, 训练中启用,测试中关闭\n \"\"\"\n self.keep_prob = tf.placeholder(dtype=tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n \"\"\"\n 第五层 
Softmax输出层\n \"\"\"\n W_fc2 = self.weight_variable([200, 26])\n b_fc2 = self.bias_variable([26])\n\n self.y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n \"\"\"\n 训练和评估模型\n ADAM优化器来做梯度最速下降,feed_dict中加入参数keep_prob控制dropout比例\n \"\"\"\n self.y_true = tf.placeholder(shape = [None, 26], dtype=tf.float32)\n self.cross_entropy = -tf.reduce_mean(tf.reduce_sum(self.y_true * tf.log(self.y_conv), axis=1)) # 计算交叉熵\n\n # 使用adam优化器来以0.0001的学习率来进行微调\n self.train_model = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy)\n\n self.saver = tf.train.Saver()\n logger.info('Initialize the model...')\n\n def train(self, x_data, y_data):\n\n logger.info('Training the model...')\n\n with tf.Session() as sess:\n # 对所有变量进行初始化\n sess.run(tf.global_variables_initializer())\n\n feed_dict = {self.x: x_data, self.y_true: y_data, self.keep_prob:1.0}\n # 进行迭代学习\n for i in range(self.epoch + 1):\n sess.run(self.train_model, feed_dict=feed_dict)\n if i % int(self.epoch / 50) == 0:\n # to see the step improvement\n print('已训练%d次, loss: %s.' % (i, sess.run(self.cross_entropy, feed_dict=feed_dict)))\n\n # 保存ANN模型\n logger.info('Saving the model...')\n self.saver.save(sess, self.save_model_path)\n\n def predict(self, data):\n\n with tf.Session() as sess:\n logger.info('Restoring the model...')\n self.saver.restore(sess, self.save_model_path)\n predict = sess.run(self.y_conv, feed_dict={self.x: data, self.keep_prob:1.0})\n\n return predict\n\n \"\"\"\n 权重初始化\n 初始化为一个接近0的很小的正数\n \"\"\"\n def weight_variable(self, shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(self, shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n \"\"\"\n 卷积和池化,使用卷积步长为1(stride size),0边距(padding size)\n 池化用简单传统的2x2大小的模板做max pooling\n \"\"\"\n def conv2d(self, x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n def max_pool_2x2(self, x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n","sub_path":"machinelearningwork1/cnn/VerifyCodeCNN.py","file_name":"VerifyCodeCNN.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"220584734","text":"import tkinter as tk\ndef click_btn():\n amount=int(amount_entry.get())\n people=int(people_entry.get())\n dnum=amount/people\n pay=dnum//100*100\n if dnum>pay:\n pay=int(pay+100)\n payorg=amount-pay*(people-1)\n show_label['text']='1人あたり{}円({}人)、幹事は{}円です'.format(pay,people-1,payorg)\nroot=tk.Tk()\nroot.title('割り勘くん')\nroot.resizable(False,False)\ncanvas=tk.Canvas(root,width=400,height=600,bg='skyblue')\ncanvas.pack()\nbutton=tk.Button(text='計算する',font=('Times New Roman',18),command=click_btn)\nbutton.place(x=10,y=180)\namount_entry=tk.Entry(width=20)\namount_entry.place(x=10,y=50)\npeople_entry=tk.Entry(width=20)\npeople_entry.place(x=10,y=120)\namount_label=tk.Label(root,text='���額',font=('Times New Roman',16),bg='skyblue')\namount_label.place(x=12,y=20)\npeople_label=tk.Label(root,text='人数',font=('Times New Roman',16),bg='skyblue')\npeople_label.place(x=12,y=90)\nshow_label=tk.Label(root,text='',font=('Times New Roman',20),bg='skyblue')\nshow_label.place(x=10,y=230)\nroot.mainloop()\n","sub_path":"python/0407/tk14.py","file_name":"tk14.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"514519855","text":"# Copyright 2012 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"General utility functions common to all of CourseBuilder.\"\"\"\n\n__author__ = 'Mike Gainer (mgainer@google.com)'\n\nimport cStringIO\nimport random\nimport re\nimport string\nimport sys\nimport zipfile\n\nimport appengine_config\nfrom google.appengine.api import namespace_manager\n\nBACKWARD_COMPATIBLE_SPLITTER = re.compile(r'[\\[\\] ,\\t\\n]+', flags=re.M)\nSPLITTER = re.compile(r'[ ,\\t\\n]+', flags=re.M)\nALPHANUM = string.ascii_letters + string.digits\n\n\ndef text_to_list(text, splitter=SPLITTER):\n if not text:\n return []\n return [item for item in splitter.split(text) if item]\n\n\ndef list_to_text(items):\n if not items:\n return ''\n return ' '.join([unicode(item) for item in items])\n\n\ndef generate_instance_id():\n length = 12\n return ''.join([random.choice(ALPHANUM) for _ in xrange(length)])\n\n\ndef truncate(x, precision=2):\n assert isinstance(precision, int) and precision >= 0\n factor = 10 ** precision\n return int(x * factor) / float(factor)\n\n\ndef iter_all(query, batch_size=100):\n \"\"\"Yields query results iterator. Proven method for large datasets.\"\"\"\n prev_cursor = None\n any_records = True\n while any_records:\n any_records = False\n query = query.with_cursor(prev_cursor)\n for entity in query.run(batch_size=batch_size):\n any_records = True\n yield entity\n prev_cursor = query.cursor()\n\n\ndef run_hooks(hooks, *args, **kwargs):\n \"\"\"Run all the given callback hooks.\n\n Args:\n hooks: iterable. The callback functions to be invoked. Each function is\n passed the remaining args and kwargs.\n *args: List of arguments passed the hook functions.\n **kwargs: Dict of keyword args passed to the hook functions.\n \"\"\"\n for hook in hooks:\n # TODO(jorr): Add configurable try-catch around call\n hook(*args, **kwargs)\n\n\nclass Namespace(object):\n \"\"\"Save current namespace and reset it.\n\n This is inteded to be used in a 'with' statement. 
The verbose code:\n old_namespace = namespace_manager.get_namespace()\n try:\n namespace_manager.set_namespace(self._namespace)\n app_specific_stuff()\n finally:\n namespace_manager.set_namespace(old_namespace)\n\n can be replaced with the much more terse:\n with Namespace(self._namespace):\n app_specific_stuff()\n\n This style can be used in classes that need to be pickled; the\n @in_namespace function annotation (see below) is arguably visually\n cleaner, but can't be used with pickling.\n\n The other use-case for this style of acquire/release guard is when\n only portions of a function need to be done within a namespaced\n context.\n \"\"\"\n\n def __init__(self, new_namespace):\n self.new_namespace = new_namespace\n\n def __enter__(self):\n self.old_namespace = namespace_manager.get_namespace()\n namespace_manager.set_namespace(self.new_namespace)\n return self\n\n def __exit__(self, *unused_exception_info):\n namespace_manager.set_namespace(self.old_namespace)\n return False # Don't suppress exceptions\n\n\ndef find(predicate, iterable):\n \"\"\"Find the first matching item in a list, or None if not found.\n\n This is as a more-usable alternative to filter(), in that it does\n not raise an exception if the item is not found.\n\n Args:\n predicate: A function taking one argument: an item from the iterable.\n iterable: A list or generator providing items passed to \"predicate\".\n Returns:\n The first item in \"iterable\" where \"predicate\" returns True, or\n None if no item matches.\n \"\"\"\n for item in iterable:\n if predicate(item):\n return item\n return None\n\n\nclass ZipAwareOpen(object):\n \"\"\"Provide open() services for third party libraries in .zip files.\n\n Some libraries that are commonly downloaded and pushed alongside\n CourseBuilder are shipped with data files. These libraries make the\n assumption that when shipped in a product, they are packaged as plain\n files in a normal directory hierarchy. Thus, when that library is\n actually packaged in a .zip file, the open() call will fail. This\n class provides a convenient syntax around functionality that wraps\n calls to the builtin open() (or in the case of AppEngine, the version\n of 'open()' that AppEngine itself provides). When an attempt is made\n to open a file that is actually packaged within a .zip file, this\n wrapper will intelligently look within the .zip file for that member.\n\n Only read access is supported.\n\n No recursion into .zip files within other .zip files is supported.\n\n Example:\n with common_utils.ZipAwareOpen():\n third_party_module.some_func_that_calls_open()\n \"\"\"\n\n THIRD_PARTY_LIB_PATHS = {\n l.file_path: l.full_path for l in appengine_config.THIRD_PARTY_LIBS}\n\n def zip_aware_open(self, name, *args, **kwargs):\n \"\"\"Override open() iff opening a file in a library .zip for reading.\"\"\"\n\n # First cut: Don't even consider checking .zip files unless the\n # open is for read-only and \".zip\" is in the filename.\n mode = args[0] if args else kwargs['mode'] if 'mode' in kwargs else 'r'\n if '.zip' in name and (not mode or mode == 'r' or mode == 'rb'):\n\n # Only consider .zip files known in the third-party libraries\n # registered in appengine_config.py\n for path in ZipAwareOpen.THIRD_PARTY_LIB_PATHS:\n\n # Don't use zip-open if the file we are looking for _is_\n # the sought .zip file. 
(We are recursed into from the\n # zipfile module when it needs to open a file.)\n if path in name and path != name:\n zf = zipfile.ZipFile(path, 'r')\n\n # Possibly extend simple path to .zip file with relative\n # path inside .zip file to meaningful contents.\n name = name.replace(\n path, ZipAwareOpen.THIRD_PARTY_LIB_PATHS[path])\n\n # Strip off on-disk path to .zip file. This leaves\n # us with the absolute path within the .zip file.\n name = name.replace(path, '').lstrip('/')\n\n # Return a file-like object containing the data extracted\n # from the .zip file for the given name.\n data = zf.read(name)\n return cStringIO.StringIO(data)\n\n # All other cases pass through to builtin open().\n return self._real_open(name, *args, **kwargs)\n\n def __enter__(self):\n \"\"\"Wrap Python's internal open() with our version.\"\"\"\n # No particular reason to use __builtins__ in the 'zipfile' module; the\n # set of builtins is shared among all modules implemented in Python.\n self._real_open = sys.modules['zipfile'].__builtins__['open']\n sys.modules['zipfile'].__builtins__['open'] = self.zip_aware_open\n\n def __exit__(self, *unused_exception_info):\n \"\"\"Reset open() to be the Python internal version.\"\"\"\n sys.modules['zipfile'].__builtins__['open'] = self._real_open\n return False # Don't suppress exceptions.\n","sub_path":"appengine/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"74172806","text":"from subprocess import Popen\n\nfrom hokusai.lib.command import command\nfrom hokusai.lib.config import config\nfrom hokusai.lib.common import shout\nfrom hokusai.services.kubectl import Kubectl\n\n@command\ndef logs(context, timestamps, nlines, follow):\n kctl = Kubectl(context)\n\n opts = ''\n if timestamps:\n opts += ' --timestamps'\n if nlines:\n opts += \" --tail=%s\" % nlines\n if follow:\n opts += ' --follow'\n\n pods = kctl.get_object('pod', selector=\"app=%s,layer=application\" % config.project_name)\n pods = filter(lambda pod: pod['status']['phase'] == 'Running', pods)\n containers = []\n for pod in pods:\n for container in pod['spec']['containers']:\n containers.append({'pod': pod['metadata']['name'], 'name': container['name']})\n\n if follow:\n processes = [Popen(kctl.command(\"logs %s %s%s\" % (container['pod'], container['name'], opts)), shell=True) for container in containers]\n try:\n for p in processes:\n p.wait()\n except KeyboardInterrupt:\n for p in processes:\n p.terminate()\n else:\n for container in containers:\n shout(kctl.command(\"logs %s %s%s\" % (container['pod'], container['name'], opts)), print_output=True)\n","sub_path":"hokusai/commands/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"580418289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 15 08:20:34 2018\n\n This problem was asked by Dropbox.\n\nConway's Game of Life takes place on an infinite two-dimensional board of\nsquare cells. 
Each cell is either dead or alive, and at each tick, the\nfollowing rules apply:\n\n Any live cell with less than two live neighbours dies.\n Any live cell with two or three live neighbours remains living.\n Any live cell with more than three live neighbours dies.\n Any dead cell with exactly three live neighbours becomes a live cell.\n\nA cell neighbours another cell if it is horizontally, vertically, or\ndiagonally adjacent.\n\nImplement Conway's Game of Life. It should be able to be initialized with a\nstarting list of live cell coordinates and the number of steps it should run\nfor. Once initialized, it should print out the board state at each step. Since\nit's an infinite board, print out only the relevant coordinates, i.e. from the\ntop-leftmost live cell to bottom-rightmost live cell.\n\nYou can represent a live cell with an asterisk (*) and a dead cell with a dot\n(.).\n@author: carlgval\n\"\"\"\n\n\nimport numpy as np\nfrom copy import deepcopy\n\n\ndef game_of_life(list_of_cells, steps):\n cols, rows = zip(*list_of_cells)\n grid = [[0 for i in range(max(cols))] for j in range(max(rows))]\n for j, i in list_of_cells:\n grid[i-1][j-1] = 1\n plot_grid(grid)\n for s in range(steps):\n grid = step(grid)\n\n\ndef plot_grid(grid):\n g = np.array(grid)\n if any([any(row) for row in grid]):\n starting_col = int(np.argwhere(np.sum(g, axis=0))[0])\n ending_col = int(np.argwhere(np.sum(g, axis=0))[-1]) + 1\n starting_row = int(np.argwhere(np.sum(g, axis=1))[0])\n ending_row = int(np.argwhere(np.sum(g, axis=1))[-1]) + 1\n\n for row in grid[starting_row:ending_row]:\n s = [str(r) for r in row[starting_col:ending_col]]\n print(' '.join(s).replace('0', '.').replace('1', '*'))\n print('')\n\n\ndef step(grid):\n if any(grid[0]):\n grid.insert(0, [0 for i in range(len(grid[0]))])\n if any(grid[-1]):\n grid.append([0 for i in range(len(grid[0]))])\n if any([row[0] for row in grid]):\n for row in grid:\n row.insert(0, 0)\n if any([row[-1] for row in grid]):\n for row in grid:\n row.append(0)\n p_grid = deepcopy(grid)\n for i, row in enumerate(p_grid[1:len(p_grid)]):\n for j, piece in enumerate(p_grid[i][1:len(p_grid[0])]):\n neighbours = 0\n neighbours += sum(p_grid[(i-1)][(j-1):(j+2)])\n neighbours += p_grid[(i)][(j-1)] + p_grid[(i)][(j+1)]\n neighbours += sum(p_grid[(i+1)][(j-1):(j+2)])\n if neighbours < 2:\n grid[i][j] = 0\n elif neighbours == 3:\n grid[i][j] = 1\n elif neighbours > 3:\n grid[i][j] = 0\n\n plot_grid(grid)\n return grid\n\nif __name__ == '__main__':\n cells = [[2, 3],\n [3, 3],\n [1, 4],\n [2, 4],\n [3, 4],\n [3, 5],\n [4, 5],\n [5, 5],\n [6, 6],\n [4, 6],\n [3, 7]]\n game_of_life(cells, 5)\n","sub_path":"conwalls_game_of_life.py","file_name":"conwalls_game_of_life.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"432895311","text":"#tusr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 17:13:22 2018\n\n@author: kitreatakataglushkoff\nKitrea's hand-written copied/adjusted version of the analyze_massredistribution.py, \nwhich was last significantly edited Thursday July 18. \n\nUPDATE - Oct 9, 2018 - Kitrea double-checked code, added some comments. 
\nlast updated Wed Nov 14 - to clean out bad data in the new large dataset.\n\nUPDATE - March/April, 2020 (ongoing) - Zoe edited script to integrate\nnew parameters into the existing functions\n\"\"\"\nimport pandas as pd\nimport pickle\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n#from scipy.stats import median_absolute_deviation\n\n#%% ===== FUNCTIONS =====\nrgi_fp = os.getcwd() + '/../RGI/rgi60/00_rgi60_attribs/'\nassert os.path.exists(rgi_fp), '01_rgi60_Alaska.csv'\n\ndef selectglaciersrgitable(glac_no=None,\n rgi_regionsO1=None,\n rgi_regionsO2=None,\n rgi_glac_number=None,\n rgi_fp=rgi_fp,\n rgi_cols_drop=['GLIMSId','BgnDate','EndDate','Status','Connect','Linkages','Name'],\n rgi_O1Id_colname='glacno',\n rgi_glacno_float_colname='RGIId_float',\n indexname='GlacNo'):\n \"\"\"\n Select all glaciers to be used in the model run according to the regions and glacier numbers defined by the RGI\n glacier inventory. This function returns the rgi table associated with all of these glaciers.\n\n glac_no : list of strings\n list of strings of RGI glacier numbers (e.g., ['1.00001', '13.00001'])\n rgi_regionsO1 : list of integers\n list of integers of RGI order 1 regions (e.g., [1, 13])\n rgi_regionsO2 : list of integers or 'all'\n list of integers of RGI order 2 regions or simply 'all' for all the order 2 regions\n rgi_glac_number : list of strings\n list of RGI glacier numbers without the region (e.g., ['00001', '00002'])\n\n Output: Pandas DataFrame of the glacier statistics for each glacier in the model run\n (rows = GlacNo, columns = glacier statistics)\n \"\"\"\n if glac_no is not None:\n glac_no_byregion = {}\n rgi_regionsO1 = [int(i.split('.')[0]) for i in glac_no]\n rgi_regionsO1 = list(set(rgi_regionsO1))\n for region in rgi_regionsO1:\n glac_no_byregion[region] = []\n for i in glac_no:\n region = i.split('.')[0]\n glac_no_only = i.split('.')[1]\n glac_no_byregion[int(region)].append(glac_no_only)\n\n for region in rgi_regionsO1:\n glac_no_byregion[region] = sorted(glac_no_byregion[region])\n\n # Create an empty dataframe\n rgi_regionsO1 = sorted(rgi_regionsO1)\n glacier_table = pd.DataFrame()\n for region in rgi_regionsO1:\n\n if glac_no is not None:\n rgi_glac_number = glac_no_byregion[region]\n\n for i in os.listdir(rgi_fp):\n if i.startswith(str(region).zfill(2)) and i.endswith('.csv'):\n rgi_fn = i\n print(rgi_fn)\n try:\n csv_regionO1 = pd.read_csv(rgi_fp + rgi_fn)\n except:\n csv_regionO1 = pd.read_csv(rgi_fp + rgi_fn, encoding='latin1')\n \n # Populate glacer_table with the glaciers of interest\n if rgi_regionsO2 == 'all' and rgi_glac_number == 'all':\n print(\"All glaciers within region(s) %s are included in this model run.\" % (region))\n if glacier_table.empty:\n glacier_table = csv_regionO1\n else:\n glacier_table = pd.concat([glacier_table, csv_regionO1], axis=0)\n elif rgi_regionsO2 != 'all' and rgi_glac_number == 'all':\n print(\"All glaciers within subregion(s) %s in region %s are included in this model run.\" %\n (rgi_regionsO2, region))\n for regionO2 in rgi_regionsO2:\n if glacier_table.empty:\n glacier_table = csv_regionO1.loc[csv_regionO1['O2Region'] == regionO2]\n else:\n glacier_table = (pd.concat([glacier_table, csv_regionO1.loc[csv_regionO1['O2Region'] ==\n regionO2]], axis=0))\n else:\n if len(rgi_glac_number) < 20:\n print(\"%s glaciers in region %s are included in this model run: %s\" % (len(rgi_glac_number), region,\n rgi_glac_number))\n else:\n print(\"%s glaciers in region %s are included in this model run: %s and more\" %\n 
(len(rgi_glac_number), region, rgi_glac_number[0:50]))\n\n rgiid_subset = ['RGI60-' + str(region).zfill(2) + '.' + x for x in rgi_glac_number]\n rgiid_all = list(csv_regionO1.RGIId.values)\n rgi_idx = [rgiid_all.index(x) for x in rgiid_subset]\n if glacier_table.empty:\n glacier_table = csv_regionO1.loc[rgi_idx]\n else:\n glacier_table = (pd.concat([glacier_table, csv_regionO1.loc[rgi_idx]],\n axis=0))\n\n glacier_table = glacier_table.copy()\n # reset the index so that it is in sequential order (0, 1, 2, etc.)\n glacier_table.reset_index(inplace=True)\n # change old index to 'O1Index' to be easier to recall what it is\n glacier_table.rename(columns={'index': 'O1Index'}, inplace=True)\n # Record the reference date\n glacier_table['RefDate'] = glacier_table['BgnDate']\n # if there is an end date, then roughly average the year\n enddate_idx = glacier_table.loc[(glacier_table['EndDate'] > 0), 'EndDate'].index.values\n glacier_table.loc[enddate_idx,'RefDate'] = (\n np.mean((glacier_table.loc[enddate_idx,['BgnDate', 'EndDate']].values / 10**4).astype(int),\n axis=1).astype(int) * 10**4 + 9999)\n # drop columns of data that is not being used\n glacier_table.drop(rgi_cols_drop, axis=1, inplace=True)\n # add column with the O1 glacier numbers\n glacier_table[rgi_O1Id_colname] = (\n glacier_table['RGIId'].str.split('.').apply(pd.Series).loc[:,1].astype(int))\n glacier_table['rgino_str'] = [x.split('-')[1] for x in glacier_table.RGIId.values]\n glacier_table[rgi_glacno_float_colname] = (np.array([np.str.split(glacier_table['RGIId'][x],'-')[1]\n for x in range(glacier_table.shape[0])]).astype(float))\n # set index name\n glacier_table.index.name = indexname\n\n print(\"This study is focusing on %s glaciers in region %s\" % (glacier_table.shape[0], rgi_regionsO1))\n\n return glacier_table\n\n\ndef weighted_avg_and_std(values, weights):\n \"\"\"\n Return the weighted average and standard deviation.\n\n values, weights -- Numpy ndarrays with the same shape.\n \"\"\"\n average = np.average(values, weights=weights)\n # Fast and numerically precise:\n variance = np.average((values-average)**2, weights=weights)\n return average, variance**0.5\n\n\ndef weighted_percentile(sorted_list, weights, percentile):\n \"\"\"\n Calculate weighted percentile of a sorted list\n \"\"\"\n assert percentile <= 1 or percentile >=0, 'Error: Percentile outside of 0-1'\n weights_cumsum_norm_high = np.cumsum(weights) / np.sum(weights)\n# print(weights_cumsum_norm_high)\n weights_norm = weights / np.sum(weights)\n weights_cumsum_norm_low = weights_cumsum_norm_high - weights_norm\n# print(weights_cumsum_norm_low)\n \n percentile_idx_high = np.where(weights_cumsum_norm_high >= percentile)[0][0]\n# print(percentile_idx_high)\n percentile_idx_low = np.where(weights_cumsum_norm_low <= percentile)[0][-1]\n# print(percentile_idx_low)\n \n if percentile_idx_low == percentile_idx_high:\n value_percentile = sorted_list[percentile_idx_low]\n else:\n value_percentile = np.mean([sorted_list[percentile_idx_low], sorted_list[percentile_idx_high]])\n\n return value_percentile\n\n\ndef normalized_stats(norm_list):\n # Merge norm_list to make array of all glaciers with same elevation normalization space \n max_length = len(max(norm_list,key=len)) #len of glac w most norm values\n # All data normalized: 1st column is normalized elev, the others are norm dhdt for each glacier\n norm_all = np.zeros((max_length, len(norm_list)+1))\n # First column is normalized elevation, pulled from the glac with most norm vals\n norm_all[:,0] = 
max(norm_list,key=len)[:,0]\n norm_all_area = norm_all.copy()\n \n norm_elev_binsize = (norm_all_area[0,0] - norm_all_area[1,0])\n \n # Loop through each glacier's normalized array (where col1 is elev_norm and col2 is norm dhdt)\n for n in range(len(norm_list)):\n norm_single = norm_list[n] # get one glacier at a time \n\n # Fill in nan values for elev_norm of 0 and 1 with nearest neighbor\n norm_single[0,1] = norm_single[np.where(~np.isnan(norm_single[:,1]))[0][0], 1]\n norm_single[-1,1] = norm_single[np.where(~np.isnan(norm_single[:,1]))[0][-1], 1]\n norm_single[0,2] = norm_single[np.where(~np.isnan(norm_single[:,2]))[0][0], 2]\n norm_single[-1,2] = norm_single[np.where(~np.isnan(norm_single[:,2]))[0][-1], 2]\n # Remove nan values\n norm_single = norm_single[np.where(~np.isnan(norm_single[:,2]))] #~ is the same as !\n elev_single = norm_single[:,0]\n dhdt_single = norm_single[:,1]\n area_single = norm_single[:,2]\n area_single_cumsum = np.cumsum(area_single)\n #loop through each area value of the glacier, and add it and interpolate to add to the norm_all array.\n for r in range(0, max_length):\n \n # Find value need to interpolate to\n norm_elev_value = norm_all_area[r,0]\n norm_elev_lower = norm_elev_value - norm_elev_binsize/2\n if norm_elev_lower <= 0:\n norm_elev_lower = 0\n\n # ----- AREA CALCULATION -----\n if r == 0:\n area_cumsum_upper = 0\n \n if norm_elev_lower > 0:\n# if r < max_length-1:\n# print(r, norm_elev_value, norm_elev_value - norm_elev_binsize/2) \n # Find index of value above it from dhdt_norm, which is a different size\n upper_idx = np.where(elev_single == elev_single[elev_single >= norm_elev_lower].min())[0][0]\n # Find index of value below it\n# print(len(elev_single), max_length)\n# print(elev_single, norm_elev_lower)\n lower_idx = np.where(elev_single == elev_single[elev_single < norm_elev_lower].max())[0][0]\n #get the two values, based on the indices\n upper_elev = elev_single[upper_idx]\n upper_value = area_single_cumsum[upper_idx]\n lower_elev = elev_single[lower_idx]\n lower_value = area_single_cumsum[lower_idx]\n \n #Linearly Interpolate between two values, and plug in interpolated value into norm_all\n area_cumsum_interp = (lower_value + (norm_elev_lower - lower_elev) / (upper_elev - lower_elev) * \n (upper_value - lower_value))\n else:\n area_cumsum_interp = area_single_cumsum[-1]\n # Calculate area within that bin\n norm_all_area[r,n+1] = area_cumsum_interp - area_cumsum_upper\n # Update area_lower_cumsum\n area_cumsum_upper = area_cumsum_interp\n\n # ----- DH/DT CALCULATION -----\n if r == 0:\n #put first value dhdt value into the norm_all. 
n+1 because the first col is taken by the elevnorms.\n norm_all[r,n+1] = dhdt_single[0] \n elif r == (max_length - 1):\n #put last value into the the last row for the glacier's 'stretched out'(interpolated) normalized curve.\n norm_all[r,n+1] = dhdt_single[-1] \n else:\n # Find value need to interpolate to\n norm_elev_value = norm_all[r,0] #go through each row in the elev (col1)\n # Find index of value above it from dhdt_norm, which is a different size\n upper_idx = np.where(elev_single == elev_single[elev_single >= norm_elev_value].min())[0][0]\n # Find index of value below it\n lower_idx = np.where(elev_single == elev_single[elev_single < norm_elev_value].max())[0][0]\n #get the two values, based on the indices\n upper_elev = elev_single[upper_idx]\n upper_value = dhdt_single[upper_idx]\n lower_elev = elev_single[lower_idx]\n lower_value = dhdt_single[lower_idx]\n #Linearly Interpolate between two values, and plug in interpolated value into norm_all\n norm_all[r,n+1] = (lower_value + (norm_elev_value - lower_elev) / (upper_elev - lower_elev) * \n (upper_value - lower_value))\n \n # Compute mean and standard deviation\n norm_all_stats = pd.DataFrame()\n norm_all_stats['norm_elev'] = norm_all[:,0]\n # DH/DT STATISTICS\n norm_all_stats['norm_dhdt_mean'] = np.nanmean(norm_all[:,1:], axis=1) \n norm_all_stats['norm_dhdt_med'] = np.nanmedian(norm_all[:,1:], axis=1) \n norm_all_stats['norm_dhdt_std'] = np.nanstd(norm_all[:,1:], axis=1)\n norm_all_stats['norm_dhdt_16perc'] = np.percentile(norm_all[:,1:], 16, axis=1)\n norm_all_stats['norm_dhdt_84perc'] = np.percentile(norm_all[:,1:], 84, axis=1)\n # AREA STATISTICS\n norm_all_stats['norm_area'] = np.nansum(norm_all_area[:,1:], axis=1)\n norm_all_stats['norm_area_perc'] = norm_all_stats['norm_area'] / norm_all_stats['norm_area'].sum() * 100\n norm_all_stats['norm_area_perc_cumsum'] = np.cumsum(norm_all_stats['norm_area_perc'])\n # area-weighted stats\n norm_all_stats['norm_dhdt_mean_areaweighted'] = np.nan\n norm_all_stats['norm_dhdt_med_areaweighted'] = np.nan\n norm_all_stats['norm_dhdt_std_areaweighted'] = np.nan\n norm_all_stats['norm_dhdt_16perc_areaweighted'] = np.nan\n norm_all_stats['norm_dhdt_84perc_areaweighted'] = np.nan\n for nrow in np.arange(0,norm_all.shape[0]):\n # Select values\n norm_values = norm_all[nrow,1:]\n area_values = norm_all_area[nrow,1:]\n # Sorted values\n area_values_sorted = [x for _,x in sorted(zip(norm_values, area_values))]\n norm_values_sorted = sorted(norm_values)\n # Statistics\n weighted_mean, weighted_std = weighted_avg_and_std(norm_values_sorted, area_values_sorted)\n weighted_med = weighted_percentile(norm_values_sorted, area_values_sorted, 0.5)\n weighted_16perc = weighted_percentile(norm_values_sorted, area_values_sorted, 0.16)\n weighted_84perc = weighted_percentile(norm_values_sorted, area_values_sorted, 0.84)\n # record stats \n norm_all_stats.loc[nrow,'norm_dhdt_mean_areaweighted'] = weighted_mean\n norm_all_stats.loc[nrow,'norm_dhdt_std_areaweighted'] = weighted_std\n norm_all_stats.loc[nrow,'norm_dhdt_med_areaweighted'] = weighted_med\n norm_all_stats.loc[nrow,'norm_dhdt_16perc_areaweighted'] = weighted_16perc\n norm_all_stats.loc[nrow,'norm_dhdt_84perc_areaweighted'] = weighted_84perc\n \n return norm_all_stats\n\n\ndef pickle_data(fn, data):\n \"\"\"Pickle data\n \n Parameters\n ----------\n fn : str\n filename including filepath\n data : list, etc.\n data to be pickled\n \n Returns\n -------\n .pkl file\n saves .pkl file of the data\n \"\"\"\n with open(fn, 'wb') as f:\n pickle.dump(data, f)\n 
\n#%%\n# TO-DO LIST:\nprint('\\nTo-do list:\\n - code Larsen! \\n\\n')\n\n#%% ===== REGION AND GLACIER FILEPATH OPTIONS =====\n# User defines regions of interest\ngroup1 = ['01', '02', '09', '12', '13', '14', '15', '16', '17', '18']\ngroup2 = ['03', '04']\ngroup3 = ['05', '06', '07', '08', '10', '11']\nall_regions = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']\n\nrois = all_regions\n\nif 'davidrounce' in os.getcwd():\n binnedcsv_fp = ('/Users/davidrounce/Documents/Dave_Rounce/DebrisGlaciers_WG/Melt_Intercomparison/output/' + \n 'mb_bins_all/csv/')\nelif 'zoescrewvala' in os.getcwd():\n binnedcsv_fp = '/Users/zoescrewvala/Documents/Alaska_REU_2019/mb_binned_data/'\nelse:\n assert True == False, 'add correct binnedcsv_fp'\n#for roi in rois:\n# assert os.path.exists(rgi_fp), roi\n# OPTION\noption_plot_multipleglaciers_multiplethresholds = False\noption_plot_multipleregions = True\n\n# Columns to use for mass balance and dhdt (specify mean or median)\n#dhdt_stat = 'mean'\ndhdt_stat = 'med'\nif dhdt_stat == 'mean':\n mb_cn = 'mb_bin_mean_mwea'\n dhdt_cn = 'dhdt_bin_mean_ma'\nelse:\n mb_cn = 'mb_bin_med_mwea'\n dhdt_cn = 'dhdt_bin_med_ma'\ndhdt_max = 2.5\ndhdt_min = -50\n\nadd_dc_classification_to_termtype = False\ndc_perc_threshold = 5\n\n# Quality control options\nbinsize = 50 # resample bins to remove noise\nmin_elevbins = 5 # minimum number of elevation bins\nmin_glac_area = 2 # minimum total glacier area size (km2) to consider (removes small glaciers)\nperc_remove = 2.5 # percentage of glacier area to remove (1 means 1 - 99% are used); set to 0 to keep everything\nmin_bin_area_km2 = 0.02 # minimum binned area (km2) to remove everything else; set to 0 to keep everything\noption_remove_surge_glac = True\noption_remove_all_pos_dhdt = True\noption_remove_dhdt_acc = True\noption_remove_acc_lt_abl = True\n\n# ===== PLOT OPTIONS =====\n# Option to save figures\noption_savefigs = True\n\nfig_fp = binnedcsv_fp + '../figs/'\nglacier_plots_transparency = 0.3\n\n#%% Select Files\n# # Load file if it already exists\noverwrite = False\npkl_fp = binnedcsv_fp + '../pickle_datasets/'\nif not os.path.exists(pkl_fp):\n os.makedirs(pkl_fp)\nbinnedcsv_all_fullfn = pkl_fp + 'binnedcsv_all.pkl'\nmain_glac_rgi_fullfn = pkl_fp + 'main_glac_rgi_all.pkl'\n\n# Load pickle data if it exists\nif os.path.exists(binnedcsv_all_fullfn) and not overwrite:\n # Binnedcsv data\n with open(binnedcsv_all_fullfn, 'rb') as f:\n binnedcsv_all = pickle.load(f)\n # Main_glac_rgi data\n with open(main_glac_rgi_fullfn, 'rb') as f:\n main_glac_rgi = pickle.load(f)\n\n# Otherwise, process the data (all regions)\nelse:\n print('redoing pickle datasets')\n # Process all regions\n rois = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']\n # Find files for analysis; create list of all binned filenames\n binnedcsv_fullfns_all = []\n rgiids_all = []\n binnedcsv_fullfns_allrois = []\n for roi in rois:\n binnedcsv_fullfns_roi = []\n rgiids_roi = []\n if roi in ['13','14','15']:\n roi_4fp = 'HMA'\n else:\n roi_4fp = roi\n binnedcsv_fp_roi = binnedcsv_fp + roi_4fp + '/'\n for i in os.listdir(binnedcsv_fp_roi):\n if i.startswith(str(int(roi))) and i.endswith('_mb_bins.csv'):\n rgiids_roi.append(i.split('_')[0])\n binnedcsv_fullfns_roi.append(binnedcsv_fp_roi + i)\n # Sorted files \n binnedcsv_fullfns_roi = [x for _,x in sorted(zip(rgiids_roi, binnedcsv_fullfns_roi))]\n rgiids_roi = sorted(rgiids_roi)\n \n 
binnedcsv_fullfns_all.extend(binnedcsv_fullfns_roi)\n binnedcsv_fullfns_allrois.append(binnedcsv_fullfns_roi)\n rgiids_all.extend(rgiids_roi)\n \n main_glac_rgi_all = selectglaciersrgitable(glac_no=rgiids_all)\n b = main_glac_rgi_all.copy()\n main_glac_rgi_all['binnedcsv_fullfn'] = binnedcsv_fullfns_all\n main_glac_rgi_all['roi'] = [x.split('-')[1].split('.')[0] for x in main_glac_rgi_all.RGIId.values]\n main_glac_rgi = main_glac_rgi_all[main_glac_rgi_all['Area'] > min_glac_area].copy()\n main_glac_rgi.reset_index(drop=True, inplace=True)\n \n # Add statistics for each glacier\n main_glac_rgi['Zmean'] = np.nan\n main_glac_rgi['PercDebris'] = np.nan\n main_glac_rgi['HypsoIndex'] = np.nan\n main_glac_rgi['AAR'] = np.nan\n main_glac_rgi['Z_maxloss_norm'] = np.nan\n main_glac_rgi['mb_abl_lt_acc'] = np.nan\n main_glac_rgi['nbins'] = np.nan\n main_glac_rgi['Size'] = np.nan\n binnedcsv_all = []\n for nglac, rgiid in enumerate(main_glac_rgi.rgino_str.values):\n# for nglac, rgiid in enumerate(main_glac_rgi.rgino_str.values[0:1]):\n if nglac%100 == 0:\n print(nglac, rgiid)\n binnedcsv_fullfn = main_glac_rgi.loc[nglac,'binnedcsv_fullfn']\n binnedcsv = pd.read_csv(binnedcsv_fullfn)\n \n # Elevation bin statistics\n bins_elev = binnedcsv['bin_center_elev_m'].values\n bins_area = binnedcsv['z1_bin_area_valid_km2'].values\n zmin = bins_elev.min()\n zmax = bins_elev.max()\n zmean, zstd = weighted_avg_and_std(bins_elev, bins_area)\n zmed = weighted_percentile(bins_elev, bins_area, 0.5)\n\n # Size -- size classes from Huss et al., 2010\n if main_glac_rgi['Area'][nglac] <= 5:\n glac_size = 'Small'\n elif main_glac_rgi['Area'][nglac] > 5 and main_glac_rgi['Area'][nglac] <= 20:\n glac_size = 'Medium'\n else:\n glac_size = 'Large' \n # Hypsometry index (McGrath et al. 2017)\n hyps_idx = (zmax - zmed) / (zmed - zmin)\n if hyps_idx > 0 and hyps_idx < 1:\n hyps_idx = -1/hyps_idx\n \n # Accumulation-area ratio (assuming median is the equilibrium line altitude)\n aar = bins_area[bins_elev >= zmed].sum() / bins_area.sum()\n \n # Relative debris-covered area\n if 'dc_bin_area_valid_km2' in binnedcsv.columns:\n dc_perc = binnedcsv['dc_bin_area_valid_km2'].sum() / binnedcsv['z1_bin_area_valid_km2'].sum() * 100\n else: \n dc_perc = 0\n # Classify land-terminating glaciers are debris-covered if selected option (Debris = 6)\n if add_dc_classification_to_termtype:\n if main_glac_rgi.loc[nglac,'TermType'] == 0:\n if dc_perc >= dc_perc_threshold:\n main_glac_rgi.loc[nglac,'TermType'] = 6\n \n # Normalized elevation of most negative bin (0 = Zmin, 1 = Zmax)\n maxloss_idx = np.where(binnedcsv[dhdt_cn] == binnedcsv[dhdt_cn].min())[0][0]\n z_maxloss = binnedcsv.loc[maxloss_idx,'bin_center_elev_m']\n z_maxloss_norm = (z_maxloss - zmin) / (zmax - zmin)\n \n \n # Is mass balance in ablation area more negative than the accumulation area (as we would expect)?\n binnedcsv_acc = binnedcsv[binnedcsv.bin_center_elev_m >= zmed]\n mb_acc = ((binnedcsv_acc['z1_bin_area_valid_km2'] * binnedcsv_acc[mb_cn]).sum() / \n binnedcsv_acc['z1_bin_area_valid_km2'].sum())\n binnedcsv_abl = binnedcsv[binnedcsv.bin_center_elev_m < zmed]\n mb_abl = ((binnedcsv_abl['z1_bin_area_valid_km2'] * binnedcsv_abl[mb_cn]).sum() / \n binnedcsv_abl['z1_bin_area_valid_km2'].sum())\n if mb_abl < mb_acc:\n mb_abl_lt_acc = True\n else:\n mb_abl_lt_acc = False\n \n # Add attributes\n main_glac_rgi.loc[nglac,'Zmin'] = zmin\n main_glac_rgi.loc[nglac,'Zmax'] = zmax\n main_glac_rgi.loc[nglac,'Zmed'] = zmed\n main_glac_rgi.loc[nglac,'Zmean'] = zmean\n 
main_glac_rgi.loc[nglac,'PercDebris'] = dc_perc\n main_glac_rgi.loc[nglac,'HypsoIndex'] = hyps_idx\n main_glac_rgi.loc[nglac,'AAR'] = aar\n main_glac_rgi.loc[nglac,'Z_maxloss_norm'] = z_maxloss_norm\n main_glac_rgi.loc[nglac,'mb_abl_lt_acc'] = mb_abl_lt_acc\n # ===== Filter out bad values ==========================================================\n # Remove bad values of dhdt\n binnedcsv.loc[binnedcsv[dhdt_cn] > dhdt_max, dhdt_cn] = np.nan\n binnedcsv.loc[binnedcsv[dhdt_cn] < dhdt_min, dhdt_cn] = np.nan\n # If dhdt is nan, remove row\n null_bins = binnedcsv.loc[pd.isnull(binnedcsv[dhdt_cn])].index.values\n binnedcsv = binnedcsv.drop(null_bins)\n binnedcsv.reset_index(inplace=True, drop=True)\n if binnedcsv.shape[0] > 0:\n #sort out glaciers based on if they have all positive dh/dt, all negative, dh/dt, or both\t\n #based on evaluating, for each glacier, the max from the list of dhdt and the min from the list.\n if np.nanmin(binnedcsv[dhdt_cn].astype(float)) >= 0: \t\t\n glacwide_dhdt_sign = 1 #glaciers with all positive dh/dt\t\t\n elif np.nanmax(binnedcsv[dhdt_cn].astype(float)) <= 0: \t\t\n glacwide_dhdt_sign = -1 #glaciers with all negative dh/dt\t\t\n else: \t\t\n glacwide_dhdt_sign = 0 #glaciers with both, + and - dh/dt \t\t\n main_glac_rgi.loc[nglac, 'dhdt_sign'] = glacwide_dhdt_sign\n\n # ===== OPTION: RESAMPLE BIN SIZES =====\n elev_bins_resampled = np.arange(0 + binsize/2, binnedcsv.bin_center_elev_m.max() + binsize, binsize)\n try:\n elev_bins_resampled_idx_low = np.where(elev_bins_resampled < binnedcsv.bin_center_elev_m.min())[0][-1]\n except:\n elev_bins_resampled_idx_low = 0\n elev_bins_resampled = elev_bins_resampled[elev_bins_resampled_idx_low:]\n binnedcsv_resampled = pd.DataFrame(np.zeros((len(elev_bins_resampled), binnedcsv.shape[1])),\n columns=binnedcsv.columns)\n binnedcsv_resampled['bin_center_elev_m'] = elev_bins_resampled\n for nbin, elev_bin in enumerate(list(elev_bins_resampled)):\n elev_bins = binnedcsv.bin_center_elev_m.values\n elevbin_idx = np.where((elev_bins >= elev_bin - binsize/2) & (elev_bins < elev_bin + binsize/2))[0]\n\n if len(elevbin_idx) > 0 and binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'].sum() > 0:\n binnedcsv_resampled.loc[nbin,'z1_bin_count_valid'] = (\n binnedcsv.loc[elevbin_idx,'z1_bin_count_valid'].sum())\n binnedcsv_resampled.loc[nbin,'z1_bin_area_valid_km2'] = (\n binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'].sum())\n binnedcsv_resampled.loc[nbin,'slope_bin_med'] = (\n weighted_avg_and_std(binnedcsv.loc[elevbin_idx,'slope_bin_med'],\n binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'])[0])\n binnedcsv_resampled.loc[nbin,'aspect_bin_med'] = (\n weighted_avg_and_std(binnedcsv.loc[elevbin_idx,'aspect_bin_med'], \n binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'])[0])\n binnedcsv_resampled.loc[nbin,'dhdt_bin_count'] = (\n binnedcsv.loc[elevbin_idx,'dhdt_bin_count'].sum())\n binnedcsv_resampled.loc[nbin,'dhdt_bin_area_valid_km2'] = (\n binnedcsv.loc[elevbin_idx,'dhdt_bin_area_valid_km2'].sum())\n binnedcsv_resampled.loc[nbin,'dhdt_bin_mean_ma'] = (\n weighted_avg_and_std(binnedcsv.loc[elevbin_idx,'dhdt_bin_mean_ma'], \n binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'])[0])\n binnedcsv_resampled.loc[nbin,'dhdt_bin_med_ma'] = (\n weighted_avg_and_std(binnedcsv.loc[elevbin_idx,'dhdt_bin_med_ma'], \n binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'])[0])\n binnedcsv_resampled.loc[nbin,'mb_bin_mean_mwea'] = (\n weighted_avg_and_std(binnedcsv.loc[elevbin_idx,'mb_bin_mean_mwea'], \n 
binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'])[0])\n binnedcsv_resampled.loc[nbin,'mb_bin_med_mwea'] = (\n weighted_avg_and_std(binnedcsv.loc[elevbin_idx,'mb_bin_med_mwea'], \n binnedcsv.loc[elevbin_idx,'z1_bin_area_valid_km2'])[0])\n if 'dc_bin_area_valid_km2' in binnedcsv.columns:\n binnedcsv_resampled.loc[nbin,'dc_bin_area_valid_km2'] = (\n binnedcsv.loc[elevbin_idx,'dc_bin_area_valid_km2'].sum())\n \n binnedcsv_resampled['z1_bin_area_perc'] = (binnedcsv_resampled['z1_bin_area_valid_km2'] / \n binnedcsv_resampled['z1_bin_area_valid_km2'].sum() * 100)\n binnedcsv_resampled['z1_bin_areas_perc_cum'] = np.cumsum(binnedcsv_resampled['z1_bin_area_perc'])\n binnedcsv_resampled['z2_bin_count_valid'] = binnedcsv_resampled['z1_bin_count_valid']\n binnedcsv_resampled['z2_bin_area_valid_km2'] = binnedcsv_resampled['z1_bin_area_valid_km2']\n binnedcsv_resampled['z2_bin_area_perc'] = binnedcsv_resampled['z1_bin_area_perc']\n binnedcsv_resampled['dhdt_bin_area_perc'] = (binnedcsv_resampled['dhdt_bin_area_valid_km2'] / \n binnedcsv_resampled['dhdt_bin_area_valid_km2'].sum() * 100)\n binnedcsv_resampled['dhdt_bin_std_ma'] = np.nan\n binnedcsv_resampled['dhdt_bin_mad_ma'] = np.nan\n binnedcsv_resampled['mb_bin_std_mwea'] = np.nan\n binnedcsv_resampled['mb_bin_mad_mwea'] = np.nan\n if 'dc_bin_area_valid_km2' in binnedcsv.columns:\n binnedcsv_resampled['dc_dhdt_bin_count'] = np.nan\n binnedcsv_resampled['dc_dhdt_bin_mean_ma'] = np.nan \n binnedcsv_resampled['dc_dhdt_bin_std_ma'] = np.nan\n binnedcsv_resampled['dc_dhdt_bin_med_ma'] = np.nan\n binnedcsv_resampled['dc_dhdt_bin_mad_ma'] = np.nan\n binnedcsv_resampled['dc_mb_bin_mean_mwea'] = np.nan \n binnedcsv_resampled['dc_mb_bin_std_mwea'] = np.nan\n binnedcsv_resampled['dc_mb_bin_med_mwea'] = np.nan\n binnedcsv_resampled['dc_mb_bin_mad_mwea'] = np.nan\n binnedcsv_resampled['dc_bin_count_valid'] = np.nan\n binnedcsv_resampled['dc_bin_area_perc'] = (binnedcsv_resampled['dc_bin_area_valid_km2'] / \n binnedcsv_resampled['dc_bin_area_valid_km2'].sum() * 100)\n binnedcsv_resampled['dc_bin_area_perc_cum'] = np.cumsum(binnedcsv_resampled['dc_bin_area_perc'])\n binnedcsv_resampled['vm_med'] = np.nan\n binnedcsv_resampled['vm_mad'] = np.nan\n binnedcsv_resampled['H_mean'] = np.nan\n binnedcsv_resampled['H_std'] = np.nan\n\n # ===== Filter out the edges, where bins may be very small =====\n binnedcsv_resampled = binnedcsv_resampled[(binnedcsv_resampled['z1_bin_areas_perc_cum'] > perc_remove) & \n (binnedcsv_resampled['z1_bin_areas_perc_cum'] < 100 - perc_remove)]\n binnedcsv_resampled.reset_index(inplace=True, drop=True)\n # ===== Filter out any bins that are too small =====\n binnedcsv_resampled = binnedcsv_resampled[binnedcsv_resampled['z1_bin_area_valid_km2'] > min_bin_area_km2]\n binnedcsv_resampled.reset_index(inplace=True, drop=True)\n \n # ===== Record number of elevation bins =====\n main_glac_rgi.loc[nglac, 'nbins'] = binnedcsv_resampled.shape[0]\n \n # ===== Normalized elevation vs. 
ice thickness change ===============================\n if main_glac_rgi.loc[nglac, 'nbins'] > 1:\n # Normalized elevation\n # (max elevation - bin elevation) / (max_elevation - min_elevation)\n elev_bins_resampled = binnedcsv_resampled['bin_center_elev_m'].values\n zmin_resampled = elev_bins_resampled.min()\n zmax_resampled = elev_bins_resampled.max()\n binnedcsv_resampled['elev_norm'] = (zmax_resampled - elev_bins_resampled) / (zmax_resampled - zmin_resampled)\n # Normalized ice thickness change [ma]\n # dhdt / dhdt_max\n glac_dhdt = binnedcsv_resampled[dhdt_cn].values.astype(float)\n # Shifted normalized ice thickness change such that everything is negative\n # binnedcsv['dhdt_norm_shifted'] = (glac_dhdt - np.nanmax(glac_dhdt)) / np.nanmin(glac_dhdt - np.nanmax(glac_dhdt))\n # binnedcsv.loc[binnedcsv['dhdt_norm_shifted'] == -0, 'dhdt_norm_shifted'] = 0\n # Replace positive values to zero\n glac_dhdt[glac_dhdt >= 0] = 0\n if np.nanmin(glac_dhdt) != 0:\n binnedcsv_resampled['dhdt_norm_huss'] = glac_dhdt / np.nanmin(glac_dhdt)\n binnedcsv_resampled.loc[binnedcsv_resampled['dhdt_norm_huss'] == -0, 'dhdt_norm_huss'] = 0\n else:\n binnedcsv_resampled['dhdt_norm_huss'] = np.nan\n # Replace dhdt sign as this will fail if there all values are zero\n main_glac_rgi.loc[nglac, 'dhdt_sign'] = 1\n \n # Store binnedcsv data\n binnedcsv_all.append(binnedcsv_resampled)\n \n # print(zmin, zmax, np.round(zmean), np.round(zstd), zmed, np.round(hyps_idx,2), np.round(dc_perc))\n \n # ===== Quality control ===== \n # Remove glaciers with too few elevation bins\n main_glac_rgi = main_glac_rgi[main_glac_rgi.nbins >= min_elevbins]\n \n # Remove surging glaciers (listed as 1 possible, 2 probable, or 3 observed in main_glac_rgi)\t\n if option_remove_surge_glac:\n main_glac_rgi = main_glac_rgi[(main_glac_rgi.Surging == 0) | (main_glac_rgi.Surging == 9)]\n \n # Remove glaciers with all positive dh/dt values (listed as 1 in main_glac_rgi)\n if option_remove_all_pos_dhdt: \n main_glac_rgi = main_glac_rgi[main_glac_rgi.dhdt_sign <= 0]\n \n # Remove glaciers with max surface lowering in accumulation area\n if option_remove_dhdt_acc:\n main_glac_rgi = main_glac_rgi[main_glac_rgi.Z_maxloss_norm <= 0.5]\n \n # Remove glaciers where accumulation area has more negative mass balance than ablation area\n if option_remove_acc_lt_abl:\n main_glac_rgi = main_glac_rgi[main_glac_rgi.mb_abl_lt_acc == True]\n \n # Select subset of binnedcsv files consistent with main_glac_rgi \n # (do this after all removed to ensure indices are correct)\n binnedcsv_all = [binnedcsv_all[x] for x in main_glac_rgi.index.values]\n main_glac_rgi.reset_index(inplace=True, drop=True) \n \n # Pickle datasets\n pickle_data(binnedcsv_all_fullfn, binnedcsv_all)\n pickle_data(main_glac_rgi_fullfn, main_glac_rgi)\n \n#%% ===== SUBSET OF REGIONS =====\nif option_plot_multipleglaciers_multiplethresholds:\n\n # ===== Thresholds =====\n Area_thresholds = [5, 20]\n Slope_thresholds = [15]\n HypsoIndex_thresholds = [-1.2, 1.2]\n AAR_thresholds = [0.51]\n PercDebris_thresholds = [5, 10]\n TermType_thresholds = [0.5, 1.5, 2.5]\n Form_thresholds = [0.5, 1.5]\n \n # ===== Parameter dictionary =====\n all_pars = {'TermType': TermType_thresholds, 'Area': Area_thresholds,\n 'Slope': Slope_thresholds, 'HypsoIndex': HypsoIndex_thresholds,\n 'AAR': AAR_thresholds, 'PercDebris': PercDebris_thresholds,\n 'Form': Form_thresholds}\n \n # plot options\n stat_type = '_MEDIANS'\n option_shading = True\n \n # ===== Parameters to loop through =====\n #pars_list = ['Area', 'Slope', 
'HypsoIndex', 'AAR', 'PercDebris', 'TermType']\n pars_list = ['Area']\n\n # ===== Divide glaciers by threshold =====\n for n in range(len(pars_list)): \n parameter = pars_list[n]\n # parameter = 'Area'\n # parameter = 'Slope'\n # parameter = 'HypsoIndex'\n # parameter = 'AAR'\n # parameter = 'PercDebris'\n # parameter = 'TermType'\n # thresholds_var = parameter + '_thresholds'\n \n # Determine subset\n if parameter == 'TermType':\n subset_idxs = []\n thresholds = list(main_glac_rgi.TermType.unique())\n thresholds.sort()\n termtype_dict = {0: 'Land', 1:'Marine', 2:'Lake', 5:'Other', 6:'Debris'}\n termtype_list = [termtype_dict[x] for x in thresholds]\n print('Term type thresholds are:', thresholds)\n for termtype_value in thresholds:\n subset_idxs.append(np.where(main_glac_rgi.TermType == termtype_value)[0])\n elif parameter == 'Form':\n subset_idxs = []\n thresholds = list(main_glac_rgi.Form.unique())\n thresholds.sort()\n form_dict = {0: 'Glacier', 1:'Ice cap', 2:'Perennial snowfield', \n 3:'Seasonal snowfield', 9: 'Not assigned'}\n form_list = [form_dict[x] for x in thresholds]\n print('Form thresholds are:', thresholds)\n for form_value in thresholds:\n subset_idxs.append(np.where(main_glac_rgi.Form == form_value)[0])\n else:\n thresholds = all_pars[parameter]\n \n # Add maximum\n thresholds.append(main_glac_rgi[parameter].max() + 1)\n # Loop through and get subsets\n subset_idxs = []\n for n_threshold, threshold in enumerate(thresholds):\n if n_threshold == 0:\n main_glac_rgi_subset = main_glac_rgi[main_glac_rgi[parameter] <= threshold]\n else:\n main_glac_rgi_subset = main_glac_rgi[(main_glac_rgi[parameter] <= threshold) & \n (main_glac_rgi[parameter] > thresholds[n_threshold-1])]\n subset_idxs.append(list(main_glac_rgi_subset.index.values))\n \n # Loop through thresholds\n normlist_glac_per_threshold = []\n count_glac_per_threshold = []\n for n_threshold, threshold in enumerate(thresholds):\n # Subset indices\n subset_idx = subset_idxs[n_threshold]\n binnedcsv_subset = [binnedcsv_all[x] for x in subset_idx]\n main_glac_rgi_subset = main_glac_rgi.loc[subset_idx,:]\n main_glac_rgi_subset.reset_index(inplace=True, drop=True)\n \n normlist_glac = []\n for nglac in main_glac_rgi_subset.index.values:\n binnedcsv_glac = binnedcsv_subset[nglac]\n glac_elevnorm = binnedcsv_glac['elev_norm'].values\n# glac_dhdt_norm_huss = binnedcsv_glac['dhdt_norm_huss']\n glac_dhdt_norm_huss = binnedcsv_glac['dhdt_norm_huss']\n glac_area = binnedcsv_glac['z1_bin_area_valid_km2']\n# normlist_array = np.array([glac_elevnorm, glac_dhdt_norm_huss, glac_area]).transpose()\n normlist_array = np.array([glac_elevnorm, glac_dhdt_norm_huss, glac_area]).transpose()\n# normlist_array = np.array([glac_elevnorm, glac_dhdt_norm_huss]).transpose()\n normlist_glac.append(normlist_array)\n\n normlist_glac_per_threshold.append(normlist_glac)\n count_glac_per_threshold.append(len(normlist_glac))\n print('len normlist_glac:', len(normlist_glac))\n \n #%%\n # ===== PLOT =====\n option_plotarea = True\n # Plot the normalized curves\n fig_width = 5\n if option_plotarea:\n n_cols = 2\n else:\n n_cols = 1\n fig, ax = plt.subplots(len(thresholds), n_cols, squeeze=False, figsize=(fig_width,int(3*len(thresholds))), \n gridspec_kw = {'wspace':0.5, 'hspace':0.5})\n \n roi_str = None\n for roi_raw in rois:\n if roi_str is None:\n roi_str = str(roi_raw)\n else:\n roi_str = roi_str + '-' + str(roi_raw) \n \n normlist_stats_all = []\n for n, threshold in enumerate(thresholds):\n if len(normlist_glac_per_threshold[n]) > 0:\n # Extract values 
to plot\n normlist = normlist_glac_per_threshold[n]\n \n for glac in range(len(normlist)):\n normlist_glac = normlist[glac]\n # Normalized elevation vs. normalized dh/dt\n ax[n,0].plot(normlist_glac[:,0], normlist_glac[:,1], linewidth=1, alpha=glacier_plots_transparency, \n label=None)\n ax[n,0].set_ylim(max(normlist_glac[:,0]), min(normlist_glac[:,0]))\n ax[n,0].set_xlim(0,1)\n ax[n,0].set_ylabel('dh/dt [-]', size=12)\n ax[n,0].set_xlabel('Elevation [-]', size=12)\n ax[n,0].yaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[n,0].yaxis.set_minor_locator(plt.MultipleLocator(0.1))\n ax[n,0].xaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[n,0].xaxis.set_minor_locator(plt.MultipleLocator(0.1))\n\n \n if parameter == 'TermType':\n ax[n,0].set_title(('Regions_ ' + roi_str + ' -- ' + parameter + ' = ' + termtype_list[n] + \n ' (' + str(len(normlist)) + ' Glaciers)'), size=12)\n elif parameter == 'Form':\n ax[n,0].set_title(('Regions_ ' + roi_str + ' -- ' + parameter + ' = ' + form_list[n] + \n ' (' + str(len(normlist)) + ' Glaciers)'), size=12)\n else:\n if threshold == thresholds[0]:\n ax[n,0].set_title((parameter + '<' + str(threshold) + ' (' + str(len(normlist)) + \n ' Glaciers)'), size=12)\n elif threshold != thresholds[-1]:\n ax[n,0].set_title((str(thresholds[n-1]) + '<' + parameter + '<' + str(threshold) + ' (' + \n str(len(normlist)) + ' Glaciers)'), size=12)\n else:\n ax[n,0].set_title((parameter + '>' + str(thresholds[n-1]) + ' (' + str(len(normlist)) + \n ' Glaciers)'), size=12)\n \n # Add statistics to plot\n normlist_stats = normalized_stats(normlist)\n if stat_type == '_MEDIANS':\n ax[n,0].plot(normlist_stats.norm_elev, normlist_stats.norm_dhdt_med_areaweighted, color='black', linewidth=2)\n if stat_type == '_MEANS':\n ax[n,0].plot(normlist_stats.norm_elev, normlist_stats.norm_dhdt_mean_areaweighted, color='black', linewidth=2)\n ax[n,0].plot(normlist_stats.norm_elev, normlist_stats.norm_dhdt_16perc_areaweighted, '--', color='black', \n linewidth=1.5) \n ax[n,0].plot(normlist_stats.norm_elev, normlist_stats.norm_dhdt_84perc_areaweighted, '--', color='black', \n linewidth=1.5)\n \n if option_plotarea:\n ax[n,1].plot(normlist_stats.norm_elev, normlist_stats.norm_area, color='black', \n linewidth=2)\n ax[n,1].set_xlim(0,1)\n# ax[n,1].set_ylim(0,100)\n ax[n,1].set_ylabel('Cumulative Area [%]', size=12)\n ax[n,1].set_xlabel('Elevation [-]', size=12)\n max_area = normlist_stats.norm_area.max()\n if max_area < 100:\n ax[n,1].yaxis.set_major_locator(plt.MultipleLocator(10))\n ax[n,1].yaxis.set_minor_locator(plt.MultipleLocator(5))\n else:\n ax[n,1].yaxis.set_major_locator(plt.MultipleLocator((max_area/100).round()*10))\n ax[n,1].yaxis.set_minor_locator(plt.MultipleLocator((max_area/100).round()*5))\n ax[n,1].xaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[n,1].xaxis.set_minor_locator(plt.MultipleLocator(0.1))\n \n perc_upperbnd = 100 - perc_remove\n perc_lowerbnd = perc_remove\n vline_upperbnd_idx = np.where(normlist_stats.norm_area_perc_cumsum < perc_upperbnd)[0][-1]\n vline_upperbnd = normlist_stats.loc[vline_upperbnd_idx, 'norm_elev']\n vline_lowerbnd_idx = np.where(normlist_stats.norm_area_perc_cumsum > perc_lowerbnd)[0][0]\n vline_lowerbnd = normlist_stats.loc[vline_lowerbnd_idx, 'norm_elev']\n ax[n,1].axvline(vline_upperbnd, linewidth=1, linestyle=':', color='grey')\n ax[n,1].axvline(vline_lowerbnd, linewidth=1, linestyle=':', color='grey')\n \n # Record stats to plot on separate graph\n normlist_stats_all.append(normlist_stats)\n \n # Save figure\n 
fig.set_size_inches(fig_width, int(len(thresholds)*3))\n threshold_str_list = [str(i) for i in thresholds]\n threshold_str_list[-1] = 'max'\n threshold_str = '-'.join(threshold_str_list)\n print(threshold_str)\n fig_fp_all = fig_fp + 'resampled_bins/'\n if not os.path.exists(fig_fp_all):\n os.makedirs(fig_fp_all)\n fig.savefig(fig_fp_all + ('rgi_' + roi_str + '-normcurves' + parameter + '_' + threshold_str + '.png'), \n bbox_inches='tight', dpi=300)\n plt.show()\n \n #%%\n # ===== PLOT ALL ON ONE =====\n fig_width_all = 4\n fig_height_all = 3\n fig, ax = plt.subplots(1, 1, squeeze=False, figsize=(fig_width_all,fig_height_all), \n gridspec_kw = {'wspace':0.2, 'hspace':0.5})\n for n, normlist_stats in enumerate(normlist_stats_all):\n # Threshold label\n threshold = thresholds[n]\n num_glac = count_glac_per_threshold[n]\n if threshold == thresholds[0]:\n threshold_label = '< ' + str(threshold) + ' (' + str(num_glac) + ')'\n elif threshold != thresholds[-1]:\n threshold_label = str(thresholds[n-1]) + '-' + str(threshold) + ' (' + str(num_glac) + ')'\n else:\n threshold_label = '> ' + str(thresholds[n-1]) + ' (' + str(num_glac) + ')'\n \n \n # Add statistics to plot \n # ===== VARIABLES TO PLOT =====\n x_var = normlist_stats.norm_elev\n if stat_type=='_MEDIANS':\n y_var = normlist_stats.norm_dhdt_med_areaweighted\n if stat_type=='_MEANS':\n y_var = normlist_stats.norm_dhdt_mean_areaweighted\n err_low = normlist_stats.norm_dhdt_16perc_areaweighted\n err_high = normlist_stats.norm_dhdt_84perc_areaweighted\n \n # Plot median of each\n ax[0,0].plot(x_var, y_var, linewidth=2, label = threshold_label) #add label\n if option_shading:\n ax[0,0].fill_between(x_var, err_low, err_high, alpha = 0.5, linewidth=1)\n ax[0,0].set_ylim(max(normlist_glac[:,0]), min(normlist_glac[:,0]))\n ax[0,0].set_xlim(0,1)\n ax[0,0].set_ylabel('dh/dt [-]', size=12)\n ax[0,0].set_xlabel('Elevation [-]', size=12)\n ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(0.1))\n ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(0.1))\n print(n)\n ax[0,0].set_title('Region ' + roi_str + ' -- ' + parameter, size=12) \n if parameter == 'TermType':\n ax[0,0].legend(termtype_list, loc='lower left', \n handlelength=0.5, labelspacing=1, columnspacing=1)\n elif parameter == 'Form':\n ax[0,0].legend(form_list, loc='lower left', \n handlelength=0.5, labelspacing=1, columnspacing=1) # + ' (' + str(num_glac) + ')'\n else:\n ax[0,0].legend(loc='lower left', handlelength=0.5, labelspacing=1, columnspacing=1)\n print(threshold_str)\n # Save figure\n fig_fp_all = fig_fp + 'resampled_bins/MEDIANS/'\n if not os.path.exists(fig_fp_all):\n os.makedirs(fig_fp_all)\n fig_fn = None\n if option_shading:\n if rois==group1:\n fig_fn = 'rgi_' + 'group1_' + 'normcurves' + parameter + '_' + threshold_str + stat_type + '.png'\n if rois==group2:\n fig_fn = 'rgi_' + 'group2_' + 'normcurves' + parameter + '_' + threshold_str + stat_type + '.png'\n if rois==group3:\n fig_fn = 'rgi_' + 'group3_' + 'normcurves' + parameter + '_' + threshold_str + stat_type + '.png'\n else:\n if rois==group1:\n fig_fn = ('rgi_' + 'group1_' + 'normcurves' + parameter + '_' + threshold_str + '_noshading' + \n stat_type + '.png')\n if rois==group2:\n fig_fn = ('rgi_' + 'group2_' + 'normcurves' + parameter + '_' + threshold_str + '_noshading' + \n stat_type + '.png')\n if rois==group3:\n fig_fn = ('rgi_' + 'group3_' + 'normcurves' + parameter + '_' + threshold_str + 
'_noshading' + \n stat_type + '.png')\n if fig_fn is None:\n fig_fn = 'rgi_' + roi_str + 'normcurves' + parameter + '_' + threshold_str + stat_type + '.png'\n \n fig.savefig(fig_fp_all + fig_fn , bbox_inches='tight', dpi=300)\n plt.show()\n \n#%% ===== COMPARE REGIONS =====\nif option_plot_multipleregions:\n\n # ===== plot specification options =====\n option_shading = False\n\n # ===== Ranges =====\n Area_range = [5, 20]\n Slope_range = [15, 20]\n TermType_range = [-0.5, 0.5]\n Form_range = [0.5, 1.5]\n \n # ==== Ranges dictionary =====\n ranges_dict = {'Area': Area_range, 'Slope': Slope_range, \n 'TermType': TermType_range, 'Form': Form_range}\n\n \n # ===== Select parameter and subset ======\n parameter = 'Area'\n subset = 'Small'\n # ===== Parameter dictionaries =====\n if parameter == 'TermType':\n param_dict = {0: 'Land', 1:'Marine', 2:'Lake', 5:'Other', 6:'Debris'}\n elif parameter == 'Form':\n param_dict = {0: 'Glacier', 1:'Ice cap', 2:'Perennial snowfield', \n 3:'Seasonal snowfield', 9: 'Not assigned'}\n\n# ======== loop through and compare regions ==========\n# ======== select if/for statements based on how you want to compare regions\n# Option 1. group compare (more than 2)\n# Option 2. all pairs in a group (all possible pairings, 1-2, 2-3, 1-3, etc)\n# Option 3. all rois to one region compare (in pairs, 1-2, 1-3, 1-4, etc.)\n\n if 1 == 1: # option 1 or 3\n# for roi1 in rois: # option 2\n# roi1 = '12' # option 3\n if 1 == 1: # option 1\n# for roi2 in rois: # option 2 or 3\n if 1 == 1: # option 1\n# if roi1 != roi2 and int(roi1) < int(roi2): # option 2\n# if roi1 != roi2: # option 3\n # select regions\n regs_to_comp = rois # option 1\n# regs_to_comp = [roi1, roi2] # option 2 or 3\n # option for error shading \n # option_shading = True\n \n # ===== Plot setup =====\n fig_width_all = 4\n fig_height_all = 3\n fig, ax = plt.subplots(1, 1, squeeze=False, figsize=(fig_width_all,fig_height_all), \n gridspec_kw = {'wspace':0.2, 'hspace':0.5})\n ax[0,0].set_ylabel('Normalized Ice Thinning [-]', size=12)\n ax[0,0].set_xlabel('Normalized Elevation [-]', size=12)\n ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(0.1))\n ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(0.1))\n \n for roi in regs_to_comp:\n # Loop through and get subset\n subset_idxs = []\n for n in range(len(main_glac_rgi)):\n if main_glac_rgi['roi'][n] == roi:\n if parameter == 'Area':\n if (main_glac_rgi[parameter][n] <= 5):\n glac_size = 'Small'\n elif (main_glac_rgi['Area'][n] > 5 and main_glac_rgi['Area'][n] <= 20):\n glac_size = 'Medium'\n else:\n glac_size = 'Large'\n if glac_size == subset:\n subset_idxs.append(n)\n else:\n if (main_glac_rgi[parameter][n] > ranges_dict[parameter][0] and \n main_glac_rgi[parameter][n] < ranges_dict[parameter][1]):\n subset_idxs.append(n)\n if subset_idxs != []:\n binnedcsv_subset = [binnedcsv_all[x] for x in subset_idxs]\n main_glac_rgi_subset = main_glac_rgi.loc[subset_idxs,:]\n main_glac_rgi_subset.reset_index(inplace=True, drop=True)\n normlist_glac = []\n for nglac in main_glac_rgi_subset.index.values:\n binnedcsv_glac = binnedcsv_subset[nglac]\n glac_elevnorm = binnedcsv_glac['elev_norm'].values\n glac_dhdt_norm_huss = binnedcsv_glac['dhdt_norm_huss']\n glac_area = binnedcsv_glac['z1_bin_area_valid_km2']\n normlist_array = np.array([glac_elevnorm, glac_dhdt_norm_huss, glac_area]).transpose()\n normlist_glac.append(normlist_array)\n # ===== PLOT =====\n # 
Plot the normalized curves\n \n # Add statistics to plot \n normlist_stats_all = []\n normlist_stats = normalized_stats(normlist_glac)\n \n stat_type = 'median'\n x_var = normlist_stats.norm_elev\n y_var = normlist_stats.norm_dhdt_med\n err_low = normlist_stats.norm_dhdt_16perc\n err_high = normlist_stats.norm_dhdt_84perc\n # error = normlist_stats.norm_dhdt_mad\n error = normlist_stats.norm_dhdt_std\n \n # Plot median of each\n ax[0,0].plot(x_var, y_var, linewidth=2, label = roi) #add label\n if option_shading:\n ax[0,0].fill_between(x_var, err_low, err_high, alpha = 0.5, linewidth=1)\n \n ax[0,0].set_ylim(0,1)\n ax[0,0].set_xlim(0,1)\n plt.gca().invert_yaxis()\n ax[0,0].set_title(parameter + ' -- ' + subset, size=12) \n \n legend = ax[0,0].legend(loc='lower left', ncol=3, fontsize='large', handlelength=0.5) \n # Save figure\n if option_savefigs:\n fig_fp_all = fig_fp + 'resampled_bins/region_comparisons/'\n if not os.path.exists(fig_fp_all):\n os.makedirs(fig_fp_all)\n if option_shading==False:\n fig_fn = ('compareregions' + ''.join(regs_to_comp) + parameter + '_' + subset + \n '_noshading_MEDIANS.png')\n else:\n if stat_type == 'median':\n fig_fn = ('compareregions' + '_' + ''.join(regs_to_comp) + '_' + parameter + '_' + subset + \n '_MEDIANS.png')\n else:\n fig_fn = ('compareregions' + '_' + ''.join(regs_to_comp) + parameter + '_' + subset + \n '_MEANS.png')\n fig.savefig(fig_fp_all + fig_fn , bbox_inches='tight', dpi=300)\n plt.show()","sub_path":"analyze_massredistribution.py","file_name":"analyze_massredistribution.py","file_ext":"py","file_size_in_byte":56989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"499673506","text":"import argparse\nimport yaml\nimport gym\nfrom robamine.utils.memory import ReplayBuffer\nfrom robamine.algo.util import Transition\nimport numpy as np\nimport robamine as rm\nimport logging\nimport os\n\ndef run(yml):\n with open(\"../yaml/\" + yml + \".yml\", 'r') as stream:\n try:\n params = yaml.safe_load(stream)\n rm.rb_logging.init(directory=params['logging_directory'], file_level=logging.INFO)\n logger = logging.getLogger('robamine')\n replay_buffer = []\n for i in range(params['nr_primitives']):\n replay_buffer.append(ReplayBuffer(params['buffer_size']))\n env = gym.make(params['env']['name'], params=params['env'])\n timestep = 0\n while True:\n if timestep >= params['timesteps']:\n break\n observation = env.reset()\n\n while True:\n if timestep >= params['timesteps']:\n break\n timestep += 1\n action = env.action_space.sample()\n observation_new, reward, done, info = env.step(action)\n transition = Transition(observation, action, reward, observation_new, done)\n replay_buffer[int(np.floor(transition.action / params['nr_substates']))].store(transition)\n observation = observation_new.copy()\n if done:\n break\n\n print('Timestep: ', timestep, 'Buffer sizes:', replay_buffer[0].size(), replay_buffer[1].size(), replay_buffer[2].size())\n\n log_dir = rm.rb_logging.get_logger_path()\n for i in range(params['nr_primitives']):\n replay_buffer[i].save(os.path.join(log_dir, 'replay_buffer' + str(i) + '.pkl'))\n\n except yaml.YAMLError as exc:\n print(exc)\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--yml', type=str, default='store_buffers', help='The yaml file to load')\n args = parser.parse_args()\n dict_args = vars(args)\n return dict_args\n\nif __name__ == '__main__':\n args = parse_args()\n 
run(**args)\n","sub_path":"examples/store_replay_buffers.py","file_name":"store_replay_buffers.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"42791204","text":"import numpy as np\nimport pandas as pd\n# IMPORTANT: DO NOT USE ANY OTHER 3RD PARTY PACKAGES\n# (math, random, collections, functools, etc. are perfectly fine)\n\n\nclass KMeans(object):\n\n def __init__(self, myK, method=\"Frogy\", preprocessing=False):\n # NOTE: Feel free add any hyperparameters\n # (with defaults) as you see fit\n #self.centroids = np.empty(0)\n self.k = myK # number of clusters\n self.centroids = np.empty(0) # centroids\n self.method = method # initial assignment\n self.Xcent = np.empty(0) # 1-D array size=X size, centroid assignment for each X datapoint\n self.cent = [] # 2-D array(M, ..) all indexes in X for centoid i\n self.preprocessing = preprocessing\n\n def assignToCenter(self, xNumb, cNumb):\n oldC = int(self.Xcent[xNumb]) # previous cluster number\n if oldC != -1: # if this is not its first assignment\n self.cent[oldC].remove(xNumb) # remove from centroid->X number datastructure\n self.Xcent[xNumb] = cNumb # update 1d list\n self.cent[cNumb].append(xNumb) # update 2d list\n\n def assignClosest(self, X): # assign all points to closest centroid\n for i, sample in enumerate(X): # go through all points\n dist = [] # list for distances\n for c in self.centroids: # go through all centroids\n dist.append(euclidean_distance(np.array(sample), np.array(c)))\n myC = int(np.argmin(dist)) # centroid with lowest distance\n self.assignToCenter(i, myC) # assign datapoint i to cluster myC\n\n def fit(self, X):\n \"\"\"\n Estimates parameters for the classifier\n\n Args:\n X (array): a matrix of floats with\n m rows (#samples) and n columns (#features)\n k Integer, how many clusters to use\n random Boolean, if true selects K random samples in X\n as initial centroids\n \"\"\"\n # TODO: Implement\n # X-preprocessing\n X = np.array(X)\n if self.preprocessing:\n self.xMax = np.max(X[:, 0]) # save ranges.\n self.yMax = np.max(X[:, 1])\n X[:, 0] = X[:,0]/np.max(X[:,0]) # normalise\n X[:, 1] = X[:, 1]/np.max(X[:,1])\n \n # initialize centroids, either random or k-first\n if self.method == \"Frogy\": # frogy, choose random k observations\n indexes = np.random.randint(len(X),size=(self.k))\n self.centroids = np.array(X)[indexes]\n elif self.method == \"First K\": # select the first k observations\n self.centroids = np.array(X[:self.k].copy())\n # initliallize other arrays\n self.Xcent = np.zeros(X.shape[0])\n self.Xcent[:] = -1\n self.cent = [[] for i in range(self.k)]\n\n return X\n def upscaleCentroids(self): # upscale the resulting centroids\n self.centroids[:,0] = self.centroids[:,0]*self.xMax\n self.centroids[:,1] = self.centroids[:,1]*self.yMax\n def predict(self, X):\n \"\"\"\n Generates predictions\n\n Note: should be called after .fit()\n\n Args:\n X (array): a matrix of floats with\n m rows (#samples) and n columns (#features)\n\n Returns:\n A length m integer array with cluster assignments\n for each point. 
E.g., if X is a 10xn matrix and\n there are 3 clusters, then a possible assignment\n could be: array([2, 0, 0, 1, 2, 1, 1, 0, 2, 2])\n \"\"\"\n X = np.array(X)\n dist = 2\n while dist != 0: # while converging\n self.assignClosest(X) # assign all points to closest centroid\n dist = 0\n for i in range(self.k):\n new = [0,0]\n new[0] = np.mean(X[self.cent[i], 0]) # calculate means\n new[1] = np.mean(X[self.cent[i], 1])\n\n dist += euclidean_distance(self.centroids[i], new) # add change in distance\n self.centroids[i] = new # assign new centroid\n print(np.max(self.centroids[:,0]))\n return np.array(self.Xcent, np.int)\n\n def get_centroids(self):\n \"\"\"\n Returns the centroids found by the K-mean algorithm\n\n Example with m centroids in an n-dimensional space:\n >>> model.get_centroids()\n numpy.array([\n [x1_1, x1_2, ..., x1_n],\n [x2_1, x2_2, ..., x2_n],\n .\n .\n .\n [xm_1, xm_2, ..., xm_n]\n ])\n \"\"\"\n return self.centroids\n\n\n\n\n# --- Some utility functions\n\n\ndef euclidean_distortion(X, z):\n \"\"\"\n Computes the Euclidean K-means distortion\n\n Args:\n X (array): m x n float matrix with datapoints\n z (array): m-length integer vector of cluster assignments\n\n Returns:\n A scalar float with the raw distortion measure\n \"\"\"\n X, z = np.asarray(X), np.asarray(z)\n assert len(X.shape) == 2\n assert len(z.shape) == 1\n assert X.shape[0] == z.shape[0]\n\n distortion = 0.0\n for c in np.unique(z):\n Xc = X[z == c]\n mu = Xc.mean(axis=0)\n distortion += ((Xc - mu) ** 2).sum()\n\n return distortion\n\ndef euclidean_distance(x, y):\n \"\"\"\n Computes euclidean distance between two sets of points\n\n Note: by passing \"y=0.0\", it will compute the euclidean norm\n\n Args:\n x, y (array<...,n>): float tensors with pairs of\n n-dimensional points\n\n Returns:\n A float array of shape <...> with the pairwise distances\n of each x and y point\n \"\"\"\n return np.linalg.norm(x - y, ord=2, axis=-1)\n\ndef cross_euclidean_distance(x, y=None):\n \"\"\"\n Compute Euclidean distance between two sets of points\n\n Args:\n x (array): float tensor with pairs of\n n-dimensional points.\n y (array): float tensor with pairs of\n n-dimensional points. 
Uses y=x if y is not given.\n\n Returns:\n A float array of shape with the euclidean distances\n from all the points in x to all the points in y\n \"\"\"\n y = x if y is None else y\n assert len(x.shape) >= 2\n assert len(y.shape) >= 2\n return euclidean_distance(x[..., :, None, :], y[..., None, :, :])\n\n\ndef euclidean_silhouette(X, z):\n \"\"\"\n Computes the average Silhouette Coefficient with euclidean distance\n\n More info:\n - https://www.sciencedirect.com/science/article/pii/0377042787901257\n - https://en.wikipedia.org/wiki/Silhouette_(clustering)\n\n Args:\n X (array): m x n float matrix with datapoints\n z (array): m-length integer vector of cluster assignments\n\n Returns:\n A scalar float with the silhouette score\n \"\"\"\n X, z = np.asarray(X), np.asarray(z)\n assert len(X.shape) == 2\n assert len(z.shape) == 1\n assert X.shape[0] == z.shape[0]\n\n # Compute average distances from each x to all other clusters\n clusters = np.unique(z)\n D = np.zeros((len(X), len(clusters)))\n for i, ca in enumerate(clusters):\n for j, cb in enumerate(clusters):\n in_cluster_a = z == ca\n in_cluster_b = z == cb\n d = cross_euclidean_distance(X[in_cluster_a], X[in_cluster_b])\n div = d.shape[1] - int(i == j)\n D[in_cluster_a, j] = d.sum(axis=1) / np.clip(div, 1, None)\n\n # Intra distance\n a = D[np.arange(len(X)), z]\n # Smallest inter distance\n inf_mask = np.where(z[:, None] == clusters[None], np.inf, 0)\n b = (D + inf_mask).min(axis=1)\n\n return np.mean((b - a) / np.maximum(a, b))\n","sub_path":"k_means/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":7564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318185557","text":"from django.shortcuts import get_object_or_404\nfrom django.contrib.auth import get_user_model\nfrom rest_framework.decorators import api_view\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponse\nfrom rest_framework.response import Response\n# from rest_framework import viewsets\nfrom .serializers import *\nfrom .models import *\nfrom trip.models import Trip\nfrom rest_framework import status\nimport datetime\n\n# Create your views here.\n\n@api_view(['POST'])\ndef create_party(request, trip_pk):\n User = get_user_model()\n user = get_object_or_404(User, pk=request.user.pk)\n trip = get_object_or_404(Trip, pk=trip_pk)\n serializer = PartySerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=user, id=trip_pk)\n trip.party_chk = 1\n trip.save()\n return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(data=serializer.errors)\n\n@api_view(['GET'])\ndef list_party(request):\n now = datetime.datetime.now()\n nowDate = now.strftime('%Y%m%d')\n parties = Party.objects.filter(trip_date__gte=int(nowDate)).order_by('trip_date')\n serializer = PartyListSerializer(parties, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef detail_party(request, party_pk):\n party = Party.objects.filter(pk=party_pk)\n serializer = PartyListSerializer(party, many=True)\n return Response(serializer.data)\n\n@api_view(['PUT'])\ndef update_party(request, party_pk):\n User = get_user_model()\n user = get_object_or_404(User, pk=request.user.pk)\n party = get_object_or_404(Party, pk=party_pk)\n if party.user == user:\n serializer = PartySerializer(data=request.data)\n if serializer.is_valid():\n serializer.update(party, request.data)\n return Response(serializer.data)\n \n return HttpResponse('Something 
Wrong')\n\n@api_view(['DELETE'])\ndef delete_party(request, party_pk):\n User = get_user_model()\n user = get_object_or_404(User, pk=request.user.pk)\n trip = get_object_or_404(Trip, pk=party_pk)\n party = get_object_or_404(Party, id=party_pk)\n if party.user == user:\n party.delete()\n trip.party_chk = 0\n trip.save()\n return HttpResponse('잘 지워짐')\n return HttpResponse('니 글 아님 ㅅㄱ')\n\n@api_view(['POST'])\ndef create_message(request, party_pk):\n User = get_user_model()\n user = get_object_or_404(User, pk=request.user.pk)\n party = get_object_or_404(Party, pk=party_pk)\n serializer = PartyMessageSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=user, party_id=party)\n return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(data=serializer.errors)\n\n@api_view(['GET'])\ndef list_message(request, party_pk):\n messages = PartyMessage.objects.filter(party_id=party_pk).order_by('-created_at')\n serializer = PartyMessageListSerializer(messages, many=True)\n return Response(serializer.data)","sub_path":"backend/sikdorang/party/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"509248053","text":"#\n# Class diary\n#\n# Create program for handling lesson scores.\n# Use python to handle student (highscool) class scores, and attendance.\n# Make it possible to:\n# - Get students total average score (average across classes)\n# - get students average score in class\n# - hold students name and surname\n# - Count total attendance of student\n# The default interface for interaction should be python interpreter.\n# Please, use your imagination and create more functionalities.\n# Your project should be able to handle entire school.\n# If you have enough courage and time, try storing (reading/writing)\n# data in text files (YAML, JSON).\n# If you have even more courage, try implementing user interface.\n\nfrom pony.orm import *\nfrom datetime import datetime\n\ndb = Database(\"sqlite\", \"Dairy2016.sqlite\", create_db=False)\n\n\nclass Student(db.Entity):\n name = Required(str)\n surname = Required(str)\n name = Required(str)\n group = Required('Group')\n scores = Set('Score')\n attendance = Set('Attendance')\n\n\nclass Course(db.Entity):\n name = Required(str)\n score = Set('Score')\n attendance = Set('Attendance')\n\n\nclass Score(db.Entity):\n date = Required(datetime)\n value = Required(int)\n student = Required(Student)\n course = Required(Course)\n\n\nclass Attendance(db.Entity):\n date = Required(datetime)\n value = Required(bool)\n student = Required(Student)\n course = Required(Course)\n\n\nclass Group(db.Entity):\n name = Required(str)\n student = Set('Student')\n\ndb.generate_mapping(create_tables=False)\n\n\n\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"83589708","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 20 22:48:51 2019\n\n@author: Gololobov\n\"\"\"\n\n# import libraries for network learning\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n(train_data, test_data), info = tfds.load(\n # use pre-encoded version with 8k vocablurary\n 'imdb_reviews/subwords8k',\n # return train and test datasets as a 
tuple\n split = (tfds.Split.TRAIN, tfds.Split.TEST),\n # return (example, label) pairs from the dataset\n as_supervised=True,\n # return info structure\n with_info=True)\n\nencoder = info.features['text'].encoder\n\n# preparing data for training \nBUFFER_SIZE = 1000\n\ntrain_batches = (\n train_data\n .shuffle(BUFFER_SIZE)\n .padded_batch(32, train_data.output_shapes))\n\ntest_batches = (\n test_data\n .padded_batch(32, train_data.output_shapes))\n\n# building a model\nmodel = keras.Sequential([\n keras.layers.Embedding(encoder.vocab_size, 16),\n keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(1, activation='sigmoid')])\n\nmodel.summary()\n\n# configure model \nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# training model\nhistory = model.fit(train_batches, \n epochs=10,\n validation_data=test_batches,\n validation_steps=30)\n\n# evaluate the model\nloss, accuracy = model.evaluate(test_batches)\n\nprint('Loss: ', loss)\nprint('Accuracy: ', accuracy)\n\n#create graph of accuracy and loss over time\nhistory_dict = history.history\n\n#plotting graph\nacc = history_dict['accuracy']\nval_acc = history_dict['val_accuracy']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\nplt.subplot(2, 1, 1)\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.subplot(2, 1, 2)\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='validation acc')\nplt.title('Training and validationg accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc = 'lower right')\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Desktop/tensorflow/movie_classification.py","file_name":"movie_classification.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275212341","text":"import numpy as np\n\nf= open(\"2in.txt\").read().split(\"\\n\")\nwriteF=open(\"2out.txt\",\"w\")\n\n\ndef getdistance(X,Y,x,y):\n value=0\n for i in range(0,len(X)):\n value+=max(abs(X[i]-x),abs(Y[i]-y))\n return value\n \ndef getdistanceX(X,x,compare):\n value=0\n for i in range(0,len(X)):\n value+=abs(X[i]-x)\n if value > compare:\n return 1000000000\n return value\n \ndef getsmallestx(X):\n temp=1000000000\n returnx=[]\n valuebefore=1000000000\n for i in np.arange(min(X),max(X)+0.01,0.005):\n value=getdistanceX(X,i,temp)\n if valuebefore value:\n temp=value\n returnx=[]\n returnx.append(i)\n # print returnx,value\n valuebefore=value\n return returnx\n \ndef mininumpercoord(X,Y):\n temp=1000000000\n candidatesx=getsmallestx(X)\n candidatesy=getsmallestx(Y)\n # print(candidatesy,candidatesx)\n for i in candidatesx:\n for j in candidatesy:\n value=getdistance(X,Y,i,j)\n if value\n date: 03. 
Aug 2019 at 16:48\n \n version: 0.0.1\n see: https://git.felix-scholz.org/felix.scholz/python_bewerbung\n\"\"\"\nfrom __future__ import absolute_import\n\nimport locale\nimport os\nimport re\nfrom configparser import ConfigParser\nfrom enum import Enum\nfrom typing import List\n\nfrom django.core.management import BaseCommand, CommandParser\n\nfrom applications.models import Applications, Company\n\n\nclass Nemo(Enum):\n CURRENT_URI = 'NEMO_SCRIPT_CURRENT_URI'\n SELECTED_PATHS = 'NEMO_SCRIPT_SELECTED_FILE_PATHS'\n\n\nclass Art(Enum):\n __order__ = 'PERSONAL WRITTEN PHONE ONLINE EMAIL'\n PERSONAL = 1, ['persönlich'],\n WRITTEN = 2, ['schriftlich'],\n PHONE = 3, ['telefonisch'],\n ONLINE = 4, ['online', 'xing', 'webformular', 'linkedin'],\n EMAIL = 5, ['email', 'e-mail', '']\n\n @staticmethod\n def has(search: str) -> 'Art':\n for _art in Art:\n if search in _art.value[1]:\n return _art\n\n return Art.EMAIL\n\n\nclass RE(Enum):\n COMPANY = r'\\\\recipient%?\\n\\t{(?P.*)}%?\\n\\t{(?P.*)\\\\\\\\(?:\\w-)?(?P[0-9]{5}) (?P[\\w \\-]*)}'\n PERSON = r'\\\\contactPerson{(.*)}'\n PHONE = r'\\\\contactTelefone{(.*)}'\n EMAIL = r'\\\\contactEmail{(.*)}'\n ART = r'\\\\contactArt{(.*)}'\n LINK = r'\\\\contactLink{(.*)}'\n DATE = r'\\\\date{(.*)}'\n REF = r'\\\\referenznummer{(.+)}'\n\n\nclass Command(BaseCommand):\n \"\"\" A class of package applications\n\n class: Command\n package: management.commands.view\n \"\"\"\n help = 'Import applications'\n\n def add_arguments(self, parser: CommandParser):\n parser.add_argument('path', required=False, nargs='+', type=str, help='The import path.')\n parser.add_argument('--from', nargs=1, type=str, help='The import source (fs|nemo). ')\n\n def handle(self, *args, **options):\n if 'from' in options:\n if options['from'] == 'nemo':\n locale.setlocale(locale.LC_TIME, 'de_DE.utf8')\n if Nemo.CURRENT_URI.value in os.environ and os.path.isdir(os.environ[Nemo.CURRENT_URI.value][7:]):\n path = os.environ[Nemo.CURRENT_URI.value][7:]\n if Nemo.SELECTED_PATHS.value in os.environ and os.environ[Nemo.SELECTED_PATHS.value] != '':\n self.from_paths(os.environ[Nemo.SELECTED_PATHS.value].split('\\n'))\n else:\n self.from_dir(path)\n\n def from_ini(self, _path: str) -> __class__:\n ini = ConfigParser()\n ini.read(_path)\n place = ini.defaults().get('place').strip('\" ').split(' ')\n options = {\n 'contact_person': ini.defaults().get('contact_person').strip('\" '),\n 'telephone': ini.defaults().get('telefone').strip('\" '),\n 'art': ini.defaults().get('art').strip('\" '),\n 'date': ini.defaults().get('date').strip('\" '),\n }\n if ini.defaults().get('email') is not None:\n options['email'] = ini.defaults().get('email').strip('\" ')\n if ini.defaults().get('job_ad') is not None:\n options['job_ad'] = ini.defaults().get('stellenanzeige').strip('\" ')\n\n Applications.objects.create(\n art=Art.has(ini.defaults().get('art').strip('\" ')).name, company=Company())\n\n return self\n\n def from_tex(self, _path: str) -> __class__:\n with open(_path) as tex_file:\n options = {}\n tex = tex_file.read()\n matches = re.search(RE.COMPANY.value, tex)\n if matches:\n company = matches.group('name')\n street = matches.group('street')\n plz = matches.group('plz')\n place = matches.group('place')\n match_person = re.search(RE.PERSON.value, tex)\n if match_person:\n options['contact_person'] = match_person.group(1)\n matches = re.search(RE.PHONE.value, tex)\n if matches:\n options['telephone'] = matches.group(1)\n matches = re.search(RE.EMAIL.value, tex)\n if matches:\n options['email'] = 
matches.group(1)\n matches = re.search(RE.ART.value, tex)\n if matches:\n options['art'] = matches.group(1).lower()\n matches = re.search(RE.LINK.value, tex)\n if matches:\n options['job_ad'] = matches.group(1)\n matches = re.search(RE.DATE.value, tex)\n if matches:\n options['date'] = matches.group(1)\n matches = re.search(RE.REF.value, tex)\n if matches:\n options['employment_agency'] = matches.group(1)\n\n return self\n\n def from_paths(self, paths: List[str]) -> __class__:\n for _path in paths:\n if _path.endswith('.ini'):\n self.from_ini(_path)\n elif _path.endswith('.tex'):\n self.from_tex(_path)\n\n return self\n\n def from_dir(self, dir_path: str) -> __class__:\n return self.from_paths([dir_path + '/' + _path for _path in os.listdir(dir_path)])\n\n","sub_path":"applications/management/commands/import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"352330580","text":"#!/usr/bin/python\nfrom mininet.net import Mininet\nfrom mininet.node import OVSController, RemoteController\nfrom mininet.cli import CLI\nfrom mininet.log import lg, info\nfrom mininet.nodelib import NAT\nfrom mininet.topolib import Topo #TreeTopo, TreeNet\nfrom mininet.link import TCLink\n# sudo python proj_mn.py\n# *** topology connects to internet via nat0\n# *** in mininet cli, try: \n# h1 ping google.com\n# h1 ping 8.8.8.8\n# xterm h1\n# firefox &\n# wireshark & <- monitors mininet ethX ports: RESULT: all traffic goes through mininet\n# https://stackoverflow.com/questions/12120935/wget-output-document-and-headers-to-stdout\n#\n# if nameserver problems (name not found), try adding 8.8.8.8 to /etc/resolv.conf (or similar)\n# https://unix.stackexchange.com/questions/128220/how-do-i-set-my-dns-when-resolv-conf-is-being-overwritten/163506#163506\n# (Tested with Ubuntu VM in VirtualBox)\n#\n# https://github.com/mininet/mininet/wiki/FAQ#how-can-i-set-up-nat (ipbase)\n\nNUM_HOSTS = 1\n#NUM_SWITCH = 4\n\n\nclass ProjTopo(Topo):\n # nh: number of hosts; ns: number of servers\n def build(self, nh=1):\n natIP = '10.0.0.254' # (default nat settings; default gateway)\n # top level \"router\": switch + nat\n nat0 = self.addNode('nat0', cls=NAT, ip=natIP, inNamespace=False)\n #edge swith to connect with the NAT switch\n s1 = self.addSwitch('s1')\n self.addLink(s1, nat0, port1=1)\n # backbone swtich of ISP network\n s2 = self.addSwitch('s2')\n s3 = self.addSwitch('s3')\n self.addLink(s1, s2, port1=2, port2=1,cls=TCLink, bw=20)\n self.addLink(s1, s3, port1=3, port2=1,cls=TCLink, bw=100)\n # edge switch to connect with the host \n s4 = self.addSwitch('s4')\n self.addLink(s4, s2, port1=1, port2=2, cls=TCLink, bw=20)\n self.addLink(s4, s3, port1=nh+2, port2=2,cls=TCLink, bw=100)\n # add hosts to s4\n for i in range(1, nh+1):\n sh = self.addHost('h%d' % i, ip='10.0.1.%d' % i, \n defaultRoute='via ' + natIP)\n self.addLink(s4, sh, port1=i+1)\n \n # add servers to s3\n #for i in range(1, ns+1):\n # ss = self.addHost('h%d' % (nh+i), ip='10.0.1.%d' % (127+i),\n # defaultRoute='via ' + natIP)\n # self.addLink(s3, ss, port1=i)\n\n\n\nif __name__ == '__main__':\n # NOTE: Haven't figure out this part\n autoStaticArp_flag = True\n\n lg.setLogLevel( 'info')\n #need to modify it \n topo = ProjTopo(NUM_HOSTS)\n #\n #net = Mininet(topo=topo, controller=None, autoSetMacs=True, autoStaticArp=True)\n natSubnet = '10.0.0.0/23' # restrict ip range of Mininet\n net = Mininet(topo=topo, controller=None, autoSetMacs=True, \n 
autoStaticArp=autoStaticArp_flag, ipBase=natSubnet)\n #net.addController('c0', controller=OVSController) # L2 switch\n net.addController('c0', controller=RemoteController, \n ip = \"127.0.0.1\", port = 6633, protocols=\"OpenFlow13\")\n\n # Add NAT connectivity (default connects to s1 as nat0; nat0 uses eth0)\n # net.addNAT().configDefault()\n net.start()\n\n hosts = net.hosts[:NUM_HOSTS]\n \n info( \"***\\n*** Hosts are running and should have internet connectivity\\n\" )\n CLI( net )\n # Shut down NAT, cleanup background processes\n net.stop()\n\n\n# reference mininet examples: nat.py multitest.py\n# http://mininet.org/walkthrough/#run-a-simple-web-server-and-client\n","sub_path":"proj_mn.py","file_name":"proj_mn.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391457880","text":"from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn\nfrom tensorflow.compat.v1.nn.rnn_cell import BasicLSTMCell\nfrom utils.prepare_data import *\nfrom .base_model import BaseModel\nfrom .base_train import BaseTrain\nfrom tqdm import tqdm\nimport time\nfrom os.path import join\nfrom os import makedirs\nfrom utils.model_helper import *\nfrom utils.logger import Logger\nfrom bunch import Bunch\nfrom sklearn.metrics import precision_recall_fscore_support, classification_report\nimport tensorflow as tf\nfrom keras_preprocessing.text import tokenizer_from_json\nimport pickle\nimport pandas as pd\ntf.compat.v1.disable_eager_execution()\n\nDEFAULT_CONFIG = {\n \"hidden_size\": 64,\n \"embedding_size\": 128,\n }\n\nclass AttentionBiLSTM(BaseModel):\n def __init__(self, config):\n super(AttentionBiLSTM, self).__init__(config)\n self.max_len = config.max_len\n self.hidden_size = config.hidden_size\n self.vocab_size = config.vocab_size\n self.embedding_size = config.embedding_size\n self.n_class = config.n_class\n self.learning_rate = config.learning_rate\n self.build_model()\n self.init_saver()\n\n def build_model(self):\n # placeholder\n self.x = tf.compat.v1.placeholder(tf.int32, [None, self.max_len])\n self.y = tf.compat.v1.placeholder(tf.int32, [None, self.n_class])\n self.keep_prob = tf.compat.v1.placeholder(tf.float32)\n # Word embedding\n embeddings_var = tf.Variable(tf.random.uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),\n trainable=True)\n self.batch_embedded = tf.nn.embedding_lookup(params=embeddings_var, ids=self.x)\n\n rnn_outputs, _ = bi_rnn(BasicLSTMCell(self.hidden_size),\n BasicLSTMCell(self.hidden_size),\n inputs=self.batch_embedded, dtype=tf.float32)\n\n fw_outputs, bw_outputs = rnn_outputs\n\n W = tf.Variable(tf.random.normal([self.hidden_size], stddev=0.1))\n H = fw_outputs + bw_outputs # (batch_size, seq_len, HIDDEN_SIZE)\n M = tf.tanh(H) # M = tanh(H) (batch_size, seq_len, HIDDEN_SIZE)\n\n self.alpha = tf.nn.softmax(tf.reshape(tf.matmul(tf.reshape(M, [-1, self.hidden_size]),\n tf.reshape(W, [-1, 1])),\n (-1, self.max_len))) # batch_size x seq_len\n r = tf.matmul(tf.transpose(a=H, perm=[0, 2, 1]),\n tf.reshape(self.alpha, [-1, self.max_len, 1]))\n r = tf.squeeze(r, [2])\n h_star = tf.tanh(r) # (batch , HIDDEN_SIZE\n\n h_drop = tf.nn.dropout(h_star, 1 - (self.keep_prob))\n\n # Fully connected layer(dense layer)\n FC_W = tf.Variable(tf.random.truncated_normal([self.hidden_size, self.n_class], stddev=0.1))\n FC_b = tf.Variable(tf.constant(0., shape=[self.n_class]))\n y_hat = tf.compat.v1.nn.xw_plus_b(h_drop, FC_W, FC_b)\n\n self.loss = tf.reduce_mean(\n 
input_tensor=tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(logits=y_hat, labels=self.y))\n\n # prediction\n self.probabilities = tf.nn.softmax(y_hat)\n self.prediction = tf.argmax(input=self.probabilities, axis=1)\n\n self.accuracy = tf.reduce_mean(input_tensor=tf.cast(tf.equal(self.prediction, tf.argmax(input=self.y, axis=1)), tf.float32))\n\n # optimization\n loss_to_minimize = self.loss\n tvars = tf.compat.v1.trainable_variables()\n gradients = tf.gradients(ys=loss_to_minimize, xs=tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)\n grads, global_norm = tf.clip_by_global_norm(gradients, 1.0)\n\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate)\n self.train_op = self.optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step_tensor,\n name='train_step')\n\n def init_saver(self):\n self.saver = tf.compat.v1.train.Saver(max_to_keep=self.config.max_to_keep)","sub_path":"models/attn_bi_lstm.py","file_name":"attn_bi_lstm.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"548240020","text":"\nfrom compas_fea.cad import blender\nfrom compas_fea.structure import ElasticIsotropic\nfrom compas_fea.structure import ElementProperties as Properties\nfrom compas_fea.structure import GeneralStep\nfrom compas_fea.structure import PinnedDisplacement\nfrom compas_fea.structure import PointLoad\nfrom compas_fea.structure import SolidSection\nfrom compas_fea.structure import Structure\n\nfrom compas_blender.utilities import get_objects\n\n\n__author__ = ['Andrew Liew ']\n__copyright__ = 'Copyright 2018, BLOCK Research Group - ETH Zurich'\n__license__ = 'MIT License'\n__email__ = 'liew@arch.ethz.ch'\n\n\n# Structure\n\nmdl = Structure(name='block_tets', path='/home/al/temp/')\n\n# Tetrahedrons\n\nblender.add_tets_from_bmesh(mdl, name='elset_tets', bmesh=get_objects(layer=0)[0])\n\n# Sets\n\nblender.add_nset_from_bmeshes(mdl, layer=1, name='base')\nblender.add_nset_from_bmeshes(mdl, layer=2, name='top')\n\n# Materials\n\nmdl.add_material(ElasticIsotropic(name='mat_elastic', E=100*10**9, v=0.3, p=1))\n\n# Sections\n\nmdl.add_section(SolidSection(name='sec_solid'))\n\n# Properties\n\nmdl.add_element_properties(\n Properties(name='ep_tets', material='mat_elastic', section='sec_solid', elsets='elset_tets'))\n\n# Displacementss\n\nmdl.add_displacement(PinnedDisplacement(name='disp_pinned', nodes='base'))\n\n# Loads\n\nmdl.add_load(PointLoad(name='load_top', nodes='top', y=1000, z=1000))\n\n# Steps\n\nmdl.add_steps([\n GeneralStep(name='step_bc', displacements=['disp_pinned']),\n GeneralStep(name='step_load', loads=['load_top'])])\nmdl.steps_order = ['step_bc', 'step_load']\n\n# Summary\n\nmdl.summary()\n\n# Run (Abaqus)\n\nexe = '/home/al/abaqus/Commands/abaqus cae '\nmdl.analyse_and_extract(software='abaqus', exe=exe, fields=['u'])\nblender.plot_voxels(mdl, step='step_load', field='ux', vdx=0.01)\n","sub_path":"examples/block_tets_blender.py","file_name":"block_tets_blender.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"83146808","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 9 15:57:49 2018\n\n@author: yutong\n\"\"\"\n\"\"\"\nROC & AUC\n\n\"\"\"\n\n# Import Packages\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.data import 
Dataset\nfrom sklearn import metrics\n\n# Setup Format\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows=10\npd.options.display.float_format='{:.3f}'.format\n\n# Load Data\ndata_train = pd.read_csv('https://storage.googleapis.com/mledu-datasets/california_housing_train.csv', sep=',')\ndata_train.reindex(np.random.permutation(data_train.index))\n\n# Check Data\n#data_train.describe()\nprint('\\nCorrelation Matrix Table: \\n{}'.format(data_train.corr()))\n\n\n# Pre-Process Features\ndef preprocess_feature(data):\n feature_selected = data[['latitude','longitude','housing_median_age','total_rooms',\n 'total_bedrooms','population','households','median_income']]\n feature_processed = feature_selected.copy()\n feature_processed['rooms_per_person'] = data['total_rooms'] / data['population']\n return feature_processed\n\n# Pre-Process Label\ndef preprocess_label(data):\n label_processed = pd.DataFrame()\n label_processed['high_house_value'] = (data['median_house_value']>265000).astype(float)\n return label_processed\n\n\n# Bucketize Data\ndef quantize_boundary(feature_value, bucket_num):\n boundary = np.arange(1.0, bucket_num) / bucket_num\n quantile = feature_value.quantile(boundary)\n return [quantile[i] for i in quantile.keys()]\n\n## Construct TensorFlow Feature Columns\n#def construct_feature_columns(data):\n# return set([tf.feature_column.numeric_column(item) for item in data])\n\n# Construct TensorFlow Feature Columns\ndef column_template(data):\n \"\"\"\n Return: A set of processed feature colunms\n \"\"\" \n # Transfer Feature Columns to Numeric Columns\n longitude = tf.feature_column.numeric_column('longitude')\n latitude = tf.feature_column.numeric_column('latitude')\n housing_median_age = tf.feature_column.numeric_column('housing_median_age')\n total_rooms = tf.feature_column.numeric_column('total_rooms')\n total_bedrooms = tf.feature_column.numeric_column('total_bedrooms')\n population = tf.feature_column.numeric_column('population')\n households = tf.feature_column.numeric_column('households')\n median_income = tf.feature_column.numeric_column('median_income')\n rooms_per_person = tf.feature_column.numeric_column('rooms_per_person')\n \n # Bucketize the Numeric Columns\n longitude_bucket = tf.feature_column.bucketized_column(\n longitude, boundaries=quantize_boundary(data['longitude'], 10))\n latitude_bucket = tf.feature_column.bucketized_column(\n latitude, boundaries=quantize_boundary(data['latitude'], 10))\n housing_median_age_bucket = tf.feature_column.bucketized_column(\n housing_median_age, boundaries=quantize_boundary(data['housing_median_age'], 5))\n total_rooms_bucket = tf.feature_column.bucketized_column(\n total_rooms, boundaries=quantize_boundary(data['total_rooms'], 5))\n total_bedrooms_bucket = tf.feature_column.bucketized_column(\n total_bedrooms, boundaries=quantize_boundary(data['total_bedrooms'], 5))\n population_bucket = tf.feature_column.bucketized_column(\n population, boundaries=quantize_boundary(data['population'], 5))\n households_bucket = tf.feature_column.bucketized_column(\n households, boundaries=quantize_boundary(data['households'], 5))\n median_income_bucket = tf.feature_column.bucketized_column(\n median_income, boundaries=quantize_boundary(data['median_income'], 5))\n rooms_per_person_bucket = tf.feature_column.bucketized_column(\n rooms_per_person, boundaries=quantize_boundary(data['rooms_per_person'], 5))\n \n # Create Crossed Feature Columns\n long_x_lat = tf.feature_column.crossed_column(\n set([longitude_bucket, 
latitude_bucket]), hash_bucket_size=50)\n \n # Zip the result into a set\n result = set([longitude_bucket, latitude_bucket, housing_median_age_bucket,\n total_rooms_bucket, total_bedrooms_bucket, population_bucket,\n households_bucket, median_income_bucket, rooms_per_person_bucket,\n long_x_lat]) \n return result \n\n\n# Split Train-Test Dataset\nX_train = preprocess_feature(data_train.head(12000))\nX_valid = preprocess_feature(data_train.tail(5000))\ny_train = preprocess_label(data_train.head(12000))\ny_valid = preprocess_label(data_train.tail(5000))\n#print('\\nTraining Features (X_train) Summary: \\n{}'.format(X_train.describe()))\n#print('\\nValidation Features (X_valid) Summary: \\n{}'.format(X_valid.describe()))\n#print('\\nTraining Label (y_train) Summary: \\n{}'.format(y_train.describe()))\n#print('\\nValidation Label (y_valid) Summary: \\n'.format(y_valid.describe()))\n \n\n# Model Size help function -- Only for FTRL Optimizer\n# Characterize l1_regulation_strength parameter\ndef model_size(estimator):\n variables = estimator.get_variable_names()\n size = 0\n for variable in variables:\n if not any(x in variable for x in ['global_step','centered_bias_weight', 'bias_weight', 'Ftrl']):\n size += np.count_nonzero(estimator.get_variable_value(variable))\n return size\n\n\n# Template Input Function\ndef input_template(feature, label, batch_size=1, epoch_num=None, shuffle=True):\n \"\"\"\n Return: A Tuple of (feature, label) for next data batch\n \"\"\"\n # Convert pandas data into a dict of np arrays\n feature = {key: np.array(value) for key,value in dict(feature).items()} \n # Construct a dataset with figured Batch, Epoch and Shffule\n ds = Dataset.from_tensor_slices((feature, label))\n ds = ds.batch(batch_size).repeat(epoch_num)\n if shuffle: ds = ds.shuffle(10000) \n # Return the next batch of data\n feature, label = ds.make_one_shot_iterator().get_next()\n return feature, label\n\n# Template Training Model\ndef model_template(optimizer, steps, batch_size, feature_columns,\n X_train, X_valid, y_train, y_valid):\n # Setup Initial Data\n periods = 10\n steps_per_period = steps / periods\n \n # Create Linear Classifier Object\n optimizer = tf.contrib.estimator.clip_gradients_by_norm(optimizer, 5.0)\n linear_classifier = tf.estimator.LinearClassifier(feature_columns=feature_columns, optimizer=optimizer)\n \n # Create Input Functions\n train_input_fn = lambda: input_template(X_train, y_train, batch_size=batch_size)\n valid_X_input_fn = lambda: input_template(X_train, y_train, epoch_num=1, shuffle=False)\n valid_y_input_fn = lambda: input_template(X_valid, y_valid, epoch_num=1, shuffle=False)\n \n # Train Model - Predict Loss in a loop\n print('\\nModel Training Initiating...')\n print('LogLoss of Training Data:')\n log_losses_train, log_losses_valid = [], []\n \n for period in range(0, periods):\n # Start Model Training from Prior State\n linear_classifier.train(input_fn=train_input_fn, steps=steps_per_period)\n \n # Compute Predictions\n probability_train = linear_classifier.predict(input_fn=valid_X_input_fn)\n probability_train = np.array([item['probabilities'] for item in probability_train])\n probability_valid = linear_classifier.predict(input_fn=valid_y_input_fn)\n probability_valid = np.array([item['probabilities'] for item in probability_valid])\n evaluation_metrics = linear_classifier.evaluate(input_fn=valid_y_input_fn)\n auc, accu = evaluation_metrics['auc'], evaluation_metrics['accuracy']\n \n # Compute RMSE Loss\n log_loss_train = metrics.log_loss(y_train, probability_train)\n 
log_losses_train.append(log_loss_train)\n log_loss_valid = metrics.log_loss(y_valid, probability_valid)\n log_losses_valid.append(log_loss_valid)\n \n # Print Current Loss\n print('Period_{:02d} | Log_Loss: {:.3f} | AUC: {:.3f} | Accuracy: {:.3f}'.format(period, log_loss_train, auc, accu))\n \n print('Model Training Finished! \\n')\n print('')\n \n # Plot Loss Metrics over Periods\n plt.figure(figsize=(10, 4))\n \n plt.subplot(121)\n plt.title('LogLoss vs. Periods')\n plt.xlabel('Periods')\n plt.ylabel('LogLoss')\n plt.tight_layout()\n plt.plot(log_losses_train, label='Log Loss Train')\n plt.plot(log_losses_valid, label='Log Loss Valid')\n plt.legend(loc=1)\n \n # Plot ROC Curve\n probability_roc = linear_classifier.predict(input_fn=valid_y_input_fn)\n probability_roc = np.array([item['probabilities'][1] for item in probability_roc])\n false_rate, true_rate, threshold = metrics.roc_curve(y_valid, probability_roc)\n \n plt.subplot(122)\n plt.title('ROC Curve')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.plot(false_rate, true_rate, label='Our Calssifier')\n plt.plot([0,1], [0,1], label='Random Classifier')\n plt.legend(loc = 2)\n \n # Return Result\n print('Final Log Loss of Training Data: \\t{:.3f}'.format(log_loss_train))\n print('Final Log Loss of Validation Data: \\t{:.3f}'.format(log_loss_valid))\n return linear_classifier\n \n\n\n\n# Test Case on Test Dataset via Linear Classifier Object\nmodel = model_template(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),\n steps=500, batch_size=100, \n feature_columns=column_template(preprocess_feature(data_train)),\n X_train=X_train, X_valid=X_valid, y_train=y_train, y_valid=y_valid)\n\n## Test Case on Test Dataset via Ada Delta Optimizer\n#model = model_template(optimizer = tf.train.AdadeltaOptimizer(learning_rate=125), \n# steps=500, batch_size=500, \n# feature_columns=column_template(preprocess_feature(data_train)),\n# X_train=X_train, X_valid=X_valid, y_train=y_train, y_valid=y_valid)\n\n## Test Case on Test Dataset via Adam Optimizer\n#model = model_template(optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1), \n# steps=500, batch_size=500,\n# feature_columns=column_template(preprocess_feature(data_train)),\n# X_train=X_train, X_valid=X_valid, y_train=y_train, y_valid=y_valid)\n\n## Test Case on Test Dataset via FTRL Optimizer\n#l1_strength = 0.1 # Only for FTRL Optimizer\n#model = model_template(optimizer=tf.train.FtrlOptimizer(learning_rate=0.1,\n# l1_regularization_strength=l1_strength),\n# steps=500, batch_size=500,\n# feature_columns=column_template(preprocess_feature(data_train)),\n# X_train=X_train, X_valid=X_valid, y_train=y_train, y_valid=y_valid)\n\ndata_test = pd.read_csv(\"https://storage.googleapis.com/mledu-datasets/california_housing_test.csv\", sep=\",\")\nX_test = preprocess_feature(data_test)\ny_test = preprocess_label(data_test)\n\ntest_input_fn = lambda: input_template(X_test, y_test, epoch_num=1, shuffle=False)\nprobability_test = model.predict(input_fn=test_input_fn)\nprobability_test = np.array([item['probabilities'] for item in probability_test])\nlog_loss_test = metrics.log_loss(y_test, probability_test)\n#print('\\nL1 Strength: {} | Model Size: {}'.format(l1_strength, model_size(model))) # Only for FTRL Optimizer\nprint('Final Log Loss of Testing Data: 
\\t{:.2f}'.format(log_loss_test))","sub_path":"Template_TensorFlow/Model_Linear_Classification.py","file_name":"Model_Linear_Classification.py","file_ext":"py","file_size_in_byte":11498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"207324812","text":"import pgzrun\r\nfrom random import randint\r\n\r\ndef draw():\r\n screen.blit('backdrop800',(0,0))\r\n ship.draw()\r\n alien.draw()\r\n for bullet in bullets:\r\n bullet.draw()\r\n screen.draw.text(\"Bullet : \"+str(len(bullets)),\r\n topleft = (10,10),fontsize=28,color='white')\r\n\r\ndef update():\r\n if keyboard.left:\r\n ship.x -= 1\r\n if ship.left < 0:\r\n ship.left = 0\r\n elif keyboard.right:\r\n ship.x += 1\r\n if ship.right > WIDTH:\r\n ship.right = WIDTH\r\n \r\n for bullet in bullets:\r\n bullet.y -= 1\r\n if bullet.top < 0:\r\n bullets.remove(bullet)\r\n alien.left += 1\r\n if alien.left > WIDTH:\r\n alien.right = 0\r\n\r\n for bullet in bullets:\r\n if bullet.y < 100:\r\n if bullet.colliderect(alien):\r\n alien.pos = (60,50)\r\n bullets.remove(bullet)\r\ndef on_key_down(key): \r\n if key == key.SPACE:\r\n bullets.append(Actor('bullet'))\r\n last = len(bullets)\r\n bullets[last-1].pos = ship.pos\r\n\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\n\r\nship = Actor('ship')\r\nship.pos = (WIDTH/2,HEIGHT-40)\r\nbullets = []\r\nalien = Actor('alien')\r\nalien.pos = (400,50)\r\npgzrun.go()\r\n","sub_path":"EXgame2/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"361882793","text":"import torch\nimport torch.utils.data as Data\nimport sys\nimport torchvision\nimport torchvision.transforms as transforms\nimport time\nimport numpy as np\n\nmnist_train = torchvision.datasets.FashionMNIST(root=\"E:/data/\", train=True, download=False, transform=transforms.ToTensor())\nmnist_test = torchvision.datasets.FashionMNIST(root=\"E:/data/\", train=False, download=False, transform=transforms.ToTensor())\n\n# print(type(mnist_train[0]))\n# print(len(mnist_train), len(mnist_test))\n# feature, label = mnist_train[0]\n# print(feature.shape, label)\n\nif sys.platform.startswith('win'):\n num_workers = 0\nelse:\n num_workers = 4\n\nbatch_size = 256\n\ntrain_iter = Data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)\ntest_iter = Data.DataLoader(mnist_test, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n\nstart = time.time()\nfor X, y in train_iter:\n continue\nprint(time.time() - start)\n\nnum_inputs = 28 * 28\nnum_outputs = 10\n\nW = torch.tensor(np.random.normal(0, 0.01,(num_inputs, num_outputs)), dtype=torch.float32)\nb = torch.zeros(num_outputs, dtype=torch.float32)\n\nW.requires_grad = True\nb.requires_grad = True\n\ndef softmax(X):\n X_exp = X.exp()\n partition = X_exp.sum(dim = 1, keepdim = True)\n return X_exp / partition\n\ndef net(X):\n return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)\n\ndef cross_entropy(y_hat, y):\n return -torch.log(y_hat.gather(1, y.view(-1, 1)))\n\ndef evalute_accuracy(data_iter, net):\n acc_sum, n = 0.0, 0\n for X, y in data_iter:\n acc_sum += (net(X).argmax(dim=1)==y).float().sum().item()\n n += y.shape[0]\n return acc_sum / n\n\ndef sgd(params, batch_size, lr):\n for param in params:\n param.data -= lr * param.grad / batch_size\n\n\nloss = cross_entropy\nnum_epochs, lr = 5, 0.01\n\nfor epoch in range(num_epochs):\n train_acc_sum, train_l_sum, n = 0.0, 0.0, 0\n for X, y in 
train_iter:\n y_hat = net(X)\n l = loss(y_hat, y).sum()\n l.backward()\n sgd([W, b], batch_size, lr)\n train_l_sum += l.item()\n n += y.shape[0]\n train_acc_sum = evalute_accuracy(train_iter, net)\n print('epoch %d, train loss %f train acc %d' % (epoch + 1, train_l_sum / n, train_acc_sum / n))\n","sub_path":"xixixi/图像分类数据集(Fashion-MNIST).py","file_name":"图像分类数据集(Fashion-MNIST).py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633736830","text":"'''\ngetChanges.py\nA python script (version 2.0 of get_new_bills.py) to download a new Master File from the Legiscan API and check\nagainst our application's existing master file for any bill changes.\nCurrently checks for old file in data/master_file_old.json\n\nWe periodically need to update with new session dates\n\nUsage:\npython getChanges.py\n'''\n\nimport json\nimport urllib.request\nimport datetime, os\nfrom os import listdir\nfrom os.path import isfile, join\n\n#API key for your Legiscan account (contained in gitignored directory)\nlegiscan_key = open('./keys/.legiscan_key','r').read().rstrip('\\n')\n\n#open a log for writing\nmaster_log = open('master_log.txt', 'a')\n\n#function to get session ids\ndef get_sessionList():\n\tsession_url = 'https://api.legiscan.com/?key=' + legiscan_key + '&op=getSessionList&state=NC'\n\tsession_list = []\n\ttry:\n\t\tprint('[api call]: ' + str(session_url))\n\t\tsession_data = urllib.request.urlopen(session_url).read()\n\t\tsession_json = json.loads(session_data)\n\t\tfor session in session_json['sessions']:\n\t\t\t#Capturing all sessions from 2021 on\n\t\t\tif session['year_start'] >= 2021:\n\t\t\t\tsession_list.append(session['session_id'])\n\texcept Exception as e:\n\t\tprint('ERROR: Something went wrong with retrieving the sessionList at ' + str(datetime.datetime.now()) + ' ' + str(e))\n\treturn session_list\n\n#define function to get master lists with specificed session ids\ndef get_masters(session_list):\n\tbase_url = 'https://api.legiscan.com/?key=' + legiscan_key + '&op=getMasterList&id='\n\turl_list = []\n\tsession_bills = {}\n\tcounter = 0\n\t#get session ids and build list of urls\n\tfor session in session_list:\n\t\tmaster_url = base_url + str(session)\n\t\turl_list.append(master_url)\n\t#read in master list with each session id\n\tfor url in url_list:\n\t\t#open the url and load in the json\n\t\tprint('[api call]: ' + str(url))\n\t\tcurrent_session = json.loads(urllib.request.urlopen(url).read())\n\t\tfor item in current_session['masterlist']:\n\t\t\tif item != 'session':\n\t\t\t\t#append to master list object\n\t\t\t\tsession_bills[counter] = current_session['masterlist'][item]\n\t\t\t\tcounter += 1\n\t#save to master_file.json\n\ttry:\n\t\twith open('data/master_file.json','w') as change_file:\n\t\t\tjson.dump(session_bills,change_file)\n\t\tmaster_log.write('SUCCESS: Newest master file saved at ' + str(datetime.datetime.now()) + '\\n')\n\texcept:\n\t\tmaster_log.write('ERROR: Invalid master file URL recorded at ' + str(datetime.datetime.now()) + '\\n')\n\t\treturn\n\n#define function to get new bills and overwrite old ones\ndef get_bill_updates():\n\t#open the new file\n\twith open('data/master_file.json') as data_file:\n\t\tmaster_data = json.load(data_file)\n\n\t#open the old file\n\t#need to build in exception for if it doesn't exist\n\tif (os.path.isfile('data/master_file_old.json')):\n\t\twith open('data/master_file_old.json') as old_file:\n\t\t\told_data = 
json.load(old_file)\n\telse:\n\t\tmaster_log.write('ALERT: No master file detected. Starting new one.\\n')\n\t\told_data = 0;\n\n\t#initialize the counters\n\tchange_count = 0\n\tunchanged_count = 0\n\tundled_count = 0\n\t#create empty list to store bills\n\tchanged_bills = []\n\tunchanged_bills = []\n\tundled_bills = []\n\n\tdl_bills = [f.split('.')[0] for f in listdir('data/bills/') if isfile(join('data/bills/', f))]\n\tdl_rollcalls = [f.split('.')[0] for f in listdir('data/votes/') if isfile(join('data/votes/', f))]\n\n\tfor item in master_data:\n\t\t#check for a blank file\n\t\tif(old_data == 0):\n\t\t\tchange_count += 1\n\t\t\tchanged_bills.append(item)\n\t\t#otherwise go through the old file and compare\n\t\telse:\n\t\t\ttry:\n\t\t\t\t#check for altered change_hash\n\t\t\t\tif (master_data[item]['change_hash'] != old_data[item]['change_hash']):\n\t\t\t\t\tchange_count += 1\n\t\t\t\t\tchanged_bills.append(item)\n\t\t\t\telse:\n\t\t\t\t\tunchanged_count += 1\n\t\t\t\t\tunchanged_bills.append(item)\n\t\t\t#if it throws a key error, the bill is new, so add it\n\t\t\texcept KeyError:\n\t\t\t\tchange_count += 1\n\t\t\t\tchanged_bills.append(item)\n\t\t#check on our undownloaded bills...\n\t\tif(str(master_data[item]['bill_id']) not in dl_bills):\n\t\t\tundled_count += 1\n\t\t\tundled_bills.append(item)\n\tmaster_log.write('ALERT: ' + str(unchanged_count) + ' bills have stayed the same in Legiscan data\\n')\n\tmaster_log.write('ALERT: ' + str(change_count) + ' bills have been updated in Legiscan data\\n')\n\tmaster_log.write('ALERT: ' + str(undled_count) + ' bills have not been downloaded yet\\n')\n\tfor bill in changed_bills:\n\t\tget_bill(master_data[bill]['bill_id'],master_data[bill]['number'])\n\t\tget_rollcall(master_data[bill]['bill_id'])\n\t#temporary maintenance steps that should normally be disabled\n\t#for bill in undled_bills:\n\t#\tget_bill(master_data[bill]['bill_id'],master_data[bill]['number'])\n\t#\tget_rollcall(master_data[bill]['bill_id'])\n\t#delete this after maintencance\n\t#for bill in unchanged_bills:\n\t#\tget_rollcall(master_data[bill]['bill_id'])\n\n#function to get a specific bill, by defined id\ndef get_bill(bill_id, name):\n\tbill_url = 'https://api.legiscan.com/?key=' + legiscan_key + '&op=getBill&id=' + str(bill_id)\n\n\ttry:\n\t\tprint('[api call]: ' + str(bill_url))\n\t\tbill_file = urllib.request.urlopen(bill_url).read()\n\t\tf = open('data/bills/' + str(bill_id) + '.json', 'wb')\n\t\tf.write(bill_file)\n\t\tf.close()\n\t\tmaster_log.write(name + ' data saved at ' + str(datetime.datetime.now()) + '\\n')\n\texcept:\n\t\tmaster_log.write('ERROR: Invalid bill file URL\\n')\n\t\treturn\n\n#function to get role call details, by defined id\ndef get_rollcall(bill_id):\n\t#open downloaded bill json given bill_id\n\twith open('data/bills/' + str(bill_id) + '.json') as bill_file:\n\t\tbill_data = json.load(bill_file)\n\t#define rollcall list variable\n\trollcall_list = []\n\t#store rollcall ids in list from bill votes\n\tfor vote in bill_data['bill']['votes']:\n\t\trollcall_list.append(vote['roll_call_id'])\n\tfor rollcall_id in rollcall_list:\n\t\t#check if rollcall json already exists\n\t\tif (os.path.isfile('data/votes/' + str(rollcall_id) + '.json')):\n\t\t\tmaster_log.write('Rollcall ' + str(rollcall_id) + ' exists. 
Skipped.\\n')\n\t\t\t#if not, then download it\n\t\telse:\n\t\t\trollcall_url = 'https://api.legiscan.com/?key=' + legiscan_key + '&op=getRollCall&id=' + str(rollcall_id)\n\t\t\t#rollcall_file = urllib.request.URLopener()\n\t\t\ttry:\n\t\t\t\tprint('[api call]: ' + str(rollcall_url))\n\t\t\t\t#rollcall_file.retrieve(rollcall_url,'data/votes/' + str(rollcall_id) + '.json')\n\t\t\t\trollcall_file = urllib.request.urlopen(rollcall_url).read()\n\t\t\t\tf = open('data/votes/' + str(rollcall_id) + '.json', 'wb')\n\t\t\t\tf.write(rollcall_file)\n\t\t\t\tf.close()\n\t\t\t\tmaster_log.write('Rollcall ' + str(rollcall_id) + ' saved at ' + str(datetime.datetime.now()) + '\\n')\n\t\t\texcept:\n\t\t\t\tmaster_log.write('ERROR: Invalid rollcall file URL\\n')\n\t\t\t\treturn\n\nif __name__ == '__main__':\n\t#run the functions\n\tget_masters(get_sessionList())\n\tget_bill_updates()\n\tmaster_log.write('Finished at ' + str(datetime.datetime.now()) + '\\n')\n\tmaster_log.write('= = = = = = = = = =\\n')\n\n\t#now make the file you downloaded the old file\n\tos.rename('data/master_file.json','data/master_file_old.json')\n","sub_path":"getChanges.py","file_name":"getChanges.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532328843","text":"'''\n하나 혹은 두개씩 문자를 잘라 used 배열에 놓음. (1~50 이기 때문)\n두 경우 모두 사용된 수일 경우 경로를 자름(백)\n끝까지 갔을때 순서대로 된 수열일 경우 종료\n'''\n\nfrom collections import deque\nimport sys\nN = input()\nused = [False]*51\ntemp = []\nresult = []\n\n\ndef back(i): # i번째 수가 들어왔을 경우\n\n if i == len(N):\n limit = max(temp)\n if False in used[1:limit+1]:\n return\n for k in temp:\n print(k, end=' ')\n exit()\n\n one = int(N[i])\n if i < len(N) and not used[one]:\n used[one] = True\n temp.append(one)\n back(i+1)\n used[one] = False\n temp.pop()\n\n if i < len(N)-1 and int(N[i:i+2]) <= 50 and not used[int(N[i:i+2])]:\n two = int(N[i:i+2])\n used[two] = True\n temp.append(two)\n back(i+2)\n used[two] = False\n temp.pop()\n\nback(0)\n\n","sub_path":"백준/Python/카테고리/백트래킹/10597(순열장난).py","file_name":"10597(순열장난).py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76725127","text":"import cv2\nimport read_mnist as rm\n\n\nif __name__ == '__main__':\n images = rm.read_mnist('train')\n\n # 将train_images转化为jpg格式的图片\n dir = 'mnist_as_jpg/train/'\n for i in range(len(images[0])):\n name = dir + '%d_%05d.jpg' % (images[1][i], i)\n cv2.imwrite(name, images[0][i].reshape(28, 28))\n print(i, name)\n\n print('?')\n","sub_path":"save_as_jpg.py","file_name":"save_as_jpg.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"89352661","text":"def is_prime(num: int) -> str:\n if num > 1:\n for i in range(2, num//2):\n if num % i == 0:\n return f'{num} is not prime!'\n return f'{num} is prime!' 
\n else:\n return f'{num} is not prime'\n\nwhile True:\n print(\"Check if a given number is prime (q) to quit\")\n inp = input(\"Type a number: \")\n \n while not inp.isnumeric() and inp != 'q':\n inp = input(\"Must be a positive number, try again: \")\n \n if inp == 'q':\n break\n\n given = int(inp)\n \n print(is_prime(given))","sub_path":"src/is_prime.py","file_name":"is_prime.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423999764","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nclass StyleTestCase(unittest.TestCase):\n \"\"\"\n \"\"\"\n\n def test_single_rule_render(self):\n import style.tags as tags\n b = tags.Body(font_size=\"1em\")\n result = b.render()\n self.assertEqual(result, \"body { font-size: 1em }\")\n\n def test_multi_rule_render(self):\n import style.tags as tags\n b = tags.H6(font_size=\"0.4em\",font_weight=\"bold\")\n result = b.render()\n self.assertIn(\"h6 {\", result)\n self.assertIn(\"font-size: 0.4em\", result)\n self.assertIn(\"font-weight: bold\", result)\n self.assertIn(\"; \", result)\n\n def test_ordered_rules(self):\n import style.tags as tags\n b = tags.H6(font_size=\"0.4em\",font_weight=\"bold\", inorder=['font_size', 'font_weight'])\n result = b.render()\n self.assertEquals(\"h6 { font-size: 0.4em; font-weight: bold }\", result)\n\n def test_theme(self):\n import style.theme as theme\n t = theme.Theme()\n t.body(padding=\"10px 10px\")\n t.h1(font_size=\"1em\")\n result = \"\\n\".join(t.render())\n self.assertEquals(\"body { padding: 10px 10px }\\nh1 { font-size: 1em }\", result)\n\n def test_docs(self):\n import doctest\n doctest.testmod()\n","sub_path":"style/test_style.py","file_name":"test_style.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"583427011","text":"####150949 JUAN CARDOSO\n####150329 Emanuel Huber\n\n\nimport matplotlib.pyplot as plt\nimport networkx as nx \nimport re\nimport numpy as np\n\nTESTE_1 = \"A\"\nTESTE_2 = \"A.B\"\nTESTE_3 = \"A|B\"\nTESTE_4 = \"A*\"\nTESTE_5 = \"A.A*\"\nTESTE_6 = \"A.B*.(A+B)\"\nTESTE_7 = \"A.B.C\"\nTESTE_8 = \"A|B|C\"\nTESTE_9 = \"A*.B*\"\nTESTE_10 = \"A+\"\nE = \"&\"\nSTATE = 0\n\n#\"A.B\"\n#E-A-E-B-E\nclass Stack(object):\n def __init__(self):\n self.__stack = []\n \n def push(self, elemento):\n self.__stack.append(elemento)\n \n def pop(self):\n if not self.empty():\n return self.__stack.pop(-1)\n \n def empty(self):\n return len(self.__stack) == 0\n\n def top(self):\n if not self.empty():\n return self.__stack[-1]\n \nclass Graph:\n\n def __init__(self):\n self.edges = []\n\n def __str__(self):\n return ', '.join(self.nodes)\n\n\nprioridade = {\n '(': 1,\n ')': 1,\n '[': 1,\n ']': 1,\n '{': 1,\n '}': 1,\n '+': 3,\n '.': 2,\n '*': 3,\n '|': 2,\n}\n\n\n\ndef main():\n print(\"***O numero do nó inicial e final será mostrado no console***\\n\")\n print(\"Digite o regex Ex: A.B*.(A+B)\")\n V = input().upper()\n\n posfix = posFix(V)\n res = thompson(posfix)\n g = nx.DiGraph()\n edges = [(e[0], e[1]) for e in res.edges]\n \n g.add_edges_from(edges)\n pos = nx.layout.spring_layout(g)\n labels = {}\n \n for e in res.edges:\n labels[(e[0], e[1])] = e[2]\n plt.figure(figsize=(20, 20))\n nx.draw(g, pos, with_labels=True, edge_color='black', width=2,\n linewidths=1, node_size=250, node_color='green', alpha=0.9,)\n\n # print(res.edges)\n inicio = res.edges[0][0]\n fim = res.edges[-1][1]\n\n print(\"Nó Inicial: \"+ 
str(inicio)+\"\\n\",\"Nó Final: \"+str(fim)+\"\\n\")\n nx.draw_networkx_edge_labels(g, pos, edge_labels=labels)\n\n # plt.savefig(\"./teste.png\") # save as png\n # plt.show() # display\n\n ##MOCK\n V = \"AB\"\n inicio = 1\n fim = 6\n res.edges = [(1,2,\"A\"),(1,2,E),(2,3,E),(3,4,\"B\"),(3,4,E),(4,5,E),(5,6,\"B\"),(4,1,E)]\n ##MOCK\n print(\"Edges: \",res.edges,\"\\n\")\n afn_afd(res.edges, inicio, fim,V)\n \n\ndef S():\n global STATE\n STATE += 1\n return STATE\n\n#Calcula AFN\ndef thompson(exp):\n s = Stack()\n\n if len(exp) == 1:\n g = Graph()\n g.edges.append((S(), S(), 'A'))\n return g\n\n for e in exp:\n if e.isalpha():\n s.push(e)\n elif e == '.':\n op1 = s.pop()\n op2 = s.pop()\n g = Graph()\n if isinstance(op2, Graph):\n g.edges.append((S(), op2.edges[0][0], '&'))\n g.edges += op2.edges\n if isinstance(op1, Graph):\n g.edges.append((g.edges[-1][1], op1.edges[0][0], '&'))\n g.edges += op1.edges\n g.edges.append((g.edges[-1][1], S(), '&'))\n else:\n g.edges.append((g.edges[-1][1], S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op1))\n g.edges.append((g.edges[-1][1], S(), '&'))\n else:\n g.edges.append((S(), S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op2))\n if isinstance(op1, Graph):\n g.edges.append((g.edges[-1][1], op1.edges[0][0], '&'))\n g.edges += op1.edges\n g.edges.append((g.edges[-1][1], S(), '&'))\n else:\n g.edges.append((g.edges[-1][1], S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op1))\n g.edges.append((g.edges[-1][1], S(), '&'))\n s.push(g)\n elif e == '|':\n op1 = s.pop()\n op2 = s.pop()\n g = Graph()\n finalState = S()\n finalOp1 = 1\n finalOp2 = 1\n if isinstance(op2, Graph):\n g.edges.append((S(), op2.edges[0][0], '&'))\n g.edges += op2.edges\n finalOp2 = op2.edges[-1][1]\n else:\n g.edges.append((S(), S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op2))\n finalOp2 = g.edges[-1][1]\n if isinstance(op1, Graph):\n g.edges.append((g.edges[0][0], op1.edges[0][0], '&'))\n g.edges += op1.edges\n finalOp1 = op1.edges[-1][1]\n else:\n g.edges.append((g.edges[0][0], S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op1))\n finalOp1 = g.edges[-1][1]\n #Add final states\n g.edges.append((finalOp2, finalState, '&'))\n g.edges.append((finalOp1, finalState, '&'))\n s.push(g)\n elif e == '*':\n op = s.pop()\n g = Graph()\n if isinstance(op, Graph):\n g.edges.append((S(), op.edges[0][0], '&'))\n g.edges += op.edges\n g.edges.append((g.edges[-1][1], op.edges[1][0], '&'))\n g.edges.append((g.edges[0][0], S(), '&'))\n g.edges.append((g.edges[-2][2], g.edges[-1][1], '&'))\n else:\n g.edges.append((S(), S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op))\n g.edges.append((g.edges[-1][1], g.edges[-1][0], '&'))\n g.edges.append((g.edges[-1][0], S(), '&'))\n g.edges.append((g.edges[0][0], g.edges[-1][1], '&'))\n\n s.push(g)\n elif e == '+':\n op = s.pop()\n g = Graph()\n if isinstance(op, Graph):\n # AND\n g.edges.append((S(), op.edges[0][0], '&'))\n g.edges += op.edges\n g.edges.append((g.edges[-1][1], S(), '&'))\n # *\n g.edges.append((g.edges[-1][1], op.edges[0][0], '&'))\n g.edges += op.edges\n g.edges.append((g.edges[-1][1], op.edges[1][0], '&'))\n g.edges.append((g.edges[0][0], S(), '&'))\n g.edges.append((g.edges[-2][2], g.edges[-1][1], '&'))\n\n else:\n # AND\n g.edges.append((S(), S(), '&'))\n g.edges.append((g.edges[-1][1], S(), op))\n g.edges.append((g.edges[-1][1], S(), '&'))\n # *\n g.edges.append((g.edges[-1][1], S(), op))\n g.edges.append((g.edges[-1][1], g.edges[-1][0], '&'))\n g.edges.append((g.edges[-2][1], S(), '&'))\n g.edges.append((g.edges[2][0], 
g.edges[-1][1], '&'))\n s.push(g)\n\n \n return s.pop()\n\n#Calcula no closure\ndef calcClosure(edges):\n closure = []\n interation = True;\n\n while interation == True:\n interation = False\n for i in range(len(edges)):\n nfrom = edges[i][0]\n nto = edges[i][1]\n weight = edges[i][2]\n if len(closure) == 0 and i == 0:\n closure.append(nfrom)\n if weight == E:\n closure.append(nto)\n else:\n if weight == E and nfrom in closure:\n if not nto in closure:\n interation = True\n closure.append(nto)\n return closure\n\n\n#Retorna o alfabeto do regex passado inicialmente\ndef getAlfabeto(alfabeto):\n return list(dict.fromkeys(re.findall(\"[A-Z]\", alfabeto)))\n\n#Retorna todos os estados do qual o no e o peso passado conseguem alcancar\ndef getAllStatesNode(idNumber,weight,edges):\n dfaedge = getStatsFromNode(idNumber,weight,edges)\n lenDfaedge = 0\n \n while len(dfaedge) != lenDfaedge:\n lenDfaedge = len(dfaedge)\n for e in dfaedge:\n addNodes = getStatsFromNode(e,weight,edges)\n if len(addNodes) > 0:\n for i in addNodes:\n if not i in dfaedge:\n dfaedge.append(i)\n \n return dfaedge\n\n#Retorna o estados do qual um no pode chegar pelo peso\ndef getStatsFromNode(idNumber,weight,edges,vazio = True):\n #Pega todos os edges que o no do idNumber chega\n dfaedge = []\n for e in edges:\n if e[0] == idNumber:\n if e[2] == weight or (vazio and e[2] == E):\n if not e[1] in dfaedge:\n dfaedge.append(e[1])\n \n return dfaedge\n\n# def addcreateTableAlfabet(ed,alfabeto):\n# for x in alfabeto:\n# ed[x] = {}\n\n#SE NO NAO ESTIVER NO ARR PASSADO ELE IRA ADICIONAR\ndef addSeNaoRepetir(arr,addNodes):\n for node in addNodes:\n if not node in arr:\n arr.append(node) \n\n##ADICIONA A LISTA DE CLOSURES UM NOVO CLOSURE, \n# VERIFICANDO SE O MESMO JA NAO EXISTE PARA NAO ADICIONAR 2X\ndef addSeNaoRepetirClosure(closures,addNodes,closuresNotAlreadyPass, DEBUG = False):\n nExist = True\n for clos in closures:\n if len(closures[clos]['C']) == len(addNodes):\n npA = np.asarray(closures[clos]['C'])\n npB = np.asarray(addNodes)\n\n if len(npA) > 1:\n npA.sort()\n \n if len(npB) > 1:\n npB.sort()\n\n if np.array_equal(npA,npB): \n nExist = False\n\n if nExist:\n name = \"S\"+str(len(closures))\n closures[name] = {\"C\":addNodes}\n closuresNotAlreadyPass.append(name)\n\n#ADICIONA AS TUPLAS PARA TODAS AS LETRAS DO ALFABETO PARA O DETERMINADO CLOSURE PASSADO\ndef addTabelaEstados(estados,alfabeto,nomeClosure):\n tupla = {}\n for x in alfabeto:\n tupla[x] = \"\"\n estados[nomeClosure] = tupla\n\n#ACHA QUAL E A TAG DO CLOSURE DO CONJUNTO EX: S3\ndef findClosure(closures,conjunto):\n npB = np.asarray(conjunto)\n\n if len(npB) > 0:\n npB.sort()\n\n for cl in closures:\n npA = np.asarray(closures[cl][\"C\"])\n if len(npA) > 0:\n npA.sort()\n if np.array_equal(npA,npB):\n return cl\n\n#Transforma o AFN em AFD\ndef afn_afd(edges, initialNode, finalNode, input):\n alfabeto = getAlfabeto(input)\n closuresNotAlreadyPass = [\"S0\"]\n closures = {}\n conjunto = {}\n estados = {}\n \n while len(closuresNotAlreadyPass) > 0:\n closureName = closuresNotAlreadyPass[0]\n del closuresNotAlreadyPass[0]\n \n if(closureName == \"S0\"):\n closures[closureName] = {\"C\":[initialNode]}\n #NO CLOSURE INICIAL NAO POSSO TER UM NO LEVANDO PRA ESSE ESTADO (ESTADO INICIAL)\n closures[closureName][\"U\"] = [f for f in getAllStatesNode(edges[0][0],E,edges) if f != initialNode]\n \n #BUSCA OS CONJUNTOS COM AS LETRAS DO ALFABETO\n for letra in alfabeto:\n for nodeValue in closures[closureName][\"C\"]:\n if conjunto.get(letra) == None or len(conjunto[letra]) == 
0:\n conjunto[letra] = getStatsFromNode(nodeValue,letra,edges,False) \n else:\n conjunto[letra].append(getStatsFromNode(nodeValue,letra,edges,False))\n\n # estados = {\"S0\":{\"A\":\"SX\",\"B\":\"SX\"}}\n addTabelaEstados(estados,alfabeto,closureName)\n \n # print(\"Uniao: \",closures[closureName]['U'])\n \n #ADICIONA A UNIAO AO CONJUNTO\n # if closureName == \"S0\":\n for letra in alfabeto:\n for node in closures[closureName]['U']:\n aux = getStatsFromNode(node,letra,edges,False) \n if len(aux) > 0:\n addSeNaoRepetir(conjunto[letra],aux)\n\n \n #ADICIONA O CONJUNTO AO CLOSURE SE O MESMO JA NAO ESTIVER LA\n for e in conjunto:\n addSeNaoRepetirClosure(closures,conjunto[e],closuresNotAlreadyPass)\n\n #LINKA A TRANSICAO DE ESTADOS COM O CLOSURE\n for conj in conjunto:\n closureFound = findClosure(closures,conjunto[conj]) \n estados[closureName][conj] = closureFound\n\n #ADICIONA A UNIAO DOS CLOSURES FALTANTES\n for cl in closures:\n if closures[cl].get(\"U\") == None:\n for union in closures[cl][\"C\"]:\n aux = getAllStatesNode(union,E,edges)\n if len(aux) > 0:\n if closures[cl].get(\"U\") == None:\n closures[cl][\"U\"] = aux\n else:\n closures[cl][\"U\"].append(aux)\n\n conjunto = {}\n\n print(\"Estados: \", estados)\n print(\"Closure\",closures)\n\n#Calcula a expressao para posfixa\ndef posFix(exp):\n # print(\"PF - Input: \",exp)\n stack = Stack()\n posfix = \"\"\n\n for x in exp:\n if(x >= 'A' and x <= 'Z'):\n posfix += x\n elif(x == '(' or x == '[' or x == '{'):\n stack.push(x)\n elif(x == ')' or x == ']' or x == '}'):\n posfix+=stack.pop()\n while prioridade[x] != prioridade[stack.top()]:\n posfix+=stack.pop()\n \n stack.pop()\n elif(x == '.' or x == '+' or x == '|' or x == '*'):\n while stack.top() != None and prioridade[x] <= prioridade[stack.top()]:\n posfix+=stack.pop()\n\n stack.push(x)\n\n while stack.empty() != True:\n posfix+=stack.pop()\n\n # print(\"PF - Output: \",posfix)\n return posfix\n\n\nif __name__ == '__main__':\n main()\n\n\n#https://www.youtube.com/watch?v=Efbtw2SjqRg\n#FIZ COM BASE NESSE VIDEO","sub_path":"ThompsonsConstruction.py","file_name":"ThompsonsConstruction.py","file_ext":"py","file_size_in_byte":13274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"375959520","text":"# Copyright 2008-2015 Nokia Networks\n# Copyright 2016- Robot Framework Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom inspect import getdoc, isclass\nfrom enum import Enum\n\nfrom robot.utils import Sortable, typeddict_types\nfrom robot.running import TypeConverter\n\n\nEnumType = type(Enum)\n\n\nclass DataTypeCatalog:\n\n def __init__(self, converters=None):\n self._converters = converters\n self._customs = set()\n self._enums = set()\n self._typed_dicts = set()\n\n def __iter__(self):\n return iter(sorted(self._customs | self._enums | self._typed_dicts))\n\n def __bool__(self):\n return next(iter(self), None) is not None\n\n @property\n def customs(self):\n return sorted(self._customs)\n\n @property\n def 
enums(self):\n return sorted(self._enums)\n\n @property\n def typed_dicts(self):\n return sorted(self._typed_dicts)\n\n def update(self, types):\n storages = {CustomDoc: self._customs,\n EnumDoc: self._enums,\n TypedDictDoc: self._typed_dicts}\n for typ in types:\n type_doc = self._get_type_doc_object(typ)\n for type_cls in storages:\n if isinstance(type_doc, type_cls):\n storages[type_cls].add(type_doc)\n\n def _get_type_doc_object(self, typ):\n if isinstance(typ, DataType):\n return typ\n if isinstance(typ, EnumType):\n return EnumDoc.from_type(typ)\n if isinstance(typ, typeddict_types):\n return TypedDictDoc.from_type(typ)\n info = TypeConverter.type_info_for(typ, self._converters)\n if info:\n return CustomDoc(info.name, info.doc)\n if isinstance(typ, dict) and 'type' in typ:\n cls = {EnumDoc.type: EnumDoc,\n TypedDictDoc.type: TypedDictDoc,\n CustomDoc.type: CustomDoc}.get(typ['type'])\n if cls:\n typ.pop('type')\n return cls(**typ)\n return None\n\n def to_dictionary(self):\n return {\n 'customs': [t.to_dictionary() for t in self.customs],\n 'enums': [t.to_dictionary() for t in self.enums],\n 'typedDicts': [t.to_dictionary() for t in self.typed_dicts]\n }\n\n\nclass DataType(Sortable):\n type = None\n\n def __init__(self, name, doc):\n self.name = name\n self.doc = doc\n\n @property\n def _sort_key(self):\n return self.name.lower()\n\n def to_dictionary(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'doc': self.doc,\n }\n\n\nclass TypedDictDoc(DataType):\n type = 'TypedDict'\n\n def __init__(self, name, doc, items=None):\n super().__init__(name, doc)\n self.items = items or []\n\n @classmethod\n def from_type(cls, typed_dict):\n items = []\n required_keys = list(getattr(typed_dict, '__required_keys__', []))\n optional_keys = list(getattr(typed_dict, '__optional_keys__', []))\n for key, value in typed_dict.__annotations__.items():\n typ = value.__name__ if isclass(value) else str(value)\n required = key in required_keys if required_keys or optional_keys else None\n items.append({'key': key, 'type': typ, 'required': required})\n return cls(name=typed_dict.__name__,\n doc=getdoc(typed_dict) or '',\n items=items)\n\n def to_dictionary(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'doc': self.doc,\n 'items': self.items\n }\n\n\nclass EnumDoc(DataType):\n type = 'Enum'\n\n def __init__(self, name, doc, members=None):\n super().__init__(name, doc)\n self.members = members or []\n\n @classmethod\n def from_type(cls, enum_type):\n return cls(name=enum_type.__name__,\n doc=getdoc(enum_type) or '',\n members=[{'name': name, 'value': str(member.value)}\n for name, member in enum_type.__members__.items()])\n\n def to_dictionary(self):\n return {\n 'type': self.type,\n 'name': self.name,\n 'doc': self.doc,\n 'members': self.members\n }\n\n\nclass CustomDoc(DataType):\n type = 'Custom'\n","sub_path":"src/robot/libdocpkg/datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569067499","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport githistoryvis as ghv\n\n\n# ### Gather the data\n# \n# Githistoryvis exposes the class `git_history`.\n# \n# The inizialization:\n# \n# ```python\n# foo = git_history(PATH)\n# ```\n# sets the attribute `foo.path` that point to the git respository in PATH.\n# \n# Also `def_states` (and `def_states_explain`) are defined at inizialitation.\n# They are used to transform the state in the dataframe to number for 
visualization and define the legend.\n# \n# You can overwrite them at your own risk.\n# \n# ```python\n# def_states = {\n# 'A' : 120.,\n# 'M' : 180.,\n# 'S' : 255., # custom value, Static\n# 'D' : 240.,\n# 'N' : 128., # custom value, Non existent\n# }\n# \n# def_states_explain = {\n# 'A' : 'Added',\n# 'D' : 'Deleted',\n# 'M' : 'Modified',\n# 'S' : 'Static',\n# 'N' : 'Non existent'\n# }\n# ```\n# \n# \n# The method\n# \n# ```python\n# foo.get_history()\n# ```\n# extracts the git log, and define:\n# \n# - foo.all_commits = the whole git log\n# - foo.commits = the commits SHA-1\n# - foo.all_files = all the unique file ever existed\n# \n# \n\n# In[33]:\n\nimport os\n\npath = os.getcwd() # put here the desired git repo path\n\ngt = ghv.git_history(path)\n\ngt.get_history()\n\n\n# ### Visualize the data\n# \n# We define a pandas DataFrame to contain all the files (Rows) and the status (Columns).\n# \n# This Grid represent the status of each file at each step or commit.\n# \n# The inizial stata for all the files is `N` or `Non existent`, they are updated in the sequential reding of `git_history.all_commits` object.\n\n# ## Deserialize and structure the data\n# \n# The data gather in `githistoryvis.git_history` object are deserialized and gathered in a pandas DataFrame.\n\n# In[7]:\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[8]:\n\ndef data_structure(git_history_object):\n \n all_filenames = pd.DataFrame(pd.DataFrame(list(git_history_object.all_files)),columns=git_history_object.commits, index=git_history_object.all_files)\n\n # fill NaN\n all_filenames.fillna('N', inplace=True)\n\n actual_commit = 0\n # previous_commit = 0\n for i in git_history_object.all_commits:\n # set the commit number\n if i[0] == 'C':\n value = i[1]\n # starting at the second commit see which file exist in the previous commit\n if actual_commit != int(all_filenames.columns[0]):\n previous_commit = actual_commit\n actual_commit = value\n # assig 1 to file not null un the previous commit\n if previous_commit != 0:\n all_filenames[actual_commit][\n (all_filenames[previous_commit] != 'N') & (all_filenames[previous_commit] != 'D')] = 'S'\n # all_filenames[previous_commit][all_filenames[actual_commit] == 'D'] = 'D'\n # all_filenames[actual_commit][all_filenames[actual_commit] == 'D'] = 'N'\n # print previous_commit,'>',actual_commit\n else:\n state,value = i\n # print ' '*4,'-',state,value\n all_filenames.ix[value,actual_commit] = state\n return all_filenames\n\n\n# In[9]:\n\nall_filenames = data_structure(gt)\n\n\n# ## Visualize the data\n# \n# The data from the pandas DataFrame coul be visualized by this simple example routine.\n# \n# The arguments are:\n# \n# - size (default 200) : the size of the pyplot.scatteplot.\n# - figsize (default [9,7]) : size of the pyplot.figure.\n# - linewidths (default 3) : width of the pyplot.scatteplot outer lines.\n# - outpath : if defined, the figure will be saved without visualization.\n\n# In[10]:\n\nimport matplotlib\nfrom matplotlib import pyplot as plt\nget_ipython().magic(u'matplotlib inline')\n\n\n# In[38]:\n\ndef plot_history_df(plot_df,**kwargs):\n\n if 'size' in kwargs:\n size = kwargs['size']\n else:\n size = 500\n \n if 'figsize' in kwargs:\n figsize = kwargs['figsize']\n else:\n figsize = [10,12]\n \n if 'linewidths' in kwargs:\n linewidths = kwargs['linewidths']\n else:\n linewidths = 3\n \n h = plot_df.applymap(lambda x: gt.def_states[x]).values.copy()\n h[h == 128] = np.nan\n\n fig = plt.figure(figsize=figsize)\n\n ax = plt.subplot(111)\n for i in 
range(len(plot_df.index)):\n x = range(len(plot_df.columns))\n y = [i for kk in x]\n ax.scatter(x, y, s = size, c=h[i,:], alpha=1, marker='o',linewidths = linewidths , cmap = plt.cm.spectral,vmin = 0, vmax = 255)\n ax.plot(x, y, lw = 3, c='k', zorder=0)\n\n ax.set_xticks(range(h.shape[1]))\n ax.set_xticklabels(plot_df.columns,rotation=90)\n\n ax.set_xlabel('commits sha-1 (time arrow to the right ->)')\n ax.set_xlim([-.5,len(plot_df.columns)-0.5])\n ax.set_ylabel('file names')\n ax.set_yticks(range(h.shape[0]))\n ax.set_yticklabels(plot_df.index.tolist())\n ax.set_yticks = 0.1\n # set 0 to bounding box width\n [i.set_linewidth(0.0) for i in ax.spines.itervalues()]\n # see http://stackoverflow.com/a/20416681/1435167\n # erase x ticks\n for tic in ax.xaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n # tic.label1On = tic.label2On = False\n # erase y ticks\n for tic in ax.yaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n # tic.label1On = tic.label2On = False\n\n ax2 = fig.add_axes([0.25, .9, 0.5, 0.075])\n\n colors = np.array(gt.def_states.values()).astype('float')\n colors[colors == 128] = np.nan\n\n x = range(len(colors))\n y = [1 for kk in x]\n ax2.scatter(x, y, s = size, c=colors, alpha=1, marker='o',linewidths = 3, cmap = plt.cm.spectral,vmin = 0, vmax = 255)\n ax2.plot(x, y, lw = 3, c='k', zorder=0)\n\n ax2.set_xticks(x)\n ax2.set_xticklabels(gt.def_states_explain.values())\n ax2.set_xlabel('Legend')\n ax2.set_xlim([-.5,len(x)-0.5])\n ax2.set_ylim([0.99,1.01])\n # set 0 to bounding box width\n [i.set_linewidth(0.0) for i in ax2.spines.itervalues()]\n # # see http://stackoverflow.com/a/20416681/1435167\n # erase x ticks\n for tic in ax2.xaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n # erase y ticks\n for tic in ax2.yaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n tic.label1On = tic.label2On = False\n\n if 'outpath' in kwargs:\n# print 'Saving figure in '+kwargs['outpath']\n fig.savefig(kwargs['outpath'])\n plt.close()\n\n\n# In[49]:\n\nall_filenames\nplot_history_df(all_filenames,size= 375)\nplot_history_df(all_filenames,size= 375,outpath=path+os.sep+'images/complete_visual_history.png')\n\n\n# In[48]:\n\n# filtering the history on:\n# a commit range\nplot_df_commit_range = all_filenames.ix[:,'a4cb9a1':'1222c5e']\nplot_df_commit_range\nplot_history_df(plot_df_commit_range,size= 350)\nplot_history_df(plot_df_commit_range,size= 350,outpath=path+os.sep+'images/commit_range.png')\n\n\n# In[47]:\n\n# filtering the history on:\n# a file range: all files not ending with txt\nplot_df_file_range = all_filenames[~all_filenames.index.str.contains('txt$')]\nplot_history_df(plot_df_file_range,size= 300,figsize= [9,7])\nplot_history_df(plot_df_file_range,size= 300,figsize= [9,7],outpath=path+os.sep+'images/file_range.png')\n\n\n# In[50]:\n\n# filtering the history on:\n# a commit range AND a file range: all files not ending with txt\nplot_df_commit_file_range = all_filenames.ix[:,'a4cb9a1':'1222c5e'][~all_filenames.index.str.contains('txt$')]\nplot_history_df(plot_df_commit_file_range,size= 300,figsize= [9,7])\nplot_history_df(plot_df_commit_file_range,size= 300,figsize= [9,7],outpath=path+os.sep+'images/commit_file_range.png')\n\n\n# In[54]:\n\n# filtering the history on:\n# a commit range AND a file range: all files not ending with txt\nplot_df_state_filter = all_filenames[all_filenames[all_filenames.columns[-1]] != 'N']\nplot_history_df(plot_df_state_filter,size= 300,figsize= [9,7])\nplot_history_df(plot_df_state_filter,size= 300,figsize= 
[9,7],outpath=path+os.sep+'images/state_filter.png')\n\n\n# In[ ]:\n\n\n\n","sub_path":"git_history_test_git.py","file_name":"git_history_test_git.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541964260","text":"from pycocotools.cocoeval import COCOeval\nfrom collections import defaultdict\nimport time\nimport numpy as np\nimport copy\nimport pycocotools.mask as maskUtils\nfrom PIL import Image\nimport math\n\nclass DEPTHeval(COCOeval):\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n # super(DEPTHeval,self).__init__(cocoGt, cocoDt, iouType)\n\n if not iouType:\n print('iouType not specified. use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n ###################################################################\n self.depth_error = {}\n ####################################################################\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n\n p = self.params\n if p.useCats:\n gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n\n\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n # print('££££££££££££££££££', gt['image_id'], gt['category_id'])\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n # number of ground truth items (anno id+1)\n # len(self._dts): 111, number of detected items\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self, each_category=False):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n # print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n # print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n if p.maxDets:\n p.maxDets = sorted(p.maxDets)\n self.params = p\n self._prepare()\n\n\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n\n\n # if p.iouType == 'depth':\n # compute_depth_metrics = self.compute_depth_metrics\n # self.depth_error = {(imgId, catIds): compute_depth_metrics(imgId, catId) \\\n # for imgId in p.imgIds\n # for catId in catIds}\n # print('£££££££££££££££££££££££££', self.depth_error)\n # else:\n # if p.iouType == 'segm' or p.iouType == 'bbox':\n # computeIoU = self.computeIoU\n #\n # elif p.iouType == 'keypoints':\n # computeIoU = self.computeOks\n #\n # self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n # for imgId in p.imgIds\n # for catId in catIds}\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n elif p.iouType =='depth':\n computeIoU = self.compute_depth_metrics\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n elif p.iouType == 'whole_depth':\n computeIoU = self.compute_whole_depth_metrics\n\n\n\n # if p.iouType == 'depth' or p.iouType == 'whole_depth':\n # error = []\n # for imgId in p.imgIds:\n # for catId in catIds:\n # x = computeIoU(imgId, catId)\n # if x != []:\n # error.append(x)\n # error = np.asarray(error)\n # if len(error.shape) !=2:\n # self.mean_error = np.ones(9)\n # else:\n # self.mean_error = np.mean(error, axis=0)\n # if each_category:\n # # if self.mean_error[0]<0.3:\n # print('**********',catIds, self.mean_error)\n if p.iouType == 'depth' or p.iouType == 'whole_depth':\n error = []\n for imgId in p.imgIds:\n x = computeIoU(imgId, catIds)\n if x != []:\n error.append(x)\n error = np.asarray(error)\n if len(error.shape) != 2:\n self.mean_error = np.ones(9)\n else:\n self.mean_error = np.mean(error, axis=0)\n if each_category:\n print('**********', catIds, self.mean_error)\n else:\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n\n toc = time.time()\n # print('DONE (t={:0.2f}s).'.format(toc - tic))\n def compute_whole_depth_metrics(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId, catId]\n dt = self._dts[imgId, catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n\n if len(gt) == 0 or len(dt) == 0:\n return []\n # inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n # dt = [dt[i] for i in inds]\n # if len(dt) > p.maxDets[-1]:\n # dt = dt[0:p.maxDets[-1]]\n\n g = [g['depth'] for g in gt]\n d = [d['whole_depth'] for d in dt]\n\n\n depth_d = Image.open(d[0])\n width, height = depth_d.size\n depth_d = np.array(depth_d)/1000\n depth_g = Image.open('/home/wenjing/storage/ScanNetv2/' + g[0]).resize((width, height))\n depth_g = np.array(depth_g)/1000\n depth_d[depth_d > 10] = 10\n\n valid_mask = (depth_d > 0) & (depth_g> 0) # (depth_d!=0) # negative predicted values\n # valid_mask = (depth_g > 0)\n depth_d = depth_d[valid_mask]\n depth_g = depth_g[valid_mask]\n\n\n\n thresh = np.maximum((depth_g / depth_d), (depth_d / 
depth_g))\n a1 = (thresh < 1.25).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n abs_diff = np.abs(depth_g - depth_d)\n mse = np.mean(np.square(abs_diff))\n rmse = np.sqrt(mse)\n\n mse_log = (np.log(depth_g) - np.log(depth_d)) ** 2\n rmse_log = np.sqrt(mse_log.mean())\n\n abs_rel = np.mean(np.abs(depth_d - depth_g) / depth_g)\n sq_rel = np.mean(((depth_g - depth_d) ** 2) / depth_g)\n\n\n log10_error = np.abs(np.log10(depth_g) - np.log10(depth_d))\n log10_mean = np.mean(log10_error)\n # metrics = [abs_rel, sq_rel, rmse, rmse_log, log10_mean, a1, a2, a3]\n\n inv_output = 1 / depth_d\n inv_target = 1 / depth_g\n abs_inv_diff = np.abs((inv_output - inv_target))\n imae = np.mean(abs_inv_diff.mean())\n irmse = np.sqrt(np.mean(np.power(abs_inv_diff, 2)))\n mae = np.mean(abs_diff)\n log_diff = np.log(depth_g) - np.log(depth_d)\n log_mae = np.mean(np.abs(log_diff))\n\n num_pixels = float(log_diff.size)\n scale_invar = np.sum(np.square(log_diff)) / num_pixels - np.square(np.sum(log_diff)) / np.square(num_pixels)\n\n metrics = [abs_rel, imae, irmse, log_mae, rmse_log, mae, rmse, scale_invar, sq_rel]\n return metrics\n\n def compute_depth_metrics(self, imgId, catId):\n p = self.params\n\n gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n # print('^^^^^^^^', len(gt), len(dt))\n # if p.useCats:\n # gt = self._gts[imgId, catId]\n # dt = self._dts[imgId, catId]\n # else:\n # gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n # dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n # print('^^^^^^^^', len(gt), len(dt))\n\n if len(gt) == 0 or len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n # if len(dt) > p.maxDets[-1]:\n # dt = dt[0:p.maxDets[-1]]\n\n\n # if p.iouType == 'depth':\n # g = [g['depth'] for g in gt]\n # d = [d['depth'] for d in dt]\n d_c = []\n if p.iouType == 'depth':\n g = [g['depth'] for g in gt]\n for d in dt:\n if d['score'] > 0:\n d_c.append(d['depth'])\n else:\n raise Exception('unknown iouType for iou computation')\n depth_g_ = Image.open('/home/wenjing/storage/ScanNetv2/' + g[0]).resize((320, 240))\n depth_g_ = np.array(depth_g_)/1000\n\n depth_d = np.zeros((240,320))\n for d_part in d_c:\n mask = depth_d==0\n depth_i = Image.open(d_part)\n depth_i = np.array(depth_i)\n depth_d += depth_i*mask\n depth_d = depth_d/1000\n depth_d[depth_d > 10] = 10\n # depth_d = depth_d.astype(int)\n # depth_d[depth_d <0.5]=0\n valid_mask = (depth_d>0) & (depth_g_>0) #(depth_d!=0) # negative predicted values\n # valid_mask = (depth_g_ > 0)\n depth_d = depth_d[valid_mask]\n depth_g = depth_g_[valid_mask]\n\n if len(depth_d)==0:\n return []\n # thresh = np.maximum((depth_g/depth_d), (depth_d/depth_g))\n # a1 = (thresh < 1.25).mean()\n # a2 = (thresh < 1.25 ** 2).mean()\n # a3 = (thresh < 1.25 ** 3).mean()\n\n abs_diff = np.abs(depth_g - depth_d)\n\n mse = np.mean(np.square(abs_diff))\n rmse = np.sqrt(mse)\n\n mse_log = (np.log(depth_g) - np.log(depth_d)) ** 2\n rmse_log = np.sqrt(mse_log.mean())\n\n\n abs_rel = np.mean(np.abs(depth_d - depth_g) / depth_g)\n sq_rel = np.mean(((depth_g - depth_d) ** 2) / depth_g)\n\n # log10_error = np.abs(np.log10(depth_g)-np.log10(depth_d))\n # log10_mean = np.mean(log10_error)\n # metrics = [abs_rel, sq_rel, rmse, rmse_log, log10_mean, a1, a2, a3]\n\n inv_output = 1 / depth_d\n inv_target = 1 / depth_g\n abs_inv_diff = np.abs((inv_output - inv_target))\n imae = 
np.mean(abs_inv_diff)\n irmse = np.sqrt(np.mean(np.power(abs_inv_diff, 2)))\n mae = np.mean(abs_diff)\n log_diff = np.log(depth_g) - np.log(depth_d)\n log_mae = np.mean(np.abs(log_diff))\n\n num_pixels = float(log_diff.size)\n scale_invar = np.sum(np.square(log_diff)) / num_pixels - np.square(np.sum(log_diff)) / np.square(num_pixels)\n\n metrics = [abs_rel, imae, irmse, log_mae, rmse_log, mae, rmse, scale_invar, sq_rel]\n return metrics\n\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n # print(p.iouType)\n # print('222222222222222', len(g)) # 1 or 0\n # if p.iouType == 'segm':\n # print('33333333333333333', d[0]) # len(d) 0-26?\n # d: list (8,1,28,28)\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n\n def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap == 1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:, :, :, aind, mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:, :, aind, mind]\n if len(s[s > -1]) == 0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s > -1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', 
maxDets=self.params.maxDets[2])\n return stats\n\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n\n def _summarize_Depth():\n stats = np.zeros((9,))\n for i in range(9):\n stats[i] = self.mean_error[i]\n return stats\n\n if not self.eval and self.params.iouType != 'depth' and self.params.iouType != 'whole_depth':\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n elif iouType == 'depth' or iouType == 'whole_depth':\n summarize = _summarize_Depth\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()\n\nclass Params:\n def __init__(self, iouType='segm'):\n if iouType == 'depth':\n self.setDepthParams()\n elif iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n elif iouType == 'whole_depth':\n self.set_whole_depth_params()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None\n\n def set_whole_depth_params(self):\n self.imgIds = []\n self.catIds = []\n self.useCats = 1\n self.maxDets = [1, 10, 100]\n\n def setDepthParams(self):\n self.imgIds = []\n self.catIds = []\n self.maxDets = [1, 10, 100]\n self.useCats = 1\n # self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n # self.areaRngLbl = ['all', 'small', 'medium', 'large']\n\n\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, np.round((0.95 - .5) / .05) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, np.round((1.00 - .0) / .01) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. 
the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, np.round((0.95 - .5) / .05) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, np.round((1.00 - .0) / .01) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n","sub_path":"maskrcnn_benchmark/data/datasets/torch2/pycocotools2/DepthEval.py","file_name":"DepthEval.py","file_ext":"py","file_size_in_byte":20775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"504534046","text":"import os\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom tensorflow.python.eager import context\n\n\nclass TrainValTensorBoard(TensorBoard):\n def __init__(self, log_dir='./run_logs', X_train=None, X_test=None,\n y_train=None, y_test=None, **kwargs):\n \"\"\"\n Found from:\n https://stackoverflow.com/questions/47877475/keras-tensorboard-plot-train-and-validation-scalars-in-a-same-figure\n\n This is useful for logging the test vs log data\n\n :param log_dir:\n :param kwargs:\n \"\"\"\n # Make the original `TensorBoard` log to a subdirectory 'training'\n self.y_test = y_test\n self.y_train = y_train\n self.X_test = X_test\n self.X_train = X_train\n training_log_dir = os.path.join(log_dir, 'training')\n super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)\n\n # Log the validation metrics to a separate subdirectory\n self.val_log_dir = os.path.join(log_dir, 'validation')\n\n \"\"\" Add the image progress \"\"\"\n self.get_images()\n\n def set_model(self, model):\n # Setup writer for validation metrics\n self.val_writer = tf.summary.FileWriter(self.val_log_dir)\n super(TrainValTensorBoard, self).set_model(model)\n\n def on_epoch_end(self, epoch, logs=None):\n # Pop the validation logs and handle them separately with\n # `self.val_writer`. Also rename the keys so that they can\n # be plotted on the same figure with the training metrics\n # print(f'Logging for epoch {epoch} is {logs}')\n logs = logs or {}\n val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}\n \"\"\" Add the logs \"\"\"\n for name, value in val_logs.items():\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = value.item()\n summary_value.tag = name\n self.val_writer.add_summary(summary, epoch)\n\n self.val_writer.flush()\n\n # Pass the remaining logs to `TensorBoard.on_epoch_end`\n logs = {k: v for k, v in logs.items() if not k.startswith('val_')}\n self._write_custom_summaries(epoch, logs)\n\n def _write_custom_summaries(self, step, logs=None):\n \"\"\"Writes metrics out as custom scalar summaries.\n\n Arguments:\n step: the global step to use for Tensorboard.\n logs: dict. 
Keys are scalar summary names, values are\n NumPy scalars.\n\n \"\"\"\n logs = logs or {}\n if context.executing_eagerly():\n # use summary ops\n with self.writer.as_default(), tf.contrib.summary.always_record_summaries():\n for name, value in logs.items():\n tf.contrib.summary.scalar(name, value.item(), step=step)\n else:\n # use FileWriter from v1 summary\n for name, value in logs.items():\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = value.item()\n summary_value.tag = name\n self.writer.add_summary(summary, step)\n self.writer.flush()\n\n def on_batch_end(self, batch, logs=None):\n pass\n\n def on_train_end(self, logs=None):\n super(TrainValTensorBoard, self).on_train_end(logs)\n self.val_writer.close()\n\n def get_images(self):\n print('Hello')\n image_shaped_input = tf.reshape(self.X_test, [-1, *self.X_test[0].shape])\n # image_shaped_input = tf.transpose(image_shaped_input, perm=[0, 3, 2, 1])\n return tf.summary.image(name='input', tensor=image_shaped_input, max_outputs=4)#len(self.X_test))","sub_path":"lowes-product-classifier/josiah_testing/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197786090","text":"__author__ = 'Alexandre Calil Martins Fonseca, Github: xandao6'\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# FIXME Ideia: poder comparar medias\n\n\nfrom strategies.fixed_system import fixed_system\nfrom strategies.percentage_system import percentage_system\nfrom strategies.kelly_criterion import kelly_criterion\nfrom typing import Union, List\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\n\n\nstyle.use('bmh')\n\n\n# General Input\nsamples = 1\nwin_rate = 0.5000 # win rate: 0.0000-1.0000\npayout_rate = 1.0000 # payout rate: 0.0000-2.0000 generally, but you choose\nbankroll = 500\nbet_count = 1000\nstoploss = None\nstopgain = None\n\n# Fixed System and Percentage System Input\nbet_percentage = 0.0200 # bet percentage: 0.0000-1.0000\n\n# Percentage System and Kelly Criterion Input\n#FIXME add to kelly criterion\nminimum_bet_value = 2\n\n# Kelly Criterion Input\nkelly_fraction = 1 # kelly fraction: 0.0000 to +inf, generally 1, 0.5 or 0.25\n\n\nif bankroll*bet_percentage <= minimum_bet_value:\n bet_percentage = minimum_bet_value/100.0 \n print(f'Bet size is less than minimum bet value! Adjusting the bet ' +\n f'percentage to {bet_percentage}\\n')\n \n \ndef main(): \n results = generate_random_bet_results(win_rate, bet_count, samples)\n \n betX, bkrY = fixed_system(\n results,\n payout_rate,\n bankroll, \n bet_percentage,\n stoploss,\n stopgain\n )\n plot_config('Fixed System', betX, bkrY, samples, False)\n\n betX, bkrY = percentage_system(\n results, \n payout_rate,\n bankroll, \n bet_percentage,\n minimum_bet_value,\n stoploss,\n stopgain\n )\n plot_config('Percentage System', betX, bkrY, samples, False)\n\n betX, bkrY = kelly_criterion(\n results,\n win_rate,\n payout_rate,\n bankroll, \n kelly_fraction,\n minimum_bet_value,\n stoploss,\n stopgain\n )\n if betX is not None and bkrY is not None:\n plot_config('Kelly Criterion', betX, bkrY, samples, False)\n \n plt.show()\n\n\ndef generate_random_bet_results(\n win_rate: float, \n bet_count: int,\n samples: int\n) -> List[List[bool]]:\n '''\n Parameters\n ----------\n win_rate : float\n The win rate is a rate that can range from 0.0000 to 1.0000, which \n means the percentage you have of winning. 
\n To know your win rate you must divide the total bets you won by the \n total bet, the more bets the more \n accurate that rate will be.\n bet_count : int\n The bet count is the amount of bets you will simulate.\n samples : int\n The amount of samples that we will plot on the graph.\n\n Returns\n -------\n List[List[bool]]\n The results are a list of betting results, the innermost lists \n represent the amount of bets and the outermost lists represent \n the number of samples.\n '''\n results = []\n for _ in range(samples):\n results_temp = []\n for _ in range(bet_count):\n result = round(random.uniform(0,1),4)\n if result <= win_rate:\n results_temp.append(True)\n elif result > win_rate: \n results_temp.append(False)\n results.append(results_temp.copy())\n return results\n \n \ndef plot_config(\n title: str, \n bet_count_history_X: List[List[int]], \n bankroll_history_Y: List[List[Union[int, float]]],\n samples: int,\n new_fig: bool = True\n) -> None:\n '''\n Parameters\n ----------\n title : str\n The title of the graph.\n bet_count_history_X : List[List[int]]\n bet_count_history_X is a list that contain the X axis lists which is \n the amount of bets.\n bankroll_history_Y : List[List[Union[int, float]]]\n bankroll_history_Y is a list that contain the Y axis lists which is \n the bankroll history.\n samples : int\n The amount of samples that we will plot on the graph.\n new_fig : bool, optional\n new_fig is to open a new graph window. The default is True.\n\n Returns\n -------\n None\n '''\n if new_fig:\n plt.figure()\n for x, y in zip(bet_count_history_X, bankroll_history_Y):\n plt.plot(x, y, linewidth = 0.6)\n plt.title(title)\n elif not new_fig and samples <= 3:\n for x, y in zip(bet_count_history_X, bankroll_history_Y):\n plt.plot(x, y, linewidth = 0.6, label=title) \n leg = plt.legend()\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n else: \n for x, y in zip(bet_count_history_X, bankroll_history_Y):\n plt.plot(x, y, linewidth = 0.6)\n \n plt.ylabel('Bankroll')\n plt.xlabel('Bet Count')\n plt.axhline(bankroll, color = 'b', linewidth = 0.5)\n plt.axhline(0, color = 'r', linewidth = 2)\n\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"566793911","text":"import sys\nimport os\nimport shutil\nfrom ivadomed.utils import init_ivadomed, __ivadomed_dir__\nfrom ivadomed.scripts import download_data as ivadomed_download_data\n\n__test_dir__ = os.path.join(__ivadomed_dir__, 'testing/functional_tests')\n__fixtures_dir__ = os.path.join(__test_dir__, 'fixtures')\n__data_testing_dir__ = \"data_functional_testing\"\n__tmp_dir__ = \"tmp\"\n\ninit_ivadomed()\n\n\nclass bcolors(object):\n \"\"\"Class for different colours.\"\"\"\n\n normal = '\\033[0m'\n red = '\\033[91m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n blue = '\\033[94m'\n magenta = '\\033[95m'\n cyan = '\\033[96m'\n bold = '\\033[1m'\n underline = '\\033[4m'\n\n\ndef printv(string, verbose=1, type='normal'):\n \"\"\"Print color-coded messages, depending on verbose status.\n\n Only use in command-line programs (e.g. 
sct_propseg).\n \"\"\"\n colors = {\n 'normal': bcolors.normal,\n 'info': bcolors.green,\n 'warning': bcolors.yellow,\n 'error': bcolors.red,\n 'code': bcolors.blue,\n 'bold': bcolors.bold,\n 'process': bcolors.magenta\n }\n\n if verbose:\n # The try/except is there in case stdout does not have isatty field (it did happen to me)\n try:\n # Print color only if the output is the terminal\n if sys.stdout.isatty():\n color = colors.get(type, bcolors.normal)\n print(color + string + bcolors.normal)\n else:\n print(string)\n except Exception:\n print(string)\n\n\ndef download_dataset(dataset='data_testing', verbose=True):\n \"\"\"Download testing data from internet.\n\n Args:\n verbose (bool): whether or not to print\n \"\"\"\n printv('\\nDownloading testing data...', verbose)\n if os.path.exists(__tmp_dir__):\n __dataset_dir__ = os.path.join(__tmp_dir__, dataset)\n else:\n __dataset_dir__ = os.path.join(__test_dir__, dataset)\n ivadomed_download_data.main([\n '-d', dataset,\n '-o', __dataset_dir__\n ])\n\n\ndef remove_dataset(dataset='data_testing', verbose=True):\n \"\"\"Recursively remove the data_testing folder.\n\n Args:\n verbose (bool): whether or not to print\n \"\"\"\n if os.path.exists(__tmp_dir__):\n __dataset_dir__ = os.path.join(__tmp_dir__, dataset)\n else:\n __dataset_dir__ = os.path.join(__test_dir__, dataset)\n printv(\"rm -rf %s\" % (__dataset_dir__), verbose=verbose, type=\"code\")\n shutil.rmtree(__dataset_dir__, ignore_errors=True)\n\n\ndef create_tmp_dir(copy_data_testing_dir=True):\n \"\"\"Create temporary directory for test data and copy test data files.\n\n 1. Remove the ``tmp`` directory if it exists.\n 2. Copy the ``data_functional_testing`` directory to the ``tmp`` directory.\n\n Any data files created during testing will go into ``tmp`` directory.\n This is created/removed for each test.\n\n Args:\n copy_data_testing_dir (bool): If true, copy the __data_testing_dir_ref__ folder\n into the ``tmp`` folder.\n \"\"\"\n remove_tmp_dir()\n os.mkdir(__tmp_dir__)\n if os.path.exists(__data_testing_dir__) and copy_data_testing_dir:\n shutil.copytree(__data_testing_dir__,\n os.path.join(__tmp_dir__, __data_testing_dir__))\n\n\ndef remove_tmp_dir():\n \"\"\"Recursively remove the ``tmp`` directory if it exists.\"\"\"\n shutil.rmtree(__tmp_dir__, ignore_errors=True)\n","sub_path":"testing/functional_tests/t_utils.py","file_name":"t_utils.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"524738193","text":"from util import *\n\n\n@apply\ndef apply(fx, interval, x=None):\n assert fx._subs(x, -x) == fx\n return Equal(Inf[x:-interval](fx), Inf[x:interval](fx))\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n\n m, M = Symbol(real=True, given=True)\n x = Symbol(real=True)\n Eq << apply(x ** 2, Interval(m, M, right_open=True), x)\n\n f = Function(real=True, eval=lambda x: x ** 2)\n Eq << Equal(f(x), f(-x), plausible=True)\n\n Eq << Eq[-1].this.lhs.defun()\n\n Eq << Eq[-1].this.rhs.defun()\n\n Eq << algebra.eq.imply.eq.inf.st.even_function.apply(Eq[-2], Eq[0].find(Interval), x)\n\n Eq << Eq[-1].this.find(f).defun()\n\n Eq << Eq[-1].this.find(f).defun().reversed\n\n \n\n\nif __name__ == '__main__':\n run()\n# created on 2019-09-18\n# updated on 2022-04-03\n","sub_path":"axiom/algebra/imply/eq/inf/st/even_function.py","file_name":"even_function.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"137116538","text":"import sys\nimport argparse\n\nfrom hailtop.batch_client.client import BatchClient\nfrom . import list_batches\nfrom . import delete\nfrom . import get\nfrom . import cancel\nfrom . import wait\nfrom . import log\nfrom . import job\n\n\ndef parser():\n main_parser = argparse.ArgumentParser(\n prog='hailctl batch',\n description='Manage batches running on the batch service managed by the Hail team.')\n subparsers = main_parser.add_subparsers()\n\n list_parser = subparsers.add_parser(\n 'list',\n help=\"List batches\",\n description=\"List batches\")\n get_parser = subparsers.add_parser(\n 'get',\n help='Get a particular batch\\'s info',\n description='Get a particular batch\\'s info')\n cancel_parser = subparsers.add_parser(\n 'cancel',\n help='Cancel a batch',\n description='Cancel a batch')\n delete_parser = subparsers.add_parser(\n 'delete',\n help='Delete a batch',\n description='Delete a batch'\n )\n log_parser = subparsers.add_parser(\n 'log',\n help='Get log for a job',\n description='Get log for a job'\n )\n job_parser = subparsers.add_parser(\n 'job',\n help='Get the status and specification for a job',\n description='Get the status and specification for a job'\n )\n wait_parser = subparsers.add_parser(\n 'wait',\n help='Wait for a batch to complete, then print JSON status.',\n description='Wait for a batch to complete, then print JSON status.'\n )\n\n list_parser.set_defaults(module='list')\n list_batches.init_parser(list_parser)\n\n get_parser.set_defaults(module='get')\n get.init_parser(get_parser)\n\n cancel_parser.set_defaults(module='cancel')\n cancel.init_parser(cancel_parser)\n\n delete_parser.set_defaults(module='delete')\n delete.init_parser(delete_parser)\n\n log_parser.set_defaults(module='log')\n log.init_parser(log_parser)\n\n job_parser.set_defaults(module='job')\n job.init_parser(job_parser)\n\n wait_parser.set_defaults(module='wait')\n wait.init_parser(wait_parser)\n\n return main_parser\n\n\ndef main(args):\n if not args:\n parser().print_help()\n sys.exit(0)\n jmp = {\n 'list': list_batches,\n 'delete': delete,\n 'get': get,\n 'cancel': cancel,\n 'log': log,\n 'job': job,\n 'wait': wait\n }\n\n args, pass_through_args = parser().parse_known_args(args=args)\n\n # hailctl batch doesn't create batches\n client = BatchClient(None)\n\n try:\n jmp[args.module].main(args, pass_through_args, client)\n finally:\n client.close()\n","sub_path":"hail/python/hailtop/hailctl/batch/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"401858024","text":"from ibapi.client import EClient, TickAttribLast, TickAttribBidAsk\nfrom ibapi.wrapper import EWrapper, iswrapper\nfrom ibapi.contract import Contract\n\nimport time\n\nfrom ibapp.dataclass.ConnectionParams import ConnectionParams\nfrom threading import Thread\n\n\nclass GetTickByTickData(EClient, EWrapper):\n\n def __init__(self, connection: ConnectionParams):\n EWrapper.__init__(self)\n EClient.__init__(self, self)\n\n # Connect to TWS\n self.connect(connection.address, connection.port, connection.client_id)\n\n # lists that will contains the data\n self.all_last_data = []\n self.bid_ask_data = []\n self.mid_point_data = []\n\n def error(self, req_id: int, code: str, msg: str):\n \"\"\"\n If TWS gets an 'error' this function is called.\n\n Args:\n req_id: the request identifier which generated the error. 
When req_id = -1 it indicates a notification\n code: the code identifying the error\n msg: error's description\n\n Returns: print the request id that generated the error code with its description\n \"\"\"\n\n print(f'Request Identifier : {req_id} - Error {code} : {msg}')\n\n @iswrapper\n def tickByTickAllLast(self, req_id: int, tick_type: int, time_var, price: float, size: int,\n tick_attrib_last: TickAttribLast, exchange: str, special_conditions: str):\n \"\"\"\n\n Args:\n req_id: request identifier\n tick_type: the type could be 'Last' or 'AllLast' depending on what you want.\n time_var: the timestamp\n price: the last price\n size: the last size\n tick_attrib_last: Tick attributes that describes additional information for price ticks\n exchange: the exchange platform for the transaction\n special_conditions: information about special conditions\n\n Returns: it returns the \"Last\" or \"AllLast\" tick-by-tick real-time tick\n\n \"\"\"\n\n self.all_last_data.append([time_var, price, size, exchange, special_conditions])\n\n @iswrapper\n def tickByTickBidAsk(self, req_id: int, time_var, bid_price: float, ask_price: float,\n bid_size: int, ask_size: int, tick_attrib_bid_ask: TickAttribBidAsk):\n \"\"\"\n\n Args:\n req_id:\n time_var:\n bid_price:\n ask_price:\n bid_size:\n ask_size:\n tick_attrib_bid_ask:\n\n Returns:\n\n \"\"\"\n self.bid_ask_data.append([time_var, bid_price, bid_size, ask_price, ask_size])\n\n @iswrapper\n def tickByTickMidPoint(self, req_id, time_var, mid_point):\n \"\"\"\n\n Args:\n req_id:\n time_var:\n mid_point:\n\n Returns:\n\n \"\"\"\n print(f'Request number {req_id} -> Tick-by-Tick Mid Point Data')\n self.bid_ask_data.append([time_var, mid_point])\n\n\ndef get_tick_by_tick_data(connect_params: ConnectionParams,\n contract_object: Contract,\n tick_type: str,\n number_of_ticks: int,\n ignore_size: bool):\n\n client = GetTickByTickData(connect_params)\n\n # generate request id\n request_id = 1\n\n client.reqTickByTickData(reqId=request_id,\n contract=contract_object,\n tickType=tick_type,\n numberOfTicks=number_of_ticks,\n ignoreSize=ignore_size)\n\n print('thread starting')\n\n thread = Thread(target=client.run(), daemon=True)\n thread.start()\n\n print('thread ending')\n\n return client\n\n\ncon_params = ConnectionParams(address='127.0.0.1', port=7497, client_id=0)\n\ncon = Contract()\ncon.symbol = 'CAD'\ncon.secType = 'CASH'\ncon.exchange = 'IDEAPRO'\ncon.currency = 'USD'\n\nclient = get_tick_by_tick_data(con_params, con, 'Last', 10, True)\n\n\n\n\n\n","sub_path":"ibapp/marketdata/RequestTickByTickData.py","file_name":"RequestTickByTickData.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569205986","text":"from django.shortcuts import render,HttpResponse, redirect\n\ndef index(request):\n return render(request,'dojo_index.html')\n\ndef processing(request):\n print(request.POST)\n name = request.POST['name']\n location = request.POST['location']\n language = request.POST['language']\n comments = request.POST['comments']\n\n return redirect(f'/results/{name}/{location}/{language}/{comments}')\n\ndef results(request, name, location, language, comments):\n context = {\n 'name' : name,\n 'location' : location,\n 'language' : language,\n 'comments' : comments,\n }\n return render(request, \"dojo_return.html\", 
context)","sub_path":"django/django_fundamentals/dojo_survey/dojo_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"414102921","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport pytest\nfrom libtbx import easy_run\nimport glob\n\ndef test_export_single_bitmap(dials_regression, run_in_tmpdir):\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n\n cmd = 'dials.export_bitmaps %s/centroid_0001.cbf' %data_dir\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n\n assert os.path.exists('image0001.png')\n\ndef test_export_multiple_bitmaps(dials_regression, run_in_tmpdir):\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n cmd = ' '.join([\n 'dials.export_bitmaps', '%s/datablock.json' %data_dir, 'prefix=variance_',\n 'binning=2', 'display=variance', 'colour_scheme=inverse_greyscale',\n 'brightness=25', 'kernel_size=5,5'])\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n\n for i in range(1, 8):\n assert os.path.exists('variance_000%i.png' %i)\n\ndef test_export_bitmap_with_prefix_and_no_padding(dials_regression, run_in_tmpdir):\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n cmd = 'dials.export_bitmaps %s/centroid_0001.cbf prefix=img_ padding=0' %data_dir\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n assert os.path.exists('img_1.png')\n\ndef test_export_bitmap_with_prefix_and_extra_padding(dials_regression, run_in_tmpdir):\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n cmd = 'dials.export_bitmaps %s/centroid_0001.cbf prefix=img_ padding=5' %data_dir\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n assert os.path.exists('img_00001.png')\n\ndef test_export_bitmap_with_specified_output_filename(dials_regression, run_in_tmpdir):\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n cmd = 'dials.export_bitmaps %s/centroid_0001.cbf output_file=kittens.png' %data_dir\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n assert os.path.exists('kittens.png')\n\ndef test_export_multiple_bitmaps_with_specified_output_filename_fails(dials_regression, run_in_tmpdir):\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n with pytest.raises(RuntimeError):\n # setting output filename not allowed with >1 image\n cmd = ' '.join([\n 'dials.export_bitmaps', '%s/datablock.json' %data_dir, 'output_file=kittens.png'])\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n\ndef test_export_still_image(dials_regression, run_in_tmpdir):\n image = os.path.join(dials_regression, 'image_examples', 'DLS_I24_stills', 'still_0001.cbf')\n\n cmd = 'dials.export_bitmaps %s' % image\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n\n assert os.path.exists('image0001.png')\n\ndef test_export_multi_panel(dials_regression, run_in_tmpdir):\n image = os.path.join(dials_regression, 'image_examples', 'DLS_I23', 'germ_13KeV_0001.cbf')\n\n for binning in (1, 4):\n cmd = 'dials.export_bitmaps %s binning=%i prefix=binning_%i_' % (\n image, binning, binning)\n result = easy_run.fully_buffered(cmd).raise_if_errors()\n\n assert os.path.exists('binning_%i_0001.png' % binning)\n\ndef test_export_restricted_multiimage(dials_regression, run_in_tmpdir):\n \"Test exporting a subset of an imageset\"\n data_dir = os.path.join(dials_regression, 'centroid_test_data')\n\n cmd = ' '.join([\n 'dials.export_bitmaps', 
'%s/datablock.json' %data_dir, 'imageset_index=2'])\n easy_run.fully_buffered(cmd).raise_if_errors()\n assert glob.glob(\"*.png\") == [\"image0002.png\"], \"Only one image exported\"\n","sub_path":"test/command_line/test_export_bitmaps.py","file_name":"test_export_bitmaps.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"237839251","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nimport util\nimport submit\nimport webapp2\nfrom webapp2_extras import json\nfrom google.appengine.api import users, app_identity, search, mail\nfrom google.appengine.ext import ndb, blobstore, deferred\nfrom models import *\n\n\nclass BaseHandler(webapp2.RequestHandler):\n\n @property\n def ui(self):\n user = users.get_current_user()\n if user:\n return UserInfo.get_or_insert(str(user.user_id()), user=user)\n\n def render(self, _template, _values):\n template = util.jinja_environment.get_template(_template)\n return template.render(_values)\n\n def send_json(self, _values):\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.encode(_values))\n\n def generate(self, template_name, template_values={}):\n if users.get_current_user():\n url = users.create_logout_url('/')\n urltext = 'Logout'\n else:\n url = users.create_login_url('/')\n urltext = 'Login'\n values = {\n 'url': url, 'urltext': urltext, 'ui': self.ui,\n 'brand': app_identity.get_application_id(),\n 'admin': users.is_current_user_admin()\n }\n values.update(template_values)\n template = util.jinja_environment.get_template(template_name)\n self.response.write(template.render(values))\n\n\nclass HomePage(BaseHandler):\n\n def get(self):\n if users.get_current_user():\n self.generate('home.html', {})\n else:\n self.response.set_cookie('active-tab', 'hero')\n self.generate('hero.html', {})\n\n\nclass Main_Frame(BaseHandler):\n\n def get(self, page):\n if users.get_current_user():\n bmq = self.bmq(page)\n cursor = ndb.Cursor(urlsafe=self.request.get('cursor'))\n self.build(page, bmq, cursor)\n else:\n self.redirect('/')\n\n def bmq(self, page):\n q1 = Bookmarks.query(\n Bookmarks.user == users.get_current_user()).order(-Bookmarks.data)\n q2 = q1.filter(Bookmarks.trashed == False)\n\n if page == 'archived':\n bmq = q2.filter(Bookmarks.archived == True)\n elif page == 'shared':\n bmq = q2.filter(Bookmarks.shared == True)\n elif page == 'starred':\n bmq = q2.filter(Bookmarks.starred == True)\n elif page == 'trashed':\n bmq = q1.filter(Bookmarks.trashed == True)\n elif page == 'domain':\n bmq = q1.filter(Bookmarks.domain == self.request.get('domain'))\n elif page == 'stream':\n bmq = Bookmarks.query(Bookmarks.trashed == False,\n Bookmarks.shared == True).order(-Bookmarks.data)\n else:\n bmq = q2.filter(Bookmarks.archived == False)\n return bmq\n\n def build(self, page, bmq, cursor):\n bms, next_curs, more = bmq.fetch_page(15, start_cursor=cursor)\n next_c = next_curs.urlsafe() if more else None\n if page == 'stream':\n html = self.render('stream.html', {'bms': bms})\n else:\n html = self.render('frame.html', {'bms': bms})\n more = self.render('more.html', {'cursor': next_c})\n self.response.set_cookie('active-tab', page)\n self.send_json({\"html\": html, \"more\": more})\n\n\nclass ItemPage(BaseHandler):\n\n def get(self, id):\n bm = Bookmarks.get_by_id(int(id))\n if bm.shared:\n self.generate('item.html', {'bm': bm})\n else:\n self.redirect('/')\n\n\nclass SettingPage(BaseHandler):\n\n def get(self):\n ui = self.ui\n 
upload_url = blobstore.create_upload_url('/upload')\n brand = app_identity.get_application_id()\n bookmarklet = \"\"\"\njavascript:location.href=\n'%s/submit?url='+encodeURIComponent(location.href)+\n'&title='+encodeURIComponent(document.title)+\n'&user='+'%s'+'&comment='+document.getSelection().toString()\n\"\"\" % (self.request.host_url, ui.email)\n self.response.set_cookie('mys', '%s' % ui.mys)\n self.response.set_cookie('daily', '%s' % ui.daily)\n self.response.set_cookie('active-tab', 'setting')\n self.generate('setting.html', {'bookmarklet': bookmarklet,\n 'upload_url': upload_url, 'brand': brand, })\n\n\nclass FeedsPage(BaseHandler):\n\n def get(self):\n feed_list = Feeds.query(Feeds.user == users.get_current_user()).order(Feeds.title)\n self.response.set_cookie('active-tab', 'feeds')\n self.generate('feeds.html', {'feeds': feed_list})\n\n\nclass EditBM(webapp2.RequestHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('bm')))\n if users.get_current_user() == bm.user:\n bm.url = self.request.get('url').encode('utf8')\n bm.title = self.request.get('title').encode('utf8')\n bm.comment = self.request.get('comment').encode('utf8')\n bm.put()\n self.redirect(self.request.referer)\n\n\nclass ArchiveBM(BaseHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('bm')))\n if users.get_current_user() == bm.user:\n if bm.trashed:\n bm.archived = False\n bm.trashed = False\n elif bm.archived:\n bm.archived = False\n else:\n bm.archived = True\n bm.put()\n\n\nclass TrashBM(BaseHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('bm')))\n if users.get_current_user() == bm.user:\n if bm.trashed is False:\n bm.archived = False\n bm.trashed = True\n bm.put()\n else:\n bm.key.delete()\n\n\nclass StarBM(webapp2.RequestHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('bm')))\n if users.get_current_user() == bm.user:\n if bm.starred is False:\n bm.starred = True\n html = ''\n else:\n bm.starred = False\n html = ''\n bm.put()\n self.response.write(html)\n\n\nclass ShareBM(webapp2.RequestHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('id')))\n if users.get_current_user() == bm.user:\n if bm.shared is False:\n bm.shared = True\n eye = ''\n else:\n bm.shared = False\n eye = ''\n bm.put()\n self.response.write(eye)\n\n\nclass cerca(BaseHandler):\n\n def post(self):\n user = users.get_current_user()\n query_string = self.request.get('query_string')\n try:\n results = search.Index(\n name='%s' % user.user_id()).search(query_string)\n bms_ids = [int(doc.doc_id) for doc in results]\n keys = [ndb.Key(Bookmarks, id) for id in bms_ids]\n bms = ndb.get_multi(keys)\n html = self.generate('frame.html', {'bms': bms})\n self.response.write(html)\n except search.Error:\n pass\n\n\nclass GetComment(webapp2.RequestHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('bm')))\n self.response.write(bm.comment)\n\n\nclass GetEdit(webapp2.RequestHandler):\n\n def get(self):\n bm = Bookmarks.get_by_id(int(self.request.get('bm')))\n self.render('edit.html', {'bm': bm})\n\n\nclass CheckFeed(webapp2.RequestHandler):\n\n def get(self):\n feed = Feeds.get_by_id(int(self.request.get('feed')))\n deferred.defer(submit.pop_feed, feed.key)\n\n#\n# Setting page\n#\n\n\nclass SetMys(webapp2.RequestHandler):\n\n def get(self):\n ui = UserInfo.query(UserInfo.user == users.get_current_user()).get()\n if ui.mys is False:\n ui.mys = True\n html = ' Enabled '\n else:\n ui.mys = False\n html = ' 
Disabled'\n ui.put()\n self.response.write(html)\n\n\nclass SetDaily(webapp2.RequestHandler):\n\n def get(self):\n ui = UserInfo.query(UserInfo.user == users.get_current_user()).get()\n if ui.daily is False:\n ui.daily = True\n html = ' Enabled '\n else:\n ui.daily = False\n html = ' Disabled'\n ui.put()\n self.response.write(html)\n\n\nclass SetNotify(webapp2.RequestHandler):\n\n def get(self):\n feed = Feeds.get_by_id(int(self.request.get('feed')))\n feed.notify = self.request.get('notify')\n feed.put()\n\n\nclass ReceiveMail(webapp2.RequestHandler):\n\n def post(self):\n from email import utils\n message = mail.InboundEmailMessage(self.request.body)\n texts = message.bodies('text/plain')\n for text in texts:\n txtmsg = \"\"\n txtmsg = text[1].decode().strip()\n submit.submit_bm(feed=None,\n user=users.User(utils.parseaddr(message.sender)[1]),\n url=txtmsg.encode('utf8'),\n title=self.get_subject(\n txtmsg.encode('utf8'), message),\n comment='Sent via email')\n\n def get_subject(self, o, message):\n from email import header\n try:\n return header.decode_header(message.subject)[0][0]\n except:\n return o\n\n\napp = webapp2.WSGIApplication([\n ('/', HomePage),\n ('/search', cerca),\n (r'/bms/(.*)', Main_Frame),\n ('/submit', submit.AddBM),\n ('/copy', submit.CopyBM),\n ('/upload', submit.UploadDelicious),\n ('/feed', submit.AddFeed),\n ('/edit', EditBM),\n ('/checkfeed', CheckFeed),\n ('/archive', ArchiveBM),\n ('/trash', TrashBM),\n ('/setting', SettingPage),\n ('/feeds', FeedsPage),\n ('/setmys', SetMys),\n ('/setdaily', SetDaily),\n ('/setnotify', SetNotify),\n ('/star', StarBM),\n ('/share', ShareBM),\n ('/getcomment', GetComment),\n ('/getedit', GetEdit),\n (r'/bm/(.*)', ItemPage),\n ('/_ah/mail/post@.*', ReceiveMail),\n], debug=util.debug, config=util.config)\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"20077901","text":"\"\"\"A python wrapper around bzip2 benchmark.\"\"\"\nimport subprocess\nimport sys\nimport typing\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nfrom datasets.benchmarks.proto import benchmarks_pb2\nfrom lib.labm8 import bazelutil\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\n 'bzip2_timeout_seconds', 60,\n 'The maximum number of seconds to allow process to run.')\n\n# Path to bzip2.\nBZIP2 = bazelutil.DataPath('bzip2/bzip2')\n\n# Source files for bzip2.\nBZIP2_SRCS = [\n bazelutil.DataPath('bzip2/blocksort.c'),\n bazelutil.DataPath('bzip2/bzlib.c'),\n bazelutil.DataPath('bzip2/compress.c'),\n bazelutil.DataPath('bzip2/crctable.c'),\n bazelutil.DataPath('bzip2/decompress.c'),\n bazelutil.DataPath('bzip2/huffman.c'),\n bazelutil.DataPath('bzip2/randtable.c'),\n bazelutil.DataPath('bzip2/bzip2.c'),\n]\n\n# Header files for bzip2.\nBZIP2_HEADERS = [\n bazelutil.DataPath('bzip2/bzlib.h'),\n bazelutil.DataPath('bzip2/bzlib_private.h'),\n]\n\n\nclass Bzip2Timeout(EnvironmentError):\n \"\"\"Eror raised in case of time out.\"\"\"\n pass\n\n\ndef Exec(data: str, args: typing.List[str],\n timeout_seconds: int = 60) -> subprocess.Popen:\n \"\"\"Run bzip2.\n\n Args:\n args: A list of arguments to pass to binary.\n timeout_seconds: The number of seconds to allow clang-format to run for.\n\n Returns:\n A Popen instance with stdout and stderr set to strings.\n \"\"\"\n cmd = ['timeout', '-s9', str(timeout_seconds), str(BZIP2)] + args\n logging.debug('$ 
%s', ' '.join(cmd))\n process = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE)\n stdout, stderr = process.communicate(data)\n if process.returncode == 9:\n raise Bzip2Timeout(f'bzip2 timed out after {timeout_seconds}s')\n process.stdout = stdout\n process.stderr = stderr\n return process\n\n\nBzip2 = benchmarks_pb2.Benchmark(\n name='bzip2',\n binary=str(BZIP2),\n srcs=[str(x) for x in BZIP2_SRCS],\n hdrs=[str(x) for x in BZIP2_HEADERS],\n)\n\n# A list of all benchmarks in this file.\nBENCHMARKS = [\n Bzip2,\n]\n\n\ndef main(argv):\n \"\"\"Main entry point.\"\"\"\n try:\n proc = Exec(argv[1:], timeout_seconds=FLAGS.opt_timeout_seconds)\n if proc.stdout:\n print(proc.stdout)\n if proc.stderr:\n print(proc.stderr, file=sys.stderr)\n sys.exit(proc.returncode)\n except Bzip2Timeout as e:\n print(e, file=sys.stderr)\n sys.exit(1)\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"datasets/benchmarks/bzip2.py","file_name":"bzip2.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"524362446","text":"\"\"\"\nfrom\n\"Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition\"\nhttp://arxiv.org/abs/1406.4729\n\"\"\"\nimport theano\nimport theano.tensor as T\nimport thu\n\nfX = theano.config.floatX\n\n\ndef spatial_pyramid_pooling2d(tensor, levels, mode=\"max\"):\n s1, s2 = thu.shape(tensor)[-2:]\n results = []\n for l1, l2 in levels:\n assert (s1 % l1) == 0\n assert (s2 % l2) == 0\n res = thu.pool2d(tensor, pool_size=(s1 / l1, s2 / l2), mode=mode)\n results.append(res)\n return T.concatenate([res.flatten(2) for res in results], axis=1)\n","sub_path":"thu/sandbox/spp.py","file_name":"spp.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"151575250","text":"from django.conf.urls.defaults import *\nfrom django.conf import *\nfrom memo.views import *\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Example:\n # (r'^assistente/', include('assistente.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n \n \n (r'^$', include('core.urls', namespace='core')), \n \n # Memos\n (r'^memo/', include('memo.urls', namespace='memo')),\n\n # Uncomment the next line to enable the admin:\n (r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(?P.*)$',\n 'django.views.static.serve',\n { 'document_root': settings.MEDIA_ROOT }),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"348357537","text":"# Copyright (c) 2014 The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n\nimport hashlib\nimport numpy\nimport math\n\n\ndef expand_to_bit_array(value):\n \"\"\"\n Expand a 32-bit value in to an array of length 32 of uint8 values,\n each of which is a 1 or 0.\n\n :param int value: The value to expand\n :rtype: ~numpy.ndarray(uint8)\n \"\"\"\n return numpy.unpackbits(\n numpy.asarray([value], dtype=\">u4\").view(dtype=\"uint8\"))\n\n\ndef compress_from_bit_array(bit_array):\n \"\"\"\n Compress a bit array of 32 uint8 values, where each is a 1 or 0,\n into a 32-bit value.\n\n :param ~numpy.ndarray(uint8) bit_array: The array to compress\n :rtype: int\n \"\"\"\n return numpy.packbits(bit_array).view(dtype=\">u4\")[0].item()\n\n\ndef compress_bits_from_bit_array(bit_array, bit_positions):\n \"\"\"\n Compress specific positions from a bit array of 32 uint8 value,\n where is a 1 or 0, into a 32-bit value.\n\n :param ~numpy.ndarray(uint8) bit_array:\n The array to extract the value from\n :param ~numpy.ndarray(int) bit_positions:\n The positions of the bits to extract,\n each value being between 0 and 31\n :rtype: int\n \"\"\"\n expanded_value = numpy.zeros(32, dtype=\"uint8\")\n expanded_value[-len(bit_positions):] = bit_array[bit_positions]\n return compress_from_bit_array(expanded_value)\n\n\ndef is_equal_or_None(a, b):\n \"\"\"\n If a and b are both not `None`, return True if and only if they are equal,\n otherwise return True.\n\n :rtype: bool\n \"\"\"\n return (a is None or b is None or a == b)\n\n\ndef is_single(iterable):\n \"\"\"\n Test if there is exactly one item in the iterable.\n\n :rtype: bool\n \"\"\"\n iterator = iter(iterable)\n\n # Test if there is a first item, if not return False\n if next(iterator, None) is None:\n return False\n\n # Test if there is a second item, if not return True\n if next(iterator, None) is None:\n return True\n\n # Otherwise return False\n return False\n\n\ndef md5(string):\n \"\"\"\n Get the MD5 hash of the given string, which is UTF-8 encoded.\n\n :param str string:\n :rtype: str\n \"\"\"\n return hashlib.md5(string.encode()).hexdigest()\n\n\ndef get_key_ranges(key, mask):\n \"\"\"\n Get a generator of base_key, n_keys pairs that represent ranges\n allowed by the mask.\n\n :param int key: The base key\n :param int mask: The mask\n :rtype: iterable(tuple(int,int))\n \"\"\"\n unwrapped_mask = expand_to_bit_array(mask)\n first_zeros = list()\n remaining_zeros = list()\n pos = len(unwrapped_mask) - 1\n\n # Keep the indices of the first set of zeros\n while pos >= 0 and unwrapped_mask[pos] == 0:\n first_zeros.append(pos)\n pos -= 1\n\n # Find all the remaining zeros\n while pos >= 0:\n if unwrapped_mask[pos] == 0:\n remaining_zeros.append(pos)\n pos -= 1\n\n # Loop over 2^len(remaining_zeros) to produce the base key,\n # with n_keys being 2^len(first_zeros)\n n_sets = 2 ** len(remaining_zeros)\n n_keys = 2 ** len(first_zeros)\n if not remaining_zeros:\n yield key, n_keys\n return\n unwrapped_key = expand_to_bit_array(key)\n for value in range(n_sets):\n generated_key = numpy.copy(unwrapped_key)\n generated_key[remaining_zeros] = \\\n expand_to_bit_array(value)[-len(remaining_zeros):]\n yield compress_from_bit_array(generated_key), n_keys\n\n\ndef get_n_bits(n_values):\n \"\"\"\n Determine how many bits are required for the given number of values.\n\n :param int n_values: the number of values (starting at 0)\n :return: the number of bits required to express that many values\n :rtype: int\n \"\"\"\n if n_values == 0:\n return 0\n if n_values == 1:\n return 1\n return 
int(math.ceil(math.log2(n_values)))\n\n\ndef get_field_based_keys(key, vertex_slice, shift=0):\n \"\"\"\n Translate a vertex slice with potentially multiple dimensions into\n a list of keys, one for each atom of the vertex, by putting the values\n into fields of the keys based on the shape of the slice.\n\n :param int key: The base key\n :param Slice vertex_slice: The slice to translate\n :param int shift:\n The left shift to apply to the atom key before adding to the key. Can\n be used to make space for additional information at the bottom of the\n key.\n :rtype: list(int)\n \"\"\"\n # Find the size of field required for each coordinate, and the shift\n # required to get to this field position (the first field has a shift\n # of 0)\n field_sizes = numpy.array([get_n_bits(n) for n in vertex_slice.shape])\n shifts = numpy.concatenate(([0], numpy.cumsum(field_sizes[:-1])))\n\n # Convert each atom into x, y coordinates based on shape\n # This uses numpy.unravel_index, the result of which needs to be\n # made into an array (it is a list of tuples) and transposed (it\n # gives the coordinates separately per axis)\n coords = numpy.array(numpy.unravel_index(\n numpy.arange(vertex_slice.n_atoms),\n vertex_slice.shape, order='F')).T\n\n # We now left shift each coordinate into its field and add them up to\n # get the key\n keys = numpy.sum(numpy.left_shift(coords, shifts), axis=1)\n\n # Do any final shifting as required (zero shift is valid but does nothing)\n if shift:\n keys = numpy.left_shift(keys, shift)\n\n # The final result is the above with the base key added\n return keys + key\n\n\ndef get_field_based_index(base_key, vertex_slice, shift=0):\n \"\"\"\n Map field based keys back to indices.\n\n :param int base_key: The base key\n :param Slice vertex_slice: The slice to translate\n :param int shift:\n The left shift to apply to the atom key before adding to the key. 
Can\n be used to make space for additional information at the bottom of the\n key.\n :rtype: dict(int,int)\n \"\"\"\n # Get the field based keys\n field_based_keys = get_field_based_keys(base_key, vertex_slice, shift)\n\n # Inverse the index\n return {\n key: i\n for i, key in enumerate(field_based_keys)\n }\n\n\ndef get_n_bits_for_fields(field_sizes):\n \"\"\"\n Get the number of bits required for the fields in the vertex slice.\n\n :param iterable(int) field_sizes: The sizes each of the fields\n :rtype: int\n \"\"\"\n field_size = [get_n_bits(n) for n in field_sizes]\n return sum(field_size)\n\n\ndef allocator_bits_needed(size):\n \"\"\"\n Get the bits needed for the routing info allocator.\n\n :param int size: The size to calculate the number of bits for\n :return: the number of bits required for that size\n :rtype: int\n \"\"\"\n if size == 0:\n return 0\n return int(math.ceil(math.log2(size)))\n","sub_path":"pacman/utilities/utility_calls.py","file_name":"utility_calls.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"69845293","text":"from helpers import *\nimport scipy\n\nfrom animation.animation import Animation\nfrom animation.transform import *\nfrom animation.simple_animations import *\nfrom animation.playground import *\nfrom animation.continual_animation import *\nfrom topics.geometry import *\nfrom topics.characters import *\nfrom topics.functions import *\nfrom topics.fractals import *\nfrom topics.number_line import *\nfrom topics.combinatorics import *\nfrom topics.numerals import *\nfrom topics.three_dimensions import *\nfrom topics.objects import *\nfrom topics.probability import *\nfrom topics.complex_numbers import *\nfrom topics.common_scenes import *\nfrom scene import Scene\nfrom scene.reconfigurable_scene import ReconfigurableScene\nfrom scene.zoomed_scene import *\nfrom camera import Camera\nfrom mobject import *\nfrom mobject.image_mobject import *\nfrom mobject.vectorized_mobject import *\nfrom mobject.svg_mobject import *\nfrom mobject.tex_mobject import *\nfrom topics.graph_scene import *\n\nfrom active_projects.fourier import *\n\n\nFREQUENCY_COLOR = RED\nUSE_ALMOST_FOURIER_BY_DEFAULT = False\n\nclass GaussianDistributionWrapper(Line):\n \"\"\"\n This is meant to encode a 2d normal distribution as\n a mobject (so as to be able to have it be interpolated\n during animations). 
It is a line whose center is the mean\n mu of a distribution, and whose radial vector (center to end)\n is the distribution's standard deviation\n \"\"\"\n CONFIG = {\n \"stroke_width\" : 0,\n \"mu\" : ORIGIN,\n \"sigma\" : RIGHT,\n }\n def __init__(self, **kwargs):\n Line.__init__(self, ORIGIN, RIGHT, **kwargs)\n self.change_parameters(self.mu, self.sigma)\n\n def change_parameters(self, mu = None, sigma = None):\n curr_mu, curr_sigma = self.get_parameters()\n mu = mu if mu is not None else curr_mu\n sigma = sigma if sigma is not None else curr_sigma\n self.put_start_and_end_on(mu - sigma, mu + sigma)\n return self\n\n def get_parameters(self):\n \"\"\" Return mu_x, mu_y, sigma_x, sigma_y\"\"\"\n center, end = self.get_center(), self.get_end()\n return center, end-center\n\n def get_random_points(self, size = 1):\n mu, sigma = self.get_parameters()\n return np.array([\n np.array([\n np.random.normal(mu_coord, sigma_coord)\n for mu_coord, sigma_coord in zip(mu, sigma)\n ])\n for x in range(size)\n ])\n\nclass ProbabalisticMobjectCloud(ContinualAnimation):\n CONFIG = {\n \"fill_opacity\" : 0.25,\n \"n_copies\" : 100,\n \"gaussian_distribution_wrapper_config\" : {}\n }\n def __init__(self, prototype, **kwargs):\n digest_config(self, kwargs)\n fill_opacity = self.fill_opacity or prototype.get_fill_opacity()\n self.gaussian_distribution_wrapper = GaussianDistributionWrapper(\n **self.gaussian_distribution_wrapper_config\n )\n group = VGroup(*[\n prototype.copy().set_fill(opacity = fill_opacity)\n for x in range(self.n_copies)\n ])\n ContinualAnimation.__init__(self, group, **kwargs)\n\n def update_mobject(self, dt):\n group = self.mobject\n points = self.gaussian_distribution_wrapper.get_random_points(len(group))\n for mob, point in zip(group, points):\n self.update_mobject_by_point(mob, point)\n return self\n\n def update_mobject_by_point(self, mobject, point):\n mobject.move_to(point)\n return self\n\nclass ProbabalisticDotCloud(ProbabalisticMobjectCloud):\n CONFIG = {\n \"color\" : BLUE,\n }\n def __init__(self, **kwargs):\n digest_config(self, kwargs)\n dot = Dot(color = self.color)\n ProbabalisticMobjectCloud.__init__(self, dot)\n\nclass ProbabalisticVectorCloud(ProbabalisticMobjectCloud):\n CONFIG = {\n \"color\" : RED,\n \"n_copies\" : 20,\n \"fill_opacity\" : 0.5,\n \"center_func\" : lambda : ORIGIN,\n }\n def __init__(self, **kwargs):\n digest_config(self, kwargs)\n vector = Vector(\n RIGHT, color = self.color,\n max_tip_length_to_length_ratio = 1,\n )\n ProbabalisticMobjectCloud.__init__(self, vector)\n\n def update_mobject_by_point(self, vector, point):\n vector.put_start_and_end_on(\n self.center_func(),\n point\n )\n\nclass RadarDish(SVGMobject):\n CONFIG = {\n \"file_name\" : \"radar_dish\",\n \"fill_color\" : LIGHT_GREY,\n \"stroke_color\" : WHITE,\n \"stroke_width\" : 1,\n \"height\" : 1,\n }\n\nclass Plane(SVGMobject):\n CONFIG = {\n \"file_name\" : \"plane\",\n \"color\" : GREY,\n \"height\" : 1,\n }\n def __init__(self, **kwargs):\n SVGMobject.__init__(self, **kwargs)\n self.rotate(-TAU/8)\n\nclass RadarPulseSingleton(ContinualAnimation):\n CONFIG = {\n \"speed\" : 3.0,\n \"direction\" : RIGHT,\n \"start_up_time\" : 0,\n \"fade_in_time\" : 0.5,\n \"color\" : WHITE,\n \"stroke_width\" : 3,\n }\n def __init__(self, radar_dish, target, **kwargs):\n digest_config(self, kwargs)\n self.direction = self.direction/np.linalg.norm(self.direction)\n self.radar_dish = radar_dish\n self.target = target\n self.reflection_distance = None\n self.arc = Arc(\n start_angle = -30*DEGREES,\n 
angle = 60*DEGREES,\n )\n self.arc.scale_to_fit_height(0.75*radar_dish.get_height())\n self.arc.move_to(radar_dish, UP+RIGHT)\n self.start_points = np.array(self.arc.points)\n self.start_center = self.arc.get_center()\n self.finished = False\n\n ContinualAnimation.__init__(self, self.arc, **kwargs)\n \n def update_mobject(self, dt):\n arc = self.arc\n total_distance = self.speed*self.internal_time\n arc.points = np.array(self.start_points)\n arc.shift(total_distance*self.direction)\n\n if self.internal_time < self.fade_in_time:\n alpha = np.clip(self.internal_time/self.fade_in_time, 0, 1)\n arc.set_stroke(self.color, alpha*self.stroke_width)\n\n if self.reflection_distance is None:\n #Check if reflection is happening\n arc_point = arc.get_edge_center(self.direction)\n target_point = self.target.get_edge_center(-self.direction)\n arc_distance = np.dot(arc_point, self.direction)\n target_distance = np.dot(target_point, self.direction)\n if arc_distance > target_distance:\n self.reflection_distance = target_distance\n #Don't use elif in case the above code creates reflection_distance\n if self.reflection_distance is not None:\n delta_distance = total_distance - self.reflection_distance\n point_distances = np.dot(self.direction, arc.points.T)\n diffs = point_distances - self.reflection_distance\n shift_vals = np.outer(-2*np.maximum(diffs, 0), self.direction)\n arc.points += shift_vals\n\n #Check if done\n arc_point = arc.get_edge_center(-self.direction)\n if np.dot(arc_point, self.direction) < np.dot(self.start_center, self.direction):\n self.finished = True\n self.arc.fade(1)\n\n def is_finished(self):\n return self.finished\n\nclass RadarPulse(ContinualAnimation):\n CONFIG = {\n \"n_pulse_singletons\" : 8,\n \"frequency\" : 0.05,\n \"colors\" : [BLUE, YELLOW]\n }\n def __init__(self, *args, **kwargs):\n digest_config(self, kwargs)\n colors = color_gradient(self.colors, self.n_pulse_singletons)\n self.pulse_singletons = [\n RadarPulseSingleton(*args, color = color, **kwargs)\n for color in colors\n ]\n pluse_mobjects = VGroup(*[ps.mobject for ps in self.pulse_singletons])\n ContinualAnimation.__init__(self, pluse_mobjects, **kwargs)\n \n def update_mobject(self, dt):\n for i, ps in enumerate(self.pulse_singletons):\n ps.internal_time = self.internal_time - i*self.frequency\n ps.update_mobject(dt)\n\n def is_finished(self):\n return all([ps.is_finished() for ps in self.pulse_singletons])\n\n###################\n\nclass MentionUncertaintyPrinciple(TeacherStudentsScene):\n def construct(self):\n title = TextMobject(\"Heisenberg Uncertainty Principle\")\n title.to_edge(UP)\n\n dot_cloud = ProbabalisticDotCloud()\n vector_cloud = ProbabalisticVectorCloud(\n gaussian_distribution_wrapper_config = {\"sigma_x\" : 0.2},\n center_func = lambda : dot_cloud.gaussian_distribution_wrapper.get_parameters()[0],\n )\n for cloud in dot_cloud, vector_cloud:\n cloud.gaussian_distribution_wrapper.next_to(\n title, DOWN, 2*LARGE_BUFF\n )\n vector_cloud.gaussian_distribution_wrapper.shift(3*RIGHT)\n\n def get_brace_text_group_update(gdw, vect, text, color):\n brace = Brace(gdw, vect)\n text = brace.get_tex(\"2\\\\sigma_{\\\\text{%s}}\"%text, buff = SMALL_BUFF)\n group = VGroup(brace, text)\n def update_group(group):\n brace, text = group\n brace.match_width(gdw, stretch = True)\n brace.next_to(gdw, vect)\n text.next_to(brace, vect, buff = SMALL_BUFF)\n group.highlight(color)\n return ContinualUpdateFromFunc(group, update_group)\n\n dot_brace_anim = get_brace_text_group_update(\n 
dot_cloud.gaussian_distribution_wrapper,\n DOWN, \"position\", dot_cloud.color\n )\n vector_brace_anim = get_brace_text_group_update(\n vector_cloud.gaussian_distribution_wrapper,\n UP, \"momentum\", vector_cloud.color\n )\n\n self.add(title)\n self.add(dot_cloud)\n self.play(\n Write(title),\n self.teacher.change, \"raise_right_hand\",\n self.get_student_changes(*[\"pondering\"]*3)\n )\n self.play(\n Write(dot_brace_anim.mobject, run_time = 1)\n )\n self.add(dot_brace_anim)\n self.wait()\n # self.wait(2)\n self.play(\n dot_cloud.gaussian_distribution_wrapper.change_parameters, \n {\"sigma\" : 0.1*RIGHT},\n run_time = 2,\n )\n self.wait()\n self.add(vector_cloud)\n self.play(\n FadeIn(vector_brace_anim.mobject)\n )\n self.add(vector_brace_anim)\n self.play(\n vector_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : RIGHT},\n self.get_student_changes(*3*[\"confused\"]),\n run_time = 3,\n )\n #Back and forth\n for x in range(2):\n self.play(\n dot_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 2*RIGHT},\n vector_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 0.1*RIGHT},\n run_time = 3,\n )\n self.change_student_modes(\"thinking\", \"erm\", \"sassy\")\n self.play(\n dot_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 0.1*RIGHT},\n vector_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 1*RIGHT},\n run_time = 3,\n )\n self.wait()\n\nclass FourierTradeoff(Scene):\n def construct(self):\n #Setup axes\n time_mean = 4\n time_axes = Axes(\n x_min = 0,\n x_max = 2*time_mean,\n x_axis_config = {\"unit_size\" : 1.5},\n y_min = -2, \n y_max = 2,\n y_axis_config = {\"unit_size\" : 0.5}\n )\n time_label = TextMobject(\"Time\")\n time_label.next_to(\n time_axes.x_axis.get_right(), UP,\n buff = MED_SMALL_BUFF,\n )\n time_axes.add(time_label)\n time_axes.center().to_edge(UP)\n time_axes.x_axis.add_numbers(*range(1, 2*time_mean))\n\n frequency_axes = Axes(\n x_min = 0,\n x_max = 8,\n x_axis_config = {\"unit_size\" : 1.5},\n y_min = 0,\n y_max = 15,\n y_axis_config = {\n \"unit_size\" : 0.15,\n \"tick_frequency\" : 5,\n },\n color = TEAL,\n )\n frequency_label = TextMobject(\"Frequency\")\n frequency_label.next_to(\n frequency_axes.x_axis.get_right(), UP,\n buff = MED_SMALL_BUFF, \n )\n frequency_label.highlight(FREQUENCY_COLOR)\n frequency_axes.add(frequency_label)\n frequency_axes.move_to(time_axes, LEFT)\n frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)\n frequency_axes.x_axis.add_numbers()\n\n # Graph information\n\n #x-coordinate of this point determines width of wave_packet graph\n width_tracker = VectorizedPoint(0.5*RIGHT)\n def get_width():\n return width_tracker.get_center()[0]\n\n def get_wave_packet_function():\n factor = 1./get_width()\n return lambda t : np.sqrt(factor)*np.cos(4*TAU*t)*np.exp(-factor*(t-time_mean)**2)\n\n def get_wave_packet():\n graph = time_axes.get_graph(\n get_wave_packet_function(),\n num_graph_points = 200,\n )\n graph.highlight(YELLOW)\n return graph\n\n time_radius = 10\n def get_wave_packet_fourier_transform():\n return get_fourier_graph(\n frequency_axes, get_wave_packet_function(),\n t_min = time_mean - time_radius,\n t_max = time_mean + time_radius,\n n_samples = 2*time_radius*17,\n # complex_to_real_func = abs,\n complex_to_real_func = lambda z : z.real,\n color = FREQUENCY_COLOR,\n )\n\n wave_packet = get_wave_packet()\n wave_packet_update = UpdateFromFunc(\n wave_packet, \n lambda g : Transform(g, get_wave_packet()).update(1)\n )\n fourier_graph = 
get_wave_packet_fourier_transform()\n fourier_graph_update = UpdateFromFunc(\n fourier_graph, \n lambda g : Transform(g, get_wave_packet_fourier_transform()).update(1)\n )\n\n arrow = Arrow(\n wave_packet, frequency_axes.coords_to_point(4, 10),\n color = FREQUENCY_COLOR,\n )\n fourier_words = TextMobject(\"Fourier Transform\")\n fourier_words.next_to(arrow, RIGHT, buff = MED_LARGE_BUFF)\n sub_words = TextMobject(\"(To be explained shortly)\")\n sub_words.highlight(BLUE)\n sub_words.scale(0.75)\n sub_words.next_to(fourier_words, DOWN)\n\n #Draw items\n self.add(time_axes, frequency_axes)\n self.play(ShowCreation(wave_packet, rate_func = double_smooth))\n self.play(\n ReplacementTransform(\n wave_packet.copy(),\n fourier_graph,\n ),\n GrowArrow(arrow),\n Write(fourier_words, run_time = 1)\n )\n # self.play(FadeOut(arrow))\n self.wait()\n for width in 6, 0.1, 1:\n self.play(\n width_tracker.move_to, width*RIGHT,\n wave_packet_update,\n fourier_graph_update,\n run_time = 3\n )\n if sub_words not in self.mobjects:\n self.play(FadeIn(sub_words))\n else:\n self.wait()\n self.wait()\n\nclass ShowPlan(PiCreatureScene):\n def construct(self):\n self.add_title()\n words = self.get_words()\n self.play_sound_anims(words[0])\n self.play_doppler_anims(words[1], words[0])\n self.play_quantum_anims(words[2], words[1])\n\n def add_title(self):\n title = TextMobject(\"The plan\")\n title.scale(1.5)\n title.to_edge(UP)\n h_line = Line(LEFT, RIGHT).scale(SPACE_WIDTH)\n h_line.next_to(title, DOWN)\n self.add(title, h_line)\n\n def get_words(self):\n colors = [YELLOW, GREEN, BLUE]\n topics = [\"sound waves\", \"Doppler radar\", \"quantum particles\"]\n words = VGroup()\n for topic, color in zip(topics, colors):\n word = TextMobject(\"Uncertainty for\", topic)\n word[1].highlight(color)\n words.add(word)\n words.arrange_submobjects(DOWN, aligned_edge = LEFT, buff = LARGE_BUFF)\n words.to_edge(LEFT)\n\n return words\n\n def play_sound_anims(self, word):\n morty = self.pi_creature\n wave = FunctionGraph(\n lambda x : 0.3*np.sin(15*x)*np.sin(0.5*x),\n x_min = 0, x_max = 30,\n num_anchor_points = 500,\n )\n wave.next_to(word, RIGHT)\n rect = BackgroundRectangle(wave, fill_opacity = 1)\n rect.stretch(2, 1)\n rect.next_to(wave, LEFT, buff = 0)\n wave_shift = AmbientMovement(\n wave, direction = LEFT, rate = 5\n )\n wave_fader = UpdateFromAlphaFunc(\n wave, \n lambda w, a : w.set_stroke(width = 3*a)\n )\n checkmark = self.get_checkmark(word)\n\n self.add(wave_shift)\n self.add_foreground_mobjects(rect, word)\n self.play(\n Animation(word),\n wave_fader,\n morty.change, \"raise_right_hand\", word\n )\n self.wait(2)\n wave_fader.rate_func = lambda a : 1-smooth(a)\n self.add_foreground_mobjects(checkmark)\n self.play(\n Write(checkmark),\n morty.change, \"happy\",\n wave_fader, \n )\n self.remove_foreground_mobjects(rect, word)\n self.add(word)\n self.wait()\n\n def play_doppler_anims(self, word, to_fade):\n morty = self.pi_creature\n\n radar_dish = RadarDish()\n radar_dish.next_to(word, DOWN, aligned_edge = LEFT)\n target = Plane()\n # target.match_height(radar_dish)\n target.next_to(radar_dish, RIGHT, buff = LARGE_BUFF)\n target_movement = AmbientMovement(target, direction = RIGHT, rate = 1.25)\n\n pulse = RadarPulse(radar_dish, target)\n\n checkmark = self.get_checkmark(word)\n\n self.add(target_movement)\n self.play(\n to_fade.fade, 0.5,\n Write(word),\n DrawBorderThenFill(radar_dish),\n UpdateFromAlphaFunc(\n target, lambda m, a : m.set_fill(opacity = a)\n ),\n morty.change, \"pondering\",\n run_time = 1\n )\n 
self.wait()\n self.add(pulse)\n count = it.count() #TODO, this is not a great hack...\n while not pulse.is_finished() and count.next() < 15:\n self.play(\n morty.look_at, pulse.mobject,\n run_time = 0.5\n )\n self.play(\n Write(checkmark),\n UpdateFromAlphaFunc(\n target, lambda m, a : m.set_fill(opacity = 1-a)\n ),\n FadeOut(radar_dish),\n morty.change, \"happy\"\n )\n self.wait()\n\n\n\n\n def play_quantum_anims(self, word, to_fade):\n pass\n\n ##\n\n def get_checkmark(self, word):\n checkmark = TexMobject(\"\\\\checkmark\")\n checkmark.highlight(GREEN)\n checkmark.scale(1.5)\n checkmark.next_to(word, UP+RIGHT, buff = 0)\n return checkmark\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"active_projects/uncertainty.py","file_name":"uncertainty.py","file_ext":"py","file_size_in_byte":19421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"310744612","text":"# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project\n# All rights reserved.\n#\n# This file is part of NeuroM \n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of\n# its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n'''Test neurom.ezy.Population'''\n\nimport os\nfrom nose import tools as nt\nfrom itertools import izip\nfrom neurom.ezy import load_population\nfrom neurom.ezy.population import Population\nfrom neurom.core.types import TreeType\n\n_path = os.path.dirname(os.path.abspath(__file__))\nDATA_PATH = os.path.join(_path, '../../../test_data')\nVALID_DIR = os.path.join(DATA_PATH, 'valid_set')\n\ndef test_construct_population():\n pop = load_population(VALID_DIR)\n nt.ok_(pop is not None)\n\n\nclass TestEzyPopulation(object):\n\n def setUp(self):\n self.directory = VALID_DIR\n self.pop = load_population(VALID_DIR)\n self.n_somata = len(self.pop.somata)\n\n def test_iter_somata(self):\n sm_it = self.pop.iter_somata()\n for soma in self.pop.somata:\n nt.assert_almost_equal(sm_it.next().radius, soma.radius)\n\n def test_get_n_neurites(self):\n nrts_all = sum(len(neuron.neurites) for neuron in self.pop.neurons)\n nt.assert_equal(nrts_all, self.pop.get_n_neurites())\n\n nrts_axons = sum(nrn.get_n_neurites(neurite_type=TreeType.axon) for nrn in self.pop.neurons)\n nt.assert_equal(nrts_axons, self.pop.get_n_neurites(neurite_type=TreeType.axon))\n\n\n def test_iter_neurites(self):\n nrts_it = self.pop.iter_neurites()\n nt.assert_equal(len(self.pop.neurites), len(list(nrts_it)))\n\n def test_iter_neurons(self):\n nrns_it = self.pop.iter_neurons()\n nt.assert_equal(len(self.pop.neurons), len(list(nrns_it)))\n","sub_path":"neurom/ezy/tests/test_population.py","file_name":"test_population.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423594699","text":"from typing import final\nimport numpy as np\nimport pandas as pd\nfrom pulp import *\nfrom itertools import permutations\nimport os \nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\ndef get_routes(weekday): \n \n # Initialise route array\n routes = [] \n \n # Get current directory \n directory = os.getcwd()\n\n if weekday: \n # Read in routes for each region from the 'weekday' folder\n for i in range (1, 7):\n # Get path for file name\n folder = (directory + os.sep + 'results_closure' + os.sep + 'weekday')\n filename = folder + (os.sep + 'Routes - Region ' + str(i))\n # Open the file\n file = open(filename, 'r')\n # Add each route to the list of routes \n routes += file.readlines()\n # Close file \n file.close()\n else:\n # Read in routes for each region from the 'weekend' folder \n for i in range (1, 7):\n # Get path for file name \n folder = (directory + os.sep + 'results_closure' + os.sep + 'weekend')\n filename = folder + (os.sep + 'Routes - Region ' + str(i))\n # Open the file\n file = open(filename, 'r')\n # Add each route to the list of routes\n routes += file.readlines() \n # Close file\n file.close() \n\n # remove all next line symbols from the end of the arrays\n routes = [elem.strip('\\n') for elem in routes]\n\n for i in range(0, len(routes)):\n # remove the brackets read in from the file as strings\n # and 
split the string data into individual strings\n routes[i] = routes[i].strip('[').strip(']').split(', ')\n # convert each string to integer\n routes[i] = [int(j) for j in routes[i]]\n\n return routes\n\n\ndef cost_routes(routes, weekday): \n\n # Reading in the durations data from the csv file\n durations = pd.read_csv('WoolworthsTravelDurations2.csv')\n\n # Reading the dataset containg average pallet demand estimates\n demand = pd.read_csv('WoolworthsDemands2.csv')\n demand = demand[0:60]\n\n # Initialising region route costs dictionary and route number variable\n route_costs = {}\n route_number = 1\n DC = 59\n\n def generateNodeDemand(node, weekday):\n\n # Get average demand and standard deviation, and generate random\n # variate for demand from normal distribution \n if weekday:\n node_average = demand.loc[node-1][2]\n node_stddev = demand.loc[node-1][4]\n node_demand = np.random.normal(node_average, node_stddev) \n else:\n node_average = demand.loc[node-1][1] \n node_stddev = demand.loc[node-1][3]\n node_demand = np.random.normal(node_average, node_stddev) \n\n return node_demand \n\n \n def generateTravelDuration(node, weekday): \n\n # Get travel duration from current node to next node and calculate \n # average unloading time for one store (in hours)\n if weekday: \n travel_dur = float(\n durations.loc[node-1][route[route.index(node)+1]]) / 3600 \n avg_unloading = demand['Rounded Weekday'].mean() * (7.5/60)\n else: \n travel_dur = float(\n durations.loc[node-1][route[route.index(node)+1]]) / 3600 \n avg_unloading = demand['Rounded Saturday'].mean() * (7.5/60)\n\n # Set parameters for truncnorm function\n a = travel_dur - travel_dur * 1.3 \n b = travel_dur * 1.6 - travel_dur * 1.3\n\n # Generate random values from truncated norm distribution \n traffic_adj = stats.truncnorm.rvs(a, b, travel_dur * 1.3, 1)\n\n # Remove average unloading time for one store\n traffic_adj -= avg_unloading\n\n # Add traffic adjustment to travel duration \n travel_dur += traffic_adj \n \n return travel_dur \n\n\n def generateTravelDurationDC(weekday): \n \n # Similar function for travel duration from DC to start node and end node\n # to DC, and calculate average unloading time for one store (in hours)\n if weekday: \n DC_travel_dur = (float(durations.loc[DC][route[0]]) +\n float(durations.loc[route[-1]-1][DC+1])) / 3600 \n else: \n DC_travel_dur = (float(durations.loc[DC][route[0]]) +\n float(durations.loc[route[-1]-1][DC+1])) / 3600\n \n # Set parameters for truncnorm function\n a = DC_travel_dur - DC_travel_dur * 1.3 \n b = DC_travel_dur * 1.6 - DC_travel_dur * 1.3\n\n # Generate random values from truncated norm distribution\n traffic_adj = stats.truncnorm.rvs(a, b, DC_travel_dur * 1.3, 1)\n\n # Add traffic adjustment to travel duration\n DC_travel_dur += traffic_adj \n\n return DC_travel_dur\n\n\n for route in routes: \n\n # Initialising total duration required for route\n total_dur = 0\n route_demand = 0\n\n for node in route:\n \n # Initialise variables \n node_demand = 0\n # node_average = 0\n # node_stddev = 0\n travel_dur = 0\n\n # Find node demands, and travel duration if not the last or only node in the route \n if weekday:\n node_demand = generateNodeDemand(node, weekday)\n if len(route) > 1 and node != route[len(route)-1]:\n travel_dur = generateTravelDuration(node, weekday=True)\n else:\n node_demand = generateNodeDemand(node, weekday) \n if len(route) > 1 and node != route[len(route)-1]:\n travel_dur = generateTravelDuration(node, weekday=False) \n \n # Add number of pallets demanded to current 
route demand \n route_demand += node_demand\n \n # Calculate pallet unloading duration \n pallet_dur = node_demand * (7.5/60)\n\n # Duration is calculated from node to node and then added to total duration\n dur = travel_dur + pallet_dur\n total_dur += dur\n\n # Adding travel duration for DC to start node and end node to DC\n if weekday:\n total_dur += generateTravelDurationDC(weekday=True)\n else:\n total_dur += generateTravelDurationDC(weekday=False) \n\n # Calculating cost for route\n if total_dur > 4:\n route_cost = 900 + (total_dur - 4) * 275\n else:\n route_cost = total_dur * 225\n\n # Initialise array for routes that require Daily Freight Trucks\n DailyFreight = []\n\n # If demand for the route exceeds truck demand, add cost for hired truck\n if route_demand > 26:\n route_cost += 2000\n DailyFreight.append(route_number)\n\n # Appending individual route cost to region route costs dictionary\n route_costs[route_number] = route_cost\n\n # Incrementing route number\n route_number += 1\n\n return route_costs, DailyFreight\n\n# Run simulation\n\n# Set to True for a weekday, False for a weekend\ntime_period = False\n\n# Get routes \nroutes = get_routes(time_period)\n\n# Split routes into regions \nif time_period == True:\n reg1 = routes[0:3]\n reg2 = routes[3:6]\n reg3 = routes[6:10]\n reg4 = routes[10:13]\n reg5 = routes[13:17]\n reg6 = routes[17:21]\n reg7 = routes[0:21]\nelse: \n reg1 = routes[0:3]\n reg2 = routes[3:5]\n reg3 = routes[5:8]\n reg4 = routes[8:10]\n reg5 = routes[10:12]\n reg6 = routes[12:14]\n reg7 = routes[0:14]\n\n# Get total cost for optimised routing schedule from Part 1\nif time_period == True: \n optimised_cost = [3047, 2716, 3326, 2624, 3139, 3191, 18043] # weekday solutions (last value is total cost)\nelse: \n optimised_cost = [2191, 1542, 1862, 1206, 1634, 1683, 10118] # weekend solutions (last value is total cost) \n\n# Initialise arrays for simulations \nexpected_costs = [0] * 1000\nobserved_costs = [0] * 1000\n\n# Simulation for each region \nfor i in range(1,8): \n\n # Set appropriate routes for the region\n if i == 1: \n routes = reg1\n elif i == 2:\n routes = reg2\n elif i == 3:\n routes = reg3\n elif i == 4:\n routes = reg4\n elif i == 5:\n routes = reg5\n elif i == 6:\n routes = reg6\n elif i == 7:\n routes = reg7\n\n for j in range(len(observed_costs)):\n # Cost routes\n route_costs, DailyFreight = cost_routes(routes, time_period)\n\n # Sum cost of routes for total cost \n costs = route_costs.values()\n total_cost = sum(costs)\n\n # Populate arrays with appropriate costs\n expected_costs[j] = optimised_cost[i-1]\n observed_costs[j] = total_cost\n\n # Visualise cost distributions\n plt.hist(observed_costs, density=True, histtype='stepfilled', alpha=0.2)\n plt.title(\"Distribution of Simulated Costs for Region \" + str(i) + \"\\n 1000 simulations\")\n plt.xlabel(\"Total Routing Cost\")\n plt.ylabel(\"Probability\")\n plt.savefig(\"Region \" + str(i) +\".png\", format=\"PNG\")\n plt.close()\n\n\n sys.stdout = open(\"Simulation with Closed Stores Results for Region {}\".format(i), \"w\")\n \n # Print routes where hired trucks are used\n if len(DailyFreight) != 0: \n print(\"There are daily freight trucks used on the following routes:\", routes[int(DailyFreight[0])-1]) \n\n # Average routing cost\n average_cost = np.mean(observed_costs)\n\n print(\"The average cost for region \" + str(i) + \" is:\", average_cost)\n\n # Percentile interval\n observed_costs.sort()\n lowerBound = observed_costs[25]\n upperBound = observed_costs[975]\n\n print(\"The lower bound 
of the 95% percentile interval is: \", lowerBound)\n print(\"The upper bound of the 95% percentile interval is: \", upperBound)\n\n # Error rate\n error_rate = sum(np.greater(observed_costs, expected_costs))/len(observed_costs)\n\n print(\"The simulated cost of region \" + str(i) + \" is greater than our optimised cost\", error_rate*100, \"% of the time\")\n\n sys.stdout.close()\n","sub_path":"simulation_closure.py","file_name":"simulation_closure.py","file_ext":"py","file_size_in_byte":10099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"458726268","text":"class Solution(object):\n def hammingDistance(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n hamming_dist = 0\n \n pos_x = bin(x)[2:].zfill(31)\n pos_y = bin(y)[2:].zfill(31)\n \n for i in range(31):\n if pos_x[i] != pos_y[i]:\n hamming_dist +=1\n \n return hamming_dist","sub_path":"py/prob_461.py","file_name":"prob_461.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76226038","text":"def fatorial(n):\n\tfat=1\n\twhile n > 0:\n\t\tfat = fat * n\n\t\tn -= 1\n\treturn(fat)\n\nn = 1\nwhile n >= 0:\n\tn = n = int(input(\"Digite um número inteiro positivo:\"))\n\tif n > 0:\n\t\tfat = fatorial(n)\n\t\tprint (\"Fatorial=\", fat)\n\n\n\n","sub_path":"curso1/fatorial.py","file_name":"fatorial.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275379367","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 09 17:17:11 2018\n\n@author: Leon\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n#from psychopy.misc import fromFile\n#from psychopy import data\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys \nimport os \nimport struct\nimport numpy as np\nimport scipy\nimport math\nfrom scipy.misc import logsumexp\nimport psignifit as ps\n\n\n\nnameVpn = \"VO10HA\"\n\n\n\n#_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())\n#filename_trial = _thisDir + os.sep + u'data/acker/trialdata1.psydat'\n#datFile = fromFile(filename_trial)\n##get info (added when the handler was created)\n#print (datFile.extraInfo)\n##get data\n##print (datFile._getAllParamNames())\n#print (datFile.data['response'])\n##datFile.getExp()\n##print (datFile.getAllEntries())\n##get list of conditions\n#conditions = datFile.trialList\n##for condN, condition in enumerate(conditions):\n## print condition, datFile.data['response'][condN], numpy.mean(datFile.data['response'][condN])\n##print(conditions)\n#print(datFile.getExp())\n\n\n##### Experimenthandler \n#_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())\n#filename_trial = _thisDir + os.sep + u'data/acker/Yes-No Task_2018_Dec_11_1409.psydat'\n#filename_for_panda = _thisDir + os.sep + u'data/acker/Yes-No Task_2018_Dec_11_1409'\n#datFile = fromFile(filename_trial)\n#\n##print (datFile.extraInfo)\n#print (datFile.dataFileName)\n#print (datFile.dataNames)\n#print (datFile.entries[1][u'response'])\n\n######### Panda ##########\n### getting path\n#_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())\n_thisDir = os.path.dirname(os.path.abspath(__file__))\n#filename_for_panda = _thisDir + os.sep + r'data\\acker\\Yes-No Task_2018_Dec_11_1409.csv'\n#filename_for_panda_to_extend = 
_thisDir + os.sep + r'data\\jolle\\Yes-No Task_2018_Dec_11_1527.csv'\n#df = pd.read_csv(filename_for_panda)\n#print (df)\n#### Plott numerical data\n#df['signal_intensity'].plot()\ndef createListOfAllFilesInDic ():\n\n #### Automatically find all dataframes and put the name in FileNames\n \n # List to hold file names\n FileNames = []\n \n # Your path will be different, please modify the path below.\n os.chdir(_thisDir + os.sep + r'data')\n \n # Find any file that ends with \".xlsx\"\n for files in os.listdir(\".\"):\n if files.endswith(\".csv\"):\n FileNames.append(files)\n \n return FileNames\n\n#print FileNames\n\n#### Funktioniert \n\n\n# get all Files in Given Dictonary\ndef GetFile(file_name):\n\n # Path to excel file\n # Your path will be different, please modify the path below.\n location = _thisDir + os.sep + r'data/' + file_name\n # Parse the excel file\n # 0 = first sheet\n df = pd.read_csv(location)\n \n # Tag record to file name\n df['File'] = file_name\n \n # Make the \"File\" column the index of the df\n return df.set_index(['File'])\n\n#FileNames=createListOfAllFilesInDic() \n#\n## Create a list of dataframes\n#df_list = [GetFile(fname) for fname in FileNames]\n#\n## Combine all of the dataframes into one\n#big_df = pd.concat(df_list)\n#\n## delet whatever is unnecessary\n#del big_df['trials.thisTrialN']\n#del big_df['trials.thisN']\n#del big_df['trials.thisIndex']\n##print (big_df)\n#\n##creating a subdata set \n#subData = big_df.loc[big_df['expName'] == \"Yes-No Task\"]\n#\n#\n## diagram\n##big_df.response.value_counts().plot(kind='barh')\n#\n##save the data to csv\n#\n###### creat subset !!!!!!!!!!!!!!!\n#\n#big_df.to_csv('BigData.csv', index=False)\n#subData.to_csv('DataYesNo.csv', index=False)\n\n###############\n\n ## and plot\ndef create_subdataset_for_participant (name):\n \n participant_data = subData.loc[subData['participant'] == name]\n participant_data = participant_data.loc[participant_data['session'] == 2 ]\n \n return participant_data\n\ndef create_subdataset_for_participant2IFC (name):\n \n participant_data2IFC = subData2IFC.loc[subData2IFC['participant'] == name]\n participant_data2IFC = participant_data2IFC.loc[participant_data2IFC['session'] == 2 ]\n \n return participant_data2IFC\n ########## search for the signal Intensitys\n ########## calculate the probability of a correct answer for each signal Intesnity\n ########## Plots the Data-Points\n ########## Plots a Fiting curve\n \n \ndef evaluationAndPlot2IFC (dataPlot):\n list_of_intensitys = dataPlot['signal_intensity'].value_counts().index.tolist()\n\n correct_fit = []\n n_total_fit = []\n# listRelHit = []\n# listRelFalseA = []\n# listNGes = []\n for x in list_of_intensitys:\n# relTwoHit = 0\n# relTwoFalseA = 0\n# other = 0\n dataframe_signalintensity = dataPlot[dataPlot['signal_intensity'] == x]\n dataStimOnOne = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == True]\n dataHit = dataStimOnOne[dataStimOnOne['response'] == 'correct answer']\n dataMiss = dataStimOnOne[dataStimOnOne['response'] == 'wrong answer']\n \n dataNoStim = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == False]\n dataCorrectRejection = dataNoStim[dataNoStim['response'] == 'correct answer']\n dataFalseAlarm =dataNoStim[dataNoStim['response'] == 'wrong answer']\n correct_fit.append(dataHit.shape[0] + dataCorrectRejection.shape[0])\n n_total_fit.append(dataCorrectRejection.shape[0] + dataFalseAlarm.shape[0] + dataHit.shape[0] + dataMiss.shape[0])\n 
print(dataHit.shape[0])\n print(dataCorrectRejection.shape[0])\n\n \n data2IFC = np.empty([len(list_of_intensitys),3], dtype=float)\n\n sorted_ar = np.array(list_of_intensitys)\n #### data for fit\n correct_psyfit = np.array(correct_fit)\n n_total_psyfit = np.array(n_total_fit)\n o=0\n \n for k in sorted_ar:\n \n \n data2IFC[o][0] = sorted_ar[o]\n data2IFC[o][1] = correct_psyfit[o]\n data2IFC[o][2] = n_total_psyfit[o]\n# print (data[o][0])\n# print (data[o][1])\n# print (data[o][2])\n o = o+1\n return (data2IFC)\n \ndef evaluationAndPlot (givenData):\n \n \n count=[]\n correct_fit = []\n n_total_fit = []\n o = 0\n# print(dataframe_signalintensity.groupby('response').size())\n i=0\n# print(\"hier\")\n list_of_intensitys = givenData['signal_intensity'].value_counts().index.tolist()\n ## sortiere in Aufsteigender reihenfolge\n sortiert = np.sort(list_of_intensitys, axis=None) # sort the flattened array\n for x in sortiert:\n correct = 0\n wrong = 0\n# hit = 0\n# fa = 0\n# cr = 0\n# miss = 0\n dataframe_signalintensity = givenData[givenData['signal_intensity'] == x]\n answers = dataframe_signalintensity.groupby('response').size()\n answerOptions = dataframe_signalintensity['response'].value_counts().index.tolist()\n# dataframe_signalintensity.groupby('response').size()\n for y in answerOptions:\n \n if y == \"Hit\" :\n correct = correct + answers['Hit']\n if y == \"False Alarm\":\n wrong = wrong + answers['False Alarm']\n if y == \"Correct Rejection\":\n correct = correct + answers['Correct Rejection']\n if y == \"Miss\":\n wrong = wrong + answers['Miss']\n if y == \"No Answer\" :\n wrong = wrong + answers['No Answer']\n ##### for fit \n if y == \"correct answer\" :\n correct = correct + answers['correct answer']\n if y == \"wrong answer\" :\n wrong = wrong + answers['wrong answer']\n\n\n######################\n# if y == \"Hit\" :\n# hit = hit + answers['Hit']\n# if y == \"False Alarm\":\n# fa = fa + answers['False Alarm']\n# if y == \"Correct Rejection\":\n# cr = cr + answers['Correct Rejection']\n# if y == \"Miss\":\n# miss = miss + answers['Miss']\n# if y == \"No Answer\" :\n# wrong = wrong + answers['No Answer']\n# ##### for fit \n# if y == \"correct answer\" :\n# correct = correct + answers['correct answer']\n# if y == \"wrong answer\" :\n# wrong = wrong + answers['wrong answer']\n####################\n# \n correct_fit.append(correct)\n n_total_fit.append(correct + wrong)\n\n count.append(float(correct) / float(correct + wrong))\n i = i+1\n \n data = np.empty([len(sortiert),3], dtype=float)\n \n# D = np.array(count)\n sorted_ar = np.array(sortiert)\n #### data for fit\n correct_psyfit = np.array(correct_fit)\n n_total_psyfit = np.array(n_total_fit)\n \n \n for k in sorted_ar:\n \n \n data[o][0] = sorted_ar[o]\n data[o][1] = correct_psyfit[o]\n data[o][2] = n_total_psyfit[o]\n# print (data[o][0])\n# print (data[o][1])\n# print (data[o][2])\n o = o+1\n \n\n \n# options = dict() \n## options['sigmoidName'] = 'weibull' # choose a cumulative Gauss as the sigmoid \n# options['expType'] = 'nAFC' # choose 2-AFC as the experiment type \n## options['plotAsymptote'] = False\n# options['expN'] = 2 # this sets the guessing rate to .5 (fixed)\n## options['threshPC'] = 0.75\n## options['cuts']= 0.75\n## options[''] = 0.5\n## options['CImethod'] = 'stripes'\n## options['betaPrior'] = 10\n# \n# \n# ## options borders threshold,width,upper asymptote,lower asymptote,variance, scale\n## options['borders'] = np.array([[1,2],[0.05,5],[0.05,0.05], [.1,0.95],[0.2,.2]],dtype=float)\n## options['borders'] = 
np.array([[1,12],[0.05,2],[0.05,0.3], [.1,0.95],[0.2,.2]],dtype=float)\n## options['borders'] = np.array([[1,12],[0.05,2],[0.1,0.45], [.1,0.95],[0.6,.2]],dtype=float)\n## options['borders'] = np.array([[1,2],[0.05,5],[0.05,0.005], [0,0.5],[0.6,.2]],dtype=float)\n# \n#\n# print (\"ready1\")\n## options['maxBorderValue']= 0.05\n## options['setBordersType']= 0.5\n## options['instantPlot'] = True\n#\n# \n## options['moveBorders'] = 0\n# result = ps.psignifit(data)\n## result.round(2)\n# print (\"ready2\")\n# result['Fit']\n#\n# result['conf_Intervals']\n# print (\"ready3\")\n \n# ps.psigniplot.plotPsych(result)\n return (data)\n ##############\n#################### \n#by the √2 rule d'2AFC = √2 d'Yes/No\ndef calc2AFCSuggestion (dataPerson):\n list_of_intensitys = dataPerson['signal_intensity'].value_counts().index.tolist()\n \n listRelHit = []\n listRelFalseA = []\n listNCoFa = []\n listNHiMi = []\n listIntensitys = []\n smaltoBig = np.sort(list_of_intensitys, axis=None)\n suggestionData = np.empty([len(list_of_intensitys),3], dtype=float)\n for x in smaltoBig:\n relHit = 0\n relFalseA = 0\n other = 0\n NCo = 0\n NMi = 0\n dataframe_signalintensity = dataPerson[dataPerson['signal_intensity'] == x]\n answers = dataframe_signalintensity.groupby('response').size()\n answerOptions = dataframe_signalintensity['response'].value_counts().index.tolist()\n# dataframe_signalintensity.groupby('response').size()\n for y in answerOptions:\n \n if y == \"Hit\" :\n relHit = relHit + answers['Hit']\n if y == \"False Alarm\":\n relFalseA = relFalseA + answers['False Alarm']\n if y == \"Correct Rejection\":\n NCo = NCo + answers['Correct Rejection']\n if y == \"Miss\":\n NMi = NMi + answers['Miss']\n if y == \"No Answer\" :\n other = other + answers['No Answer']\n ##### for fit \n\n \n listRelHit.append(relHit)\n# print(relHit)\n listRelFalseA.append(relFalseA)\n listNCoFa.append(relFalseA + NCo)\n listNHiMi.append(relHit + NMi)\n listIntensitys.append(x)\n \n a = 0\n np.array(listRelHit)\n np.array(listRelFalseA)\n np.array(listNHiMi)\n np.array(listNCoFa)\n np.array(listIntensitys)\n o = 0\n for p in listRelHit:\n# print ('%s ratio Hits %s' % (a,float(listRelHit[a])/listNGes[a]))\n relHitForIntensity=float(listRelHit[a]/listNHiMi[a])\n\n# print ('%s ratio False Alarms %s' % (a,float(listRelFalseA[a])/listNGes[a]))\n relFaForIntensity=float(listRelFalseA[a]/listNCoFa[a])\n# print(relFaForIntensity)\n DYesNo = scipy.stats.norm.ppf(relHitForIntensity) - scipy.stats.norm.ppf(relFaForIntensity)\n# print(DYesNo)\n #D2IFC = (2*DYesNo) / math.sqrt(2)\n D2IFC = DYesNo * math.sqrt(2)\n a = a+1\n# print(D2IFC)\n criterium = D2IFC/2\n WsHitAFC = scipy.stats.norm.cdf(((D2IFC - criterium)/math.sqrt(2)))\n\n NegD2IFC = 0-D2IFC\n WsFAAFC = scipy.stats.norm.cdf(((NegD2IFC - criterium)/math.sqrt(2)))\n\n \n \n smaltoBig = np.sort(listIntensitys, axis=None)\n \n \n \n \n suggestionData[o][0] = smaltoBig[o]\n nGes = listNCoFa[o] + listNHiMi[o]\n numberOfHits = (nGes/2) * WsHitAFC\n# print (numberOfHits)\n numberOfCA = ((nGes/2) * (1 - WsFAAFC))\n# print (numberOfCA)\n suggestionData[o][1] = numberOfCA + numberOfHits\n# print(nGes)\n suggestionData[o][2] = nGes\n# print (data[o][0])\n# print (data[o][1])\n# print (data[o][2])\n o = o+1\n return suggestionData \n# \n# ts.plot()\n# plt.plot(sortiert, D)\n# plt.ylim(0, 1)\n# plt.show()\n\n\n## treffer + false alarm\ndef RelativeProbabilityYesNo (dataPerson):\n list_of_intensitys = dataPerson['signal_intensity'].value_counts().index.tolist() \n listRelHit = []\n listRelFalseA = []\n 
listRelCorrectR = []\n listRelMiss = []\n smaltoBig = np.sort(list_of_intensitys, axis=None)\n print (smaltoBig)\n listNGes = []\n for x in smaltoBig:\n relHit = 0\n relFalseA = 0\n relCorrectR = 0\n relMiss = 0\n other = 0\n dataframe_signalintensity = dataPerson[dataPerson['signal_intensity'] == x]\n answers = dataframe_signalintensity.groupby('response').size()\n answerOptions = dataframe_signalintensity['response'].value_counts().index.tolist()\n# dataframe_signalintensity.groupby('response').size()\n for y in answerOptions:\n \n if y == \"Hit\" :\n relHit = relHit + answers['Hit']\n if y == \"False Alarm\":\n relFalseA = relFalseA + answers['False Alarm']\n if y == \"Correct Rejection\":\n relCorrectR = relCorrectR + answers['Correct Rejection']\n if y == \"Miss\":\n relMiss = relMiss + answers['Miss']\n if y == \"No Answer\" :\n other = other + answers['No Answer']\n ##### for fit \n\n \n listRelHit.append(relHit)\n listRelFalseA.append(relFalseA)\n listRelCorrectR.append(relCorrectR)\n listRelMiss.append(relMiss)\n #print(relHit)\n listNGes.append(relHit + relFalseA + relCorrectR + relMiss + other)\n \n a = 0\n np.array(listRelHit)\n np.array(listRelFalseA)\n np.array(listNGes)\n for p in listRelHit:\n hitProbability = float(listRelHit[a])/(listRelHit[a]+listRelMiss[a])\n faProbability = float(listRelFalseA[a])/(listRelFalseA[a]+listRelCorrectR[a])\n dprime = scipy.stats.norm.ppf(hitProbability) - scipy.stats.norm.ppf(faProbability)\n# print ('%s ratio Hits %s' % (a+1,hitProbability))\n# print ('%s ratio False Alarms %s' % (a+1,faProbability))\n print ('%s DPRIME %s' % (a+1,dprime))\n a = a+1\n \n \ndef RelativeProbability2IFC (dataPerson):\n list_of_intensitys = dataPerson['signal_intensity'].value_counts().index.tolist() \n listRelHit = []\n listRelFalseA = []\n listRelCorrectR = []\n listRelMiss = []\n smaltoBig = np.sort(list_of_intensitys, axis=None)\n listNGes = []\n # for each signal intensity\n for x in smaltoBig:\n relHit = 0\n relFalseA = 0\n relCorrectR = 0\n relMiss = 0\n other = 0\n # filters only specific signal intensity\n dataframe_signalintensity = dataPerson[dataPerson['signal_intensity'] == x]\n # filters only trials where stimulus was on position 1 (hits & misses)\n dataStimOnOne = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == True]\n # filters only trials where stimulus was on position 2 (false as & correct rs)\n dataStimOnTwo = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == False]\n # gets the amount of correct and wrong answers with stimulus on position one\n answersOnOne = dataStimOnOne.groupby('response').size()\n # gets the amount of correct and wrong answers with stimulus on position two\n answersOnTwo = dataStimOnTwo.groupby('response').size()\n # correct answer & wrong answer\n answerOptions = dataframe_signalintensity['response'].value_counts().index.tolist()\n\n for y in answerOptions:\n \n if y == \"correct answer\" :\n relHit = relHit + answersOnOne['correct answer']\n if y == \"wrong answer\":\n relMiss = relMiss + answersOnOne['wrong answer']\n if y == \"correct answer\" :\n relCorrectR = relCorrectR + answersOnTwo['correct answer']\n if y == \"wrong answer\":\n relFalseA = relFalseA + answersOnTwo['wrong answer']\n \n listRelHit.append(relHit)\n listRelFalseA.append(relFalseA)\n listRelCorrectR.append(relCorrectR)\n listRelMiss.append(relMiss)\n listNGes.append(relHit + relFalseA + relCorrectR + relMiss + other)\n \n a = 0\n np.array(listRelHit)\n np.array(listRelFalseA)\n 
np.array(listNGes)\n for p in listRelHit:\n hitProbability = float(listRelHit[a])/(listRelHit[a]+listRelMiss[a])\n faProbability = float(listRelFalseA[a])/(listRelFalseA[a]+listRelCorrectR[a])\n dprime = scipy.stats.norm.ppf(hitProbability) - scipy.stats.norm.ppf(faProbability)\n# print ('%s ratio Hits %s' % (a+1,hitProbability))\n# print ('%s ratio False Alarms %s' % (a+1,faProbability))\n print ('%s DPRIME %s' % (a+1,dprime))\n a = a+1\n \n\n\n#def \n###################### Execution ######################\n\n### PREPARATION OF DATA###\n \n### a list of filenames in Dictionary \nFileNames=createListOfAllFilesInDic() \n\n# Create a list of all Dataframes in Dicrionary\ndf_list = [GetFile(fname) for fname in FileNames]\n\n# Combine all of the dataframes into one big Dataframe\nbig_df = pd.concat(df_list)\n\n# delet whatever is unnecessary \ndel big_df['trials.thisTrialN']\ndel big_df['trials.thisN']\ndel big_df['trials.thisIndex']\n\n\nsubData = big_df.loc[big_df['expName'] == \"Yes-No Task\"]\nsubData2IFC = big_df.loc[big_df['expName'] == \"2IFC\"]\n#print (big_df)\n##big_df.to_csv('BigData.csv', index=False)\n###subData.to_csv('DataYesNo.csv', index=False)\n#creating a subdata set with only \"yes-No Task\n\n\n## 2IFC + Suggestion start\n\ndataOne = evaluationAndPlot (create_subdataset_for_participant (nameVpn))\ndataTwo = calc2AFCSuggestion (create_subdataset_for_participant (nameVpn))\ndataThree = evaluationAndPlot2IFC (create_subdataset_for_participant2IFC(nameVpn))\noptions = dict() \noptions['expType'] = 'nAFC' # choose 2-AFC as the experiment type \noptions['expN'] = 2 \nresultOne = ps.psignifit(dataOne, options)\nresult = dict()\nresultTwo = ps.psignifit(dataTwo, options)\nps.psigniplot.plotPsych(resultOne, dataColor=[1,0,0.5],lineColor=[1,0,0.5])\nps.psigniplot.plotPsych(resultTwo, dataColor=[0.5,1,0.5],lineColor=[0.5,1,0.5])\nresultThree = ps.psignifit(dataThree, options)\nps.psigniplot.plotPsych(resultThree, dataColor=[0.5,0,1],lineColor=[0.5,0,1])\n# 2IFC + Suggestion end\n\n#RelativeProbabilityYesNo(create_subdataset_for_participant (nameVpn))\n#print ('for 2IFC')\n#RelativeProbability2IFC(create_subdataset_for_participant2IFC(nameVpn))\n\n\n\n\n\n\n\n\n\n###### create psychometric function for VPN\n\n\n#dataOne = evaluationAndPlot (create_subdataset_for_participant (nameVpn))\n#options = dict() \n#options['expType'] = 'nAFC' # choose 2-AFC as the experiment type \n#options['expN'] = 2 \n#options2 = dict() \n#options2['expType'] = 'nAFC' # choose 2-AFC as the experiment type \n#options2['expN'] = 2 \n#options3 = dict() \n#options3['expType'] = 'nAFC' # choose 2-AFC as the experiment type \n#options3['expN'] = 2 \n###### IMMER EINS AUSKOMMENTIEREN ERSTEN WERT ABLESEN UND NOCHMAL RUN ########\n#\n#options['threshPC'] = 0.3\n#options2['threshPC'] = 0.5\n#options3['threshPC'] = 0.7\n#\n#######\n#\n#resultOne = ps.psignifit(dataOne, options)\n#resultTwo = ps.psignifit(dataOne, options2)\n#resultThree = ps.psignifit(dataOne, options3)\n#result = dict()\n#\n##result['conf_Intervals']\n#ps.psigniplot.plotPsych(resultOne)\n#\n#print(\"Hier den Wert ablesen 0.65%:\")\n#print(resultOne['Fit'][0])\n#print(\"Hier den Wert ablesen 0.75%:\")\n#print(resultTwo['Fit'][0])\n#print(\"Hier den Wert ablesen 0.85%:\")\n#print(resultThree['Fit'][0])\n\n\n\n\n##########\n\n\n\n##### OLD SHIT DONT DELETE\n##### get the rel Probability of VPN (YES NO TASK)\n#RelativeProbabilityYesNo(create_subdataset_for_participant('felixtest'))\n#ps.psigniplot.plotPsych(lineColor=[0,0.4,0.7])\n##### get the rel 
Probability of VPN (2IFC)\n#dataTwo = evaluationAndPlot (create_subdataset_for_participant (nameVpn))\n\n\n#calc2AFCSuggestion(create_subdataset_for_participant (nameVpn))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n#\n#def RelativeProbability2IFC(dataTwoIFC):\n# \n# list_of_intensitys = [3,5,7]\n## listRelHit = []\n## listRelFalseA = []\n## listNGes = []\n# for x in list_of_intensitys:\n## relTwoHit = 0\n## relTwoFalseA = 0\n## other = 0\n# dataframe_signalintensity = dataTwoIFC[dataTwoIFC['signal_intensity'] == x]\n# dataStimOnOne = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == True]\n# dataHit = dataStimOnOne[dataStimOnOne['response'] == 'correct answer']\n# dataMiss = dataStimOnOne[dataStimOnOne['response'] == 'wrong answer']\n# \n# dataNoStim = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == False]\n# dataCorrectRejection = dataNoStim[dataNoStim['response'] == 'correct answer']\n# dataFalseAlarm =dataNoStim[dataNoStim['response'] == 'wrong answer']\n# \n# print('durchgang Beendet')\n# print (dataHit.shape)\n# print (dataCorrectRejection.shape)\n# print (dataFalseAlarm.shape)\n# print (dataMiss.shape)\n#\n#def RelativeProbability2IFC (dataPerson):\n# list_of_intensitys = dataPerson['signal_intensity'].value_counts().index.tolist() \n# listRelHit = []\n# listRelFalseA = []\n# listRelCorrectR = []\n# listRelMiss = []\n# smaltoBig = np.sort(list_of_intensitys, axis=None)\n# listNGes = []\n# for x in smaltoBig:\n# relHit = 0\n# relFalseA = 0\n# relCorrectR = 0\n# relMiss = 0\n# other = 0\n# dataframe_signalintensity = dataPerson[dataPerson['signal_intensity'] == x]\n# dataStimOnOne = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == True]\n# dataStimOnTwo = dataframe_signalintensity[dataframe_signalintensity['signal_on_stimuluspos1'] == False]\n# answersOnOne = dataStimOnOne.groupby('response').size()\n# answersOnTwo = dataStimOnTwo.groupby('response').size()\n# answerOptionsOnOne = dataStimOnOne['response'].value_counts().index.tolist()\n# answerOptionsOnTwo = dataStimOnTwo['response'].value_counts().index.tolist()\n#\n## dataframe_signalintensity.groupby('response').size()\n# for y in answerOptionsOnOne:\n# \n# if y == \"correct answer\" :\n# relHit = relHit + answersOnOne['correct answer']\n# if y == \"wrong answer\":\n# relMiss = relMiss + answersOnOne['wrong answer']\n# \n# for z in answerOptionsOnTwo:\n# \n# if z == \"correct answer\" :\n# relCorrectR = relCorrectR + answersOnTwo['correct answer']\n# if z == \"wrong answer\":\n# relFalseA = relFalseA + answersOnTwo['wrong answer']\n# \n# listRelHit.append(relHit)\n# listRelFalseA.append(relFalseA)\n# listRelCorrectR.append(relCorrectR)\n# listRelMiss.append(relMiss)\n# #print(relHit)\n# listNGes.append(relHit + relFalseA + relCorrectR + relMiss + other)\n# \n# a = 0\n# np.array(listRelHit)\n# np.array(listRelFalseA)\n# np.array(listNGes)\n# for p in listRelHit:\n# hitProbability = float(listRelHit[a])/(listRelHit[a]+listRelMiss[a])\n# faProbability = float(listRelFalseA[a])/(listRelFalseA[a]+listRelCorrectR[a])\n# dprime = scipy.stats.norm.ppf(hitProbability) - scipy.stats.norm.ppf(faProbability)\n# print ('%s ratio Hits %s' % (a+1,hitProbability))\n# print ('%s ratio False Alarms %s' % (a+1,faProbability))\n# print ('%s dprime %s' % (a+1,dprime))\n# a = a+1\n# 
","sub_path":"Auswertung.py","file_name":"Auswertung.py","file_ext":"py","file_size_in_byte":25508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"600865830","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport sys\n\nimport hyperparameters as PARAM\nfrom aux.AuxNetwork import AuxNetwork\n\nclass PPO():\n def __init__(self, episode_buffer, replay_buffer, action_space=3):\n self.lr = PARAM.LEARNING_RATE\n self.episode_buffer = episode_buffer\n self.replay_buffer = replay_buffer\n self.N = PARAM.N\n self.gamma = PARAM.gamma\n self.seq_len = PARAM.A2C_SEQUENCE_LENGTH\n self.aux_batch_size = PARAM.AUX_TASK_BATCH_SIZE\n self.vfr_weight = PARAM.VFR_LOSS_WEIGHT\n self.rp_weight = PARAM.RP_LOSS_WEIGHT\n self.pc_weight = PARAM.PC_LOSS_WEIGHT\n\n self.ppo_epochs = 10 #PARAM.PPO_EPOCHS\n self.num_mini_batch = 12 #PARAM.PPO_NUM_MINI_BATCH\n self.clip_param = 0.2\n\n #self.max_grad_norm = PARAM.MAX_GRAD_NORM\n #self.use_clipped_value_loss = PARAM.USE_CLIPPED_VALUE_LOSS\n\n # A2C network\n self.A = AuxNetwork(state_size=PARAM.STATE_SIZE, action_space=action_space, seq_len=self.seq_len)\n\n # GPU availability\n self.gpu = torch.cuda.is_available()\n if self.gpu:\n print(\"Using GPU\")\n self.A = self.A.cuda()\n else:\n print(\"Using CPU\")\n\n # Loss Function and Optimizer\n self.optimizer = optim.Adam(self.A.parameters(), lr=self.lr, weight_decay=1e-6)\n self.vfr_criterion = nn.MSELoss() # Value Function Replay loss\n self.rp_criterion = nn.CrossEntropyLoss() # Reward Prediction loss\n self.pc_criterion = nn.MSELoss() # Value Function Replay loss\n\n\n def reduce_learning_rate(self):\n for pgroups in self.optimizer.param_groups:\n pgroups['lr'] = pgroups['lr']/10.0\n\n def train(self, episode_len):\n T = episode_len\n n = self.N\n advantages = []\n rewards = []\n for t in range(T-1, -1, -1):\n val = self.episode_buffer[t][-1]\n if t + n >= T:\n Vend = 0\n else:\n Vend = self.episode_buffer[t+n][-1]\n sum_ = 0.0\n \n for k in range(n):\n if t + k < T:\n tk_reward = self.episode_buffer[t+k][2]\n sum_ += tk_reward * (self.gamma**k)\n rew = Vend*(self.gamma**n) + float(sum_)\n rewards.append(rew)\n \n if t == T-1:\n advantages.append(rew-val)\n else:\n advantages.append(rew-val)\n\n advantages = list(reversed(advantages))\n advantages = torch.tensor(advantages)\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)\n\n rewards = list(reversed(rewards))\n rewards = torch.tensor(rewards)\n\n self.ppo_epochs = 10 #PARAM.PPO_EPOCHS\n #self.clip_param = PARAM.PPO_CLIP_PARAM\n self.num_mini_batch = 1 #PARAM.PPO_NUM_MINI_BATCH\n self.seq_len = 4\n\n for e in range(self.ppo_epochs):\n random_indicies = np.random.randint(T, size=self.num_mini_batch)\n new_values = []\n new_softmax = []\n for index in random_indicies:\n val, softmax, action = self.get_output([index], self.num_mini_batch, self.seq_len)\n new_values.append(val)\n new_softmax.append(softmax)\n \n for k, index in enumerate(random_indicies):\n action_log_probs = torch.log(new_softmax[k])\n old_action_log_probs = torch.log(self.episode_buffer[index][4])\n\n advantage_target = advantages[index]\n\n ratio = torch.exp(action_log_probs - old_action_log_probs)\n surr1 = ratio * advantage_target \n surr2 = torch.clamp(ratio, 1.0 - self.clip_param,\n 1.0 + self.clip_param) * advantage_target\n action_loss = -torch.min(surr1, surr2).mean()\n\n value_loss = 0.8 * F.mse_loss(rewards[index], 
new_values[k])\n\n self.optimizer.zero_grad()\n \n loss = action_loss + value_loss\n loss += self.vfr_weight * self.compute_vfr_loss()\n if self.replay_buffer.any_reward_instances():\n loss += self.rp_weight * self.compute_rp_loss()\n loss += self.pc_weight * self.compute_pc_loss()\n \n loss.backward(retain_graph=True)\n \n torch.nn.utils.clip_grad_value_(self.A.parameters(), PARAM.GRAD_CLIP_VAL)\n self.optimizer.step()\n\n\n def compute_vfr_loss(self):\n \"\"\" Computes Value Function Replay Loss. \"\"\"\n idxs = self.replay_buffer.sample_idxs(self.aux_batch_size)\n vision, scent, state, reward = self.get_io_from_replay_buffer(idxs, batch_size=self.aux_batch_size, seq_len=self.seq_len)\n val, _ = self.A.forward(vision, scent, state)\n\n return self.vfr_criterion(val.view(-1, 1), reward)\n\n def compute_rp_loss(self):\n \"\"\" Computes Reward Prediction Loss. \"\"\"\n vision, ground_truth = self.get_io_from_skewed_replay_buffer(batch_size=self.aux_batch_size, seq_len=3)\n pred = self.A.predict_rewards(vision)\n\n return self.rp_criterion(pred, ground_truth)\n\n def compute_pc_loss(self):\n \"\"\" Computes Pixel Control Loss. \"\"\"\n idxs = self.replay_buffer.sample_idxs(self.aux_batch_size)\n vision, aux_rew, actions = self.get_pc_io_from_replay_buffer(idxs, batch_size=self.aux_batch_size, seq_len=1)\n pred = self.A.pixel_control(vision)\n for i in range(20):\n if i == 0:\n pc_loss = self.pc_criterion(aux_rew[i], pred[i, actions[i]])\n else:\n pc_loss += self.pc_criterion(aux_rew[i], pred[i, actions[i]])\n\n return pc_loss\n\n def get_output(self, index, batch_size=1, seq_len=1, no_grad=False):\n ''' Returns output from the A network. '''\n vision, scent, state = self.get_input_tensor(index, batch_size, seq_len)\n if no_grad:\n with torch.no_grad():\n val, softmax = self.A.forward(vision, scent, state)\n else:\n val, softmax = self.A.forward(vision, scent, state)\n\n action = np.random.choice(np.arange(3), 1, p=np.squeeze(softmax.clone().cpu().detach().numpy()))\n return val, softmax.view(3), action\n\n def get_input_tensor(self, idxs, batch_size=1, seq_len=1):\n ''' Returns an input tensor from the observation. '''\n vision = np.zeros((batch_size, seq_len, 3, 11, 11))\n scent = np.zeros((batch_size, seq_len, 3))\n state = np.zeros((batch_size, seq_len, 4))\n\n for k, idx in enumerate(idxs):\n for j in range(seq_len):\n if idx - j < 0:\n continue\n obs, action, rew, _, _, tong_count, _ = self.episode_buffer[idx-j]\n vision[k, j] = np.moveaxis(obs['vision'], -1, 0)\n scent[k, j] = obs['scent']\n state[k, j] = np.array([action, rew, int(obs['moved']), tong_count])\n\n vision, scent, state = torch.from_numpy(vision).float(), torch.from_numpy(scent).float(), torch.from_numpy(state).float()\n if self.gpu:\n vision, scent, state = vision.cuda(), scent.cuda(), state.cuda()\n\n return vision, scent, state\n\n def get_io_from_replay_buffer(self, idxs, batch_size=1, seq_len=1):\n ''' Returns an input tensor from the observation. 
'''\n vision = np.zeros((batch_size, seq_len, 3, 11, 11))\n scent = np.zeros((batch_size, seq_len, 3))\n state = np.zeros((batch_size, seq_len, 4))\n reward = np.zeros((batch_size, 1))\n\n for k, idx in enumerate(idxs):\n for j in range(seq_len):\n obs, action, rew, _, _, tong_count = self.replay_buffer.get_single_sample(idx-j)\n vision[k, j] = np.moveaxis(obs['vision'], -1, 0)\n scent[k, j] = obs['scent']\n state[k, j] = np.array([action, rew, int(obs['moved']), tong_count])\n if j == 0:\n reward[k] = rew\n\n vision, scent, state, reward = torch.from_numpy(vision).float(), torch.from_numpy(scent).float(), torch.from_numpy(state).float(), torch.from_numpy(reward).float()\n if self.gpu:\n vision, scent, state, reward = vision.cuda(), scent.cuda(), state.cuda(), reward.cuda()\n\n return vision, scent, state, reward\n\n def get_io_from_skewed_replay_buffer(self, batch_size=1, seq_len=1):\n ''' Returns an input tensor from the observation. '''\n vision, reward_class = self.replay_buffer.skewed_samples(batch_size, seq_len)\n vision, reward_class = torch.from_numpy(vision).float(), torch.from_numpy(reward_class).long()\n if self.gpu:\n vision, reward_class = vision.cuda(), reward_class.cuda()\n\n return vision, reward_class\n\n def get_pc_io_from_replay_buffer(self, idxs, batch_size=1, seq_len=1):\n ''' Returns an input tensor from the observation. '''\n vision = np.zeros((batch_size, seq_len, 3, 11, 11))\n aux_rew = np.zeros((batch_size, 11, 11))\n actions = [[]]*batch_size\n\n for k, idx in enumerate(idxs):\n for j in range(seq_len):\n obs, action, _, next_obs, _, _ = self.replay_buffer.get_single_sample(idx-j)\n vision[k, j] = np.moveaxis(obs['vision'], -1, 0)\n if j == 0:\n if next_obs['moved']:\n aux_rew[k] = np.mean(np.abs(obs['vision'] - next_obs['vision']), axis=2)\n actions[k] = action\n\n vision, aux_rew = torch.from_numpy(vision).float(), torch.from_numpy(aux_rew).float()\n if self.gpu:\n vision, aux_rew = vision.cuda(), aux_rew.cuda()\n\n return vision, aux_rew, actions\n\n def set_train(self):\n self.A.train()\n\n def set_eval(self):\n self.A.eval()\n\n def save_model_weights(self, suffix, path='./'):\n # Helper function to save your model / weights.\n state = {\n 'epoch': suffix,\n 'state_dict': self.A.state_dict(),\n 'optmizer': self.optimizer.state_dict(),\n }\n torch.save(state, path + str(suffix) + '.dat')\n\n def load_model(self, model_file):\n # Helper function to load an existing model.\n state = torch.load(model_file)\n self.A.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optimizer'])\n","sub_path":"PPO/models/PPO.py","file_name":"PPO.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420215309","text":"import numpy as np\n\ndef distance(d1, d2, p):\n \"\"\"\n Return the distance between a line determined by points d1 and d2, and \n a point p\n \"\"\"\n u = d2-d1\n v = p-d1\n return np.linalg.norm(np.cross(u, v))/np.linalg.norm(u)\n\ndef douglas_peucker(points, thres, min=0, max=-1):\n \"\"\"\n Apply douglas peucker algorithm in place, removing points from list by\n setting them to None.\n The Douglas-Peucker algorithm removes points from a polyline that are not\n significant (that is, if their distance from the main line is more than\n a given threshold).\n \n https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm\n \"\"\"\n # Allows for negative indexing at first call\n if max < 0:\n max += len(points)\n\n # No points 
between min and max: finished\n if max-min <= 1:\n return\n\n # Otherwise, we find the most extreme point\n first, last = points[min], points[max]\n dmax, imax = 0, min\n for i in range(min+1, max):\n d = distance(first, last, points[i])\n if d > dmax:\n dmax, imax = d, i\n\n if dmax <= thres:\n # Most extreme point is below thresehold: remove all points\n for i in range(min+1, max):\n points[i] = None\n else:\n # Apply alogrithm on sublines\n douglas_peucker(points, thres, min, imax)\n douglas_peucker(points, thres, imax, max)\n\ndef reduce_pointset(points, thres):\n \"\"\"\n Apply Douglas-Peucker algorithm on a laser point set to \n reduce the number of points\n \"\"\"\n # On ordonne les points verticalement\n if len(points) <= 2:\n return points\n points.sort(key=lambda x: x[2])\n douglas_peucker(points, thres)\n return filter(lambda x: x is not None, points)\n\n### Tests (to be moved elsewhere) ###\ndef test_distance_point_to_line():\n D1, D2 = np.array([-1, 3, 4]), np.array([1, 3, 4])\n P = np.array([0, 0, 0])\n assert distance(D1, D2, P) == 5\n\ndef test_distance_point_to_line_aligned():\n D1, D2 = np.array([-1, 0, 0]), np.array([1, 0, 0])\n P = np.array([0, 0, 0])\n assert distance(D1, D2, P) == 0\n\ndef array_equal(A, B):\n \"\"\"Helper function to compare 2 numpy arrays\"\"\"\n return not (A - B).any()\n\ndef test_reduce_pointset_3points():\n A, B, C = np.array([0, 0, 0]), np.array([2.5, 1, 6]), np.array([5, 0, 10])\n res = reduce_pointset([A, B, C], 2)\n assert len(res) == 2\n assert array_equal(A, res[0])\n assert array_equal(C, res[1])\n\ndef test_reduce_pointset_3points_keep():\n A, B, C = np.array([0, 0, 0]), np.array([2.5, 1, 1]), np.array([5, 0, 2])\n res = reduce_pointset([A, B, C], 0.5)\n assert len(res) == 3\n\ndef test_reduce_pointset_16points():\n points = map(np.array, zip(*[range(16) for i in range(3)]))\n points[8][0] = 42\n res = reduce_pointset(points)\n assert len(res) == 5\n\nif __name__ == \"__main__\":\n # Collect tests if not using py.test\n _ = locals()\n is_a_test = lambda x: x.startswith('test_') and '__call__' in dir(_[x])\n for test_name in filter(is_a_test, _.keys()):\n _[test_name]()\n\n","sub_path":"research/triangulation_4/douglaspeucker.py","file_name":"douglaspeucker.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570784580","text":"import json\n\nf = open(\"WMT.feat\", 'w')\ng = open(\"WMT.label\", 'w')\n\nfor i, line in enumerate(open(\"WMT.json\", \"r\").readlines()):\n o = json.loads(line)\n\n if not o[1]: continue\n\n label, featvec = o\n f.write('{}\\t{}\\n'.format(i, json.dumps(featvec)))\n g.write('{}\\t{}\\n'.format(i, label))\n#end for\n","sub_path":"XYdata/1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"14559120","text":"\nfrom sqlalchemy import exists\n\nfrom data import db_session\nfrom data.users import User\nfrom data.vidiotours import VidioTour\nfrom data.questtours import QuestTour\nfrom data.tests import Test\nimport datetime\n\nERR_EMAIL = -10\nOUT_SUCCESS = 1\n\n\ndef add_user(db_sess, **qwargs) -> User:\n email = qwargs.get(\"email\").lower()\n if email is None or email_exists(db_sess, email):\n return ERR_EMAIL\n # db_sess = db_session.create_session()\n user = User()\n user.name = qwargs.get(\"name\")\n user.surname = qwargs.get(\"surname\")\n user.email = email\n 
user.set_password(qwargs.get('password'))\n user.status = qwargs.get(\"status\")\n db_sess.add(user)\n db_sess.commit()\n return User\n\n\ndef create_vidiotour(db_sess, **qwargs) -> VidioTour:\n obj = VidioTour()\n\n obj.title = qwargs.get(\"title\")\n obj.description = qwargs.get(\"description\")\n obj.title_image = qwargs.get(\"title_image\")\n obj.resource = qwargs.get(\"resource\")\n\n db_sess.add(obj)\n db_sess.commit()\n return obj\n\n\ndef create_test(db_sess, **qwargs) -> Test:\n obj = Test()\n\n obj.title = qwargs.get(\"title\")\n obj.task = qwargs.get(\"task\")\n obj.quest_tour = qwargs.get(\"quest_tour\")\n\n db_sess.add(obj)\n db_sess.commit()\n return obj\n\n\ndef create_questtour(db_sess, **qwargs) -> QuestTour:\n obj = QuestTour()\n\n obj.title = qwargs.get(\"title\")\n obj.description = qwargs.get(\"description\")\n obj.test = qwargs.get(\"test\")\n\n db_sess.add(obj)\n db_sess.commit()\n return obj\n\n\ndef get_questtours(db_sess):\n tours = []\n for tour in db_sess.query(QuestTour).all():\n tours.append(tour.get_info())\n return tours\n\n\ndef get_vidiotours(db_sess):\n tours = []\n for tour in db_sess.query(VidioTour).all():\n tours.append(tour.get_info())\n return tours\n\n\ndef get_tests(db_sess, tour_id):\n tests = []\n db_tests = db_sess.query(Test).filter(Test.quest_tour_id == tour_id).all()\n for test in db_tests:\n tests.append(test.get_info())\n return tests\n\n\ndef email_exists(db_sess, email: str) -> bool:\n # db_sess = db_session.create_session()\n is_exists = db_sess.query(exists().where(User.email == email)).scalar()\n return is_exists\n\n\n\ndef main():\n db_session.global_init(\"db/gideTMN55.db\")\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232664103","text":"import requests\n\n\nclass TestProxyIP:\n # 开始测试\n def TestIp(self, proxy_ip):\n try:\n self.StartRequests(proxy_ip)\n return True\n except Exception as e:\n # print(str(e))\n return False\n\n # 谷歌浏览器\n def Headers(self):\n return {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36\"\n }\n\n # 构建代理ip\n def Proxies(self, proxy_ip):\n return {\n \"http\": \"http://\" + proxy_ip,\n \"https\": \"http://\" + proxy_ip,\n }\n\n # 发起请求\n def StartRequests(self, proxy_ip):\n response = requests.get(\n url=\"https://api.coinbtc.us/api/test/ip\",\n headers=self.Headers(),\n proxies=self.Proxies(proxy_ip),\n timeout=3,\n ) # 设置timeout,使响应等待1s\n print(response.json())\n response.close()\n if response.status_code == 200:\n print(proxy_ip, \"\\033[31m可用\\033[0m\")\n # 这里完成程序.............................\n return True\n else:\n print(proxy_ip, \"不可用\")\n return False\n","sub_path":"py3/苹果注册验证码/proxy_ip/TestProxyIP.py","file_name":"TestProxyIP.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"501395069","text":"'''\nRuntime: 56 ms, faster than 6.81% of Python3 online submissions for Most Common Word.\nMemory Usage: 13.7 MB, less than 5.88% of Python3 online submissions for Most Common Word.\n'''\n# https://leetcode.com/problems/most-common-word/submissions/\n\nimport re\nimport operator\n\nclass Solution:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n #paragraph = \"a, a, a, a, b,b,b,c, c\"\n #banned = [\"a\"]\n 
#paragraph.translate(str.maketrans('', '', string.punctuation))\n #paragraph.translate(str.maketrans(''.'', string.punctuation))\n #list1 = paragraph.split(\"[\\\\s\\\\-\\\\.\\\\'\\\\?\\\\,\\\\_\\\\@]+\")\n list1 = re.sub(r'[^\\w\\s]',' ',paragraph)\n list1 = list1.strip().split(' ')\n print(list1)\n dict1 = {}\n for a in list1:\n if a == '':\n continue\n temp = a.lower()\n \n if temp in banned:\n continue\n if temp in dict1.keys():\n dict1[temp] +=1\n else :\n dict1[temp] = 1\n \n sorted_d = sorted(dict1.items(), key=operator.itemgetter(1))\n print(sorted_d)\n return sorted_d[-1][0]\n \n","sub_path":"MostCommonWord.py","file_name":"MostCommonWord.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"642435842","text":"from ConsultCafeApp.models import Business\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.http import QueryDict\nfrom django.utils import timezone\nfrom django.core import serializers\nimport json\nimport logging\n\nlogger = logging.getLogger(\"consultcafe\")\n\ndef plainRouter(request):\n\tif request.method == 'GET':\n\t\treturn query(request)\n\telif request.method == 'POST':\n\t\treturn add(request)\n\telse:\n\t\treturn HttpResponse(status=400)\n\ndef idRouter(request, id):\n\tif request.method == 'PUT':\n\t\treturn edit(request, id)\n\telif request.method == 'DELETE':\n\t\treturn remove(id)\n\telse:\n\t\treturn HttpResponse(status=400)\n\ndef query(request):\n\tbusinesses = Business.objects.all()\n\tbusinesses = serializers.serialize('json', businesses)\n\treturn HttpResponse(businesses, content_type='application/json', status=200)\n\ndef add(request):\n\tdata = QueryDict(request.body)\n\tbusiness = Business(\n\t\t\tname=data['name'],\n\t\t\twage=data['wage'],\n\t\t\tincome=data['income'],\n\t\t\tcreated=timezone.now(),\n\t\t\tupdated=timezone.now()\n\t\t)\n\n\tbusiness.save()\n\treturn HttpResponse(serializers.serialize('json', Business.objects.all().filter(pk=business.name)), status=201)\n\ndef edit(request, id):\n\tbusiness = Business.objects.get(pk=id)\n\n\tif business is None:\n\t\treturn HttpResponse(status=404)\n\n\tdata = QueryDict(request.body)\n\n\tBusiness.objects.filter(pk=id).update(\n\t\tname=data.get('name', business.name),\n\t\twage=data.get('wage', business.wage),\n\t\tincome=data.get('income', business.income),\n\t\tupdated=timezone.now()\n\t)\n\n\treturn HttpResponse(status=200)\n\ndef remove(id):\n\tbusiness = Business.objects.get(pk=id)\n\n\tif business is None:\n\t\treturn HttpResponse(status=404)\n\n\tBusiness.objects.filter(pk=id).delete()\n\n\treturn HttpResponse(status=200)","sub_path":"ConsultCafeApp/api/business.py","file_name":"business.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"308740616","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n# needed if using pytest (not needed for py.test)\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/../dpkg\")\nimport dpkg\nfrom dpkg import readfile\nimport unittest\nimport pandas as pd\n\n\nclass ReadFileTest(unittest.TestCase):\n \"\"\"Read file test.\"\"\"\n\n # preparing to test\n def setUp(self):\n \"\"\" Setting up for the test \"\"\"\n print(\"ReadFileTest:setUp_:begin\")\n global f\n global n\n global joint_id\n f = \"test_files/tracks_ctr.csv\"\n n = 20\n joint_id = 'Track N'\n print(\"ReadFileTest:setUp_:end\")\n\n # ending the test\n def 
tearDown(self):\n \"\"\"Cleaning up after the test\"\"\"\n print(\"ReadFileTest:tearDown_:begin\")\n # do something...\n print(\"ReadFileTest:tearDown_:end\")\n\n # test routine import_file\n def test_01_import_file(self):\n \"\"\"Test routine import_file\"\"\"\n print(\"ReadFileTest:test_01_import_file\")\n self.assertTrue(os.path.exists(f))\n global read_file\n read_file = readfile.import_file(f, n)\n print(read_file)\n assert read_file is not None\n\n # test routine group_by_joint_id\n def test_02_group_by_joint_id(self):\n \"\"\"Test routine group_by_joint_id\"\"\"\n global grouped\n grouped = readfile.group_by_joint_id(read_file, joint_id)\n self.assertTrue(grouped is not None)\n self.assertIsInstance(grouped, pd.core.groupby.DataFrameGroupBy)\n print(\"ReadFileTest:test_02_group_by_joint_id\")\n\n # test routine split_in_objs_evnts\n def test_03_split_in_objs_evnts(self):\n \"\"\"Test routine split_in_objs_evnts\"\"\"\n dict_ = readfile.split_in_objs_evnts(joint_id, grouped)\n self.assertTrue(dict_ is not None)\n for key in dict_:\n print(\"key: %s , value: %s\" % (key, dict_[key]))\n print(\"ReadFileTest:test_03_split_in_objs_evnts\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_readfile.py","file_name":"test_readfile.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"227172040","text":"from collections import defaultdict\nn = int(input())\n\nhashmap = defaultdict(int)\nfor i in range(n):\n a,b = map(int,input().split())\n hashmap[a]+=1\n hashmap[b]-=1\n\ntimestamp = sorted(hashmap.keys())\n\npeople = 0\nans = 0\nfor i in timestamp:\n people += hashmap[i]\n ans = max(ans,people)\n\nprint(ans)","sub_path":"Sorting and Searching/Restaurant Customers/solAlt.py","file_name":"solAlt.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"14596373","text":"import json\nimport os\nimport re\nfrom urllib import request\nimport scrapy\nfrom scrapy import Request\nfrom baiduimage.items import BaiduimageItem\n\n# '''\n# D:\\My Documents\\Desktop\\百度图片项目\\baiduimage 1\\baiduimage\\spiders\\name2.txt\n# D:\\My Documents\\Desktop\\百度图片项目\\baiduimage 1\\baiduimage\\spiders\\image.py\n# '''\nclass ImageSpider(scrapy.Spider):\n name = 'image1'\n allowed_domains = ['baidu.com']\n img_urls = []\n pn=1\n def start_requests(self):\n os.chdir('D:/My Documents/Desktop/百度图片项目/baiduimage 1/baiduimage/spiders')\n #读取人名\n with open('name2.txt', 'r', encoding='UTF-8') as i:\n search = i.readlines()\n #防止出现阻塞切片(属于浅拷贝)读取\n for j in search[2500:3000]:\n base_url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + j.strip() + '&cg=girl&pn=' + str(\n self.pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'\n yield Request(url=base_url, meta={'name': j})\n def parse(self, response):\n item = BaiduimageItem()\n rsp = response.text\n data_list = json.loads(rsp)\n item['name'] =request.unquote(re.search(r'word=(.*?)&',response.url).group(1)).strip()\n for num, i in enumerate(data_list['imgs']):\n # item['img_url'] =i['objURL']\n self.img_urls.append(i['objURL'])\n item['img_urls']=self.img_urls\n yield item\n next_url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + item['name'] + '&cg=girl&pn=61&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'\n yield Request(next_url, 
callback=self.parse)\n\n\n\n","sub_path":"baiduimage/spiders/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486018887","text":"# LargeSmall.py - This program calculates the largest and smallest of three integer values. \n# Declare and initialize variables here\na = -50;\nb = 85;\nc = 79;\n\n\nlargest = 0\nsmallest = 0\n# Write assignment, if, or if else statements here as appropriate\n\n# if c > b:\n# largest = c\n# elif c > a :\n# largest = c\n# elif b > c:\n# largest = b\n# elif b > a:\n# largest = b\n# elif a > c:\n# largest = a\n# elif a > b:\n# largest = a\n\n# if a < b:\n# smallest = a\n# elif a < c :\n# smallest = a\n# elif b < a:\n# smallest = b\n# elif b < c:\n# smallest = b\n# elif c < a:\n# smallest = a\n# elif c < b:\n# smallest = a\ndef comparison(a,b,c): \n if a= (len(astr) - 1): break\n amount += 1\n # print(type(BufInt))\n # print(BufInt)\n # print()\n for i in range(int(BufInt)):\n Buf = Buf + astr[j]\n i += 1\n j = amount\n\n# print(astr[1])\n# print(type(int(astr[1])))\n\nBuf = Buf + \"\\n\"\noutput.write(Buf)\n\noutput.close()\n#\n#\n# S = {0,1,2,3,4,5,6,7,8,9}\n# p = int(2)\n# res = p in S\n# print(res)\n","sub_path":"python/codingliter.py","file_name":"codingliter.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"416816085","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torchvision import models\r\nfrom .base_model import BaseModel\r\nfrom . import networks\r\n\r\n\r\nclass Classifier(BaseModel):\r\n def initialize(self, opt):\r\n BaseModel.initialize(self, opt)\r\n h_size = opt.crop_size // 2**5\r\n\r\n if opt.model == 'VGG16':\r\n self.network = networks.VGG16(opt.crop_size, opt.n_class)\r\n elif opt.model == 'ResNet18':\r\n self.network = networks.ResNet18(opt.crop_size, opt.n_class)\r\n else:\r\n raise NotImplementedError('model [{}] is not implemented'.format(opt.model))\r\n\r\n if opt.isTrain:\r\n self.optimizer = torch.optim.Adam([p for p in self.network.parameters() if p.requires_grad],\r\n lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)\r\n self.criterion = nn.CrossEntropyLoss()\r\n\r\n def set_variables(self, data):\r\n self.image = data['image'].cuda()\r\n self.label = data['label'].cuda()\r\n\r\n def optimize_parameters(self):\r\n self.optimizer.zero_grad()\r\n logit = self.network(self.image)\r\n self.loss = self.criterion(logit, self.label)\r\n self.loss.backward()\r\n self.optimizer.step()\r\n\r\n def inference(self):\r\n with torch.no_grad():\r\n logit = self.network(self.image)\r\n pred = torch.max(logit.cpu(), dim=1)[1]\r\n return logit, pred\r\n","sub_path":"classifier/models/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"391027778","text":"import hashlib\nimport sys\nimport re\nimport os\nfrom collections import defaultdict\n\n# GET FILE TUPLE: (path, hash, size)\ndef get_file_tuple(path):\n \"\"\"Calculates the hash value for a file.\"\"\"\n hash_object = hashlib.sha256()\n file_size = os.path.getsize(path)\n with open(path, 'r') as inputfile:\n #for chunk in iter(lambda:inputfile.read(1024 * 8), \"\"):\n hash_object.update(inputfile.read().encode())\n inputfile.close()\n return (path, (hash_object.hexdigest(), 
file_size))\n\n\n#print(get_file_tuple(sys.argv[1]))\n#print(get_file_tuple(sys.argv[1])[0])\n#print(get_file_tuple(sys.argv[1])[1])\n\n# GENERATE THE LIST OF EXTRACTED TEXT PATHS\ndef txt_generator(targeted_root):\n txt_paths = []\n for root, dirs, files in os.walk(targeted_root):\n for name in files:\n txt_paths.append(os.path.join(root, name))\n return txt_paths\n\ndef getkey(item):\n return item[1][0]\n\n# SORT THE FILES TUPLES \ndef sort_tuples(paths):\n tuples = []\n for p, path in enumerate(paths):\n tuples.append(get_file_tuple(path))\n return sorted(tuples, key=getkey)\n\ndef duplicated_files(tuples):\n dup = defaultdict(list) \n unique = [tuples[0][0]]\n i = 0\n elem = tuples[0][0]\n while (i < len(tuples) - 1):\n if(tuples[i][1] == tuples[i+1][1]):\n dup[elem].append(tuples[i+1][0])\n else:\n elem = tuples[i+1][0]\n unique.append(elem)\n i += 1\n return (unique, dup) \n\ninfos = duplicated_files(sort_tuples(txt_generator(sys.argv[1]))) \nunique_files = infos[0]\nduplicated = dict(infos[1])\n#print(unique_files)\n#print(duplicated.keys())\n\nwith open('./unique_files', 'w') as unique:\n for i, name in enumerate(unique_files):\n unique.write('{}\\n'.format(name))\n unique.close()\n\nwith open('./duplicate_files', 'w') as duplicate:\n for i in duplicated:\n #print('{}: {}\\n'.format(i, duplicated[i]))\n duplicate.write('{}: {}\\n'.format(i, duplicated[i]))\n duplicate.close()\n","sub_path":"PREPROCESSING/DATA_MANAGER/duplicate_detection/hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"180504216","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, with_statement, division\n\n\"\"\"\ncosi299a- Cinderella\nalexluu@brandeis.edu\n\nDemo: (Cache size: 2)\n>>> \nEND OF FILE: chapter_01.txt\nCache size: 1\n0 \t {}\n0 \t {}\n1 \t {'p': [(1.0, 0, 'p')]}\n1 \t {'p': [(1.0, 0, 'p')]}\n2 \t {'t': [(1.0, 1, 'p')]}\n2 \t {'t': [(1.0, 1, 'p')]}\n3 \t {'p': [(1.0, 1, 'a')], 'b': [(1.0, 1, 'b2')], 'b2': [(1.0, 0, 'b2')]}\n3 \t {'p': [(1.0, 1, 'a')], 'b': [(1.0, 1, 'b2')], 'b2': [(1.0, 0, 'b2')]}\n4 \t {'t': [(1.0, 3, 'b')]}\n4 \t {'t': [(1.0, 3, 'b')]}\n5 \t {'i': [(1.0, 0, 'i')]}\n5 \t {'i': [(1.0, 0, 'i')]}\n6 \t {'i': [(1.0, 5, 'i')]}\n6 \t {'i': [(1.0, 5, 'i')], 't': [(1.0, 2, 't')]}\n7 \t {'p': [(1.0, 6, 't')]}\n7 \t {'p': [(1.0, 6, 't')]}\n8 \t {'i': [(1.0, 7, 'p')], 't2': [(1.0, 7, 'p')], 'i2': [(1.0, 7, 'i')], 'm': [(1.0, 7, 'p')]}\n8 \t {'i': [(1.0, 7, 'p')], 'm': [(1.0, 7, 'p')], 'i2': [(1.0, 7, 'i')], 't2': [(1.0, 7, 'p')]}\n9 \t {'t': [(1.0, 8, 'g')]}\n9 \t {'t': [(1.0, 8, 'g')]}\n10 \t {}\n10 \t {}\n11 \t {'i': [(1.0, 8, 'i2')], 'h': [(1.0, 10, 'h')], 't': [(1.0, 8, 't2')]}\n11 \t {'i': [(1.0, 8, 'i2')], 'h': [(1.0, 10, 'h')], 't': [(1.0, 8, 't2')]}\n12 \t {'i': [(1.0, 11, 't')], 'b2': [(1.0, 4, 't')]}\n12 \t {'i': [(1.0, 11, 't')], 'b2': [(1.0, 4, 't')]}\n13 \t {'i': [(1.0, 11, 'i')], 'i2': [(1.0, 12, 'i')], 'g': [(1.0, 9, 't')]}\n13 \t {'i': [(1.0, 11, 'i')], 'i2': [(1.0, 12, 'i')], 'i4': [(1.0, 12, 'i')], 'g': [(1.0, 9, 't')]}\n14 \t {'t': [(1.0, 13, 'g')]}\n14 \t {'t': [(1.0, 13, 'g')]}\n15 \t {'i': [(1.0, 13, 'i')], 'p': [(1.0, 13, 'p2')], 't3': [(1.0, 13, 'p2')], 'g': [(1.0, 14, 't')]}\n15 \t {'i': [(1.0, 13, 'i')], 'p': [(1.0, 13, 'p2')], 't3': [(1.0, 13, 'i4')], 'g': [(1.0, 14, 't')]}\n16 \t {'i': [(1.0, 15, 'i')]}\n16 \t {'i': [(1.0, 15, 'i')]}\n17 \t {'i': [(1.0, 16, 'i')], 'p': [(1.0, 13, 'i2')], 'p2': 
[(1.0, 15, 'p')]}\n17 \t {'i': [(1.0, 16, 'i')], 'p2': [(1.0, 15, 'p')], 'p': [(1.0, 13, 'p2')]}\n18 \t {'g': [(1.0, 15, 'g')]}\n18 \t {'g': [(1.0, 15, 'g')]}\n19 \t {'i': [(1.0, 17, 'i')]}\n19 \t {'i': [(1.0, 17, 'i')]}\n20 \t {'i': [(1.0, 19, 'i')], 'g': [(1.0, 15, 'g2')]}\n20 \t {'i': [(1.0, 19, 'i')], 'g': [(1.0, 15, 'g2')]}\n21 \t {'i': [(1.0, 20, 'i')]}\n21 \t {'i': [(1.0, 20, 'i')]}\n22 \t {}\n22 \t {}\n23 \t {'i': [(1.0, 21, 'i')]}\n23 \t {'i': [(1.0, 21, 'i')]}\n24 \t {'i': [(1.0, 23, 'i')]}\n24 \t {'i': [(1.0, 23, 'i')]}\n25 \t {'i': [(1.0, 24, 'i')], 't': [(1.0, 24, 'g')]}\n25 \t {'i': [(1.0, 24, 'i')]}\n26 \t {'i2': [(1.0, 25, 'i')]}\n26 \t {'i2': [(1.0, 25, 'i')]}\n27 \t {'i': [(1.0, 26, 'i2')], 'p2': [(1.0, 17, 'p')], 't3': [(1.0, 26, 't2')]}\n27 \t {'i': [(1.0, 26, 'i2')], 'p': [(1.0, 23, 'p')], 'p2': [(1.0, 17, 'p')], 't3': [(1.0, 26, 't2')]}\n28 \t {'i': [(1.0, 27, 'i')], 'p': [(1.0, 27, 'p')]}\n28 \t {'i': [(1.0, 27, 'i')], 'p': [(1.0, 27, 'p')]}\n29 \t {'h2': [(1.0, 11, 'h')], 'h': [(1.0, 28, 'p')], 's2': [(1.0, 28, 'p')]}\n29 \t {'h2': [(1.0, 11, 'h')], 's2': [(1.0, 28, 'p')], 'h': [(1.0, 28, 'p')]}\n30 \t {'i': [(1.0, 28, 'i')], 'p': [(1.0, 28, 'p')], 'b': [(1.0, 15, 'b2')], 'f': [(1.0, 5, 'j')]}\n30 \t {'i': [(1.0, 28, 'i')], 'p': [(0.5, 29, 's2'), (0.5, 29, 'h')], 'b': [(1.0, 15, 'b2')], 'f': [(1.0, 5, 'j')]}\n31 \t {'i': [(1.0, 30, 'i')]}\n31 \t {'i': [(1.0, 30, 'i')]}\n32 \t {'i': [(1.0, 31, 'i')], 'h': [(1.0, 31, 'h')]}\n32 \t {'i': [(1.0, 31, 'i')], 'h': [(1.0, 31, 'h')]}\n33 \t {'g': [(1.0, 32, 'h')], 'm2': [(1.0, 32, 'i')]}\n33 \t {'g': [(1.0, 24, 'g')]}\n58.0 59.0 52.0\nR: 0.8966\tP: 0.8814\tF1: 0.8889\n\n\nNumber of nodes: 306\nNumber of edges: 310\nNumber of ancas: 100\nNumber of coref links: 59\n>>> \n\n>>>\n0 \t [(12.0, 'i'), (6.0, 'p')]\n1 \t [(8.0, 'p'), (5.0, 'b2'), (4.0, 'a')]\n2 \t [(6.333333333333333, 't2'), (4.5, 't')]\n3 \t [(10.666666666666666, 'b'), (8.0, 'b2'), (6.666666666666666, 'p')]\n4 \t [(20.666666666666668, 't')]\n5 \t [(10.0, 'i'), (6.0, 'a')]\n6 \t [(20.666666666666668, 'i'), (4.5, 't')]\n7 \t [(8.0, 'p')]\n8 \t [(12.666666666666666, 'i2'), (8.666666666666666, 'i'), (7.333333333333332, 'g'), (6.5, 't2'), (4.666666666666666, 'm')]\n9 \t [(8.666666666666666, 't')]\n10 \t [(8.0, 'o'), (8.0, 'h')]\n11 \t [(8.5, 't'), (8.0, 'i'), (6.0, 'h')]\n12 \t [(10.0, 'i'), (7.0, 'b2'), (4.0, 'e')]\n13 \t [(12.0, 'i'), (9.333333333333334, 'g'), (5.333333333333333, 'i4'), (5.142857142857142, 'i2'), (4.0, 'p2'), (4.0, 'i3')]\n14 \t [(10.0, 't')]\n15 \t [(22.666666666666668, 'i'), (10.666666666666666, 'g'), (6.666666666666666, 'p'), (3.833333333333333, 't3'), (2.6666666666666665, 'a2'), (2.4761904761904763, 'h'), (2.4761904761904763, 'g3'), (2.4761904761904763, 'g2'), (2.4761904761904763, 'a5')]\n16 \t [(10.666666666666666, 'i'), (4.666666666666666, 'c'), (3.0, 'p')]\n17 \t [(8.0, 'i'), (4.0, 'p2'), (4.0, 'p')]\n18 \t [(8.0, 'g'), (6.0, 'c'), (4.666666666666666, 'a3'), (4.0, 't2'), (4.0, 't')]\n19 \t [(16.0, 'i'), (4.0, 'p'), (3.6, 'a3')]\n20 \t [(12.666666666666666, 'i'), (4.666666666666666, 'g')]\n21 \t [(12.666666666666666, 'i'), (4.666666666666666, 'c'), (4.0, 's')]\n22 \t [(6.666666666666666, 'o'), (6.0, 'k'), (4.0, 'l')]\n23 \t [(10.0, 'i'), (6.5, 'p'), (6.0, 'm3')]\n24 \t [(10.0, 'i')]\n25 \t [(10.0, 'i'), (8.0, 't'), (4.0, 'a')]\n26 \t [(7.6, 'i2'), (5.166666666666666, 't3')]\n27 \t [(19.333333333333336, 'i'), (7.5, 'p'), (4.933333333333334, 't3'), (4.0, 'p2')]\n28 \t [(14.0, 'i'), (6.0, 'p')]\n29 \t [(8.0, 's2'), (8.0, 'h'), (4.666666666666666, 
'h2')]\n30 \t [(10.0, 'i'), (5.333333333333333, 'p'), (4.666666666666666, 's'), (4.666666666666666, 'f'), (4.666666666666666, 'b')]\n31 \t [(12.0, 'i'), (5.333333333333333, 'l'), (5.0, 'd')]\n32 \t [(10.0, 'i'), (7.333333333333333, 'h'), (4.666666666666666, 'p'), (4.666666666666666, 'n2'), (4.666666666666666, 'g'), (4.666666666666666, 'b')]\n33 \t [(8.666666666666666, 'g'), (4.333333333333333, 'm2')]\n>>>\n|| no givenness in salience factors\n|||| cache size: 1 -> R: 0.8966\tP: 0.8814\tF1: 0.8889\n|||| cache size: 3 -> R: 0.8966\tP: 0.8525\tF1: 0.8739\n|||| cache size: 4 -> R: 0.8966\tP: 0.8525\tF1: 0.8739\n\n1st and 2nd pronoun handling\nantecas <-> 'conjunction'\nedit semantic_match()\nmore for PREDICATE_NOUNS -> abandon\n3 \t {..., 'b2': (1, set(['p']))} ->(0, set(['b2']))\nvs\n8 \t {'..., 'm': (7, set(['p'])), ...}\n||and other semantic cases\nadd 'one' -> pronouns (in constant.py and pronouns.txt) ||22.o\nremove deixis in ancas and antecas\n3rd pronouns handling\nupdate coref info\n||previous stable version: 104\ncoref nodes handling\nMUC\ncorrect rank_ancas for the case of conjunction\n\"\"\"\n\nfrom constants import *\nfrom semaland_utils import *\nfrom semaland_amr_graph import *\nfrom semaland_semantic_features import *\nfrom semaland_string_features import *\nimport networkx as nx\nimport re\nfrom collections import defaultdict\nimport xml.etree.ElementTree as ET\nfrom operator import itemgetter\nfrom copy import deepcopy\n\npros = dict()\nfor line in read_lines('Lists/pronouns.txt'):\n tokens = line.split()\n pro = dict()\n pro['num']=tokens[1]\n pro['gen']=tokens[2]\n pro['per']=tokens[3]\n pros[tokens[0]]=pro\n\ndef is_pronominal(g,n):\n \"\"\" 1/0 (True/False) \"\"\"\n if g.node[n]['content'].ful_name_ in pros:\n return 1 # True\n return 0 # False\n\ndef is_pronominal_quote(g,n): # 1st and 2nd person pronouns\n \"\"\" 1/0 (True/False) \"\"\"\n if g.node[n]['content'].ful_name_ in pros and \\\n pros[g.node[n]['content'].ful_name_]['num'] in {1,2}:\n return 1 # True\n return 0 # False\n\ndef is_given(g,n): # node having at least one antecedent <- simplest\n \"\"\" 1/0 (True/False) \"\"\"\n## if 'coref' in g.node[n]:\n## return 1 # True\n return 0 # False \n\ndef classify_node(g,n): # g: AMR graph, n: AMR node\n \"\"\" -> '@', event, special concept, pronoun, conjunction, constant or other \"\"\"\n if n=='@':\n return n\n node_concept = g.node[n]['content'].ful_name_\n if re.match(r'\\S+-\\d+',node_concept):\n return 'event' # node with sense tag\n if node_concept in AMR_SPECIAL_CONCEPTS:\n return 'special'\n if node_concept in NOMINATIVE_PRONOUNS:\n return 'pronoun'\n if node_concept in AMR_CONJUNCTIONS:\n return 'conjunction'\n if node_concept in DEIXIS:\n return 'deixis'\n if node_concept=='':\n return 'constant'\n return 'other' # named entities and abstract concepts???\n\ndef get_antecas(g): #antecas: antecedent candidates\n \"\"\" remove '@', events, special concepts, and constants \"\"\"\n return set(n for n in g if classify_node(g,n) not in\n {'@','event','special','constant','conjunction','deixis'})\n\n# assumption: the maximum number of arguments of a predicate < 10\nl_re_in = r'^:ARG\\d$' # label pattern of core role in-edge\nl_re_out = r'^:ARG\\d-of$' # label pattern of core role out-edge\n\ndef get_ancas(g): # g: AMR graph\n \"\"\" core roles or root node without sense tag \"\"\"\n output = set()\n ns = set(n for n in g if classify_node(g,n) not in\n {'@','event','special','constant','deixis'})\n #for n in get_antecas(g):\n for n in ns:\n # root node, :ARGX, or 
:ARGX-of\n if '@' in g.predecessors(n) or \\\n check_edge_label(g,n,'in',l_re_in) or \\\n check_edge_label(g,n,'out',l_re_out): \n output.add(n)\n for n in output:\n if is_pronominal_quote(g,n):\n output.remove(n)\n return output\n\ndef update_frequency(g,n,e_di,l_re): #assumption: existence of edges\n \"\"\" -> number of instances of each core role type a node plays \"\"\"\n #output = defaultdict(int)\n output = dict()\n for e in get_edges_by_direction(g,n,e_di):\n if 'label' in e[2] and re.match(l_re,e[2]['label']):\n arg_index = e[2]['label'][4]\n if arg_index in output:\n output[arg_index] += 1\n else:\n output[arg_index] = 1\n return output\n\ndef update_frequency_ext(g,n,e_di,l_re,conj): # extended for conjunction case\n \"\"\" ... \"\"\"\n if conj:\n output = dict()\n freq_n = update_frequency(g,n,e_di,l_re)\n keys_n = set(freq_n.keys())\n freq_conj = update_frequency(g,conj,e_di,l_re)\n keys_conj = set(freq_conj.keys())\n for i in keys_n.intersection(keys_conj):\n output[i] = freq_n[i] + freq_conj[i]\n for i in keys_n.difference(keys_conj):\n output[i] = freq_n[i]\n for i in keys_conj.difference(keys_n):\n output[i] = freq_conj[i]\n return output\n \n return update_frequency(g,n,e_di,l_re)\n\n#def get_salience_factors(g,n):\ndef get_salience_factors(g,n,conj=None):\n # g: AMR graph, n: AMR node\n \"\"\" distance from graph root, core roles, pronominalization, giveness \"\"\"\n output = dict()\n output['dis'] = nx.shortest_path_length(g,'@',n)\n #output['in'] = update_frequency(g,n,'in',l_re_in)\n output['in'] = update_frequency_ext(g,n,'in',l_re_in,conj)\n #output['out'] = update_frequency(g,n,'out',l_re_out)\n output['out'] = update_frequency_ext(g,n,'out',l_re_out,conj)\n output['pro'] = is_pronominal(g,n)\n output['giv'] = is_given(g,n)\n return output \n\ndef score_core_roles(s_factors,e_di):\n \"\"\" smaller index, better score \"\"\"\n output = float()\n for k in s_factors[e_di]:\n output += s_factors[e_di][k]/(int(k) + 1)\n return output\n\n# SALIENCE formula\ndef salience(s_factors,w_dis,w_in,w_out,w_pro,w_giv): # recency excluded -> cache model\n \"\"\" -> salience score \"\"\"\n output = sum([\n w_dis/s_factors['dis'],\n w_in*score_core_roles(s_factors,'in'),\n w_out*score_core_roles(s_factors,'out'),\n w_pro*s_factors['pro'],\n w_giv*s_factors['giv'],\n ])\n return output\n\n\n#coreference chains and links\ndef update_coref_chains(text,i,ni,j,nj,wij):\n # text: list of AMR graphs\n # i/j: index of AMR graph containing anaphor ni/ antecedent nj\n # wij: weight of coreference link between ni and nj\n \"\"\" -> updated AMR text with coreference information \"\"\"\n if 'coref' not in text[i].node[ni]:\n text[i].node[ni]['coref'] = list() # cannot be set()\n # because components' type is 'list' (unhashable)\n coref_link = (wij,j,nj)\n if 'coref' in text[j].node[nj]:\n for chain in text[j].node[nj]['coref']:\n chain.insert(0,coref_link)\n text[i].node[ni]['coref'].append(chain)\n else:\n text[i].node[ni]['coref'].append([coref_link])\n \ndef update_coref_links(link_dict,i,ni,j,nj,wij):\n # link_dict: default dict of resolved coreference links\n # i/j: index of AMR graph containing anaphor ni/ antecedent nj\n # wij: weight of coreference connection between ni and nj\n \"\"\" -> updated AMR text with coreference information \"\"\"\n coref_link = (wij,j,nj)\n if ni in link_dict[i]:\n link_dict[i][ni].append(coref_link)\n else:\n link_dict[i][ni] = [coref_link]\n\n\ndef define_cache(text,cache_size,i):\n # text: list of AMR graphs, i: index of current AMR graph\n \"\"\" -> lists 
of indexes of graphs in / out of cache\"\"\"\n if i>0:\n if i>cache_size:\n cache = [(i-x-1) for x in range(cache_size)]\n non_cache = range(i-cache_size-1,-1,-1)\n else:\n cache = range(i-1,-1,-1)\n non_cache = None\n return cache, non_cache\n return None\n \ndef rank_ancas(g):\n \"\"\" \"\"\"\n output = dict()\n ancas = get_ancas(g)\n s_factors = dict()\n for n in ancas:\n if classify_node(g,n)!='conjunction':\n output[n] = salience(get_salience_factors(g,n),\n 8,4,1,2,1)\n else:\n temp = set()\n for nn in g.node[n]['content'].next_:\n if re.match(l_re_op,nn.edge_label_) and \\\n (classify_node(g,nn.name_) not in \\\n {'event','special','constant'}):\n temp.add(nn.name_)\n for nnn in temp:\n #output[nnn] = salience(get_salience_factors(g,nnn),\n output[nnn] = salience(get_salience_factors(g,nnn,n),\n 8,4,1,2,1)\n return output\n\ndef new_concept(node):# node: node structure of amr_reader\n \"\"\" -> 'sure'/'unsure' \"\"\"\n for next_node in node.next_:\n if (next_node.edge_label_==':mod' and \\\n next_node.ful_name_ in {'another','any','some'}) or \\\n (next_node.edge_label_==':quant' and \\\n (next_node.ful_name_ in {'many','much','lot','few','little'} or \\\n alnum_is(next_node.ful_name_)=='d')):\n return 'sure'\n return 'unsure'\n\ndef ne_constraint(n1,n2): # ne: named entity\n \"\"\" -> passed/failed \"\"\"\n if n1.is_entity_ and n2.is_entity_ and \\\n n1.entity_name_!=n2.entity_name_:# <-> string match\n return 'failed'\n return 'passed'\n \ndef string_match(anca_node,text,i,m_function,m_value,link_dict):\n # m_function/m_value: matching function/value\n \"\"\" -> set of antecas string-matching anca \"\"\"\n output = set()\n if new_concept(anca_node)=='sure' or \\\n anca_node.ful_name_=='thing': # anca_node.ful_name_ in PREDICATE_NOUNS\n return output\n g = text[i]\n ranked_ancas = rank_ancas(g)\n antecas = get_antecas(g)\n for n in antecas:\n n_node = g.node[n]['content'] \n n_str = n_node.ful_name_\n if n_str in {'he','she','it','they'}:\n for cn in get_coref_nodes(text,i,n,link_dict):\n cn_node = text[cn[0]].node[cn[1]]['content']\n cn_str = cn_node.ful_name_\n if cn_str!=n_str: # not pronominal\n n_str = cn_str\n n_node = cn_node\n break \n if m_function(anca_node.ful_name_,n_str)==m_value and \\\n ne_constraint(anca_node,n_node)=='passed':\n output.add(n)\n #if any((n in ancas) for n in matched):\n if output.intersection(ranked_ancas):\n output = output.intersection(ranked_ancas)\n if len(output)>1:\n highest = max(ranked_ancas[n] for n in output)\n output = set(n for n in output if ranked_ancas[n]==highest)\n return output\n\ndef semantics_of_node(node):\n \"\"\" -> set of node name(s) and the corresponding POS\"\"\"\n output = set()\n #if node.ful_name_ not in PREDICATE_NOUNS:\n if node.ful_name_!='thing':\n pos = POS_SET\n output.add(node.ful_name_)\n else:\n pos = {'v'}\n for next_node in node.next_:\n if re.match(l_re_out,next_node.edge_label_):\n## if (node.ful_name_=='thing' and \\\n## next_node.edge_label_==':ARG1-of') or \\\n## (node.ful_name_!='thing' and \\\n## next_node.edge_label_==':ARG0-of'):\n output.add(next_node.ful_name_.split('-')[0]) # event\n return output.difference(NOMINATIVE_PRONOUNS),pos\n\ndef get_sense_index_sum(pair): # pair: output of get_best_synset_pair()\n \"\"\" -> sum of sense indexes of two synsets in the pair \"\"\"\n output = int()\n for s in pair[0]:\n output += int(s.name().split('.')[-1])\n return output \n\ndef semantic_match(anca_node,text,i,\n threshold,delta,sem_depth,sense_index_sum,\n link_dict):\n # anca_node: node structure of 
amr_reader\n \"\"\" -> set of node names \"\"\"\n output = set()\n if new_concept(anca_node)=='sure':\n return output\n words1,pos1 = semantics_of_node(anca_node)\n words2,pos2 = set(),set()\n g = text[i]\n ranked_ancas = rank_ancas(g)\n antecas = get_antecas(g)\n for n in antecas:\n n_node = text[i].node[n]['content']\n if n_node.ful_name_ in {'he','she','it','they'}:\n for cn in get_coref_nodes(text,i,n,link_dict):\n cn_node = text[cn[0]].node[cn[1]]['content']\n if cn_node.ful_name_!=n_node.ful_name_: # not pronominal\n words2,pos2 = semantics_of_node(cn_node)\n break\n else: \n words2,pos2 = semantics_of_node(n_node)\n for w1 in words1:\n for w2 in words2:\n## if gender_constraint(w1,w2)=='passed' and \\\n## animacy_constraint(w1,w2)=='passed':\n if gender_constraint(w1,w2)=='passed' or \\\n animacy_constraint(w1,w2)=='passed':\n semantic_pair = get_best_synset_pair(w1,w2,pos1,pos2)\n temp_sim = threshold # lch_similarity\n temp_dep = 3 # min_depth <- hard code\n temp_sum = sense_index_sum\n if pos1=={'v'} or pos2=={'v'} or \\\n (semantic_pair and semantic_pair[3] and \\\n semantic_pair[3]>sem_depth):\n temp_sim = threshold - delta\n temp_dep = 0 # <- hard code\n if pos1=={'v'} or pos2=={'v'} or \\\n animacy(w1).intersection(w2)!=set():\n temp_sum = sense_index_sum + 6 # bonus <- hard code\n if semantic_pair and semantic_pair[1]>=temp_sim and \\\n semantic_pair[3] and semantic_pair[3]>temp_dep and \\\n ne_constraint(anca_node,n_node)=='passed' and \\\n get_sense_index_sum(semantic_pair)<=temp_sum:\n output.add((semantic_pair[1],n_node.name_))\n if output:\n best_sem_sim = max(output)[0]\n output = set(e[1] for e in output if e[0]==best_sem_sim)\n if output.intersection(ranked_ancas):\n output = output.intersection(ranked_ancas)\n if len(output)>1:\n highest = max(ranked_ancas[n] for n in output)\n output = set(n for n in output if ranked_ancas[n]==highest)\n return output\n\ndef resolve_pro(p_node,g,threshold): # p_node: node of 3rd pronoun\n \"\"\" -> set of node names \"\"\"\n ranked_ancas = rank_ancas(g)\n temp = [(n,r) for (n,r) in sorted(ranked_ancas.items(),\n key=itemgetter(1),reverse=True)\n if r>=threshold and \\\n not((g.node[n]['content'].ful_name_ in \\\n NOMINATIVE_PRONOUNS.difference({'one'})) and \\\n g.node[n]['content'].ful_name_!=p_node.ful_name_) and \\\n gender_constraint_pro(p_node.ful_name_,\n g.node[n]['content'].ful_name_)=='passed' and \\\n animacy_constraint_pro(p_node.ful_name_,\n g.node[n]['content'].ful_name_)=='passed']\n if not temp:\n output = set(n for n in get_antecas(g).difference(ranked_ancas)\n if g.node[n]['content'].ful_name_==p_node.ful_name_)\n return output\n output = set(n for n,r in temp if r==temp[0][1])\n return output\n\ndef get_coref_nodes(text,i,n,link_dict):\n \"\"\" recursive function to get all nodes co-referred with n of text[i] \"\"\"\n output = list()\n if (i in link_dict) and \\\n (n in link_dict[i]) and \\\n len(link_dict[i][n])==1:\n ante = link_dict[i][n][0][1:]\n output.append(ante)\n output.extend(get_coref_nodes(text,ante[0],ante[1],link_dict))\n return output\n\ndef resolve_anca(n,i,text,cache_info,link_dict):\n \"\"\" -> set of matched nodes \"\"\"\n matched = set()\n threshold =[2.7,3.5]\n sem_depth = [5,3]\n sense_index_sum = [2,4]\n n_node = text[i].node[n]['content']\n if classify_node(text[i],n)=='other': \n for p in range(len(cache_info)):\n if cache_info[p]: # for non_cache partition\n for j in cache_info[p]:\n matched = string_match(n_node,text,j,\n truecase_matching_is,'f',link_dict)\n if matched:\n matched = 
(j,matched)\n break\n else:\n matched = semantic_match(n_node,text,j,\n threshold[p],0.6,sem_depth[p],\n sense_index_sum[p],link_dict)\n if matched:\n matched = (j,matched)\n break\n if matched:\n break\n elif n_node.ful_name_ in {'i','we','you',\"y'all\"}:\n for j in range(i-1,-1,-1):\n matched = string_match(n_node,text,j,\n truecase_matching_is,'f',link_dict)\n if matched:\n matched = (j,matched)\n break\n elif n_node.ful_name_ in {'he','she','it','they'}:\n for j in cache_info[0]:\n matched = resolve_pro(n_node,text[j],6.0)\n if matched:\n matched = (j,matched)\n break \n return matched\n\nl_re_op = r'^:op\\d+$'\n#def resolve_ancas_graph(i,text,cache_size):\ndef resolve_ancas_graph(i,text,cache_size,link_dict):\n \"\"\" -> dict whose keys are ancas and whose values are sets of antecas\"\"\"\n output = dict()\n cache_info = define_cache(text,cache_size,i)\n if cache_info:\n ranked_ancas = [n for n,_ in sorted(rank_ancas(text[i]).items(),\n key=itemgetter(1),\n reverse=True)]\n for n in ranked_ancas:\n output[n] = resolve_anca(n,i,text,cache_info,link_dict)\n if output[n]:\n w = 1/(len(output[n][1])) # weight of coref link\n for m in output[n][1]:\n update_coref_chains(text,i,n,output[n][0],m,w) # <- this change rank values??? (<-> giveness???)\n update_coref_links(link_dict,i,n,output[n][0],m,w)\n return output \n \ndef resolve_ancas_text(text,cache_size):\n \"\"\" -> output (legacy) and link dict (key info) \"\"\"\n output = dict()\n link_dict = dict()\n for i in range(len(text)):\n link_dict[i] = dict()\n output[i] = resolve_ancas_graph(i,text,cache_size,link_dict)\n return output,link_dict\n\ndef get_gold(f): # f: xml file of anaphora resolution annotation\n \"\"\" -> gold link dict \"\"\"\n output = dict()\n tree = ET.parse(f)\n for (i,amr) in enumerate(tree.findall('./sntamr/amr')[1:]):\n output[i] = dict()\n if 'ana' in amr.attrib:\n for link in amr.attrib['ana'].split():\n n,jm = link.split(':')\n j,m = jm.split('.')\n output[i][n] = [(1.0,int(j)-2,m)]\n return output\n\ndef muc(gold_dict,test_dict):\n \"\"\" -> MUC F-measure \"\"\"\n gold = float()\n test = float()\n correct = float()\n for i in gold_dict:\n gold += len(gold_dict[i])\n test += len(test_dict[i])\n for n in test_dict[i]:\n if n in gold_dict[i]:\n for l in test_dict[i][n]:\n if l[1:]==gold_dict[i][n][0][1:]:\n correct += l[0]\n print(gold,test,correct)\n r = correct/gold # recall\n p = correct/test # precision\n f1 = 2*r*p/(r+p)\n return r,p,f1\n \n\ndef main():\n \"\"\" \"\"\"\n pass\n\nif __name__ == \"__main__\":\n #main()\n amr_table = get_amr_table_path(DATA_AMR_LPP_01)\n docid = 'lpp_1943'\n doc = amr_table[docid]\n annotated_doc = ET.parse(DATA_AMR_ANNOTATED_LPP_01)\n quote_info = [('quote' in amr.attrib)\n for amr in annotated_doc.findall('./sntamr/amr')]\n text = [AMRGraph(sen=doc[k],quote=quote_info[k-1]) # doc index starting at 1\n for k in sorted(doc.keys())]\n text[4].add_edge('c2','b',label=':ARG0')\n text[4].add_edge('c2','p',label=':ARG1')\n # http://www.nltk.org/book/ch04.html\n working_text = deepcopy(text[1:])\n## print('Cache size: 1')\n## output,link_dict = resolve_ancas_text(working_text,1)\n## \n## gold_dict = get_gold(DATA_AMR_ANNOTATED_LPP_01)\n##\n## for i in gold_dict:\n## print(i,'\\t',gold_dict[i])\n## print(i,'\\t',link_dict[i])\n## r,p,f1 = muc(gold_dict,link_dict)\n## print(\"R: {0:.4f}\\tP: {1:.4f}\\tF1: {2:.4f}\".format(r,p,f1))\n## print('\\n')\n print('Number of nodes:', sum([(g.number_of_nodes()-1) for g in text]))\n print('Number of edges:', sum([(g.number_of_edges()-1) for g 
in text]))\n #print('Number of ancas:', sum([len(output[k]) for k in output]))\n## print('Number of coref links:', sum([len(link_dict[k]) for k in link_dict]))\n \n## ranked_ancas = [rank_ancas(g) for g in working_text]\n## for i in range(len(ranked_ancas)):\n## temp = sorted([(v,k) for (k,v) in ranked_ancas[i].items()],\n## reverse=True)\n## print(i,'\\t',temp)\n","sub_path":"semaland_anaphora_resolution.py","file_name":"semaland_anaphora_resolution.py","file_ext":"py","file_size_in_byte":26213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"157493018","text":"#!/usr/bin/python2\n# -*- coding: Utf-8 -*-\n# Raphael Javaux - 2 nov. 2010\n\nimport struct # C's structs reader\n\nimport pygtk, gtk\npygtk.require('2.0')\n\nKEY_SEPARATOR = '\\0'\nFREESPACE_COLOR = gtk.gdk.color_parse('#A4E386')\nPADDING_COLOR = gtk.gdk.color_parse('#E3A67B')\n\ndef info_dialog(title, dic = {}):\n\tdef key_value_box(key, value):\n\t\tbox = gtk.HBox(True)\n\t\t\n\t\tkey_label = gtk.Label()\n\t\tkey_label.set_markup('{0}'.format(key))\n\t\tbox.pack_start(key_label)\n\t\tbox.pack_start(gtk.Label(value))\n\t\treturn box\n\t\n\tdialog = gtk.Dialog('Details {0}'.format(title))\n\tdialog.set_size_request(200, 100)\n\t\t\n\tfor key, value in dic.iteritems():\n\t\tdialog.vbox.pack_start(key_value_box(key, value))\n\t\n\tdialog.show_all()\n\nclass Element():\n\tdef __init__(self, in_file_offset, offset, size, key, value):\n\t\tself.in_file_offset = in_file_offset\n\t\tself.offset = offset\n\t\tself.size = size\n\t\tself.key = key\n\t\tself.value = value\n\t\t\n\t@property\n\tdef total_size(self):\n\t\treturn self.size + 6\n\t\t\n\t@property\n\tdef padding(self):\n\t\treturn (4 - (self.total_size % 4)) % 4;\n\t\n\t@property\n\tdef freespace(self):\n\t\treturn self.offset - (self.total_size + self.padding)\n\t\t\n\t@property\n\tdef widgets(self):\n\t\tdef space_button(callback, title=None, color=None, width=-1):\n\t\t\tbut = gtk.Button(title)\n\t\t\tif color != None:\n\t\t\t\tbut.modify_bg(gtk.STATE_NORMAL, color)\n\t\t\t\tbut.modify_bg(gtk.STATE_PRELIGHT, color)\n\t\t\t\n\t\t\tbut.set_size_request(width, -1)\n\t\t\t\t\n\t\t\tbut.connect('clicked', callback)\n\t\t\t\n\t\t\treturn but\n\t\t\t\n\t\tyield space_button(self.details, title=self.key, width=max(25, self.total_size * 4))\n\t\t\n\t\tif self.padding > 0:\n\t\t\tyield space_button(self.details_padding, color=PADDING_COLOR, width=self.padding * 4)\n\t\t\n\t\tif self.freespace > 0:\n\t\t\tyield space_button(self.details_libre, color=FREESPACE_COLOR, width=max(25, self.freespace * 4))\n\n\tdef details(self, event):\n\t\tinfo_dialog(self.key, { 'In file offset' : self.in_file_offset,\n\t\t\t'Offset' : self.offset,\n\t\t\t'Size' : self.size,\n\t\t\t'Total size' : self.total_size,\n\t\t\t'Key' : self.key,\n\t\t\t'Value': self.value\n\t\t})\n\t\t\n\tdef details_padding(self, event):\n\t\tinfo_dialog('padding ({0})'.format(self.key),\n\t\t\t{ 'In file offset' : self.in_file_offset + self.total_size,\n\t\t\t'Padding' : self.padding\n\t\t})\n\t\n\tdef details_libre(self, event):\n\t\tinfo_dialog('freespace ({0})'.format(self.key),\n\t\t\t{ 'In file offset' : self.in_file_offset + self.total_size + self.padding,\n\t\t\t'Free space' : self.freespace\n\t\t})\n\nclass Toolbar(gtk.Toolbar):\n\tdef __init__(self, win):\n\t\tsuper(Toolbar, self).__init__()\n\t\t\n\t\tself.win = win\n\t\t\n\t\tself.open_but = gtk.ToolButton(gtk.STOCK_OPEN)\n\t\tself.open_but.connect('clicked', self.open)\n\t\tself.refresh_but = 
gtk.ToolButton(gtk.STOCK_REFRESH)\n\t\tself.refresh_but.connect('clicked', self.refresh)\n\t\t\n\t\tself.insert(self.open_but, -1)\n\t\tself.insert(self.refresh_but, -1)\n\t\n\tdef open(self, event):\n\t\tdialog = gtk.FileChooserDialog(title='Choisissez un fichier',\n\t\t\taction=gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\tbuttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK)\n\t\t)\n\t\t\t\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.win.file.file = dialog.get_filename()\n\t\t\n\t\tdialog.destroy()\n\t\n\tdef refresh(self, event):\n\t\tself.win.file.refresh()\n\t\nclass File(gtk.ScrolledWindow):\n\tdef __init__(self, win):\n\t\tsuper(File, self).__init__(hadjustment=None, vadjustment=None)\n\t\t\n\t\tself.hbox = gtk.HBox()\n\t\t\n\t\tself.add_with_viewport(self.hbox)\n\t\tself.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_NEVER)\n\t\t\n\t\tself.file = None\n\t\n\t@property\n\tdef file(self):\n\t\treturn self._file\n\t\n\t@file.setter\n\tdef file(self, value):\n\t\tself._file = value\n\t\tself.refresh()\n\t\n\tdef refresh(self):\n\t\tif self.file != None:\n\t\t\ttry:\n\t\t\t\tself.clear()\n\t\t\t\tfor elem in self.gen_elements():\n\t\t\t\t\tfor widget in elem.widgets:\n\t\t\t\t\t\tself.hbox.pack_start(widget, False, False)\n\t\t\texcept Exception as e:\n\t\t\t\tself.clear()\n\t\t\t\tself.hbox.pack_start(gtk.Label(\"Erreur\"))\n\t\t\t\tprint (e)\n\t\t\t\n\t\t\tself.hbox.show_all()\n\t\n\tdef gen_elements(self):\n\t\tdef read_headers(f):\n\t\t\tdata = f.read(6)\n\t\t\t\n\t\t\tif data:\n\t\t\t\treturn struct.unpack('ih', data) # (int offset, short size)\n\t\t\telse:\n\t\t\t\treturn None\n\t\t\n\t\tdef read_data(f, length):\n\t\t\tdef list_to_str(l):\n\t\t\t\treturn reduce(lambda acc, val: acc + str(val), l, \"\")\n\t\t\t\n\t\t\tkey = []\n\t\t\tvalue = []\n\t\t\tcurr = key\n\t\t\tfor byte in f.read(length):\n\t\t\t\tif byte == KEY_SEPARATOR:\n\t\t\t\t\tcurr = value\n\t\t\t\telse:\n\t\t\t\t\tcurr.append(struct.unpack('c', byte)[0])\n\t\t\t\n\t\t\treturn list_to_str(key), list_to_str(value)\n\t\t\n\t\twith open(self.file) as f:\n\t\t\tin_file_offset = 0\n\t\t\theaders = read_headers(f)\n\t\t\twhile headers != None:\n\t\t\t\tdata = read_data(f, headers[1])\n\t\t\t\t\n\t\t\t\tyield Element(in_file_offset, headers[0], headers[1], data[0], data[1])\n\t\t\t\t\n\t\t\t\tf.seek(headers[0] - (headers[1] + 6), 1)\n\t\t\t\tin_file_offset += headers[0]\n\t\t\t\t\n\t\t\t\theaders = read_headers(f)\n\t\n\tdef clear(self):\n\t\tself.hbox.foreach(lambda widget: self.hbox.remove(widget))\n\nclass Window(gtk.Window):\n\tdef __init__(self):\n\t\tsuper(Window, self).__init__()\n\t\t\n\t\tself.set_title('Lecteur de fichier')\n\t\tself.set_size_request(700, 100)\n\t\t\n\t\tself.vbox = gtk.VBox()\n\t\t\n\t\tself.toolbar = Toolbar(self)\n\t\tself.file = File(self)\n\t\t\n\t\tself.vbox.pack_start(self.toolbar, False, False)\n\t\tself.vbox.pack_end(self.file)\n\t\tself.add(self.vbox)\n\t\t\nif __name__ == '__main__':\n\twin = Window()\n\twin.show_all()\n\t\n\t#win.file.file = 'test.txt'\n\t\n\tgtk.main()\n","sub_path":"LaboC++/DossierFichiersOctobre/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539294384","text":"# from sympy import Symbol, solve, log\n# import numpy as np\n# n = 4\n# x = Symbol('x')\n# a = np.random.rand(n-1,1)\n# b = float(solve(x*log(x) - (3-a.T@np.log(a)), x)[0])\n# print(b)\n\nimport cvxpy as cp\nimport numpy as 
np\nimport time\nimport dccp\nfrom numpy import linalg as LA\nw, v = LA.eig(np.diag((1, 2, 3)))\nimport matplotlib.pyplot as plt\n\n# Problem data.\nn = 4\n\n# gamma must be nonnegative due to DCP rules.\nc = cp.Parameter((n, 1))\nucbweight = cp.Parameter(nonneg=True)\nA = np.eye(n)\n# Construct the problem.\nx = cp.Variable((n, 1))\nw, v = LA.eigh(A)\nsqrtw = np.sqrt(abs(w)).reshape(-1,1)\nobj = cp.Maximize(c.T@x+ucbweight*cp.norm((cp.multiply(sqrtw, v*x))))\n\nx.value = 1/np.ones((n, 1))\n# constraints = [x.T@cp.log(x)<=5, x>=0]\nconstraints = [cp.sum(-cp.entr(x))<=5, x>=0]\n# constraints = [cp.log_sum_exp(x)<=10, x>=0]\n\nprob = cp.Problem(obj, constraints)\nstart_time = time.time()\n# your code\n\n# Construct a trade-off curve of ||Ax-b||^2 vs. ||x||_1\nfor i in range(1):\n # c.value = np.random.rand(n, 1)\n ucbweight.value = np.random.rand(1)[0]\n c.value = np.asarray([[1., 1., 1., 1.]]).T\n\n # print(\"problem is DCP:\", prob.is_dcp()) # false\n # print(\"problem is DCCP:\", dccp.is_dccp(prob)) # true\n prob.solve(method='dccp')\n # Use expr.value to get the numerical value of\n # an expression in the problem.\n print(\"optimal var:\\n\", x.value)\n\nelapsed_time = time.time() - start_time\nprint(elapsed_time)\n# plt.rc('text', usetex=True)\n# plt.rc('font', family='serif')\n# plt.figure(figsize=(6,10))\n#\n# # Plot trade-off curve.\n# plt.subplot(211)\n# plt.plot(l1_penalty, sq_penalty)\n# plt.xlabel(r'\\|x\\|_1', fontsize=16)\n# plt.ylabel(r'\\|Ax-b\\|^2', fontsize=16)\n# plt.title('Trade-Off Curve for LASSO', fontsize=16)\n#\n# # Plot entries of x vs. gamma.\n# plt.subplot(212)\n# for i in range(m):\n# plt.plot(gamma_vals, [xi[i] for xi in x_values])\n# plt.xlabel(r'\\gamma', fontsize=16)\n# plt.ylabel(r'x_{i}', fontsize=16)\n# plt.xscale('log')\n# plt.title(r'\\text{Entries of x vs. 
}\\gamma', fontsize=16)\n#\n# plt.tight_layout()\n# plt.show()","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"59564431","text":"class Solution:\n # @return a string\n def countAndSay(self, n):\n # Careful: Mine version is based on recursion\n if n == 1:\n return '1'\n\n originString = self.countAndSay(n-1)\n newString = ''\n counterName= [originString[0]]\n counterNumber = [1]\n \n for i in range(1, len(originString)):\n if originString[i] == originString[i-1]:\n counterNumber[-1] += 1\n else:\n counterName.append(originString[i])\n counterNumber.append(1)\n\n for j in range(len(counterName)):\n newString =newString + str(counterNumber[j])\n newString = newString + counterName[j]\n return newString\n\n\n#########################################################\n# Use iteration:\n def count(self, s):\n # initialize:\n t = ''; counter = 0; lastOccur = '#'\n for i in s:\n if i != lastOccur:\n if lastOccur != '#':\n t +=str(count) + lastOccur\n lastOccur = i\n count = 1\n else:\n count += 1\n\n t+=str(count)+lastOccur\n return t\n\n def countAndSay2(self, n):\n s = '1'\n for j in range(2, n+1):\n s = self.count(s)\n return s\n # originString = '1'\n # if n ==1:\n # return originString\n\n # for i in range(1:n):\n # counterName = []\n # counterNumber = []\n # for j in range(len(originString)):\n # if \n\n\nif __name__ == '__main__':\n solve = Solution( )\n #solve.countAndSay(5)\n print(solve.countAndSay2(5))","sub_path":"CountandSay.py","file_name":"CountandSay.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83977259","text":"import clr\r\nfrom config import config\r\nimport re\r\nfrom System import DBNull\r\n\r\ndef getDBClasses(dbType):\r\n parameterPrefix = ':'\r\n if dbType == 'sqlite':\r\n try:\r\n clr.AddReference('System.Data.SQLite')\r\n from System.Data.SQLite import (\r\n SQLiteConnection as Connection,\r\n SQLiteParameter as Parameter)\r\n except IOError:\r\n clr.AddReference('Mono.Data.Sqlite')\r\n from Mono.Data.Sqlite import (\r\n SqliteConnection as Connection,\r\n SqliteParameter as Parameter)\r\n elif dbType == 'postgres':\r\n clr.AddReference('Npgsql')\r\n from Npgsql import (\r\n NpgsqlConnection as Connection,\r\n NpgsqlParameter as Parameter)\r\n elif dbType == 'mysql':\r\n clr.AddReference('MySql.Data')\r\n from MySql.Data.MySqlClient import (\r\n MySqlConnection as Connection,\r\n MySqlParameter as Parameter)\r\n parameterPrefix = '?'\r\n elif dbType == 'sqlserver':\r\n clr.AddReference('System.Data')\r\n from System.Data.SqlClient import (\r\n SqlConnection as Connection,\r\n SqlParameter as Parameter)\r\n parameterPrefix = '@'\r\n else:\r\n raise ValueError('invalid db_type setting: %r' % dbType)\r\n return Connection, Parameter, parameterPrefix\r\n\r\n\r\nConnection, Parameter, parameterPrefix = getDBClasses(config.db_type)\r\n\r\n\r\ndef openConnection():\r\n conn = Connection(config.connection)\r\n conn.Open()\r\n return conn\r\n\r\n\r\nconn = openConnection()\r\n\r\n\r\ndef fixParameters(sql):\r\n if parameterPrefix != ':':\r\n sql = re.sub(r':(\\w+)', parameterPrefix + r'\\1', sql)\r\n return sql\r\n\r\n\r\nSAVE_FRIEND_STATEMENT = fixParameters('''\r\n insert into friends (\r\n id, screen_name, name, description,\r\n location, url, image_url)\r\n values (\r\n :id, :screen_name, :name, :description,\r\n :location, :url, 
:profile_image_url)''')\r\n\r\nSAVE_TWEET_STATEMENT = fixParameters('''\r\n insert into tweets (\r\n id, created, text, friend_id)\r\n values (\r\n :id, :created_at, :text, :friend_id)''')\r\n\r\nGET_FRIENDS_STATEMENT = 'select screen_name from friends order by 1'\r\n\r\nGET_TWEETS_STATEMENT = fixParameters('''\r\n select f.screen_name, f.url, f.location, f.name,\r\n f.image_url, t.created, t.text\r\n from friends f inner join tweets t\r\n on f.id = t.friend_id\r\n where f.screen_name = :friend\r\n or :friend is null\r\n order by t.id desc''')\r\n\r\n\r\ndef setParameter(cmd, name, value):\r\n if value is None:\r\n value = DBNull.Value\r\n cmd.Parameters.Add(Parameter(name, value))\r\n\r\n\r\ndef itemExists(table, id):\r\n cmd = conn.CreateCommand()\r\n cmd.CommandText = fixParameters('select 1 from %s where id = :id' % table)\r\n setParameter(cmd, 'id', id)\r\n return cmd.ExecuteScalar() == 1\r\n\r\n\r\nfriendAttrs = 'id screen_name name description location url profile_image_url'.split()\r\n\r\n\r\ndef makeTweet(reader):\r\n tweet = {}\r\n for i in range(reader.FieldCount):\r\n tweet[reader.GetName(i)] = reader[i]\r\n return tweet\r\n\r\n\r\ndef saveTweet(tweet):\r\n if itemExists('tweets', tweet['id']):\r\n return\r\n\r\n friend = tweet.pop('user')\r\n saveFriend(friend)\r\n\r\n cmd = conn.CreateCommand()\r\n cmd.CommandText = SAVE_TWEET_STATEMENT\r\n for key, value in tweet.iteritems():\r\n setParameter(cmd, key, value)\r\n\r\n setParameter(cmd, 'friend_id', friend['id'])\r\n cmd.ExecuteNonQuery()\r\n\r\n\r\ndef getTweets(friend=None):\r\n cmd = conn.CreateCommand()\r\n cmd.CommandText = GET_TWEETS_STATEMENT\r\n setParameter(cmd, 'friend', friend)\r\n reader = cmd.ExecuteReader()\r\n try:\r\n tweets = []\r\n while reader.Read():\r\n tweets.append(makeTweet(reader))\r\n finally:\r\n reader.Close()\r\n return tweets\r\n\r\n\r\n# Practical 6: Write db access functions to save and load friends\r\n# to the database. These will be similar to the routines for\r\n# saving and loading tweets, above.\r\n\r\ndef saveFriend(friend):\r\n\tpass\r\n\r\ndef getFriends():\r\n\treturn []\r\n\r\n","sub_path":"tutorial/code/exercises/stutterdb.py","file_name":"stutterdb.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"172670172","text":"# CS121: Schelling Model of Housing Segregation\n#\n# Program for simulating of a variant of Schelling's model of\n# housing segregation. 
This program takes four parameters:\n#\n# filename -- name of a file containing a sample city grid\n#\n# R - The radius of the neighborhood: home at Location (k, l) is in\n# the neighborhood of the home at Location (i, j)\n# if i-R <= k <= i+R and j-R <= l <= j+R\n#\n# threshold - minimum acceptable threshold for ratio of the number\n# of similar neighbors to the number of occupied homes\n# in a neighborhood.\n#\n# max_steps - the maximum number of passes to make over the city\n# during a simulation.\n#\n# \n# By: Yasoob Rasheed\n#\n\nimport os\nimport sys\nimport utility\nimport click\n\n\n# AUXILIARY METHODS FOR COMPUTE_SIMILARITY_SCORE\n\ndef generate_neighborhood(grid, R, location):\n # Generate a list with the order of the houses in a given neighborhood \"R\"\n i = location[0]\n j = location[1]\n neighborhood = []\n \n # Using the eqution given, grab the house at the indices of the neighborhood\n for k in range(0, len(grid)):\n if i - R <= k <= i + R:\n for l in range(0, len(grid[k])):\n if j - R <= l <= j + R:\n neighborhood.append(grid[k][l])\n\n return neighborhood\n\n\ndef compute_similarity_score(grid, R, location):\n ''' \n Compute the similarity score for the homeowner at the specified\n location using a neighborhood of radius R.\n\n Inputs: \n grid (list of lists of strings): the grid\n R (int): radius for the neighborhood\n location (pair of ints): a grid location\n \n Returns: float\n ''' \n # I have bypassed writing an assertion that a spot is open\n assert utility.is_grid(grid), \"Grid argument is the wrong type\"\n\n # First figure out what color this home is\n home_color = grid[location[0]][location[1]]\n\n # Next get the neighborhood around this home\n neighborhood = generate_neighborhood(grid, R, location)\n\n # Next build a counter for each type of home\n same_color_homes = 0.0\n different_color_homes = 0.0\n\n # Next use an equation to return the similarity score\n for other_home_color in neighborhood:\n if other_home_color == home_color:\n same_color_homes += 1.0\n elif other_home_color != \"O\":\n different_color_homes += 1.0\n\n return (same_color_homes) / (different_color_homes + same_color_homes)\n\n\n# AUXILIARY METHODS FOR DO_SIMULATION\n\ndef one_step(grid, R, threshold, open_locations):\n # Run through one entire step and keep track of the number of swaps\n # Variable relocation holds the amount of relocations\n relocations = 0\n\n for x in range(0, len(grid)):\n for y in range(0, len(grid[x])):\n # 2. Compute homeowner's similarity score\n similarity_score = compute_similarity_score(grid, R, (x, y))\n if grid[x][y] != \"O\" and similarity_score < threshold:\n # 3. Swap the values at the two locations\n if swap(grid, R, threshold, open_locations, (x, y)) != -1:\n relocations += 1\n\n return relocations\n \n\ndef swap(grid, R, threshold, open_locations, home_location):\n # 3. 
Swap the values at the two locations\n # best_similatiry and best_location hold the values of the open location to swap with the house\n # flag is used to make sure that there is a place to swap the house and the open location\n # flag equals False means no and equals True means yes\n best_similarity = 1.0\n best_location = (0, 0)\n x = home_location[0]\n y = home_location[1]\n flag = False\n \n # Loop through the open locations to find the best swap\n for open_location in open_locations:\n i = open_location[0]\n j = open_location[1]\n \n grid[i][j] = grid[x][y]\n grid[x][y] = \"O\"\n similarity = compute_similarity_score(grid, R, open_location)\n \n # If a potential swap location exists record it\n if threshold <= similarity < best_similarity:\n best_similarity = similarity\n best_location = open_location\n flag = True\n\n grid[x][y] = grid[i][j]\n grid[i][j] = \"O\"\n \n # If a swap exists do the swap\n # Delete existing spot from open locations\n # Add old location to open location list\n # Else if no swap exists return that one doesn't exist with a -1\n if flag:\n grid[best_location[0]][best_location[1]] = grid[x][y]\n grid[x][y] = \"O\"\n\n index_of_best_location = open_locations.index(best_location)\n del open_locations[index_of_best_location]\n open_locations.append(home_location)\n else:\n return -1\n\n\ndef do_simulation(grid, R, threshold, max_steps):\n '''\n Do a full simulation.\n\n Inputs:\n grid: (list of lists of strings) the grid\n R: (int) radius for the neighborhood\n threshold: (float) satisfaction threshold\n max_steps: (int) maximum number of steps to do\n\n Returns:\n (int) The number of relocations completed.\n '''\n assert utility.is_grid(grid), \"Grid argument is the wrong type\"\n\n # Steps 1-5 outline the logic of the do_simulation method \n # 1. Create and initialize the list of open locations\n open_locations = []\n \n for i in range(0, len(grid)):\n for j in range(0, len(grid[i])):\n if grid[i][j] == \"O\":\n open_locations.append((i, j))\n \n # 4. Simulate one step of the simulation\n relocations = 0\n steps = 0\n\t\n # Keep track of the relocations of one step\n while(steps != max_steps):\n relocations += one_step(grid, R, threshold, open_locations)\n # 5. Run steps until one of the stopping conditions is met: No relocations in a step\n if relocations == 0:\n return relocations\n else:\n steps += 1\n \n # 5. 
Run steps until one of the stopping conditions is met: Maximum steps reached\n if steps == max_steps:\n return relocations \n\n\n@click.command(name=\"schelling\")\n@click.option('--grid_file', type=click.Path(exists=True))\n@click.option('--r', type=int, default=1, help=\"neighborhood radius\")\n@click.option('--threshold', type=float, default=0.44,\n help=\"satisfaction threshold\")\n@click.option('--max_steps', type=int, default=1)\ndef go(grid_file, r, threshold, max_steps):\n '''\n Put it all together: do the simulation and process the results.\n '''\n\n if grid_file is None:\n print(\"No parameters specified: just loading the code.\")\n return\n\n grid = utility.read_grid(grid_file)\n\n if len(grid) < 20:\n print(\"Initial state of city:\")\n for row in grid:\n print(row)\n print()\n\n num_relocations = do_simulation(grid, r, threshold, max_steps)\n print(\"Number of relocations done: \" + str(num_relocations))\n\n if len(grid) < 20:\n print()\n print(\"Final state of the city:\")\n for row in grid:\n print(row)\n\nif __name__ == \"__main__\":\n go()\n","sub_path":"pas/pa2/schelling.py","file_name":"schelling.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"633317886","text":"#!/usr/bin/env python\n\"\"\" Populate the hubs db with some dev data. \"\"\"\n\nimport json\nimport os\n\nimport flask\n\nimport hubs.models\n\napp = flask.Flask(__name__)\napp.config.from_object('hubs.default_config')\nif 'HUBS_CONFIG' in os.environ:\n app.config.from_envvar('HUBS_CONFIG')\n\nsession = hubs.models.init(app.config['DB_URL'], True, True)\n\nhub = hubs.models.Hub(name='designteam', summary='The Fedora Design Team')\nsession.add(hub)\n\nwidget = hubs.models.Widget(plugin='stats', index=0)\nhub.widgets.append(widget)\nwidget = hubs.models.Widget(plugin='rules', index=1,\n _config=json.dumps({\n 'link': 'http://threebean.org'\n }))\nhub.widgets.append(widget)\nwidget = hubs.models.Widget(plugin='dummy', index=2)\nhub.widgets.append(widget)\nwidget = hubs.models.Widget(plugin='sticky', index=0, left=True,\n _config=json.dumps({\n 'text': 'This is a sticky note.',\n }))\nhub.widgets.append(widget)\nwidget = hubs.models.Widget(plugin='dummy', index=1, left=True)\nhub.widgets.append(widget)\n\nsession.commit()\n\n","sub_path":"populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"440347002","text":"\"\"\"\n A simple command line application that shows the files in a person's google drive directory\n It uses the google drive Api and the google-api-python-client library\n It also uses the new google oauth2client for authorization\n \"\"\"\nfrom __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\nSCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'\n\"\"\"The scope varible indicates a set of scopes of authorization an application wants to obtain\n In our case read-only access of your Google Drive file or folder metadata\n \"\"\"\nCLIENT_SECRET_FILE = 'client_secret.json'\n#json file that will keep the list of files availbe in the drive directory\n\nAPPLICATION_NAME = 'Andela_Day3 
Drive API Application'\n#indicates the application name\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'andela_day3.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n #here is where the application tries to get a valid access token in order to make an API call\n\n \"\"\"after user gets authorization to there personal data a special \n access token is given to the application and the application stores this token in the \n client_secret.json file\"\"\"\n\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n \"\"\"if credentials are invalid or expired then create the authrization flow \n using the client secret downloaded from the google developer console\"\"\"\n \n if flags:\n credentials = tools.run_flow(flow, store, flags)\n #The obtained credentials are executed to make sure that they are valid\n \n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef main():\n \"\"\"Shows basic usage of the Google Drive API.\n\n Creates a Google Drive API service object and outputs the names and IDs\n for up to 10 files.\n \"\"\"\n credentials = get_credentials()\n #get valid credentials\n\n http = credentials.authorize(httplib2.Http())\n #make a http request\n\n service = discovery.build('drive', 'v3', http=http)\n #create an end point to the service requested which is get the files from the google drive directory\n\n results = service.files().list(\n pageSize=10,fields=\"nextPageToken, files(id, name)\").execute()\n items = results.get('files', [])\n #get the files put them in a list \n \n if not items:\n print('No files found.')\n else:\n print('Files:')\n for item in items:\n print('{0} ({1})'.format(item['name'], item['id']))\n\n #return list of files with the respective numbers\n\n\nif __name__ == '__main__':\n main()","sub_path":"google_drive_files_list.py","file_name":"google_drive_files_list.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486812489","text":"from copy import deepcopy\nimport math\nclass MaxEntropy:\n def __init__(self, EPS=0.005):\n self._samples = []\n self._label_y = set() # 标签集合,相当去去重后的y\n self._numXY = {} # key为(x,y),value为出现次数\n self._samples_num = 0 # 样本数\n self._Ep_ = [] # 样本分布的特征期望值\n self._xyID = {} # key记录(x,y),value记录id号\n self._xy_num = 0 # 特征键值(x,y)的个数\n self._max_feature_num = 0 # 最大特征数\n self._IDxy = {} # key为(x,y),value为对应的id号\n self._weights = []\n self._EPS = EPS # 收敛条件\n self._last_weights = [] # 上一次w参数值\n\n def loadData(self, dataset):\n self._samples = deepcopy(dataset)\n for items in self._samples:\n y = items[0]\n X = items[1:]\n self._label_y.add(y) # 集合中y若已存在则会自动忽略\n for x in X:\n if (x, y) in self._numXY:\n self._numXY[(x, y)] += 1\n else:\n self._numXY[(x, y)] = 1\n\n self._samples_num = len(self._samples)\n self._xy_num = len(self._numXY)\n self._max_feature_num = 
max([len(sample) - 1 for sample in self._samples])\n self._weights = [0] * self._xy_num\n self._last_weights = self._weights[:]\n\n self._Ep_ = [0] * self._xy_num\n for i, xy in enumerate(self._numXY): # 计算特征函数fi关于经验分布的期望\n self._Ep_[i] = self._numXY[xy] / self._samples_num\n self._xyID[xy] = i\n self._IDxy[i] = xy\n\n print(self._max_feature_num)\n print(self._numXY)\n #self._Ep_[self._xyID[('sunny', 'no')]]=1\n print(self._Ep_)\n print(self._xy_num)\n # 计算每个Z(x)值\n def _calc_zx(self, X):\n zx = 0\n for y in self._label_y:\n temp = 0\n for x in X:\n if (x, y) in self._numXY:\n temp += self._weights[self._xyID[(x, y)]]\n zx += math.exp(temp)\n return zx\n\n # 计算每个P(y|x)\n def _calu_model_pyx(self, y, X):\n zx = self._calc_zx(X)\n temp = 0\n for x in X:\n if (x, y) in self._numXY:\n temp += self._weights[self._xyID[(x, y)]]\n pyx = math.exp(temp) / zx\n return pyx\n\n # 计算特征函数fi关于模型的期望\n def _calc_model_ep(self, index):\n x, y = self._IDxy[index]\n ep = 0\n for sample in self._samples:\n if x not in sample:\n continue\n pyx = self._calu_model_pyx(y, sample)\n ep += pyx / self._samples_num\n return ep\n\n # 判断是否全部收敛\n def _convergence(self):\n for last, now in zip(self._last_weights, self._weights):\n if abs(last - now) >= self._EPS:\n return False\n return True\n\n # 计算预测概率\n def predict(self, X):\n Z = self._calc_zx(X)\n result = {}\n for y in self._label_y:\n ss = 0\n for x in X:\n if (x, y) in self._numXY:\n ss += self._weights[self._xyID[(x, y)]]\n pyx = math.exp(ss) / Z\n result[y] = pyx\n return result\n\n # 训练\n def train(self, maxiter=1000):\n for loop in range(maxiter):\n print(\"迭代次数:%d\" % loop)\n self._last_weights = self._weights[:]\n for i in range(self._xy_num):\n ep = self._calc_model_ep(i) # 计算第i个特征的模型期望\n self._weights[i] += math.log(self._Ep_[i] / ep) / self._max_feature_num # 更新参数\n print(\"权值:\", self._weights)\n if self._convergence(): # 判断是否收敛\n break\n def get_weight(self):\n return self._weights\n\n\ndataset = [['no', 'sunny', 'hot', 'high', 'FALSE'],\n ['no', 'sunny', 'hot', 'high', 'TRUE'],\n ['yes', 'overcast', 'hot', 'high', 'FALSE'],\n ['yes', 'rainy', 'mild', 'high', 'FALSE'],\n ['yes', 'rainy', 'cool', 'normal', 'FALSE'],\n ['no', 'rainy', 'cool', 'normal', 'TRUE'],\n ['yes', 'overcast', 'cool', 'normal', 'TRUE'],\n ['no', 'sunny', 'mild', 'high', 'FALSE'],\n ['yes', 'sunny', 'cool', 'normal', 'FALSE'],\n ['yes', 'rainy', 'mild', 'normal', 'FALSE'],\n ['yes', 'sunny', 'mild', 'normal', 'TRUE'],\n ['yes', 'overcast', 'mild', 'high', 'TRUE'],\n ['yes', 'overcast', 'hot', 'normal', 'FALSE'],\n ['no', 'rainy', 'mild', 'high', 'TRUE']]\n\nmaxent = MaxEntropy()\nx = ['sunny', 'mild', 'high', 'FALSE']\n\nmaxent.loadData(dataset)\nmaxent.train(maxiter=100)\n\nprint('精度:', maxent.predict(x))\n\n","sub_path":"MAXENT/UnderstandMaxent.py","file_name":"UnderstandMaxent.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"2571285","text":"import sys, os, json\nimport tkinter\nfrom tkinter import ttk\nimport appdirs\n\nclass Application(tkinter.Tk):\n \n def __init__(self):\n super().__init__()\n self._name = None\n self._version = None\n self.createGUI()\n \n def start(self):\n self.readConfig()\n self.geometry(self._config['WinGeom'])\n self.protocol('WM_DELETE_WINDOW',self.saveAndExit)\n self.mainloop()\n \n def name(self, appname=None):\n if appname!=None:\n self._name = appname\n self.title(appname)\n if self._name == None:\n raise NotImplementedError()\n return 
self._name\n \n def version(self, appver=None):\n if appver!=None:\n self._version = appver\n if self._version == None:\n raise NotImplementedError()\n return self._version\n \n def getConfigDir(self):\n return appdirs.user_config_dir(self._name)\n\n def getConfigFile(self):\n return os.path.join(self.getConfigDir(),'config.json')\n \n def saveConfig(self):\n config_dir = self.getConfigDir()\n config_file = self.getConfigFile()\n if not os.path.exists(config_dir):\n os.makedirs(config_dir)\n with open(config_file, \"w\") as outf:\n json.dump(self._config, outf)\n \n def readConfig(self):\n config_dir = self.getConfigDir()\n config_file = self.getConfigFile()\n print(\"Hello: %s\",config_file)\n if os.path.exists(config_file):\n with open(config_file,'r') as inf:\n self._config = json.load(inf)\n else:\n self._config = self.defaultConfig()\n \n def defaultConfig(self):\n raise NotImplementedError()\n \n def saveAndExit(self):\n self._config['WinGeom'] = self.geometry()\n self.saveConfig()\n sys.exit()\n \n\n \n \n \n","sub_path":"img2segy/AppFramework/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"537396441","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 8 06:03:29 2019\n\n@author: Sofonias Alemu\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\n### Exercise 2\n\ndef bern(n,x):\n import scipy.special as sp\n Res=np.zeros([n+1,np.shape(x)[0]])\n for v in range(n+1):\n Res[v]=sp.binom(n,v)*x**v*(1-x)**(n-v)\n return Res\nN=100\nX=np.linspace(0,1,N)\n\nplt.figure(1)\nplt.subplot(4,4,1)\nplt.plot(X,np.ones(N))\nplt.subplot(4,4,5)\nplt.plot(X,bern(1,X)[0,:])\nplt.subplot(4,4,6)\nplt.plot(X,bern(1,X)[1,:])\nplt.subplot(4,4,9)\nplt.plot(X,bern(2,X)[0,:])\nplt.subplot(4,4,10)\nplt.plot(X,bern(2,X)[1,:])\nplt.subplot(4,4,11)\nplt.plot(X,bern(2,X)[2,:])\nplt.subplot(4,4,13)\nplt.plot(X,bern(3,X)[0,:])\nplt.subplot(4,4,14)\nplt.plot(X,bern(3,X)[1,:])\nplt.subplot(4,4,15)\nplt.plot(X,bern(3,X)[2,:])\nplt.subplot(4,4,16)\nplt.plot(X,bern(3,X)[3,:])\n\n#### Exercise 3 \n\nA=np.load(\"MLB.npy\")\nplt.figure(1)\nplt.subplot(1,3,1)\nplt.scatter(A[:,0],A[:,1])\nplt.xlabel(\"height(inches)\")\nplt.ylabel(\"weight(pounds)\")\nplt.subplot(1,3,2)\nplt.scatter(A[:,0],A[:,2])\nplt.xlabel(\"height(inches)\")\nplt.ylabel(\"age(years)\")\nplt.subplot(1,3,3)\nplt.scatter(A[:,1],A[:,2])\nplt.xlabel(\"weight(pounds)\")\nplt.ylabel(\"age(years)\")\n\n#### Exercise 5\n\nx=np.linspace(-1.5,1.5,200)\nX,Y=np.meshgrid(x,x)\nZ=(1-X)**2+100*(Y-X**2)**2\n\nplt.figure(2)\nplt.subplot(2,1,1)\nplt.pcolormesh(X,Y,Z)\nplt.subplot(2,1,2)\nplt.contour(X,Y,Z)\nplt.plot(1,1,marker='o')\n\n### Exercise 6\n\nA=np.load(\"countries.npy\")\n\n","sub_path":"Computation/Wk1_DifInt/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591179080","text":"###############################################################################\n# Generación del dataset\n###############################################################################\n\n# archivos de texto\nARCHIVOS_TEXTO = ['../textos/texto_1_Wikipedia.com-Criptografía.txt', '../textos/texto_2_Wikepedia.com-Criptoanálisis.txt'\n\t\t# '../textos/texto_3_JLB-Funes el memorioso.txt', '../textos/texto_4_HPL-El clérigo malvado.txt' ] ,\n\t\t# 
'../textos/texto_5_Wikepedia.com-Argentina.txt'\n\t\t]\n\n# archivo de salida\nARCHIVO_SALIDA = './dataset.csv'\n\n# opciones\n#AFIN_A = [1,3,5,7,9,11,15,17,19,21,23,25]\nAFIN_A = [21,23,25]\n\nAFIN_B = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]\n\n# longitud de mensaje fija\nCARACTERES_FIX = 32\n\n# caracteres permitidos (fijo)\nCARACTERES_RE = '[^A-Z]'\n\n###############################################################################\n\nfrom pycipher import Affine\nimport re\n\nprint('Iniciando proceso...')\n\n# caracters permitidos\nregex = re.compile(CARACTERES_RE)\n\n# abrir archivo reescribiendo cada vez\nf = open(ARCHIVO_SALIDA, 'w')\nheader = ''\nfor i in range(CARACTERES_FIX):\n\theader = header + 'claro' + str(i+1) + ','\nfor i in range(CARACTERES_FIX):\n\theader = header + 'cifrado' + str(i+1) + ','\nheader = header + 'aff'\nprint(header, file=f)\n\ntotal_lineas = 0\n\n# procesar archivos de texto\nfor archivo in ARCHIVOS_TEXTO:\n\twith open(archivo, encoding=\"utf8\") as f2:\n\t\tcontenido = f2.readlines()\n\t\tfor linea in contenido:\n\t\t\tlinea = linea.strip().upper()\n\t\t\tif linea == '':\n\t\t\t\tcontinue\n\t\t\tlimpio = regex.sub('', linea)\n\t\t\tfor claro in re.findall('.{%d}' % CARACTERES_FIX, limpio):\n\t\t\t\tsalida = ''\n\t\t\t\tlista_claro = list(claro)\n\t\t\t\tfor caracter in lista_claro:\n\t\t\t\t\tsalida = salida + str(ord(caracter)) + ','\n\t\t\t\tfor a in AFIN_A:\n\t\t\t\t\tfor b in AFIN_B:\n\t\t\t\t\t\tsalida2 = ''\n\t\t\t\t\t\tcifrado = Affine(a, b).encipher(claro)\n\t\t\t\t\t\tlista_cifrado = list(cifrado)\n\t\t\t\t\t\tfor caracter in lista_cifrado:\n\t\t\t\t\t\t\tsalida2 = salida2 + str(ord(caracter)) + ','\n\t\t\t\t\t\taff = (a * 100) + b\n\t\t\t\t\t\t###aff = b\n\t\t\t\t\t\tprint(salida + salida2 + str(aff), file=f)\n\t\t\t\t\t\ttotal_lineas = total_lineas + 1\n\nprint('Total de registros: ' + str(total_lineas))\nprint('Proceso finalizado.')\n","sub_path":"pruebas/prueba07/generar_dataset.py","file_name":"generar_dataset.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"332566052","text":"#Purpose: derive the CIE ionic fraction at the condition positions.\n\n# Import used modules\nimport cie_physderi\nimport pyatomdb\nimport pickle, os\nfrom datetime import datetime\nfrom astropy.io import ascii\nimport multiprocessing as mp\n\n#system parameters\nrootpath = os.getcwd()+'/'\nZlist = [1,2,6,7,8,10,12,14,16,18,20,26,28]\n\n#Read the condition file\nconfile = rootpath+'adia.exp_phy.info'\nconditions = ascii.read(confile)\nncondi = len(conditions)\ncondi_index = range(0,ncondi)\n\n#1.derive the CIE ionic fraction in parallel way-----------\nnow1 = datetime.now().hour*3600. + datetime.now().minute*60. + \\\n datetime.now().second + datetime.now().microsecond/1e6\npool = mp.Pool(mp.cpu_count())\nres = pool.starmap(cie_physderi.deri_cie_ionfrac, \\\n [([Z], conditions, condi_index) for Z in Zlist])\npool.close()\nnow2 = datetime.now().hour*3600. + datetime.now().minute*60. 
+ \\\n datetime.now().second + datetime.now().microsecond/1e6\nprint(\"Time Consuming:%7.2f sec.\" % (now2-now1))\n## Time Consuming: 69.84 sec.\n#Combine the ionic fraction file\ncomb_ionfrac = {}\nfor Z in Zlist:\n zionfrac = pickle.load(open(rootpath+'tciefrac_'+ \\\n pyatomdb.atomic.Ztoelsymb(Z)+'.pkl','rb'))\n comb_ionfrac[Z] = zionfrac[Z]\n\ntmp = open(rootpath+'tionfrac_cie.pkl','wb')\npickle.dump(comb_ionfrac,tmp)\ntmp.close()\n","sub_path":"adiabatic/cie_case/derive_cie_ionfrac.py","file_name":"derive_cie_ionfrac.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139038911","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase, Client\nfrom django.urls import reverse\nfrom posts.views import NotificationsListView\nfrom posts.utils import Notifications\nfrom posts.models import Activity\n\nclass TestViews(TestCase):\n\n def setUp(self):\n print('\\nsetting up test environment...')\n self.list_url = reverse('posts:list')\n self.post_detail_api = reverse('posts:detail', args=['b1638f970c3ddd528671df76c4dcf13e'])\n\n def test_json_data_in_db(self):\n print('testing whether the the JSON is being properly loaded...')\n Notifications.load_json_to_db()\n self.assertEquals(23, Activity.objects.all().count())\n\n\n def test_activity_list_GET(self):\n print('testing whether activity GET request is working properly...')\n response = self.client.get(self.list_url)\n self.assertContains(response, 'Acme Inc dynamically scales niches worldwide')\n self.assertContains(response, 'How to professionally administrate seamless growth')\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'posts/activity_list.html')\n\n def test_post_notifications_GET(self):\n print('testing whether API endpoint for posts is working...')\n Notifications.load_json_to_db()\n response = self.client.get(self.post_detail_api)\n self.assertEquals(response.data['comment_notification'],\n 'Suoma Narjus commented on your post: \"Acme Inc dynamically scales...\"')\n response = self.client.get(reverse('posts:detail', \n args=['7d78ff348647sdfsdb782cb3027d836single-like']))\n self.assertEquals(response.data['like_notification'],\n 'Eugenio Bertè liked your post: \"How to professionally admin...\"')\n","sub_path":"posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"21997905","text":"from django.core.management.base import BaseCommand\nfrom main.models import Localization\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass Command(BaseCommand):\n help = 'Deletes any localizations marked for deletion with null project, type, version, or media.'\n\n def handle(self, **options):\n BATCH_SIZE = 1000\n num_deleted = 0\n while True:\n # We cannot delete with a LIMIT query, so make a separate query\n # using IDs.\n null_project = Localization.objects.filter(project__isnull=True)\n null_meta = Localization.objects.filter(meta__isnull=True)\n null_version = Localization.objects.filter(version__isnull=True)\n null_media = Localization.objects.filter(media__isnull=True)\n loc_ids = (null_project | null_meta | null_version | null_media)\\\n .distinct()\\\n .values_list('pk', flat=True)[:BATCH_SIZE]\n localizations = Localization.objects.filter(pk__in=loc_ids)\n num_localizations = localizations.count()\n if num_localizations == 
0:\n break\n localizations.delete()\n num_deleted += num_localizations\n logger.info(f\"Deleted a total of {num_deleted} localizations...\")\n","sub_path":"main/management/commands/prunelocalizations.py","file_name":"prunelocalizations.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569716068","text":"# Rinus van Grunsven\n# 10755373\n\nimport json\nimport csv\nimport pandas as pd\n\ninput = \"DP_LIVE_GDP.csv\"\noutput = \"DP_LIVE_GDP.json\"\n\ndef read_csv(filename):\n \"\"\"\n Read CSV and append a dict to a list\n \"\"\"\n with open(input, \"r\") as csvfile:\n # data_dict = {}\n reader = pd.read_csv(csvfile, delimiter=';')\n df = reader[[\"LOCATION\", \"TIME\", \"Value\"]]\n df_dict = df.to_dict(orient=\"split\")\n with open(output, \"w\") as jsonfile:\n json.dump(df_dict, jsonfile)\n\nif __name__ == \"__main__\":\n read_csv(input)\n","sub_path":"homework/week6/convertCSV2JSON.py","file_name":"convertCSV2JSON.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"251609634","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n/***************************************************************************\n HTML_Generator\n A QGIS plugin\n HTML single or multi page generator\n Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/\n -------------------\n begin : 2020-11-18\n copyright : (C) 2020 by Giulio\n email : giulio.fattori@tin.it\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
*\n * *\n ***************************************************************************/\n\"\"\"\n\n__author__ = 'Giulio Fattori'\n__date__ = '2020-11-18'\n__copyright__ = '(C) 2020 by Korto19'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nfrom qgis.PyQt.QtCore import QCoreApplication\nfrom qgis.core import (QgsProcessing,\n QgsFeatureSink,\n QgsProcessingAlgorithm,\n\t\t\t\t\t QgsProcessingParameterField,\n\t\t\t\t\t QgsProcessingParameterFile,\n\t\t\t\t\t QgsProcessingParameterExpression,\n\t\t\t\t\t QgsProcessingParameterDefinition,\n\t\t\t\t\t QgsProcessingParameterBoolean,\n\t\t\t\t\t QgsFeatureRequest,\n\t\t\t\t\t QgsExpression,\n\t\t\t\t\t QgsProcessingParameterFileDestination,\n QgsProcessingParameterFeatureSource,\n QgsProcessingParameterFeatureSink)\nimport re\nimport processing\nfrom pathlib import Path\n\n#senza questo non puoi vedere il progetto\nfrom qgis.core import QgsProject\n\n#questo per l'icona dell'algoritmo di processing\nimport os\nimport inspect\nfrom qgis.PyQt.QtGui import QIcon\n\nclass HTML_GeneratorAlgorithm(QgsProcessingAlgorithm):\n \"\"\"\n Algorithm that make an HTML page or pages\n \"\"\"\n INPUT_L = 'INPUT_L' #layer dati\n INPUT_F = 'INPUT_F' #campi per html\n GROUP_BY = 'GROUP_BY' #espressione filtro\n INPUT_T = 'INPUT T' #titolo pagina\n INPUT_I = 'INPUT_I' #icona\n INPUT_S = 'INPUT_S' #foglio di style\n INPUT_ABS = 'INPUT_ABS' #percorso relativo si/no\n INPUT_P = 'INPUT_P' #pie' di pagina interattivo\n \n OUTPUT_H = 'OUTPUT_H'\n \n def tr(self, string):\n \"\"\"\n Returns a translatable string with the self.tr() function.\n \"\"\"\n return QCoreApplication.translate('Processing', string)\n\t\t\n\t\t#icona dell'algoritmo di processing\n def icon(self):\n cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]\n icon = QIcon(os.path.join(os.path.join(cmd_folder, 'icon.png')))\n return icon\n\n def createInstance(self):\n return HTML_GeneratorAlgorithm()\n\n def name(self):\n \"\"\"\n Returns the algorithm name, used for identifying the algorithm. This\n string should be fixed for the algorithm, and must not be localised.\n The name should be unique within each provider. Names should contain\n lowercase alphanumeric characters only and no spaces or other\n formatting characters.\n \"\"\"\n return 'Html Generator'\n\n def displayName(self):\n \"\"\"\n Returns the translated algorithm name, which should be used for any\n user-visible display of the algorithm name.\n \"\"\"\n return self.tr('Html Generator')\n\n def group(self):\n \"\"\"\n Returns the name of the group this algorithm belongs to. This string\n should be localised.\n \"\"\"\n return self.tr('')\n\n def groupId(self):\n \"\"\"\n Returns the unique ID of the group this algorithm belongs to. This\n string should be fixed for the algorithm, and must not be localised.\n The group id should be unique within each provider. Group id should\n contain lowercase alphanumeric characters only and no spaces or other\n formatting characters.\n \"\"\"\n return ''\n\n def shortHelpString(self):\n \"\"\"\n Returns a localised short helper string for the algorithm. 
This string\n should provide a basic description about what the algorithm does and the\n parameters and outputs associated with it..\n \"\"\"\n return self.tr(\"Produce uno o più file html basati su un CSS utilizzabili in una cornice HTML del composer\\n\\\n OPZIONI\\n\\\n - Filtro sui campi\\n\\\n - Fogli multipli in funzione di un campo selezionato nel filtro con possibilità di intestazione e piè pagina interattivi\\n\\\n - Foto o icona e/o titolo nell'intestazione\\n\\\n - Percorsi assoluti / relativi\\n\\\n NOTA BENE\\n\\\n Le impostazioni dell'aspetto della pagina sono impostate dal foglio di stile Standard.css\\n\\\n I campi con immagini devono contenere il nome con estensione es: image.jpg\\n\\\n I campi con immagini possono contenere anche il percorso relativo o assoluto\\n\\\n Le dimensioni delle immagini sono impostate nel file CSS\\n\\\n Trascinando i campi nel selettore campi per HTML è possibile riordinarli\\n\\\n Selezionare intestazioni / piè di pagina interattivi per navigare fra i fogli prodotti\\n\\\n Selezionare riferimenti relativi consente il trasferimento del progetto, in tal caso:\\n\\\n LE CARTELLE ED I RELATIVI FILE DEVONO ESSERE NELLA CARTELLA DI PROGETTO\\n\\\n
E' possibile includere nel progetto il file HTML e il CSS relativo incollando il sorgente HTML\\\n nella casella'Sorgente' delle proprietà della cornice HTML
\\n\\\n \")\n\n \n def initAlgorithm(self, config=None):\n \"\"\"\n Here we define the inputs and output of the algorithm, along\n with some other properties.\n \"\"\"\n # We add the point input vector features source\n #QgsProcessingFeatureSourceDefinition \n self.addParameter(\n QgsProcessingParameterFeatureSource(\n self.INPUT_L,\n self.tr('Input sorgente dati'),\n [QgsProcessing.TypeMapLayer],\n )\n )\n \n self.addParameter(\n QgsProcessingParameterField(\n self.INPUT_F,\n self.tr('Selezionare i campi da inserire nel file Html'),\n allowMultiple = True,\n parentLayerParameterName=self.INPUT_L\n )\n )\n \n # We add the input css\n self.addParameter(\n QgsProcessingParameterFile(\n self.INPUT_S,\n 'Foglio di stile CSS',\n behavior=QgsProcessingParameterFile.File, fileFilter='Css file (*.css)',\n optional = False\n )\n )\n \n GROUP_BY = QgsProcessingParameterExpression(\n self.GROUP_BY,\n self.tr('Espressione filtro'),\n optional = True,\n parentLayerParameterName=self.INPUT_L,\n )\n GROUP_BY.setFlags(GROUP_BY.flags() | QgsProcessingParameterDefinition.FlagAdvanced)\n self.addParameter(GROUP_BY)\n \n INPUT_P = QgsProcessingParameterBoolean(\n self.INPUT_P,\n self.tr('Intestazione e pié di pagina interattivi [solo html multipli]'),\n optional = True,\n defaultValue = 0,\n )\n INPUT_P.setFlags(INPUT_P.flags() | QgsProcessingParameterDefinition.FlagAdvanced)\n self.addParameter(INPUT_P)\n \n INPUT_T = QgsProcessingParameterExpression(\n self.INPUT_T,\n self.tr('Titolo pagina'),\n optional = True,\n parentLayerParameterName=self.INPUT_L\n )\n INPUT_T.setFlags(INPUT_T.flags() | QgsProcessingParameterDefinition.FlagAdvanced)\n self.addParameter(INPUT_T)\n \n # We add the input icon source\n INPUT_I = QgsProcessingParameterFile(\n self.INPUT_I,\n 'Icona Gruppo',\n behavior=QgsProcessingParameterFile.File, fileFilter='Image file (*.gif; *.jpeg; *.jpg; *.png; *.svg)',\n optional = True\n )\n INPUT_I.setFlags(INPUT_I.flags() | QgsProcessingParameterDefinition.FlagAdvanced)\n self.addParameter(INPUT_I)\n\n\n # We add a file output of type HTML\n self.addParameter(\n QgsProcessingParameterFileDestination(\n self.OUTPUT_H,\n self.tr('Tabella HTML'),\n 'HTML files (*.html)',\n )\n )\n \n self.addParameter(\n QgsProcessingParameterBoolean(\n self.INPUT_ABS,\n 'Percorsi file relativi [solo se file NON temporaneo/i]',\n 0\n )\n )\n \n\n \n def processAlgorithm(self, parameters, context, feedback):\n \"\"\"\n Here is where the processing itself takes place.\n \"\"\"\n sourceL = self.parameterAsSource(\n parameters,\n self.INPUT_L,\n context)\n \n sourceF = self.parameterAsMatrix(\n parameters,\n self.INPUT_F,\n context)\n \n filtro = self.parameterAsString(\n parameters,\n self.GROUP_BY,\n context)\n \n titolo = self.parameterAsString(\n parameters,\n self.INPUT_T,\n context)\n \n html = self.parameterAsFileOutput(\n parameters,\n self.OUTPUT_H,\n context)\n \n source_path = self.parameterAsString(\n parameters,\n self.INPUT_L,\n context)\n \n icona = self.parameterAsString(\n parameters,\n self.INPUT_I,\n context)\n \n fogliocss = self.parameterAsString(\n parameters,\n self.INPUT_S,\n context)\n \n rel_path = self.parameterAsBool(\n parameters,\n self.INPUT_ABS,\n context)\n \n pie_p = self.parameterAsBool(\n parameters,\n self.INPUT_P,\n context)\n \n def html_composer(sourceL, sourceF, filtro, titolo, html, source_path, icona, fogliocss, rel_path, partizione, values, valori):\n ''' COMPOSIZIONE PAGINA HTML ''' \n #FASE #01 - cerco la path del progetto\n if QgsProject.instance().homePath():\n path_proj 
= QgsProject.instance().homePath()\n #windowizzo la path quale che sia\n path_proj = str(Path(path_proj))\n #rimuovo geopakage: se presente\n path_proj = path_proj.replace('geopackage:','')\n else:\n feedback.reportError('WARNING NO PROJECT PATH: the html file may not work correctly\\n')\n path_proj = ''\n #tolgo %20 e metto spazio \n path_proj = path_proj.replace('%20',' ')\n \n #FASE #02 - cerco la path del file di input\n path_file = (self.parameterDefinition('INPUT_L').valueAsPythonString(parameters['INPUT_L'], context))\n path_file = path_file[1:path_file.rfind('/')+1]\n if 'memory' in path_file:\n file_mem = True\n path_file = ''\n else:\n file_mem = False\n #windowizzo la path quale che sia\n path_file = str(Path(path_file))\n\n #tolgo %20 e metto spazio \n path_file = path_file.replace('%20',' ')\n \n #FASE #03 - scelgo la path da usare tra le due: prioritaria quella di progetto\n if path_proj:\n path_dir = path_proj\n if path_proj not in path_file and path_file != '':\n feedback.reportError('WARNING PATH FILE ' + path_file)\n feedback.reportError('OUTSIDE PROJECT PATH ' + path_proj)\n feedback.reportError('MOST LIKELY IT WON''T WORK' + '\\n')\n elif path_file == '':\n feedback.reportError('WARNING TEMPORARY LAYER WITHOUT PATH\\n')\n else:\n path_dir = path_file\n if path_dir:\n feedback.reportError('WARNING use the path of the input file ' + path_dir + '\\n')\n else:\n feedback.reportError('WARNING TEMPORARY LAYER WITHOUT PATH\\n')\n \n #FASE #04 - controllo se si sta salvando file con percorsi relativi nella cartella di progetto\n if path_dir not in str(Path(html)) and 'processing' not in str(Path(html)):\n feedback.reportError('WARNING HTML WITH RELATIVE PATH SAVED OUTSIDE THE PROJECT PATH DOES NOT WORK PROPERLY\\n')\n if 'processing' in str(Path(html)):\n feedback.reportError('WARNING TEMPORARY HTML WORK PROPERLY ONLY WITH ABSOLUTE PATH\\n')\n \n #FASE #05 - controllo se icona e css sono entro la cartella progetto\n if fogliocss and (path_dir not in fogliocss):\n feedback.reportError('WARNING css PATH OUTSIDE PROJECT PATH: the html file may not work correctly\\n')\n if icona and path_dir not in icona:\n feedback.reportError('WARNING icon PATH OUTSIDE PROJECT PATH: the html file may not work correctly\\n')\n \n #FASE #06 - aggiungo terminatore di percorso se non è un file temporaneo\n if path_dir != '':\n path_dir = path_dir + '\\\\'\n \n #FASE #07 - modifica se csv in input\n if source_path.find(\".csv\"):\n source_path = 'file:///' + source_path[0:source_path.rfind('/')+1]\n \n #FASE #08 pulisco titolo e riordino a causa di un bug \n titolo = titolo.replace('\\\"','')\n \n intestazione = titolo.replace('\"','')\n intestazione = titolo.replace('\\'','')\n \n #riordino campi come da selezione per bug \n cleanlist = []\n [cleanlist.append(x) for x in sourceF if x not in cleanlist]\n sourceF = cleanlist\n \n #FASE #09 - inizializzo variabile per barra % esecuzione script\n # Compute the number of steps to display within the progress bar and\n # get features from source\n total = 100.0 / sourceL.featureCount() if sourceL.featureCount() else 0\n \n #FASE #10 - filtra dati se richiesto\n if len (filtro) > 0:\n request = QgsFeatureRequest(QgsExpression(filtro))\n features = sourceL.getFeatures(request)\n else:\n features = sourceL.getFeatures()\n \n #FASE #11 - produco il file in uscita\n with open(html, 'w') as output_file:\n # write header\n line = '\\r'\n output_file.write(line)\n \n #FASE #11.01 - se richiesto inserisco foglio css\n if fogliocss:\n if not rel_path or 'processing' 
in html:\n fogliocss = 'file:///' + fogliocss\n else:\n fogliocss = str(Path(fogliocss))\n fogliocss = fogliocss.replace(path_dir,'')\n line = '\\r\\r'\n output_file.write(line)\n \n #FASE #11.02 - se richiesto inserisco icona e titolo\n if icona or titolo:\n line = '
'\n output_file.write(line)\n if icona:\n if not rel_path or 'processing' in html:\n icona = 'file:///' + icona\n else:\n icona = str(Path(icona))\n icona = icona.replace(path_dir,'')\n line = '' #'\" style=\"width:' + wi + ';height:' + hi + ';\">'\n output_file.write(line)\n line = ''\n if titolo:\n if icona:\n line = line + '' + '  ' + titolo + ''\n else:\n line = line + '' + titolo + ''\n output_file.write(line)\n line = '
'\n output_file.write(line)\n line = None\n \n #FASE #11.03 - compongo tabella\n line = ''\n output_file.write(line)\n \n #FASE #11.04 - inserisco testata pagina se ho più di una pagina\n if values and pie_p:\n if rel_path and 'processing' not in html:\n html = str(Path(html))\n html = html.replace(path_dir,'')\n feedback.pushInfo('Done: ' + html + '\\n') \n line = ''\n output_file.write(line)\n \n line = '\\r\\r'\n output_file.write(line)\n \n #titoli colonne\n line = ''.join(('\\r'\n output_file.write(line)\n \n line = '\\r\\r'\n output_file.write(line)\n \n #righe tabella\n for current, f in enumerate(features):\n line = '\\r'\n output_file.write(line)\n \n for name in sourceF:\n #controllo se si tratta di una immagine\n try:\n img_type = f[name].split(\".\")\n img_type = img_type[len(img_type)-1]\n except:\n img_type = ''\n \n #se è un'immagine e/o ha un percorso\n if img_type in [\"JPEG\",\"jpeg\",\"JPG\",\"jpg\",\"PNG\",\"png\"]:\n #se non è un file temporaneo o non voglio riferimenti relativi\n if not rel_path or 'processing' in html:\n if file_mem:\n img_name = ''\n else:\n img_name = 'file:///'\n if path_dir not in str(Path(f[name])):\n img_name = img_name + path_dir\n img_name = img_name + f[name]\n else:\n #se voglio riferimenti relativi\n img_name = str(Path(f[name]))\n img_name = img_name.replace(path_dir,'')\n line = ''.join('\\r') #+ 'width=\"' + wf + '\" height=\"' + hf +\n else:\n try:\n line = ''.join('\\r')\n except:\n line = ''.join('\\r')\n output_file.write(line)\n \n line = '\\r'\n output_file.write(line)\n\n # Update the progress bar\n feedback.setProgress(int(current * total))\n \n #FASE #11.05 - inserisco piè di pagina se ho più di una pagina\n if values and pie_p:\n if rel_path and 'processing' not in html:\n html = str(Path(html))\n html = html.replace(path_dir,'')\n feedback.pushInfo('Done: ' + html + '\\n') \n line = ''\n output_file.write(line)\n \n line = '\\r
' + str(valori) + '
'\n output_file.write(line)\n for i in range (0, len(values)):\n html = re.sub('_[0-9]{2,3}(.html)','_0' + str(i) + '.html', html)\n if i == 0:\n line = '«'\n output_file.write(line)\n try:\n valore_ins = values[i].toString('dd.MM.yyyy')\n except:\n valore_ins = str(values[i])\n line = '' + partizione + ': ' + valore_ins + ''\n output_file.write(line)\n line = '»
'+ str(name)+ '\\r') for name in sourceF) + '
'+f[name].toString(\"dd.MM.yyyy\")+ ''+ str(f[name]) + '
'\n output_file.write(line)\n for i in range (0, len(values)):\n html = re.sub('_[0-9]{2,3}(.html)','_0' + str(i) + '.html', html)\n if i == 0:\n line = '«'\n output_file.write(line)\n try:\n valore_ins = values[i].toString('dd.MM.yyyy')\n except:\n valore_ins = str(values[i])\n line = '' + partizione + ': ' + valore_ins + ''\n output_file.write(line)\n line = '»
\\r'\n output_file.write(line)\n \n output_file.close()\n return {self.OUTPUT_H: html}\n \n # INIZIO ELABORAZIONE\n if filtro and \"'\" in filtro or not filtro:\n partizione = 0\n values = ''\n valori = ''\n risultato = html_composer(sourceL, sourceF, filtro, titolo, html, source_path, icona, fogliocss, rel_path, partizione, values, valori)\n elif filtro:\n partizione = filtro[1:len(filtro)-1]\n idx = sourceL.fields().indexOf(partizione)\n values = sourceL.uniqueValues(idx)\n pagine = len(values)\n values = sorted(values)\n for current, valori in enumerate(values):\n try:\n valori = valori.toString(\"yyyy-MM-dd\")\n except:\n pass\n if valori:\n N_Filter =\"\\\"\" + partizione + \"\\\"\" + ' = ' + \"'\" + str(valori) + \"'\"\n else:\n N_Filter =\"\\\"\" + partizione + \"\\\"\" + \" is None\"\n filtro = N_Filter\n finale = html.replace(\".html\", \"_0\" + str(current)+\".html\")\n risultato = html_composer(sourceL, sourceF, filtro, titolo, finale, source_path, icona, fogliocss, rel_path, partizione, values, valori)\n filtro = \"\"\n \n return{self.OUTPUT_H: risultato['OUTPUT_H']}","sub_path":"HTML_Generator_algorithm.py","file_name":"HTML_Generator_algorithm.py","file_ext":"py","file_size_in_byte":24252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"210817218","text":"\nfrom odoo import api, fields, models\nimport datetime\n\n\nclass StockPicking(models.Model):\n _inherit = \"stock.picking\"\n\n def invoice_line_non_kit(self):\n sale_order_line_obj = self.env['sale.order.line']\n invoice_line_obj = self.env['account.invoice.line']\n inv_rec = self.env['account.invoice'].search(\n [('sale_id', '=', self.origin)], limit=1)\n sale_order = self.env['sale.order'].search(\n [('name', '=', self.origin)], limit=1)\n inv_line = False\n for sale_line in sale_order.order_line:\n if sale_line.product_id and \\\n sale_line.product_id.product_tmpl_id and \\\n not sale_line.product_id.product_tmpl_id.bom_ids:\n account = self.get_account_properties()\n inv_line = invoice_line_obj.create({\n 'name': sale_line.name,\n 'account_id': account and account.id or False,\n 'invoice_id': inv_rec and inv_rec.id or False,\n 'price_unit': sale_line.price_unit,\n 'quantity': sale_line.product_uom_qty,\n 'uom_id': sale_line.product_id and\n sale_line.product_id.uom_id.id or False,\n 'product_id': sale_line.product_id and\n sale_line.product_id.id or False,\n })\n order_line_ids = sale_order_line_obj.search([\n ('order_id', '=', sale_order.id),\n ('product_id', '=', sale_line.product_id.id)])\n for order_line in order_line_ids:\n order_line.write({\n 'qty_to_invoice': sale_line.product_uom_qty,\n 'invoice_lines': [(4, inv_line.id, 0)]\n })\n tax_ids = []\n if order_line and order_line_ids[0]:\n for tax in order_line[0].tax_id:\n tax_ids.append(tax.id)\n\n inv_line.write({\n 'price_unit': order_line[0].price_unit,\n 'discount': order_line[0].discount,\n 'invoice_line_tax_ids': [(6, 0, tax_ids)]\n })\n inv_rec.compute_taxes()\n return True\n\n def get_account_properties(self):\n ir_property_obj = self.env['ir.property']\n account_obj = self.env['account.account']\n account = False\n for pick_line in self.move_lines:\n if pick_line.product_id.property_account_income_id:\n account = pick_line.product_id.\\\n property_account_income_id\n elif pick_line.product_id.categ_id.\\\n property_account_income_categ_id:\n account = pick_line.product_id.categ_id.\\\n property_account_income_categ_id\n else:\n account_search = ir_property_obj.search(\n [('name', '=', 
'property_account_income_categ_id')])\n account = account_search.value_reference\n account = account.split(\",\")[1]\n account = account_obj.browse(account)\n return account\n\n def invoice_lines_creation(self):\n invoice_line_obj = self.env['account.invoice.line']\n acc_inv_rec = self.env['account.invoice'].search(\n [('sale_id', '=', self.origin)], limit=1)\n sale_order = self.env['sale.order'].search(\n [('name', '=', self.origin)], limit=1)\n account = self.get_account_properties()\n for inv_lines in acc_inv_rec.invoice_line_ids:\n inv_lines.unlink()\n for sale_line in sale_order.order_line:\n inv_line_id = invoice_line_obj.create({\n 'name': sale_line.name,\n 'account_id': account and account.id or False,\n 'invoice_id': acc_inv_rec and acc_inv_rec.id or False,\n 'price_unit': sale_line.price_unit,\n 'quantity': sale_line.product_uom_qty,\n 'uom_id': sale_line.product_id.uom_id and\n sale_line.product_id.uom_id.id or False,\n 'product_id': sale_line.product_id and\n sale_line.product_id.id or False\n })\n sale_line.write({\n 'qty_to_invoice': sale_line.qty_delivered,\n 'invoice_lines': [(4, inv_line_id.id, 0)]\n })\n tax_ids = []\n if sale_line[0]:\n for tax in sale_line[0].tax_id:\n tax_ids.append(tax.id)\n inv_line_id.write({\n 'price_unit': sale_line[0].price_unit,\n 'discount': sale_line[0].discount,\n 'invoice_line_tax_ids': [(6, 0, tax_ids)]\n })\n acc_inv_rec.compute_taxes()\n return True\n\n @api.multi\n def action_done(self):\n if self.sale_id and self.sale_id.carrier_id:\n if not self.env['sale.order.line'].search_count([\n ('order_id', 'in', self.ids),\n ('is_delivery', '=', True)]):\n self.sale_id.delivery_rating_success = False\n res = self.sale_id.carrier_id.rate_shipment(self.sale_id)\n if res.get('success', False):\n self.sale_id.write({\n 'delivery_rating_success': True,\n 'delivery_price': res and res.get('price', 0.0),\n 'delivery_message': res and\n res.get('warning_message', 0.0),\n })\n self.carrier_price = res.get('price', 0.0)\n else:\n self.sale_id.write({\n 'delivery_rating_success': False,\n 'delivery_price': 0.0,\n 'delivery_message': res and\n res.get('error_message', 0.0),\n })\n self._add_delivery_cost_to_so()\n self.sale_id.invoice_shipping_on_delivery = False\n res = super(StockPicking, self).action_done()\n acc_inv_rec = self.env['account.invoice'].search([\n ('sale_id', '=', self.origin)], limit=1)\n sale_order = self.env['sale.order'].search([\n ('name', '=', self.origin)], limit=1)\n inv_line_obj = self.env['account.invoice.line']\n flag = 0\n if sale_order and acc_inv_rec:\n account = self.get_account_properties()\n acc_inv_rec.write({\n 'user_id': sale_order.user_id and\n sale_order.user_id.id or False,\n 'name': sale_order.client_order_ref,\n 'date_invoice': fields.Date.context_today(self)\n })\n for inv_lines in acc_inv_rec.invoice_line_ids:\n inv_lines.unlink()\n for sale_line in sale_order.order_line:\n if sale_line.product_id.product_tmpl_id.bom_ids:\n invoice_line_id = inv_line_obj.create({\n 'name': sale_line.name,\n 'account_id': account.id,\n 'invoice_id': acc_inv_rec and acc_inv_rec.id,\n 'price_unit': sale_line.price_unit,\n 'quantity': sale_line.product_uom_qty,\n 'uom_id': sale_line.product_id.uom_id.id,\n 'product_id': sale_line.product_id.id})\n sale_line.write({\n 'qty_to_invoice': sale_line.qty_delivered,\n 'invoice_lines': [(4, invoice_line_id.id, 0)]\n })\n tax_ids = []\n if sale_line[0]:\n for tax in sale_line[0].tax_id:\n tax_ids.append(tax.id)\n invoice_line_id.write({\n 'price_unit': sale_line[0].price_unit,\n 'discount': 
sale_line[0].discount,\n 'invoice_line_tax_ids': [(6, 0, tax_ids)]\n })\n acc_inv_rec.compute_taxes()\n else:\n flag = 1\n if flag == 1:\n self.invoice_line_non_kit()\n\n sale_order.write({\n 'x_studio_last_invoice_date': datetime.date.today(),\n 'x_studio_invoiced': True,\n 'x_studio_invoice_amount': acc_inv_rec.amount_total\n })\n return True\n\n @api.multi\n def do_print_picking_2(self):\n self.write({'x_studio_delivery_printed': True})\n return self.env.ref('stock.action_report_delivery').\\\n report_action(self)\n","sub_path":"custom_au_in/models/custom_stock_picking.py","file_name":"custom_stock_picking.py","file_ext":"py","file_size_in_byte":8773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"271911271","text":"from fastapi import FastAPI, Request, Response\nfrom pydantic import BaseModel\nfrom typing import List\nfrom pathlib import Path\nfrom model import Chatbot\nimport requests\nimport os\nimport json\nimport re\n\nTOKEN = os.environ.get(\"VERIFY_TOKEN\")\nPAGE_ACCESS_TOKEN = os.environ.get('PAGE_ACCESS_TOKEN')\n\ntokenizer_path = Path(\"data/tokenizer.pkl\")\nweights_path = Path(\"model/model.hdf5\")\nmodel = Chatbot.from_files(tokenizer_path, weights_path)\nstart_words = ['hola']\nstop_words = ['adios', 'chao', 'gracias']\n\napp = FastAPI()\n\nclass WebhookData(BaseModel):\n object: str = \"\"\n entry: List = []\n\n\n@app.router.get(\"/\")\ndef verify(request: Request):\n\n if request.query_params.get(\"hub.mode\") == \"subscribe\" and request.query_params.get(\"hub.challenge\"):\n if not request.query_params.get(\"hub.verify_token\") == TOKEN:\n return Response(content=\"Los tokens no coinciden\", status_code=403)\n return Response(content=request.query_params[\"hub.challenge\"])\n\n return Response(content=\"Faltan argumentos en el request\", status_code=400)\n\n\n\n@app.post(\"/\")\ndef chat(webhook_data: WebhookData):\n if webhook_data.object == 'page':\n for entry in webhook_data.entry:\n for event in entry['messaging']:\n if event.get('message', None):\n sender_id = event['sender']['id']\n recipient_id = event['recipient']['id']\n text = event['message']['text']\n text_words = text.lower().strip().split(' ') \n if any([re.match(w, text.lower()) for w in start_words]):\n send_message(sender_id, \"Hola. Soy un bot \\U0001F916 programado para responder preguntas sobre peliculas. ¿En qué puedo ayudarte?\")\n if len(text_words) > 1:\n answer = model.chat(text)\n send_message(sender_id, answer)\n elif any([re.match(w, text.lower()) for w in stop_words]):\n send_message(sender_id, \"Ha sido un placer servirte. 
Si tienes alguna otra pregunta puedes escribirme en cualquier momento \\U0001F642\")\n else:\n answer = model.chat(text)\n send_message(sender_id, answer)\n return Response(content=\"ok\")\n\ndef send_message(recipient_id: str, message: str):\n params = {'access_token': PAGE_ACCESS_TOKEN}\n headers = {'Content-Type': 'application/json'}\n data = json.dumps({\n 'recipient': {\n 'id': recipient_id\n },\n 'message': {\n 'text': message\n }\n })\n\n response = requests.post('https://graph.facebook.com/v2.6/me/messages', params=params, headers=headers, data=data)\n\n print(response.status_code)\n\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626815375","text":"class Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n strs.sort()\n LCP = \"\"\n if not strs:\n return LCP\n \n for i in range (len(strs[0])):\n if(strs[0][i] == strs[-1][i]):\n LCP += strs[0][i]\n else:\n break\n\n return LCP","sub_path":"0014-Longest-Common-Prefix/0014-Longest-Common-Prefix.py","file_name":"0014-Longest-Common-Prefix.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"111231905","text":"__author__ = 'dmitry'\r\nimport re\r\nimport unittest\r\nfrom tests import TestLoginValidation\r\n\r\ndef validate_login_with_regex(login):\r\n regex = re.compile(r\"\"\"\r\n ^ # beginning of string\r\n [a-zA-Z] # login should begin from latin character\r\n ([a-zA-Z\\d.-]{0,19}) # login should be less or equal 20 and consists of alphanumeric symbols, minus and dot\r\n (?<=[a-zA-Z\\d]$) # login should ends with alphanumeric symbols\r\n $ # end of string\r\n \"\"\", re.X)\r\n return regex.match(login) is not None\r\n\r\nclass TestLoginValidationWithRegex(TestLoginValidation):\r\n def setUp(self):\r\n self.validation_function = validate_login_with_regex\r\n\r\nif __name__ == '__main__':\r\n suite = unittest.TestLoader().loadTestsFromTestCase(TestLoginValidationWithRegex)\r\n unittest.TextTestRunner(verbosity=2).run(suite)","sub_path":"test_2/case_1.py","file_name":"case_1.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"195197481","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n gdaladdo_slms.py\n ---------------------\n Date : August 2012\n Copyright : (C) 2012 by Victor Olaya\n Email : volayaf at gmail dot com\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. 
*\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Victor Olaya'\n__date__ = 'August 2012'\n__copyright__ = '(C) 2012, Victor Olaya'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nfrom processing.algs.gdal.GdalAlgorithm import GdalAlgorithm\nfrom processing.core.parameters import ParameterRaster\nfrom processing.core.parameters import ParameterBoolean\nfrom processing.core.parameters import ParameterSelection\nfrom processing.core.parameters import ParameterString\nfrom processing.core.outputs import OutputRaster\n\nfrom processing.algs.gdal.GdalUtils import GdalUtils\n\n\nclass gdaladdo_slms(GdalAlgorithm):\n\n INPUT = 'INPUT'\n LEVELS = 'LEVELS'\n CLEAN = 'CLEAN'\n RESAMPLING_METHOD = 'RESAMPLING_METHOD'\n FORMAT = 'FORMAT'\n OUTPUT = 'OUTPUT'\n\n METHODS = [\n 'nearest',\n 'average',\n 'gauss',\n 'cubic',\n 'average_mp',\n 'average_magphase',\n 'mode',\n ]\n\n FORMATS = ['Internal (if possible)', 'External (GTiff .ovr)',\n 'External (ERDAS Imagine .aux)']\n\n def commandLineName(self):\n return \"gdalogr:slms-overviews\"\n\n def defineCharacteristics(self):\n self.name, self.i18n_name = self.trAlgorithm('SLMS - GeoTIFF overviews creation')\n self.group, self.i18n_group = self.trAlgorithm('[GDAL] Miscellaneous')\n self.addParameter(ParameterRaster(\n self.INPUT, self.tr('Input layer'), False))\n self.addParameter(ParameterString(self.LEVELS,\n self.tr('Overview levels'), '2 4 8 16 32'))\n #self.addParameter(ParameterBoolean(self.CLEAN,\n # self.tr('Remove all existing overviews'), False))\n self.addParameter(ParameterSelection(self.RESAMPLING_METHOD,\n self.tr('Resampling method'), self.METHODS, 0))\n #self.addParameter(ParameterSelection(self.FORMAT,\n # self.tr('Overview format'), self.FORMATS, 0))\n self.addOutput(OutputRaster(self.OUTPUT, self.tr('Pyramidized'), True))\n\n def getConsoleCommands(self):\n inFile = self.getParameterValue(self.INPUT)\n clearOverviews = self.getParameterValue(self.CLEAN)\n ovrFormat = self.getParameterValue(self.FORMAT)\n\n arguments = []\n arguments.append(inFile)\n #if clearOverviews:\n # arguments.append('-clean')\n arguments.append('-r')\n arguments.append(self.METHODS[self.getParameterValue(self.RESAMPLING_METHOD)])\n\n #if ovrFormat == 1:\n # external .ovr\n # arguments.append('-ro')\n #elif ovrFormat == 2:\n # external .aux\n # arguments.extend('--config USE_RRD YES'.split(' '))\n\n arguments.extend(self.getParameterValue(self.LEVELS).split(' '))\n self.setOutputValue(self.OUTPUT, inFile)\n\n return ['gdaladdo', GdalUtils.escapeAndJoin(arguments)]\n","sub_path":"gdal_scripts/gdaladdo_slms.py","file_name":"gdaladdo_slms.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324411756","text":"#Basic Calculator - Must include code comments \n#Must accept user input (include a prompt)\n\naddNum1 = input(\"What is the first number?: \")\naddNum2 = input (\"What is the second number to add? \")\n#Must store the input inside a variable\nequals = int(addNum1) + int(addNum2)\n#Must be able to accept whole numbers (21, 100,1000)\n\t#Data Type Integer\n#Must be able to add two whole numbers together\n\t#2+2 operator\n#Must be able to print out the sum of the two numbers that were input\n#Must include a message along with the sum of the numbers e.g. 
“the sum of x plus x is: y”\nprint (\"The sum of \" + str(addNum1) + \" plus \" + str(addNum2) + \" is: \" + str(equals))\n","sub_path":"BasicCalculator.py","file_name":"BasicCalculator.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"400066148","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport djangocms_text_ckeditor.fields\nimport filer.fields.image\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('filer', '0007_auto_20161016_1055'),\n ('avangard75', '0004_auto_20170301_1143'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='KuhniAkziiData',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('title', models.CharField(max_length=100, verbose_name='заголовок акции')),\n ('link', models.URLField(null=True, verbose_name='ссылка', blank=True)),\n ('description', djangocms_text_ckeditor.fields.HTMLField(null=True, verbose_name='описание акции', blank=True)),\n ('image', filer.fields.image.FilerImageField(to='filer.Image', verbose_name='изображение')),\n ],\n options={\n 'verbose_name_plural': 'акции',\n 'verbose_name': 'акция',\n },\n ),\n ]\n","sub_path":"avangard75/migrations/0005_kuhniakziidata.py","file_name":"0005_kuhniakziidata.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552696463","text":"\n# -*- coding: UTF-8 -*-\n#!/usr/bin/python3\n#\n\nstr = \"123abcrunooba321\"\nprint(str.strip('123a') ) #字符序列為12\n\n\n#format\nname = 'Jack'\ntext = 'world'\n\nprint('hello {name}, hello {text}'.format(name=name, text=text))\n# hello Jack, hello world\n","sub_path":"AboutString/aboutString.py","file_name":"aboutString.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"19915959","text":"import logging\nimport time\nfrom carreralib import ControlUnit\nfrom bluepy.btle import BTLEException\nfrom .errors import RMSException, DalException\n\n# track events (used for troubleshooting)\nRMS_STARTUP_EVENT = {'event_id': 1, 'event_name': 'rms_startup_event', 'type': 'info'}\nRMS_XDK_INITIALIZE_EVENT = {'event_id': 2, 'event_name': 'rms_xdk_initialize_event', 'type': 'info'}\nRMS_CAMERA_INITIALIZE_EVENT = {'event_id': 3, 'event_name': 'rms_camera_initialize_event', 'type': 'info'}\nRMS_IR_INITIALIZE_EVENT = {'event_id': 4, 'event_name': 'rms_ir_initialize_event', 'type': 'info'}\nRMS_TRACK_INITIALIZE_EVENT = {'event_id': 5, 'event_name': 'rms_track_initialize_event', 'type': 'info'}\nRMS_SHUTDOWN_EVENT = {'event_id': 6, 'event_name': 'rms_shutdown_event', 'type': 'info'}\nRMS_XDK_TERMINATE_EVENT = {'event_id': 7, 'event_name': 'rms_xdk_terminate_event', 'type': 'info'}\nRMS_CAMERA_TERMINATE_EVENT = {'event_id': 8, 'event_name': 'rms_camera_terminate_event', 'type': 'info'}\nRMS_IR_TERMINATE_EVENT = {'event_id': 9, 'event_name': 'rms_ir_terminate_event', 'type': 'info'}\nRMS_TRACK_TERMINATE_EVENT = {'event_id': 10, 'event_name': 'rms_track_terminate_event', 'type': 'info'}\nRMS_RACE_START_EVENT = {'event_id': 11, 'event_name': 'rms_race_start_event', 'type': 'info'}\nRMS_RACE_END_EVENT = {'event_id': 12, 'event_name': 'rms_race_end_event', 'type': 'info'}\nRMS_LEADER_BOARD_EVENT = {'event_id': 13, 'event_name': 'rms_leader_board_event', 'type': 
'info'}\nRMS_RESET_EVENT = {'event_id': 14, 'event_name': 'rms_reset_event', 'type': 'error'}\nRMS_INITIALIZE_ERROR = {'event_id': -1, 'event_name': 'rms_initialize_error', 'type': 'error'}\nRMS_STARTUP_ERROR = {'event_id': -2, 'event_name': 'rms_startup_error', 'type': 'error'}\nRMS_TERMINATE_ERROR = {'event_id': -3, 'event_name': 'rms_terminate_error', 'type': 'error'}\nRMS_SHUTDOWN_ERROR = {'event_id': -4, 'event_name': 'rms_shutdown_error', 'type': 'error'}\nRMS_RACE_ERROR = {'event_id': -3, 'event_name': 'rms_race_error', 'type': 'error'}\n\n\nclass MongoRMS(object):\n \"\"\"\n MongoRMS\n - connects to Carrera racetrack\n - creates new race\n - starts race\n - resets on reset command\n - stops race\n \"\"\"\n def __init__(self, cu_addr, dal, game_cfg):\n \"\"\"\n initialize race track\n\n :param cu_addr:\n :param dal:\n :param game_cfg:\n \"\"\"\n self.__cu_addr = cu_addr\n self.__cu = None\n self.__dal = dal\n self.__cfg = game_cfg\n self.__current_race = None\n self.__logger = logging.getLogger(__name__)\n self.__logger.info('race management system created')\n\n def initialize_cameras(self):\n \"\"\"\n setup usb cameras\n\n :return:\n \"\"\"\n self.__dal.send_event(RMS_CAMERA_INITIALIZE_EVENT, 'initialize cameras (warning: not implemented yet)')\n\n def terminate_cameras(self):\n \"\"\"\n release system handles to cameras\n\n :return:\n \"\"\"\n self.__dal.send_event(RMS_CAMERA_TERMINATE_EVENT, 'terminate camera handles (warning: not implemented yet)')\n\n def initialize_ir(self):\n \"\"\"\n setup infrared receiver/transmitter\n :return:\n \"\"\"\n self.__dal.send_event(RMS_IR_INITIALIZE_EVENT, 'initialize infrared (warning: not implemented yet)')\n\n def terminate_ir(self):\n \"\"\"\n setup infrared receiver/transmitter\n :return:\n \"\"\"\n self.__dal.send_event(RMS_IR_TERMINATE_EVENT, 'terminate infrared handles (warning: not implemented yet)')\n\n def initialize_xdk(self):\n \"\"\"\n setup xdk connections\n\n :return:\n \"\"\"\n self.__dal.send_event(RMS_XDK_INITIALIZE_EVENT, 'initialize xdk (warning: not implemented yet)')\n\n def terminate_xdk(self):\n \"\"\"\n setup xdk connections\n\n :return:\n \"\"\"\n self.__dal.send_event(RMS_XDK_TERMINATE_EVENT, 'terminate xdk handles (warning: not implemented yet)')\n\n def initialize_dal(self):\n self.__dal.initialize(self.__cfg['name'], self.__cfg['league'])\n\n def initialize_racetrack(self):\n \"\"\"\n setup racetrack connection\n\n :return:\n \"\"\"\n try:\n self.__cu = ControlUnit(self.__cu_addr)\n self.__dal.send_event(RMS_TRACK_INITIALIZE_EVENT, 'successfully initialized and connected to track')\n except BTLEException as btlee:\n self.__logger.fatal('problem connecting to carrera track: %s', btlee.message)\n self.__dal.send_event(RMS_INITIALIZE_ERROR, 'failed to initialize track')\n raise RMSException(btlee.message, RMS_INITIALIZE_ERROR['event_id'])\n\n def terminate_racetrack(self):\n \"\"\"\n setup racetrack connection, carrera control unit does not throw an exception during closing\n\n :return:\n \"\"\"\n self.__cu.close()\n self.__dal.send_event(RMS_TRACK_TERMINATE_EVENT, 'successfully disconneted and terminated track')\n\n def start_system(self):\n \"\"\"\n start the system, performing all hardware checks\n\n :return:\n \"\"\"\n try:\n self.initialize_dal()\n self.initialize_racetrack()\n self.initialize_ir()\n self.initialize_cameras()\n self.initialize_xdk()\n except DalException as de:\n self.__logger.fatal('failed to start system: %s', de)\n self.__logger.fatal('failed to initialize dal')\n raise RMSException(de)\n 
except RMSException as rmse:\n self.__logger.fatal('failed to start system: %s', rmse)\n self.__dal.send_event(RMS_STARTUP_ERROR, 'Mongo RMS failed to properly initialize')\n raise RMSException(rmse)\n self.__dal.send_event(RMS_STARTUP_EVENT, 'Mongo RMS initialized and running')\n\n def shutdown_system(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.terminate_racetrack()\n self.terminate_ir()\n self.terminate_cameras()\n self.terminate_xdk()\n self.__dal.send_event(RMS_SHUTDOWN_EVENT, 'mongo RMS successfully terminated and shutdown')\n\n def setup_drivers(self):\n \"\"\"\n\n :return:\n \"\"\"\n pass\n\n def setup_cars(self):\n \"\"\"\n\n :return:\n \"\"\"\n pass\n\n def setup_race(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.setup_drivers()\n self.setup_cars()\n pass\n\n def start_race(self):\n \"\"\"\n\n :return:\n \"\"\"\n pass\n","sub_path":"mongo_rms/race_management_system.py","file_name":"race_management_system.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"240370467","text":"#!/usr/bin/env python\n\"\"\"load_graph.py \nLoad a set of application-dataset relationships in CSV form into a Neptune graph database\n\"\"\"\n\nfrom __future__ import print_function # Python 2/3 compatibility\nimport csv\nimport sys\nimport os\nimport argparse\n\nfrom gremlin_python import statics\nfrom gremlin_python.structure.graph import Graph\nfrom gremlin_python.process.graph_traversal import __\n# from gremlin_python.process.strategies import *\nfrom gremlin_python.driver.driver_remote_connection import DriverRemoteConnection\n\nfrom gremlin_python.process.traversal import Bindings\n\n\ndef parse_options():\n \"\"\"parse the command line options, returning input file and Neptune endpoint\"\"\"\n parser = argparse.ArgumentParser(description=\"Load CSV input into UBD Neptune database\")\n parser.add_argument('-i', '--ifile', default='algo-output.csv',\n metavar=\"input-pathname\")\n parser.add_argument('-n', '--neptune',\n metavar=\"neptune-endpoint\")\n return parser.parse_args()\n\n\ndef load_database(input_file, neptune_endpoint):\n \"\"\"reads the input CSV file and loads into the neptune database\n Positional Arguments:\n input_file: input CSV file\n neptune_endpoint: secure web socket Neptune endpoint\n \"\"\"\n\n # initiate connection\n graph = Graph()\n\n # Neptune secure web sockets endpoint, e.g.\n # wss://neptunedbinstance-gobble.dygook.us-west-1.neptune.amazonaws.com:8182/gremlin\n if neptune_endpoint is None:\n neptune_endpoint = os.environ.get('NEPTUNEDBRO')\n if neptune_endpoint is None:\n sys.exit(\"Neptune Endpoint was not supplied in either command line or NEPTUNEDBRO environment\")\n remote_connection = DriverRemoteConnection(neptune_endpoint, 'g')\n graph_trav = graph.traversal().withRemote(remote_connection)\n\n # start loading graph\n\n empty = False\n # clear graph\n try:\n graph_trav.V().drop().iterate()\n except:\n empty = True\n print(\"No existing graphs to clear\", file=sys.stderr)\n names = []\n titles = []\n\n # load csv file\n with open(input_file, 'r') as file:\n\n # initiate csv reader\n reader = csv.DictReader(file)\n\n # loop through every line in csv file\n for line in reader:\n\n # generates a list of existing applications and datasets to avoid duplicates\n if not empty:\n names = graph_trav.V().name.toList()\n titles = graph_trav.V().title.toList()\n\n # this conditional checks for application duplicates\n if line['name'] not in names:\n # if application is not yet in 
database, add it\n app_vertex = graph_trav.addV('application').property('topic', line['topic']) \\\n .property('name', line['name']) \\\n .property('site', line['site']) \\\n .property('screenshot', line['screenshot']) \\\n .property('publication', line['publication']) \\\n .property('description', line['description']).next()\n else:\n print(f\"{line['name']} already in db, skipping...\", file=sys.stderr)\n # else, get existing application vertex\n app_vertex = graph_trav.V().has('application', 'name', line['name']).limit(1)\n\n # this conditional checks for dataset duplicates\n if line['title'] not in titles:\n # if dataset is not yet in database, add it\n dataset_vertex = graph_trav.addV('dataset') \\\n .property('doi', line['doi']) \\\n .property('title', line['title']).next()\n else:\n # else, get existing dataset vertex\n dataset_vertex = graph_trav.V().has('dataset', 'title', line['title']).limit(1)\n\n # add edge between application and dataset vertices\n graph_trav.addE('uses').from_(app_vertex).to(dataset_vertex).iterate()\n\n\n # counts vertices, used for troubleshooting purposes\n print(f\"Vertices count: {graph_trav.V().count().next()}\", file=sys.stderr)\n\n # close connection\n # remote_connection.close()\n\n return graph_trav\n\n# Main program\nargs = parse_options()\nload_database(args.ifile, args.neptune)\n","sub_path":"load_graph.py","file_name":"load_graph.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"242965586","text":"import sys, random, math\r\n\r\n\r\n\r\ndef getFeatureData(featureFile, bias = 0):\r\n\t\r\n\tx = []\r\n\t\r\n\tdFile = open(featureFile, 'r')\r\n\t\r\n\ti = 0\r\n\t\r\n\tfor line in dFile:\r\n\t\r\n\t\trow = line.split()\r\n\t\t\r\n\t\trVec = [float(item) for item in row]\r\n\t\t\r\n\t\tif bias > 0:\r\n\t\t\r\n\t\t\trVec.insert(0, bias)\r\n\r\n#\t\tprint('row {} : {}'.format(i,rVec))\r\n \r\n\t\tx.append(rVec) \r\n\t\t\r\n\t\ti += 1\r\n\t\t\r\n\tdFile.close()\r\n\t\r\n\treturn x\r\n\r\n\r\ndef getLabelData(labelFile, hyperPlaneClass = False):\r\n\r\n\tlFile = open(labelFile, 'r')\r\n\t\r\n\tlDict = {}\r\n\t\r\n\tfor line in lFile:\r\n\t\r\n\t\trow = line.split()\r\n\t\r\n\t\t\r\n\t\tif hyperPlaneClass and int(row[0]) <= 0:\r\n\t\t\r\n\t\t\tlDict[int(row[1])] = -1\r\n\t\t\r\n\t\telse:\r\n\t\t\t\r\n\t\t\tlDict[int(row[1])] = int(row[0])\r\n\t\r\n\tprint('label : {}'.format(lDict))\r\n\t\t\r\n\tlFile.close()\r\n\t\r\n\treturn lDict\r\n\r\n\r\ndef connectLabels(lsi, n, lsl):\r\n\t\r\n\tcheckList, lstest, lstrain = [i[1] for i in lsl], [], []\r\n\r\n\tfor i in (range(len(lsi))):\r\n\r\n\t\tfor j in range(len(lsl)):\r\n\r\n\t\t\tif lsl[j][1] == i:\r\n\r\n\t\t\t\tlsi[i].append(lsl[j][0])\r\n\t\t\t\r\n\t\tif i not in checkList:\r\n\r\n\t\t\tlstest.append(lsi[i])\r\n\t\t\t\r\n\t\telse:\r\n\r\n\t\t\tlstrain.append(lsi[i])\r\n\r\n\treturn lsi, lstrain, lstest\r\n\r\n\r\ndef dot(weightList, trainList):\r\n\r\n\toutPut = []\r\n\r\n\tfor i in trainList:\r\n\r\n\t\ti = [1] + i\r\n\r\n\t\tsum = 0\r\n\r\n\t\tfor j,k in zip(weightList, i):\r\n\t\t\t\r\n\t\t\tsum += j * k\r\n\t\t\r\n\t\toutPut.append(sum)\r\n\t\r\n\treturn outPut\r\n\r\n\r\ndef sigmoid(weightList, trainList):\r\n\t\r\n\toutPut = dot(weightList, trainList)\r\n\t\r\n\tfor i in range(len(outPut)):\r\n\r\n\t\t#if outPut[i] > -3:\r\n\t\t\t\r\n\t\toutPut[i] = 1 / (1 + (2.718281828459045) ** (-1 * outPut[i]))\r\n\t\t\r\n\t\t#else:\r\n\t\t\r\n\t\t#\toutPut[i] = 0\r\n\t\r\n\treturn outPut\r\n\r\ndef sigm (w, 
inp):\r\n\r\n\tsum = 0\r\n\r\n\tfor i in range(len(w)):\r\n\t\t\r\n\t\tsum += w[i] * inp[i]\r\n\r\n\tsum = 1/(1+ (2.718281828459045 ** (-1*sum)))\r\n\r\n\tif sum == 1.0:\r\n\t\r\n\t\tsum = 0.9999\r\n\t\r\n\treturn sum\r\n\r\n\r\ndef loss(outPut, trainList, weightList, alpha = 0.001):\r\n\t#print()\r\n\ttotalLoss = [0 for _ in weightList]\r\n\r\n\tlse = 0\r\n\t\r\n\tfor i, j in zip(trainList, outPut):\r\n\t\r\n\t\ti = [1] + i\r\n\t\t\r\n\t\tfor k in range(len(weightList)):\r\n\t\t\t\r\n\t\t\ttotalLoss[k] += (alpha * (i[-1] - j) * i[k])\r\n\t\t\t\r\n\t\tlse -= (i[-1] * math.log(sigm(weightList, i)) + ((1 - i[-1]) * math.log(1 - sigm(weightList, i))))\r\n\r\n\treturn totalLoss, lse\r\n\r\n\r\ndef logisticRegression(trainList,pow=0.001, alpha = 0.001):\r\n\t\r\n\tif len(sys.argv) >= 4:\r\n\r\n\t\talpha = float(sys.argv[3])\r\n\r\n\tif len(sys.argv) == 5:\r\n\t\r\n\t\tpow = float(sys.argv[4])\r\n\t\t\r\n\tweightList, lse, nofeatures, diff =[], 0, len(trainList[0]), 10\r\n\t\r\n\tfor _ in range(nofeatures):\r\n\t\t\t\r\n\t\tweightList.append(random.random())\r\n\r\n\twhile ( diff > pow):\r\n\t\t \r\n\t\toutPut, prev, lse = sigmoid(weightList, trainList),lse, 0\r\n\t\r\n\t\tdifw, lse = loss(outPut, trainList, weightList,alpha=alpha)\r\n\t\t\r\n\t\tfor i in range(nofeatures):\t\t\t\r\n\t\t\t\r\n\t\t\tweightList[i] += difw[i]\r\n\t\t\r\n\t\tdiff = abs(prev - lse)\r\n\t\t\r\n\t\tprint('\\n>> error differnce =', diff, end = '')\r\n\t\r\n\tprint(\" FINAL LOSS =\", lse)\r\n\t\r\n\treturn weightList\r\n\r\n\r\n\r\n#opening files\r\ninputList = getFeatureData(sys.argv[1])\r\n\r\nlabelList = getLabelData(sys.argv[2])\r\n\r\n\r\ntsl = []\r\n\r\nfor k, v in labelList.items():\r\n\t\r\n\ttsl.append([v,k])\r\n\r\nlabelList = tsl\r\n\r\nfeatures = (len(inputList[0]))\r\n\r\n\r\n#connecting missing labels with it's features\r\ninputList, trainList, testList = connectLabels(inputList, features, labelList)\r\n\r\nfor i in range(len(inputList)):\t\r\n\r\n\tinputList[i] = [1] + inputList[i]\r\n\r\n#training\r\nw = logisticRegression(trainList)\r\n\r\n#classifying regression labels with boundry conditions\r\nfor n, i in enumerate(testList):\r\n\t\r\n\ti = [1] + i\r\n\r\n\tsum = sigm(w,i)\r\n\t\t\r\n\tif sum > 0.5:\r\n\r\n\t\tsum = 1\r\n\t\r\n\telse:\r\n\t\t\r\n\t\tsum = 0\r\n\t\r\n\ttestList[n].append(sum)\r\n\r\nfor n, i in enumerate(inputList):\r\n\r\n\tfor j in testList:\r\n\r\n\t\tif i[1:] == j[:len(j)-1]:\r\n\t\t\r\n\t\t\tprint(j[-1], n)\r\n\r\nw0 = w.pop(0)\r\n\r\nsum = 0\r\n\r\nfor i in w:\r\n\r\n\tsum += i ** 2\r\n\r\nprint(\"w =\", w)\r\n\r\nprint(\"||w|| =\", sum**(1/2))\r\n\r\nprint(\"distance from origin =\", w0 / (sum) ** (1 / 2))\r\n","sub_path":"HW4/ds962_HW4.py","file_name":"ds962_HW4.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"603487357","text":"# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Mapping, Tuple\n\nimport transformers\n\nfrom composer.models.base import BaseMosaicModel\nfrom composer.models.nlp_metrics import LanguageCrossEntropyLoss\n\nif TYPE_CHECKING:\n from composer.core.types import Batch, Metrics, Tensors\n\nlog = logging.getLogger(__name__)\n\n\nclass MosaicTransformer(BaseMosaicModel):\n \"\"\"Implements the base logic that all Transformers can build on top of.\n\n Works with `Hugging Face Transformers `_.\n\n Args:\n module (transformers.PreTrainedModel): An instance of PreTrainedModel that\n contains the forward pass function.\n config (transformers.PretrainedConfig): The PretrainedConfig object that\n stores information about the model hyperparameters.\n tokenizer_name (str): The name of the tokenizer used for tihs model,\n necessary to assert required model inputs.\n \"\"\"\n\n def __init__(self, module: transformers.PreTrainedModel, config: transformers.PretrainedConfig,\n tokenizer_name: str) -> None:\n super().__init__()\n self.module = module\n self.config = config\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)\n log.info(\"Number of parameters in the model: \" \\\n f\"{sum(p.numel() for p in module.parameters()):,}\") # type: ignore (thirdparty)\n log.info(\"Number of trainable parameters in the model: \"\n f\"{sum(p.numel() for p in module.parameters() if p.requires_grad):,}\") # type: ignore (thirdparty)\n\n # the set of inputs that a model expects\n # if an algorithm modifies the loss function, it must remove \"labels\" from this set.\n self.model_inputs = set(self.tokenizer.model_input_names)\n self.model_inputs.update(set({\"labels\"}))\n\n # define metrics for measurements\n self.train_loss = LanguageCrossEntropyLoss()\n self.val_loss = LanguageCrossEntropyLoss()\n\n def loss(self, outputs: Mapping, batch: Batch) -> Tensors:\n \"\"\"Computes the loss of the tensor from the output.\n\n We don't implement this for the generic Transformer abstraction, since loss\n functions are model and objective specific. A single model architeture could\n use a myriad of loss functions which are better left expressed by the user.\n\n Args:\n outputs (Mapping): The dictionary output from the model.\n It could contain the loss as computed by Hugging Face,\n or algorithms can pop the labels from the input in case\n they modify the loss function.\n batch (Batch): The set of ground truth labels to use to compute the loss against.\n\n Returns:\n The loss as a ``Tensors`` object.\n\n Raises:\n NotImplementedError: A model-specific and task-specific loss function must be written.\n \"\"\"\n\n raise NotImplementedError(\"A model-specific loss function must be written.\")\n\n def forward(self, batch: Batch) -> Mapping:\n \"\"\"Runs the forward pass of the model.\n\n Args:\n batch (Batch): A dictionary of Dict[str, Tensor] of inputs that the\n model expects, as found in MosaicTransformer.get_model_inputs().\n\n Returns:\n A dictionary of model outputs as a ``Mapping``. 
It will include the loss\n if `labels` is passed as an input.\n \"\"\"\n if not isinstance(batch, dict):\n raise ValueError(f'Model expects batch to be a dict, got {type(batch)}')\n\n for key in self.model_inputs:\n if key not in batch.keys():\n raise ValueError(f'Batch missing key: {key}')\n\n output = self.module(**batch) # type: ignore (thirdparty)\n return output\n\n def metrics(self, train: bool = False) -> Metrics:\n \"\"\"Get metrics for evaluating the model.\n\n Downstream models should override this method if they would like to\n add task-specific metrics.\n\n Args:\n train (bool): a boolean flag to indicate whether to return\n training or validation metrics.\n\n .. warning:: If train=True, then it might calculate the training loss twice if\n algorithms are overriding the loss fn. This could be expensive due\n to the computational cost of softmax; it is worth exploring caching stratgies.\n\n Returns:\n A Metrics object that can be used to calculate task performance.\n \"\"\"\n return self.train_loss if train else self.val_loss\n\n def validate(self, batch: Batch) -> Tuple[Mapping, None]:\n \"\"\"Runs the validation step.\n\n Args:\n batch (Batch): a dictionary of Dict[str, Tensor] of inputs\n that the model expects, as found in MosaicTransformer.get_model_inputs().\n\n Returns:\n A tuple of (Mapping, None) with the output from the forward pass.\n This is fed into directly into the output of :meth:`metrics`.\n \"\"\"\n\n assert self.training is False, \"For validation, model must be in eval mode\"\n output = self.forward(batch)\n\n return (output, None)\n\n def get_model_inputs(self):\n \"\"\"Returns a set of inputs that the model expects in the forward pass.\n\n If an algorithm wants to interact with the model inputs (for instance,\n popping the labels for a custom loss fn, or adding attention head masks\n for head pruning, it must access self.set_model_inputs().\n\n Returns:\n The set of keys that are expected in the Mapping used to compute the forward pass.\n \"\"\"\n\n return self.model_inputs\n","sub_path":"composer/models/transformer_shared.py","file_name":"transformer_shared.py","file_ext":"py","file_size_in_byte":5807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"227099597","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n if len(strs)< 2:\n return [strs]\n \n curHash = {}\n \n for eachWord in strs:\n sortedStr = sorted(eachWord)\n key = ''.join(sortedStr)\n \n if key in curHash:\n curHash[key].append(eachWord)\n else:\n curHash[key] = [eachWord]\n return list(curHash.values())\n","sub_path":"baekjoon/anagramsss.py","file_name":"anagramsss.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"13099518","text":"\n\nimport os\nimport sys\nimport importlib\nimport numpy as np\nimport shutil\nimport tensorflow as tf\nfrom timeit import default_timer as timer\nif '__file__' in locals():\n dir_path = os.path.dirname(os.path.realpath(__file__))\nelse:\n dir_path = os.getcwd()\npython_path = dir_path + '/../python/'\nsys.path.append(python_path)\n\n\n\nimport dataset_loader\nimportlib.reload(dataset_loader)\nfrom dataset_loader import EM_DATA_REAL_SYTH, EM_DATA,EM_DATA_DISC_RANDOM\nfrom dataset_loader import BATCH_SIZE, NBOX_IN,NBOX_OUT,N_CHANNELS\nfrom dataset_loader import getbox\nimport net_3d_1\nimport utils\nfrom utils import get_available_gpus\n\n\n# In[2]:\n\n\n#define 
folders\n#base_data_folder = \"/Users/markroza/Documents/work_from_home/NNcourse_project/data/\"\nbase_data_folder = \"/Users/markroza/Documents/work_from_home/NNcourse_project/data/\"\nbase_data_folder = \"/specific/netapp5_2/iscb/wolfson/Mark/data/NNcourse_project/data/\"\ndata_fld = base_data_folder + \"/res6/synth_exp/\"\nout_fld = base_data_folder + \"/results/disc_exp/\"\n\n\nmodel_path = out_fld+'/network_test/'\ngraph_folder = out_fld+'/graphs/'\ntest_res_folder = out_fld + '/tests/'\n\nif os.path.isdir(out_fld):\n shutil.rmtree(out_fld, ignore_errors=True)\n\nos.mkdir(out_fld)\nos.mkdir(model_path)\nos.mkdir(graph_folder)\nos.mkdir(test_res_folder)\n\n\n\nsyn_data_pairs = dataset_loader.read_list_file(data_fld+'list_synth.txt')\nsyn_pdbs = [x[0] for x in syn_data_pairs]\nreal_data_pairs = dataset_loader.read_list_file(data_fld+'list_real.txt')\nreal_pdbs = [x[0] for x in real_data_pairs]\n\n\nreal_data = EM_DATA(data_fld,train_pdbs = real_pdbs[:4], is_random = True)\nreal_iter = real_data.train_dataset.make_initializable_iterator()\nreal_pair = real_iter.get_next()\n\nsynth_data = EM_DATA(data_fld,train_pdbs = syn_pdbs[:4], is_random = True)\nsynth_iter = synth_data.train_dataset.make_initializable_iterator()\nsyn_pair = synth_iter.get_next()\n\nreal_data_t = EM_DATA(data_fld,train_pdbs = real_pdbs[:4], is_random = True)\nreal_iter_t = real_data_t.train_dataset.make_initializable_iterator()\nreal_pair_t = real_iter_t.get_next()\n\nsynth_data_t = EM_DATA(data_fld,train_pdbs = syn_pdbs[:4], is_random = True)\nsynth_iter_t = synth_data_t.train_dataset.make_initializable_iterator()\nsyn_pair_t = synth_iter_t.get_next()\n\n\n\n\n# In[3]:\n\n\nimportlib.reload(net_3d_1)\n#tf.reset_default_graph()\nnn = net_3d_1.DISC_V1()\n\n\n# In[4]:\n\n\n# open session and initialize all variables\nconfig = tf.ConfigProto(log_device_placement=False)\nconfig.gpu_options.allow_growth = True\n\nwith tf.Session(config=config) as sess:\n #sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n saver = tf.train.Saver()\n\n time_start = timer()\n sess.run(synth_iter.initializer)\n sess.run(real_iter.initializer)\n sess.run(synth_iter_t.initializer)\n sess.run(real_iter_t.initializer)\n # training-loop\n\n summary_op = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(graph_folder, sess.graph)\n\n\n for batch in range(synth_data.N_batches-1):\n vx_synth, map_synth = sess.run(syn_pair)\n vx_real, map_real = sess.run(real_pair)\n\n\n fd ={nn.mp_real: map_real,nn.vx_real:vx_real,\\\n nn.mp_fake: map_synth,nn.vx_fake:vx_synth}\n\n sess.run(nn.opti_D, feed_dict=fd)\n\n #update\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n sess.run(extra_update_ops)\n\n\n #Learrate\n new_learn_rate = sess.run(nn.new_learning_rate)\n if new_learn_rate > 0.00005:\n sess.run(nn.add_global)\n\n #Summary\n summary_str = sess.run(summary_op, feed_dict=fd)\n summary_writer.add_summary(summary_str, batch)\n\n if batch %10 == 0:\n loss_real,loss_fake,loss_rand= sess.run([nn.D_real_loss,nn.D_fake_loss,nn.D_tilde_loss],feed_dict=fd)\n print(\"Loss Real {} Loss Fake {} Loss Rand {} \".format(loss_real,loss_fake,loss_rand))\n if batch %100 == 0:\n vx_synth, map_synth = sess.run(syn_pair_t)\n vx_real, map_real = sess.run(real_pair_t)\n\n\n fd ={nn.mp_real: map_real,nn.vx_real:vx_real,\\\n nn.mp_fake: map_synth,nn.vx_fake:vx_synth}\n\n sess.run(nn.opti_D, feed_dict=fd)\n prd_real,prd_synth = sess.run([nn.D_pro_logits,nn.G_pro_logits],feed_dict = fd)\n print(\"Losses 
\",np.mean(prd_real),np.mean(prd_synth))\n print(\"Accus \",np.sum(prd_real>0),np.sum(prd_synth<0))\n","sub_path":"AAcryoGAN3/code/python/train_disc.py","file_name":"train_disc.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"173005348","text":"# -*- encoding=utf -*-\n\nfrom __future__ import absolute_import\n\nimport datetime\nimport re\n\nfrom collections import namedtuple, OrderedDict\n\nfrom ...browser import Drilldown, Cell, PointCut, SetCut, RangeCut\nfrom ...browser import SPLIT_DIMENSION_NAME\nfrom ...model import Attribute\nfrom ...errors import *\nfrom ...expr import evaluate_expression\nfrom ...logging import get_logger\nfrom ... import compat\n\nfrom .mapper import DEFAULT_KEY_FIELD, PhysicalAttribute\nfrom .utils import condition_conjunction, order_column\n\n\ntry:\n import sqlalchemy\n import sqlalchemy.sql as sql\n\nexcept ImportError:\n from ...common import MissingPackage\n sqlalchemy = sql = MissingPackage(\"sqlalchemy\", \"SQL aggregation browser\")\n\n\n__all__ = [\n \"SnowflakeSchema\",\n \"QueryBuilder\"\n ]\n\n\nSnowflakeAttribute = namedtuple(\"SnowflakeAttribute\", [\"attribute\", \"join\"])\n\n\n\"\"\"Product of join_expression\"\"\"\nJoinedProduct = namedtuple(\"JoinedProduct\",\n [\"expression\", \"tables\"])\n\n\n_SQL_EXPR_CONTEXT = {\n \"sqlalchemy\": sqlalchemy,\n \"sql\": sql,\n \"func\": sql.expression.func,\n \"case\": sql.expression.case,\n \"text\": sql.expression.text,\n \"datetime\": datetime,\n \"re\": re,\n \"extract\": sql.expression.extract,\n \"and_\": sql.expression.and_,\n \"or_\": sql.expression.or_\n}\n\ndef table_str(key):\n \"\"\"Make (`schema`, `table`) tuple printable.\"\"\"\n table, schema = key\n return \"%s.%s\" % (str(schema), (table)) if schema else str(table)\n\n\nMATCH_MASTER_RSHIP = 1\nOUTER_DETAIL_RSHIP = 2\n\nclass SnowflakeTable(object):\n def __init__(self, schema, name, alias=None, table=None, join=None):\n self.schema = schema\n self.name = name\n self.table = table\n self.alias = alias\n self.join = join\n self.detail_keys = set()\n\n @property\n def key(self):\n return (self.schema, self.aliased_name)\n\n @property\n def aliased_name(self):\n return self.alias or self.name\n\n def __str__(self):\n return \"%s.%s\" % (self.key)\n\n# TODO: merge this with mapper\nclass SnowflakeSchema(object):\n def __init__(self, cube, mapper, metadata, safe_labels):\n self.cube = cube\n self.mapper = mapper\n self.metadata = metadata\n self.safe_labels = safe_labels\n\n # Initialize the shema information: tables, column maps, ...\n self.schema = self.mapper.schema\n\n # Prepare physical fact table - fetch from metadata\n #\n self.fact_key = self.cube.key or DEFAULT_KEY_FIELD\n self.fact_name = self.mapper.fact_name\n\n try:\n self.fact_table = sqlalchemy.Table(self.fact_name,\n self.metadata,\n autoload=True,\n schema=self.schema)\n except sqlalchemy.exc.NoSuchTableError:\n in_schema = (\" in schema '%s'\" % self.schema) if self.schema else \"\"\n msg = \"No such fact table '%s'%s.\" % (self.fact_name, in_schema)\n raise WorkspaceError(msg)\n\n try:\n self.fact_key_column = self.fact_table.c[self.fact_key].label(self.fact_key)\n except KeyError:\n try:\n self.fact_key_column = list(self.fact_table.columns)[0]\n except Exception as e:\n raise ModelError(\"Unable to get key column for fact \"\n \"table '%s' in cube '%s'. 
Reason: %s\"\n % (self.fact_name, self.cube.name, str(e)))\n\n # Collect all tables and their aliases.\n #\n # table_aliases contains mapping between aliased table name and real\n # table name with alias:\n #\n # (schema, aliased_name) --> (schema, real_name, alias)\n #\n\n # Mapping where keys are attributes and values are columns\n self.logical_to_column = {}\n # Mapping where keys are column labels and values are attributes\n self.column_to_logical = {}\n\n # Collect tables from joins\n\n self.tables = {}\n # Table -> relationship type\n # Prepare maps of attributes -> relationship type\n self.fact_relationships = {}\n self.aggregated_fact_relationships = {}\n\n self._collect_tables()\n self._analyse_table_relationships()\n\n def _collect_tables(self):\n \"\"\"Collect tables in the schema. Analyses their relationship towards\n the fact table.\n\n Stored information contains:\n\n * attribute ownership by a table\n * relationship type of tables towards the fact table: master/match or\n detail (outer)\n\n The rule for deciding the table relationship is as follows:\n\n * if a table is connected to a fact or other master/detail table by\n master/detail then it will be considered master/detail\n * if a table is connected to an outer detail it is considered to be\n outer detail (in relationship to the fact), regardless of it's join\n type\n * if a table is connected through outer detail to any kind of table,\n then it will stay as detail\n\n Input: schema, fact name, fact table, joins\n\n Output: tables[table_key] = SonwflakeTable()\n\n \"\"\"\n\n # Collect the fact table as the root master table\n #\n table = SnowflakeTable(self.schema, self.fact_name,\n table=self.fact_table)\n self.tables[table.key] = table\n\n # Collect all the detail tables\n # \n for join in self.mapper.joins:\n # just ask for the table\n\n sql_table = sqlalchemy.Table(join.detail.table,\n self.metadata,\n autoload=True,\n schema=join.detail.schema)\n\n if join.alias:\n sql_table = sql_table.alias(join.alias)\n\n table = SnowflakeTable(schema=join.detail.schema,\n name=join.detail.table,\n alias=join.alias,\n join=join,\n table=sql_table)\n\n self.tables[table.key] = table\n\n # Collect detail keys:\n # \n # Every table object has a set of keys `detail_keys` which are\n # columns that are used to join detail tables.\n #\n for join in self.mapper.joins:\n key = (join.master.schema, join.master.table)\n try:\n master = self.tables[key]\n except KeyError:\n raise ModelError(\"Unknown table (or join alias) '%s'\"\n % table_str(key))\n master.detail_keys.add(join.master.column)\n\n def _analyse_table_relationships(self):\n\n # Analyse relationships\n # ---------------------\n\n # Dictionary of raw tables and their joined products\n # table-to-master relationships:\n # MASTER_MATCH_RSHIP: either joined as \"match\" or \"master\"\n # OUTER_DETAIL_RSHIP: joined as \"detail\"\n relationships = {}\n\n # Anchor the fact table\n key = (self.schema, self.fact_name)\n relationships[key] = MATCH_MASTER_RSHIP\n self.tables[key].relationship = MATCH_MASTER_RSHIP\n\n # Collect all the tables first:\n for join in self.mapper.joins:\n # Add master table to the list\n table = (join.master.schema, join.master.table)\n if table not in relationships:\n self.fact_relationships[table] = None\n\n # Add (aliased) detail table to the rist\n table = (join.detail.schema, join.alias or join.detail.table)\n if table not in relationships:\n relationships[table] = None\n else:\n raise ModelError(\"Joining detail table %s twice\" % (table, ))\n\n # Analyse 
the joins\n for join in reversed(self.mapper.joins):\n master_key = (join.master.schema or self.schema, join.master.table)\n detail_key = (join.detail.schema or self.schema, join.alias or join.detail.table)\n\n if relationships.get(detail_key):\n raise InternalError(\"Detail %s already classified\" % detail_key)\n\n master_rs = relationships[master_key]\n\n if master_rs is None:\n raise InternalError(\"Joining to unclassified master. %s->%s \"\n \"Hint: check your joins, their order or \"\n \"mappings.\" % (table_str(master_key),\n table_str(detail_key)))\n elif master_rs == MATCH_MASTER_RSHIP \\\n and join.method in (\"match\", \"master\"):\n relationship = MATCH_MASTER_RSHIP\n elif master_rs == OUTER_DETAIL_RSHIP \\\n or join.method == \"detail\":\n relationship = OUTER_DETAIL_RSHIP\n else:\n raise InternalError(\"Unknown relationship combination for \"\n \"%s(%s)->%s(%s)\"\n % (table_str(master_key), master_rs,\n table_str(detail_key), join.method))\n\n relationships[detail_key] = relationship\n self.tables[detail_key].relationship = relationship\n\n\n # Prepare relationships of attributes\n #\n # TODO: make SnowflakeAttribute class\n attributes = self.cube.get_attributes(aggregated=False)\n tables = self.mapper.tables_for_attributes(attributes)\n tables = dict(zip(attributes, tables))\n mapping = {}\n\n for attribute in attributes:\n try:\n table_ref = tables[attribute]\n except KeyError:\n raise ModelError(\"Unknown table for attribute %s. \"\n \"Missing mapping?\" % attribute)\n try:\n mapping[attribute] = relationships[table_ref]\n except KeyError:\n attr, table = table_ref\n if table:\n message = \"Missing join for table '%s'?\" % table\n else:\n message = \"Missing mapping or join?\"\n\n raise ModelError(\"Can not determine to-fact relationship for \"\n \"attribute '%s'. %s\"\n % (attribute.ref(), message))\n self.fact_relationships = mapping\n\n attributes = self.cube.get_attributes(aggregated=True)\n tables = self.mapper.tables_for_attributes(attributes)\n tables = dict(zip(attributes, tables))\n mapping = {}\n for attribute in attributes:\n mapping[attribute] = relationships[tables[attribute]]\n self.aggregated_fact_relationships = mapping\n\n def _collect_detail_keys(self):\n \"\"\"Assign to each table which keys from the table are used by another\n detail table as master keys.\"\"\"\n\n\n def is_outer_detail(self, attribute, for_aggregation=False):\n \"\"\"Returns `True` if the attribute belongs to an outer-detail table.\"\"\"\n if for_aggregation:\n lookup = self.aggregated_fact_relationships\n else:\n lookup = self.fact_relationships\n\n try:\n return lookup[attribute] == OUTER_DETAIL_RSHIP\n except KeyError:\n # Retry as raw table (used by internally generated attributes)\n ref = self.mapper.physical(attribute)\n key = (ref.schema, ref.table)\n return self.tables[key].relationship\n except KeyError:\n raise InternalError(\"No fact relationship for attribute %s \"\n \"(aggregate: %s)\"\n % (attribute.ref(), for_aggregation))\n\n def join_expression(self, attributes, include_fact=True, master_fact=None,\n master_detail_keys=None):\n \"\"\"Create partial expression on a fact table with `joins` that can be\n used as core for a SELECT statement. 
`join` is a list of joins\n returned from mapper (most probably by `Mapper.relevant_joins()`)\n\n Returns a tuple: (`expression`, `tables`) where `expression` is\n SQLAlchemy expression object and `tables` is a list of `SnowflakeTable`\n objects used in the join.\n\n If `include_fact` is ``True`` (default) then fact table is considered\n as starting point. If it is ``False`` The first detail table is\n considered as starting point for joins. This might be useful when\n getting values of a dimension without cell restrictions.\n\n `master_fact` is used for building a composed aggregated expression.\n `master_detail_keys` is a dictionary of aliased keys from the master\n fact exposed to the details.\n\n **Requirement:** joins should be ordered from the \"tentacles\" towards\n the center of the star/snowflake schema.\n\n **Algorithm:**\n\n * FOR ALL JOINS:\n 1. get a join (order does not matter)\n 2. get master and detail TABLES (raw, not joined)\n 3. prepare the join condition on columns from the tables\n 4. find join PRODUCTS based on the table keys (schema, table)\n 5. perform join on the master/detail PRODUCTS:\n * match: left inner join\n * master: left outer join\n * detail: right outer join – swap master and detail tables and\n do the left outer join\n 6. remove the detail PRODUCT\n 7. replace the master PRODUCT with the new one\n\n * IF there is more than one join product left then some joins are\n missing\n * Result: join products should contain only one item which is the\n final product of the joins\n \"\"\"\n\n joins = self.mapper.relevant_joins(attributes)\n\n # Dictionary of raw tables and their joined products\n joined_products = {}\n\n master_detail_keys = master_detail_keys or {}\n\n tables = []\n\n fact_key = (self.schema, self.fact_name)\n\n if include_fact:\n if master_fact is not None:\n fact = master_fact\n else:\n fact = self.fact_table\n\n joined_products[fact_key] = fact\n tables.append(self.tables[fact_key])\n\n # Collect all the tables first:\n for join in joins:\n if not join.detail.table or (join.detail.table == self.fact_name and not join.alias):\n raise MappingError(\"Detail table name should be present and \"\n \"should not be a fact table unless aliased.\")\n\n # 1. MASTER\n # Add master table to the list. If fact table (or statement) was\n # explicitly specified, use it instead of the original fact table\n\n if master_fact is not None and (join.master.schema, join.master.table) == fact_key:\n table = master_fact\n else:\n table = self.table(join.master.schema, join.master.table)\n joined_products[(join.master.schema, join.master.table)] = table\n\n # 2. DETAIL\n # Add (aliased) detail table to the rist. Add the detail to the\n # list of joined tables – will be used to determine \"outlets\" for\n # keys of outer detail joins\n\n table = self.table(join.detail.schema, join.alias or join.detail.table)\n key = (join.detail.schema, join.alias or join.detail.table)\n joined_products[key] = table\n tables.append(self.tables[key])\n\n # Perform the joins\n # =================\n #\n # 1. find the column\n # 2. construct the condition\n # 3. 
use the appropriate SQL JOIN\n # \n for join in joins:\n # Prepare the table keys:\n # Key is a tuple of (schema, table) and is used to get a joined\n # product object\n master = join.master\n master_key = (master.schema, master.table)\n detail = join.detail\n detail_key = (detail.schema, join.alias or detail.table)\n\n # We need plain tables to get columns for prepare the join\n # condition\n # TODO: this is unreadable\n if master_fact is not None and (join.master.schema, join.master.table) == fact_key:\n key = (join.master.schema, join.master.table, join.master.column)\n try:\n master_label = master_detail_keys[key]\n except KeyError:\n raise InternalError(\"Missing fact column %s (has: %s)\"\n % (key, master_detail_keys.keys()))\n master_column = master_fact.c[master_label]\n else:\n master_table = self.table(master.schema, master.table)\n\n try:\n master_column = master_table.c[master.column]\n except KeyError:\n raise ModelError('Unable to find master key (schema %s) '\n '\"%s\".\"%s\" ' % join.master[0:3])\n\n detail_table = self.table(join.detail.schema, join.alias or join.detail.table)\n try:\n detail_column = detail_table.c[detail.column]\n except KeyError:\n raise MappingError('Unable to find detail key (schema %s) \"%s\".\"%s\" ' \\\n % join.detail[0:3])\n\n # The join condition:\n onclause = master_column == detail_column\n\n # Get the joined products – might be plain tables or already\n # joined tables\n try:\n master_table = joined_products[master_key]\n except KeyError:\n raise ModelError(\"Unknown master %s. Missing join or \"\n \"wrong join order?\" % (master_key, ))\n detail_table = joined_products[detail_key]\n\n\n # Determine the join type based on the join method. If the method\n # is \"detail\" then we need to swap the order of the tables\n # (products), because SQLAlchemy provides inteface only for\n # left-outer join.\n if join.method == \"match\":\n is_outer = False\n elif join.method == \"master\":\n is_outer = True\n elif join.method == \"detail\":\n # Swap the master and detail tables to perform RIGHT OUTER JOIN\n master_table, detail_table = (detail_table, master_table)\n is_outer = True\n else:\n raise ModelError(\"Unknown join method '%s'\" % join.method)\n\n product = sql.expression.join(master_table,\n detail_table,\n onclause=onclause,\n isouter=is_outer)\n\n del joined_products[detail_key]\n joined_products[master_key] = product\n\n if not joined_products:\n # This should not happen\n raise InternalError(\"No joined products left.\")\n\n if len(joined_products) > 1:\n raise ModelError(\"Some tables are not joined: %s\"\n % (joined_products.keys(), ))\n\n # Return the remaining joined product\n result = list(joined_products.values())[0]\n\n return JoinedProduct(result, tables)\n\n def column(self, attribute, locale=None):\n \"\"\"Return a column object for attribute.\n\n `locale` is explicit locale to be used. 
If not specified, then the\n current locale is used for localizable attributes.\n \"\"\"\n\n logical = self.mapper.logical(attribute, locale)\n if logical in self.logical_to_column:\n return self.logical_to_column[logical]\n\n ref = self.mapper.physical(attribute, locale)\n table = self.table(ref.schema, ref.table)\n\n try:\n column = table.c[ref.column]\n except:\n avail = [str(c) for c in table.columns]\n raise BrowserError(\"Unknown column '%s' in table '%s' avail: %s\"\n % (ref.column, ref.table, avail))\n\n # Extract part of the date\n if ref.extract:\n column = sql.expression.extract(ref.extract, column)\n if ref.func:\n column = getattr(sql.expression.func, ref.func)(column)\n if ref.expr:\n # Provide columns for attributes (according to current state of\n # the query)\n context = dict(_SQL_EXPR_CONTEXT)\n getter = _TableGetter(self)\n context[\"table\"] = getter\n getter = _AttributeGetter(self, attribute.dimension)\n context[\"dim\"] = getter\n getter = _AttributeGetter(self, self.cube)\n context[\"fact\"] = getter\n context[\"column\"] = column\n\n\n column = evaluate_expression(ref.expr, context, 'expr', sql.expression.ColumnElement)\n\n if self.safe_labels:\n label = \"a%d\" % self.label_counter\n self.label_counter += 1\n else:\n label = logical\n\n if isinstance(column, compat.string_type):\n raise ValueError(\"Cannot resolve %s to a column object: %r\" % (attribute, column))\n\n column = column.label(label)\n\n self.logical_to_column[logical] = column\n self.column_to_logical[label] = logical\n\n return column\n\n def columns(self, attributes, expand_locales=False):\n \"\"\"Returns list of columns.If `expand_locales` is True, then one\n column per attribute locale is added.\"\"\"\n\n if expand_locales:\n columns = []\n for attr in attributes:\n if attr.is_localizable():\n columns += [self.column(attr, locale) for locale in attr.locales]\n else: # if not attr.locales\n columns.append(self.column(attr))\n else:\n columns = [self.column(attr) for attr in attributes]\n\n return columns\n\n def logical_labels(self, columns):\n \"\"\"Returns list of logical attribute labels from list of columns\n or column labels.\n\n This method and additional internal references were added because some\n database dialects, such as Exasol, can not handle dots in column\n names, even when quoted.\n \"\"\"\n\n # Should not this belong to the snowflake\n attributes = []\n\n for column in columns:\n attributes.append(self.column_to_logical.get(column.name,\n column.name))\n\n return attributes\n\n def table(self, schema, table_name):\n \"\"\"Return a SQLAlchemy Table instance. If table was already accessed,\n then existing table is returned. Otherwise new instance is created.\n\n If `schema` is ``None`` then browser's default schema is used.\n \"\"\"\n\n key = (schema or self.mapper.schema, table_name)\n # Get real table reference\n try:\n return self.tables[key].table\n except KeyError:\n raise ModelError(\"Table with reference %s not found. \"\n \"Missing join in cube '%s'?\"\n % (key, self.cube.name) )\n\n\nclass _StatementConfiguration(object):\n def __init__(self):\n self.attributes = []\n self.cuts = []\n self.cut_attributes = []\n self.other_attributes = []\n\n self.split_attributes = []\n self.split_cuts = []\n\n self.ptd_attributes = []\n\n @property\n def all_attributes(self):\n \"\"\"All attributes that should be considered for a statement\n composition. 
Mostly used to get the relevant joins.\"\"\"\n\n return set(self.attributes) | set(self.cut_attributes) \\\n | set(self.split_attributes) | set(self.other_attributes)\n\n def merge(self, other):\n self.attributes += other.attributes\n self.cuts += other.cuts\n self.cut_attributes += other.cut_attributes\n\n self.split_attributes += other.split_attributes\n self.split_cuts += other.split_cuts\n\n self.other_attributes += other.other_attributes\n self.ptd_attributes += other.ptd_attributes\n\n def is_empty(self):\n return not (bool(self.attributes) \\\n or bool(self.cut_attributes) \\\n or bool(self.other_attributes) \\\n or bool(self.split_attributes))\n\nclass QueryBuilder(object):\n def __init__(self, browser):\n \"\"\"Creates a SQL query statement builder object – a controller-like\n object that incrementally constructs the statement.\n\n Result attributes:\n\n * `statement` – SQL query statement\n * `labels` – logical labels for the statement selection\n \"\"\"\n\n self.browser = browser\n\n # Inherit\n # FIXME: really?\n self.logger = browser.logger\n self.mapper = browser.mapper\n self.cube = browser.cube\n\n self.snowflake = SnowflakeSchema(self.cube, self.mapper,\n self.browser.metadata,\n safe_labels=browser.safe_labels)\n\n self.master_fact = None\n\n # Intermediate results\n self.drilldown = None\n self.split = None\n\n # Output:\n self.statement = None\n self.labels = []\n\n # Semi-additive dimension\n # TODO: move this to model (this is ported from the original\n # SnapshotBrowser)\n\n # TODO: remove this later\n if \"semiadditive\" in self.cube.info:\n raise NotImplementedError(\"'semiadditive' in 'info' is not \"\n \"supported any more\")\n\n for dim in self.cube.dimensions:\n if dim.nonadditive:\n raise NotImplementedError(\"Non-additive behavior for \"\n \"dimensions is not yet implemented.\"\n \"(cube '%s', dimension '%s')\" %\n (self.cube.name, dim.name))\n\n def aggregation_statement(self, cell, drilldown=None, aggregates=None,\n split=None, attributes=None, summary_only=False):\n \"\"\"Builds a statement to aggregate the `cell`.\n\n * `cell` – `Cell` to aggregate\n * `drilldown` – a `Drilldown` object\n * `aggregates` – list of aggregates to consider\n * `split` – split cell for split condition\n * `summary_only` – do not perform GROUP BY for the drilldown. The\n * drilldown is used only for choosing tables to join and affects outer\n detail joins in the result\n\n Algorithm description:\n\n All the tables have one of the two relationship to the fact:\n *master/match* or *detail*. Every table connected to a table that has\n \"detail\" relationship is considered also in the \"detail\" relationship\n towards the fact. Therefore we have two join zones: all master or\n detail tables from the core, directly connected to the fact table and\n rest of the table connected to the core through outer detail\n relationship.\n\n Depending on the query it is decided whether we are fine with just\n joining everything together into single join or we need to separate\n the fact master core from the outer details::\n\n +------+ +-----+\n | fact |--(match)--| dim +\n +------+ +-----+\n Master Fact |\n ===============|========================\n Outer Details | +-----+\n +------(detail)-| dim |\n +-----+\n\n The outer details part is RIGHT OUTER JOINed to the fact. Since there\n are no tables any more, the original table keys for joins to the outer\n details were exposed and specially labeled as `__masterkeyXX` where XX\n is a sequence number of the key. 
The `join_expression` JOIN\n constructing method receives the map of the keys and replaces the\n original tables with connections to the columns already selected in\n the master fact.\n\n .. note::\n\n **Limitation:** we can not have a Cut (condition) where keys (path\n elements) are from both join zones. Whole cut should be within one\n zone: either the master fact or outer details.\n \"\"\"\n\n if not aggregates:\n raise ArgumentError(\"List of aggregates sohuld not be empty\")\n\n drilldown = drilldown or Drilldown()\n\n # Configuraion of statement parts\n master = _StatementConfiguration()\n detail = _StatementConfiguration()\n\n self.logger.debug(\"prepare aggregation statement. cell: '%s' \"\n \"drilldown: '%s' summary only: %s\" %\n (\",\".join([str(cut) for cut in cell.cuts]),\n drilldown, summary_only))\n\n # Analyse and Prepare\n # -------------------\n # Get the cell attributes and find whether we have some outer details\n #\n # Cut\n # ~~~\n\n mcuts, mattrs, dcuts, dattrs = self._split_cell_by_relationship(cell)\n master.cuts += mcuts\n master.cut_attributes += mattrs\n detail.cuts += dcuts\n detail.cut_attributes += dattrs\n\n # Split\n # ~~~~~\n # Same as Cut, just different target\n\n mcuts, mattrs, dcuts, dattrs = self._split_cell_by_relationship(split)\n master.split_cuts += mcuts\n master.split_attributes += mattrs\n detail.split_cuts += dcuts\n detail.split_attributes += dattrs\n\n # Drilldown\n # ~~~~~~~~~\n\n drilldown_attributes = drilldown.all_attributes()\n master.attributes, detail.attributes = \\\n self._split_attributes_by_relationship(drilldown_attributes)\n\n # Period-to-date\n #\n # One thing we have to do later is to generate the PTD condition\n # (either for master or for detail) and assign it to the appropriate\n # list of conditions\n\n ptd_attributes = self._ptd_attributes(cell, drilldown)\n ptd_master, ptd_detail = self._split_attributes_by_relationship(ptd_attributes)\n if ptd_master and ptd_detail:\n raise InternalError(\"PTD attributes are spreading from master \"\n \"to outer detail. 
This is not supported.\")\n elif ptd_master:\n master.ptd_attributes = ptd_master\n elif ptd_detail:\n detail.ptd_attributes = ptd_detail\n\n # TODO: PTD workaround #2\n # We need to know which attributes have to be included for JOINs,\n # however we can know this only when \"condition\" in mapping is\n # evaluated, which can be evaluated only after joins and when the\n # master-fact is ready.\n required = self.cube.browser_options.get(\"ptd_master_required\", [])\n\n if required:\n required = self.cube.get_attributes(required)\n master.ptd_attributes += required\n\n # Semi-additive attribute\n semiadditives = self.semiadditive_attributes(aggregates, drilldown)\n sa_master, sa_detail = self._split_attributes_by_relationship(semiadditives)\n master.other_attributes += sa_master\n detail.other_attributes += sa_detail\n\n # Pick the method:\n #\n # M - master, D - detail\n # C - condition, A - selection attributes (drilldown)\n #\n # MA MC DA DC | method\n # ============|=======\n # 0 -- -- -- -- | simple\n # 1 xx -- -- -- | simple\n # 2 -- xx -- -- | simple\n # 3 xx xx -- -- | simple\n # 4 -- -- xx -- | simple\n # 5 xx -- xx -- | simple\n # 6 -- xx xx -- | composed\n # 7 xx xx xx -- | composed\n # 8 -- -- -- xx | simple\n # 9 xx -- -- xx | simple\n # 10 -- -- xx xx | simple\n # 11 xx -- xx xx | simple\n # 12 -- xx -- xx | composed\n # 13 xx xx -- xx | composed\n # 14 -- xx xx xx | composed\n # 15 xx xx xx xx | composed\n # \n\n # The master cut is in conflict with detail drilldown or detail cut \n if master.cut_attributes and (detail.attributes or\n detail.cut_attributes):\n simple_method = False\n else:\n simple_method = True\n master.merge(detail)\n\n coalesce_measures = not detail.is_empty()\n\n master_conditions = self.conditions_for_cuts(master.cuts)\n\n if simple_method:\n self.logger.debug(\"statement: simple\")\n\n # Drilldown – Group-by\n # --------------------\n #\n # SELECT – Prepare the master selection\n # * master drilldown items\n\n selection = [self.column(a) for a in set(master.attributes)]\n group_by = selection[:]\n\n # SPLIT\n # -----\n if split:\n master_split = self._cell_split_column(master.split_cuts)\n group_by.append(master_split)\n selection.append(master_split)\n\n # WHERE\n # -----\n conditions = master_conditions\n ptd_attributes = master.ptd_attributes\n\n # JOIN\n # ----\n attributes = set(aggregates) \\\n | master.all_attributes \\\n | set(ptd_attributes)\n join = self.snowflake.join_expression(attributes)\n join_expression = join.expression\n\n else:\n self.logger.debug(\"statement: composed\")\n\n # 1. 
MASTER FACT\n # ==============\n\n join = self.snowflake.join_expression(master.all_attributes)\n join_expression = join.expression\n\n # Store a map of joined columns for later\n # The map is: (schema, table, column) -> column\n\n # Expose fact master detail key outlets:\n master_detail_keys = {}\n master_detail_selection = []\n counter = 0\n for table in join.tables:\n for key in table.detail_keys:\n column_key = (table.schema, table.aliased_name, key)\n label = \"__masterkey%d\" % counter\n master_detail_keys[column_key] = label\n\n column = table.table.c[key].label(label)\n master_detail_selection.append(column)\n counter += 1\n\n # SELECT – Prepare the master selection\n # * drilldown items\n # * measures\n # * aliased keys for outer detail joins\n\n # Note: Master selection is carried as first (we need to retrieve\n # it later by index)\n master_selection = [self.column(a) for a in set(master.attributes)]\n\n measures = self.measures_for_aggregates(aggregates)\n measure_selection = [self.column(m) for m in measures]\n\n selection = master_selection \\\n + measure_selection \\\n + master_detail_selection\n\n # SPLIT\n # -----\n if master.split_cuts:\n master_split = self._cell_split_column(master.split_cuts,\n \"__master_split\")\n group_by.append(master_split)\n selection.append(master_split)\n else:\n master_split = None\n\n # Add the fact key – to properely handle COUNT()\n selection.append(self.snowflake.fact_key_column)\n\n # WHERE Condition\n # ---------------\n condition = condition_conjunction(master_conditions)\n\n # Add the PTD\n if master.ptd_attributes:\n ptd_condition = self._ptd_condition(master.ptd_attributes)\n condition = condition_conjunction([condition, ptd_condition])\n # TODO: PTD workaround #3:\n # Add the PTD attributes to the selection,so the detail part\n # of the join will be able to find them in the master\n cols = [self.column(a) for a in master.ptd_attributes]\n selection += cols\n\n # Prepare the master_fact statement:\n statement = sql.expression.select(selection,\n from_obj=join_expression,\n use_labels=True,\n whereclause=condition)\n\n # From now-on the self.column() method will return columns from\n # master_fact if applicable.\n self.master_fact = statement.alias(\"__master_fact\")\n\n # Add drilldown – Group-by\n # ------------------------\n #\n\n # SELECT – Prepare the detail selection\n # * master drilldown items (inherit)\n # * detail drilldown items\n\n master_cols = list(self.master_fact.columns)\n master_selection = master_cols[0:len(master.attributes)]\n\n detail_selection = [self.column(a) for a in set(detail.attributes)]\n\n selection = master_selection + detail_selection\n group_by = selection[:]\n\n # SPLIT\n # -----\n if detail.split_cuts:\n if master_split:\n # Merge the detail and master part of the split \"dimension\"\n master_split = self.master_fact.c[\"__master_split\"]\n detail_split = self._cell_split_column(detail.split_cuts,\n \"__detail_split\")\n split_condition = (master_split and detail_split)\n detail_split = sql.expression.case([(split_condition, True)],\n else_=False)\n detail_split.label(SPLIT_DIMENSION_NAME)\n else:\n # We have only detail split, no need to merge the\n # condition\n detail_split = self._cell_split_column(detail.split_cuts)\n\n selection.append(detail_split)\n group_by.append(detail_split)\n\n\n # WHERE\n # -----\n conditions = self.conditions_for_cuts(detail.cuts)\n ptd_attributes = detail.ptd_attributes\n\n # JOIN\n # ----\n # Replace the master-relationship tables with single master fact\n # Provide 
mapping between original table columns to the master\n # fact selection (with labelled columns)\n join = self.snowflake.join_expression(detail.all_attributes,\n master_fact=self.master_fact,\n master_detail_keys=master_detail_keys)\n\n join_expression = join.expression\n\n # The Final Statement\n # ===================\n #\n\n # WHERE\n # -----\n if ptd_attributes:\n ptd_condition = self._ptd_condition(ptd_attributes)\n self.logger.debug(\"adding PTD condition: %s\" % str(ptd_condition))\n conditions.append(ptd_condition)\n\n condition = condition_conjunction(conditions)\n group_by = group_by if not summary_only else None\n\n # Include the semi-additive dimension, if required\n #\n if semiadditives:\n self.logger.debug(\"preparing semiadditive subquery for \"\n \"attributes: %s\"\n % [a.name for a in semiadditives])\n\n join_expression = self._semiadditive_subquery(semiadditives,\n selection,\n from_obj=join_expression,\n condition=condition,\n group_by=group_by)\n\n aggregate_selection = self.builtin_aggregate_expressions(aggregates,\n coalesce_measures=coalesce_measures)\n\n if summary_only:\n # Don't include the group-by part (see issue #157 for more\n # information)\n selection = aggregate_selection\n else:\n selection += aggregate_selection\n\n # condition = None\n statement = sql.expression.select(selection,\n from_obj=join_expression,\n use_labels=True,\n whereclause=condition,\n group_by=group_by)\n\n self.statement = statement\n self.labels = self.snowflake.logical_labels(selection)\n\n # Used in order\n self.drilldown = drilldown\n self.split = split\n\n return self.statement\n\n def _split_attributes_by_relationship(self, attributes):\n \"\"\"Returns a tuple (`master`, `detail`) where `master` is a list of\n attributes that have master/match relationship towards the fact and\n `detail` is a list of attributes with outer detail relationship\n towards the fact.\"\"\"\n\n if not attributes:\n return ([],[])\n\n master = []\n detail = []\n for attribute in attributes:\n if self.snowflake.is_outer_detail(attribute):\n detail.append(attribute)\n else:\n master.append(attribute)\n\n return (master, detail)\n\n def _split_cell_by_relationship(self, cell):\n \"\"\"Returns a tuple of _StatementConfiguration objects (`master`,\n `detail`)\"\"\"\n\n if not cell:\n return ([], [], [], [])\n\n master_cuts = []\n master_cut_attributes = []\n detail_cuts = []\n detail_cut_attributes = []\n\n for cut, attributes in self.attributes_for_cell_cuts(cell):\n is_outer_detail = [self.snowflake.is_outer_detail(a) for a in attributes]\n\n if all(is_outer_detail):\n detail_cut_attributes += attributes\n detail_cuts.append(cut)\n elif any(is_outer_detail):\n raise InternalError(\"Cut %s is spreading from master to \"\n \"outer detail is not supported.\"\n % str(cut))\n else:\n master_cut_attributes += attributes\n master_cuts.append(cut)\n\n return (master_cuts, master_cut_attributes,\n detail_cuts, detail_cut_attributes)\n\n def _cell_split_column(self, cuts, label=None):\n \"\"\"Create a column for a cell split from list of `cust`.\"\"\"\n\n conditions = self.conditions_for_cuts(cuts)\n condition = condition_conjunction(conditions)\n split_column = sql.expression.case([(condition, True)],\n else_=False)\n\n label = label or SPLIT_DIMENSION_NAME\n\n return split_column.label(label)\n\n def semiadditive_attributes(self, aggregates, drilldown):\n \"\"\"Returns an attribute from a semi-additive dimension, if defined for\n the cube. Cubes allows one semi-additive dimension. 
\"\"\"\n\n nonadds = set(self.cube.nonadditive_type(agg) for agg in aggregates)\n # If there is no nonadditive aggregate, we skip\n if not any(nonaddtype for nonaddtype in nonadds):\n return None\n\n if None in nonadds:\n nonadds.remove(None)\n\n if \"time\" not in nonadds:\n raise NotImplementedError(\"Nonadditive aggregates for other than \"\n \"time dimension are not supported.\")\n\n # Here we expect to have time-only nonadditive\n # TODO: What to do if we have more?\n\n # Find first time drill-down, if any\n items = [item for item in drilldown \\\n if item.dimension.role == \"time\"]\n\n attributes = []\n for item in drilldown:\n if item.dimension.role != \"time\":\n continue\n attribute = Attribute(\"__key__\", dimension=item.dimension)\n attributes.append(attribute)\n\n if not attributes:\n time_dims = [ d for d in self.cube.dimensions if d.role == \"time\" ]\n if not time_dims:\n raise BrowserError(\"Cannot locate a time dimension to apply for semiadditive aggregates: %r\" % nonadds)\n attribute = Attribute(\"__key__\", dimension=time_dims[0])\n attributes.append(attribute)\n\n return attributes\n\n def _semiadditive_subquery(self, attributes, selection,\n from_obj, condition, group_by):\n \"\"\"Prepare the semi-additive subquery\"\"\"\n sub_selection = selection[:]\n\n semiadd_selection = []\n for attr in attributes:\n col = self.column(attr)\n # Only one function is supported for now: max()\n func = sql.expression.func.max\n col = func(col)\n semiadd_selection.append(col)\n\n sub_selection += semiadd_selection\n\n # This has to be the same as the final SELECT, except the subquery\n # selection\n sub_statement = sql.expression.select(sub_selection,\n from_obj=from_obj,\n use_labels=True,\n whereclause=condition,\n group_by=group_by)\n\n sub_statement = sub_statement.alias(\"__semiadditive_subquery\")\n\n # Construct the subquery JOIN condition\n # Skipt the last subquery selection which we have created just\n # recently\n join_conditions = []\n\n for left, right in zip(selection, sub_statement.columns):\n join_conditions.append(left == right)\n\n remainder = list(sub_statement.columns)[len(selection):]\n for attr, right in zip(attributes, remainder):\n left = self.column(attr)\n join_conditions.append(left == right)\n\n join_condition = condition_conjunction(join_conditions)\n join_expression = from_obj.join(sub_statement, join_condition)\n\n return join_expression\n\n def denormalized_statement(self, cell=None, attributes=None,\n expand_locales=False, include_fact_key=True):\n \"\"\"Builds a statement for denormalized view. `whereclause` is same as\n SQLAlchemy `whereclause` for `sqlalchemy.sql.expression.select()`.\n `attributes` is list of logical references to attributes to be\n selected. 
If it is ``None`` then all attributes are used.\n `condition_attributes` contains list of attributes that are not going\n to be selected, but are required for WHERE condition.\n\n Set `expand_locales` to ``True`` to expand all localized attributes.\n \"\"\"\n\n if attributes is None:\n attributes = self.cube.all_attributes\n\n join_attributes = set(attributes) | self.attributes_for_cell(cell)\n\n join_product = self.snowflake.join_expression(join_attributes)\n join_expression = join_product.expression\n\n columns = self.snowflake.columns(attributes, expand_locales=expand_locales)\n\n if include_fact_key:\n columns.insert(0, self.snowflake.fact_key_column)\n\n if cell is not None:\n condition = self.condition_for_cell(cell)\n else:\n condition = None\n\n statement = sql.expression.select(columns,\n from_obj=join_expression,\n use_labels=True,\n whereclause=condition)\n\n self.statement = statement\n self.labels = self.snowflake.logical_labels(statement.columns)\n\n return statement\n\n def members_statement(self, cell, attributes=None):\n \"\"\"Prepares dimension members statement.\"\"\"\n self.denormalized_statement(cell, attributes, include_fact_key=False)\n group_by = self.snowflake.columns(attributes)\n self.statement = self.statement.group_by(*group_by)\n return self.statement\n\n def fact(self, id_):\n \"\"\"Selects only fact with given id\"\"\"\n condition = self.snowflake.fact_key_column == id_\n return self.append_condition(condition)\n\n def append_condition(self, condition):\n \"\"\"Appends `condition` to the generated statement.\"\"\"\n self.statement = self.statement.where(condition)\n return self.statement\n\n def measures_for_aggregates(self, aggregates):\n \"\"\"Returns a list of measures for `aggregates`. This method is used in\n constructing the master fact.\"\"\"\n\n measures = []\n\n aggregates = [agg for agg in aggregates if agg.function]\n\n for aggregate in aggregates:\n function_name = aggregate.function.lower()\n function = self.browser.builtin_function(function_name, aggregate)\n\n if not function:\n continue\n\n names = function.required_measures(aggregate)\n if names:\n measures += self.cube.get_attributes(names)\n\n return measures\n\n def builtin_aggregate_expressions(self, aggregates,\n coalesce_measures=False):\n \"\"\"Returns list of expressions for aggregates from `aggregates` that\n are computed using the SQL statement.\n \"\"\"\n\n expressions = []\n for agg in aggregates:\n exp = self.aggregate_expression(agg, coalesce_measures)\n if exp is not None:\n expressions.append(exp)\n\n return expressions\n\n def aggregate_expression(self, aggregate, coalesce_measure=False):\n \"\"\"Returns an expression that performs the aggregation of measure\n `aggregate`. The result's label is the aggregate's name. 
`aggregate`\n has to be `MeasureAggregate` instance.\n\n If aggregate function is post-aggregation calculation, then `None` is\n returned.\n\n Aggregation function names are case in-sensitive.\n\n If `coalesce_measure` is `True` then selected measure column is wrapped\n in ``COALESCE(column, 0)``.\n \"\"\"\n # TODO: support aggregate.expression\n\n if aggregate.expression:\n raise NotImplementedError(\"Expressions are not yet implemented\")\n\n # If there is no function specified, we consider the aggregate to be\n # computed in the mapping\n if not aggregate.function:\n # TODO: this should be depreciated in favor of aggreate.expression\n # TODO: Following expression should be raised instead:\n # raise ModelError(\"Aggregate '%s' has no function specified\"\n # % str(aggregate))\n column = self.column(aggregate)\n return column\n\n function_name = aggregate.function.lower()\n function = self.browser.builtin_function(function_name, aggregate)\n\n if not function:\n return None\n\n expression = function(aggregate, self, coalesce_measure)\n\n return expression\n\n def attributes_for_cell(self, cell):\n \"\"\"Returns a set of attributes included in the cell.\"\"\"\n if not cell:\n return set()\n\n attributes = set()\n for cut, cut_attrs in self.attributes_for_cell_cuts(cell):\n attributes |= set(cut_attrs)\n return attributes\n\n def attributes_for_cell_cuts(self, cell):\n \"\"\"Returns a list of tuples (`cut`, `attributes`) where `attributes`\n is list of attributes involved in the `cut`.\"\"\"\n\n # Note: this method belongs here, not to the Cell class, as we might\n # discover that some other attributes might be required for the cell\n # (in the future...)\n\n result = []\n\n for cut in cell.cuts:\n depth = cut.level_depth()\n if depth:\n dim = self.cube.dimension(cut.dimension)\n hier = dim.hierarchy(cut.hierarchy)\n keys = [level.key for level in hier[0:depth]]\n result.append((cut, keys))\n\n return result\n\n def condition_for_cell(self, cell):\n \"\"\"Returns a SQL condition for the `cell`.\"\"\"\n conditions = self.conditions_for_cuts(cell.cuts)\n condition = condition_conjunction(conditions)\n return condition\n\n def conditions_for_cuts(self, cuts):\n \"\"\"Constructs conditions for all cuts in the `cell`. Returns a list of\n SQL conditional expressions.\n \"\"\"\n\n conditions = []\n\n for cut in cuts:\n dim = self.cube.dimension(cut.dimension)\n\n if isinstance(cut, PointCut):\n path = cut.path\n condition = self.condition_for_point(dim, path, cut.hierarchy,\n cut.invert)\n\n elif isinstance(cut, SetCut):\n set_conds = []\n\n for path in cut.paths:\n condition = self.condition_for_point(dim, path,\n cut.hierarchy,\n invert=False)\n set_conds.append(condition)\n\n condition = sql.expression.or_(*set_conds)\n\n if cut.invert:\n condition = sql.expression.not_(condition)\n\n elif isinstance(cut, RangeCut):\n condition = self.range_condition(cut.dimension,\n cut.hierarchy,\n cut.from_path,\n cut.to_path, cut.invert)\n\n else:\n raise ArgumentError(\"Unknown cut type %s\" % type(cut))\n\n conditions.append(condition)\n\n return conditions\n\n def condition_for_point(self, dim, path, hierarchy=None, invert=False):\n \"\"\"Returns a `Condition` tuple (`attributes`, `conditions`,\n `group_by`) dimension `dim` point at `path`. 
It is a compound\n condition - one equality condition for each path element in form:\n ``level[i].key = path[i]``\"\"\"\n\n conditions = []\n\n levels = dim.hierarchy(hierarchy).levels_for_path(path)\n\n if len(path) > len(levels):\n raise ArgumentError(\"Path has more items (%d: %s) than there are levels (%d) \"\n \"in dimension %s\" % (len(path), path, len(levels), dim.name))\n\n for level, value in zip(levels, path):\n\n # Prepare condition: dimension.level_key = path_value\n column = self.column(level.key)\n conditions.append(column == value)\n\n condition = sql.expression.and_(*conditions)\n\n if invert:\n condition = sql.expression.not_(condition)\n\n return condition\n\n def range_condition(self, dim, hierarchy, from_path, to_path, invert=False):\n \"\"\"Return a condition for a hierarchical range (`from_path`,\n `to_path`). Return value is a `Condition` tuple.\"\"\"\n\n dim = self.cube.dimension(dim)\n\n lower = self._boundary_condition(dim, hierarchy, from_path, 0)\n upper = self._boundary_condition(dim, hierarchy, to_path, 1)\n\n conditions = []\n if lower is not None:\n conditions.append(lower)\n if upper is not None:\n conditions.append(upper)\n\n condition = condition_conjunction(conditions)\n\n if invert:\n condition = sql.expression.not_(condition)\n\n return condition\n\n def _boundary_condition(self, dim, hierarchy, path, bound, first=True):\n \"\"\"Return a `Condition` tuple for a boundary condition. If `bound` is\n 1 then path is considered to be upper bound (operators < and <= are\n used), otherwise path is considered as lower bound (operators > and >=\n are used )\"\"\"\n\n if not path:\n return None\n\n last = self._boundary_condition(dim, hierarchy,\n path[:-1],\n bound,\n first=False)\n\n levels = dim.hierarchy(hierarchy).levels_for_path(path)\n\n if len(path) > len(levels):\n raise ArgumentError(\"Path has more items (%d: %s) than there are levels (%d) \"\n \"in dimension %s\" % (len(path), path, len(levels), dim.name))\n\n conditions = []\n\n for level, value in zip(levels[:-1], path[:-1]):\n column = self.column(level.key)\n conditions.append(column == value)\n\n # Select required operator according to bound\n # 0 - lower bound\n # 1 - upper bound\n if bound == 1:\n # 1 - upper bound (that is <= and < operator)\n operator = sql.operators.le if first else sql.operators.lt\n else:\n # else - lower bound (that is >= and > operator)\n operator = sql.operators.ge if first else sql.operators.gt\n\n column = self.column(levels[-1].key)\n conditions.append(operator(column, path[-1]))\n condition = condition_conjunction(conditions)\n\n if last is not None:\n condition = sql.expression.or_(condition, last)\n\n return condition\n\n def _ptd_attributes(self, cell, drilldown):\n \"\"\"Return attributes that are used for the PTD condition. 
Output of\n this function is used for master/detail fact composition and for the\n `_ptd_condition()`\"\"\"\n # Include every level only once\n levels = set()\n\n # For the cell:\n if cell:\n levels |= set(item[2] for item in cell.deepest_levels())\n\n # For drilldown:\n if drilldown:\n levels |= set(item[2] for item in drilldown.deepest_levels())\n\n attributes = []\n for level in levels:\n ref = self.mapper.physical(level.key)\n if ref.condition:\n attributes.append(level.key)\n\n return attributes\n\n def _ptd_condition(self, ptd_attributes):\n \"\"\"Returns \"periods to date\" condition for `ptd_attributes` (which\n should be a result of `_ptd_attributes()`)\"\"\"\n\n # Collect the conditions\n #\n # Conditions are currently specified in the mappings as \"condtition\"\n # Collect relevant columns – those with conditions\n\n # Construct the conditions from the physical attribute expression\n conditions = []\n\n for attribute in ptd_attributes:\n # FIXME: this is a hack\n\n ref = self.mapper.physical(attribute)\n if not ref.condition:\n continue\n\n column = self.column(attribute)\n\n # Provide columns for attributes (according to current state of\n # the query)\n context = dict(_SQL_EXPR_CONTEXT)\n getter = _TableGetter(self)\n context[\"table\"] = getter\n getter = _AttributeGetter(self, attribute.dimension)\n context[\"dim\"] = getter\n getter = _AttributeGetter(self, self.cube)\n context[\"fact\"] = getter\n context[\"column\"] = column\n\n condition = evaluate_expression(ref.condition,\n context,\n 'condition',\n sql.expression.ColumnElement)\n\n conditions.append(condition)\n\n # TODO: What about invert?\n return condition_conjunction(conditions)\n\n def fact_key_column(self):\n \"\"\"Returns a column that represents the fact key.\"\"\"\n # TODO: this is used only in FactCountFunction, suggestion for better\n # solution is in the comments there.\n if self.master_fact is not None:\n return self.master_fact.c[self.snowflake.fact_key]\n else:\n return self.snowflake.fact_key_column\n\n def column(self, attribute, locale=None):\n \"\"\"Returns either a physical column for the attribute or a reference to\n a column from the master fact if it exists.\"\"\"\n\n if self.master_fact is not None:\n ref = self.mapper.physical(attribute, locale)\n self.logger.debug(\"column %s (%s) from master fact\" % (attribute.ref(), ref))\n try:\n return self.master_fact.c[ref.column]\n except KeyError:\n self.logger.debug(\"retry column %s from tables\" % (attribute.ref(), ))\n return self.snowflake.column(attribute, locale)\n else:\n self.logger.debug(\"column %s from tables\" % (attribute.ref(), ))\n return self.snowflake.column(attribute, locale)\n\n def paginate(self, page, page_size):\n \"\"\"Returns paginated statement if page is provided, otherwise returns\n the same statement.\"\"\"\n\n if page is not None and page_size is not None:\n self.statement = self.statement.offset(page * page_size).limit(page_size)\n\n return self.statement\n\n def order(self, order):\n \"\"\"Returns a SQL statement which is ordered according to the `order`. If\n the statement contains attributes that have natural order specified, then\n the natural order is used, if not overriden in the `order`.\n\n `order` sohuld be prepared using\n :meth:`AggregationBrowser.prepare_order`.\n\n `dimension_levels` is list of considered dimension levels in form of\n tuples (`dimension`, `hierarchy`, `levels`). 
For each level it's sort\n key is used.\n \"\"\"\n\n # Each attribute mentioned in the order should be present in the selection\n # or as some column from joined table. Here we get the list of already\n # selected columns and derived aggregates\n\n selection = OrderedDict()\n\n # Get logical attributes from column labels (see logical_labels method\n # description for more information why this step is necessary)\n for column, ref in zip(self.statement.columns, self.labels):\n selection[ref] = column\n\n # Make sure that the `order` is a list of of tuples (`attribute`,\n # `order`). If element of the `order` list is a string, then it is\n # converted to (`string`, ``None``).\n\n order = order or []\n\n drilldown = self.drilldown or []\n\n for dditem in drilldown:\n dim, hier, levels = dditem[0:3]\n for level in levels:\n level = dim.level(level)\n lvl_attr = level.order_attribute or level.key\n lvl_order = level.order or 'asc'\n order.append((lvl_attr, lvl_order))\n\n order_by = OrderedDict()\n\n if self.split:\n split_column = sql.expression.column(SPLIT_DIMENSION_NAME)\n order_by[SPLIT_DIMENSION_NAME] = split_column\n\n # Collect the corresponding attribute columns\n for attribute, order_dir in order:\n try:\n column = selection[attribute.ref()]\n except KeyError:\n attribute = self.mapper.attribute(attribute.ref())\n column = self.column(attribute)\n\n column = order_column(column, order_dir)\n\n if attribute.ref() not in order_by:\n order_by[attribute.ref()] = column\n\n # Collect natural order for selected columns\n for (name, column) in selection.items():\n try:\n # Backward mapping: get Attribute instance by name. The column\n # name used here is already labelled to the logical name\n attribute = self.mapper.attribute(name)\n except KeyError:\n # Since we are already selecting the column, then it should\n # exist this exception is raised when we are trying to get\n # Attribute object for an aggregate - we can safely ignore\n # this.\n\n # TODO: add natural ordering for measures (may be nice)\n attribute = None\n\n if attribute and attribute.order and name not in order_by.keys():\n order_by[name] = order_column(column, attribute.order)\n\n self.statement = self.statement.order_by(*order_by.values())\n\n return self.statement\n\n\n\n# Used as a workaround for \"condition\" attribute mapping property\n# TODO: temp solution\n# Assumption: every other attribute is from the same dimension\nclass _AttributeGetter(object):\n def __init__(self, owner, context):\n self._context = context\n self._owner = owner\n\n def __getattr__(self, attr):\n return self._column(attr)\n\n def __getitem__(self, item):\n return self._column(item)\n\n def _column(self, name):\n attribute = self._context.attribute(name)\n return self._owner.column(attribute)\n\n # Backward-compatibility for table.c.foo\n @property\n def c(self):\n return self\n\nclass _TableGetter(object):\n def __init__(self, owner):\n self._owner = owner\n\n def __getattr__(self, attr):\n return self._table(attr)\n\n def __getitem__(self, item):\n return self._table(item)\n\n def _table(self, name):\n # Create a dummy attribute\n return _ColumnGetter(self._owner, name)\n\n\nclass _ColumnGetter(object):\n def __init__(self, owner, table):\n self._owner = owner\n self._table = table\n\n def __getattr__(self, attr):\n return self._column(attr)\n\n def __getitem__(self, item):\n return self._column(item)\n\n def _column(self, name):\n # Create a dummy attribute\n attribute = PhysicalAttribute(name, table=self._table)\n return 
self._owner.column(attribute)\n\n # Backward-compatibility for table.c.foo\n @property\n def c(self):\n return self\n\n","sub_path":"learn_web/django_demo/venv/Lib/site-packages/cubes-1.0.1-py3.5.egg/cubes/backends/sql/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":66270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"116850178","text":"import curses\nimport curses.ascii\nimport npyscreen\nfrom src import commandHandler\n\n\ndef help_screen(*args, **kwargs):\n usage = (\"Navigation\\n\"\n \" -Tab and Shift-Tab: Navigate between modules\\n\"\n \" -Arrow Keys: Navigate inside of a module.\\n\"\n \" -Ctrl-Q: Exit the application.\\n\"\n \"Application\\n\"\n \" You can add -d or -debug when running the application to have additional information\\n\"\n \" You can add -s or -screen when running the application to have a visible web browser\\n\"\n \"Trees\\n\"\n \" -Left Arrow: Collapse opened Item\\n\"\n \" -Right Arrow: Expand unopened Item\\n\"\n \" -SpaceBar: Toggle item expansion or call annotation\\n\"\n \" -A: Open annotation for item\\n\"\n \" -F: Take item name to Command Line\\n\"\n \"Command Line\\n\"\n \" Takes Input and processes it\\n\"\n \" -Enter: Submits input\\n\"\n \" -Arrow Keys: Command history recall\\n\"\n \" -Ctrl-C: List of available commands\")\n npyscreen.notify_confirm(usage,\n title=\"Usage Manual\",\n form_color='STANDOUT',\n wrap=True,\n wide=True,\n editw=0)\n\n\nclass ControlPanel(npyscreen.Textfield):\n def __init__(self, *args, **kwargs):\n super(ControlPanel, self).__init__(*args, **kwargs)\n self.log_man = self.find_parent_app().log_man\n self.full_tree = self.find_parent_app().full_tree\n self.reports = self.find_parent_app().reports\n self.command_history = []\n self.recall_position = 0\n self.checker = commandHandler.CommandHandler(self.log_man, self.full_tree, self.reports,\n self.find_parent_app().debug, self.find_parent_app().screen)\n\n def set_up_handlers(self):\n super(ControlPanel, self).set_up_handlers()\n self.handlers.update({\n curses.ascii.NL: self.run_cmd,\n curses.ascii.CR: self.run_cmd,\n curses.KEY_UP: self.recall_command_backward,\n curses.KEY_DOWN: self.recall_command_forward,\n \"^Q\": self.quit,\n \"^C\": self.command_help,\n \"^H\": help_screen\n })\n\n def command_help(*args, **kwargs):\n usage_text = (\"Flagging\\n\"\n \" Flagging is a module for finding errors in the saved files.\\n\"\n \" To update the saved files, refer to the WebDriver section.\\n\"\n \" Base Command: 'flag'\\n\"\n \" Sub Commands:\\n\"\n \" -'duplicates': Checks all reports for duplicate pulls.\\n\"\n \" -'spelling': Checks the spelling of all reports for misspellings.\\n\"\n \" -'sum': Checks the integrity of all reports for missed pulls.\\n\"\n \" -'show' [reportname]: Shows whats missing in sum of a report, alternatively press f on the report.\\n\"\n \" eg: \\\"flag show TRA_Modular_StoreDetail_Daily_001\\\"\\n\"\n \" -'clear': Removes flags and resets to normal.\\n\"\n \"WebDriver\\n\"\n \" The WebDriver is a collection of scripts applied to a webscraper.\\n\"\n \" It is not perfect and will occasionally error out.\\n\"\n \" If issues occur, I highly suggest using the -screen runtime argument.\\n\"\n \" Commands:\\n\"\n \" -'start': Initializes the web scraper client, prereq for all other commands.\\n\"\n \" -'login': Connects to retaillink, on failure it will attempt next password. 
prereq for most commands.\\n\"\n \" -'rebuild data': Attempts to download all reports in Category2014 folder and save them.\\n\"\n \" -'rebuild master': Attempts to download the Master Tree from the configured groups.\\n\"\n \" -'rebuild report [reportname]': Attempts to rebuild known data on report from RetailLink\\n\"\n \" -'stop': shuts down the webdriver.\")\n npyscreen.notify_confirm(usage_text,\n title=\"Commands\",\n form_color='STANDOUT',\n wrap=True,\n wide=True,\n editw=0)\n\n def recall_command_backward(self, *args, **kwargs):\n if self.recall_position + 1 > len(self.command_history):\n return\n self.recall_position += 1\n self.value = self.command_history[-self.recall_position]\n self.update()\n\n def recall_command_forward(self, *args, **kwargs):\n if self.recall_position <= 1:\n self.recall_position = 0\n self.value = \"\"\n self.update()\n return\n self.recall_position -= 1\n self.value = self.command_history[-self.recall_position]\n\n def run_cmd(self, *args, **kwargs):\n self.checker.check(self.value)\n self.command_history.append(self.value)\n self.recall_position = 0\n self.value = \"\"\n self.update()\n\n def quit(self, *args, **kwargs):\n raise KeyboardInterrupt\n\n\nclass ControlPanelWrapper(npyscreen.BoxTitle):\n _contained_widget = ControlPanel\n\n\nclass MultiLineLogger(npyscreen.MultiLineEdit):\n def __init__(self, *args, **kwargs):\n super(MultiLineLogger, self).__init__(*args, **kwargs)\n self.log_man = None\n #self.start_display_at = self.height\n self.add_handlers({\n \"^Q\": self.quit\n })\n\n def set_up_handlers(self):\n npyscreen.widget.InputHandler.set_up_handlers(self)\n self.handlers.update({\n curses.KEY_UP: self.h_line_up,\n curses.KEY_DOWN: self.h_line_down,\n curses.KEY_LEFT: self.h_cursor_left,\n curses.KEY_RIGHT: self.h_cursor_right\n })\n\n def quit(self, *args, **kwargs):\n raise KeyboardInterrupt\n\n def define_log_manager(self, log_man):\n self.log_man = log_man\n\n def update(self, clear=True):\n if clear: self.clear()\n display_length = self.maximum_display_height\n display_width = self.maximum_display_width\n xdisplay_offset = 0\n text_to_display = self.get_value_as_list()\n if self.cursor_position < 0: self.cursor_position = 0\n if self.cursor_position > len(self.value): self.cursor_position = len(self.value)\n\n self.cursory, self.cursorx = self.translate_cursor(self.cursor_position)\n\n display = len(text_to_display[self.start_display_at:])\n\n for line_count in range(self.height):\n if line_count >= len(text_to_display)-self.start_display_at:\n break\n line_counter = -line_count\n line_to_display = text_to_display[self.start_display_at+line_counter][xdisplay_offset:]\n line_to_display = self.safe_string(line_to_display)\n if isinstance(line_to_display, bytes):\n line_to_display = line_to_display.decode(self.encoding, 'replace')\n column = 0\n place_in_string = 0\n encoded = False\n maintain_color = \"\"\n line_place = self.height + line_counter\n while column <= (display_width):\n ignore_char = False\n\n if not line_to_display:\n break\n if place_in_string >= len(line_to_display):\n break\n width_of_char_to_print = 1\n if column - 1 + width_of_char_to_print > display_width:\n break\n if self.do_colors():\n if place_in_string == 0:\n for i in range(0, len(self.log_man.encodeKeys)):\n if line_to_display[place_in_string] == self.log_man.encodeKeys[i]:\n maintain_color = self.parent.theme_manager.findPair(self, self.log_man.encodeResponse[i])\n encoded = True\n ignore_char = True\n break\n if encoded:\n color = maintain_color\n else:\n color 
= self.parent.theme_manager.findPair(self)\n else:\n color = curses.A_NORMAL\n\n if not ignore_char:\n self.parent.curses_pad.addstr(self.rely+line_place,self.relx+column,\n self._print_unicode_char(line_to_display[place_in_string]),\n color\n )\n column += width_of_char_to_print\n place_in_string += 1\n\n if self.editing:\n _cur_y, _cur_x = self.translate_cursor(self.cursor_position)\n try:\n char_under_cur = self.safe_string(self.value[self.cursor_position])\n if char_under_cur == '\\n':\n char_under_cur = ' '\n elif char_under_cur == '∙':\n char_under_cur = ''\n except:\n char_under_cur = ' '\n\n if self.do_colors():\n self.parent.curses_pad.addstr(self.rely + _cur_y - self.start_display_at, _cur_x - xdisplay_offset + self.relx, char_under_cur,\n self.parent.theme_manager.findPair(self) | curses.A_STANDOUT)\n\n else:\n self.parent.curses_pad.addstr(self.rely + _cur_y - self.start_display_at, _cur_x - xdisplay_offset + self.relx, char_under_cur, curses.A_STANDOUT)\n\n\nclass MultiLineLoggerWrapper(npyscreen.BoxTitle):\n _contained_widget = MultiLineLogger\n\n\nclass CustomTreeData(npyscreen.NPSTreeData):\n def __init__(self, annotation=None, *args, **kwargs):\n super(CustomTreeData, self).__init__(*args, **kwargs)\n self.color = \"DEFAULT\"\n self.annotation = annotation\n\n def add_annotation(self, annotation):\n self.annotation = annotation\n\n def has_annotation(self):\n if self.annotation is not None:\n if self.annotation != \"\":\n return True\n return False\n\n\nclass CustomMLTree(npyscreen.MLTree, npyscreen.MultiLineAction):\n def __init__(self, *args, **kwargs):\n super(CustomMLTree, self).__init__(*args, **kwargs)\n self.add_handlers({\n \"^Q\": self.quit,\n curses.KEY_LEFT: self.h_collapse_tree,\n curses.KEY_RIGHT: self.h_expand_tree,\n curses.ascii.NL: self.toggle_collapse_tree,\n curses.ascii.CR: self.toggle_collapse_tree,\n \"a\": self.annotation,\n \"f\": self.set_value,\n \"^H\": help_screen\n })\n\n def set_up_handlers(self):\n super(CustomMLTree, self).set_up_handlers()\n # Secondary handler since add_handlers does not support Spacebar\n self.handlers.update({\n ord(' '): self.toggle_collapse_tree\n })\n\n def toggle_collapse_tree(self, *args, **kwargs):\n if self._has_children(self.values[self.cursor_line]):\n if self.values[self.cursor_line].expanded:\n self.h_collapse_tree(self)\n else:\n self.h_expand_tree(self)\n else:\n self.annotation()\n\n def set_value(self, *args, **kwargs):\n try:\n name = self.values[self.cursor_line].content\n if self.find_parent_app().control_panel.entry_widget.value == \"\":\n self.find_parent_app().control_panel.entry_widget.value = \"flag show \" + name\n else:\n self.find_parent_app().control_panel.entry_widget.value += name\n self.find_parent_app().control_panel.entry_widget.update()\n except IndexError:\n self.find_parent_app().log_man.handled_add(\"Report does not exist?\")\n\n def _set_line_values(self, line, value_indexer):\n try:\n line.color = self.values[value_indexer].color\n except IndexError:\n self._set_line_blank(line)\n except TypeError:\n self._set_line_blank(line)\n super(CustomMLTree, self)._set_line_values(line, value_indexer)\n\n def annotation(self, *args, **kwargs):\n try:\n if self.values[self.cursor_line].has_annotation():\n npyscreen.notify_confirm(self.values[self.cursor_line].annotation,\n title=self.values[self.cursor_line].content,\n form_color='STANDOUT',\n wrap=True,\n wide=True,\n editw=0)\n else:\n self.find_parent_app().log_man.handled_add(\"NO ANNOTATION\")\n except IndexError:\n 
self.find_parent_app().log_man.handled_add(\"NO ANNOTATION\")\n\n def quit(self, *args, **kwargs):\n raise KeyboardInterrupt\n\n\nclass CustomMLTreeWrapper(npyscreen.BoxTitle):\n _contained_widget = CustomMLTree\n\n\nclass WindowForm(npyscreen.FormBaseNew):\n def create(self, *args, **kwargs):\n super(WindowForm, self).create(*args, **kwargs)\n\n def while_waiting(self):\n pass","sub_path":"src/WidgetSchema.py","file_name":"WidgetSchema.py","file_ext":"py","file_size_in_byte":13504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"17797195","text":"\nfrom mp1 import MP, MPthread\n\nclass MonitorExample(MP):\n\t\"\"\"\n\tAn example using the monitor features of the 4410\n\tSynchronization Library.\n\n\tNote: This monitor does not correctly implement the\n\timplied specification!\n\t\"\"\"\n\tdef __init__(self):\n\t\tMP.__init__(self, None)\n\t\tself.value = self.Shared(\"value\", 0)\n\t\tself.lock = self.Lock(\"monitor lock\")\n\t\tself.gt0 = self.lock.Condition(\"value greater than 0\")\n\t\tself.lt2 = self.lock.Condition(\"value less than 2\")\n\n\tdef get_value(self):\n\t\twith self.lock:\n\t\t\treturn self.value.read()\n\n\tdef block_until_pos(self):\n\t\twith self.lock:\n\t\t\twhile not (self.value.read() > 0):\n\t\t\t\tself.gt0.wait()\n\n\tdef update(self, value):\n\t\twith self.lock:\n\t\t\tself.value.write(value)\n\t\t\tif self.value.read() > 0:\n\t\t\t\tself.gt0.signal()\n\t\t\tif self.value.read() < 2:\n\t\t\t\tself.lt2.broadcast()\n\n","sub_path":"Synchronization/examples/rvr-monitor-example.py","file_name":"rvr-monitor-example.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"298517240","text":"from flask import Blueprint, request, current_app\nfrom flask.ext.api import status\nfrom flask.ext.login import login_required, logout_user, login_user, current_user\nfrom flask_json import as_json\n\nfrom Src.Security import login_manager\nfrom Src.Models import db\nfrom Src.Forms.Conta.UsuarioAutenticacaoForm import UsuarioAutenticacaoForm\nfrom Src.Forms.Conta.UsuarioForm import UsuarioForm\nfrom Src.Models.Conta.UsuarioModel import UsuarioModel\n\nconta = Blueprint('conta', __name__)\n\n\n@login_manager.user_loader\ndef load_user(usuario_id):\n return UsuarioModel.query.filter(UsuarioModel.id == usuario_id).first()\n\n\n@conta.route('/usuarios', methods=['POST'])\n@as_json\ndef adicionar_usuario():\n form = UsuarioForm.from_json(request.get_json(force=True))\n form.validate_on_submit()\n\n if any(form.senha.errors) or \\\n any(form.nome.errors) or\\\n any(form.email.errors):\n mensagem_erro = 'Usuário inválido. 
{0} {1} {2}'.format(\n ' '.join(form.senha.errors if form.senha.errors else ''),\n ' '.join(form.nome.errors if form.nome.errors else ''),\n ' '.join(form.email.errors if form.email.errors else ''))\n\n return {'descricao': mensagem_erro.strip()}, status.HTTP_400_BAD_REQUEST\n\n if UsuarioModel.query.filter_by(email=form.email.data).first() or\\\n UsuarioModel.query.filter_by(nome=form.nome.data).first():\n return {'descricao': 'usuário não pode ser cadastrado.'}, status.HTTP_409_CONFLICT\n\n usuario_novo = UsuarioModel(form.nome.data, form.email.data, form.senha.data)\n db.session.add(usuario_novo)\n db.session.commit()\n\n return {'descricao':'Usuário cadastrado com sucesso.'}, status.HTTP_201_CREATED\n\n\n@conta.route(\"/sessao\", methods=[\"POST\"])\n@as_json\ndef autenticar():\n form = UsuarioAutenticacaoForm.from_json(request.get_json(force=True))\n form.validate_on_submit()\n\n if any(form.senha.errors) or\\\n any(form.usuario.errors):\n mensagem_erro = 'Usuário inválido. {0} {1}'.format(\n ' '.join(form.senha.errors if form.senha.errors else ''),\n ' '.join(form.usuario.errors if form.usuario.errors else ''))\n\n return {'descricao': mensagem_erro.strip()}, status.HTTP_400_BAD_REQUEST\n\n usuario = UsuarioModel.query.filter_by(email=form.usuario.data).first()\n\n if usuario and usuario.senha_correta(form.senha.data):\n login_user(usuario)\n return {'descricao': 'Usuário autenticado com sucesso.'}, status.HTTP_200_OK\n\n return {'descricao': 'Usuário ou senha inválido.'}, status.HTTP_401_UNAUTHORIZED\n\n\n@conta.route('/sessao', methods=[\"DELETE\"])\n@login_required\n@as_json\ndef sair():\n logout_user()\n return {'descricao': 'Usuário desconectado com sucesso.'}, status.HTTP_200_OK\n\n\n@conta.route('/sessao/usuario_info', methods=[\"GET\"])\n@login_required\n@as_json\ndef checar_status():\n return {'autenticacao_status': current_user.to_json()}\n","sub_path":"Src/Controllers/Conta/Conta.py","file_name":"Conta.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"554293078","text":"from turtle import Screen, Turtle\n\nscreen = Screen()\nscreen.bgcolor(\"black\")\nscreen.setup(width=800, height=600)\nscreen.title(\"Pong\")\n\npaddle = Turtle()\npaddle.shape(\"square\")\npaddle.color(\"white\")\npaddle.shapesize(stretch_wid=5, stretch_len=1)\npaddle.penup()\npaddle.goto(350, 0)\n\ndef go_up():\n new_y = paddle.ycor() + 28\n paddle.goto(paddle.xcor(), new_y)\n\ndef go_down():\n new_y = paddle.ycor() - 28\n paddle.goto(paddle.xcor(), new_y)\n \nscreen.listen()\nscreen.onkey(go_up, \"Up\")\nscreen.onkey(go_down, \"Down\")\n\nscreen.exitonclick()\n\n","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25970363","text":"import pandas as pd \r\nimport numpy as np \r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.linear_model import LogisticRegression \r\nfrom sklearn.svm import SVC \r\nfrom sklearn.metrics import accuracy_score \r\nfrom sklearn.preprocessing import MinMaxScaler \r\nfrom sklearn.manifold import TSNE \r\nimport matplotlib.pyplot as plt \r\nimport seaborn as sns \r\nfrom keras.layers import Input, Dense \r\nfrom keras.models import Model, Sequential \r\nfrom keras import regularizers\r\nimport pandas as pd\r\nimport numpy\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom 
sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.metrics import confusion_matrix, zero_one_loss\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn import metrics\r\n\r\nIndex=(['normal.', 'buffer_overflow.', 'loadmodule.', 'perl.', 'neptune.',\r\n 'smurf.', 'guess_passwd.', 'pod.', 'teardrop.', 'portsweep.',\r\n 'ipsweep.', 'land.', 'ftp_write.', 'back.', 'imap.', 'satan.', 'phf.',\r\n 'nmap.', 'multihop.', 'warezmaster.', 'warezclient.', 'spy.',\r\n 'rootkit.'])\r\n\r\nprint(np.shape(Index))\r\n\r\nprint(Index[0])\r\n\r\n\r\n# DATASET\r\n\r\n# ---------------------------------------------------------------------------\r\n# Must declare data_dir as the directory of training and test files\r\n#data_dir=\"./datasets/KDD-CUP-99/\"\r\nraw_data_filename =\"kddcup.data_10_percent_corrected\"\r\n#raw_data_filename = data_dir + \"kddcup.data_10_percent\"\r\nprint (\"Loading raw data\")\r\nraw_data = pd.read_csv(raw_data_filename, header=None)\r\nprint (\"Transforming data\")\r\n# Categorize columns: \"protocol\", \"service\", \"flag\", \"attack_type\"\r\nraw_data[1], protocols= pd.factorize(raw_data[1])\r\nraw_data[2], services = pd.factorize(raw_data[2])\r\nraw_data[3], flags = pd.factorize(raw_data[3])\r\nraw_data[41], attacks = pd.factorize(raw_data[41])\r\n\r\nprint(attacks)\r\n# separate features (columns 1..40) and label (column 41)\r\nfeatures= raw_data.iloc[:,:raw_data.shape[1]-1]\r\nlabels= raw_data.iloc[:,raw_data.shape[1]-1:]\r\nlabels= labels.values.ravel() # this becomes a 'horizontal' array\r\nprint(labels)\r\nimport matplotlib.pyplot as plt\r\nplt.plot(labels);\r\nplt.show()\r\n\r\n\r\ndf= pd.DataFrame(features)\r\nX, X_test, y, y_test = train_test_split(df, labels, train_size=0.8, test_size=0.2)\r\n\r\nprint (\"X, y :\", X.shape, y.shape)\r\nprint (\"X_test, y_test:\", X_test.shape, y_test.shape)\r\n\r\n# Scaling the data to make it suitable for the auto-encoder \r\nX_scaled = MinMaxScaler().fit_transform(X) \r\nX_normal_scaled = X_scaled[y == 0] \r\nX_ids_scaled = X_scaled[y == 1] \r\n\r\n# Building the Input Layer \r\ninput_layer = Input(shape =(X.shape[1], )) \r\n\r\n# Building the Encoder network \r\nencoded = Dense(100, activation ='tanh',activity_regularizer = regularizers.l1(10e-5))(input_layer) \r\nencoded = Dense(50, activation ='tanh',activity_regularizer = regularizers.l1(10e-5))(encoded) \r\nencoded = Dense(25, activation ='tanh',activity_regularizer = regularizers.l1(10e-5))(encoded) \r\nencoded = Dense(12, activation ='tanh',activity_regularizer = regularizers.l1(10e-5))(encoded) \r\nencoded = Dense(6, activation ='relu')(encoded) \r\n\r\n# Building the Decoder network \r\ndecoded = Dense(12, activation ='tanh')(encoded) \r\ndecoded = Dense(25, activation ='tanh')(decoded) \r\ndecoded = Dense(50, activation ='tanh')(decoded) \r\ndecoded = Dense(100, activation ='tanh')(decoded) \r\n\r\n# Building the Output Layer \r\noutput_layer = Dense(X.shape[1], activation ='relu')(decoded) \r\n\r\n# Defining the parameters of the Auto-encoder network \r\nautoencoder = Model(input_layer, output_layer) \r\nautoencoder.compile(optimizer =\"adadelta\", loss =\"mse\") \r\n\r\n# Training the Auto-encoder network \r\nautoencoder.fit(X_normal_scaled, X_normal_scaled,batch_size = 16, epochs =1,shuffle = True, validation_split = 0.20) \r\n\r\nhidden_representation = Sequential() 
\r\nhidden_representation.add(autoencoder.layers[0]) \r\nhidden_representation.add(autoencoder.layers[1]) \r\nhidden_representation.add(autoencoder.layers[2]) \r\nhidden_representation.add(autoencoder.layers[3]) \r\nhidden_representation.add(autoencoder.layers[4])\r\n\r\n# Separating the points encoded by the Auto-encoder as normal and ids \r\nnormal_hidden_rep = hidden_representation.predict(X_normal_scaled) \r\nids_hidden_rep = hidden_representation.predict(X_ids_scaled) \r\n\r\n# Combining the encoded points into a single table \r\nencoded_X = np.append(normal_hidden_rep, ids_hidden_rep, axis = 0) \r\ny_normal = np.zeros(normal_hidden_rep.shape[0]) \r\ny_ids = np.ones(ids_hidden_rep.shape[0]) \r\nencoded_y = np.append(y_normal, y_ids) \r\n\r\nprint(np.shape(encoded_X))\r\nprint(np.shape(encoded_y))\r\n#Create a Gaussian Classifier\r\nclf=RandomForestClassifier(n_estimators=100)\r\n#Train the model using the training sets y_pred=clf.predict(X_test)\r\nclf.fit(encoded_X,encoded_y)\r\ny_pred=clf.predict(encoded_X)\r\nprint(\"Accuracy:\",metrics.accuracy_score(encoded_y, y_pred))\r\n\r\nfor ik in range(len(y_pred)):\r\n print(Index[y_pred[ik]])\r\n","sub_path":"GIVEN_RF_6_2_20.py","file_name":"GIVEN_RF_6_2_20.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"566268487","text":"import pytest\nimport time\nfrom .pages.product_page import ProductPage\nfrom .pages.login_page import LoginPage\nfrom .pages.basket_page import BasketPage\n\n\nlink = \"https://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n\n\nclass TestProductPage:\n def test_guest_should_see_login_link_on_product_page(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.should_be_login_link()\n\n @pytest.mark.need_review\n def test_guest_can_go_to_login_page_from_product_page(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.go_to_login_page()\n login_page = LoginPage(browser, browser.current_url)\n login_page.should_be_login_page()\n\n @pytest.mark.need_review\n @pytest.mark.parametrize('promo', [\"?promo=offer0\",\n # pytest.param(\"?promo=offer7\", marks=pytest.mark.xfail)\n ])\n def test_guest_can_add_product_to_basket(self, browser, promo):\n url = link + promo\n page = ProductPage(browser, url)\n page.open()\n page.add_product_to_basket()\n page.solve_quiz_and_get_code()\n page.should_be_product_add_to_basket_message()\n page.should_be_price_of_basket()\n\n @pytest.mark.skip\n def test_message_disappeared_after_adding_product_to_basket(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.add_product_to_basket()\n page.should_be_disappeared_success_message()\n\n @pytest.mark.need_review\n def test_guest_cant_see_product_in_basket_opened_from_product_page(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.go_to_basket_page()\n basket_page = BasketPage(browser, browser.current_url)\n basket_page.should_be_basket_page()\n basket_page.should_not_be_list_of_products()\n basket_page.should_be_basket_is_empty_message()\n\n\n@pytest.mark.user_on_product_page\nclass TestUserAddToBasketFromProductPage:\n @pytest.fixture(scope='function', autouse=True)\n def setup(self, browser):\n login_link = \"https://selenium1py.pythonanywhere.com/accounts/login/\"\n login_page = LoginPage(browser, login_link)\n login_page.open()\n email = str(time.time()) + \"@fakemail.org\"\n password = str(time.time())\n login_page.register_new_user(email, 
password)\n login_page.should_be_authorized_user()\n\n def test_user_cant_see_success_message(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_basket(self, browser):\n url = link + \"?promo=offer0\"\n page = ProductPage(browser, url)\n page.open()\n page.add_product_to_basket()\n page.solve_quiz_and_get_code()\n page.should_be_product_add_to_basket_message()\n page.should_be_price_of_basket()\n","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83842633","text":"import sys, glob, time, serial, os, struct, subprocess, threading, struct\r\n\r\nstd_speeds = ['115200', '57600', '38400', '19200', '9600', '4800', '2400', '1200', '600', '300', '150', '100', '75', '50']\r\nparitys = ['N', 'E', 'O'] #Бит четности\r\nstopbitss = [1, 2] #Количество стоп-бит\r\nbite_size = 8 #Биты данных\r\nt_out = 1 #Таймаут в секундах, должен быть больше 1с\r\nflag1=0 #Флаг для остановки программы, устанавливается в 1, если найдена сигнатура \r\nreading_bytes = 1 #Количество байт для чтения после открытия порта\r\n\r\nSIGNATURE_TO_CONNECTION = b'\\x5e' #'^'\r\nSIGNATURE_END_SERCH = b'\\x5f' #'_'\r\nSIGNATURE_CONFIRM_CONNECTION = b'\\x7e' #'~'\r\n\r\nSIGNATURE_PORT_CLOSE \t\t\t= b'\\x7C'\t\t#'|'\r\n\r\nSIGMATURE_START_SEND_ARRAY\t\t= b'\\x1D'\t\t#'{' PC -> UART\r\nSIGNATURE_END_SEND_ARRAY\t\t= b'\\x7D' \t\t#'}'\r\n\r\nSIGNATURE_START_RECIVE_ARRAY\t= b'\\x5B'\t\t#'[' UART -> PC\r\nSIGNATURE_END_RECIVE_ARRAY\t\t= b'\\x5D'\t\t#']'\r\n\r\n\r\nrecive_buffer = []\r\n\r\nser = serial.Serial()\r\n\r\n################# Поиск доступных портов windows, linux,\r\ndef serial_ports():\r\n if sys.platform.startswith('win'):\r\n ports = ['COM%s' % (i + 1) for i in range(256)]\r\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\r\n # this excludes your current terminal \"/dev/tty\"\r\n ports = glob.glob('/dev/tty[A-Za-z]*')\r\n elif sys.platform.startswith('darwin'):\r\n ports = glob.glob('/dev/tty.*')\r\n else:\r\n raise EnvironmentError('Unsupported platform')\r\n\r\n result = []\r\n for port in ports:\r\n try:\r\n s = serial.Serial(port)\r\n s.close()\r\n result.append(port)\r\n except (OSError, serial.SerialException):\r\n pass\r\n return result\r\n##################################\r\n\r\nprint('Сигнатура для поиска:', end = '')\r\nprint(SIGNATURE_TO_CONNECTION)\r\n\r\nports=serial_ports()\r\nif ports:\r\n print('Доступные порты:')\r\n print(ports)\r\n if len(ports)>1:\r\n #ser.port = 'COM8' ############################# раскоментировать\r\n ser.port = input('Введите адрес COM порта ')\r\n else:\r\n ser.port = ports[0]\r\n print ('Работаем с портом '+ser.port)\r\nelse:\r\n print('\\nНет доступных COM портов, проверьте подключние.\\n')\r\n sys.exit()\r\n\r\ntry: \r\n for stop_bit in stopbitss:\r\n for parit in paritys:\r\n for com_speed in std_speeds:\r\n ser.close()\r\n ser.baudrate = com_speed\r\n ser.timeout = t_out\r\n ser.bytesize = bite_size\r\n ser.parity = parit\r\n ser.stopbits = stop_bit\r\n ser.open()\r\n #ser.write(cmd) #!Раскомментировать при необходимости отправки команды в устройство для инициализации связи \r\n message_b = ser.read(reading_bytes)\r\n if flag1==1:\r\n break\r\n if message_b:\r\n print ('\\nRAW data on '+ser.port+', '+com_speed+', '+str(ser.bytesize)+', '+ser.parity+', 
'+str(ser.stopbits)+':')\r\n print ('---------------------')\r\n print (message_b)\r\n print ('---------------------')\r\n try:\r\n if SIGNATURE_TO_CONNECTION in message_b:\r\n print ('\\n\\033[0;33mСигнатура ', end = '') #желтый цвет текста\r\n print(SIGNATURE_TO_CONNECTION, end = '')\r\n print(' найдена при следующих настройках: \\n'+ser.port+', '+com_speed+', '+str(ser.bytesize)+', '+ser.parity+', '+str(ser.stopbits))\r\n print('\\x1b[0m')\r\n ser.close()\r\n port = ser.port\r\n baudrate = com_speed\r\n bytesize = ser.bytesize\r\n parity = ser.parity\r\n stopbits = ser.stopbits\r\n flag1=1\r\n break\r\n else:\r\n ser.close()\r\n except:\r\n print ('error decode')\r\n print ('---------------------')\r\n ser.close()\r\n else:\r\n print('timeout on '+ser.port+', '+com_speed+', '+str(ser.bytesize)+', '+ser.parity+', '+str(ser.stopbits))\r\n print ('---------------------')\r\n ser.close()\r\n if flag1 == 0:\r\n print('Поиск завершен, сигнатура не найдена')\r\nexcept serial.SerialException: \r\n print ('Ошибка при открытии порта '+ser.port)\r\n sys.exit()\r\n\r\ndef connect_to_serial():\r\n\r\n while (ser.read() != SIGNATURE_CONFIRM_CONNECTION):\r\n ser.write(SIGNATURE_END_SERCH)\r\n\r\ndef read_array ():\r\n buf = 0\r\n while (ser.read() != SIGNATURE_END_RECIVE_ARRAY):\r\n buf = ser.read(30)\r\n recive_buffer.append(buf)\r\n\r\n\r\ndef main(): \r\n\r\n connect_to_serial()\r\n\r\n if ser.read() != 0:\r\n\r\n while(ser.read() != SIGNATURE_PORT_CLOSE):\r\n print(ser.read())\r\n\r\n if ser.read() == SIGNATURE_START_RECIVE_ARRAY:\r\n read_array()\r\n print(recive_buffer)\r\n\r\n ser.close()\r\n\r\n else:\r\n print(\"serial is empty\") \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"com_scanner 0.1.0.py","file_name":"com_scanner 0.1.0.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"63482836","text":"#!/usr/bin/env python\n################################################################################\n# DATE: 2017/03/21\n#\n# SCRIPT: log_transformation.py\n#\n# VERSION: 2.2\n# \n# AUTHOR: Coded by: Miguel A Ibarra (miguelib@ufl.edu) \n# Edited by: Matt Thoburn (mthoburn@ufl.edu)\n# Last review by: Miguel A Ibarra (miguelib@ufl.edu) \n# \n# DESCRIPTION: This script log transforms the data.\n#\n################################################################################\n# Impot built-in libraries\nimport os\nimport logging\nimport argparse\nfrom argparse import RawDescriptionHelpFormatter\n\n# Impot add-on libraries\nimport numpy as np\nimport pandas as pd\n\n# Impot local data libraries\nfrom dataManager import logger as sl\nfrom dataManager.interface import wideToDesign\n\ndef getOptions():\n \"\"\" Function to pull in arguments \"\"\"\n description = \"\"\" One-Way ANOVA \"\"\"\n parser = argparse.ArgumentParser(description=description, \n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"-i\",\"--input\", dest=\"input\", action='store', \n required=True, help=\"Input dataset in wide format.\")\n parser.add_argument(\"-d\",\"--design\", dest=\"design\", action='store', \n required=True, help=\"Design file.\")\n parser.add_argument(\"-id\",\"--ID\", dest=\"uniqID\", action='store', \n required=True, 
help=\"Name of the column with unique\"\\\n \" identifiers.\")\n parser.add_argument(\"-l\", \"--log\", dest=\"log\", action='store', \n required=True, choices=['log', 'log10', 'log2'], \n default=None, help=\"Type of log to be used\")\n parser.add_argument(\"-o\",\"--out\", dest=\"oname\", action='store', \n required=True, help=\"Output file name.\")\n parser.add_argument(\"--debug\", dest=\"debug\", action='store_true', \n required=False, help=\"Add debugging log output.\")\n args = parser.parse_args()\n\n # Standatdize paths\n args.oname = os.path.abspath(args.oname)\n args.input = os.path.abspath(args.input)\n args.design = os.path.abspath(args.design)\n\n return(args)\n\n\ndef main(args):\n # Imput data\n dat = wideToDesign(args.input, args.design, args.uniqID, logger=logger)\n\n # Convert objects to numeric\n norm = dat.wide.applymap(float)\n\n # According to the tipe of log selected perform log transformation\n if args.log == 'log':\n logger.info(u\"Running log transform with log e\")\n norm = norm.apply(lambda x: np.log(x))\n elif args.log == 'log2':\n logger.info(u\"Running log transform with log 2\")\n norm = norm.apply(lambda x: np.log2(x))\n elif args.log == 'log10':\n logger.info(u\"Running log transform with log 10\")\n norm = norm.apply(lambda x: np.log10(x))\n\n # Round results to 4 digits\n norm = norm.apply(lambda x: x.round(4))\n\n # Treat inf as NaN\n norm.replace([np.inf, -np.inf], np.nan, inplace=True)\n\n # Save file to CSV\n norm.to_csv(args.oname, sep=\"\\t\")\n logger.info(\"Finishing Script\")\n \nif __name__ == '__main__':\n # Command line options\n args = getOptions()\n\n # Set up logger\n logger = logging.getLogger()\n sl.setLogger(logger)\n\n # Import data\n logger.info(u\"Importing data with the folowing parameters: \"\\\n \"\\n\\tWide: {0}\"\\\n \"\\n\\tDesign:{1}\"\\\n \"\\n\\tUniqID:{2}\"\\\n \"\\n\\tLog: {3}\".\\\n format(args.input,args.design,args.uniqID,args.log))\n\n # Runing log transformation\n main(args)\n","sub_path":"src/scripts/log_transformation.py","file_name":"log_transformation.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"416454055","text":"# data 폴더의 2019-01-15, 2019-01-16, 2019-01-17의 시간, 온도 데이터를\n# 분석하는 프로그램을 만드시오\n\n# 각 일자별 최고 온도를 구하시오.\n# 각 일자별 최저 온도를 구하시오.\n# 위에서 구한 최고/최저 온도를 따로 파일로 작성 할 수 있게 하시오.\n\n# hit : .split()\n\ndef loadFile(filename, base_dir='.\\\\data'):\n fo = open('\\\\'.join([base_dir, filename]), 'r', encoding='utf-8')\n data = fo.read()\n fo.close()\n return data\n\ndef dataConvert(data):\n res = [] # 빈 리스트\n for item in data.split('\\n')[:-1]: # 마지막 요소 제외\n t, m = item.split(' ') # unpacking\n res.append([t, float(m)])\n return res\n\n# def maxTemp(data):\n# mx = data[0][1]\n# for item in data[1:]:\n# if mx < item[1]:\n# mx = item[1]\n# return mx\n\n# def minTemp(data):\n# mn = data[0][1]\n# for item in data[1:]:\n# if mn > item[1]:\n# mn = item[1]\n# return mn\n\ndef max_minTemp(data):\n return max(data), min(data)\n\ndef saveFile(filename, resultList, base_dir = '.\\\\data'):\n # 파일에 기록되는 다음과 같이 하시오.\n # 2019-01-15 27.0 17.0\n # 2019-01-16 25-0 15.0\n # 2019-01-17 25.0 14.0\n fw = open('\\\\'.join([base_dir, filename]), 'w', encoding='utf-8')\n\n for data in resultList:\n fw.write(data + '\\n')\n\n fw.close()\n\n# data = loadFile('2019-01-15')\n# d_15 = dataConvert(data)\n\n# mx = maxTemp(d_15)\n# mn = minTemp(d_15)\n\n# print(f'2019년 01월 15일 최고 온도는 {mx}, 최저 온도는 {mn}')\n\nresultList = []\nfor day in range(15, 
18):\n data = loadFile(f'2019-01-{day}')\n d = dataConvert(data)\n d_temp = list(map(lambda x:x[1], d))\n # mx, mn = maxTemp(d), minTemp(d)\n mx, mn = max_minTemp(d_temp)\n resultList.append(f'2019-01-{day} {mx} {mn}')\n # print(f'2019년 01월 {day}일 최고 온도는 {mx}, 최저 온도는 {mn}')\n\nsaveFile(f'temperatureResult.txt', resultList)\n\nprint(list(map(lambda x:x[1], d))) # 온도 데이터 맵핑\nprint(max(list(map(lambda x:x[1], d)))) # 맵핑 데이터 중 최대 값 # 리스트 또는 형변환 필요함","sub_path":"workspace/kg_lecture/Ch01/07_exam.py","file_name":"07_exam.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"610601778","text":"\"\"\"\nCopyright (c) 2004-Present Pivotal Software, Inc.\n\nThis program and the accompanying materials are made available under\nthe terms of the under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport sys\nimport shutil\n\nfrom contextlib import closing\nfrom datetime import datetime\nfrom StringIO import StringIO\n\nimport unittest2 as unittest\nfrom unittest2.runner import _WritelnDecorator\n\nfrom tinctest import TINCTestLoader\nfrom tinctest import TINCTextTestResult\n\nfrom mpp.models.mpp_tc import _MPPMetaClassType\nfrom mpp.models.mpp_tc import MPPDUT\n\nfrom mpp.models import SQLTestCase, SQLTestCaseException\n\n# Since we overwrite optimizer_mode depending on product/version, force the internal variables to gpdb/4.3\n# This will ensure that optimizer_mode both works as designed, and all the tests written for that works.\n# _MPPMetaClassType.DUT = MPPDUT('gpdb', '4.3')\n\n@unittest.skip('mock')\nclass MockSQLTestCase(SQLTestCase):\n \"\"\"\n \n @description test case with metadata\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags orca hashagg\n @gucs gp_optimizer=on;gp_log_optimizer=on\n @optimizer_mode ON\n \"\"\"\n def setUp(self):\n pass\n def test_explicit_definition(self):\n pass\n\n@unittest.skip('mock')\nclass MockSQLTemplateTestCase(SQLTestCase):\n template_dir = 'template_dir'\n template_subs = {'%PERCENTAGE%' : 'my_percent',\n '&&' : 'my_amp',\n '@AT' : 'my_at'}\n \n@unittest.skip('mock')\nclass MockSQLTemplateTestCaseExplicit(SQLTestCase):\n template_dir = 'template_dir'\n template_subs = {'%PERCENTAGE%' : 'my_percent',\n '&&' : 'my_amp',\n '@AT' : 'my_at'}\n \n@unittest.skip('mock')\nclass MockSQLTemplateTestCaseRegular(SQLTestCase):\n template_dir = 'template_dir'\n template_subs = {'%PERCENTAGE%' : 'my_percent',\n '&&' : 'my_amp',\n '@AT' : 'my_at'}\n\nclass MockMPPMetaClassTypeGPDB43(_MPPMetaClassType):\n _MPPMetaClassType.DUT = MPPDUT('gpdb', '4.3')\n \n@unittest.skip('mock')\nclass MockSQLTestCaseForOptimizerMode(SQLTestCase):\n \"\"\"\n \n @description test case with metadata\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags orca hashagg\n @gucs gp_optimizer=on;gp_log_optimizer=on\n @optimizer_mode on\n \"\"\"\n __metaclass__ = MockMPPMetaClassTypeGPDB43\n pass\n\n@unittest.skip('mock')\nclass MockSQLTestCaseForOptimizerModeBoth(SQLTestCase):\n \"\"\"\n 
@optimizer_mode both\n \"\"\"\n __metaclass__ = MockMPPMetaClassTypeGPDB43\n pass\n\n@unittest.skip('mock')\nclass MockSQLTestCaseInvalidOptimizerMode(SQLTestCase):\n \"\"\"\n @optimizer_mode invalid_value\n \"\"\"\n __metaclass__ = MockMPPMetaClassTypeGPDB43\n pass\n\nclass MockMPPMetaClassTypeHAWQ(_MPPMetaClassType):\n _MPPMetaClassType.DUT = MPPDUT('hawq', '1.1.0.0')\n\n@unittest.skip('mock')\nclass MockSQLTestCaseOptimizerModeHAWQ(SQLTestCase):\n __metaclass__ = MockMPPMetaClassTypeHAWQ\n\n def test_optimizer_mode_both(self):\n \"\"\"\n @optimizer_mode both\n \"\"\"\n pass\n\n def test_optimizer_mode_on(self):\n \"\"\"\n @optimizer_mode on\n \"\"\"\n pass\n\n def test_optimizer_mode_off(self):\n \"\"\"\n @optimizer_mode off\n \"\"\"\n pass\n\nclass SQLTestCaseTests(unittest.TestCase):\n\n def test_infer_metadata(self):\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)\n test_case = None\n for case in test_suite._tests:\n if case.name == \"MockSQLTestCase.test_query02\":\n test_case = case\n self.assertNotEqual(test_case, None)\n self.assertEqual(test_case.name, \"MockSQLTestCase.test_query02\")\n self.assertEqual(test_case.author, 'kumara64')\n self.assertEqual(test_case.description, 'test sql test case')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-08 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))\n self.assertEqual(test_case.gucs, set(['gp_optimizer=on', 'gp_log_optimizer=on']))\n \n def test_optimizer_mode_from_sql_file(self):\n test_case = MockSQLTestCaseForOptimizerMode('test_query02')\n # sql file query02.sql has overriden optimizer_mode\n self.assertEqual(test_case.optimizer_mode, 'off')\n\n def test_optimizer_mode_from_class(self):\n test_case = MockSQLTestCaseForOptimizerMode('test_query03')\n self.assertEqual(test_case.optimizer_mode, 'on')\n\n def test_optimizer_mode_invalid_value(self):\n with self.assertRaises(SQLTestCaseException) as cm:\n test_case = MockSQLTestCaseInvalidOptimizerMode('test_query01')\n \n def test_direct_instantiation(self):\n test_case = MockSQLTestCase('test_query02')\n self.assertEqual(test_case.name, \"MockSQLTestCase.test_query02\")\n self.assertEqual(test_case.author, 'kumara64')\n self.assertEqual(test_case.description, 'test sql test case')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-08 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))\n\n def test_explicit_test_fixtures(self):\n test_case = MockSQLTestCase('test_explicit_definition')\n self.assertEqual(test_case.name, \"MockSQLTestCase.test_explicit_definition\")\n self.assertEqual(test_case.author, 'balasr3')\n self.assertEqual(test_case.description, 'test case with metadata')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-05 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg']))\n\n def test_explicit_test_fixtures_through_loading(self):\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)\n # 4 tests for 3 sqls in the directory and 1 
explicit test method\n self.assertEqual(test_suite.countTestCases(), 4)\n\n def test_optimizer_mode_both(self):\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForOptimizerModeBoth)\n for test in test_suite._tests:\n # Data provider should exists for query01 and query03.\n # query02 shouldn't have it, since its optimizer mode is overwritten with value 'off'\n if test.name == \"MockSQLTestCaseForOptimizerModeBoth.test_query01\" or test.name == \"MockSQLTestCaseForOptimizerModeBoth.test_query03\":\n self.assertEqual(test.optimizer_mode, \"both\")\n self.assertEqual(test.data_provider, \"optimizer_handling\")\n else:\n self.assertNotEqual(test.optimizer_mode, \"both\")\n self.assertTrue(test.data_provider is None)\n\n def test_optimizer_mode_hawq(self):\n \"\"\"\n Test whether optimizer_mode both is overriden in hawq to None\n \"\"\"\n test_case = MockSQLTestCaseOptimizerModeHAWQ('test_optimizer_mode_both')\n self.assertIsNone(test_case.optimizer_mode)\n test_case = MockSQLTestCaseOptimizerModeHAWQ('test_optimizer_mode_on')\n self.assertEquals(test_case.optimizer_mode, 'on')\n test_case = MockSQLTestCaseOptimizerModeHAWQ('test_optimizer_mode_off')\n self.assertEquals(test_case.optimizer_mode, 'off')\n \n \nclass MockSQLTestCaseForSkip(SQLTestCase):\n \"\"\"\n \n @description test case to test skip tag\n @created 2012-08-07 12:00:00\n @modified 2012-08-07 12:00:02\n \"\"\"\n\nclass SQLTestCaseSkipTests(unittest.TestCase):\n def test_skip_tag_in_sql_file(self):\n test_case = MockSQLTestCaseForSkip('test_query01')\n self.assertEqual(test_case.name, \"MockSQLTestCaseForSkip.test_query01\")\n self.assertEqual(test_case.skip, 'demonstrating skipping')\n def test_skip_when_tag_in_sql_file(self):\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForSkip)\n test_case = None\n for case in test_suite._tests:\n if case.name == \"MockSQLTestCaseForSkip.test_query01\":\n test_case = case\n self.assertNotEqual(test_case, None)\n self.assertEqual(test_case.name, \"MockSQLTestCaseForSkip.test_query01\")\n with closing(_WritelnDecorator(StringIO())) as buffer:\n test_result = TINCTextTestResult(buffer, True, 1)\n test_case.run(test_result)\n self.assertEqual(test_result.testsRun, 1)\n self.assertEqual(len(test_result.failures), 0)\n self.assertEqual(len(test_result.skipped), 1)\n self.assertEqual(len(test_result.errors), 0)\n\n@unittest.skip('mock')\nclass MockSQLTestCaseForLoader(SQLTestCase):\n @classmethod\n def setUpClass(cls):\n pass\n\nclass SQLTestLoaderTests(unittest.TestCase):\n def test_load_implicit_python_from_name(self):\n \"\"\"Test loadTestsFromName for a dynamically generated sql test method\"\"\"\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTestCaseForLoader.test_query01')\n test_case = test_suite._tests[0]\n self.assertEqual(test_case.name, \"MockSQLTestCaseForLoader.test_query01\")\n self.assertEqual(test_case.author, 'lammin')\n self.assertEqual(test_case.description, 'test sql test case')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))\n\n def test_load_test_from_class_name(self):\n \"\"\"Test loadTestsFromName for a class name\"\"\"\n test_loader = TINCTestLoader()\n 
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTestCaseForLoader')\n test_case = None\n for my_test_case in test_suite._tests:\n if my_test_case.name == 'MockSQLTestCaseForLoader.test_query01':\n test_case = my_test_case\n break\n\n self.assertTrue(test_case is not None)\n self.assertEqual(test_case.name, \"MockSQLTestCaseForLoader.test_query01\")\n self.assertEqual(test_case.author, 'lammin')\n self.assertEqual(test_case.description, 'test sql test case')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))\n\n def test_load_test_from_class_name_with_supplementary_sqls(self):\n \"\"\"Test loadTestsFromName for a class name\"\"\"\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTestCaseForLoader')\n # 3 tests for 3 sql tests in the current directory. \n self.assertEquals(len(test_suite._tests), 3)\n for test_case in test_suite._tests:\n if test_case.name == 'MockSQLTestCaseForLoader.test_query03':\n break\n\n self.assertEqual(test_case.name, \"MockSQLTestCaseForLoader.test_query03\")\n self.assertEqual(test_case.author, 'balasr3')\n self.assertEqual(test_case.description, 'test sql test case sql')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))\n\nclass SQLTemplateTests(unittest.TestCase):\n def test_templates_regular_sql(self):\n \"\"\"Test loadTestsFromName for a dynamically generated sql test method.\"\"\"\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTemplateTestCaseRegular.test_query01')\n test_case = test_suite._tests[0]\n # Non-template test case should work as is...\n self.assertEqual(test_case.name, \"MockSQLTemplateTestCaseRegular.test_query01\")\n self.assertEqual(test_case.author, 'lammin')\n self.assertEqual(test_case.description, 'test sql test case')\n self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))\n self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))\n \n def test_templates_template_sql_file(self):\n \"\"\"Test loadTestsFromName for a dynamically generated sql template test method.\"\"\"\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTemplateTestCaseExplicit.test_template_query04')\n test_case = test_suite._tests[0]\n # Template test case should work as if it is non-template test case...\n self.assertEqual(test_case.name, \"MockSQLTemplateTestCaseExplicit.test_template_query04\")\n self.assertEqual(test_case.author, 'shahn17')\n self.assertEqual(test_case.description, 'template test case')\n\n sql_file_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCaseExplicit\", \"template_query04.sql\")\n ans_file_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCaseExplicit\", 
\"template_query04.ans\")\n original_sql_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.sql_dir, test_case.__class__.template_dir, \"query04.sql\")\n original_ans_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.ans_dir, test_case.__class__.template_dir, \"query04.ans\")\n self.assertEqual(test_case.sql_file, sql_file_path)\n self.assertEqual(test_case.ans_file, ans_file_path)\n self.assertEqual(test_case._original_sql_file, original_sql_file_path)\n self.assertEqual(test_case._original_ans_file, original_ans_file_path)\n self.assertTrue(os.path.exists(test_case.sql_file))\n self.assertTrue(os.path.exists(test_case.ans_file))\n self.assertTrue(os.path.exists(test_case._original_sql_file))\n self.assertTrue(os.path.exists(test_case._original_ans_file))\n # Cleanup\n dir_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCaseExplicit\")\n self.assertTrue(os.path.exists(dir_path))\n shutil.rmtree(dir_path)\n \n def test_templates_all_files(self):\n \"\"\"Test loadTestsFromName for a class name\"\"\"\n test_loader = TINCTestLoader()\n test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTemplateTestCase')\n # 5 tests for 3 sql files in the current directory, and 2 sql files in the template directory\n self.assertEquals(len(test_suite._tests), 5)\n for test_case in test_suite._tests:\n if test_case.name == 'MockSQLTemplateTestCase.test_template_query04':\n break\n \n self.assertEqual(test_case.name, \"MockSQLTemplateTestCase.test_template_query04\")\n self.assertEqual(test_case.author, 'shahn17')\n self.assertEqual(test_case.description, 'template test case')\n \n sql_file_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCase\", \"template_query04.sql\")\n ans_file_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCase\", \"template_query04.ans\")\n original_sql_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.sql_dir, test_case.__class__.template_dir, \"query04.sql\")\n original_ans_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.ans_dir, test_case.__class__.template_dir, \"query04.ans\")\n self.assertEqual(test_case.sql_file, sql_file_path)\n self.assertEqual(test_case.ans_file, ans_file_path)\n self.assertEqual(test_case._original_sql_file, original_sql_file_path)\n self.assertEqual(test_case._original_ans_file, original_ans_file_path)\n self.assertTrue(os.path.exists(test_case.sql_file))\n self.assertTrue(os.path.exists(test_case.ans_file))\n self.assertTrue(os.path.exists(test_case._original_sql_file))\n self.assertTrue(os.path.exists(test_case._original_ans_file))\n \n # Template test case sql file should exists\n sql_file_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCase\", \"template_query04.sql\")\n self.assertTrue(os.path.exists(sql_file_path))\n sql_file_data = None\n with open(sql_file_path, 'r') as sql_file_object:\n sql_file_data = sql_file_object.read()\n self.assertTrue(sql_file_data is not None)\n # Correct substitution\n self.assertTrue('my_percent' in sql_file_data)\n # Error in python code\n self.assertTrue('my_at@' in sql_file_data)\n # Error in sql template\n self.assertTrue('&' in sql_file_data)\n\n # Template test case ans file should exists\n ans_file_path = 
os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCase\", \"template_query05.ans\")\n self.assertTrue(os.path.exists(ans_file_path))\n ans_file_data = None\n with open(ans_file_path, 'r') as sql_file_object:\n ans_file_data = sql_file_object.read()\n self.assertTrue(ans_file_data is not None)\n # Correct substitution\n self.assertTrue('my_percent' in ans_file_data)\n # Error in python code\n self.assertTrue('my_at@' in ans_file_data)\n # Error in ans template\n self.assertTrue('&' in ans_file_data)\n\n # Cleanup\n dir_path = os.path.join(test_case.get_out_dir(), \"MockSQLTemplateTestCase\")\n self.assertTrue(os.path.exists(dir_path))\n shutil.rmtree(dir_path)\n\n@unittest.skip('mock')\nclass MockTINCTestCaseForLoaderDiscovery(SQLTestCase):\n def test_lacking_product_version(self):\n \"\"\"\n \n @maintainer balasr3\n @description test stuff\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags storage\n \"\"\"\n pass\n def test_containing_product_version(self):\n \"\"\"\n \n @maintainer balasr3\n @description test stuff\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags storage\n @product_version gpdb: 4.2\n \"\"\"\n pass\n def test_main_product_version(self):\n \"\"\"\n \n @maintainer balasr3\n @description test stuff\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags storage\n @product_version gpdb: main\n \"\"\"\n pass\n\n def test_containing_product_version_exclusive_range(self):\n \"\"\"\n \n @maintainer balasr3\n @description test stuff\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags storage\n @product_version gpdb: (4.1.0.0-main)\n \"\"\"\n pass\n\n def test_containing_product_version_inclusive_range(self):\n \"\"\"\n \n @maintainer balasr3\n @description test stuff\n @created 2012-07-05 12:00:00\n @modified 2012-07-05 12:00:02\n @tags storage\n @product_version gpdb: [4.2.0.0-main]\n \"\"\"\n pass\n\n\nclass TINCTestLoaderDiscoveryTests(unittest.TestCase):\n def test_matching_author(self):\n test_case = MockTINCTestCaseForLoaderDiscovery('test_lacking_product_version')\n self.assertTrue(test_case.match_metadata(\"author\", \"pedroc\"))\n self.assertFalse(test_case.match_metadata(\"author\", \"kumara64\"))\n def test_matching_maintainer(self):\n test_case = MockTINCTestCaseForLoaderDiscovery('test_lacking_product_version')\n self.assertTrue(test_case.match_metadata(\"maintainer\", \"balasr3\"))\n self.assertFalse(test_case.match_metadata(\"maintainer\", \"kumara64\"))\n def test_matching_tags(self):\n test_case = MockTINCTestCaseForLoaderDiscovery('test_lacking_product_version')\n self.assertTrue(test_case.match_metadata(\"tags\", \"storage\"))\n self.assertFalse(test_case.match_metadata(\"tags\", \"text_analytics\"))\n","sub_path":"src/test/tinc/tincrepo/mpp/models/test/sql_related/test_sql_test_case.py","file_name":"test_sql_test_case.py","file_ext":"py","file_size_in_byte":21608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"3691771","text":"n = int(input(\"Please enter a positive integer: \"))\n\ntry:\n\tif n < 0:\n\t\traise ValueError()\n\tsum = 0\n\tfor i in range(n+1):\n\t\tsum += i\n\tprint(\"The sum is: {}\".format(sum))\nexcept ValueError:\n\tprint(\"This integer is not positive!\")\n","sub_path":"tag-2/aufgaben/aufgabe-1-1.py","file_name":"aufgabe-1-1.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"311860930","text":"# 
coding=utf-8\n# Copyright 2021-present, the Recognai S.L. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pydantic import BaseSettings\nfrom rubrix import DEFAULT_API_KEY\n\n\nclass Settings(BaseSettings):\n\n \"\"\"\n Attributes\n ----------\n\n secret_key:\n The secret key used for signed the token data\n\n algorithm:\n Encryption algorithm for token data\n\n token_expiration_in_minutes:\n The session token expiration in minutes. Default=30000\n\n \"\"\"\n\n secret_key: str = \"secret\"\n algorithm: str = \"HS256\"\n token_expiration_in_minutes: int = 30000\n token_api_url: str = \"/api/security/token\"\n\n default_apikey: str = DEFAULT_API_KEY\n default_password: str = (\n \"$2y$12$MPcRR71ByqgSI8AaqgxrMeSdrD4BcxDIdYkr.ePQoKz7wsGK7SAca\" # 1234\n )\n users_db_file: str = \".users.yml\"\n\n class Config:\n env_prefix = \"RUBRIX_LOCAL_AUTH_\"\n\n fields = {\n \"secret_key\": {\"env\": [\"SECRET_KEY\", f\"{env_prefix}SECRET_KEY\"]},\n \"token_expiration_in_minutes\": {\n \"env\": [\n \"TOKEN_EXPIRATION_IN_MINUTES\",\n f\"{env_prefix}TOKEN_EXPIRATION_IN_MINUTES\",\n ]\n },\n }\n\n\nsettings = Settings()\n","sub_path":"src/rubrix/server/security/auth_provider/local/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"94034209","text":"from scipy.interpolate import interp1d\nimport numpy as np\n\nclass Model(object):\n def __init__(self):\n pass\n def output(self):\n pass\n def step(self):\n pass\n \nclass FirstOrder(Model):\n \"\"\" \n This is First order process, described by differential equation dydt = (ku-y)/tau. In a first order process , the rate of change is directly proportional to the driving force with the proportionality constant being 1/tau. The driving force is (ku - y) . As the 'y' gets closer to 'ku' and driving force keeps on decreasing finally leading to zero , and the process reaches the steady state. \n \"\"\" \n def __init__(self,k,tau,dt):\n self.k = k\n self.tau = tau\n self.dt = dt\n \n def sim(self,u):\n y = 0\n ys = []\n ts = range(len(u))\n uf= interp1d(ts,u)\n for t in ts:\n\n if (t-self.dt) < 0:\n dydt=0\n else:\n dydt = (self.k*uf(t-self.dt) - y)/self.tau \n\n y += dydt\n ys.append(y)\n y_arr = np.array(ys)\n return y_arr \n \n def step(self): # does not work.\n \"\"\"\n By default, a step signal , with length equal to 5 times the time contant is used to generate the output\n \"\"\"\n step_signal = np.zeros(int(5*self.tau))\n step_signal[1:] = 1\n step_sim = self.sim(step_signal)\n return step_sim\n\n\n\nclass Ramp(Model):\n \"\"\" \n This is First order Ramp process, y = ku t where the ramp gain follows a first order process. \n For example , flow vs level is a ramp process. When you inrease a flow set point , the flow PV itself may follow a first order proess ,\n so a term dynamic gain is used (dyn_k). This dyn_k reaches the steady state value \"k\". 
The \"first order\" dynamics of the gain is not obviously visible in the step response or simulation . However , this is clearly visible in the impulse response. \n \"\"\" \n def __init__(self,u,k,tau,dt):\n self.k =k\n self.tau = tau\n self.dt = dt\n\n def sim(self,u):\n y = 0\n dyn_k = 0 \n ys = []\n ts = range(len(u))\n uf= interp1d(ts,u)\n for t in ts:\n\n if (t-self.dt) < 0:\n dkdt = 0\n else:\n dkdt = (self.k*uf(t-self.dt) - dyn_k )/self.tau\n dyn_k += dkdt\n\n y += dyn_k\n ys.append(y)\n y_arr = np.array(ys)\n return y_arr\n\nclass SecondOrder(Model):\n \"\"\"\n This is a second order process described differential equation tau2^2 d2ydt2 + tau1 dydt = (ku -y)\n This model is used to define the systems with underdamped , critically damped, overdamped systems. To define systems with inverese response,and overshoot response use secondorder2 \n \"\"\" \n def __init__(self,k,tau1,tau2,dt):\n self.k = k\n self.tau1 = tau1\n self.tau2 = dt\n self.dt = dt\n \n def sim(self,u):\n y = 0\n dy2dt2 = 0\n dydt = 0\n ys = []\n ts = range(len(u))\n u_int = interp1d(ts,u)\n for t in ts :\n if (t-self.dt ) < 0:\n dy2dt2 = 0\n dydt = 0\n else:\n dy2dt2 = (self.k*u_int(t-self.dt) - y - self.tau1*dydt) /(self.tau2*self.tau2) \n\n dydt = dydt + dy2dt2\n\n y += dydt\n ys.append(y)\n y_arr = np.array(ys)\n return y_arr\n\nclass SecondOrder2(Model):\n \"\"\"\n This is a second order process described by two parallel processs described by two differential equations\n dy1dt1 = (k1u -y1 ) /tau1\n dy2dt2 = (k2u - y2) /tau2\n y = dy1dt1 + dy2dt2\n This allows to define inverse response systems , when k1 and k2 are in opposite directions and tau1 is very short,\n compared to tau2.\n Eg: In columns with material balance control scheme , where the accumulator level is controlled by manipulating the reflux,\n the relationship between bottom temperatre setpoint and the top product quality follow the inverse response. As the bottom temperature increased , the vapor carries more heavier content.So the impurity in distillate increases first. However , as the accumulator level increases due increased vapor traffic, this increase the reflux flow. As the sharpness of separation increases , the impurity level decreases and reaches a new steady state value lower than intitial value. \n This can be also used to define the overshoot response system, if both k1 and k2 are in same direction. \n \"\"\" \n def __init__(self,k1,k2,tau1,tau2,dt):\n \n self.k1 = k1\n self.k2 = k2\n self.tau1 = tau1\n self.tau2 = tau2\n self.dt = dt\n self.k = self.k1 + self.k2\n \n def sim(self,u):\n \n y1 = 0\n y2 = 0\n dy1dt =0\n dy2dt = 0\n dydt = 0\n ys = []\n ts = range(len(u))\n u_int = interp1d(ts,u)\n for t in ts :\n if (t-self.dt ) < 0:\n dy1dt = 0\n dy2dt =0\n dydt = 0\n else:\n dy1dt = (self.k1*u_int(t-self.dt) - y1) /self.tau1 \n y1 += dy1dt\n dy2dt = (self.k2*u_int(t-self.dt) - y2) /self.tau2\n y2 += dy2dt\n y += (dy1dt+dy2dt)\n ys.append(y)\n y_arr = np.array(ys)\n return y_arr \n","sub_path":"models/.ipynb_checkpoints/models-checkpoint.py","file_name":"models-checkpoint.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"512878500","text":"\"\"\"Monte Carlo of inserting stuff inside of a sphere.\"\"\"\nfrom enum import Enum\nfrom numba import jit\nimport numpy as np\nimport math\nfrom .. 
import plot as wplot\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\n\nSMALL_NUM = 10e-8\n\n# @jit(nopython=True)\ndef is_inside(x, xr, r):\n \"\"\"is the sphere of radius xr totally inside the confining sphere of radius\n r centered at the origin?\n ||x|| + xr < r ?\"\"\"\n return x[0]*x[0] + x[1]*x[1] + x[2]*x[2] < (r - xr)*(r - xr)\n\n# @jit(nopython=True)\ndef norm3_squared(x):\n return x[0]*x[0] + x[1]*x[1] + x[2]*x[2]\n\n# @jit(nopython=True)\ndef weeks_chandler_anderson(r, sigma, epsilon=1):\n \"\"\"WCA Interaction Potential (a rescaled \"repulsive part of lennard jones\"\n potential) with epsilon defaulting to 1, so that you can fix beta*epsilon\n elsewhere in the simulation.\"\"\"\n # r = x2 - x1\n r2 = norm3_squared(r)\n if r2 < SMALL_NUM:\n return np.inf\n sigma2 = sigma*sigma\n if math.pow(2, 1/3)*sigma2 < r2:\n return 0\n sigr = sigma2/r2\n sigr = sigr*sigr*sigr\n return 4*epsilon*(sigr*(sigr - 1) + 1/4)\n\n# # turns out this can be made into a clever weeks_chandler_anderson call\n# @jit(nopython=True)\n# def confined_in_sphere(x, r_x, r_conf, epsilon=1):\n# \"\"\"E_confinement = E_WCA(r_conf - ||x||).\"\"\"\n# r = r_conf - np.linalg.norm(x)\n\ndef num_spheres_from_density(target_density, sphere_radii, confinement_radius):\n # num_spheres*sphere_volume = target_density*confinement_volume\n sphere_volume = volume_of_sphere(sphere_radii)\n confinement_volume = volume_of_sphere(confinement_radius)\n num_spheres = math.floor(target_density*confinement_volume/sphere_volume)\n return num_spheres\n\ndef initial_locations(num_spheres, sphere_radii, confinement_radius):\n \"\"\"Get uniformly random initial locations for Metropolis Algorithms to\n start at.\"\"\"\n num_successful = 0\n sphere_centers = np.zeros((num_spheres, 3))\n while num_successful < num_spheres:\n new_pos = 2*confinement_radius*(np.random.random_sample((3,)) - 1/2)\n if is_inside(new_pos, sphere_radii, confinement_radius):\n sphere_centers[num_successful, :] = new_pos\n num_successful += 1\n return sphere_centers\n\n# @jit(nopython=True)\ndef volume_of_sphere(radius):\n return (4/3)*math.pi*(radius**3)\n\ndef single_wca_energy(sphere_pos, sphere_idx, sphere_centers, num_spheres, sphere_radii,\n confinement_radius=1, energy_outside=np.inf):\n \"\"\"Return contribution to energy of a particle at position sphere_pos if it\n replaced the particle with index sphere_idx in the list of particles\n sphere_centers corresponding to spheres of radius sphere_radii inside of a\n confining sphere of radius confinement_radius=1 centered at the origin.\n Distance from confinement has an energetic contribution equal to being that\n same distance from another sphere.\n\n Use energy_outside=np.inf to set the energy associated with being totally\n outside the confinement. 
Set energy_outside to None to honor the CWA\n potential symmetrically outside the sphere.\"\"\"\n energy = 0\n dist_to_bdry = confinement_radius - np.linalg.norm(sphere_pos)\n if dist_to_bdry < 0 and energy_outside is not None:\n energy += energy_outside\n else:\n # vector with correct magnitude to pass to weeks_chandler_anderson\n vec_to_bdry = np.array([dist_to_bdry, 0, 0])\n energy += weeks_chandler_anderson(vec_to_bdry, 2*sphere_radii)\n for sj in range(num_spheres):\n if sj == sphere_idx:\n continue\n energy += weeks_chandler_anderson(sphere_pos - sphere_centers[sj,:], 2*sphere_radii)\n return energy\n\n# @jit(nopython=True)\ndef total_wca_energy(sphere_centers, num_spheres, sphere_radii,\n confinement_radius=1, energy_outside=np.inf):\n \"\"\"Return total energy of particles in sphere_centers corresponding to\n spheres of radius sphere_radii inside of a confining sphere of radius\n confinement_radius=1 centered at the origin. Distance from confinement has an energetic\n contribution equal to being that same distance from another sphere.\n\n Use energy_outside=np.inf to set the energy associated with being totally\n outside the confinement. Set energy_outside to None to honor the CWA\n potential symmetrically outside the sphere.\"\"\"\n energy = 0\n for si in range(num_spheres):\n # confinement energy\n dist_to_bdry = confinement_radius - np.linalg.norm(sphere_centers[si,:])\n if dist_to_bdry < 0 and energy_outside is not None:\n energy += energy_outside\n else:\n # vector with correct magnitude to pass to weeks_chandler_anderson\n vec_to_bdry = np.array([dist_to_bdry, 0, 0])\n energy += weeks_chandler_anderson(vec_to_bdry, 2*sphere_radii)\n # interaction energy\n for sj in range(si):\n energy += weeks_chandler_anderson(sphere_centers[si,:] - sphere_centers[sj,:], 2*sphere_radii)\n return energy\n\ndef norm_step_mc(num_steps, sphere_centers, num_spheres, sphere_radii, confinement_radius=1,\n step_size=None, beta_epsilon=0.665, energy_outside=np.inf):\n \"\"\"Peform num_steps monte carlo steps on the set of spheres with\n sphere_centers, sphere_radii in a confinement centered at the origin of\n radius confinement_radius. At each step, move one bead by a gaussian amount\n with std dev step_size(default->sphere_radii). The beads are assumed to\n have a weeks_chandler_anderson potential between them with sigma=sphere_radii, and\n epsilon and the temperature are determined by beta_epsilon. 
The confinement\n sphere is also weeks_chandler_anderson.\"\"\"\n # default step size to be on average the size of the bead\n step_size = sphere_radii if step_size is None else step_size\n tot_energy_change = 0\n for i in range(num_steps):\n si = np.random.randint(num_spheres)\n # new positions energy calculation, and exit early if possible\n new_pos = sphere_centers[si,:] + step_size*np.random.standard_normal((3,))\n new_dist_to_bdry = confinement_radius - np.linalg.norm(new_pos)\n if new_dist_to_bdry < 0: # then you're outisde the bdry\n continue\n new_potential = single_wca_energy(new_pos, si, sphere_centers, num_spheres, sphere_radii, confinement_radius, energy_outside)\n old_potential = single_wca_energy(sphere_centers[si,:], si, sphere_centers, num_spheres, sphere_radii, confinement_radius, energy_outside)\n pot_diff = new_potential - old_potential\n # MH acceptance rule, most short-circuitable form\n if pot_diff > 0 or np.log(np.random.rand()) >= -beta_epsilon*pot_diff:\n continue\n # if we get here, the monte carlo step is accepted.\n sphere_centers[si,:] = new_pos\n tot_energy_change += pot_diff\n return sphere_centers, tot_energy_change\n\ndef sphere_dispersal_mc(num_steps, target_density, sphere_radii, confinement_radius=1,\n steps_per_check=1000, step_size=None,\n beta_epsilon=0.665, initial_centers=None):\n \"\"\"Perform MCMC for num_steps after uniform position initialization of\n spheres with effective hard repulsive size sphere_radii (accomplished by\n weeks_chandler_anderson potential with barker-henderson mean collision\n diameter set to equal sigma) at target_density inside of a sphere with size\n confinement_radius.\n\n For now, beta_epsilon fixed to 0.665, per what Tom did in his thesis.\"\"\"\n num_spheres = num_spheres_from_density(target_density, sphere_radii, confinement_radius)\n if initial_centers is None:\n sphere_centers = initial_locations(num_spheres, sphere_radii, confinement_radius)\n else:\n sphere_centers = initial_centers\n # break run into shorter sprints of 1000 steps, report energy change after\n # each 1000 steps\n num_checks = math.floor(num_steps/steps_per_check)\n energy = total_wca_energy(sphere_centers, num_spheres, sphere_radii, confinement_radius)\n for i in range(num_checks):\n # run MC, reports energy change\n sphere_centers, d_energy = norm_step_mc(num_steps=steps_per_check,\n sphere_centers=sphere_centers, num_spheres=num_spheres,\n sphere_radii=sphere_radii, confinement_radius=confinement_radius,\n step_size=step_size, beta_epsilon=beta_epsilon)\n print(energy + d_energy)\n energy = total_wca_energy(sphere_centers, num_spheres, sphere_radii, confinement_radius)\n print(energy)\n return sphere_centers\n\ndef mc_minimize_energy(target_density, sphere_radii, confinement_radius=1,\n steps_per_check=10000, step_size=None, beta_epsilon=0.665,\n initial_centers=None):\n \"\"\"Perform MCMC after uniform position initialization of\n spheres with effective hard repulsive size sphere_radii (accomplished by\n weeks_chandler_anderson potential with barker-henderson mean collision\n diameter set to equal sigma) at target_density inside of a sphere with size\n confinement_radius. Rerun in batches of steps_per_check MC steps until the\n energies converge. 
To check if energy has converged, we do the lazy thing:\n wait till it increases for the first time then just run one more MC batch\n after that.\n\n For now, beta_epsilon fixed to 0.665, per what Tom did in his thesis.\"\"\"\n num_spheres = num_spheres_from_density(target_density, sphere_radii, confinement_radius)\n if initial_centers is None:\n sphere_centers = initial_locations(num_spheres, sphere_radii, confinement_radius)\n else:\n sphere_centers = initial_centers\n energy = total_wca_energy(sphere_centers, num_spheres, sphere_radii, confinement_radius)\n i = 0\n while True:\n print(\"Total Energy after batch {i}: {energy: =10.8g}\".format(i=i, energy=energy))\n sphere_centers, d_energy = norm_step_mc(num_steps=steps_per_check,\n sphere_centers=sphere_centers, num_spheres=num_spheres,\n sphere_radii=sphere_radii, confinement_radius=confinement_radius,\n step_size=step_size, beta_epsilon=beta_epsilon)\n i += 1\n energy += d_energy\n if d_energy > 0:\n print(\"Total Energy after batch {i}: {energy: =10.8g}\".format(i=i, energy=energy))\n sphere_centers, d_energy = norm_step_mc(num_steps=steps_per_check,\n sphere_centers=sphere_centers, num_spheres=num_spheres,\n sphere_radii=sphere_radii, confinement_radius=confinement_radius,\n step_size=step_size, beta_epsilon=beta_epsilon)\n i += 1\n energy += d_energy\n print(\"Total Energy after batch {i}: {energy: =10.8g}\".format(i=i, energy=energy))\n break\n return sphere_centers\n\ndef plot_spheres(sphere_centers, radii, **kwargs):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n palette = sns.color_palette('hls', 12)\n for i,c in enumerate(sphere_centers):\n color = palette[i % len(palette)]\n wplot.draw_sphere(c, radii, colors=color, axes=ax, **kwargs)\n return ax\n","sub_path":"wlcsim/mc/sphere_insertion.py","file_name":"sphere_insertion.py","file_ext":"py","file_size_in_byte":11199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"343886204","text":"import os\nimport cv2\nimport csv\nimport time\nimport subprocess\nimport shutil\nimport pandas as pd\n\ndirct = os.getcwd() #r'/home/soumyad/mlproj/sdd\nsdd = os.path.join(os.path.split(dirct)[0], \"stanford_dd_yolo\")\nimg_dir = os.path.join(sdd, \"images\")\nlabel_dir = os.path.join(sdd, \"labels\")\n_list_dir = [sdd, img_dir, label_dir]\n\ntest_csv = os.path.join(sdd, \"test.csv\")\ntest_csv_cols = []\n\nfor dirs in _list_dir:\n try:\n os.mkdir(dirs)\n except OSError as error:\n print(error)\n \ntotal_labels = 0\nstart = time.time()\nno_of_vid = 0\nstart = time.time()\n#vid_data_dir = ''\nfor subdir, dirs, files in os.walk(dirct):\n '''print('subdir', subdir)\n print('dirs:', dirs)\n print('files:', files)'''\n \n for file in files:\n if file.endswith('.mov'):\n #no_of_vid+=1\n f = os.path.join(subdir, file)\n vid_name = f.split('/')[-2]\n #if os.path.isfile(f):\n print('\\n')\n print('current subdirectory: ', subdir)\n #print('count:', c)\n fr_path = os.path.join(subdir, 'frames')\n try:\n os.mkdir(fr_path)\n except OSError as error:\n print(error)\n \n print('Video file: ',f)\n print('Frame dir path: ',fr_path)\n print('\\n')\n \n '''query = \"ffmpeg -i \" + f + \" -qscale:v 2 -crf 18 \" + fr_path + \"/\" + vid_name + \"_%d.jpg\"\n print(query)\n response = subprocess.Popen(query, shell=True, stdout=subprocess.PIPE).stdout.read()\n s = str(response).encode('utf-8')'''\n print(vid_name+\" is Done...\")\n print('current subdirectory: ', subdir)\n \n label_path = os.path.join(dirct, 'labels', vid_name, \"annotations.txt\")\n 
df_labels = pd.read_csv(label_path, names=['id', 'left', 'top', 'right', 'bottom', 'frames','a','b','c','class'], sep=' ') #we are not concerned with some columns e.g. a,b,c\n df_labels.sort_values(['frames'], axis=0, ascending=True, inplace=True)\n selected = pd.DataFrame(df_labels, columns = ['left', 'top', 'right', 'bottom', 'frames','class'])\n print(selected)\n \n frame_list = []\n frame = 0\n for x, row in selected.iterrows():\n frame = int(row['frames'])\n \n if frame%89 ==0:\n \n fr_name = vid_name+'_'+str(frame+1)+'.jpg'\n frame_path = os.path.join(fr_path, fr_name)\n img = cv2.imread(frame_path)\n frame_txt = fr_name.split('.')[0] +'.txt'\n labels_per_frame = os.path.join(label_dir, frame_txt)\n \n rows = selected.loc[selected['frames'] == frame]\n each_line=[]\n if frame not in frame_list:\n frame_list.append(frame)\n for i, obj in rows.iterrows():\n class_lbl = 0\n left = int(obj['left'])\n top = int(obj['top'])\n right = int(obj['right'])\n bottom= int(obj['bottom'])\n _class= obj['class']\n if _class =='Pedestrian':\n class_lbl=1\n elif _class == 'Biker':\n class_lbl=2\n elif _class == 'Skater':\n class_lbl=3\n elif _class == 'Cart':\n class_lbl=4\n elif _class == 'Car':\n class_lbl=5\n elif _class == 'Bus':\n class_lbl=6\n total_labels+=1\n x_norm = (left+((right-left)/2))/img.shape[1]\n y_norm = (top+((bottom-top)/2))/img.shape[0]\n width_norm = (right-left)/(img.shape[1])\n height_norm = (bottom-top)/(img.shape[0])\n _each_line = [class_lbl, x_norm, y_norm, width_norm, height_norm]\n each_line.append(_each_line)\n \n with open(labels_per_frame, 'w') as file1:\n for data in each_line:\n for _data in data:\n file1.writelines(\"%s \" %_data)\n file1.writelines('\\n')\n file1.close()\n for _frame in frame_list:\n fr_name1 = vid_name+'_'+str(_frame+1)\n shutil.copy(os.path.join(fr_path, fr_name1+'.jpg'), img_dir)\n print(fr_name1)\n test_csv_cols.append({'image': fr_name1+'.jpg',\n 'label': fr_name1+'.txt'})\n print(_each_line)\n print(left, right, top, bottom)\n print('height: {}, width: {}'.format(img.shape[0], img.shape[1]))\n \n print(frame_list)\n print('Last frame:{}'.format(frame))\n print('Done')\nprint(f'total {total_labels} annotations')\nheader = ['image', 'label']\nwith open(test_csv, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=header)\n writer.writeheader()\n writer.writerows(test_csv_cols)\ntime.sleep(1)\nend = time.time()\nprint(f\"Time taken: {(end-start)/60} minutes\")\n","sub_path":"data_preparation/get_test_data.py","file_name":"get_test_data.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"411686017","text":"# coding=utf-8\nimport pymysql\nimport time\n\ndef create_conn():\n host = \"183.131.202.146\"\n user = \"education\"\n password = \"education\"\n db = \"testeducation\"\n conn = pymysql.connect(host=host, user=user, password=password, db=db)\n return conn\n\n\ndef clear_database(conn):\n for table in ['`teacher_question`', '`testpaper_question`', '`student_testpaper_question`']:\n with conn.cursor() as cursor:\n try:\n sql = \"SELECT * FROM %s WHERE testpaper_id like '%%1111%%'\" % table\n print(sql)\n cursor.execute(sql)\n res = cursor.fetchall()\n print(type(res))\n print(res)\n if res:\n sql = \"DELETE FROM %s WHERE testpaper_id like '%%1111%%'\" % table\n cursor.execute(sql)\n except Exception as e:\n conn.rollback()\n raise e\n conn.commit()\n\n\ndef create_done_exercise_list(conn):\n clear_database(conn)\n # print(db)\n sql1 = 
\"INSERT INTO `testeducation`.`teacher_question`\" \\\n \"(`testpaper_id`, `class_id`, `teacher_id`, `student_id`, `testpaper_type`, `testpaper_name`, `create_time`, \" \\\n \"`creater_id`, `item_id`, `save_flag`, `source_paper_id`, `id`) \" \\\n \"VALUES ('1111-1111-1111-1111-%s', NULL, '010101001001', '0101010011404023', 1,\" \\\n \" '陈翰韬强化训练-1111-1111-%s', '%s', '010102001001', NULL, 1, '0101020016001',\" \\\n \"'1111-1111-1111-1111-%s')\"\n\n sql2 = \"INSERT INTO `testeducation`.`testpaper_question`(`testpaper_id`, `question_id`, `point_id`, `rate`) \" \\\n \"VALUES ('1111-1111-1111-1111-%s', '0101020015001021', 'Ae01', 0.35135135135135137)\"\n sql3 = \"INSERT INTO `testeducation`.`student_testpaper_question`\" \\\n \"(`student_id`, `testpaper_id`, `question_id`, `student_question_subtract`, `student_question_score`,\" \\\n \" `student_question_score_rate`, `student_question_answer`, `student_question_corret`,\" \\\n \" `student_question_answer_point`, `student_question_point_correct`) \" \\\n \"VALUES ('0101010011404023', '1111-1111-1111-1111-%s',\" \\\n \" '0101020015001021', NULL, NULL, NULL, 'test', 0, NULL, NULL) \"\n\n create_time = time.strftime(\"%Y-%m-%d %X\", time.localtime())\n for i in range(10):\n print(i)\n try:\n with conn.cursor() as cursor:\n cursor.execute(sql1 % (i, i, create_time, i))\n cursor.execute(sql2 % i)\n cursor.execute(sql3 % i)\n conn.commit()\n except Exception as e:\n print(e)\n\n\ndef remove_conn(conn):\n conn.close()\n\n\ndef setup():\n print(\"Preparing for testting...\")\n conn = create_conn()\n create_done_exercise_list(conn)\n remove_conn(conn)\n\n\ndef teardown():\n print(\"Cleaning...\")\n conn = create_conn()\n clear_database(conn)\n remove_conn(conn)\n\n\ndef compare_result(list1, list2):\n flag = 1\n for a in list1:\n if a in list2:\n flag = 0\n break\n return flag\n\n\ndef judge_finished_testpaper(conn, testpaper_id, student_id ):\n sql1 = (\"SELECT count(a.question_id) FROM `testpaper_question` a, question b\" \\\n \" WHERE a.testpaper_id='%s' \" \\\n \"AND a.question_id=b.question_id AND b.question_type_chinese IN\" \\\n \" ('选择题', '填空题', '判断题', '计算题') and b.question_abnormal_reason is null\" % testpaper_id)\n# print(sql1)\n sql2 = (\"SELECT count(question_id) FROM `student_testpaper_question` \" \\\n \"where testpaper_id='%s' and student_id='%s'\" % (testpaper_id, student_id))\n# print(sql2)\n with conn.cursor() as cursor:\n cursor.execute(sql1)\n question_num = cursor.fetchone()\n# print(question_num)\n cursor.execute(sql2)\n question_finished_num = cursor.fetchone()\n# print(question_finished_num)\n if question_num == question_finished_num:\n finish_flag = 1\n# print(\"finished\")\n else:\n finish_flag = 0\n# print(\"unfishished\")\n return finish_flag\n\n\ndef count_finished_testpaper(student_id):\n sql = \"SELECT testpaper_id FROM `teacher_question` WHERE student_id='%s' % student_id\"\n conn = create_conn()\n with conn.cursor() as cursor:\n cursor.execute(sql)\n res = cursor.fetchall()\n# print(res)\n testpaper_count = 0\n for testpaper_id in res:\n finished_flag = judge_finished_testpaper(conn, testpaper_id[0], student_id)\n if finished_flag:\n print(testpaper_id[0])\n testpaper_count = testpaper_count + 1\n conn.close()\n return testpaper_count\n\n\n\n\nif __name__ == \"__main__\":\n# testpaper_id = '010102001001-11303033-20190117-145315-21'\n student_id = '0101010011404023'\n# judge_finished_testpaper(testpaper_id, student_id)\n res = count_finished_testpaper(student_id)\n 
print(res)","sub_path":"test/utils/Make_finished_excercise_list.py","file_name":"Make_finished_excercise_list.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"43280373","text":"from .settings import *\n\nDEBUG = True\n\nALLOWED_HOSTS = []\n\nROOT_URLCONF = 'mysite.urls'\n\nINSTALLED_APPS += [\n 'debug_toolbar',\n]\n\nPOSTGRES_HOST = os.environ.setdefault('POSTGRES_HOST', 'localhost')\nPOSTGRES_PORT = os.environ.setdefault('POSTGRES_PORT', '54320')\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'softwareArcihtechDB',\n 'USER': 'db_user',\n 'PASSWORD': 'password',\n 'HOST': POSTGRES_HOST,\n 'PORT': POSTGRES_PORT,\n }\n}\n\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nDEBUG_TOOLBAR_CONFIG = {\n 'JQUERY_URL': '',\n}","sub_path":"mysite/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"172095564","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nmydata=pd.read_csv(\"C://Users//Richa Sharma//Desktop//pythonall//diabetes.csv\")\r\nlist=[]\r\ns=[]\r\nfor i in range(len(mydata)):\r\n\tc=0\r\n\tsum=0\r\n\tif mydata['Pregnancies'][i] not in list:\r\n\t\tlist.append(mydata['Pregnancies'][i])\r\n\t\tfor j in range(len(mydata)):\r\n\t\t\tif mydata['Pregnancies'][i]==mydata['Pregnancies'][j]:\r\n\t\t\t\tsum=sum+mydata['DiabetesPedigreeFunction'][j]\r\n\t\t\t\tc=c+1\r\n\t\tprint(sum/c)\r\n\t\ts.append(sum/c)\r\nprint(list)\r\nprint(s)\r\nplt.plot(list,s)\r\nplt.show()","sub_path":"diab.py","file_name":"diab.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"356509590","text":"import time\nimport os\nimport RPi.GPIO as GPIO\nimport Adafruit_GPIO.I2C as I2C\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nSCL = 3\nSDA = 5\nPOWER_CONTROL = 19 # should be 21 but didn't include level conversion for p chan\nI2C_CONTROL = 40\nGPIO.setup(POWER_CONTROL, GPIO.OUT)\nGPIO.setup(I2C_CONTROL, GPIO.OUT)\ni2c_power = False\n\nleft_addr = 0x50\nright_addr = 0x51\n\n# these values from the messages the huble flasher prints\noffset = 0 + 4\nlength = 2240 - 4\n\ndef get_version_and_cksum(addr):\n print(\"-\" * 50)\n print(\"requesting info from %x\" % addr)\n\n \"\"\"\n turn off the power - have to unload the i2c module and then make the scl and sda pins low\n left side of the GP had about 2.2v on the vbus line, right side 1.2v. 
\n left side would never start bootloader\n \"\"\"\n GPIO.output(POWER_CONTROL, False)\n GPIO.output(I2C_CONTROL, False)\n if i2c_power:\n os.system(\"sudo rmmod i2c-bcm2835\")\n GPIO.setup(SCL, GPIO.OUT)\n GPIO.setup(SDA, GPIO.OUT)\n GPIO.output(SCL, False)\n GPIO.output(SDA, False)\n os.system(\"sudo modprobe i2c-bcm2835\")\n i2c = I2C.get_i2c_device(address=addr, busnum=1)\n time.sleep(0.1)\n\n # turn on power\n GPIO.output(POWER_CONTROL, True)\n GPIO.output(I2C_CONTROL, True)\n time.sleep(0.100)\n\n TWI_BOOTLOADER_VERSION = 0x06\n \"\"\" \n this is a hack because pi doesn't support clock stretching\n the attiny holds sda low until it has finished calculating the checksum (about 20ms)\n\n so, have to catch the exception, wait for the calculation to finish then ask for the result.\n \"\"\"\n try:\n print(\"offset: %x %x, length: %x %x\" % (offset & 0xFF, offset >> 8, length & 0xFF, length >> 8))\n i2c.writeList(TWI_BOOTLOADER_VERSION, [offset & 0xFF, offset >> 8, length & 0xFF, length >> 8])\n except IOError as e:\n print(\"i2c timeout (expected due to cksum calc), waiting 50 ms\")\n time.sleep(0.05)\n try:\n version, cksum_low, cksum_hi = i2c.readList(0, 3)\n # version should be 1\n print(\"version = %d\" % version)\n print(\"cksum = %d\" % (cksum_low + (cksum_hi << 8)))\n except IOError as e:\n print(\"no result\")\n\nget_version_and_cksum(right_addr)\nget_version_and_cksum(left_addr)\n","sub_path":"attiny-bootloader/check_bootloader_version.py","file_name":"check_bootloader_version.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176251361","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..core.models import StrictTimestamp\nfrom ..session.models import Session\n\n\nclass SessionMaterial(StrictTimestamp):\n title = models.CharField(blank=False, max_length=30)\n session_id = models.ForeignKey(Session, on_delete=models.DO_NOTHING, blank=False)\n session_material = models.FileField(blank=False)\n session_material_description = models.TextField(blank=False)\n\n\n class Meta:\n verbose_name = _('Session Material')\n verbose_name_plural = _('Session Materials')\n\n def __str__(self):\n return \"\".format(self.title)","sub_path":"app/session_material/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"90457449","text":"#find_open_slots.py\n\nimport google_api_call as google #Availble time slot for each individual participant\nimport functions as func\t\t\t#Helper functions\nimport datetime\nimport sys\n\nclass MeetingAvailability:\n\tdef __init__(self, meetingMatrix, participantData, numOfWindowSlots, timeWindowSlots):\n\t\tself.meetingMatrix = meetingMatrix\n\t\tself.participants = participantData\n\t\tself.numOfWindowSlots = numOfWindowSlots\n\t\tself.timeWindowSlots = timeWindowSlots\n\n\tdef getMeetingMatrix(self):\n\t\treturn self.meetingMatrix\n\n\tdef getParticipants(self):\n\t\treturn self.participants\n\n\tdef getNumOfWindowSlots(self):\n\t\treturn self.numOfWindowSlots\n\n\tdef getTimeWindowSlots(self):\n\t\treturn self.timeWindowSlots\n\n\ndef createMeetingMatrix():\t#args are for test suite only\n\t#print \"Test Arg length: \", len(sys.argv)\n\t#print \"Test Args: \", str(sys.argv)\n\tif len(sys.argv) >= 4:\t#run program with test data\n\t\tstartTimeWindow = str(sys.argv[1])\n\t\tendTimeWindow = 
str(sys.argv[2])\n\t\ttimeWindowData = google.getTimeWindowData(startTimeWindow, endTimeWindow) #get timeWindow w/ test data\n\n\t\t#Create list of test emails from command line\n\t\temailList = list()\n\t\tif len(sys.argv)>= 4:\n\t\t\tfor x in range(3, len(sys.argv)):\t\n\t\t\t\temailList.append(sys.argv[x])\t\n\t\tparticipantData = google.getParticipantData(timeWindowData, emailList)\n\telse:\t#run program with user data\n\t\ttimeWindowData = google.getTimeWindowData(None, None) #get time window data from Google API call\n\n\t\t#Get participants data from Google API call\n\t\tparticipantData = google.getParticipantData(timeWindowData, None)\n\n\t# parseGoogleTime returns class object for getday - getYear\n\t#Parse start and end times of timeWindow provided by Actor and store in object GoogleTime\n\ttimeWindowStart = func.parseGoogleTime(timeWindowData[0])\n\ttimeWindowEnd = func.parseGoogleTime(timeWindowData[1])\n\t\n\t#Calculate the number of 30 minute slots in timeWindow\n\tminutesRange = func.hoursRange(timeWindowStart, timeWindowEnd)\n\t\n\t#Create Actor's timeWindow slots for meeting times in Posix format\n\ttimeWindowSlots = func.createTimeSlots(timeWindowStart, timeWindowEnd, 30)\n\tnumOfWindowSlots = len(timeWindowSlots)\n\tnumOfParticipants = len(participantData)\n\n\t#Create meeting matrix for each timeWindowSlot and participant\n\tmeetingMatrix = list()\t#binary matrix displaying availability for all participants\n\tfor i in range(0, numOfParticipants):\t\n\t\tmeetingMatrix.append('Null')\t\t\t#initiate matrix to null\n\t\n\tidxActor = 0\n\tidxEvent = 0\n\tfor idxPartpnt, elePartpntm in enumerate(participantData):\t\t#Each participant\n\t\ttimeSlots = list()\t#temp storage for each participant's time slot\n\t\t#print '----------------------------'\n\t\t#print elePartpntm.getName()\n\t\tnumOfParticipantSlots = len(participantData[idxPartpnt].getBusyTimeSlot())\n\t\t#print 'numOfParticipantSlots = ', numOfParticipantSlots\n\t\tif participantData[idxPartpnt].getBusyTimeSlot()[0] is None: #participant available during all timeWindow\n\t\t\tfor x in range(0, numOfWindowSlots):\t\t#Actor's desired meeting slots\n\t\t\t\ttimeSlots.append(0)\t\t\t\t\t\t#participant available during time slot\t\n\t\t\tmeetingMatrix[idxPartpnt] = timeSlots\n\n\t\telse:\t#participant has scheduled events during time window\n\t\t\twhile(idxActor < numOfWindowSlots):\t\t#Actor's desired meeting slots\n\t\t\t\twindowSlot = timeWindowSlots[idxActor]\n\t\t\t\tparticipantSlot = participantData[idxPartpnt].getBusyTimeSlot()\n\t\n\t\t\t\t#Compare participant's time slot to Actor's desired meeting slots\n\t\t\t\t#print 'idxEvent = ', idxEvent\n\t\t\t\t#print 'idxActor = ', idxActor\n\t\t\t\tif(idxEvent < numOfParticipantSlots):\t\t#Participant time slot\n\t\t\t\t\tparticipantPosix = func.timeStrToPosix(participantSlot[idxEvent])\n\t\t\t\t\t#print func.posixToPST(participantPosix)\n\t\t\t\t\t#print func.posixToPST(windowSlot)\n\t\n\t\t\t\t\tif(participantPosix == windowSlot):\n\t\t\t\t\t\t#print 'equal'\n\t\t\t\t\t\ttimeSlots.append(1)\t\t#participant not available in meeting slot\n\t\t\t\t\t\tidxActor += 1\t\t\t#next Actor's meeting slot\n\t\t\t\t\t\tidxEvent += 1\t\t\t#next participant's time slot\n\t\t\t\t\t\t#continue\t\n\t\n\t\t\t\t\tif(participantPosix < windowSlot):\n\t\t\t\t\t\t#print 'less'\n\t\t\t\t\t\ttimeSlots.append(0)\t\t#participant available in meeting slot\n\t\t\t\t\t\tidxEvent += 1\t\t\t#next participant's time slot\n\t\t\t\t\t\t#continue\t\n\t\n\t\t\t\t\tif(participantPosix > 
windowSlot):\n\t\t\t\t\t\t#print 'more'\n\t\t\t\t\t\ttimeSlots.append(0)\t\t#participant available in meeting slot\n\t\t\t\t\t\tidxActor += 1\t\t\t#next Actor's meeting slot\n\t\t\t\t\t\t#continue\t\n\t\n\t\t\t\t#Go to next participant if all participant time slots is checked\n\t\t\t\tif(idxEvent == numOfParticipantSlots or idxActor == numOfWindowSlots):\n\t\t\t\t\t#Set remainding meetingMatrix to 0, indicating participant is available\n\t\t\t\t\tfor j in range(idxActor, numOfWindowSlots):\n\t\t\t\t\t\ttimeSlots.append(0)\t\t#mark remainder as unvailable\n\t\n\t\t\t\t\tidxActor = 0\t#reset iterator to first Actor's meeting slot\n\t\t\t\t\tidxEvent = 0\t#reset iterator to first time slot for next participant\n\t\t\t\t\t\n\t\t\t\t\t#Store participant availability in meeting matrix\n\t\t\t\t\tmeetingMatrix[idxPartpnt] = timeSlots\n\t\t\t\n\t\t\t\t\t#print meetingMatrix\n\t\t\t\t\tbreak\t\t#got to next participant\n\n\n\t#store meetingMatrix in MeetingAvailability object\n\tmeetingAvailability = MeetingAvailability(meetingMatrix, participantData, numOfWindowSlots, timeWindowSlots)\n\t#print 'Fun starts here'\n\t#print meetingAvailability.getMeetingMatrix()\n\t#print meetingAvailability.getParticipants()\n\t#print meetingAvailability.getNumOfWindowSlots()\n\t#print meetingAvailability.getTimeWindowSlots()\n\treturn meetingAvailability\n\t#return meetingMatrix\n","sub_path":"find_open_slots.py","file_name":"find_open_slots.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29775566","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\n#############################\r\nwidthimg = 360\r\nheightimg = 540\r\n############################\r\n\r\n#cam feed\r\ncap = cv.VideoCapture(0)\r\ncap.set(3, widthimg) #changing width\r\ncap.set(4, heightimg) #changing hight\r\ncap.set(10, 100) #changing brightness\r\n\r\ndef preprocessing(img):\r\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n blur = cv.GaussianBlur(gray, (5, 5), 1)\r\n cannyedge = cv.Canny(blur, 200, 200)\r\n kernel = np.ones((3, 3))\r\n dailation = cv.dilate(cannyedge, kernel, iterations = 2)\r\n result = cv.erode(dailation, kernel, iterations = 1)\r\n\r\n return result\r\n\r\n\r\n# function to get contours\r\ndef getContours(img, imgcount):\r\n biggest = np.array([])\r\n maxArea = 0\r\n contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\r\n for cnt in contours:\r\n area = cv.contourArea(cnt)\r\n if area > 10:\r\n #cv.drawContours(imgcount, cnt, -1, (0, 255, 0), 2)\r\n peri = cv.arcLength(cnt, True)\r\n approx = cv.approxPolyDP(cnt, 0.02 * peri, True)\r\n if area > maxArea and len(approx)==4:\r\n biggest = approx\r\n maxArea = area\r\n cv.drawContours(imgcount, biggest, -1, (0, 255, 0), 20)\r\n return biggest\r\n\r\ndef reorder(mypoints):\r\n #mypoints current shape is (4,1,2)need to change it to (4, 2)\r\n mypoints = mypoints.reshape((4, 2))\r\n mypointsresult = np.zeros((4, 1, 2), np.int32)\r\n add = mypoints.sum(1)\r\n mypointsresult[0] = mypoints[np.argmin(add)]\r\n mypointsresult[3] = mypoints[np.argmax(add)]\r\n# print(add)\r\n\r\n diff = np.diff(mypoints, axis = 1)\r\n mypointsresult[1] = mypoints[np.argmin(diff)]\r\n mypointsresult[2] = mypoints[np.argmax(diff)]\r\n # print(diff)\r\n # print(mypointsresult)\r\n return mypointsresult\r\n\r\n\r\n#get warp prospective\r\ndef getWarp(img, biggest):\r\n biggest = reorder(biggest)\r\n pt1 = np.float32(biggest)\r\n pt2 = np.float32([[0, 0], [widthimg, 0], [0, 
heightimg], [widthimg, heightimg]])\r\n matrix = cv.getPerspectiveTransform(pt1, pt2)\r\n imgout = cv.warpPerspective(img, matrix, (widthimg, heightimg))\r\n #removing pixcels from sides, use only if needed\r\n #imgout = imgout[20:imgout.shape[0]-20, 20:imgout.shape[1]-20]\r\n #imgout = cv.resize(imgout, (widthimg, heightimg))\r\n return imgout\r\n\r\n#main loop\r\nwhile True:\r\n isTrue, frame = cap.read()\r\n cv.resize(frame, (widthimg, heightimg))\r\n imgcount = frame.copy()\r\n result = preprocessing(frame)\r\n biggest = getContours(result, imgcount)\r\n if biggest.size != 0:\r\n #print(biggest)\r\n warped = getWarp(frame, biggest)\r\n cv.imshow('frame', frame)\r\n cv.imshow('imgcount', imgcount)\r\n cv.imshow('warped', warped)\r\n else:\r\n cv.imshow('frame', frame)\r\n cv.imshow('imgcount', imgcount)\r\n if cv.waitKey(20) & 0xFF == ord('q'):\r\n break\r\ncap.release()\r\ncv.destroyAllWindows()","sub_path":"Document Scanner/DocumentScanner.py","file_name":"DocumentScanner.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"409723448","text":"\"\"\"Tests of the climate entity of the balboa integration.\"\"\"\nfrom __future__ import annotations\n\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\n\nfrom homeassistant.components.climate import (\n ATTR_FAN_MODE,\n ATTR_HVAC_ACTION,\n ATTR_HVAC_MODES,\n ATTR_MAX_TEMP,\n ATTR_MIN_TEMP,\n ATTR_PRESET_MODE,\n ATTR_PRESET_MODES,\n FAN_HIGH,\n FAN_LOW,\n FAN_MEDIUM,\n FAN_OFF,\n ClimateEntityFeature,\n HVACAction,\n HVACMode,\n)\nfrom homeassistant.const import ATTR_TEMPERATURE, TEMP_FAHRENHEIT\nfrom homeassistant.core import HomeAssistant\n\nfrom . import init_integration\n\nfrom tests.components.climate import common\n\nFAN_SETTINGS = [\n FAN_OFF,\n FAN_LOW,\n FAN_MEDIUM,\n FAN_HIGH,\n]\n\nHVAC_SETTINGS = [\n HVACMode.HEAT,\n HVACMode.OFF,\n HVACMode.AUTO,\n]\n\nENTITY_CLIMATE = \"climate.fakespa_climate\"\n\n\nasync def test_spa_defaults(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test supported features flags.\"\"\"\n await init_integration(hass)\n\n state = hass.states.get(ENTITY_CLIMATE)\n\n assert state\n assert (\n state.attributes[\"supported_features\"]\n == ClimateEntityFeature.TARGET_TEMPERATURE | ClimateEntityFeature.PRESET_MODE\n )\n assert state.state == HVACMode.HEAT\n assert state.attributes[ATTR_MIN_TEMP] == 10.0\n assert state.attributes[ATTR_MAX_TEMP] == 40.0\n assert state.attributes[ATTR_PRESET_MODE] == \"Ready\"\n assert state.attributes[ATTR_HVAC_ACTION] == HVACAction.IDLE\n\n\nasync def test_spa_defaults_fake_tscale(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test supported features flags.\"\"\"\n client.get_tempscale.return_value = 1\n\n await init_integration(hass)\n\n state = hass.states.get(ENTITY_CLIMATE)\n\n assert state\n assert (\n state.attributes[\"supported_features\"]\n == ClimateEntityFeature.TARGET_TEMPERATURE | ClimateEntityFeature.PRESET_MODE\n )\n assert state.state == HVACMode.HEAT\n assert state.attributes[ATTR_MIN_TEMP] == 10.0\n assert state.attributes[ATTR_MAX_TEMP] == 40.0\n assert state.attributes[ATTR_PRESET_MODE] == \"Ready\"\n assert state.attributes[ATTR_HVAC_ACTION] == HVACAction.IDLE\n\n\nasync def test_spa_with_blower(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test supported features flags.\"\"\"\n client.have_blower.return_value = True\n\n config_entry = await init_integration(hass)\n\n # force a refresh\n await client.new_data_cb()\n await 
hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_CLIMATE)\n\n assert state\n assert (\n state.attributes[\"supported_features\"]\n == ClimateEntityFeature.TARGET_TEMPERATURE\n | ClimateEntityFeature.PRESET_MODE\n | ClimateEntityFeature.FAN_MODE\n )\n\n for fan_state in range(4):\n # set blower\n state = await _patch_blower(hass, config_entry, fan_state, client)\n assert state\n assert state.attributes[ATTR_FAN_MODE] == FAN_SETTINGS[fan_state]\n\n # test the nonsense checks\n for fan_state in (None, 70): # type: ignore[assignment]\n state = await _patch_blower(hass, config_entry, fan_state, client)\n assert state\n assert state.attributes[ATTR_FAN_MODE] == FAN_OFF\n\n\nasync def test_spa_temperature(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test spa temperature settings.\"\"\"\n\n config_entry = await init_integration(hass)\n\n # flip the spa into F\n # set temp to a valid number\n state = await _patch_spa_settemp(hass, config_entry, 0, 100.0, client)\n assert state\n assert state.attributes.get(ATTR_TEMPERATURE) == 38.0\n\n\nasync def test_spa_temperature_unit(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test temperature unit conversions.\"\"\"\n\n with patch.object(hass.config.units, \"temperature_unit\", TEMP_FAHRENHEIT):\n config_entry = await init_integration(hass)\n\n state = await _patch_spa_settemp(hass, config_entry, 0, 15.4, client)\n assert state\n assert state.attributes.get(ATTR_TEMPERATURE) == 15.0\n\n\nasync def test_spa_hvac_modes(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test hvac modes.\"\"\"\n\n config_entry = await init_integration(hass)\n\n # try out the different heat modes\n for heat_mode in range(2):\n state = await _patch_spa_heatmode(hass, config_entry, heat_mode, client)\n assert state\n modes = state.attributes.get(ATTR_HVAC_MODES)\n assert [HVACMode.AUTO, HVACMode.HEAT, HVACMode.OFF] == modes\n assert state.state == HVAC_SETTINGS[heat_mode]\n\n with pytest.raises(ValueError):\n await _patch_spa_heatmode(hass, config_entry, 2, client)\n\n\nasync def test_spa_hvac_action(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test setting of the HVAC action.\"\"\"\n\n config_entry = await init_integration(hass)\n\n # try out the different heat states\n state = await _patch_spa_heatstate(hass, config_entry, 1, client)\n assert state\n assert state.attributes[ATTR_HVAC_ACTION] == HVACAction.HEATING\n\n state = await _patch_spa_heatstate(hass, config_entry, 0, client)\n assert state.attributes[ATTR_HVAC_ACTION] == HVACAction.IDLE\n\n\nasync def test_spa_preset_modes(hass: HomeAssistant, client: MagicMock) -> None:\n \"\"\"Test the various preset modes.\"\"\"\n\n await init_integration(hass)\n\n state = hass.states.get(ENTITY_CLIMATE)\n assert state\n modes = state.attributes.get(ATTR_PRESET_MODES)\n assert [\"Ready\", \"Rest\", \"Ready in Rest\"] == modes\n\n # Put it in Ready and Rest\n modelist = [\"Ready\", \"Rest\"]\n for mode in modelist:\n client.heatmode = modelist.index(mode)\n await common.async_set_preset_mode(hass, mode, ENTITY_CLIMATE)\n await client.new_data_cb()\n await hass.async_block_till_done()\n\n state = hass.states.get(ENTITY_CLIMATE)\n assert state\n assert state.attributes[ATTR_PRESET_MODE] == mode\n\n # put it in RNR and test assertion\n client.heatmode = 2\n\n with pytest.raises(ValueError):\n await common.async_set_preset_mode(hass, 2, ENTITY_CLIMATE)\n\n\n# Helpers\nasync def _patch_blower(hass, config_entry, fan_state, client):\n \"\"\"Patch the blower state.\"\"\"\n 
client.get_blower.return_value = fan_state\n\n if fan_state is not None and fan_state <= len(FAN_SETTINGS):\n await common.async_set_fan_mode(hass, FAN_SETTINGS[fan_state])\n await client.new_data_cb()\n await hass.async_block_till_done()\n\n return hass.states.get(ENTITY_CLIMATE)\n\n\nasync def _patch_spa_settemp(hass, config_entry, tscale, settemp, client):\n \"\"\"Patch the settemp.\"\"\"\n client.get_tempscale.return_value = tscale\n client.get_settemp.return_value = settemp\n\n await common.async_set_temperature(\n hass, temperature=settemp, entity_id=ENTITY_CLIMATE\n )\n await client.new_data_cb()\n await hass.async_block_till_done()\n\n return hass.states.get(ENTITY_CLIMATE)\n\n\nasync def _patch_spa_heatmode(hass, config_entry, heat_mode, client):\n \"\"\"Patch the heatmode.\"\"\"\n client.heatmode = heat_mode\n\n await common.async_set_hvac_mode(hass, HVAC_SETTINGS[heat_mode], ENTITY_CLIMATE)\n await client.new_data_cb()\n await hass.async_block_till_done()\n\n return hass.states.get(ENTITY_CLIMATE)\n\n\nasync def _patch_spa_heatstate(hass, config_entry, heat_state, client):\n \"\"\"Patch the heatmode.\"\"\"\n client.get_heatstate.return_value = heat_state\n\n await common.async_set_hvac_mode(hass, HVAC_SETTINGS[heat_state], ENTITY_CLIMATE)\n await client.new_data_cb()\n await hass.async_block_till_done()\n\n return hass.states.get(ENTITY_CLIMATE)\n","sub_path":"tests/components/balboa/test_climate.py","file_name":"test_climate.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"358708133","text":"rows = 'ABCDEFGHI'\ncols = '123456789'\n\ndef cross(a, b):\n\treturn [s+t for s in a for t in b]\n\nboxes = cross(rows, cols)\n\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\ndiagonal_units = [[r+c for r,c in zip(rows,cols)], [r+c for r,c in zip(rows,cols[::-1])]]\nunitlist = row_units + column_units + square_units + diagonal_units\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\nassignments = []\n\ndef assign_value(values, box, value):\n\t\"\"\"\n\tAssigns a value to a given box in the values dictionary. 
If it updates the board record it.\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tthe values dictionary with the updated box value.\n\t\"\"\"\n\n\t# Don't waste memory appending actions that don't actually change any values\n\tif values[box] == value:\n\t\treturn values\n\n\tvalues[box] = value\n\tif len(value) == 1:\n\t\tassignments.append(values.copy())\n\treturn values\n\ndef naked_twins(values):\n\t\"\"\"\n\tEliminate values using the naked twins strategy.\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tthe values dictionary with the naked twins eliminated from peers.\n\t\"\"\"\n\n\t# Find all instances of naked twins in grid.\n\t# Removing a naked twin might create another naked twin\n\t# so we loop the twin detection and removal until there are no more naked twins left.\n\tno_more_twins = False\n\twhile not no_more_twins:\n\t\tboard_before = values\n\t\t# Identify boxes with two numbers in the same unit and check if the numbers are the same\n\t\tpossible_twins = [box for box in values.keys() if len(values[box]) == 2]\n\t\tnaked_twins = []\n\t\tfor box in possible_twins:\n\t\t\tdigit = values[box]\n\t\t\t# Compare box amongst its peers to see if a twin exists\n\t\t\tfor peer in peers[box]:\n\t\t\t\tif digit==values[peer] and peer != box:\n\t\t\t\t\tnaked_twins.append((box,peer))\n\n\t\t# If list is empty there are no naked twins\n\t\tif len(naked_twins) == 0:\n\t\t\treturn values\n\n\t\t# Eliminate the naked twins as possible values for their peers\n\t\tfor a,b in naked_twins:\n\t\t\t# Find shared peers for both boxes\n\t\t\tshared_peers = list(set(peers[a] & peers[b]))\n\t\t\tfor digit in values[a]:\n\t\t\t\t# Iterate through shared peers and remove the twin values\n\t\t\t\tfor peer in shared_peers:\n\t\t\t\t\tif digit in values[peer] and peer != a and peer != b:\n\t\t\t\t\t\tvalues[peer] = values[peer].replace(digit,'')\n\n\t\t# If the value of the board did not change after twin detection then there are no new twins\n\t\tboard_after = values\n\t\tif board_before == board_after:\n\t\t\tno_more_twins = True\n\n\treturn values\n\ndef grid_values(grid):\n\t\"\"\"\n\tConvert grid into a dict of {square: char} with '123456789' for empties.\n\tArgs:\n\t\tgrid(string) - A grid in string form.\n\tReturns:\n\t\tA grid in dictionary form\n\t\t\tKeys: The boxes, e.g., 'A1'\n\t\t\tValues: The value in each box, e.g., '8'. 
If the box has no value, then the value will be '123456789'.\n\t\"\"\"\n\n\tvalues = []\n\tall_digits = '123456789'\n\tfor c in grid:\n\t\tif c == '.':\n\t\t\tvalues.append(all_digits)\n\t\telif c in all_digits:\n\t\t\tvalues.append(c)\n\tassert len(values) == 81\n\treturn dict(zip(boxes, values))\n\ndef display(values):\n\t\"\"\"\n\tDisplay the values as a 2-D grid.\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tNone.\n\t\"\"\"\n\n\twidth = 1+max(len(values[s]) for s in boxes)\n\tline = '+'.join(['-'*(width*3)]*3)\n\tfor r in rows:\n\t\tprint(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n\t\t\t\t\t for c in cols))\n\t\tif r in 'CF': print(line)\n\treturn\n\ndef eliminate(values):\n\t\"\"\"\n\tEliminate values from peers of each box with a single value.\n\n\tGo through all the boxes, and whenever there is a box with a single value,\n\teliminate this value from the set of values of all its peers.\n\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tthe values dictionary after eliminating values.\n\t\"\"\"\n\n\tsolved_values = [box for box in values.keys() if len(values[box]) == 1]\n\tfor box in solved_values:\n\t\tdigit = values[box]\n\t\tfor peer in peers[box]:\n\t\t\tvalues[peer] = values[peer].replace(digit,'')\n\treturn values\n\ndef only_choice(values):\n\t\"\"\"\n\tFinalize all values that are the only choice for a unit.\n\n\tGo through all the units, and whenever there is a unit with a value\n\tthat only fits in one box, assign the value to this box.\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tthe values dictionary with the only choices filled in.\n\t\"\"\"\n\n\tfor unit in unitlist:\n\t\tfor digit in '123456789':\n\t\t\tdplaces = [box for box in unit if digit in values[box]]\n\t\t\tif len(dplaces) == 1:\n\t\t\t\tvalues[dplaces[0]] = digit\n\treturn values\n\ndef reduce_puzzle(values):\n\t\"\"\"\n\tIterate through the eliminate, only choice, and naked twin strategies.\n\tIf at some point, there is a box with no available values, return False.\n\tIf the sudoku is solved, return the sudoku.\n\tIf after an iteration of both functions, the sudoku remains the same, return the sudoku.\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tthe reduced values dictionary resulting from the eliminations.\n\t\"\"\"\n\tstalled = False\n\twhile not stalled:\n\t\t# Check how many boxes have a determined value\n\t\tsolved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n\t\t# Use the Eliminate Strategy\n\t\tvalues = eliminate(values)\n\t\t# Use the Only Choice Strategy\n\t\tvalues = only_choice(values)\n\t\t# Use the Naked Twin Strategy\n\t\tvalues = naked_twins(values)\n\t\t# Check how many boxes have a determined value, to compare\n\t\tsolved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n\t\t# If no new values were added, stop the loop.\n\t\tstalled = solved_values_before == solved_values_after\n\t\t# Sanity check, return False if there is a box with zero available values:\n\t\tif len([box for box in values.keys() if len(values[box]) == 0]):\n\t\t\treturn False\n\treturn values\n\ndef search(values):\n\t\"\"\"\n\tUse depth-first search and propagation.\n\tArgs:\n\t\tvalues(dict): a dictionary of the form {'box_name': '123456789', ...}\n\tReturns:\n\t\tthe values dictionary.\n\t\"\"\"\n\n\t# First, reduce the puzzle 
using the previous function\n\tvalues = reduce_puzzle(values)\n\tif values is False:\n\t\treturn False ## Failed earlier\n\tif all(len(values[s]) == 1 for s in boxes): \n\t\treturn values ## Solved!\n\t# Choose one of the unfilled squares with the fewest possibilities\n\tn,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n\t# Now use recurrence to solve each one of the resulting sudokus\n\tfor value in values[s]:\n\t\tnew_sudoku = values.copy()\n\t\tnew_sudoku[s] = value\n\t\tattempt = search(new_sudoku)\n\t\tif attempt:\n\t\t\treturn attempt\n\ndef solve(grid):\n\t\"\"\"\n\tFind the solution to a Sudoku grid.\n\tArgs:\n\t\tgrid(string): a string representing a sudoku grid.\n\t\t\tExample: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n\tReturns:\n\t\tThe dictionary representation of the final sudoku grid. False if no solution exists.\n\t\"\"\"\n\t\n\tvalues = grid_values(grid)\n\tvalues = search(values)\n\treturn values\n\n\nif __name__ == '__main__':\n\tdiag_sudoku_grid = '.......41......8....7....3........8.....47..2.......6.7.2........1.....4..6.9.3..'\n\tdisplay(solve(diag_sudoku_grid))\n\n\ttry:\n\t\tfrom visualize import visualize_assignments\n\t\tvisualize_assignments(assignments)\n\n\texcept SystemExit:\n\t\tpass\n\texcept:\n\t\tprint('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327766961","text":"# -*- coding: utf-8 -*-\nfrom vizdoom import *\nfrom constants import WINDOW_VISIBLE, HAND_MODE\nimport skimage.color, skimage.transform, skimage.io\nimport numpy as np\nimport math\nimport time\n\n#channels x width x height\nresolution = (3, 120, 120)\nframe_repeat = 3\nHISTORY_LENGTH = 4\nAVAILABLE_ACTIONS = [[1, 0, 0, 0, 0, 0, 0, 0, 0],[0, 1, 0, 0, 0, 0, 0, 0, 0],[0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0],[0, 0, 0, 0, 1, 0, 0, 0, 0],[0, 0, 0, 0, 0, 1, 0, 0, 0]]\nACTION_SIZE = len(AVAILABLE_ACTIONS)\n\nbots = 20\n\nclass GameState(object):\n def __init__(self, rand_seed, display=False, no_op_max=7):\n self.game = DoomGame()\n\n # After setting the seed every episode will look the same (if\n # agent will behave deterministicly of course).\n self.game.set_seed(rand_seed)\n\n self.game.load_config(\"map/cig.cfg\")\n self.game.set_doom_map(\"map01\") # Limited deathmatch.\n self.game.add_game_args(\"-host 1 -deathmatch +timelimit 1.0 \"\n \"+sv_forcerespawn 1 +sv_noautoaim 1 +sv_respawnprotect 1 +sv_spawnfarthest 1\")\n self.game.add_game_args(\"+name AI +colorset 0\")\n self.game.set_mode(Mode.PLAYER)\n self.game.add_available_game_variable(GameVariable.POSITION_X)\n self.game.add_available_game_variable(GameVariable.POSITION_Y)\n self.game.add_available_game_variable(GameVariable.HEALTH)\n self.game.add_available_game_variable(GameVariable.SELECTED_WEAPON_AMMO)\n self.game.add_available_game_variable(GameVariable.FRAGCOUNT)\n\n self.last_health = 0\n self.last_ammo = 0\n self.total_frag_count = 0\n self.randseed = rand_seed\n\n self.game.set_window_visible(WINDOW_VISIBLE)\n if HAND_MODE:\n self.game.set_mode(Mode.SPECTATOR)\n self.game.init()\n\n self.game.send_game_command(\"removebots\")\n for i in range(bots):\n self.game.send_game_command(\"addbot\")\n\n\n self.last_ammo = 15\n self.last_health = 100\n self.last_fragcount = 0\n\n self.s_t = [] 
#frame1(3)-centered1(3)-frame2(3)-centered2(3)-....\n self.ss_t = [] #ammo and health\n\n # skip head\n for i in range(HISTORY_LENGTH):\n indx = np.random.choice(range(len(AVAILABLE_ACTIONS)))\n self.make_action(AVAILABLE_ACTIONS[indx])\n\n # use current frame to update memory of img&var\n def update_frame(self):\n screen = self.game.get_state().screen_buffer\n compressed_screen = skimage.transform.resize(screen, resolution)\n compressed_screen = compressed_screen.astype(np.float32)\n\n centered_screen = screen[:,140:260, 260:380]\n centered_screen = skimage.transform.resize(centered_screen, resolution)\n centered_screen = centered_screen.astype(np.float32)\n\n if len(self.ss_t) == HISTORY_LENGTH:\n self.s_t.pop(0)\n self.s_t.pop(0)\n self.ss_t.pop(0)\n\n # self.s_t: 4*3*h*w\n self.s_t.append(compressed_screen)\n self.s_t.append(centered_screen)\n # factor of 0.01 is multiplied\n self.ss_t.append([self.game.get_game_variable(GameVariable.SELECTED_WEAPON_AMMO)*0.01, self.game.get_game_variable(GameVariable.HEALTH)*0.01])\n\n #update frame is also included\n def make_action(self, action):\n\n old_position = [self.game.get_game_variable(GameVariable.POSITION_X), self.game.get_game_variable(GameVariable.POSITION_Y)]\n\n if HAND_MODE:\n self.game.advance_action()\n else:\n self.game.make_action(action, frame_repeat)\n if not self.game.is_episode_finished():\n self.update_frame()\n else:\n self.game.close()\n self.__init__(self.randseed)\n\n new_position = [self.game.get_game_variable(GameVariable.POSITION_X), self.game.get_game_variable(GameVariable.POSITION_Y)]\n\n reward = -0.008 # living reward\n new_fragcount = self.game.get_game_variable(GameVariable.FRAGCOUNT)\n new_health = max(self.game.get_game_variable(GameVariable.HEALTH), 0)\n new_ammo = self.game.get_game_variable(GameVariable.SELECTED_WEAPON_AMMO)\n\n if new_fragcount - self.last_fragcount > 0:\n reward += 2 # killed someone\n self.total_frag_count += 1\n if self.game.is_player_dead():\n reward += -1 # killed by someone or by self\n self.total_frag_count -= 1\n\n reward += 0.01 * (new_health - self.last_health)\n reward += 0.04 * (new_ammo - self.last_ammo)\n\n reward += 5e-5 * (math.sqrt(math.pow(old_position[0] - new_position[0], 2) + math.pow(old_position[1] - new_position[1], 2)) - 8)\n\n self.last_fragcount = new_fragcount\n self.last_health = new_health\n self.last_ammo = new_ammo\n\n if self.game.is_player_dead():\n self.game.respawn_player()\n self.last_health = 100\n self.last_ammo = 15\n\n return reward\n\n def is_terminal(self):\n return self.game.is_episode_finished()\n\n def get_total_rew(self):\n return self.game.get_game_variable(GameVariable.FRAGCOUNT)\n\n def get_history_chain(self):\n revised_s_t = np.reshape(np.array(self.s_t), [HISTORY_LENGTH * resolution[0] * 2, resolution[1], resolution[2]]) #24xhxw\n revised_ss_t = np.reshape(np.array(self.ss_t), [HISTORY_LENGTH * 2])\n return revised_s_t.transpose((1,2,0)), revised_ss_t\n\n","sub_path":"game_state.py","file_name":"game_state.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"44042425","text":"import socket\nimport errno\nimport os\nimport ipaddress\nfrom ctypes import cdll, c_int, c_char_p\n\nglobal _lib_load_err, _lib\n_lib_load_err = None\n_lib = None\n\nclass MulticastException(Exception):\n pass\n\ndef _pull_error_msgs():\n global _lib_load_err, _lib\n if _lib_load_err is not None:\n raise _lib_load_err\n assert(_lib)\n errcount = _lib.errmsg_count()\n 
msgs = []\n for i in range(errcount):\n msgs.append(str(_lib.errmsg(i)))\n done_errcount = _lib.errmsg_count()\n if errcount != done_errcount:\n msgs.append('error: libpymcast error count changed (%d to %d) while enumerating' % (errcount, done_errcount))\n if done_errcount > errcount:\n for i in range(errcount, done_errcount):\n msgs.append(str(_lib.errmsg(i)))\n _lib.clear_errors()\n return msgs\n\ndef join_ssm(sock, source, group, if_idx):\n global _lib_load_err, _lib\n if _lib_load_err is not None:\n raise _lib_load_err\n assert(_lib)\n\n src_ip = ipaddress.ip_address(source)\n grp_ip = ipaddress.ip_address(group)\n\n if src_ip.version != grp_ip.version:\n raise MulticastException('Join(S=%s,G=%s) mismatched IP addresses' %\n (src_ip, grp_ip))\n\n if not grp_ip.is_multicast:\n raise MulticastException('Join(S=%s,G=%s) non-multicast group' %\n (src_ip, grp_ip))\n\n result = _lib.join_ssm(sock.fileno(), if_idx, src_ip.version,\n src_ip.packed, grp_ip.packed)\n\n if result != 0:\n raise MulticastException('\\n'.join(_pull_error_msgs()))\n\ndef join_asm(sock, source, group, ifname):\n raise MulticastException(\"join_asm: unsupported\")\n pass\n\ndef _on_load():\n global _lib_load_err, _lib\n tapspath = os.path.abspath(os.path.dirname(__file__))\n libpath = os.path.join(tapspath, 'libpymcast.so')\n _lib_load_err = None\n try:\n _lib = cdll.LoadLibrary(libpath)\n _lib.errmsg.restype = c_char_p\n if _lib.errmsg_count() != 0:\n _lib_load_err = YangException()\n except Exception as ex:\n _lib = None\n _lib_load_err = ex\n\n_on_load()\n","sub_path":"PyTAPS/multicast.py","file_name":"multicast.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"96772068","text":"\"\"\"\ncontent.py\n\nContent database model\n\"\"\"\n\nfrom datetime import datetime\n\nfrom implicit.db import db\n\nDATE_FORMAT = '%Y-%m-%d %H:%M:%S'\n\n\nclass Content(db.Model):\n \"\"\"\n Content database model\n \"\"\"\n __tablename__ = 'content'\n\n id = db.Column(db.Integer, primary_key=True)\n file_name = db.Column(db.String(100))\n file_directory = db.Column(db.String(256))\n source_url = db.Column(db.String(256))\n uploaded_at = db.Column(db.DateTime, default=datetime.now)\n\n contexts = db.relationship('Context', backref='content', lazy='dynamic')\n stimulus = db.relationship('Stimuli', backref='content', lazy='dynamic')\n\n def __init__(self, file_name, file_directory, source_url):\n \"\"\"\n Initialize the model\n\n PARAMS:\n file_name (str): the file name of the content\n file_directory (str): the directory containing the content\n source_url (str): the url to access the content\n \"\"\"\n self.file_name = file_name\n self.file_directory = file_directory\n self.source_url = source_url\n\n def __repr__(self):\n \"\"\"\n Representation of the object\n \"\"\"\n return \"\".format(self.file_name, self.source_url)\n\n @property\n def serialize(self):\n \"\"\"\n Serialize the model\n \"\"\"\n contexts = [context.serialize for context in self.contexts]\n stimulus = [stimuli.serialize for stimuli in self.stimulus]\n return {\n 'id': self.id,\n 'file_name': self.file_name,\n 'file_directory': self.file_directory,\n 'source_url': self.source_url,\n 'uploaded_at': self.uploaded_at,\n 'contexts': contexts,\n 'stimulus': stimulus\n }\n","sub_path":"implicit/db/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"373583541","text":"import os\nimport openai\nfrom dotenv import load_dotenv\nload_dotenv()\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\ndef getgpt(userinput):\n response = openai.Completion.create(\n engine=\"davinci\",\n prompt=userinput.strip(),\n temperature=0.8, # One of the most important settings to control the output of the GPT-3 engine is the temperature. This setting controls the randomness of the generated text. A value of 0 makes the engine deterministic, which means that it will always generate the same output for a given input text.\n max_tokens=1000, \n top_p=1.0,\n frequency_penalty=0.0, # Frequency penalty works by lowering the chances of a word being selected again the more times that word has already been used. Frequency Penalty is a way to prevent word repetitions\n presence_penalty=0.0, # Presence penalty does not consider how frequently a word has been used, but just if the word exists in the text. Presence Penalty is a way to prevent topic repetitions.\n stop=[\"\\n\"]\n )\n\n return response[\"choices\"][0][\"text\"]\n\ndef rewrite_temp_gpt3_without_pandoc(input_user_prompt, input_title):\n # read in html gpt3 page to include new content\n with open('templates/templates/2021/07/11/temp_gpt3.html') as f:\n html_lines = f.readlines()\n\n # remove last 8 lines\n html_lines.pop()\n html_lines.pop()\n html_lines.pop()\n html_lines.pop()\n html_lines.pop()\n html_lines.pop()\n html_lines.pop()\n\n article_body = getgpt(input_user_prompt)\n\n html_lines.append('
\\n')\n html_lines.append('
'+str(input_title.strip())+'
\\n')\n html_lines.append(' 2021 Jul 11 \\n')\n html_lines.append('See all posts\\n')\n html_lines.append('
\\n')\n html_lines.append('
'+str(input_user_prompt.strip()) + \" \" + str(article_body.strip())+'
\\n') # concatenate prompt and response with a single space in between\n html_lines.append('
')\n\n # re-write the new app code\n html_out = open(\"templates/templates/2021/07/11/temp_gpt3.html\", \"w\")\n for line in html_lines:\n html_out.write(line)","sub_path":"non_pandoc.py","file_name":"non_pandoc.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334354544","text":"import ee\nimport numpy as np\nimport xarray as xr\nimport pysptools.eea as eea\nimport pysptools.abundance_maps as amp\nfrom skimage.morphology import disk\nfrom skimage.filters import threshold_otsu, rank\n\nimport rastersmith as rs\n\nfrom . import geeutils, downscale\n\nclass Sentinel1(object):\n def __init__(self,):\n return\n\n @staticmethod\n def getFloodMap(gr,time_start,time_end,\n canny_threshold=7, # threshold for canny edge detection\n canny_sigma=1, # sigma value for gaussian filter\n canny_lt=7, # lower threshold for canny detection\n smoothing=100, # amount of smoothing in meters\n connected_pixels=200, # maximum size of the neighborhood in pixels\n edge_length=50, # minimum length of edges from canny detection\n smooth_edges=100,\n ):\n\n geom = ee.Geometry.Rectangle([gr.west,gr.south,gr.east,gr.north])\n\n mapResult = geeutils.s1WaterMap(geom,time_start,time_end,canny_threshold,\n canny_sigma,canny_lt,smoothing,\n connected_pixels,edge_length,\n smooth_edges)\n\n return mapResult\n\n\nclass Atms(object):\n def __init__(self):\n return\n\n @staticmethod\n def maskClouds(ds,threshold=-20):\n rain = ds.sel(band='C16').astype(np.float) - ds.sel(band='C1').astype(np.float)\n\n cloudMask = rain > threshold\n\n return ds.raster.updateMask(cloudMask)\n\n @classmethod\n def getWaterFraction(cls,ds,cloudThresh=-20,constrain=True,maskClouds=True):\n\n if maskClouds:\n atmsNoClouds = cls.maskClouds(ds,threshold=cloudThresh)\n else:\n atmsNoClouds = ds.copy()\n\n dBtr = atmsNoClouds.sel(band='C4').astype(np.float) - atmsNoClouds.sel(band='C3').astype(np.float)\n dBtr.coords['band'] = 'dBtr'\n\n channels = xr.concat([atmsNoClouds.sel(band=['C3','C4','C16']).isel(time=0,z=0),\n dBtr.isel(time=0,z=0)],dim='band')\n arr = channels.values\n arr[np.isnan(arr)] = -9999\n\n nClasses = 3\n nfindr = eea.NFINDR()\n U = nfindr.extract(arr, nClasses, maxit=100, normalize=True, ATGP_init=True)\n\n drop = np.argmin(list(map(lambda x:U[x,:].mean(),range(nClasses))))\n waterIdx = np.argmin(list(map(lambda x:np.delete(U,drop,axis=1)[x,:],range(nClasses-2))))\n\n if waterIdx == 0:\n bandList = ['water','land','mask']\n else:\n bandList = ['land','water','mask']\n\n nnls = amp.NNLS()\n amaps = nnls.map(arr, U, normalize=True)\n\n drop = np.argmin(list(map(lambda x:amaps[:,:,x].mean(),range(amaps.shape[2]))))\n\n unmixed = np.delete(amaps,drop,axis=2)\n\n unmixed[unmixed==0] = np.nan\n\n scaled = np.zeros_like(unmixed)\n for i in range(scaled.shape[2]):\n summed = unmixed[:,:,i]/unmixed.sum(axis=2)\n scaled[:,:,i] = (summed - np.nanmin(summed)) / (np.nanmax(summed) - np.nanmin(summed))\n\n scaled = scaled - 0.25\n scaled[scaled<0] = 0\n\n fWater = atmsNoClouds.sel(band=['C1','C2','mask']).copy()\n fWater[:,:,0,:2,0] = scaled[:,:,:]\n fWater.coords['band'] = bandList\n\n return fWater.raster.updateMask(atmsNoClouds.sel(band='mask'))\n\n\n\nclass Landsat8(object):\n def __init__():\n return\n\n\nclass Viirs(object):\n def __init__(self):\n return\n\n @staticmethod\n def getWaterMask(ds,transform=True):\n if transform:\n ds = 1 / (1 + (np.e ** ds))\n\n arr = ds.isel(time=0,z=0,band=0).values\n global_otsu = 
threshold_otsu(arr[~np.isnan(arr)])\n waterMask = ds >= global_otsu\n\n return waterMask.where(waterMask>0)\n\n\nclass Modis(object):\n def __init__():\n return\n\n\nclass Sentinel2(object):\n def __init__():\n return\n","sub_path":"tethysapp/hydraviewer/hydrafloods/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294221142","text":"\"\"\"\n\nTest transformers contained in streams.\n\nCopyright (C) 2017 The Pylp Authors.\nThis file is under the MIT License.\n\n\"\"\"\n\nimport pylp\nfrom utils import AsyncTestCase\n\n\nclass UpperTransformer(pylp.Transformer):\n\t\"\"\"A transformer that capitalizes contents.\"\"\"\n\n\tasync def transform(self, file):\n\t\t\"\"\"Function called when a file need to be transformed.\"\"\"\n\t\tfile.contents = file.contents.upper()\n\t\treturn file\n\n\nclass ReverseTransformer(pylp.Transformer):\n\t\"\"\"A transformer that reverses contents.\"\"\"\n\n\tasync def transform(self, file):\n\t\t\"\"\"Function called when a file need to be transformed.\"\"\"\n\t\tfile.contents = file.contents[::-1]\n\t\treturn file\n\n\nclass RecorderTransformer(pylp.Transformer):\n\t\"\"\"A transformer that record files passed inside.\"\"\"\n\n\tdef __init__(self):\n\t\tself.files = []\n\n\tasync def transform(self, file):\n\t\t\"\"\"Function called when a file need to be transformed.\"\"\"\n\t\tself.files.append(file.clone())\n\t\treturn file\n\n\n\nclass TestTransformer(AsyncTestCase):\n\t\"\"\"Test transformers contained in streams.\"\"\"\n\n\tasync def test_transfomer_upper(self):\n\t\t\"\"\"It should capitalize contents.\"\"\"\n\n\t\tstream = pylp.src(\"./fixtures/file.txt\").pipe(UpperTransformer())\n\t\tawait stream.wait_processed()\n\n\t\tself.assertEqual(len(stream.files), 1)\n\t\tfile = stream.files[0]\n\n\t\tself.assertIsInstance(file, pylp.File)\n\t\tself.assertEqual(file.contents, \"THIS IS A TEST FILE.\")\n\n\t\n\tasync def test_transfomer_multiple(self):\n\t\t\"\"\"It should run multiple transformers.\"\"\"\n\n\t\trecorder = RecorderTransformer()\n\t\tstream = pylp.pipes(\n\t\t\tpylp.src(\"./fixtures/file.txt\"),\n\t\t\tUpperTransformer(),\n\t\t\trecorder,\n\t\t\tReverseTransformer()\n\t\t)\n\n\t\tawait stream.wait_processed()\n\n\t\tself.assertEqual(len(stream.files), 1)\n\t\tself.assertEqual(len(recorder.files), 1)\n\n\t\tself.assertEqual(recorder.files[0].contents, \"THIS IS A TEST FILE.\")\n\t\tself.assertEqual(stream.files[0].contents, \".ELIF TSET A SI SIHT\")\n","sub_path":"tests/tests/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"237379449","text":"from matplotlib import pyplot as plt\nfrom shapely.geometry import Point, Polygon\nimport numpy as np\nimport node\nimport geopy.distance as distance\nimport tkinter as tk\nfrom PIL import Image, ImageTk\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom shapely.geometry import Point, Polygon\nimport shapefile\nimport node\nfrom tkinter import filedialog\n\n\nclass MyFirstGUI:\n \n def filesel(self):\n filename = filedialog.askopenfilename(initialdir=\"E:\\TE\\SIH\\Drone-routing-and-scheduling\\shapefiles\",title=\"Select file\",filetypes=((\"shape files\", \"*.shp\"), (\"all files\", \"*.*\")))\n self.newf = filename[39:-1]\n\n def run(self):\n\n drones = 5\n polygon = None\n poly_points = None\n\n def line_intersection(l1, 
l2):\n dx = (l1[0][0] - l1[1][0], l2[0][0] - l2[1][0])\n dy = (l1[0][1] - l1[1][1], l2[0][1] - l2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(dx, dy)\n if div == 0:\n print('lines do not intersect')\n\n d = (det(*l1), det(*l2))\n x = det(d, dx) / div\n y = det(d, dy) / div\n return x, y\n\n sf = shapefile.Reader(self.newf)\n for shape in list(sf.iterShapes()):\n npoints = len(shape.points) # total points\n nparts = len(shape.parts) # total parts\n polygon = Polygon(shape.points)\n poly_points = shape.points\n if nparts == 1:\n x_lon = np.zeros((len(shape.points), 1))\n y_lat = np.zeros((len(shape.points), 1))\n for ip in range(len(shape.points)):\n x_lon[ip] = shape.points[ip][0]\n y_lat[ip] = shape.points[ip][1]\n plt.plot(x_lon, y_lat)\n\n plt.savefig(\"boundary.png\")\n plt.close()\n\n x_min, y_min, x_max, y_max = polygon.bounds\n print(polygon.length * 20)\n bitmap = []\n row = []\n x_range = np.arange(x_min, x_max, 0.0001)\n y_range = np.arange(y_min, y_max, 0.0001)\n for j in range(0, len(y_range)):\n row = []\n for i in range(0, len(x_range)):\n point = None\n if polygon.contains(Point(x_range[i], y_range[j])):\n plt.scatter(x_range[i], y_range[j], s=.5, c='blue')\n point = node.Node(x_range[i], y_range[j], \"In\")\n else:\n plt.scatter(x_range[i], y_range[j], s=.5, c='red')\n point = node.Node(x_range[i], y_range[j], \"Out\")\n row.append(point)\n bitmap.append(row)\n plt.plot()\n plt.savefig(\"dotcon.png\")\n plt.close()\n\n shape = np.array(bitmap).shape\n\n x_array = []\n y_array = []\n for e, x in enumerate(bitmap):\n row = []\n for y in x:\n if y.get_state() in [\"In\", \"Partial\"]:\n row.append(y)\n if e % 2 != 0:\n row.reverse()\n\n for i in row:\n temp = i.get_points()\n x_array.append(temp[0])\n y_array.append(temp[1])\n index = len(x_array)\n cal = []\n for i in range(0, len(poly_points) - 1):\n result = filter(lambda x: (x <= poly_points[i][1] and x >= poly_points[i + 1][1]) or (\n x >= poly_points[i][1] and x <= poly_points[i + 1][1]), y_range)\n for line in result:\n pt = line_intersection((poly_points[i], poly_points[i + 1]), ((0, line), (x_max, line)))\n cal.append(pt)\n x_array.insert(index, pt[0])\n y_array.insert(index, pt[1])\n\n total_length = 0\n for i in range(0, len(x_array) - 1):\n total_length += distance.vincenty((x_array[i], y_array[i]), ((x_array[i + 1], y_array[i + 1]))).m\n\n part = total_length / drones\n print(part)\n clr = ['red', 'orange', 'cyan', 'green', 'coral']\n\n x_array.reverse()\n y_array.reverse()\n\n i = 0;\n path_x = [x_array[i]]\n path_y = [y_array[i]]\n td = 0\n self.max_time = 0;\n self.info = []\n clr = ['red', 'blue', 'cyan', 'coral', 'orange']\n for j in range(0, drones):\n while td <= part and i < len(x_array) - 1:\n td += distance.vincenty((x_array[i], y_array[i]), ((x_array[i + 1], y_array[i + 1]))).m\n i += 1\n path_x.append(x_array[i])\n path_y.append(y_array[i])\n plt.plot(path_x, path_y, color=clr[j])\n if td/1000 > self.max_time:\n self.max_time = td/1000\n d = dict(drone=j + 1, distance=td, time=td/1000, color=clr[j])\n path_y = [y_array[i]]\n path_x = [x_array[i]]\n self.info.append(d)\n td = 0\n\n plt.savefig(\"final.png\")\n plt.close()\n self.b_img = Image.open(\"boundary.png\")\n self.b_img = self.b_img.resize((500,300), Image.ANTIALIAS)\n self.pic = ImageTk.PhotoImage(self.b_img)\n\n self.lab = tk.Label(image=self.pic)\n self.lab.place(x=50,y=5)\n\n\n self.b_img1 = Image.open(\"dotcon.png\")\n self.b_img1 = self.b_img1.resize((500,300), Image.ANTIALIAS)\n self.pic1 = 
ImageTk.PhotoImage(self.b_img1)\n\n self.lab1 = tk.Label(image=self.pic1)\n self.lab1.place(x=760,y=5)\n\n for e,d in enumerate(self.info):\n lable = \"Drone : {}, Color : {}, Distance : {}, Time : {}.\".format(d['drone'], d['color'], d['distance'], d['time'])\n tk.Label(self.lab4, text=lable).place(x=10, y=50 * (e+1), )\n\n tk.Label(self.lab4, text=\"Total Time : {}.\".format(\"%.2f\" % self.max_time)).place(x=10, y=10)\n self.lab4.place(x=760, y=330)\n\n \n self.b_img2 = Image.open(\"final.png\")\n self.b_img2 = self.b_img2.resize((500,300), Image.ANTIALIAS)\n self.pic2 = ImageTk.PhotoImage(self.b_img2)\n\n self.lab2 = tk.Label(image=self.pic2)\n self.lab2.place(x=50,y=330)\n\n def __init__(self, master):\n self.master = master\n master.title(\"A simple GUI\")\n self.b_img3 = Image.open(\"drone1.jpg\")\n self.pic3 = ImageTk.PhotoImage(self.b_img3)\n\n self.lab3 = tk.Label(image=self.pic3)\n self.lab3.place(x=0,y=0)\n\n self.lab4 = tk.Frame(master, height=300, width=500)\n\n self.button2 = tk.Button(root, text=\"RUN\", command=self.run, bg=\"Brown\")\n self.button2.place(x=700, y=650)\n\n\n self.button4 = tk.Button(root, text=\"Select File\", command=self.filesel, bg=\"Brown\")\n self.button4.place(x=400, y=650)\n\n self.newf = \"\"\n\n\nroot = tk.Tk()\nroot.geometry('1920x1080+0+0')\nroot.resizable(height=None, width=None)\nroot.title(\"Drone Routing and Planning\")\nmy_gui = MyFirstGUI(root)\nroot.mainloop()\n","sub_path":"hack.py","file_name":"hack.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"443100471","text":"def deleteMiddle(node):\n # Thought the parameters of the problem were that I would not have to deal\n # with the node passed in being the last node so I will handle this case here\n if not node.next:\n return\n\n # Otherwise you can just copy the next node data into the current node\n # Then move the next pointer to the next.next node\n node.data = node.next.data\n node.next = node.next.next\n","sub_path":"Chapter2/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"421359248","text":"# PROBLEM 4\n#\n# A palindromic number reads the same both ways. 
The largest palindrome made \n# from the product of two 2-digit numbers is 9009 = 91 × 99.\n#\n# Find the largest palindrome made from the product of two 3-digit numbers.\n#\n\n# Return true or false depending on whether or not a number is a palindrome\ndef palindrome(num):\n return str(num) == str(num)[::-1]\n\n# Brute force solution to the problem; returns the palindrome\ndef brute_force():\n\n best_solution = 0\n\n for i in range(999,100,-1):\n for j in range(999,100,-1):\n if i*j > best_solution and palindrome(i*j):\n best_solution = i*j\n\n return best_solution\n\nprint(brute_force())\n","sub_path":"004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"390802243","text":"\"\"\"Train and test methods for transformer qa.\n\nAuthor:\n Chris Chute (chute@stanford.edu)\n Jeffrey Shen\n\"\"\"\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torch.cuda.amp as amp\n\nfrom collections import OrderedDict\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom ujson import load as json_load\nfrom os.path import join\n\nfrom models import RoBERTa\nfrom datasets.bpe_squad import SQuAD\nfrom preprocess.bpe import BPE\nimport eval\nimport trainer.trainer as base_trainer\nimport trainer.util as util\nimport trainer.stats as stats\nimport models.transformer as T\nimport trainer.scheduler as sched\n\n\ndef add_special_tokens(args):\n args.ignore_idx = -1\n args.padding_idx = 0\n args.cls_idx = 1\n args.sep_idx = 2\n args.mask_idx = 3\n\n\ndef get_args(args):\n # Compute derived args values\n device, args.gpu_ids = util.get_available_devices()\n\n args.batch_size_per_gpu = args.batch_size\n args.batch_size *= max(1, len(args.gpu_ids))\n return args, device\n\n\ndef get_num_steps(args):\n args.num_steps = args.epoch_size // args.batch_size // args.gradient_accumulation\n if args.num_epochs >= 0:\n args.num_steps *= args.num_epochs\n\n if args.decay_forever:\n args.num_steps = float(\"inf\")\n\n return args.num_steps\n\n\ndef get_bpe(args):\n bpe = BPE()\n with open(args.bpe_file, \"r\") as file:\n bpe.load_state_dict(json_load(file))\n add_special_tokens(args)\n return bpe\n\n\ndef get_dataset(args, file, shuffle, randomize):\n # Don't need to supply special idxs, since they're the same.\n dataset = SQuAD(\n file,\n block_size=args.max_positions,\n use_v2=args.use_squad_v2,\n )\n loader = data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n shuffle=shuffle,\n num_workers=args.num_workers,\n collate_fn=dataset.get_sliding_window_collate(\n args.context_window_stride, randomize\n ),\n )\n return dataset, loader\n\n\ndef get_model(args, bpe):\n model = RoBERTa(\n dim=args.dim,\n n_heads=args.n_heads,\n ff_dim=args.ff_dim,\n activation=args.activation,\n dropout=args.dropout,\n attn_dropout=args.attn_dropout,\n act_dropout=args.act_dropout,\n n_layers=args.n_layers,\n max_positions=args.max_positions,\n max_tokens=len(bpe),\n padding_idx=args.padding_idx,\n ignore_idx=None,\n prenorm=args.prenorm,\n qa=True,\n )\n return model\n\n\ndef train(args):\n trainer = base_trainer.Trainer()\n args, device = get_args(args)\n args, log, tbx = trainer.setup(args)\n\n # Get BPE\n log.info(\"Loading BPE...\")\n bpe = get_bpe(args)\n log.info(\"Loaded {} BPE tokens\".format(len(bpe)))\n\n # Get data loader\n log.info(\"Building dataset...\")\n train_dataset, 
train_loader = get_dataset(args, args.train_record_file, True, True)\n dev_dataset, dev_loader = get_dataset(args, args.dev_record_file, False, True)\n args.epoch_size = len(train_dataset)\n log.info(\"Train has {} examples\".format(args.epoch_size))\n\n # Get model\n log.info(\"Building model...\")\n model = get_model(args, bpe)\n model = trainer.setup_model(model, device)\n\n # Get optimizer, scheduler, and scaler\n optimizer = optim.AdamW(\n model.parameters(),\n args.lr,\n betas=(args.beta_1, args.beta_2),\n eps=args.eps,\n weight_decay=args.l2_wd,\n )\n\n get_num_steps(args)\n log.info(\"Scheduler will decay over {} steps\".format(args.num_steps))\n scheduler = sched.get_linear_warmup_power_decay_scheduler(\n optimizer, args.warmup_steps, args.num_steps, power=args.power_decay\n )\n\n scaler = amp.GradScaler()\n optimizer, scheduler, scaler = trainer.setup_optimizer(optimizer, scheduler, scaler)\n\n # Train\n log.info(\"Training...\")\n model.train()\n sample_num = 0\n samples_till_eval = args.eval_per_n_samples\n epoch = 0\n step = 0\n trainer.setup_saver()\n trainer.setup_random()\n sample_num, samples_till_eval, epoch, step = trainer.setup_step(\n step_vars=(sample_num, samples_till_eval, epoch, step)\n )\n trainer.setup_close()\n\n while epoch != args.num_epochs:\n trainer.save_checkpoint(step_vars=(sample_num, samples_till_eval, epoch, step))\n epoch += 1\n log.info(f\"Starting epoch {epoch}...\")\n # Print histogram of weights every epoch\n for tags, params in model.named_parameters():\n tbx.add_histogram(tags, params.data, epoch)\n with torch.enable_grad(), tqdm(total=len(train_loader.dataset)) as progress_bar:\n for x, y, c_padding_mask, _, _ in train_loader:\n batch_size = x.size(0)\n loss, loss_val, _ = forward(x, y, c_padding_mask, args, device, model, is_train=True)\n loss = loss / args.gradient_accumulation\n\n # Backward\n scaler.scale(loss).backward()\n if (step + 1) % args.gradient_accumulation == 0:\n scaler.unscale_(optimizer)\n nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n scaler.step(optimizer)\n scaler.update()\n scheduler.step()\n optimizer.zero_grad()\n\n # Log info\n step += 1\n sample_num += batch_size\n progress_bar.update(batch_size)\n progress_bar.set_postfix(epoch=epoch, NLL=loss_val)\n tbx.add_scalar(\"train/NLL\", loss_val, sample_num)\n tbx.add_scalar(\"train/LR\", optimizer.param_groups[0][\"lr\"], sample_num)\n tbx.add_scalar(\n \"train/steps\", step // args.gradient_accumulation, sample_num\n )\n\n samples_till_eval -= batch_size\n if samples_till_eval <= 0:\n samples_till_eval = args.eval_per_n_samples\n\n # Evaluate and save checkpoint\n log.info(f\"Evaluating at sample step {sample_num}...\")\n results, pred_dict = evaluate(\n model, dev_loader, device, args.dev_eval_file, args\n )\n trainer.save_best(sample_num, results[args.metric_name])\n\n # Log to console\n results_str = \", \".join(\n f\"{k}: {v:05.2f}\" for k, v in results.items()\n )\n log.info(f\"Dev {results_str}\")\n\n # Log to TensorBoard\n log.info(\"Visualizing in TensorBoard...\")\n for k, v in results.items():\n tbx.add_scalar(f\"dev/{k}\", v, sample_num)\n util.visualize(\n tbx,\n pred_dict=pred_dict,\n eval_path=args.dev_eval_file,\n step=sample_num,\n split=\"dev\",\n num_visuals=args.num_visuals,\n )\n\n\ndef forward(x, y, c_padding_mask, args, device, model, is_train=False, autocast=True):\n # Setup for forward\n x = x.to(device)\n padding_mask = T.get_padding_mask(x, args.padding_idx)\n\n # Forward\n with amp.autocast(enabled=autocast):\n scores = 
model(x, padding_mask=padding_mask)\n c_padding_mask = c_padding_mask.to(device)\n scores = model.module.mask_scores(scores, c_padding_mask)\n y = y.to(device)\n weight = None\n if is_train:\n weight = torch.ones(scores.size(1), device=device, dtype=torch.float)\n weight[0] = args.na_class_weight\n loss = model.module.get_loss(scores, y, weight=weight)\n loss_val = loss.item() * 2\n\n return loss, loss_val, scores\n\n\ndef evaluate(model, data_loader, device, eval_file, args):\n nll_meter = stats.AverageMeter()\n\n model.eval()\n pred_dict = {}\n with open(eval_file, \"r\") as fh:\n gold_dict = json_load(fh)\n with torch.no_grad():\n for x, y, c_padding_mask, c_starts, ids in data_loader:\n batch_size = x.size(0)\n _, loss_val, scores = forward(x, y, c_padding_mask, args, device, model)\n nll_meter.update(loss_val, batch_size)\n\n # Get F1 and EM scores\n p1, p2 = model.module.get_prob(scores).split(1, dim=-1)\n p1, p2 = p1.squeeze(-1), p2.squeeze(-1)\n starts, ends = util.discretize(p1, p2, args.max_ans_len, args.use_squad_v2)\n\n preds, _ = util.convert_tokens(\n gold_dict,\n ids.tolist(),\n starts.tolist(),\n ends.tolist(),\n args.use_squad_v2,\n c_starts.tolist(),\n )\n pred_dict.update(preds)\n\n model.train()\n\n results = {\"NLL\": nll_meter.avg}\n results.update(eval.eval_dicts(gold_dict, pred_dict, args.use_squad_v2))\n return results, pred_dict\n\n\ndef add_train_args(parser):\n \"\"\"Add arguments needed in train.py.\"\"\"\n add_train_test_args(parser)\n base_trainer.add_train_args(parser)\n\n parser.add_argument(\n \"--eval_per_n_samples\",\n type=int,\n default=25000,\n help=\"Number of samples between successive evaluations.\",\n )\n parser.add_argument(\n \"--gradient_accumulation\",\n type=int,\n default=4,\n )\n parser.add_argument(\"--lr\", type=float, default=0.025, help=\"Learning rate.\")\n parser.add_argument(\n \"--warmup_steps\", type=float, default=7500, help=\"Warmup optimizer steps.\"\n )\n parser.add_argument(\n \"--power_decay\", type=float, default=-0.5, help=\"Power of the decay.\"\n )\n parser.add_argument(\n \"--decay_forever\",\n type=lambda s: s.lower().startswith(\"t\"),\n default=True,\n help=\"Whether the decay should reach end_lr at the end of training, or in the limit to infinity\",\n )\n\n parser.add_argument(\"--l2_wd\", type=float, default=0.01, help=\"AdamW weight decay.\")\n parser.add_argument(\"--eps\", type=float, default=1e-6, help=\"Adam epsilon.\")\n parser.add_argument(\"--beta_1\", type=float, default=0.9, help=\"Adam beta_1.\")\n parser.add_argument(\"--beta_2\", type=float, default=0.98, help=\"Adam beta_2.\")\n parser.add_argument(\n \"--num_epochs\",\n type=int,\n default=2,\n help=\"Number of epochs for which to train. 
Negative means forever.\",\n )\n parser.add_argument(\n \"--metric_name\",\n type=str,\n default=\"F1\",\n choices=(\"NLL\", \"EM\", \"F1\"),\n help=\"Name of dev metric to determine best checkpoint.\",\n )\n parser.add_argument(\n \"--max_grad_norm\",\n type=float,\n default=5.0,\n help=\"Maximum gradient norm for gradient clipping.\",\n )\n\n parser.add_argument(\n \"--na_class_weight\",\n type=float,\n default=1.5,\n help=\"Class weight for the N/A answer, all others are given 1.0.\",\n )\n\n\ndef test(args):\n trainer = base_trainer.Trainer(is_train=False)\n args, device = get_args(args)\n args, log, tbx = trainer.setup(args)\n\n # Get BPE\n log.info(\"Loading BPE...\")\n bpe = get_bpe(args)\n log.info(\"Loaded {} BPE tokens\".format(len(bpe)))\n\n # Get data loader\n log.info(\"Building dataset...\")\n record_file = vars(args)[f\"{args.split}_record_file\"]\n dataset, data_loader = get_dataset(\n args, record_file, shuffle=False, randomize=False\n )\n\n # Get model\n log.info(\"Building model...\")\n model = get_model(args, bpe)\n model = trainer.setup_model(model, device)\n model.eval()\n\n trainer.setup_close()\n\n # Evaluate\n log.info(f\"Evaluating on {args.split} split...\")\n nll_meter = stats.AverageMeter()\n pred_dict = {} # Predictions for TensorBoard\n sub_dict = {} # Predictions for submission\n eval_file = vars(args)[f\"{args.split}_eval_file\"]\n with open(eval_file, \"r\") as fh:\n gold_dict = json_load(fh)\n with torch.no_grad(), tqdm(total=len(dataset)) as progress_bar:\n for x, y, c_padding_mask, c_starts, ids in data_loader:\n batch_size = x.size(0)\n _, loss_val, scores = forward(x, y, c_padding_mask, args, device, model)\n nll_meter.update(loss_val, batch_size)\n\n # Get F1 and EM scores\n p1, p2 = model.module.get_prob(scores).split(1, dim=-1)\n p1, p2 = p1.squeeze(-1), p2.squeeze(-1)\n starts, ends = util.discretize(p1, p2, args.max_ans_len, args.use_squad_v2)\n\n # Log info\n progress_bar.update(batch_size)\n if args.split != \"test\":\n # No labels for the test set, so NLL would be invalid\n progress_bar.set_postfix(NLL=nll_meter.avg)\n\n idx2pred, uuid2pred = util.convert_tokens(\n gold_dict,\n ids.tolist(),\n starts.tolist(),\n ends.tolist(),\n args.use_squad_v2,\n c_starts.tolist(),\n )\n pred_dict.update(idx2pred)\n sub_dict.update(uuid2pred)\n\n # Log results (except for test set, since it does not come with labels)\n if args.split != \"test\":\n\n results = {\"NLL\": nll_meter.avg}\n results.update(eval.eval_dicts(gold_dict, pred_dict, args.use_squad_v2))\n\n # Log to console\n results_str = \", \".join(f\"{k}: {v:05.2f}\" for k, v in results.items())\n log.info(f\"{args.split.title()} {results_str}\")\n\n # Log to TensorBoard\n tbx = SummaryWriter(args.save_dir)\n util.visualize(\n tbx,\n pred_dict=pred_dict,\n eval_path=eval_file,\n step=0,\n split=args.split,\n num_visuals=args.num_visuals,\n )\n\n # Write submission file\n if args.split == \"dev\":\n sub_path = join(args.save_dir, \"val\" + \"_\" + args.sub_file)\n else:\n sub_path = join(args.save_dir, args.split + \"_\" + args.sub_file)\n log.info(f\"Writing submission file to {sub_path}...\")\n eval.write_submission(sub_path, sub_dict)\n\n\ndef add_test_args(parser):\n \"\"\"Get arguments needed in test.py.\"\"\"\n add_train_test_args(parser)\n\n\ndef add_train_test_args(parser):\n \"\"\"Add arguments common to train.py and test.py\"\"\"\n parser.add_argument(\n \"--max_ans_len\",\n type=int,\n default=15,\n help=\"Maximum length of a predicted answer.\",\n )\n parser.add_argument(\n 
\"--num_workers\",\n type=int,\n default=4,\n help=\"Number of sub-processes to use per data loader.\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=16,\n help=\"Batch size per GPU. Scales automatically when \\\n multiple GPUs are available.\",\n )\n parser.add_argument(\n \"--use_squad_v2\",\n type=lambda s: s.lower().startswith(\"t\"),\n default=True,\n help=\"Whether to use SQuAD 2.0 (unanswerable) questions.\",\n )\n parser.add_argument(\n \"--num_visuals\",\n type=int,\n default=10,\n help=\"Number of examples to visualize in TensorBoard.\",\n )\n parser.add_argument(\n \"--context_window_stride\",\n type=int,\n default=256,\n help=\"Stride for selecting sliding windows from the context.\",\n )\n\n # Model params\n parser.add_argument(\n \"--dim\",\n type=int,\n default=768,\n help=\"Embedding dimension.\",\n )\n parser.add_argument(\n \"--n_heads\",\n type=int,\n default=12,\n help=\"Attention heads.\",\n )\n parser.add_argument(\n \"--ff_dim\",\n type=int,\n default=3072,\n help=\"Feedforward dimension.\",\n )\n parser.add_argument(\n \"--activation\",\n choices=[\"relu\", \"gelu\"],\n default=\"gelu\",\n help=\"Feedforward activation function.\",\n )\n parser.add_argument(\n \"--dropout\",\n type=float,\n default=0.1,\n help=\"Dropout probability.\",\n )\n parser.add_argument(\n \"--attn_dropout\",\n type=float,\n default=0.1,\n help=\"Dropout probability for attention weights within self attn.\",\n )\n parser.add_argument(\n \"--act_dropout\",\n type=float,\n default=0.0,\n help=\"Dropout probability after activation within FF.\",\n )\n parser.add_argument(\n \"--n_layers\",\n type=int,\n default=12,\n help=\"Number of layers.\",\n )\n parser.add_argument(\n \"--max_positions\",\n type=int,\n default=512,\n help=\"Maximum number of tokens.\",\n )\n parser.add_argument(\n \"--prenorm\",\n type=lambda s: s.lower().startswith(\"t\"),\n default=False,\n help=\"Whether to put LayerNorm after the residual or before.\",\n )","sub_path":"trainer/roberta_finetune.py","file_name":"roberta_finetune.py","file_ext":"py","file_size_in_byte":16893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638172629","text":"#!/usr/bin/env python\n#cd ~/Desktop/SU_Aur_backup/channels/bg_sub/flat/im_distort/align/OE/QU/rotated_QU/aligned_QU/flux_scaling/PA_diskmajoraxis\n'''\nThis file measures 1D flux (vertical and horizontal) along\na given at the image center for and plots it on a log log plot.\nThe image is rotated at various position angles (PA's) to see at \nwhich PA has the largest flux which corresponds the major axis\nof the disk.\n\nCriteria for evaluating major axis (boundary) based on flux graphs:\n(1) flux curve is oscillating (damped)\n(2) flux curve is decreasing fast\n(3) double checked with PI graphs\n'''\nfrom pylab import *\nimport pyfits\nfrom scipy.ndimage.interpolation import rotate\nimport numpy as np\n\nz=pyfits.open('obj_PI_fs_conv_sig1.fits')\nobj_PI_fs_conv=z[0].data\nz.close()\n\n#define image center\nxcenter=271\nycenter=318\n\n#crop image; 401x401\nobj_PI_fs_conv_cropped=obj_PI_fs_conv[ycenter-200:ycenter+201,xcenter-200:xcenter+201]\n\nfigure(0)\nclf()\nextent=array([-200,200,-200,200])\nimshow(log10(obj_PI_fs_conv_cropped),interpolation='nearest',origin='lower',cmap=cm.jet,vmin=-7.4,vmax=-5,extent=extent)\nplot([0,0], [-200, 200], color='r', linestyle='--', linewidth=1)\nplot([-200,200], [0, 0], color='r', linestyle='--', linewidth=1)\ntitle('PA=0')\n\n##scale the axes in AU; make array with 400 
elements and subtract 200: [-200,-199,..199,200]*AU conversion factor\nrscale=arange(401)-200*9.53e-3*140\n\nfigure(1)\nclf()\n##plot logy 1D-flux values along center (cross-hair)\n##plot x-values (flux) along y=200 (new center, horizontal)\nsemilogy(rscale,obj_PI_fs_conv_cropped[200,:],label='horizontal')\n##plot y-values (:) along x=200 (new center, vertical)\nsemilogy(rscale,obj_PI_fs_conv_cropped[:,200],label='vertical')\ntitle('PA=0')\nylabel('Flux (normalized)')\nxlabel('pixel position')\nlegend(loc='upper right', shadow=True)\n\n#angles=range(91)\nangles=arange(10,91,10)\nfor i in angles:\n rotated_image=rotate(obj_PI_fs_conv_cropped,i)\n figure(i)\n clf()\n dim=rotated_image.shape[0]\n midpoint=dim/2\n extent_PI=array([0-midpoint,dim-midpoint,0-midpoint,dim-midpoint])\n imshow(log10(rotated_image),extent=extent_PI)\n plot([0,0], [-midpoint, midpoint], color='r', linestyle='--', linewidth=1)\n plot([-midpoint,midpoint], [0, 0], color='r', linestyle='--', linewidth=1)\n name1='PA='+str(i)\n title(name1)\n new_rscale=(arange(dim)-midpoint)*9.53e-3*140\n figure(i+1)\n clf()\n semilogy(new_rscale,rotated_image[midpoint,:],label='horizontal')\n semilogy(new_rscale,rotated_image[:,midpoint],label='vertical')\n xmax=round(np.max(rotated_image[midpoint,:]),7)\n ymax=round(np.max(rotated_image[:,midpoint]),7)\n name2='PA='+str(i)+'; xmax='+str(xmax)+'; ymax='+str(ymax)\n title(name2)\n ylabel('Flux (normalized)')\n xlabel('pixel position')\n legend(loc='upper right', shadow=True)\n show()\n#0 - -113-9=122\n\n#10- -47-115=162\n\n#20- -87-64=151\n\n#25- -46-25=71 minimum (visual)\n\n#30- -58-45=103\n\n#40- -41-77=118\n\n#50- -85-98=183\n\n#60- -81-107=188 maximum\n\n#70- -82-91=178\n\n#80- -85-90=175 \n\n#85- -90-93=183 2nd max (visual)\n\n#90- -75-96=171\n\n#95- - 82-86=168\n","sub_path":"channels/bg_sub/flat/im_distort/align/OE/QU/rotated_QU/aligned_QU/flux_scaling/PA_diskmajoraxis/flux_PA.py","file_name":"flux_PA.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"51120679","text":"from django.urls import path, re_path\n\nfrom main import views\n\napp_name = 'main'\n\nurlpatterns = [\n re_path(r'^homePage/$', views.homePage, name='homePage'),\n re_path(r'^account/(?P\\d+)/explore/$', views.explore, name='Explore'),\n re_path(r'^account/(?P\\d+)/classified/$', views.classified, name='Classified'),\n path('account//classified//', views.classifiedSpecific, name='classifiedSpecific'),\n path('account//classified//', views.subClassified, name=\"subClassified\"),\n\n re_path(r'^account/(?P\\d+)/personInfo/$', views.personInfo, name='PersonInfo'),\n re_path(r'^classify_test/$', views.classify_test, name='classify_test'),\n path('classify_img/', views.classify_img, name='classify_img'),\n \n re_path(r'^classify/$', views.classifyImage, name=\"classify\"),\n re_path(r'^saveImage/$', views.saveImage, name='saveImage'),\n\n path('account/createSubFolder', views.createSubFolder, name='createSubFolder'),\n path('account/changeSubFolder', views.changeSubFolder, name='changeSubFolder'),\n path('account/removeImage', views.removeImage, name='removeImage'),\n path('account/moveImage', views.moveImage, name='moveImage'),\n path('account/updateIntroduction', views.updateIntroduction, name='updateIntroduction'),\n path('account/getTypeDict', views.getTypeDict, name=\"getTypeDict\"),\n\n path('test/getRandomPhoto', views.getRandomPhoto, 
name=\"getRandomPhoto\"),\n]\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"496620741","text":"# Dependencies\nimport requests as req\n\nurl = \"https://api.nytimes.com/svc/search/v2/articlesearch.json?\"\napi_key = \"194b3fc29096414a94a1361bb983fb3b\"\n\n# Search for articles that mention granola\nq = \"granola\"\n\n# Build query URL\nquery = url + \"api-key=\" + api_key + \"&q=\" + q\n\n# Populate articles\narticles = req.get(query).json()\n\n# The \"response\" property in articles contains the actual articles\narticles_list = [article for article in articles[\"response\"][\"docs\"]]\n\nprint(\"Your Reading List:\")\nfor article in articles_list:\n print(article[\"web_url\"])\n","sub_path":"API/Python-API1-11-6-17/13-Stu_RetrieveArticles/NytApi.py","file_name":"NytApi.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360695218","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\n\nimport torch\n\nfrom pytorch3d import _C\n\n\ndef nn_points_idx(p1, p2, p2_normals=None) -> torch.Tensor:\n \"\"\"\n Compute the coordinates of nearest neighbors in pointcloud p2 to points in p1.\n Args:\n p1: FloatTensor of shape (N, P1, D) giving a batch of pointclouds each\n containing P1 points of dimension D.\n p2: FloatTensor of shape (N, P2, D) giving a batch of pointclouds each\n containing P2 points of dimension D.\n p2_normals: [optional] FloatTensor of shape (N, P2, D) giving\n normals for p2. Default: None.\n\n Returns:\n 3-element tuple containing\n\n - **p1_nn_points**: FloatTensor of shape (N, P1, D) where\n p1_neighbors[n, i] is the point in p2[n] which is\n the nearest neighbor to p1[n, i].\n - **p1_nn_idx**: LongTensor of shape (N, P1) giving the indices of\n the neighbors.\n - **p1_nn_normals**: Normal vectors for each point in p1_neighbors;\n only returned if p2_normals is passed\n else return [].\n \"\"\"\n N, P1, D = p1.shape\n with torch.no_grad():\n p1_nn_idx = _C.nn_points_idx(\n p1.contiguous(), p2.contiguous()\n ) # (N, P1)\n p1_nn_idx_expanded = p1_nn_idx.view(N, P1, 1).expand(N, P1, D)\n p1_nn_points = p2.gather(1, p1_nn_idx_expanded)\n if p2_normals is None:\n p1_nn_normals = []\n else:\n if p2_normals.shape != p2.shape:\n raise ValueError(\"p2_normals has incorrect shape.\")\n p1_nn_normals = p2_normals.gather(1, p1_nn_idx_expanded)\n\n return p1_nn_points, p1_nn_idx, p1_nn_normals\n","sub_path":"pytorch3d/ops/nearest_neighbor_points.py","file_name":"nearest_neighbor_points.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367410703","text":"from typing import Any\nfrom pakkr.returns._meta import _Meta\nfrom pakkr.returns._no_return import _NoReturn\nfrom pakkr.returns._return import _Return\nfrom pakkr.returns._return_type import _ReturnType\n\n\ndef returns(*args, **kwargs):\n \"\"\"Decorator to add the __pakkr_returns__ attribute to the object being decorated\"\"\"\n if not (args or kwargs):\n return_obj = _NoReturn()\n elif not args and kwargs:\n return_obj = _Meta(**kwargs)\n elif args:\n return_obj = _Return(args, _Meta(**kwargs) if kwargs else None)\n\n def decorated(obj):\n obj.__pakkr_returns__ = return_obj\n return obj\n return decorated\n\n\ndef collapse(returns):\n 
\"\"\"Collapse or roll-up a sequence of \"return\" types into one\"\"\"\n final_args = [Any]\n final_meta = {}\n\n for ret in returns:\n if isinstance(ret, _Meta):\n final_args = []\n final_meta.update(ret)\n elif isinstance(ret, _Return):\n final_args = ret.values\n final_meta.update(ret.meta or {})\n elif isinstance(ret, _NoReturn):\n final_args = []\n elif ret is Any:\n final_args = [Any]\n else:\n raise RuntimeError(\"Unexpected return type {}\".format(ret))\n\n if final_args:\n return _Return(final_args, _Meta(**final_meta) if final_meta else None)\n elif final_meta:\n return _Meta(**final_meta)\n else:\n return _NoReturn()\n","sub_path":"pakkr/returns/returns.py","file_name":"returns.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"160604552","text":"import os, shutil\nimport collector\n\ncwd = os.getcwd()\n\nignore_dirs = [\"__pycache__\", \"__MACOSX\"]\n\nprint(\"- [ Extension Collector ] -\")\next = collector.get_ext()\ndotext = '.' + ext\n\nignore_dirs.append(ext)\n\nif not os.path.exists(ext):\n os.makedirs(ext)\n \ndest_dir = os.path.join(cwd, ext)\n\nprint(\"__________\\n\")\nfor dir, subdirs, files in os.walk(cwd):\n subdirs[:] = [d for d in subdirs if d not in ignore_dirs]\n collector.check_dir(dir, dest_dir, files, dotext)\n\nshutil.make_archive(ext, 'zip', dest_dir)\n\nprint(\"Zip file '\" + ext + \".zip' created.\\n\")\n\n \n \n ","sub_path":"032-extension-collector/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638099707","text":"\"\"\"\nAll provided masking functions expect the complex valued stft signal as input.\nEach masking function should not take care of further convenience functions\nthan allowing arbitrary sensor_axis and any number of independent dimensions.\n\nOnly, when a multichannel signal is used to pool the power along channels,\nthe sensor_axis can be provided.\n\nAll other convenience should be dealt with from wrapper functions which possibly\ntake the masking function as a callback. If you want to use lists of arrays,\nwrite an appropriate wrapper function.\n\nIf desired, concatenation of *ins can be done in a decorator.\n\nWhen appropriate, functions assume that the target speaker is channel 0 and\nnoise is channel 1.\n\nOptional axis parameters are:\n * ``source_axis`` with default ``0``.\n * ``sensor_axis`` with default ``None``. If given, it is used for pooling.\n * ``frequency_axis`` with default ``-2``.\n * ``time_axis`` with default ``-1``.\n\nAll other axes are regarded as independent dimensions.\n\"\"\"\n\n# TODO: Migrate and remove this files:\n# TODO: - tests/speech_enhancement_test/test_merl_masks.py\n# TODO: - nt/speech_enhancement/merl_masks.py\n# TODO: - nt/speech_enhancement/mask_estimation.py\n# TODO: Add test-case for LorenzMask\n# CB: Eventuell einen Dekorator nutzen für force signal np.ndarray?\n# CB: Eventuell einen Dekorator nutzen für force signal.real.dtype == return.dtype?\n\nimport numpy as np\n\nEPS = 1e-18\n\n\ndef lorenz_mask(\n signal: np.ndarray,\n *,\n sensor_axis=None,\n axis=(-2, -1),\n lorenz_fraction: float=0.98,\n weight: float=0.999,\n) -> np.ndarray:\n \"\"\" Calculate softened mask according to Lorenz function criterion.\n\n To be precise, the lorenz_fraction is not actually a quantile\n although it is in the range [0, 1]. 
If it was the quantile fraction, it\n would the the fraction of the number of observations.\n\n Args:\n signal: Complex valued stft signal.\n sensor_axis:\n axis: time_axis and/or frequency_axis\n lorenz_fraction: Fraction of observations which are rated down\n weight: Governs the influence of the mask\n\n Returns:\n\n \"\"\"\n signal = np.asarray(signal)\n\n power = np.abs(signal)**2\n if sensor_axis is not None:\n power = power.sum(axis=sensor_axis, keepdims=True)\n\n if not isinstance(axis, (tuple, list)):\n axis = (axis,)\n\n # Only works, when last two dimensions are frequency and time.\n tmp_axis = tuple([-i - 1 for i in range(len(axis))])\n\n power = np.moveaxis(power, axis, tmp_axis)\n shape = power.shape\n working_shape = tuple([\n np.prod(shape[:-len(tmp_axis)], dtype=np.int64),\n np.prod(shape[-len(tmp_axis):]),\n ])\n\n power = np.reshape(power, working_shape)\n\n mask = np.zeros_like(power, dtype=power.real.dtype)\n\n def get_mask(power):\n sorted_power = np.sort(power, axis=None)[::-1]\n lorenz_function = np.cumsum(sorted_power) / np.sum(sorted_power)\n threshold = np.min(sorted_power[lorenz_function < lorenz_fraction])\n _mask = power > threshold\n return _mask\n\n for i in range(power.shape[0]):\n mask[i, :] = get_mask(power[i])\n\n mask = 0.5 + weight * (mask - 0.5)\n\n return np.moveaxis(mask.reshape(shape), tmp_axis, axis)\n\n\ndef quantil_mask(\n signal: np.ndarray,\n quantil=[0.1, -0.9],\n *,\n sensor_axis=None,\n axis=(-2),\n weight: float=0.999,\n) -> np.ndarray:\n \"\"\"\n\n Args:\n signal:\n quantil: pos for speech, negative for noise\n sensor_axis:\n axis: Suggestion: time axis, Alternative time and frequency axis\n weight:\n\n Returns:\n Mask of shape [*quantil.shape, *signal.shape]\n\n \"\"\"\n signal = np.abs(signal)\n\n if isinstance(quantil, (tuple, list)):\n return np.array([quantil_mask(signal=signal, sensor_axis=sensor_axis, axis=axis, quantil=q, weight=weight) for q in quantil])\n\n if sensor_axis is not None:\n signal = signal.sum(axis=sensor_axis, keepdims=True)\n\n if not isinstance(axis, (tuple, list)):\n axis = (axis,)\n\n # Convert signal to 2D with [independent, sample axis]\n tmp_axis = tuple([-i - 1 for i in range(len(axis))])\n signal = np.moveaxis(signal, axis, tmp_axis)\n shape = signal.shape\n working_shape = tuple(\n [np.prod(shape[:-len(tmp_axis)]), np.prod(shape[-len(tmp_axis):])])\n signal = np.reshape(signal, working_shape)\n\n if quantil >= 0:\n threshold = np.percentile(signal, q=(1 - quantil)*100, axis=-1)\n else:\n threshold = np.percentile(signal, q=abs(quantil)*100, axis=-1)\n\n mask = np.zeros_like(signal)\n for i in range(mask.shape[0]):\n if quantil >= 0:\n mask[i, :] = signal[i, :] > threshold[i]\n else:\n mask[i, :] = signal[i, :] < threshold[i]\n\n # Drop this line?\n mask = 0.5 + weight * (mask - 0.5)\n\n # Restore original shape\n mask = np.moveaxis(mask.reshape(shape), tmp_axis, axis)\n\n if sensor_axis is not None:\n mask = np.squeeze(mask, axis=sensor_axis)\n return mask\n","sub_path":"pb_chime5/speech_enhancement/mask_module.py","file_name":"mask_module.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"540809208","text":"def points_on_segments(segments):\n \"\"\"\n segments - list of segments\n example:\n [[2, 3], [2, 6], [1, 8]]\n\n возращает оптимальное число точек, которыми нужно покрыть все отрезки,\n чтобы каждый из отрезков содержал хотя бы одну точку\n \"\"\"\n assert isinstance(segments, list)\n assert 
len(segments) > 0\n assert len(segments[0]) == 2\n\n # отсортируем отрезки по левым концам\n segments = sorted(segments)\n\n points = []\n while len(segments) > 0:\n # первая точка - левый конец самого последнего отрезка\n points.append(segments[-1][0])\n\n # удаляем те отрезки, на которые попала первая точка\n segments = [i for i in segments if points[-1] not in range(i[0], i[1] + 1)]\n\n return points\n","sub_path":"points.py","file_name":"points.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"62161078","text":"from expyriment import design, control, stimuli\n\ncontrol.defaults.window_mode = True\ncontrol.defaults.window_size = [800,600]\ndesign.defaults.experiment_background_colour = (230,230,70)\n\nexp = design.Experiment(name=\"Cool Experiment\")\ncontrol.initialize(exp)\n\nblock_one = design.Block(name=\"Our only block\")\ntmp_trial = design.Trial()\n\ncross = stimuli.FixCross()\ncross.preload()\n\n# define stimulus positions\npositions = [-400, -200, 0, 200, 400]\n\n# go through all positions\nfor xpos in positions:\n # create circle accordingly\n stim = stimuli.Circle(radius=25, colour=(0,0,0),position=[xpos,0])\n stim.preload()\n tmp_trial.add_stimulus(stim)\n tmp_trial.add_stimulus(cross)\n block_one.add_trial(tmp_trial)\n tmp_trial = tmp_trial.copy()\n tmp_trial.clear_stimuli()\nexp.add_block(block_one)\ncontrol.start()\n\nfor b in exp.blocks:\n for t in b.trials:\n t.stimuli[0].present(clear=True, update=False)\n t.stimuli[1].present(clear=False, update=True)\n\n exp.keyboard.wait()\n\ncontrol.end()\n","sub_path":"2018/12/lecture/expyriment_lecture.py","file_name":"expyriment_lecture.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"324409429","text":"# url+视图函数\nfrom flask import Blueprint, render_template, request, redirect,jsonify\nfrom .models import *\n\nblog = Blueprint('blog', __name__)\nadmin = Blueprint('admin', __name__)\n\n\n@blog.route('/')\ndef home():\n return 'HOME'\n\n##################################################################################\n##后台\n##################################################################################\n# 管理首页\n@admin.route('/admin/')\ndef amdin_index():\n return render_template('admin/index.html')\n\n\n# 文章管理\n@admin.route('/article/')\ndef amdin_article():\n return redirect('/article/1/')\n\n@admin.route('/article//')\ndef amdin_article_page(page=None):\n per_page = 2\n if not page:\n page = 1\n articles = Article.query.all()\n articles = articles[(page-1)*per_page:page*per_page]\n my_paginate = Article.query.order_by('id').paginate(page=page,per_page=per_page)\n\n return render_template('admin/article.html',articles=articles,my_paginate=my_paginate)\n\n# 删除文章\n@admin.route('/article/delete/',methods=['POST'])\ndef article_delete():\n id = request.form.get('id')\n article = Article.query.get(id)\n if article:\n try:\n db.session.delete(article)\n db.session.commit()\n code = 1\n msg = 'delete success'\n except:\n db.session.rollback()\n db.session.flush()\n code = 0\n msg = 'delete fail'\n\n else:\n code = 0\n msg = 'not id'\n data = {\n 'code': code,\n 'msg': msg,\n }\n print(data)\n return jsonify(data)\n\n# 修改文章\n@admin.route('/update/article/',methods=['GET','POST'])\ndef update_article():\n if request.method == 'GET':\n id = request.args.get('id')\n article = Article.query.get(id)\n categorys = Category.query.all()\n if 
article:\n return render_template('admin/update-article.html',article=article,categorys=categorys)\n return 'fail'\n else:\n id = request.form.get('id')\n print(id)\n article = Article.query.get(id)\n if article:\n try:\n article.title = request.form.get('title')\n article.content = request.form.get('content')\n article.categoryid = request.form.get('category')\n article.describe = request.form.get('describe')\n article.keywords = request.form.get('keywords')\n article.titlepic = request.form.get('titlepic')\n article.visibility = request.form.get('visibility')\n db.session.commit()\n return redirect('/article/')\n except:\n db.session.rollback()\n db.session.flush()\n return 'update fail'\n\n else:\n return 'not article'\n\n# 增加文章链接\n@admin.route('/addarticle/')\ndef amdin_addarticle():\n categorys = Category.query.all()\n data = {\n 'categorys':categorys\n }\n return render_template('admin/add-article.html',data=data)\n\n# 增加文章\n@admin.route('/Article/add',methods=['POST','GET'])\ndef amdin_articleadd():\n if request.method == 'POST':\n try:\n title = request.form.get('title')\n print(title)\n user = Article.query.filter_by(title=title).first()\n if user:\n print(user.all())\n return '标题重复'\n content = request.form.get('content')\n keywords = request.form.get('keywords')\n describe = request.form.get('describe')\n category = request.form.get('category')\n tags = request.form.get('tags')\n # tags = request.form.get('tags')\n visibility = request.form.get('visibility')\n\n \n article = Article()\n article.title = title\n article.content = content\n article.keywords = keywords\n article.describe = describe\n article.categoryid = category\n article.tags = tags\n article.visibility = visibility\n db.session.add(article)\n db.session.commit()\n return redirect('/article/')\n except:\n # 回滚\n db.session.rollback()\n db.session.flush()\n return 'No'\n return '请求方式错误'\n\n\n# 栏目管理\n@admin.route('/category/')\ndef amdin_category():\n categorys = Category.query.all()\n data = {\n 'categorys':categorys,\n }\n return render_template('admin/category.html',data=data)\n\n# 修改栏目\n@admin.route('/update/category/',methods=['GET','POST'])\ndef update_category():\n if request.method == 'GET':\n id = request.args.get('id')\n category = Category.query.get(id)\n if category:\n categorys = Category.query.filter(Category.id != id)\n data = {\n 'categorys': categorys,\n 'category':category,\n }\n return render_template('admin/update-category.html',data=data)\n return 'fail'\n else:\n id = request.form.get('id')\n print(id)\n category = Category.query.get(id)\n if category:\n try:\n category.name = request.form.get('name')\n category.keywords = request.form.get('keywords')\n category.fid = request.form.get('fid')\n category.describe = request.form.get('describe')\n category.alias = request.form.get('alias')\n db.session.commit()\n return redirect('/category/')\n except:\n db.session.rollback()\n db.session.flush()\n return 'update fail'\n\n else:\n return 'not category'\n\n\n# 增加栏目\n@admin.route('/category/add/',methods=['GET','POST'])\ndef amdin_category_add():\n # 得到新栏目的数据\n # 名字\n name = request.form.get('name')\n category = Category.query.filter_by(name=name).first()\n if category:\n return '栏目名重复'\n # 别名\n alias = request.form.get('alias')\n # 父节点\n fid = request.form.get('fid')\n # 关键字\n keywords = request.form.get('keywords')\n # 描述\n describe = request.form.get('describe')\n try:\n category = Category()\n category.name = name\n category.alias = alias\n category.fid = fid\n category.keywords = keywords\n 
category.describe = describe\n db.session.add(category)\n db.session.commit()\n except:\n # 回滚\n db.session.rollback()\n db.session.flush()\n return 'fail'\n\n return redirect('/category/')\n\n# 删除栏目\n@admin.route('/category/delete/',methods=['POST'])\ndef amdin_category_delete():\n id = request.form.get('id')\n category = Category.query.get(id)\n if category:\n try:\n db.session.delete(category)\n db.session.commit()\n code = 1\n msg = 'delete success'\n except:\n db.session.rollback()\n db.session.flush()\n code = 0\n msg = 'delete fail'\n\n else:\n code = 0\n msg = 'not id'\n data = {\n 'code':code,\n 'msg':msg,\n }\n print(data)\n return jsonify(data)\n # return redirect('/category/')\n# 公告管理\n@admin.route('/notice/')\ndef amdin_notice():\n return render_template('admin/notice.html')\n\n# 增加公告\n@admin.route('/addnotice/')\ndef amdin_addnotice():\n return render_template('admin/add-notice.html')\n\n# 评论管理\n@admin.route('/comment/')\ndef amdin_comment():\n return render_template('admin/comment.html')\n\n# 用户管理\n@admin.route('/manageuser/')\ndef amdin_manageuser():\n return render_template('admin/manage-user.html')\n\n# 管理登录日志\n@admin.route('/loginlog/')\ndef amdin_loginlog():\n return render_template('admin/loginlog.html')\n\n# 基本设置\n@admin.route('/setting/')\ndef amdin_setting():\n return render_template('admin/setting.html')\n\n# 阅读设置\n@admin.route('/readset/')\ndef amdin_readset():\n return render_template('admin/readset.html')\n\n# 友情链接\n@admin.route('/flink/')\ndef amdin_flink():\n return render_template('admin/flink.html')\n\n# 增加友情链接\n@admin.route('/addflink/')\ndef amdin_addflink():\n return render_template('admin/add-flink.html')\n\n\n##################################################################################\n##前台\n##################################################################################\n# 网站首页\n@blog.route('/index/')\ndef index():\n try:\n # 显示所有分类\n categorys = Category.query.filter()\n # 显示所有文章\n articles = Article.query.all()\n # 汇总\n # c_count = []\n # for category in categorys:\n # t = category.articles.count()\n # c_count.append(t)\n data = {\n 'categorys': categorys,\n 'articles': articles,\n # 'count':c_count,\n }\n except:\n return 'fail'\n return render_template('blog/index.html',data=data)\n\n# 分类\n@blog.route('/blogcategory/')\ndef blog_category():\n # per_page = 1\n id = request.args.get('id')\n # page = request.args.get('page')\n # if not page:\n # page = 1\n if not id:\n # articles = Article.query.order_by('id').paginate(page=page,per_page=per_page)\n articles = Article.query.all()\n\n else:\n # articles = Article.query.filter_by(categoryid=id).order_by('id').paginate(page=page,per_page=per_page)\n articles = Article.query.filter_by(categoryid=id).all()\n categorys = Category.query.all()\n\n data = {\n 'articles':articles,\n 'categorys':categorys\n }\n # return jsonify(data)\n return render_template('blog/list.html',data=data)\n\n# 详情\n@blog.route('/blogdetail/')\ndef blog_detail():\n id = request.args.get('id')\n if not id:\n id = 1\n article = Article.query.get(id)\n categorys = Category.query.all()\n data = {\n 'article': article,\n 'category':article.category.name,\n 'categorys':categorys,\n }\n return render_template('blog/info.html',data=data)\n\n# 我的相册\n@blog.route('/share/')\ndef share():\n return render_template('blog/share.html')\n\n# 我的日记\n@blog.route('/list/')\ndef list():\n return redirect('/blogcategory/')\n\n# 关于我\n@blog.route('/about/')\ndef about():\n return render_template('blog/about.html')\n\n# 留言\n@blog.route('/gbook/')\ndef 
gbook():\n return render_template('blog/gbook.html')\n\n# 内容页\n@blog.route('/info/')\ndef info():\n return redirect('/blogdetail/')\n\n# 详情页\n@blog.route('/infopic/')\ndef infopic():\n return render_template('blog/infopic.html')","sub_path":"FlaskBlogPro/App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"49791739","text":"# Copyright Ruben Decrop\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport csv\nfrom django.http import HttpResponse\nfrom binascii import a2b_base64\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import BaseRenderer, JSONRenderer\n\nfrom .models import BelPlayer, FidePlayer, CdSubscription\nfrom .serializers import (\n SubscriptionSerializer,\n BelplayerSerializer,\n FideplayerSerializer,\n PhotoSerializer,\n)\nfrom .mail import sendconfirmationmail\n\ndef subscriptionpage(request):\n return render(request, 'cd_subscription/subscriptionpage.html')\n\ndef participantspage(request):\n return render(request, 'cd_subscription/participants.html')\n\n@login_required\ndef csvparticipants(request):\n \"\"\"\n cretae a csv file of all participants\n :param request:\n :return:\n \"\"\"\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"participants.csv\"'\n\n fields = [\n 'badgelength',\n 'birthdate',\n 'category',\n 'chesstitle',\n 'confirmed',\n 'emailparent',\n 'emailplayer',\n 'federation',\n 'fidenation',\n 'fiderating',\n 'firstname',\n 'fullnameattendant',\n 'fullnameparent',\n 'gender',\n 'id_club',\n 'id_fide',\n 'id_national',\n 'locale',\n 'mobileattendant',\n 'mobileparent',\n 'mobileplayer',\n 'name',\n 'nationality',\n 'natrating',\n 'payamount',\n 'paydate',\n 'paymessage',\n 'rating',\n 'remarks',\n 'custom1',\n 'custom2',\n 'custom3',\n ]\n writer = csv.writer(response)\n writer.writerow(fields)\n for s in CdSubscription.objects.all():\n values = [str(getattr(s, f)) for f in fields]\n writer.writerow(values)\n return response\n\nclass ImageRenderer(BaseRenderer):\n media_type = 'image/*'\n format = 'jpg'\n charset = None\n render_style = 'binary'\n\n def render(self, data, media_type=None, renderer_context=None):\n return data\n\n@api_view(['POST'])\n@csrf_exempt\ndef subscription_confirmation(request, pk):\n\n try:\n subscription = CdSubscription.objects.get(pk=pk)\n except CdSubscription.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'POST':\n subscription.confirmed = True\n subscription.save()\n sendconfirmationmail(subscription)\n return Response(status=status.HTTP_200_OK)\n\n@api_view(['GET', 'PUT', 'DELETE'])\n@csrf_exempt\ndef subscription_detail(request, pk):\n\n try:\n cs = CdSubscription.objects.get(pk=pk)\n except CdSubscription.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n ss = SubscriptionSerializer(data=request.data)\n return Response(ss.data)\n\n if request.method == 'PUT':\n ss = SubscriptionSerializer(data=request.data)\n if ss.is_valid():\n cs.category = ss.validated_data.get('category')\n cs.emailparent = ss.validated_data.get('emailparent') or ''\n cs.emailplayer = 
ss.validated_data.get('emailplayer') or ''\n cs.fullnameattendant = ss.validated_data.get('fullnameattendant') or ''\n cs.fullnameparent = ss.validated_data.get('fullnameparent') or ''\n cs.mobileattendant = ss.validated_data.get('mobileattendant') or ''\n cs.mobileparent = ss.validated_data.get('mobileparent') or ''\n cs.mobileplayer = ss.validated_data.get('mobileplayer') or ''\n cs.save()\n return Response({'id': cs.id, 'paymessage': cs.paymessage},\n status=status.HTTP_200_OK)\n return Response(ss.data)\n return Response(ss.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == 'DELETE':\n cs.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n@api_view(['POST', 'GET'])\n@csrf_exempt\ndef subscription_list(request):\n\n pass\n\n if request.method == 'GET':\n subscriptions = CdSubscription.objects.all()\n ss = SubscriptionSerializer(subscriptions, many=True)\n return Response(ss.data)\n\n if request.method == 'POST':\n\n ss = SubscriptionSerializer(data=request.data)\n if ss.is_valid():\n id_national = ss.validated_data.get('id_national')\n try:\n bp = BelPlayer.objects.get(id_national=id_national)\n except BelPlayer.DoesNotExist:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n if bp.id_fide:\n try:\n fp = FidePlayer.objects.get(id_fide=bp.id_fide)\n except:\n fp = None\n try:\n cs = CdSubscription.objects.get(id_national=id_national)\n except CdSubscription.DoesNotExist:\n cs = CdSubscription()\n cs.birthdate = bp.birthdate\n cs.category = ss.validated_data.get('category')\n cs.chesstitle = bp.chesstitle\n cs.emailparent = ss.validated_data.get('emailparent') or ''\n cs.emailplayer = ss.validated_data.get('emailplayer') or ''\n cs.federation = bp.federation\n cs.fidenation = fp.fidenation if fp else ''\n cs.fiderating = fp.fiderating if fp else 0\n cs.firstname = bp.firstname\n cs.fullnameattendant = ss.validated_data.get('fullnameattendant') or ''\n cs.fullnameparent = ss.validated_data.get('fullnameparent') or ''\n cs.gender = bp.gender\n cs.id_club = bp.id_club\n cs.id_fide = bp.id_fide or ''\n cs.id_national = bp.id_national\n cs.locale = request.LANGUAGE_CODE.lower()[:2]\n cs.mobileattendant = ss.validated_data.get('mobileattendant') or ''\n cs.mobileparent = ss.validated_data.get('mobileparent') or ''\n cs.mobileplayer = ss.validated_data.get('mobileplayer') or ''\n cs.name = bp.name\n cs.natrating = bp.natrating\n cs.nationality = bp.nationality\n cs.payamount = 0\n cs.rating = max(cs.natrating, cs.fiderating)\n try:\n cs.save()\n except Exception as e:\n log.exception('Saving to db')\n nr = 201700000 + cs.pk\n rm1 = cs.pk // 1000\n rm2 = cs.pk % 1000\n rm3 = nr % 97 or 97\n cs.paymessage = \"+++020/170{0:01d}/{1:03d}{2:02d}+++\".format(\n rm1, rm2, rm3)\n cs.save()\n return Response({'id': cs.id, 'paymessage': cs.paymessage},\n status=status.HTTP_201_CREATED)\n return Response(ss.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'POST'])\n@renderer_classes((ImageRenderer, JSONRenderer))\n@csrf_exempt\ndef subscription_photo(request, pk):\n\n try:\n subscription = CdSubscription.objects.get(pk=pk)\n except CdSubscription.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n return Response(content_type=subscription.badgemimetype,\n data=subscription.badgeimage)\n\n if request.method == 'POST':\n ps = PhotoSerializer(data=request.data)\n if ps.is_valid():\n try:\n header, data = ps.validated_data.get('imagedata').split(',')\n subscription.badgemimetype = 
header.split(':')[1].split(';')[0]\n subscription.badgeimage = a2b_base64(data)\n subscription.badgelength = len(subscription.badgeimage)\n subscription.save()\n return Response(status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\ndef belplayer(request, id_national):\n\n try:\n while id_national.startswith('0'):\n id_national = id_national[1:]\n bp = BelPlayer.objects.get(id_national=id_national)\n except BelPlayer.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n if bp.birthdate.year < 1997:\n return Response(status=499)\n bp_serializer = BelplayerSerializer(bp)\n responsedict = dict(bp_serializer.data)\n try:\n cs = CdSubscription.objects.get(id_national=id_national)\n if cs.confirmed:\n responsedict['alreadysubscribed'] = True\n except:\n pass\n return Response(responsedict)\n\n@api_view(['GET'])\ndef fideplayer(request, id_fide):\n\n try:\n fp = FidePlayer.objects.get(id_fide=id_fide)\n except FidePlayer.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n fp_serializer = FideplayerSerializer(fp)\n return Response(fp_serializer.data)\n\n@api_view(['GET'])\ndef participants(request, cat):\n players = CdSubscription.objects.filter(category=cat).order_by('-rating')\n data = [{\n 'id': p.id,\n 'category': p.category,\n 'name': p.name,\n 'firstname': p.firstname,\n 'rating': p.rating,\n 'id_club': p.id_club,\n 'fidenation': p.fidenation,\n 'confirmed': p.confirmed,\n } for p in players]\n return Response(data)","sub_path":"cd_subscription/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"5793720","text":"#This file stores things common to / required by\n#all cards.\n\n\n#This class stores operating level commands common to all cards.\n#These commands are on demand and do not contain timing information\n#like experiment level commands.\n#\n#Commands 0-63 (0x00-0x3F) are reserved for this purpose.\n#Organization: (0-15) (0x0~) -> diagnose board (blink led, get amount of free ram, get uuid etc.)\n# (16-31)(0x1~) -> experiment stack operations (load ops, clear ops, start experiment etc.)\n# (32-47)(0x2~) -> TBD\n# (48-63)(0x3~) -> TBD \n\n\n#Current format is:\n#|(operation byte)|(parameter byte 1)|(parameter byte 2)| ... 
(max 30 parameter bytes)\n#TODO:block transfer if needed\n\nimport time\nimport utils\nutil = utils.utils()\n\nclass common_card:\n\t#################Define command constants######################################\n\t# #\n\t# #\n\n\tdef __init__(self, controller, id, address):\n\t\t\n\t\tself.id = id\n\t\tself.address = address\n\t\tself.controller = controller\n\n\t\t#diagnostics##############################################################################\n\t\t#Blink indicator LED (useful for finding card in rack)\n\t\t#{input:4 bytes; output:0 bytes}\n\t\t#Input : |{2 bytes}(number of blinks)|{2 bytes}(duration of blink in ms)|\n\t\t#Output: nothing\n\t\tself.instr_util_blink_led = 0x00\n \t\n\t\t#Return the card's uuid\n\t\t#{input:0 bytes; output:8 bytes}\n\t\t#Input : nothing\n\t\t#Output: |{2 bytes}(card type)|{6 bytes}(Serial number)|\n\t\tself.instr_util_get_uuid = 0x01\n\n\t\t#Return the amount of free ram\n\t\t#{input:0 bytes; output:2 bytes}\n\t\t#Input : nothing\n\t\t#Output: |{2 bytes}(Amount of free ram in bytes)|\n\t\tself.instr_util_free_ram = 0x02\n\t\t\n\t\t#Benchmark communication\n\t\t#{input: (0-30) bytes; output:(0-31) bytes}\n\t\t#Input : |{1 byte}(number of bytes to send back (0-30))|{0-30}(variable length of bytes to recieve)|\n\t\t#Output: |{0-30 bytes}(variable amount of bytes to send back)|\n\t\tself.instr_util_bench_com = 0x03\n\n\t\t#Digital pulse for checking timing\n\t\t#{input: (0-30) bytes; output:(0) bytes}\n\t\t#Input : |{2 bytes}(digital output pin number)|{2 bytes}(number of pulses to send)|{2 bytes}(delay between pulses)|\n\t\t#Output: nothing\n\t\tself.instr_util_check_pulses = 0x04\n\t\t\n\t\t#Experiment stack operations#############################################################\n\n\t\t#Run experiment stack\n\t\t#{input:1 bytes; output:0 bytes}\n\t\t#Input : |{1 byte}(Which interupt to fire on)|\n\t\t#Output: nothing\n\t\tself.instr_exp_run = 0x10\n\n\t\t#Run experiment stack at slower speed for debugging\n\t\t#TODO: prevent overflow, verify division is correct\n\t\t#{input:1 bytes; output:0 bytes}\n\t\t#Input : |{1 byte}(time divider)|\n\t\t#Output: nothing\n\t\tself.instr_exp_run_slow = 0x11\n\t\t\n\t\t#Load operation into experiment stack (2 byte ms time counter = 65 seconds max time)\n\t\t#\n\t\t#On the card these operations are performed and deleted to save memory.\n\t\t#These experiment operations could be loaded while the experiment is running if the \n\t\t#card is setup to block communication during operations. 
If the exp_op stack can be supplied faster\n\t\t#than it is consumed and the data stack is consummed fast enough, this allows continous operation.\n\t\t#\n\t\t#{input:(10-30) bytes; output:1 byte}\n\t\t#Input : |{1 byte}(exp operation code)|{2 bytes}(start time in ms)|{2 bytes}(number of repetitions)|{2 bytes}(delay between repetitions in ms)|{0-9 bytes}(exp op args)|\n\t\t#Output: |{3 bytes}(bytes of free ram left {minus 64 to protect stack})|\n\t\tself.instr_exp_load_op = 0x12\n\t\t\n\t\t#Run exp operation on demand (2 byte ms time counter = 65 seconds max time)\n\t\t#{input:(10-30) bytes; output:(0-31) bytes}\n\t\t#Input : |{1 byte}(exp operation code)|{0-23 bytes}(exp op args)|\n\t\t#Output: |{0-31 bytes}(exp operation return values)|\n\t\tself.instr_exp_run_op = 0x13\n\n\t\t#Benchmark exp operation on demand, measure execution time in microseconds, excluding communication time\n\t\t#operations taking longer than 1 millisecond could cause issues with timing\n\t\t#{input:(10-30) bytes; output:2 bytes}\n\t\t#Input : |{1 byte}(exp operation code)|{0-23 bytes}(exp op args)|\n\t\t#Output: |{4 bytes}(exp operation runtime in microseconds)|\n\t\tself.instr_exp_bench_op = 0x14\n\t\t\n\t\t#Calculate checksum of experiment stack\n\t\t#{input: 0 bytes, output : 1 byte}\n\t\t#Input : nothing\n\t\t#Output: |{1 byte}(modulus checksum of all bytes in stack)|\n\t\tself.instr_exp_check = 0x15\n\n\t\t#Clear experiment stack\n\t\t#{input: 0 bytes, output : 0 bytes}\n\t\t#Input : nothing\n\t\t#Output: nothing\n\t\tself.instr_exp_clear = 0x16\n\n\t\t#Get row of experiment data off data stack\n\t\t#\n\t\t#On the card, the data block is deleted after being sent to conserve memory.\n\t\t#This data can be retrieved while the experiment is running if the \n\t\t#card is setup to block communication during operations. 
If the exp_op stack can be supplied faster\n\t\t#than it is consumed and the data stack is consumed faster then the card produces data, this allows continous experiment operation.\n\t\t#\n\t\t#{input: 0 bytes, output : 0 bytes}\n\t\t#Input : nothing\n\t\t#Output: |{1 byte}(stack empty indicator: 0x01->data, 0x00-> empty)|{1 byte}(exp level function code that this data came from)|{0-2 bytes}(time stamp of data)|{0-16 bytes}(data)|\n\t\tself.instr_exp_get = 0x17\n\t\n\t# #\n\t# #\n\t#################Define command constants######################################\n\t\n\n\n\n\t#################Define utility functions based on definitions above###########\n\t# #\n\t# #\n\t\n\t#Blink led variable number of times to \n\tdef util_blink_led(self,number_of_blinks,duration_of_blink):\n\t\tblock = ([self.instr_util_blink_led] + util.int_to_bytes(number_of_blinks) + util.int_to_bytes(duration_of_blink))\n\t\tself.controller.write_block(self.address,block);\n\n\t#Get the uuid of the card\n\tdef util_get_uuid(self):\n\t\treturn self.controller.read_block(self.address, self.instr_util_get_uuid)[0:8]\n\t\n\t#Get the amount of free ram in the card in bytes\n\tdef util_free_ram(self):\n\t\tmessage_block = self.controller.read_block(self.address, self.instr_util_free_ram)\n\t\tbytes = message_block[0:2]\n\t\treturn util.bytes_to_unsigned_int(bytes)\n\t\t\n\t#Send timed pulses to specific pin to test clock drift etc.\n\tdef util_check_pulses(self,pin_number, number_of_pulses,duration_of_pulse):\n\t\tblock = ([self.instr_util_check_pulses] + util.int_to_bytes(pin_number) + util.int_to_bytes(number_of_pulses) + util.int_to_bytes(duration_of_pulse))\n\t\tself.controller.write_block(self.address,block);\n\t\n\t\n\t# #\n\t# #\t\t\n\t#################Define utility functions based on definition above############\n\t\n\n\n\n\t#######Define standard experiment functions based on definitions above#########\n\t# #\n\t# #\n\n\t#Load experiment operation into card memory\n\tdef exp_load_op(self, operation):\n\t\tblock = ([self.instr_exp_load_op] + operation)\n\t\tself.controller.write_block(self.address,block)\n\n\t#Place card into experiment mode\n\t#Once in this mode, the card will wait for an interupt to start executing intructions\n\tdef exp_run(self):\n\t\tblock = [self.instr_exp_run]\n\t\tself.controller.write_block(self.address,block)\n\n\t#Requests one datapoint off the card data stack\n\tdef exp_get(self):\n\t\tmessage_block = self.controller.read_block(self.address, self.instr_exp_get)\n\t\treturn message_block[0:9]\n\n\t#Get all data from card\n\tdef exp_get_all_data(self):\n\t\tgot_all_data = False\n\t\tall_data = []\n\t\t#poll data array until empty\n\t\twhile(not got_all_data):\n\t\t\ttime.sleep(0.01)\n\t\t\tblock = self.exp_get()\n\t\t\t#0x00 indicates data array is empty\n\t\t\tif(block[0] == 0x00):\n\t\t\t\tgot_all_data = True\n\t\t\telse:\n\t\t\t\tall_data.append(block)\n\t\treturn all_data\t\t\n\t# #\n\t# #\n\t#######Define standard experiment functions based on definitions above#########\n\n\n\t\n\t\t\t\t\n","sub_path":"oldcode/i2cMaster/cards/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":8480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"329287737","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\n\nfrom frota.enums import Situacao\nfrom datetime import datetime\n\nfrom frota.models import Veiculo, VeiculoCombustivel, VeiculoCor, VeiculoModelo, VeiculoMarca, VeiculoTipo\nfrom frota.forms import 
VeiculoForm, VeiculoCombustivelForm, VeiculoCorForm, VeiculoModeloForm, VeiculoMarcaForm\nfrom frota.forms import ViaturaForm, ViagemAgendamentoForm\nfrom frota.models import Viatura, ViagemAgendamento\n\n#admin.site.register(VeiculoMarca)\n#admin.site.register(VeiculoModelo)\n#admin.site.register(VeiculoCor)\n#admin.site.register(VeiculoCombustivel)\n#admin.site.register(Veiculo)\n\n###############\n### VEICULO ###\n###############\n\nadmin.site.register(VeiculoTipo)\n\nclass VeiculoModeloAdmin(admin.ModelAdmin):\n form = VeiculoModeloForm\n\nadmin.site.register(VeiculoModelo, VeiculoModeloAdmin)\n\nclass VeiculoCorAdmin(admin.ModelAdmin):\n form = VeiculoCorForm\n\nadmin.site.register(VeiculoCor, VeiculoCorAdmin)\n\nclass VeiculoCombustivelAdmin(admin.ModelAdmin):\n form = VeiculoCombustivelForm\n\nadmin.site.register(VeiculoCombustivel, VeiculoCombustivelAdmin)\n\nclass VeiculoMarcaAdmin(admin.ModelAdmin):\n form = VeiculoMarcaForm\n\nadmin.site.register(VeiculoMarca, VeiculoMarcaAdmin)\n\nclass VeiculoAdmin(admin.ModelAdmin):\n form = VeiculoForm\n search_fields = ('modelo__nome', 'placa', 'cor__nome')\n list_display = ('editar', 'modelo', 'placa', 'lotacao', 'cor')\n fieldsets = [\n (None, {'fields': ('modelo','cor', 'ano_fabric', 'placa', 'odometro', ('chassi', 'renavam'), 'lotacao', 'consumo_medio', 'potencia', 'cilindrada',\n 'combustiveis', 'capacidade_tanque','capacidade_gnv', 'obs' \n )}),\n ]\n def editar(self, obj):\n return ''\n editar.allow_tags = True\n editar.short_description = ''\nadmin.site.register(Veiculo, VeiculoAdmin)\n\n###############\n### VIATURA ###\n###############\n\nclass ViaturaAdmin(VeiculoAdmin):\n form = ViaturaForm\n editForm = False # utilizado para controlar as apresentações dos campos que não são editáveis\n\n search_fields = ('modelo__nome', 'placa', 'cor__nome')\n list_display = ('icone_editar', 'modelo', 'placa', 'lotacao', 'get_combustiveis', 'cor', 'tipo', 'status',)\n fieldsets = [\n (None, {'fields': ('tipo', 'modelo','cor', 'ano_fabric',\n 'placa', 'lotacao', 'odometro', ('chassi', 'renavam'), 'consumo_medio', 'potencia', 'cilindrada',\n 'combustiveis', 'capacidade_tanque','capacidade_gnv', 'obs' \n )}),\n ]\n \n def queryset(self, request):\n return Viatura.objects.all() \n \n def editavel(self, request):\n url = request.META['PATH_INFO']\n info = self.model._meta.app_label, self.model._meta.module_name\n if '%s/%s/add' % info not in url:\n self.editForm = True\n else:\n self.editForm = False\n \n def get_form(self, request, obj=None, **kwargs):\n self.editavel(request)\n form = super(ViaturaAdmin,self).get_form(request, obj=None, **kwargs)\n form.base_fields['status'].widget.attrs['disabled'] = 'disabled'\n \n if self.editForm:\n form.base_fields['odometro'].widget.attrs['readonly'] = 'True'\n form.base_fields['chassi'].widget.attrs['readonly'] = 'True'\n form.base_fields['renavam'].widget.attrs['readonly'] = 'True'\n form.base_fields['capacidade_tanque'].widget.attrs['readonly'] = 'True'\n else:\n if 'disabled' in form.base_fields['odometro'].widget.attrs:\n form.base_fields['odometro'].widget.attrs.pop('readonly')\n\n return form\n \n def icone_editar(self, obj):\n return ''\n icone_editar.allow_tags = True\n icone_editar.short_description = ''\n icone_editar.attrs = {'width': '18px'}\n \nadmin.site.register(Viatura, ViaturaAdmin)\n\nclass ViagemAgendamentoAdmin(admin.ModelAdmin):\n form = ViagemAgendamentoForm\n search_fields = ['solicitante__nome']\n list_display = ['icone_editar', 'icone_validar', 'get_data_saida', 'get_data_chegada', 
'solicitante']\n fieldsets = [\n (None, {'fields': ('tipo_viatura', 'solicitante', ('data_saida', 'data_chegada'), 'objetivo')}),\n ]\n \n def icone_editar(self, obj):\n if obj.status == Situacao.PENDENTE:\n return ''\n else:\n return '' % (obj.id)\n icone_editar.allow_tags = True\n icone_editar.short_description = ''\n icone_editar.attrs = {'width': '18px'}\n \n def get_data_chegada(self, obj):\n return obj.data_chegada.strftime(\"%d de %b. de %Y às %H:%M:%S\")\n get_data_chegada.allow_tags = True\n get_data_chegada.admin_order_field = 'data_chegada'\n get_data_chegada.short_description = 'Data de Chegada'\n \n def get_data_saida(self, obj):\n return obj.data_saida.strftime(\"%d de %b. de %Y às %H:%M:%S\")\n get_data_saida.allow_tags = True\n get_data_saida.admin_order_field = 'data_saida'\n get_data_saida.short_description = 'Data de Saída'\n \n def icone_validar(self, obj):\n if obj.status == Situacao.PENDENTE:\n if obj.data_saida > datetime.now():\n return '' % (obj.id)\n else:\n return ''\n elif obj.status == Situacao.DEFERIDA:\n return ''\n elif obj.status == Situacao.INDEFERIDA:\n return ''\n else:\n return ''\n icone_validar.allow_tags = True\n icone_validar.short_description = ''\n icone_validar.attrs = {'width': '18px'}\n\nadmin.site.register(ViagemAgendamento, ViagemAgendamentoAdmin)","sub_path":"stunat/frota/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"102094594","text":"#!/usr/bin/env python3\n# Take a list, say for example this one:\n# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n# and write a program that prints out all the elements of the list that are less than 5.\n\n# Extras:\n\n# Instead of printing the elements one by one, make a new list that has all the elements less than 5 from this list in it and print out this new list.\n# Write this in one line of Python.\n# Ask the user for a number and return a list that contains only elements from the original list a that are smaller than that number given by the user.\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n\n# Not as a one liner\ndef extra1():\n global a\n less_than_five_list = []\n for item in a:\n if item < 5:\n less_than_five_list.append(item)\n print(f'Extra 1 - These numbers are less than 5: {less_than_five_list}')\n\n\n# as a one liner\ndef extra2():\n print('Extra 2 - These numbers are less than 5:',[i for i in [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] if i < 5])\n\n\ndef extra3():\n global a\n less_than_five_list = []\n number = int(input('\\nPick a number: \\n'))\n for item in a:\n if item < number:\n less_than_five_list.append(item)\n print(f'Extra 3 - These numbers are less than {number}: {less_than_five_list}')\n\n\ndef main():\n global a\n for item in a:\n if item < 5:\n print(f'{item} is less than 5')\n extra1()\n extra2()\n extra3()\n\nif __name__ == '__main__':\n main()","sub_path":"ex_3.py","file_name":"ex_3.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334987438","text":"#usr/bin/python\n# -*- coding: UTF-8 -*-\nimport frappe\nimport xml.etree.ElementTree as ET\nimport os\nfrom datetime import datetime\nimport conf\nimport logging\n\nSYNC_DATE = conf.DATE_TO_SYNC if not conf.DEFAULT_SYN_DATE else datetime.now().strftime('%Y%m%d')\n\n\ndic_oprt = {'1':\"自营\",'3':\"联营\"}\ndic_mang = {'1':\"单品\", '2':\"金额\", '3':\"售价金额\"}\ndic_merch = {'1':\"标准\",'2':\"AOC\", 
'3':\"耗材\",'4':\"生鲜原材料\",'5':\"生鲜\",'6':\"服务\",'7':\"包装\"}\n# dic_tax = {'0.17':\"销项税%17-dmall\",'0.13':\"销项税%13-dmall\",'0.07':\"销项税%7-dmall\"}\n\n\ndict_store_item={\n 'store_id':'OrgNO',\n 'item_code':'MerchID',\n # 'category':'ClsCode',\n 'short_name':'SimpleName',\n # 'operation_mode':'OperationMode',\n # 'management_style':'ManagementStyle',\n # 'high_stock_days':'HighStockDays' ,\n # 'safe_stock_days':'SafeStockDays',\n 'first_purchase_date':'FirstPurchaseDate',\n 'last_purchase_date':'LastPurchaseDate',\n # 'price_top_limit':'PriceUpLimit',\n # 'price_bottom_limit':'PriceLowLimit',\n 'chg_steelyard_price':'ChgSteelyardPrice',\n 'is_steelyard_count':'IsSteelyardCount',\n 'is_steelyard_sale':'IsSteelyardSale',\n 'default_supplier':'DefaultSupOrgNO ',\n 'default_dc_org_no':'DefaultDCOrgNO',\n # 'merch_style':'MerchStyle',\n 'can_order':'CanOrder',\n 'can_change_retail_price':'CanChangeRetailPrice',\n 'can_sale':'CanSale',\n 'can_return':'CanReturn',\n 'item_status':'Status'\n}\n\n\ndef test_record(record):\n item_code = record.find('MerchID').text\n logging.info('对比门店商品: %s' % item_code)\n item = frappe.get_doc('Store Item',item_code)\n for key,value in dict_store_item.items():\n if item.get(key) != record.find(value).text:\n logging.info('不一致field: %s' % key)\n\n main_item = frappe.get_doc('Item',{'item_code':item_code})\n if main_item.get('item_group') != record.find('ClsCode').text:\n logging.info('不一致的filed: item_group')\n if item.get('operation_mode') != dic_oprt[record.find('OperationMode').text]:\n logging.info('不一致field: operation_mode')\n\n if item.get('management_style') != dic_oprt[record.find('ManagementStyle').text]:\n logging.info('不一致field: management_style')\n\n if item.get('merch_style') != dic_oprt[record.find('MerchStyle').text]:\n logging.info('不一致field: merch_style')\n\n if int(item.get('high_stock_days')) != int(record.find('HighStockDays')):\n logging.info('不一致field: height_stock_days')\n\n if int(item.get('safe_stock_days')) != int(record.find('SafeStockDays')):\n logging.info('不一致field: safe_stock_days')\n\n if float(item.get('price_top_limit')) != float(record.find('PriceUpLimit')):\n logging.info('不一致field: PriceUpLimit')\n\n if float(item.get('price_bottom_limit')) != float(record.find('PriceLowLimit')):\n logging.info('不一致field: PriceLowLimit')\n\ndef parseXML(filename ):\n '''\n 解析xml,并比较字段\n '''\n tree = ET.parse(filename)\n dataroot = tree.getroot()\n data = dataroot[0][0].findall('REC_OrgMerch')\n for rec in data:\n test_record(rec)\n\ndef main():\n '''\n 遍历数据文件夹,查找相应xml,并解析\n '''\n data_dir = '/home/ubuntu/data/%s' % SYNC_DATE\n file_count = 0\n for xml_file in os.listdir(data_dir):\n if xml_file.find('REC_OrgMerch') != -1:\n file_count += 1\n filename = os.path.join(data_dir, xml_file)\n logging.info('处理第[%s]个文件: %s' % (file_count, filename))\n parseXML(filename)\n if file_count == 0:\n logging.info('当前日期没有OrgMerch文件!')\n\nif __name__ == \"__main__\":\n try:\n frappe.connect(conf.SITE_NAME)\n logging.info('connected to local site')\n\n main()\n\n except Exception as e:\n logging.warning(e)\n finally:\n frappe.destroy()\n logging.info('frappe destroyed')","sub_path":"verify_script_test/verify_store_item.py","file_name":"verify_store_item.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450963288","text":"from knock41 import Cabocha\n'''\n始める で ここで\n見る は を 吾輩は ものを\n'''\nif __name__ == \"__main__\":\n for chunks in Cabocha().get_sentence():\n for chunk 
in chunks:\n verb = \"\"\n for m in chunk.morphs:\n if m.pos == \"動詞\":\n verb = m.base\n if verb == \"\":\n continue\n \n particles = {}\n for src in chunk.srcs:\n morph = chunks[src].morphs[-1] # want only last element to check\n if morph.pos == \"助詞\": # if last element == \"助詞\" then\n particles[morph.base] = chunks[src].print_morphs()\n \n if len(verb) > 0 and len(particles) > 0:\n print(verb,\" \".join(particles.keys()),\" \".join(particles.values()))\n","sub_path":"bambi/chapter05/knock46.py","file_name":"knock46.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"366608218","text":"r=1.1\npi=3.1416\narea= pi*(r*r)\nprint(area)\n#DEVUELVE EL VALOR ABSOLUTO\n\nn=1\nd=21\ndiferencia= n-d\nprint (diferencia)\n#propinas\ntotal=44.50\npropina=10\nt= total/10\nprpo= t+total\nprint(prpo)\n\n","sub_path":"CAROLA/trabajo de CAROLA.py","file_name":"trabajo de CAROLA.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"540545792","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/django_payworld/views.py\n# Compiled at: 2012-02-15 01:03:27\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.views.generic.simple import direct_to_template\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django_payworld.forms import PaymentForm\nfrom django_payworld.signals import payment_notification, payment_error\nfrom django_payworld.utils import calculate_hash\n\n@csrf_exempt\ndef success(request):\n params_dict = {}\n if request.POST:\n for param in ('transaction_id', 'order_id', 'order_total', 'result_message'):\n params_dict[param] = request.POST.get(param, '')\n\n return direct_to_template(request, 'django_payworld/success.html', params_dict)\n return HttpResponse('POST Only')\n\n\n@csrf_exempt\ndef failure(request):\n params_dict = {}\n if request.POST:\n for param in ('transaction_id', 'order_id', 'order_total', 'result_message'):\n params_dict[param] = request.POST.get(param, '')\n\n return direct_to_template(request, 'django_payworld/failure.html', params_dict)\n return HttpResponse('POST Only')\n\n\n@csrf_exempt\ndef result(request):\n if request.POST:\n params_dict = {}\n for param in ('transaction_id', 'order_id', 'order_total', 'payer_email', 'seller_name',\n 'shop_id', 'hash'):\n params_dict[param] = request.POST.get(param, '')\n\n if params_dict['hash'] == calculate_hash(params_dict, settings.PAYWORLD_SECRET_CODE):\n del params_dict['hash']\n payment_notification.send(result, **params_dict)\n else:\n payment_error.send(result, **params_dict)\n return HttpResponse('OK')\n return HttpResponse('POST Only')","sub_path":"pycfiles/django_payworld-0.1-py2.6/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355204084","text":"#!/usr/bin/python3\n\nimport matplotlib.pyplot as plt\n\nx_values = list(range(0,5001))\ny_values = [x**3 for x in x_values]\n\nplt.plot(x_values, y_values)\n\nplt.title(\"Cube of the funcion\")\nplt.xlabel(\"X Value\")\nplt.ylabel(\"Cube of X\")\n\nplt.tick_params(axis='both', which='major', 
labelsize=14)\n\nplt.show()\n","sub_path":"chap_15/cubes.py","file_name":"cubes.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"560586295","text":"import csv\r\nimport sys\r\nimport numpy as np\r\nimport math\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import minimum_spanning_tree\r\nimport random\r\nimport timeit\r\nimport time\r\n\r\ndef extractData(fileName):\r\n # with open(fileName,newline='', encoding='utf_8') as csvfile:\r\n with open(fileName, 'rb') as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',')\r\n rows = list(reader)\r\n NumberOfExamples = len(rows)\r\n NumberOfFeatures = len(rows[0])\r\n X = np.zeros((NumberOfExamples, NumberOfFeatures),dtype = int)\r\n for i in range(0,NumberOfExamples):\r\n \tX[i,:]=rows[i]\r\n return X,NumberOfFeatures,NumberOfExamples\r\n\r\ndef initializeGraph(NumberOfFeatures):\r\n I = gencoordinates(NumberOfFeatures)\r\n maxTree= findMaximumSpanningTree(I)\r\n G = treeToGraph(maxTree)\r\n parents = dfs(G, random.randint(0,len(G)-1))\r\n # for i in range(0,len(G)):\r\n # print('<<',parents[0,i],',',i,'>>')\r\n return parents\r\n\r\ndef initializeKGraphs(K, NumberOfFeatures):\r\n GraphArray={}\r\n for k in range(0,K):\r\n # print(\"new graph\")\r\n GraphArray[k] = initializeGraph(NumberOfFeatures)\r\n return GraphArray\r\n\r\ndef initializeClusterProbabilities(K):\r\n clusterProbabalities=np.ones((1,K))\r\n count = 0\r\n for k in range(0,K):\r\n count += clusterProbabalities[0,k]\r\n for k in range(0,K):\r\n clusterProbabalities[0,k]=clusterProbabalities[0,k]/float(count)\r\n return clusterProbabalities\r\n\r\ndef initializeProbabilties(K,NumberOfFeatures,GraphArray):\r\n jointProbabilityDict= {}\r\n for k in range(0,K):\r\n jointProbabilityDict[k]={}\r\n for j in range(0,NumberOfFeatures):\r\n jointProbabilityDict[k][j]={}\r\n parent = GraphArray[k][0,j]\r\n if parent==-1:\r\n jointProbabilityDict[k][j][parent]=(random.random())\r\n else:\r\n jointProbabilityDict[k][j][parent]=(random.random(),random.random())\r\n return jointProbabilityDict\r\n\r\ndef gencoordinates(n):\r\n seen = set()\r\n visited = set()\r\n allNodes = set(range(0,n))\r\n NumberOfEdges = 0\r\n while (True):\r\n if((NumberOfEdges>=n-1)and(visited==allNodes)):\r\n break\r\n else:\r\n x, y = random.randint(0,n-1), random.randint(0,n-1)\r\n while (((x, y) in seen)or((y,x)in seen) or (x==y)):\r\n x, y = random.randint(0,n-1), random.randint(0,n-1)\r\n seen.add((x, y))\r\n # print(\"x,y=\",(x,y))\r\n NumberOfEdges = NumberOfEdges +1\r\n visited.add(x)\r\n visited.add(y)\r\n I = np.zeros((NumberOfFeatures,NumberOfFeatures))\r\n for tup in seen:\r\n I[tup[0]][tup[1]]=1\r\n return I\r\n\r\ndef findMaximumSpanningTree(InfomationMatrix):\r\n I = csr_matrix(InfomationMatrix)\r\n Tcsr = minimum_spanning_tree(I)\r\n t = Tcsr.toarray()\r\n return t\r\n\r\ndef treeToGraph(maxTree):\r\n G = {}\r\n NumberOfFeatures = len(maxTree)\r\n for i in range(0,NumberOfFeatures):\r\n G[i]=set()\r\n\r\n for i in range(0, NumberOfFeatures):\r\n index =maxTree[i].nonzero()[0]\r\n if(len(index)>0):\r\n for j in range(0,len(index)):\r\n G[i].add(index[j])\r\n G[index[j]].add(i)\r\n return G\r\n\r\ndef dfs(graph, start):\r\n NumberOfFeatures = len(graph)\r\n path = np.zeros((1,NumberOfFeatures),dtype=int)\r\n visited, stack, parent = set(), [start], []\r\n while stack:\r\n vertex = stack.pop()\r\n if vertex not in visited:\r\n visited.add(vertex)\r\n if not parent:\r\n 
path[0,vertex]=-1\r\n else:\r\n parentIS = parent.pop()\r\n path[0,vertex]=parentIS\r\n if((graph[parentIS].issubset(visited))==False):\r\n parent.append(parentIS)\r\n if((graph[vertex].issubset(visited))==False):\r\n parent.append(vertex)\r\n stack.extend(graph[vertex] - visited)\r\n return path\r\n\r\ndef calculateWeights(XTrain,trainingExamplesClusterTable,clusterNumber):\r\n probJointDict = {}\r\n probDict = {}\r\n NumberOfTrainingExamples = len(XTrain)\r\n NumberOfFeatures=len(XTrain[0,:])\r\n den = float(4)\r\n I = np.zeros((NumberOfFeatures,NumberOfFeatures))\r\n\r\n for i in range(0,NumberOfTrainingExamples):\r\n if(trainingExamplesClusterTable[i,clusterNumber]>1):\r\n print(\"Checking second time Prob greater than 1\")\r\n sys.exit(0)\r\n den += trainingExamplesClusterTable[i,clusterNumber]\r\n\r\n if(den<=0):\r\n print(\"Problem while calculating denominator\")\r\n print(\"den=\",den)\r\n sys.exit(0)\r\n\r\n for j in range(0,NumberOfFeatures):\r\n count_ones=2\r\n count_zeros=2\r\n for i in range(0,NumberOfTrainingExamples):\r\n if XTrain[i,j]==1:\r\n count_ones+=trainingExamplesClusterTable[i,clusterNumber]\r\n\r\n probDict[j]=count_ones/den\r\n if(probDict[j]>1):\r\n print(\"probDict[\",j,\"]=\",probDict[j])\r\n sys.exit(0)\r\n\r\n\r\n for j in range(0,NumberOfFeatures):\r\n probJointDict[j]={}\r\n # print(\"j=\",j)\r\n for k in range(j+1,NumberOfFeatures):\r\n # print(\"k=\",k)\r\n countArray = np.ones((2,2))\r\n for i in range(0,NumberOfTrainingExamples):\r\n countArray[XTrain[i,j],XTrain[i,k]] += trainingExamplesClusterTable[i,clusterNumber]\r\n\r\n a1 = countArray[0,0]/den\r\n b1 = (1-probDict[j])*(1-probDict[k])\r\n I[j][k] = I[j][k] + a1*math.log((a1/b1),2)\r\n\r\n a2 = countArray[0,1]/den\r\n b2 = (1-probDict[j])*(probDict[k])\r\n I[j][k] = I[j][k] + a2*math.log((a2/b2),2)\r\n\r\n a3 = countArray[1,0]/den\r\n b3 = (probDict[j])*(1-probDict[k])\r\n I[j][k] = I[j][k] + a3*math.log((a3/b3),2)\r\n\r\n a4 = countArray[1,1]/den\r\n b4 =(probDict[j])*(probDict[k])\r\n I[j][k] = I[j][k] + a4*math.log((a4/b4),2)\r\n\r\n if(I[j][k]<-0.00000000001):\r\n print(\"\")\r\n print(\"I[\",j,\"][\",k,\"]=\",I[j][k])\r\n print('Mutual info negative')\r\n print(\"den=\",den)\r\n print(\"\")\r\n print(\"countArray[0,0]\",countArray[0,0])\r\n print(\"countArray[0,1]\",countArray[0,1])\r\n print(\"countArray[1,0]\",countArray[1,0])\r\n print(\"countArray[1,1]\",countArray[1,1])\r\n print(\"\")\r\n print(\"prob[0,0]\",countArray[0,0]/den)\r\n print(\"prob[0,1]\",countArray[0,1]/den)\r\n print(\"prob[1,0]\",countArray[1,0]/den)\r\n print(\"prob[1,1]\",countArray[1,1]/den)\r\n print(\"\")\r\n print(\"count[\",j,\"]=\",probDict[j]*den)\r\n print(\"1-count[\",j,\"]=\",(1-probDict[j])*den)\r\n print(\"count[\",k,\"]=\",probDict[k]*den)\r\n print(\"1-count[\",k,\"]=\",(1-probDict[k])*den)\r\n print(\"\")\r\n print(\"probDict[\",j,\"]=\",probDict[j])\r\n print(\"1-probDict[\",j,\"]=\",1-probDict[j])\r\n print(\"probDict[\",k,\"]=\",probDict[k])\r\n print(\"1-probDict[\",k,\"]=\",1-probDict[k])\r\n print(\"\")\r\n print(\"countArray[0,0]+countArray[0,1]=\",countArray[0,0]+countArray[0,1])\r\n print(\"countArray[1,0]+countArray[1,1]=\",countArray[1,0]+countArray[1,1])\r\n print(\"countArray[0,0]+countArray[1,0]=\",countArray[0,0]+countArray[1,0])\r\n print(\"countArray[0,1]+countArray[1,1]=\",countArray[0,1]+countArray[1,1])\r\n print(\"\")\r\n print(\"a1=\",a1)\r\n print(\"b1=\",b1)\r\n print(\"a2=\",a2)\r\n print(\"b2=\",b2)\r\n print(\"a3=\",a3)\r\n print(\"b3=\",b3)\r\n print(\"a4=\",a4)\r\n 
print(\"b4=\",b4)\r\n print(\"\")\r\n sys.exit(0)\r\n I[j][k] = -1*I[j][k]\r\n\r\n probJointDict[j][k]=[[countArray[0,0]/den,countArray[0,1]/den],[countArray[1,0]/den,countArray[1,1]/den]]\r\n return I, probJointDict, probDict\r\n\r\ndef findProb(probJointDict,j,jValue,k,kValue):\r\n prob = 0\r\n try:\r\n prob = probJointDict[j][k][jValue][kValue]\r\n except:\r\n try:\r\n prob = probJointDict[k][j][kValue][jValue]\r\n except:\r\n print('Issue with count storage or accessing probJointDict')\r\n print(\"j=\",j,\" k=\",k)\r\n print(\"jValue=\",jValue,\" kValue=\",KValue)\r\n sys.exit(0)\r\n return prob\r\n\r\ndef updateJointProbability(jointProbabilityDict,NumberOfFeatures,GraphArray,k, probJointDict, probDict):\r\n jointProbabilityDict[k]={}\r\n for j in range(0,NumberOfFeatures):\r\n jointProbabilityDict[k][j]={}\r\n parent = GraphArray[k][0,j]\r\n if parent==-1:\r\n jointProbabilityDict[k][j][parent]=probDict[j]\r\n else:\r\n a= findProb(probJointDict,j,1,parent,1)/probDict[parent]\r\n b = findProb(probJointDict,j,1,parent,0)/(1-probDict[parent])\r\n jointProbabilityDict[k][j][parent]=(a,b)\r\n return jointProbabilityDict\r\n\r\n\r\ndef completingDataWithParameters(XTrain,K,jointProbabilityDict,clusterProbabalities,GraphArray):\r\n NumberOfFeatures=len(XTest[0,:])\r\n NumberOfTrainingExamples = len(XTrain)\r\n averageProbabilityOfDataSet=0\r\n trainingExamplesClusterTable = np.ones((NumberOfTrainingExamples,K))\r\n\r\n for i in range(0,NumberOfTrainingExamples):\r\n for k in range(0,K):\r\n trainingExamplesClusterTable[i,k]=clusterProbabalities[0,k]\r\n for j in range(0,NumberOfFeatures):\r\n parent = GraphArray[k][0,j]\r\n if parent == -1:\r\n if XTrain[i,j]==1:\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]*(jointProbabilityDict[k][j][parent])\r\n else:\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]*(1-jointProbabilityDict[k][j][parent])\r\n else:\r\n if(XTrain[i,j]==1 and XTrain[i,parent]==1):\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]*(jointProbabilityDict[k][j][parent][0])\r\n elif(XTrain[i,j]==0 and XTrain[i,parent]==1):\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]*(1-jointProbabilityDict[k][j][parent][0])\r\n elif(XTrain[i,j]==1 and XTrain[i,parent]==0):\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]*(jointProbabilityDict[k][j][parent][1])\r\n else:\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]*(1-jointProbabilityDict[k][j][parent][1])\r\n if(trainingExamplesClusterTable[i,k]>1):\r\n print(\"trainingExamplesClusterTable[\",i,\",\",k,\"]=\",trainingExamplesClusterTable[i,k])\r\n sys.exit(0)\r\n count =0\r\n for k in range(0,K):\r\n count +=trainingExamplesClusterTable[i,k]\r\n for k in range(0,K):\r\n trainingExamplesClusterTable[i,k]=trainingExamplesClusterTable[i,k]/float(count)\r\n if(trainingExamplesClusterTable[i,k]>1):\r\n print(\"Prob greater than 1\")\r\n print(\"count = \",count)\r\n print(\"trainingExamplesClusterTable[\",i,\",\",k,\"]=\",trainingExamplesClusterTable[i,k])\r\n sys.exit(0)\r\n return trainingExamplesClusterTable\r\n\r\ndef computingParametersWithCompletedData(XTrain, trainingExamplesClusterTable,K):\r\n # print(\"Old clusterProbabalities =\",clusterProbabalities)\r\n NumberOfFeatures=len(XTest[0,:])\r\n NumberOfTrainingExamples = len(XTrain)\r\n newClusterProbabalities=np.zeros((1,K))\r\n for k in range(0,K):\r\n newClusterProbabalities[0,k]=0\r\n for i in range(0,NumberOfTrainingExamples):\r\n 
newClusterProbabalities[0,k]+=trainingExamplesClusterTable[i,k]\r\n newClusterProbabalities[0,k]= newClusterProbabalities[0,k]/float(NumberOfTrainingExamples)\r\n jointProbabilityDict= {}\r\n GraphArray={}\r\n for k in range(0,K):\r\n # print(\"k=\",k)\r\n InfomationMatrix, probJointDict, probDict = calculateWeights(XTrain,trainingExamplesClusterTable,k)\r\n maxTree= findMaximumSpanningTree(InfomationMatrix)\r\n G = treeToGraph(maxTree)\r\n parents = dfs(G, random.randint(0,len(G)-1))\r\n GraphArray[k]=parents\r\n jointProbabilityDict = updateJointProbability(jointProbabilityDict,NumberOfFeatures,GraphArray,k, probJointDict, probDict)\r\n return newClusterProbabalities, GraphArray, jointProbabilityDict\r\n\r\ndef PredictProbabalitiesTestData(XTest,newClusterProbabalities, GraphArray, jointProbabilityDict):\r\n NumberOfTestingExamples = len(XTest)\r\n NumberOfFeatures=len(XTest[0,:])\r\n averageProbabilityOfDataSet = 0\r\n for i in range(0,NumberOfTestingExamples):\r\n probOfTestExample = 0\r\n for k in range(0,K):\r\n probOfTestExampleForK = 1\r\n for j in range(0,NumberOfFeatures):\r\n parent = GraphArray[k][0,j]\r\n if parent == -1:\r\n if XTrain[i,j]==1:\r\n probOfTestExampleForK=probOfTestExampleForK*(jointProbabilityDict[k][j][parent])\r\n else:\r\n probOfTestExampleForK=probOfTestExampleForK*(1-jointProbabilityDict[k][j][parent])\r\n else:\r\n if(XTrain[i,j]==1 and XTrain[i,parent]==1):\r\n probOfTestExampleForK=probOfTestExampleForK*(jointProbabilityDict[k][j][parent][0])\r\n elif(XTrain[i,j]==0 and XTrain[i,parent]==1):\r\n probOfTestExampleForK=probOfTestExampleForK*(1-jointProbabilityDict[k][j][parent][0])\r\n elif(XTrain[i,j]==1 and XTrain[i,parent]==0):\r\n probOfTestExampleForK=probOfTestExampleForK*(jointProbabilityDict[k][j][parent][1])\r\n else:\r\n probOfTestExampleForK=probOfTestExampleForK*(1-jointProbabilityDict[k][j][parent][1])\r\n probOfTestExample += newClusterProbabalities[0,k]*probOfTestExampleForK\r\n averageProbabilityOfDataSet+= math.log(probOfTestExample,2)\r\n averageProbabilityOfDataSet= averageProbabilityOfDataSet/float(NumberOfTestingExamples)\r\n print(\"Log Likelihood=\",averageProbabilityOfDataSet)\r\n print(\"\")\r\n return averageProbabilityOfDataSet\r\n\r\ntrainingFileName = sys.argv[1]\r\ntestingFileName= sys.argv[2]\r\nK = int(sys.argv[3])\r\nmaxIterations=int(sys.argv[4])\r\nmaxNumberOfRuns = int(sys.argv[5])\r\n\r\nprint(trainingFileName)\r\nprint(testingFileName)\r\nprint(\"K=\",K)\r\nprint(\"maxIterations=\",maxIterations)\r\nprint(\"maxNumberOfRuns=\",maxNumberOfRuns)\r\nprint(\"\")\r\n\r\nXTrain,NumberOfFeatures,NumberOfTrainingExamples = extractData(trainingFileName)\r\nXTest,n,NumberOfTestingExamples = extractData(testingFileName)\r\n\r\nNumberOfRuns=0\r\n# start = timeit.default_timer()\r\nLikelihood=np.zeros((1,maxNumberOfRuns))\r\n\r\nwhile(NumberOfRuns0.001):\r\n change = 1\r\n clusterProbabalities = newClusterProbabalities\r\n else:\r\n change = 1\r\n clusterProbabalities = newClusterProbabalities\r\n i+=1\r\n print(\"Converged at IterationNumber=\",i-1)\r\n Likelihood[0,NumberOfRuns]= PredictProbabalitiesTestData(XTest,newClusterProbabalities, GraphArray, jointProbabilityDict)\r\n # stop = timeit.default_timer()\r\n # print (\"Time taken = \",stop - start)\r\n NumberOfRuns= NumberOfRuns+1\r\nprint(\"\")\r\nprint(\"Mean=\",np.mean(Likelihood))\r\nprint(\"Standard 
Dev=\",np.std(Likelihood))\r\n","sub_path":"part3.py","file_name":"part3.py","file_ext":"py","file_size_in_byte":16564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"72012575","text":"from tkinter import *\nimport numpy\nimport scipy\nimport math\nt=Tk()\nt.title('最短路径')\nt.geometry('1600x900')\nLabel(t,text='点的个数').place(x=5,y=5)#输入点的个数\nx1=''\nglobal e1\ne1=Entry(t,width=10)\ne1.place(x=60,y=5)\nLabel(t,text=' 到 的距离:').place(x=22,y=40)\ndis=Entry(t,width=5)\ndis.place(x=185,y=40)\nstart=Entry(t,width=5)\nstart.place(x=5,y=40)\nend=Entry(t,width=5)\nend.place(x=75,y=40)\na=0#变量a用来判断按钮回调函数是否会被触发\ndef get():#储存按钮的回调函数\n global a\n if a!=0:#a不等于0时储存函数被触发\n if start.get().isdigit()==True and end.get().isdigit()==True and dis.get().isdigit()==True:\n a=2\n line=int(start.get())#起点\n column=int(end.get())#终点\n distance=int(dis.get())#距离\n pi=math.pi\n if column!=line and 00:#a等于0并且输入的东西符合要求时,生成函数被触发\n a=1\n global x1\n x1=int(e1.get())\n global matrix#把一些变量全局化来进行调用\n global trans\n global c\n c=Canvas(t,width=1000,height=750,bg='white')\n c.place(x=5,y=70)\n c.create_oval(200,75,800,675)#在一个圆周上显示出这些点\n matrix=numpy.zeros((x1,x1))#把最短距离矩阵初始化\n trans=numpy.zeros((x1,x1))#把最短路径中间点初始化\n for i in range(x1):\n pi=math.pi\n c.create_oval(500+300*math.cos(2*pi*i/x1),375+300*math.sin(2*pi*i/x1),504+300*math.cos(2*pi*i/x1),379+300*math.sin(2*pi*i/x1),fill='black')\n #在大圆上生成一系列的点\n c.create_text(500+310*math.cos(2*pi*i/x1),375+310*math.sin(2*pi*i/x1),text=int(i+1))#在每个点边上生成数字\nButton(t,text='存储',command=get).place(x=230,y=35)\nButton(t,text='生成',command=establish).place(x=150,y=0)\ndef cal():#计算按钮的回调函数,采用弗洛伊德算法\n global a\n if a==2:#a=2时触发计算函数\n a=3\n for k in range(x1):\n for i in range(x1):\n for j in range(x1):\n if matrix[i][k]!=0 and matrix[k][j]!=0:#判断两点之间是否有距离\n if matrix[i][k]+matrix[k][j]0 and getend.get().isdigit()==True and int(getend.get())>0:\n a=4\n gets=int(getstart.get())\n gete=int(getend.get())\n T.insert('insert','点')\n T.insert('insert',gets)\n T.insert('insert','到')\n T.insert('insert',gete)\n T.insert('insert','的最短路径为:')\n if gete==gets:#起点和终点一样时\n T.insert('insert','你输入了两个一样的点!')\n T.insert('insert','\\n')\n elif matrix[gets-1][gete-1]==0:#两点之间距离为0,即无法连接时\n T.insert('insert','这两个点之间不可通行哦!')\n T.insert('insert','\\n')\n else:\n k=int(trans[gets-1][gete-1])\n T.insert('insert',gets)\n while k!=gete:#当中间点不是终点时,要继续循环\n T.insert('insert','--->')\n T.insert('insert',k)\n k=int(trans[k-1][gete-1])\n T.insert('insert','--->')\n T.insert('insert',k)\n T.insert('insert',' 距离为')\n T.insert('insert',matrix[gets-1][gete-1])\n T.insert('insert','\\n')\nButton(t,text='查看',command=look).place(x=1270,y=105)\ndef clear():#定义清除按钮的回调函数\n global a\n matrix=numpy.zeros((1,1))\n trans=numpy.zeros((1,1))\n T.delete(1.0,END)\n c=Canvas(t,width=1000,height=750,bg='white')\n c.place(x=5,y=70)\n c.create_oval(200,75,800,675)\n dis.delete(0,END)\n start.delete(0,END)\n end.delete(0,END)\n getstart.delete(0,END)\n getend.delete(0,END)\n e1.delete(0,END)\n a=0#将a变成0 这步很重要\nButton(t,text='清空',command=clear).place(x=1150,y=770)\ndef guide():#定义规则按钮的回调函数\n M=Tk()\n M.title('规则')\n M.geometry('600x400')\n S=Text(M,width=80,height=30)\n S.place(x=0,y=0)\n S.insert('insert','欢迎来到最短路径计算小程序!以下是程序使用的注意事项,请认真阅读,按照规则进行!祝大家使用愉快!')\n S.insert('insert','\\n')\n S.insert('insert','1.首先你需要输入一个数字,代表点的个数,然后点击【生成】按钮,会生成一个圆周,上面会生成均匀分布的点。')\n S.insert('insert','\\n')\n S.insert('insert','2.现在你要输入有距离的两点和这两点之间的距离,然后点击【储存】按钮,数据会被储存,同时两点之间会连一条红线,红线上标有距离。')\n 
S.insert('insert','\\n')\n S.insert('insert','3.两点之间距离没有方向性(即A到B的距离与B到A的距离相等)')\n S.insert('insert','\\n')\n S.insert('insert','4.注意:输入的点为正整数,标号在圆周上会显示,距离为浮点数。')\n S.insert('insert','\\n')\n S.insert('insert','5.若不小心输错了距离,可以更改,只需重新输入起点,终点和新的距离即可,红线上的距离会跟着更改。')\n S.insert('insert','\\n')\n S.insert('insert','6.系统一开始默认两点之间距离为0(这两点无法连接),你不需要输入这样的两点,修改时若想取消两点之间的连线,距离那一处输入0,相应的红线和距离会被消除。')\n S.insert('insert','\\n')\n S.insert('insert','7.当你把所有的距离信息输入完成并确认无误之后,点击【计算】按钮,程序将会计算出任意两点之间的最短距离和路径')\n S.insert('insert','\\n')\n S.insert('insert','8.最短距离优先级如下:当两点之间有两条路径最短时,经过的点标号较小的将被输出。')\n S.insert('insert','\\n')\n S.insert('insert','9.计算完成后,在右边的屏幕中,输入你想知道的两点之间的距离,点击【查看】按钮,文本框中就会出现相应结果。')\n S.insert('insert','\\n')\n S.insert('insert','10.当你获得了你所需要的一系列最短距离之后,可以点击【清零】按钮,每个文本框都会被清空,所有距离和点的信息也会被清除。')\n S.insert('insert','\\n')\n S.insert('insert','11.请从第一条开始阅读!【手动狗头】')\nButton(t,text='规则',command=guide).place(x=350,y=0)\nt.mainloop()\n","sub_path":"dazuoye.py","file_name":"dazuoye.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"409964953","text":"import os\nfrom predict import TRANSF\nimport glob\nimport numpy as np\nimport torch\nimport json\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory, render_template\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = 'static'\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n\n file = request.files['file']\n\n # if user does not select file, browser also\n # submit an empty part without filename\n\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n \n full_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n \n file.save(full_path)\n\n return redirect(url_for('predict',path=filename))\n\n return '''\n \n \n \n \n Flower-Torch\n \n \n
Flower-Torch\n    \n    \n        Esse é o Flower-Torch, um modelo de inteligência artificial criado com PyTorch para identificar até 5 espécies diferentes de flores. O modelo consegue distinguir entre \n        Margaridas, Dentes-de-leão, Rosas, Girassois e Tulipas. Mande uma foto usando o formulário abaixo.\n    \n    Upload do Arquivo\n    \n    \n    \n    
\n \n '''\n\n@app.route('/predict/')\ndef predict(path):\n\n image_tensor = TRANSF.transform_image('static/'+path)\n model = torch.load(glob.glob('TrainModel/models/*')[0],map_location=torch.device('cpu'))\n \n outputs = model(image_tensor)\n _, predicted = torch.max(outputs.data, 1)\n \n classes = ['Margarida', 'Dente-de-leão', 'Rosa', 'Girassol', 'Tulipas']\n\n result = str(classes[predicted])\n\n # return json.dumps(dict(result=result))\n\n pag =f'''\n \n \n Result\n \n \n \n
\n        \n        \n            \n            {result}\n            \n        \n    
\n \n \n '''\n\n return pag\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=\"5000\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"178949928","text":"import logging\nimport logging.handlers\nimport sys\n\nclass Logger:\n\n def __init__(self, f, console=False, maxBytes=100000000, backupCount=1000):\n self.f = f\n self.logger = None\n self.setup(console, maxBytes, backupCount)\n\n def setup(self, console, maxBytes, backupCount):\n try:\n # setup the logger\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.INFO)\n handler = logging.handlers.RotatingFileHandler(\n self.f, maxBytes=maxBytes, backupCount=backupCount)\n formatter = logging.Formatter(\"%(asctime)s-%(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n if console: self.logger.addHandler(logging.StreamHandler(sys.stdout))\n except Exception as err:\n raise LoggerException(err)\n\nclass LoggerException(Exception):\n pass\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119259521","text":"# -*- coding: utf-8 -*-\n\"\"\"\n pip_services3_rpc.client.RestClient\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n REST client implementation\n \n :copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nimport requests\n\nfrom pip_services3_commons.config import ConfigParams, IConfigurable\nfrom pip_services3_commons.run import IOpenable, IClosable\nfrom pip_services3_commons.refer import IReferenceable\nfrom pip_services3_components.connect import ConnectionParams, ConnectionResolver\nfrom pip_services3_components.log import CompositeLogger\nfrom pip_services3_components.count import CompositeCounters\nfrom pip_services3_commons.errors import ConfigException, UnknownException, InvocationException\nfrom pip_services3_commons.errors import ErrorDescription, ApplicationExceptionFactory\nfrom pip_services3_commons.data import IdGenerator\nfrom ..connect.HttpConnectionResolver import HttpConnectionResolver\n\n\nclass RestClient(IOpenable, IConfigurable, IReferenceable):\n \"\"\"\n Abstract client that calls remove endpoints using HTTP/REST protocol.\n\n ### Configuration parameters ###\n - base_route: base route for remote URI\n - connection(s):\n - discovery_key: (optional) a key to retrieve the connection from IDiscovery\n - protocol: connection protocol: http or https\n - host: host name or IP address\n - port: port number\n - uri: resource URI or connection string with all parameters in it\n - options:\n - retries: number of retries (default: 3)\n - connect_timeout: connection timeout in milliseconds (default: 10 sec)\n - timeout: invocation timeout in milliseconds (default: 10 sec)\n\n ### References ###\n\n - *:logger:*:*:1.0 (optional) ILogger components to pass log messages\n - *:counters:*:*:1.0 (optional) ICounters components to pass collected measurements\n - *:discovery:*:*:1.0 (optional) IDiscovery services to resolve connection\n\n Example:\n class MyRestClient(RestClient, IMyClient):\n def get_data(self, correlation_id, id):\n timing = self.instrument(correlationId, 'myclient.get_data')\n result = self._controller.get_data(correlationId, id)\n timing.end_timing()\n return result\n\n ...\n\n 
client = MyRestClient()\n client.configure(ConfigParams.fromTuples(\"connection.protocol\", \"http\",\n \"connection.host\", \"localhost\",\n \"connection.port\", 8080))\n\n data = client.getData(\"123\", \"1\")\n ...\n \"\"\"\n _default_config = None\n\n _client = None\n _uri = None\n _timeout = 1000\n _connection_resolver = None\n _logger = None\n _counters = None\n _options = None\n _base_route = None\n _retries = 1\n _headers = None\n _connect_timeout = 1000\n\n def __init__(self):\n \"\"\"\n Creates a new instance of the client.\n \"\"\"\n self._connection_resolver = HttpConnectionResolver()\n self._default_config = ConfigParams.from_tuples(\n \"connection.protocol\", \"http\",\n \"connection.host\", \"0.0.0.0\",\n \"connection.port\", 3000,\n\n \"options.timeout\", 10000,\n \"options.request_max_size\", 1024 * 1024,\n \"options.connect_timeout\", 10000,\n \"options.retries\", 3,\n \"options.debug\", True\n )\n self._logger = CompositeLogger()\n self._counters = CompositeCounters()\n self._options = ConfigParams()\n self._headers = {}\n\n def set_references(self, references):\n \"\"\"\n Sets references to dependent components.\n\n :param references: references to locate the component dependencies.\n \"\"\"\n self._logger.set_references(references)\n self._counters.set_references(references)\n self._connection_resolver.set_references(references)\n\n def configure(self, config):\n \"\"\"\n Configures component by passing configuration parameters.\n\n :param config: configuration parameters to be set.\n \"\"\"\n config = config.set_defaults(self._default_config)\n self._connection_resolver.configure(config)\n\n self._options.override(config.get_section(\"options\"))\n self._retries = config.get_as_integer_with_default(\"options.retries\", self._retries)\n self._connect_timeout = config.get_as_integer_with_default(\"options.connect_timeout\", self._connect_timeout)\n self._timeout = config.get_as_integer_with_default(\"options.timeout\", self._timeout)\n self._base_route = config.get_as_string_with_default(\"base_route\", self._base_route)\n\n def _instrument(self, correlation_id, name):\n \"\"\"\n Adds instrumentation to log calls and measure call time. 
It returns a Timing object that is used to end the time measurement.\n\n :param correlation_id: (optional) transaction id to trace execution through call chain.\n :param name: a method name.\n :return: Timing object to end the time measurement.\n \"\"\"\n TYPE_NAME = self.__class__.__name__ or 'unknown-target'\n self._logger.trace(correlation_id, f\"Calling {name} method {TYPE_NAME}\")\n self._counters.increment_one(f\"{TYPE_NAME}.{name}.call_count\")\n return self._counters.begin_timing(f\"{TYPE_NAME}.{name}.call_count\")\n\n def _instrument_error(self, correlation_id, name, err, result=None, callback=None):\n \"\"\"\n Adds instrumentation to error handling.\n\n :param correlation_id: (optional) transaction id to trace execution through call chain.\n :param name: a method name.\n :param err: an occured error\n :param result: (optional) an execution result\n :param callback: (optional) an execution callback\n \"\"\"\n if err is not None:\n TYPE_NAME = self.__class__.__name__ or 'unknown-target'\n self._logger.error(correlation_id, err, f\"Failed to call {name} method of {TYPE_NAME}\")\n self._counters.increment_one(f\"{name}.call_errors\")\n if callback:\n callback(err, result)\n\n def is_opened(self):\n \"\"\"\n Checks if the component is opened.\n\n :return: true if the component has been opened and false otherwise.\n \"\"\"\n return self._client is not None\n\n def open(self, correlation_id):\n \"\"\"\n Opens the component.\n\n :param correlation_id: (optional) transaction id to trace execution through call chain.\n \"\"\"\n if self.is_opened():\n return\n\n connection = self._connection_resolver.resolve(correlation_id)\n\n self._uri = connection.get_uri()\n\n self._client = requests\n\n self._logger.debug(correlation_id, \"Connected via REST to \" + self._uri)\n\n def close(self, correlation_id):\n \"\"\"\n Closes component and frees used resources.\n\n :param correlation_id: (optional) transaction id to trace execution through call chain.\n \"\"\"\n if self._client is not None:\n self._logger.debug(correlation_id, \"Disconnected from \" + self._uri)\n\n self._client = None\n self._uri = None\n\n def _to_json(self, obj):\n if obj is None:\n return None\n\n if isinstance(obj, set):\n obj = list(obj)\n if isinstance(obj, list):\n result = []\n for item in obj:\n item = self._to_json(item)\n result.append(item)\n return result\n\n if isinstance(obj, dict):\n result = {}\n for (k, v) in obj.items():\n v = self._to_json(v)\n result[k] = v\n return result\n\n if hasattr(obj, 'to_json'):\n return obj.to_json()\n if hasattr(obj, '__dict__'):\n return self._to_json(obj.__dict__)\n return obj\n\n def fix_route(self, route) -> str:\n if route is not None and len(route) > 0:\n if route[0] != '/':\n route = f'/{route}'\n return route\n\n return ''\n\n def createRequestRoute(self, route):\n builder = ''\n if self._uri is not None and len(self._uri) > 0:\n builder = self._uri\n\n builder += self.fix_route(self._base_route)\n\n if route[0] != '/':\n builder += '/'\n builder += route\n\n return builder\n\n def add_correlation_id(self, correlation_id=None, params=None):\n params = params or {}\n if not (correlation_id is None):\n params['correlation_id'] = correlation_id\n\n return params\n\n def add_filter_params(self, params=None, filters=None):\n params = params or {}\n if not (filters is None):\n params.update(filters)\n\n return params\n\n def add_paging_params(self, params=None, paging=None):\n params = params or {}\n if not (paging is None):\n if not (paging['total'] is None):\n params['total'] = 
paging['total']\n if not (paging['skip'] is None):\n params['skip'] = paging['skip']\n if not (paging['take'] is None):\n params['take'] = paging['take']\n # params.update(paging)\n\n return params\n\n def call(self, method, route, correlation_id=None, params=None, data=None):\n \"\"\"\n Calls a remote method via HTTP/REST protocol.\n\n :param method: HTTP method: \"get\", \"head\", \"post\", \"put\", \"delete\"\n\n :param route: a command route. Base route will be added to this route\n\n :param correlation_id: (optional) transaction id to trace execution through call chain.\n\n :param params: (optional) query parameters.\n\n :param data: (optional) body object.\n\n :return: result object\n \"\"\"\n method = method.upper()\n\n route = self.createRequestRoute(route)\n params = self.add_correlation_id(correlation_id=correlation_id, params=params)\n response = None\n result = None\n\n try:\n # Call the service\n data = self._to_json(data)\n response = requests.request(method, route, params=params, json=data, timeout=self._timeout)\n\n except Exception as ex:\n error = InvocationException(correlation_id, 'REST_ERROR', 'REST operation failed: ' + str(ex)).wrap(ex)\n raise error\n\n if response.status_code == 404 or response.status_code == 204:\n return None\n\n try:\n # Retrieve JSON data\n result = response.json()\n except:\n # Data is not in JSON\n if response.status_code < 400:\n raise UnknownException(correlation_id, 'FORMAT_ERROR',\n 'Failed to deserialize JSON data: ' + response.text) \\\n .with_details('response', response.text)\n else:\n raise UnknownException(correlation_id, 'UNKNOWN', 'Unknown error occured: ' + response.text) \\\n .with_details('response', response.text)\n\n # Return result\n if response.status_code < 400:\n return result\n\n # Raise error\n # Todo: We need to implement proper from_value method\n error = ErrorDescription.from_json(result)\n error.status = response.status_code\n\n raise ApplicationExceptionFactory.create(error)\n","sub_path":"pip_services3_rpc/clients/RestClient.py","file_name":"RestClient.py","file_ext":"py","file_size_in_byte":11618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"461670794","text":"# Copyright (c) 2018 Bruce Chou\n#\n# Licensed under the MIT License;\n# you may not use this file except in compliance with the License.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# Adapted from the original implementation by Bruce Chou.\n# Source: https://github.com/brucechou1983/CheXNet-Keras\n\nimport numpy as np\nimport os\nimport pandas as pd\n\n\ndef get_sample_counts(output_dir, dataset, class_names):\n \"\"\"\n Get total and class-wise positive sample count of a dataset\n\n Arguments:\n output_dir - str, folder of dataset.csv\n dataset - str, train|dev|test\n class_names - list of str, target classes\n\n Returns:\n total_count - int\n class_positive_counts - dict of int, ex: {\"Effusion\": 300, \"Infiltration\": 500 ...}\n \"\"\"\n df = pd.read_csv(os.path.join(output_dir, dataset + \".csv\"))\n labels = df[class_names].as_matrix()\n total_count = labels.shape[0]\n positive_counts = np.sum(labels, axis=0)\n class_positive_counts 
= dict(zip(class_names, positive_counts))\n return total_count, class_positive_counts\n\ndef get_class_names(output_dir, dataset):\n df = pd.read_csv(os.path.join(output_dir, dataset + \".csv\"))\n return list(df.columns[1:]) #Remove \"Path\" from names\n","sub_path":"classifier_real/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570297321","text":"import os\r\nimport xml.etree.ElementTree as ET\r\nimport csv\r\nimport pickle\r\nfrom PIL import Image\r\n\r\nimg_dir = \"D:\\\\PESU\\\\Sem_V\\\\AI\\\\Hackathon\\\\data\\\\VOCdevkit\\\\VOC2010\\\\JPEGImages\"\r\nannotation_dir = \"D:\\\\PESU\\\\Sem_V\\\\AI\\\\Hackathon\\\\data\\\\VOCdevkit\\\\VOC2010\\\\Annotations\"\r\npadded_images_dir = \"D:\\\\PESU\\\\Sem_V\\\\AI\\\\Hackathon\\\\PaddedImages\"\r\n\r\nwith open(\"D:\\\\PESU\\\\Sem_V\\\\AI\\\\Hackathon\\\\image_names.pickle\", 'rb') as f:\r\n train = pickle.load(f)\r\n\r\nfor i in range(len(train)):\r\n train[i] = train[i].encode('ascii')\r\n\r\nall_img = os.listdir(img_dir)\r\n\r\nfor i in all_img:\r\n if(i[:-4] in train):\r\n #count += 1\r\n image = os.path.join(img_dir, i)\r\n img = Image.open(image)\r\n small = img.resize((int(img.size[0]*0.25) ,int(img.size[1]*0.25))) \r\n \r\n new_img = Image.new(mode = \"RGB\", size=(128,128))\r\n new_img.paste(small, (0, 0))\r\n new_img.save(os.path.join(padded_images_dir, i))\r\n","sub_path":"Hackathon/localization_preprocess.py","file_name":"localization_preprocess.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"236588952","text":"# -*- coding: utf-8 -*-\n\"\"\"chapter6.ipynb\n\n# 第6章のソースコード\n\n## リスト6.1: 量子回路の作成\n\"\"\"\n\nfrom qiskit import QuantumCircuit\nfrom qiskit import ClassicalRegister, QuantumRegister\n\n# 量子回路の初期化\nqr = QuantumRegister(2, 'q') # 量子レジスタを作成\ncr = ClassicalRegister(2, 'c') # 古典レジスタを作成\ncircuit = QuantumCircuit(qr, cr) # レジスタを使い量子回路を初期化\n\n# 量子回路の組み立て\ncircuit.h(qr[0]) # アダマール行列を適用\ncircuit.cx(qr[0], qr[1]) # CNOTを適用\n\n# 測定\ncircuit.measure(qr, cr)\n\n\"\"\"## リスト6.2: 実行と結果取得(実行する毎に結果は変化します)\"\"\"\n\nfrom qiskit import BasicAer, execute\n\n# 実行と結果取得\nbackend = BasicAer.get_backend('qasm_simulator') # デバイス指定 \njob = execute(circuit, backend) # 量子プログラムを実行\nresult = job.result() # 結果を取得\nprint(result.get_counts(circuit)) # 結果をテキスト表示\n\n\"\"\"## リスト6.4: ヒストグラム表示(実行する毎に結果は変化します)\"\"\"\n\nfrom qiskit.tools.visualization import plot_histogram\n\n# ヒストグラム表示\nplot_histogram(job.result().get_counts(circuit))\n\n\"\"\"## リスト6.5: 量子回路を描画\"\"\"\n\nfrom qiskit.tools.visualization import circuit_drawer\n\n# 量子回路を描画\ncircuit_drawer(circuit)","sub_path":"20190414_quantum_computer/python/chapter6.py","file_name":"chapter6.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"586222033","text":"import subprocess\nimport threading\nimport constants\nimport datetime\nimport random\nimport time\nimport glob\nimport sys\nimport re\n\nimport constants\nimport get_command\n\ntimeout = 1800 # 30 minutes\nbegin = ''\ncurrent_command = []\nn_threads = 0\nn_max_threads = 16\nsurynek = False\nall_commands = []\n\ntimed_out_filename = constants.timed_out_commands_file\nexecuted_filename = constants.executed_commands_file\nsegfault_filename = constants.segfault_commands_file\n\ntimed_out_file = open(timed_out_filename, 
'w+');\ntimed_out_file.close()\n\nexecuted_file = open(executed_filename, 'w+')\nexecuted_file.close()\n\nsegfault_file = open(segfault_filename, 'w+')\nsegfault_file.close()\n\nfor i in range(len(sys.argv)):\n if sys.argv[i] == '-b':\n begin = sys.argv[i + 1]\n continue\n elif sys.argv[i] == '-t':\n timeout = int(sys.argv[i + 1])\n continue\n elif sys.argv[i] == '-s':\n surynek = True\n print (\"Solving using Surynek's solver\")\n\n\ndef run_in_thread(thread_command):\n global n_threads\n global file\n global timed_out_filename\n global executed_filename\n n_threads += 1\n print(str(datetime.datetime.now()))\n print(thread_command)\n\n executed_file = open(executed_filename, 'a')\n executed_file.write(str(datetime.datetime.now()) + ' : ' \\\n + thread_command[2] + ' ' + thread_command[10] + '\\n')\n executed_file.close()\n\n try:\n process = subprocess.run(args=thread_command, timeout=timeout * 1.2)\n\n if process.returncode == -6:\n segfault_file = open(segfault_filename, 'a')\n segfault_file.write(str(datetime.datetime.now()) + ' : ' \\\n + thread_command[2] + ' ' + thread_command[10] + '\\n')\n segfault_file.close()\n\n except subprocess.TimeoutExpired:\n timed_out_file = open(timed_out_filename, 'a')\n timed_out_file.write(str(datetime.datetime.now()) + ' : ' \\\n + thread_command[2] + ' ' + thread_command[10] + '\\n')\n timed_out_file.close()\n\n n_threads -= 1\n return\n\n\ndef run_in_thread_s(thread_command):\n global n_threads\n global file\n global timed_out_filename\n global executed_filename\n n_threads += 1\n print(str(datetime.datetime.now()))\n print(thread_command)\n\n commands_file = open(executed_filename, 'a')\n commands_file.write(str(datetime.datetime.now()) + ' : ' + thread_command[1] + ' ' + thread_command[3] + '\\n')\n commands_file.close()\n\n try:\n process = subprocess.run(args=thread_command, timeout=timeout * 1.2)\n\n if process.returncode == -6:\n segfault_file = open(segfault_filename, 'a')\n segfault_file.write(str(datetime.datetime.now()) + ' : ' + thread_command[1] + ' ' + thread_command[3] + '\\n')\n segfault_file.close()\n n_threads -= 1\n return\n\n except subprocess.TimeoutExpired:\n timed_out_commands_file = open(timed_out_filename, 'a')\n timed_out_commands_file.write(str(datetime.datetime.now()) + ' : ' + thread_command[1] + ' ' + thread_command[3] + '\\n')\n timed_out_commands_file.close()\n n_threads -= 1\n return \n \n output_filename = re.sub('--output-file=', '', thread_command[2])\n solved = glob.glob(constants.sury_solutions_dir + '/' + output_filename)\n if len(solved) == 0:\n output_file = open(output_filename, 'w')\n output_file.write('No solution\\n')\n output_file.close\n\n n_threads -= 1\n return\n\n\n####################### MAIN ##########################\n\nfilenames = glob.glob(constants.instances_dir + '/' + begin + '*.cpf')\n\nfor filename in filenames:\n instance = re.sub(constants.instances_dir + '/', '', filename)\n instance = re.sub('.cpf', '', instance)\n\n stats = glob.glob(constants.stat_files_dir + '/' + instance + '*.txt')\n if len(stats) > 0 and not surynek:\n print('Instance ' + instance + ' has already been solved.')\n continue\n\n # current_command = get_command.get_solve_command(instance, search='binary', verbosity=0,timeout=timeout)\n # all_commands.append(current_command)\n\n if not surynek:\n current_command = get_command.get_solve_command(instance, search='UNSAT-SAT', verbosity=0, timeout=timeout, indep=1)\n else:\n current_command = get_command.get_sury_solve_command(instance, makespan=32)\n\n 
all_commands.append(current_command)\n\n#all_commands.sort(reverse=True)\nn_commands_total = len(all_commands)\nprint('All ' + str(n_commands_total) + ' commands ready')\nrandom.shuffle(all_commands)\n\n\nwhile len(all_commands) > 0:\n if n_threads < n_max_threads:\n current_command = all_commands.pop()\n print('solving instance ' + str(n_commands_total - len(all_commands)) + ' out of ' + str(\n n_commands_total) + '.')\n print(str(int(100 * (n_commands_total - len(all_commands)) / n_commands_total)) + '% done.')\n print(str(n_threads) + ' threads active.')\n if not surynek:\n thread = threading.Thread(target=run_in_thread, args=[current_command])\n else:\n thread = threading.Thread(target=run_in_thread_s, args=[current_command])\n thread.start()\n else:\n if n_threads > 12:\n time.sleep(10)\n","sub_path":"scripts/solve_all.py","file_name":"solve_all.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474881600","text":"import os\nimport glob\nimport argparse\nimport numpy as np\nimport pandas as pd\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-dir', type=str,\n help='data directory')\nargs = parser.parse_args()\n\n\nfiles = sorted(glob.glob('{0}/{0}_*'.format(args.data_dir)))\n\n# Column names. Used to ensure that traling commas in csv don't cause error.\nsample_df = pd.read_csv(files[0])\ncols = sample_df.columns.values\ntime_steps = sample_df.shape[0]\n\n# Gather all comments that appeared.\nall_captions = set()\nfor file in files:\n df = pd.read_csv(file, names=cols, skiprows=1)\n all_captions.update(df['caption'].values)\n\nall_captions.remove(np.nan)\nall_captions.add('')\n\n# Data augmentation. Ensure parity between RED and BLUE teams.\nred = set()\nblue = set()\nred_blue = set()\nothers = set()\nfor comment in all_captions:\n if 'RED' in comment and 'BLUE' not in comment:\n red.add(comment)\n blue.add(comment.replace('RED', 'BLUE'))\n elif 'BLUE' in comment and 'RED' not in comment:\n blue.add(comment)\n red.add(comment.replace('BLUE', 'RED'))\n elif 'RED' in comment and 'BLUE' in comment:\n red_blue.add(comment)\n else:\n others.add(comment)\n\nassert len(red) == len(blue)\n\n# Gather augmentated comments.\nall_comments = sorted(red | blue | red_blue | others)\n\n# Save all comments.\nsave_dir = args.data_dir + '_processed'\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\nnp.save(os.path.join(save_dir, 'comments'), all_comments)\n\n# Label the comments with integers.\ncaption_to_index = dict((comment, i) for i, comment in enumerate(all_comments))\ncaption_to_index.update({np.nan: 0})\n\n# Separate `time`, `score`, `ball`, `players` and `caption` and prepare data.\ntime_all_matches = []\nscore_all_matches = []\nball_all_matches = []\nplayers_all_matches = []\ncaption_all_matches = []\n\nfor file in files:\n df = pd.read_csv(file, names=cols, skiprows=1)\n\n if df.shape[0] != time_steps:\n print(\"ERROR: {0}\\n\\t shape mismatch {1} != {2} \\tSkipped!\".format(\n file, df.shape[0], time_steps))\n continue\n\n df['caption'].replace(caption_to_index, inplace=True)\n\n time_all_matches.append(df['time'].values)\n score_all_matches.append(df[['score_A', 'score_B']].values)\n ball_all_matches.append(df[['ball_x', 'ball_y']].values)\n players_all_matches.append(df.iloc[:, 5:-1].values)\n caption_all_matches.append(df['caption'].values)\n\ntime_all_matches = np.stack(time_all_matches).astype(np.float32)\nscore_all_matches = 
np.stack(score_all_matches).astype(np.float32)\nball_all_matches = np.stack(ball_all_matches).astype(np.float32)\nplayers_all_matches = np.stack(players_all_matches).astype(np.float32)\ncaption_all_matches = np.stack(caption_all_matches).astype(np.int)\n\n\ndata = {'time': time_all_matches,\n 'score': score_all_matches,\n 'ball': ball_all_matches,\n 'players': players_all_matches,\n 'caption': caption_all_matches}\n\nnp.save(os.path.join(save_dir, 'processed_data'), data)\n","sub_path":"data/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"1983953","text":"import typing\n\nfrom discord.ext import commands\n\nfrom cogs.utils.checks import requires_config\n\n\nclass Stats(commands.Cog):\n \"\"\"Redirect stats commands to the appropriate place\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(invoke_without_command=True)\n async def stats(self, ctx):\n \"\"\"The main stats command for all donation, trophy, attacks and defense statistics.\n\n This command does nothing by itself, however - check out the subcommands!\n\n If your server is currently in an event (`+info event`), this will automatically divert your command to\n `+eventstats...`, otherwise it will automatically call `+seasonstats...`.\n \"\"\"\n if ctx.invoked_subcommand is None:\n return await ctx.send_help(ctx.command)\n\n @stats.command(name='attacks')\n @requires_config('event')\n async def stats_attacks(self, ctx, season_id: typing.Optional[int] = None):\n \"\"\"Get top attack wins for all clans.\n\n **Parameters**\n :key: Season ID (optional - defaults to last season)\n\n **Format**\n :information_source: `+stats attacks SEASON_ID`\n\n **Example**\n :white_check_mark: `+stats attacks`\n :white_check_mark: `+stats attacks 2`\n \"\"\"\n if ctx.config:\n return await ctx.invoke(self.bot.get_command('eventstats attacks'))\n await ctx.invoke(self.bot.get_command('seasonstats attacks'), season_id)\n\n @stats.command(name='defenses', aliases=['defense', 'defences', 'defence'])\n @requires_config('event')\n async def stats_defenses(self, ctx, season_id: typing.Optional[int] = None):\n \"\"\"Get top defense wins for all clans.\n\n **Parameters**\n :key: Season ID (optional - defaults to last season)\n\n **Format**\n :information_source: `+stats defenses SEASON_ID`\n\n **Example**\n :white_check_mark: `+stats defenses`\n :white_check_mark: `+stats defenses 1`\n \"\"\"\n if ctx.config:\n return await ctx.invoke(self.bot.get_command('eventstats defenses'))\n await ctx.invoke(self.bot.get_command('seasonstats defenses'), season_id)\n\n @stats.command(name='gains', aliases=['gain', 'trophies'])\n @requires_config('event')\n async def stats_gains(self, ctx, season_id: typing.Optional[int] = None):\n \"\"\"Get top trophy gainers for all clans.\n\n **Parameters**\n :key: Season ID (optional - defaults to last season)\n\n **Format**\n :information_source: `+stats gains SEASON_ID`\n\n **Example**\n :white_check_mark: `+stats gains`\n :white_check_mark: `+stats gains 3`\n \"\"\"\n if ctx.config:\n return await ctx.invoke(self.bot.get_command('eventstats gains'))\n await ctx.invoke(self.bot.get_command('seasonstats gains'), season_id)\n\n @stats.command(name='donors', aliases=['donations', 'donates', 'donation'])\n @requires_config('event')\n async def stats_donors(self, ctx, season_id: typing.Optional[int] = None):\n \"\"\"Get top donors for all clans.\n\n **Parameters**\n :key: 
Season ID (optional - defaults to last season)\n\n **Format**\n :information_source: `+stats donors SEASON_ID`\n\n **Example**\n :white_check_mark: `+stats donors`\n :white_check_mark: `+stats donors 4`\n \"\"\"\n if ctx.config:\n return await ctx.invoke(self.bot.get_command('eventstats donors'))\n await ctx.invoke(self.bot.get_command('seasonstats donors'), season_id)\n\n\ndef setup(bot):\n bot.add_cog(Stats(bot))\n","sub_path":"cogs/stats/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"297841042","text":"import networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport queue\r\nimport random\r\nimport xlsxwriter\r\nimport collections\r\nimport numpy as np\r\n\r\nG = nx.Graph()\r\n\r\n# G=nx.read_edgelist(\"dataset/Email-1133.edgelist\")\r\n# sir_res_stock = 'SIR_Data/SIR_Email.npy'\r\n# beta = 0.08\r\n# ========================================\r\nG=nx.read_edgelist(\"dataset/CA-Hep.edgelist\")\r\nsir_res_stock = 'SIR_Data/SIR_CA_Hep.npy'\r\nbeta = 0.12\r\n# ========================================\r\n# G=nx.read_edgelist(\"dataset/hamster.edgelist\")\r\n# sir_res_stock = 'SIR_Data/SIR_hamster.npy'\r\n# beta = 0.04\r\n# ========================================\r\n# G=nx.read_edgelist(\"dataset/PGP.edgelist\")\r\n# sir_res_stock = 'SIR_Data/SIR_PGP.npy'\r\n# beta = 0.08\r\n# ========================================\r\n# G=nx.read_edgelist(\"dataset/astro.edgelist\")\r\n# sir_res_stock = 'SIR_Data/SIR_astro.npy'\r\n# beta = 0.023\r\n# ========================================\r\n# G=nx.read_edgelist(\"dataset/facebook.edgelist\")\r\n# sir_res_stock = 'SIR_Data/SIR_facebook.npy'\r\n# beta = 0.014\r\n# ========================================\r\n\r\n# ==================================================\r\ndef sir_model_process(p=0.10, q=1, graph=G, u=\"A\"):\r\n # p = 0.10\r\n # q = 1\r\n # u = \"NorthTexas\"\r\n I = []\r\n I.insert(0, u)\r\n # print(I)\r\n\r\n S = {}\r\n for v in graph:\r\n S[v] = 1\r\n S[u] = 2\r\n # print(S)\r\n\r\n R = []\r\n\r\n count = 0\r\n\r\n while I:\r\n u = I.pop()\r\n # print(I)\r\n count = count + 1\r\n for v in graph.neighbors(u):\r\n if S[v] == 1:\r\n r = random.uniform(0, 1)\r\n # print(r)\r\n if r <= p:\r\n S[v] = 2\r\n # R[v] = 1\r\n # R.append(u)\r\n I.insert(0, v)\r\n # print(I)\r\n # end if\r\n # end if\r\n # end for\r\n r = random.uniform(0, 1)\r\n if r > q:\r\n I.pop(0)\r\n else:\r\n R.append(u)\r\n # end while\r\n\r\n # res = {}\r\n\r\n # res[1] = R\r\n # res[2] = count\r\n\r\n # print(R)\r\n # print(len(R))\r\n # print(\"count: \" + str(count))\r\n return R\r\n\r\n\r\n########## Degree Centrality #########\r\ndeg = {}\r\nN = G.number_of_nodes()\r\nfor v in G:\r\n deg[v] = ((G.degree(v) / (N - 1)))\r\n\r\n# print(deg)\r\nmaxDeg = max(deg.values())\r\n\r\n# print(maxDeg)\r\n######################################\r\n\r\n# def moyen(dict=deg, q=0.10):\r\n# m = sorted(dict.items(), key=lambda kv: kv[1], reverse=True)\r\n# s = 0\r\n# r = q * dict.__len__()\r\n# # print(r)\r\n# # print(int(r))\r\n# for x in range(int(r)):\r\n# s = s + m[x][1]\r\n# return s\r\n\r\n\r\nprint(\"degree: \" + str(sorted(deg.items(), key=lambda kv: kv[1], reverse=True)))\r\n# m = sorted(deg.items(), key=lambda kv: kv[1], reverse=True)\r\n# print(m)\r\n# print(moyen(dict=deg, q=0.10))\r\n\r\n# ===========================================\r\nSIR_res = {}\r\nnB = G.number_of_nodes()\r\ncountH = 0\r\nfor v in G:\r\n countH = countH + 1\r\n print(str(countH) 
+ '/' + str(nB))\r\n res = []\r\n counter = 0\r\n # worksheet.write(row, col, v)\r\n for x in range(100):\r\n counter = counter + 1\r\n res.append(sir_model_process(p=beta, graph=G, u=v).__len__())\r\n # print(sum(res))\r\n val = ((sum(res) / counter) / (N - 1))\r\n # print(val)\r\n SIR_res[v] = val\r\nnp.save(sir_res_stock, SIR_res)\r\n# =============================================\r\n# SIR_res = np.load(sir_res_stock).item()\r\n# =============================================\r\n\r\nprint(\"SIR_res: \" + str(sorted(SIR_res.items(), key=lambda kv: kv[1], reverse=True)))\r\n\r\n\r\ndef moyen(dict1=deg, dict2=SIR_res, q=0.10):\r\n m1 = sorted(dict1.items(), key=lambda kv: kv[1], reverse=True)\r\n s = 0\r\n r = q * dict2.__len__()\r\n # print(r)\r\n # print(int(r))\r\n for x in range(int(r)):\r\n s = s + dict2[m1[x][0]]\r\n return s\r\n\r\n\r\ndef moyen_eff(dict=SIR_res, q=0.10):\r\n m = sorted(dict.items(), key=lambda kv: kv[1], reverse=True)\r\n s = 0\r\n r = q * dict.__len__()\r\n # print(r)\r\n # print(int(r))\r\n for x in range(int(r)):\r\n s = s + m[x][1]\r\n return s\r\n\r\n\r\ndef eps(p=0.10, sp_eff_values = SIR_res, centr_values=deg):\r\n val = 1 - (moyen(dict1=centr_values, dict2=sp_eff_values, q=p) / moyen_eff(dict=sp_eff_values, q=p))\r\n # print(moyen(dict1=deg, dict2=sp_eff_values, q=p))\r\n # print(moyen_eff(dict=sp_eff_values, q=p))\r\n return val\r\n\r\n\r\nprint(eps(p=0.05, sp_eff_values=SIR_res, centr_values=deg))\r\n\r\ndef neighbor(benchmark, node, a, n, p):\r\n if n <= 0 | n > 4:\r\n return -1\r\n value = benchmark[node]\r\n if n == 1:\r\n sum1 = 0\r\n for j in G.neighbors(node):\r\n sum1 = sum1 + a * benchmark[j]\r\n value = value + sum1\r\n return value\r\n if n == 2:\r\n sum1 = 0\r\n if p is True:\r\n print(str(node))\r\n for j in G.neighbors(node):\r\n if p is True:\r\n print(\" \" + str(j))\r\n sum1 = sum1 + a * benchmark[j]\r\n sum2 = 0\r\n for l in G.neighbors(j):\r\n if l == node:\r\n continue\r\n if p is True:\r\n print(\" \" + str(l))\r\n sum2 = sum2 + (a ** 2) * benchmark[l]\r\n value = value + sum2\r\n value = value + sum1\r\n return value\r\n if n == 3:\r\n sum1 = 0\r\n if p is True:\r\n print(str(node))\r\n for j in G.neighbors(node):\r\n if p is True:\r\n print(\" \" + str(j))\r\n sum1 = sum1 + a * benchmark[j]\r\n sum2 = 0\r\n for l in G.neighbors(j):\r\n if l == node:\r\n continue\r\n if p is True:\r\n print(\" \" + str(l))\r\n sum2 = sum2 + (a ** 2) * benchmark[l]\r\n sum3 = 0\r\n for m in G.neighbors(l):\r\n if m == j:\r\n continue\r\n if p is True:\r\n print(\" \" + str(m))\r\n sum3 = sum3 + (a ** 3) * benchmark[m]\r\n value = value + sum3\r\n value = value + sum2\r\n value = value + sum1\r\n return value\r\n if n == 4:\r\n sum1 = 0\r\n if p is True:\r\n print(str(node))\r\n for j in G.neighbors(node):\r\n if p is True:\r\n print(\" \" + str(j))\r\n sum1 = sum1 + a * benchmark[j]\r\n sum2 = 0\r\n for l in G.neighbors(j):\r\n if l == node:\r\n continue\r\n if p is True:\r\n print(\" \" + str(l))\r\n sum2 = sum2 + (a ** 2) * benchmark[l]\r\n sum3 = 0\r\n for m in G.neighbors(l):\r\n if m == j:\r\n continue\r\n if p is True:\r\n print(\" \" + str(m))\r\n sum3 = sum3 + (a ** 3) * benchmark[m]\r\n sum4 = 0\r\n for s in G.neighbors(m):\r\n if s == l:\r\n continue\r\n if p is True:\r\n print(\" \" + str(s))\r\n sum4 = sum4 + (a ** 4) * benchmark[m]\r\n value = value + sum4\r\n value = value + sum3\r\n value = value + sum2\r\n value = value + sum1\r\n return value\r\n\r\n\r\nneighbor_values = {}\r\nfor v in G:\r\n t = 
neighbor(benchmark=nx.degree_centrality(G),node=v,a=0.2,n=1, p=False)\r\n neighbor_values[v] = t\r\n # print(str(v) + \" \" + str(t))\r\n\r\nprint(neighbor_values)\r\n\r\n####################################################################################\r\npas = 0.01\r\n###################################################################################\r\ndiag_val = {}\r\nfor x in range(1,20):\r\n i = x * pas\r\n diag_val[i] = eps(p=i, sp_eff_values=SIR_res, centr_values=deg)\r\n\r\nplt.plot(diag_val.keys(), diag_val.values(), label='k', color='black', linestyle='solid', marker='s')\r\n#####################################################################################\r\ndiag_val = {}\r\nfor x in range(1,20):\r\n i = x * pas\r\n diag_val[i] = eps(p=i, sp_eff_values=SIR_res, centr_values=neighbor_values)\r\n\r\nplt.plot(diag_val.keys(), diag_val.values(), label='C¹(k)', color='red', linestyle='solid', marker='o')\r\n#####################################################################################\r\nneighbor_values = {}\r\nfor v in G:\r\n t = neighbor(benchmark=nx.degree_centrality(G),node=v,a=0.2,n=2, p=False)\r\n neighbor_values[v] = t\r\n # print(str(v) + \" \" + str(t))\r\ndiag_val = {}\r\nfor x in range(1,20):\r\n i = x * pas\r\n diag_val[i] = eps(p=i, sp_eff_values=SIR_res, centr_values=neighbor_values)\r\nprint(neighbor_values)\r\nplt.plot(diag_val.keys(), diag_val.values(), label='C²(k)', color='blue', linestyle='solid', marker='^')\r\n#######################################################################################\r\n# neighbor_values = {}\r\n# for v in G:\r\n# t = neighbor(benchmark=nx.degree_centrality(G),node=v,a=0.2,n=3, p=False)\r\n# neighbor_values[v] = t\r\n# # print(str(v) + \" \" + str(t))\r\n# diag_val = {}\r\n# for x in range(1,20):\r\n# i = x * 0.01\r\n# diag_val[i] = eps(p=i, sp_eff_values=SIR_res, centr_values=neighbor_values)\r\n# print(neighbor_values)\r\n# plt.plot(diag_val.keys(), diag_val.values(), label='C³(k)', color='c', linestyle='solid', marker='v')\r\n#######################################################################################\r\n# neighbor_values = {}\r\n# for v in G:\r\n# t = neighbor(benchmark=nx.degree_centrality(G),node=v,a=0.2,n=4, p=False)\r\n# neighbor_values[v] = t\r\n# # print(str(v) + \" \" + str(t))\r\n# diag_val = {}\r\n# for x in range(1,20):\r\n# i = x * 0.01\r\n# diag_val[i] = eps(p=i, sp_eff_values=SIR_res, centr_values=neighbor_values)\r\n# print(neighbor_values)\r\n# plt.plot(diag_val.keys(), diag_val.values(), label='C⁴(k)', color='fuchsia', linestyle='solid', marker='<')\r\n#######################################################################################\r\nplt.xlabel('p')\r\nplt.ylabel('ε(p)')\r\nplt.title(\"Email\")\r\nplt.legend()\r\nplt.axis([0.0, 0.20, 0.0, 0.06])\r\nplt.show()\r\n\r\n# # Create a workbook and add a worksheet.\r\n# workbook = xlsxwriter.Workbook('karate_SIR_Model.xlsx')\r\n# worksheet = workbook.add_worksheet()\r\n#\r\n# # Start from the first cell. 
Rows and columns are zero indexed.\r\n# row = 0\r\n# col = 0\r\n#\r\n# for v in G:\r\n# worksheet.write(row, col, v)\r\n# row += 1\r\n#\r\n# tab = {}\r\n# for col in range(20):\r\n# row = 0\r\n# for v in G:\r\n# res = []\r\n# counter = 0\r\n# # worksheet.write(row, col, v)\r\n# for x in range(100):\r\n# counter = counter + 1\r\n# res.append(sir_model_process(p=0.22, q=1, graph=G, u=v).__len__())\r\n# print(sum(res))\r\n# val = (sum(res)/counter)\r\n# # print(val)\r\n# tab[v] = val\r\n# worksheet.write(row, col + 1, val)\r\n# row += 1\r\n#\r\n# print(tab)\r\n# sorted_by_value = sorted(tab.items(), key=lambda kv: kv[1], reverse=True)\r\n# print(sorted_by_value)\r\n#\r\n# workbook.close()\r\n\r\n# Network topology\r\n# G = nx.erdos_renyi_graph(110, 0.1)\r\n\r\n# n = 250\r\n# tau1 = 3\r\n# tau2 = 1.5\r\n# mu = 0.1\r\n# G = LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=5,\r\n# min_community=20, seed=10)\r\n\r\n# G\r\n\r\n# # Model Selection\r\n# model = sir.SIRModel(G)\r\n#\r\n# # Model Configuration\r\n# config = mc.Configuration()\r\n# config.add_model_parameter('beta', 0.001)\r\n# config.add_model_parameter('gamma', 0.01)\r\n# config.add_model_parameter(\"percentage_infected\", 0.05)\r\n# model.set_initial_status(config)\r\n#\r\n# # Simulation\r\n# iterations = model.iteration_bunch(200)\r\n# trends = model.build_trends(iterations)\r\n#\r\n# viz = DiffusionTrend(model, trends)\r\n# p = viz.plot(width=400, height=400)\r\n# show(p)\r\n#\r\n# viz2 = DiffusionPrevalence(model, trends)\r\n# p2 = viz2.plot(width=400, height=400)\r\n# show(p2)\r\n# ========================================","sub_path":"SIR_info.py","file_name":"SIR_info.py","file_ext":"py","file_size_in_byte":12047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"4066462","text":"from query_parser import parseQuery\nimport academic_constants\nfrom academic_data import *\nfrom similarity_measure import cosine_sim\nfrom nltk.corpus import stopwords\nfrom text_analysis import *\nimport academic_db_interface\nfrom textblob import TextBlob\n\n\n\"\"\"\nIMPORTANT: run this function and download stopwords corpus from the window:\n\t\t\t\tnltk.download()\n\"\"\"\n\n# Size parameters for each query\nQUERY_SIZE_INITIAL = 13\nQUERY_SIZE_AUTHOR = 40\n\n# Handler for the topic search use case\ndef do_topic_search(abstract):\n\t\"\"\"\n\tHandler for the topic search use case\n\n\t:param abstract: full text of a paper abstract\n\t:returns: List of Authors ready to be displayed\n\t\"\"\"\n\t# Initial AS Query\n\tkeyword_list = parseQuery(abstract)\n\tquery_string = create_query(keyword_list)\n\tpopulated_authors = academic_db_interface.get_authors(query_string, QUERY_SIZE_INITIAL, QUERY_SIZE_AUTHOR)\n\n\t# Reset author scores to 0\n\tfor author in populated_authors:\n\t\tfor p in author.papers:\n\t\t\tp.cosine_similarity = 0\n\n\tscore_authors(populated_authors, abstract=abstract)\n\n\t# Compute scores for each author before sending them to be displayed\n\tfor author in populated_authors:\n\t\tauthor.sumCitations()\n\t\tauthor.computeMostRecentYear()\n\t\tfor p in author.papers:\n\t\t\tif p.title == p.desc:\n\t\t\t\tp.desc = \"Not available\"\n\tpopulated_authors = list(filter(lambda a: a.numPublications > 3, populated_authors))\n\tpopulated_authors.sort(key=lambda author: author.cumulativeScore, reverse=True)\n\treturn populated_authors\n\n\ndef score_authors(author_list, abstract):\n\t\"\"\"\n\tScores a list of authors against a given abstract\n\n\t:param author_list: A list of 
authors populated with papers\n\t:param abstract: Abstract to be scored against\n\t:returns: No return value.\n\t\"\"\"\n\t# create corpus from query words\n\tdocs = {}\n\tcachedStopWords = stopwords.words(\"english\")\n\tquery = TextBlob(abstract.lower())\n\tdocs[-1] = query\n\tcorpWords = []\n\tfor word in query.words:\n\t\tif word not in cachedStopWords and word not in corpWords:\n\t\t\tcorpWords.append(word)\n\t# construct tf-idf vectors from documents\n\tmaxCitations = 0\n\tfor author in author_list:\n\t\tfor paper in author.papers:\n\t\t\tif paper.citations > maxCitations:\n\t\t\t\tmaxCitations = paper.citations\n\t\t\tif paper.id not in docs.keys():\n\t\t\t\tdocs[paper.id] = TextBlob(paper.desc.lower())\n\tcorpus = Corpus(docs, corpWords)\n\tcorpus.constructVectors()\n\n\t# cosine similarity\n\tquery = corpus.scoredDocs[0].vector\n\n\t# original doc has id of -1\n\tfor doc in corpus.scoredDocs:\n\t\tif doc.id == -1:\n\t\t\tquery = doc.vector\n\tdocDict = {}\n\tfor document in corpus.scoredDocs:\n\t\tsim = cosine_sim(query, document.vector)\n\t\tdocument.addScore(sim)\n\t\tdocDict[document.id] = sim\n\n\tfor author in author_list:\n\t\tauthor.setCosineSimilarity(docDict)\n\t\tauthor.scorePapers(maxCitations)\n\t\tauthor.papers.sort(key=lambda paper: paper.finalScore, reverse=True)\n\t\tauthor.scoreAuthor()\n\n\ndef create_query(keyword_list):\n\t\"\"\"\n\tCreates the query used by topic search to find the initial list of authors\n\n\t:param keyword_list: A list of keywords that were parsed from the abstract\n\t:returns: A query string formatted for use with microsoft academic\n\t\"\"\"\n\tcachedStopWords = stopwords.words(\"english\")\n\twordslist = []\n\tfor key in keyword_list:\n\t\twrd = []\n\t\tfor w in key.split(' '):\n\t\t\tif w not in cachedStopWords:\n\t\t\t\twrd.append('W==\\'{}\\''.format(w))\n\t\tline = ','.join(wrd)\n\t\twordslist.append('And({})'.format(line))\n\t# Or together and return\n\tkeyword_query = 'Or({})'.format(','.join(wordslist))\n\treturn 'And({},{})'.format(keyword_query, \"Composite(F.FId=41008148)\")\n","sub_path":"ExpertFinder/topic_search.py","file_name":"topic_search.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337072918","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 24 20:13:30 2019\n\n@author: victor\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nfrom os.path import join\n\ndef euclidian(a, b):\n \"\"\"\n Calculate the euclidian distance between two points.\n \n Args:\n a: Point a\n b: Point b\n \n Returns:\n Return the euclidian distance between the two points.\n \"\"\"\n \n x = np.asarray(a)\n y = np.asarray(b)\n \n return np.sqrt(np.sum(np.power(x - y, 2)))\n\n\ndef plot_plain_separator(model, \n x, \n grid_size=1000, \n grid_range=(-5, 15),\n save=None, \n path=join('..', 'Artigo_1_RNA', 'Imagens')):\n x_lab = np.linspace(grid_range[0], grid_range[1], num=grid_size)\n y_lab = np.linspace(grid_range[0], grid_range[1], num=grid_size)\n x1, x2 = np.meshgrid(x_lab, y_lab)\n x_grid = np.transpose(np.vstack([x1.flatten(), x2.flatten()]))\n \n z = model.predict(x_grid)\n \n z = z.reshape([1000,1000])\n plt.contour(x1, x2, z, levels=[0], colors=('cyan',), linewidths=(2.5,))\n# plt.contour(x1, x2, z, linewidths=(2,))\n if save:\n plt.savefig(join(fr'{path}', fr'{save}.png'))\n 
\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322452481","text":"'''\nCreated on 30 janv. 2019\n\n@author: coline\n'''\n\nimport pickle\nimport itertools \n\n''' calcule l'ensemble des combinaisons de couples possibles entre deux listes d'entiers de 2 a 10\na partir de la liste des combinaisons de couples possibles entre deux listes d'entiers de 2 a 9 '''\ndef recherche_paires_9():\n with open(\"liste_combi_8.pickle\", 'rb') as fichier :\n mon_depickler = pickle.Unpickler(fichier)\n liste_epuree = mon_depickler.load()\n \n print(liste_epuree)\n \n paires = [[]]\n try :\n for element in itertools.product([2,3,4,5,6,7,8,9,10], [10]):\n paires[0].append(element)\n del(paires[0][len(paires[0])-1])\n paires.append([])\n for element in itertools.product([10], [2,3,4,5,6,7,8,9,10]):\n if element not in paires :\n paires[1].append(element)\n del(paires[1][len(paires[1])-1])\n \n except MemoryError as error :\n print(\"memory error 1\")\n \n print(paires)\n \n liste_epuree_2 = []\n liste_ajoute = []\n \n for groupe in liste_epuree :\n pas_bon = False\n for elt in groupe :\n if 10 == elt[0] or 10 == elt[1] :\n pas_bon = True \n if pas_bon == False :\n liste_temp = []\n liste_temp = list(groupe)\n liste_temp.append((10,10))\n liste_epuree_2.append(list(liste_temp))\n if (10,10) not in liste_ajoute :\n liste_ajoute.append((10,10))\n \n ok_1 = []\n for elt_1 in paires[0] :\n pas_bon = False\n for elt in groupe :\n if elt_1[0] == elt[0] or elt_1[1] == elt[1] or (elt_1[0] < elt[0] and elt_1[1] > elt[1]) or (elt_1[0] > elt[0] and elt_1[1] < elt[1]):\n pas_bon = True\n if pas_bon == False :\n ok_1.append(elt_1)\n \n ok_2 = [] \n for elt_2 in paires[1] :\n pas_bon = False\n for elt in groupe :\n if elt_2[0] == elt[0] or elt_2[1] == elt[1] or (elt_2[0] < elt[0] and elt_2[1] > elt[1]) or (elt_2[0] > elt[0] and elt_2[1] < elt[1]):\n pas_bon = True\n if pas_bon == False :\n ok_2.append(elt_2)\n \n print(ok_1) \n for e in range(max(1, len(ok_1))) :\n for f in range(max(1,len(ok_2))) :\n liste_temp = []\n \n liste_temp = list(groupe)\n taille_liste = len(liste_temp)\n \n if len(ok_1) > 0 :\n liste_temp.append(ok_1[e])\n if ok_1[e] not in liste_ajoute :\n liste_ajoute.append(ok_1[e])\n if len(ok_2) > 0 :\n liste_temp.append(ok_2[f])\n if ok_2[f] not in liste_ajoute :\n liste_ajoute.append(ok_2[f])\n if taille_liste < len(liste_temp) :\n liste_epuree_2.append(list(liste_temp))\n \n for elt in paires[0] :\n if elt not in liste_ajoute :\n liste_epuree_2.append([elt])\n \n for elt in paires[1] :\n if elt not in liste_ajoute :\n liste_epuree_2.append([elt]) \n\n \n print(len(liste_epuree_2))\n# print(liste_epuree_2) \n \n with open(\"liste_combi_9.pickle\", 'wb') as fichier :\n mon_pickler = pickle.Pickler(fichier)\n mon_pickler.dump(liste_epuree_2)\n \n with open(\"liste_combi_9.txt\", 'w') as fichier_2 :\n for elt in liste_epuree_2 :\n fichier_2.write(str(elt) + '\\n')\n\n''' recherche l'ensemble des combinaisons de couples possibles entre deux listes d'entiers de 2 a 9 '''\ndef recherche_toutes_paires():\n\n chaines_1 = [2,3,4,5,6,7,8,9]\n chaines_2 = [2,3,4,5,6,7,8,9]\n paires = []\n try :\n for element in itertools.product(chaines_1, chaines_2):\n paires.append(element)\n except MemoryError as error :\n print(\"memory error 1\")\n \n print(paires)\n \n liste = []\n new_couples = []\n new_chaine = []\n liste_epuree = [[(2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), 
(9,9)]]\n k = 0\n #for i in range(1, min(len(chaines_1[num_chaine])-1, len(chaines_2[num_chaine])-1 ) +1) :\n while 7-k >= 1 :\n# print(min(len(chaines_1[num_chaine])-1, len(chaines_2[num_chaine])-1 )-k)\n# print(num_chaine)\n num_l = len(liste)\n try :\n combinaisons = list(itertools.combinations(paires,7-k))\n except MemoryError as error :\n print(\"memory_error_2\")\n# print(len(combinaisons))\n# print(combinaisons)\n \n for elt in combinaisons : ##voir si un sommet n est pas superpose avec deux autres sommets distincts\n pas_bon = False\n i = 0\n while i < len(elt) :\n j = i + 1\n while j < len(elt) :\n if elt[i][0] == elt[j][0] or elt[i][1] == elt[j][1] :\n pas_bon = True\n j = j+1\n i = i+1\n if pas_bon == False :\n liste.append(elt)\n \n num_co = len(new_couples)\n \n for chaine in liste[num_l:] :\n #print(chaine)\n new_ch = []\n for couple in chaine :\n new_ch.append(couple)\n new_couples.append(new_ch)\n# print(\"new couples\")\n# print(len(new_couples))\n# print(new_couples)\n\n num_ca = len(new_chaine) \n for possib in new_couples[num_co:] :\n #print(possib)\n incompatibles = {}\n for i in range(len(possib)) :\n for j in range(i+1, len(possib)) :\n if (possib[i][0] < possib[j][0] and possib[i][1] > possib[j][1]) or (possib[i][0] > possib[j][0] and possib[i][1] < possib[j][1]) :\n if possib[i] not in incompatibles.keys() :\n incompatibles.update({possib[i] : [possib[j]]})\n else :\n incompatibles[possib[i]].append(possib[j])\n# print(incompatibles)\n# for elt in incompatibles.keys() : \n# print(len(incompatibles[elt]))\n# if max < len(incompatibles[elt]) :\n# max = len(incompatibles[elt])\n if len(incompatibles) == 0 :\n new_chaine.append(possib)\n\n \n ## ne garder que les combinaisons uniques\n a_enlever = []\n for i in range(0,len(new_chaine)) :\n for j in range(0, len(new_chaine)) :\n if j != i :\n nb_existe_deja = 0\n for m in range(0, len(new_chaine[i])) :\n for l in range(0, len(new_chaine[j])) :\n if new_chaine[i][m][0] == new_chaine[j][l][0] and new_chaine[i][m][1] == new_chaine[j][l][1] :\n nb_existe_deja += 1\n if nb_existe_deja == len(new_chaine[i]) and i not in a_enlever :\n if len(new_chaine[i]) == len(new_chaine[j]) :\n a_enlever.append(j)\n else :\n a_enlever.append(i)\n for i in range(num_ca, len(new_chaine)) :\n if i not in a_enlever :\n liste_epuree.append(new_chaine[i])\n \n k = k+1 \n \n print(liste_epuree)\n print(len(liste_epuree))\n \n with open(\"liste_combi_10.pickle\", 'wb') as fichier :\n mon_pickler = pickle.Pickler(fichier)\n mon_pickler.dump(liste_epuree)\n \n #print(combinaisons)\n# paires = [[(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10, 10)]]\n# i = 9\n# compteur = 0\n# anciennes_paires = []\n# taille_anciennes_paires = 0\n# while i > 6 :\n# print(i) \n# \n# l = 0\n# m = 0\n# ancienne_taille_ancienne_paire = taille_anciennes_paires\n# taille_anciennes_paires = len(anciennes_paires)\n# #print(taille_anciennes_paires)\n# while l < 10 : \n# \n# for n in range(ancienne_taille_ancienne_paire, max(1,taille_anciennes_paires)) :\n# paire_temp = []\n# for k in range(1,i) :\n# paire_temp.append((k,k))\n# \n# if i+m+1 < 11 :\n# l = i+m+1\n# paire_temp.append((i,l))\n# \n# if len(anciennes_paires) > 0 :\n# prob = False\n# for elt in anciennes_paires[n] :\n# for elt_1 in paire_temp :\n# if elt[0] == elt_1[0] or elt[1] == elt_1[1] :\n# prob = True\n# if prob == False :\n# for elt in anciennes_paires[n] :\n# paire_temp.append(elt)\n# #del(anciennes_paires[n])\n# print(paire_temp) \n# existe_deja = False\n# for j in 
range(len(anciennes_paires)) :\n# groupe = anciennes_paires[j]\n# \n# nb_pareil = 0\n# for elt in groupe :\n# for elt_1 in paire_temp[i-1:] :\n# if elt[0] == elt_1[0] and elt[1] == elt_1[1] :\n# nb_pareil += 1\n# \n# if nb_pareil == len(groupe) and nb_pareil == len(paire_temp[i-1:]) :\n# existe_deja = True\n# \n# \n# if existe_deja == False :\n# anciennes_paires.append(paire_temp[i-1:])\n# #print(paire_temp)\n# paires.append(paire_temp) \n# m = m+1\n# \n# if taille_anciennes_paires == 0 :\n# l = 11\n# print(anciennes_paires)\n# # for n in range(taille_anciennes_paires) :\n# # del(anciennes_paires[n])\n# # print(anciennes_paires) \n# \n# \n# i = i-1\n# print(anciennes_paires)\n# print(paires) \n\n''' pas tres utile je pense '''\ndef recherche_couples(couples_possibles, chaines_1, chaines_2):\n \n for num_chaine in range(0, 4) :\n print(chaines_1[num_chaine])\n print(chaines_2[num_chaine])\n \n \n \n \n \nif __name__ == '__main__':\n# with open(\"grands_graphes.pickle\", 'rb') as fichier :\n# mon_depickler = pickle.Unpickler(fichier)\n# dico_graphes = mon_depickler.load()\n# \n# with open(\"fichiers_pickle/a-minor_test2.pickle\", 'rb') as fichier_pickle :\n# mon_depickler = pickle.Unpickler(fichier_pickle)\n# tab_aminor = mon_depickler.load()\n# \n# for occ in tab_aminor :\n# if occ[\"num_PDB\"] == '5DM6' and occ[\"num_ch\"] == 'X' and occ[\"num_motif\"] == 48 and occ[\"num_occ\"] == 9 :\n# \n# graphe1 = dico_graphes[('5DM6', 'X', 48, 9)]\n# \n# chaines_1 = [[occ[\"a_minor\"][0]]]\n# i = 1\n# for elt in occ[\"a_minor\"] :\n# compteur = elt\n# if i != 1 : chaines_1.append([elt])\n# liaison_B53 = True\n# while liaison_B53 :\n# liaison_B53 = False\n# temp = compteur\n# for voisin in graphe1.successors(compteur) :\n# for arc in graphe1[compteur][voisin] :\n# if voisin not in occ[\"a_minor\"] and voisin not in chaines_1[len(chaines_1)-1] and graphe1[compteur][voisin][arc][\"label\"] == 'B53' :\n# liaison_B53 = True\n# temp = voisin\n# chaines_1[len(chaines_1)-1].append(voisin)\n# \n# for voisin in graphe1.predecessors(compteur) :\n# for arc in graphe1[voisin][compteur] :\n# if voisin not in occ[\"a_minor\"] and voisin not in chaines_1[len(chaines_1)-1] and graphe1[voisin][compteur][arc][\"label\"] == 'B53' :\n# liaison_B53 = True\n# temp = voisin\n# chaines_1[len(chaines_1)-1].append(voisin)\n# compteur = temp\n# i = i +1\n# \n# #with open(\"fichiers_tot_couples_possibles.txt\", 'a') as fichier_tot :\n# #fichier_tot.write(element1 + \"\\n\")\n# #fichier_tot.write(\"Chaines : \" + str(chaines_1) + \"\\n\")\n# graphe2 = dico_graphes[('5J7L', 'DA', 197, 4)]\n# \n# for occ_2 in tab_aminor : \n# if occ_2[\"num_PDB\"] == '5J7L' and occ_2[\"num_ch\"] == 'DA' and occ_2[\"num_motif\"] == 197 and occ_2[\"num_occ\"] == 4 :\n# chaines_2 = [[occ_2[\"a_minor\"][0]]]\n# \n# i = 1\n# for elt in occ_2[\"a_minor\"] :\n# compteur = elt\n# if i != 1 : chaines_2.append([elt])\n# liaison_B53 = True\n# while liaison_B53 :\n# liaison_B53 = False\n# temp = compteur\n# for voisin in graphe2.successors(compteur) :\n# for arc in graphe2[compteur][voisin] :\n# if voisin not in occ_2[\"a_minor\"] and voisin not in chaines_2[len(chaines_2)-1] and graphe2[compteur][voisin][arc][\"label\"] == 'B53' :\n# liaison_B53 = True\n# temp = voisin\n# chaines_2[len(chaines_2)-1].append(voisin)\n# \n# for voisin in graphe2.predecessors(compteur) :\n# for arc in graphe2[voisin][compteur] :\n# if voisin not in occ_2[\"a_minor\"] and voisin not in chaines_2[len(chaines_2)-1] and graphe2[voisin][compteur][arc][\"label\"] == 'B53' :\n# 
liaison_B53 = True\n# temp = voisin\n# chaines_2[len(chaines_2)-1].append(voisin)\n# compteur = temp\n# i = i+1\n# print(chaines_1)\n# print(chaines_2)\n \n \n #recherche_toutes_paires()\n recherche_paires_9()\n \n \n ","sub_path":"recup_couples_grands_graphes.py","file_name":"recup_couples_grands_graphes.py","file_ext":"py","file_size_in_byte":16091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"314547983","text":"#!/usr/bin/env python -u\nfrom os import path\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nhere = path.abspath(path.dirname(__file__))\n__version__ = None\nwith open(path.join(here, 'urlsresolver', '__version.py')) as __version:\n exec(__version.read())\n\nsetup(\n name='urlsresolver',\n version=__version__,\n description='Python urls resolver library',\n author='Alexandr I. Shurigin',\n author_email='ya@helldude.ru',\n maintainer='Alexandr I. Shurigin',\n maintainer_email='ya@helldude.ru',\n url='https://github.com/phpdude/python-urlsresolver',\n packages=find_packages(),\n test_suite='tests',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: System :: Networking\",\n \"Topic :: Utilities\"\n ],\n install_requires=[\n 'requests',\n 'future'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176783869","text":"\"\"\"\nResponse Headers\n\"\"\"\nfrom fastapi import FastAPI, Response\nfrom starlette.responses import JSONResponse\n\napp = FastAPI()\n\n\n@app.get(\"/headers-and-object/\")\ndef get_headers(response: Response):\n response.headers[\"X-Cat-Dog\"] = \"alone in the world\"\n return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/headers/\")\ndef get_headers():\n content = {\"message\": \"Hello World\"}\n headers = {\"X-Name\": \"alone in the world\", \"Content-Language\": \"en-US\"}\n return JSONResponse(content=content, headers=headers)","sub_path":"main-20-header.py","file_name":"main-20-header.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"263219071","text":"#推导式(又称解析式)是Python的一种独有特性,\n#推导式是可以从一个数据序列构建另一个新的数据序列的结构体。 \n# 共有三种推导,在Python2和3中都有支持:\n# 列表(list)推导式\n# 字典(dict)推导式\n# 集合(set)推导式\n\n#partten\n#variable = [out_exp for out_exp in input_list if out_exp == 2]\n\nmultiples = [i for i in range(30) if i %3 is 0]\nprint(multiples)\n\nsquared = []\nfor x in range(10):\n squared.append(x**2)\n\nsquared = [x**2 for x in range(10)]\nprint(squared)","sub_path":"Learn/advanced/list_Comprehension.py","file_name":"list_Comprehension.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"171412211","text":"# Copyright (c) 2019-2020, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cupy as cp\n\nfrom ._writer_cuda import _pack\n\n\ndef write_bin(file, binary, buffer=None, append=True):\n \"\"\"\n Writes binary array to file.\n\n Parameters\n ----------\n file : str\n A string of filename to store output.\n binary : ndarray\n Binary array to be written to file.\n buffer : ndarray, optional\n Pinned memory buffer to use when copying data from GPU.\n append : bool, optional\n Append to file if created.\n\n Returns\n -------\n out : ndarray\n An 1-dimensional array containing binary data.\n\n \"\"\"\n\n # Get current stream, default or not.\n stream = cp.cuda.get_current_stream()\n\n if buffer is None:\n buffer = cp.asnumpy(binary)\n else:\n binary.get(out=buffer)\n\n if append is True:\n mode = \"ab\"\n else:\n mode = \"wb\"\n\n with open(file, mode) as f:\n stream.synchronize()\n buffer.tofile(f)\n\n\ndef pack_bin(in1):\n \"\"\"\n Pack binary arrary.\n Data will be packed with little endian for NVIDIA GPU compatibility.\n\n Parameters\n ----------\n in1 : ndarray\n The ndarray to be pack at binary.\n\n Returns\n -------\n out : ndarray\n An 1-dimensional array containing packed binary data.\n\n \"\"\"\n\n out = _pack(in1)\n\n return out\n\n\ndef write_sigmf(data_file, data, buffer=None, append=True):\n \"\"\"\n Pack and write binary array to file, with SigMF spec.\n\n Parameters\n ----------\n file : str\n A string of filename to be read/unpacked to GPU.\n binary : ndarray\n Binary array to be written to file.\n buffer : ndarray, optional\n Pinned memory buffer to use when copying data from GPU.\n append : bool, optional\n Append to file if created.\n\n Returns\n -------\n\n \"\"\"\n\n packed = pack_bin(data)\n\n write_bin(data_file, packed, buffer, append)\n","sub_path":"python/cusignal/io/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"85850466","text":"#!/usr/bin/python3\n\nimport os, sys\nimport struct\nimport binascii\nimport codecs\nimport traceback\nimport time\n\nfrom socket import socket, inet_aton, AF_PACKET, SOCK_RAW\n\npath = os.environ['HOME_DIR']\nsys.path.insert(0, path)\n\nfrom dnx_configure.dnx_exceptions import *\n\nICMP = 1\nTCP = 6\nUDP = 17\n\nclass Sniffer:\n def __init__(self, IPSProxy, wan_int, wan_ip, action):\n self.IPSProxy = IPSProxy\n self.action = action\n self.wan_int = wan_int\n self.wan_ip = wan_ip\n\n self.s = socket(AF_PACKET, SOCK_RAW)\n self.s.bind((self.wan_int, 3))\n\n def Start(self):\n print(f'[+] Sniffing: {self.wan_int}.')\n time.sleep(10)\n while True:\n if (self.IPSProxy.ddos_prevention or self.IPSProxy.portscan_logging):\n send_to_proxy = False\n data, addr = self.s.recvfrom(1600)\n try:\n Packet = PacketParse(data, addr)\n Packet.Parse()\n if (Packet.protocol == TCP):\n if (Packet.dst_ip == self.wan_ip and Packet.tcp_syn and not Packet.tcp_ack):\n send_to_proxy = True\n elif (Packet.dst_ip == self.wan_ip and Packet.tcp_ack and not Packet.tcp_syn):\n send_to_proxy = True\n elif (Packet.src_ip == self.wan_ip and Packet.tcp_syn and Packet.tcp_ack):\n send_to_proxy = True\n elif (Packet.protocol in {ICMP, UDP}):\n if (Packet.dst_ip == self.wan_ip):\n send_to_proxy = True\n\n if 
(send_to_proxy):\n self.action(Packet)\n except DNXError:\n pass\n except KeyError:\n print('-'*53)\n print('---------------KEY ERROR--------------')\n traceback.print_exc()\n print('-'*53)\n except Exception:\n print('-'*53)\n traceback.print_exc()\n print('-'*53)\n else:\n time.sleep(5*60)\n \nclass PacketParse:\n def __init__(self, data, addr):\n self.data = data\n self.addr = addr\n\n self.tcp_syn = False\n self.tcp_ack = False\n \n def Parse(self):\n self.Ethernet()\n self.Protocol()\n self.IP()\n if (self.protocol == ICMP):\n pass\n if (self.protocol == TCP):\n self.TCP()\n elif (self.protocol == UDP):\n self.UDP()\n else:\n raise IPProtocolError('Packet protocol is not 6/TCP or 17/UDP')\n\n def Ethernet(self):\n s = []\n d = []\n smac = struct.unpack('!6c', self.data[6:12])\n dmac = struct.unpack('!6c', self.data[0:6])\n\n for byte in smac:\n s.append(byte.hex())\n for byte in dmac:\n d.append(byte.hex())\n \n self.src_mac = f'{s[0]}:{s[1]}:{s[2]}:{s[3]}:{s[4]}:{s[5]}'\n self.dst_mac = f'{d[0]}:{d[1]}:{d[2]}:{d[3]}:{d[4]}:{d[5]}'\n \n def IP(self):\n s = struct.unpack('!4B', self.data[26:30])\n d = struct.unpack('!4B', self.data[30:34])\n self.src_ip = f'{s[0]}.{s[1]}.{s[2]}.{s[3]}'\n self.dst_ip = f'{d[0]}.{d[1]}.{d[2]}.{d[3]}'\n\n self.ipv4_header = self.data[14:34]\n\n def Protocol(self):\n self.protocol = self.data[23]\n\n def UDP(self):\n udp_header = struct.unpack('!4H', self.data[34:42])\n self.src_port = udp_header[0]\n self.dst_port = udp_header[1]\n self.udp_length = udp_header[2]\n self.udp_checksum = udp_header[3]\n\n self.udp_header = self.data[34:42]\n\n def TCP(self):\n tcp = self.data[34:66]\n seq_number = tcp[4:8]\n self.seq_number = struct.unpack('!L', seq_number)[0]\n\n tcp_ports = struct.unpack('!2H', self.data[34:38]) #2LH\n self.src_port = tcp_ports[0]\n self.dst_port = tcp_ports[1]\n\n if (self.data[47] & 1 << 1): # SYN\n self.tcp_syn = True\n if (self.data[47] & 1 << 4): # ACK\n self.tcp_ack = True\n\n \n \n \n","sub_path":"dnx_ips/ips_proxy_sniffer.py","file_name":"ips_proxy_sniffer.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273744070","text":"# implement a while loop that will count up from 0 to 20\r\ncount = 0\r\nwhile count <= 20:\r\n print(count)\r\n count += 1\r\n\r\n# implement a for loop that will count up from 0 to 20\r\nfor num in range(0, 21):\r\n print(num)\r\n\r\nmy_list = [23, \"Happy!\", 3.45, [5.6, 6, \"German\"]]\r\n# implement a for loop that will print out everything in the list\r\nfor i in my_list:\r\n print(i)\r\n\r\n# implement a while loop that will print out everything in the list\r\ncount = 0\r\nwhile count < len(my_list):\r\n print(my_list[count])\r\n count += 1\r\n\r\nnumbers = [2, 4, 5, 7, 8, 9, 10, 11, 12, 14, 15, 17, 18, 20, 21]\r\n# implement a for loop that will print out all the even numbers in a list\r\nfor num in numbers:\r\n if num % 2 == 0:\r\n print(num)\r\n\r\n# implement a function that takes in a list of numbers and returns a new\r\n# list of numbers containing only the even numbers from the original list\r\ndef evenNums(numberList):\r\n evens = []\r\n for num in numberList:\r\n if num % 2 == 0:\r\n evens.append(num)\r\n return evens\r\n\r\n\r\nplayers = [[\"Ronald Caruthers\", \"halfback\", 34, \"6'5\\\"\", 187],\r\n [\"Buford T. 
Picknan\", \"fullback\", 28, \"6'1\\\"\", 208],\r\n [\"Harold Ruiz\", \"corner\", 26, \"6'4\\\"\", 231],\r\n [\"Philip Nguyen\", \"forward\", 27, \"6'1\\\"\", 196],\r\n [\"Dexter McGowan\", \"guard\", 30, \"6'0\\\"\", 201],\r\n [\"Robert Feyaz\", \"sweeper\", 31, \"6'10\\\"\", 221]]\r\n# implement a function that takes in a list of lists, holding player information\r\n# and prints out the information in an organized and aligned table\r\ndef statTable(playerStatList):\r\n for player in players:\r\n for stat in player:\r\n print(stat, end=\"\\t\")\r\n print(\"\")\r\n return\r\nstatTable(players)\r\n# implement a function that takes in a string and then prints out each\r\n# letter in the string on a separate line\r\ndef foo(string):\r\n for letter in string:\r\n if letter.isalpha():\r\n print(letter)\r\n return\r\nfoo(\"friedrich nietzsche\")\r\n\r\n# implement a function that takes in a list of strings, and prints out each\r\n# string on the same line separated by spaces\r\ndef bar(listOfStrings):\r\n for string in listOfStrings:\r\n print(string, end=\" \")\r\n return\r\nstrings = [\"sieze\", \"the\", \"means\", \"of\", \"production\"]\r\nbar(strings)\r\n","sub_path":"Other Work/Practice/Exam05.py","file_name":"Exam05.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"529090421","text":"from AppKit import NSWorkspace\nfrom Quartz import CGWindowListCopyWindowInfo, kCGWindowListOptionOnScreenOnly, kCGNullWindowID\nimport applescript\nimport psutil\nimport json\nimport logging\n\nwith open('meta.json') as f:\n # Parses metadata for associated applications\n logging.debug(\"Loading 'meta.json'\")\n data = json.load(f)\n\ndef get_process_info():\n # Process information of application\n logging.debug(\"Getting process information...\")\n for element in data:\n # Finds process name for Adobe applications\n process_name = element['processNameMac']\n for process in psutil.process_iter():\n # Finds pid through iteration\n process_info = process.as_dict(attrs=['pid', 'name'])\n if process_info['name'].lower() in process_name:\n element['pid'] = process_info['pid']\n logging.debug(\"Process returns with info: \" +\n str(process_info))\n return element\n\ndef get_title(pid):\n # Processes title of application from PID\n logging.debug(\"Getting title for the application...\")\n curr_app = NSWorkspace.sharedWorkspace().frontmostApplication()\n curr_pid = NSWorkspace.sharedWorkspace().activeApplication()['NSApplicationProcessIdentifier']\n curr_app_name = curr_app.localizedName()\n options = kCGWindowListOptionOnScreenOnly\n windowList = CGWindowListCopyWindowInfo(options, kCGNullWindowID)\n for window in windowList:\n pid = window['kCGWindowOwnerPID']\n windowNumber = window['kCGWindowNumber']\n ownerName = window['kCGWindowOwnerName']\n geometry = window['kCGWindowBounds']\n windowTitle = window.get('kCGWindowName', u'Unknown')\n if curr_pid == pid:\n logging.debug(\"Title of application: \" + windowTitle.encode('ascii','ignore'))\n return windowTitle\n\ndef get_status(app_info, title):\n # Status of application\n if app_info['largeText'].lower() in title.lower() and app_info['splitBy'] != \" - \":\n # Idle detection\n logging.debug(\"Returning to Discord that you are detected as idle...\")\n return \"{}: IDLE\".format(app_info['smallText'])\n else:\n # Project detection\n logging.debug(\"Not idling! 
Finding project...\")\n title_seperated = title.split(app_info['splitBy'])\n if app_info['splitBy'] == \" - \":\n title_basename = ntpath.basename(\n title_seperated[app_info['splitIndex']])\n logging.debug(\"Returning the title of the project\")\n return \"{}: {}\".format(app_info['smallText'], title_basename)\n else:\n return \"{}: {}\".format(app_info['smallText'], title_seperated[app_info['splitIndex']])\n","sub_path":"api/macos.py","file_name":"macos.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556373377","text":"import sys\nimport argparse\nimport Models , LoadBatches\n\n\ndef parse(argv):\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--save_weights_path\", type = str )\n\tparser.add_argument(\"--train_images\", type = str )\n\tparser.add_argument(\"--train_annotations\", type = str )\n\tparser.add_argument(\"--n_classes\", type=int )\n\tparser.add_argument(\"--input_height\", type=int , default = 224 )\n\tparser.add_argument(\"--input_width\", type=int , default = 224 )\n\n\tparser.add_argument('--validate',action='store_false')\n\tparser.add_argument(\"--val_images\", type = str , default = \"\")\n\tparser.add_argument(\"--val_annotations\", type = str , default = \"\")\n\n\tparser.add_argument(\"--epochs\", type = int, default = 5 )\n\tparser.add_argument(\"--batch_size\", type = int, default = 2 )\n\tparser.add_argument(\"--val_batch_size\", type = int, default = 2 )\n\tparser.add_argument(\"--load_weights\", type = str , default = \"\")\n\n\tparser.add_argument(\"--model_name\", type = str , default = \"\")\n\tparser.add_argument(\"--optimizer_name\", type = str , default = \"adadelta\")\n\n\n\treturn parser.parse_args(argv)\n\ndef train(args):\n\n\ttrain_images_path = args.train_images\n\ttrain_segs_path = args.train_annotations\n\ttrain_batch_size = args.batch_size\n\tval_batch_size = args.val_batch_size\n\tn_classes = args.n_classes\n\tinput_height = args.input_height\n\tinput_width = args.input_width\n\tvalidate = args.validate\n\tsave_weights_path = args.save_weights_path\n\tepochs = args.epochs\n\n\tload_weights = args.load_weights\n\n\toptimizer_name = args.optimizer_name\n\tmodel_name = args.model_name\n\n\tif validate:\n\t\tval_images_path = args.val_images\n\t\tval_segs_path = args.val_annotations\n\t\tval_batch_size = args.val_batch_size\n\n\tmodelFns = { 'vgg_segnet':Models.VGGSegnet.VGGSegnet , 'vgg_unet':Models.VGGUnet.VGGUnet , 'vgg_unet2':Models.VGGUnet.VGGUnet2 , 'fcn8':Models.FCN8.FCN8 , 'fcn32':Models.FCN32.FCN32 }\n\tmodelFN = modelFns[ model_name ]\n\n\tm = modelFN( n_classes , input_height=input_height, input_width=input_width)\n\tm.compile(loss='categorical_crossentropy',\n\t optimizer= optimizer_name ,\n\t metrics=['accuracy'])\n\n\n\tif len( load_weights ) > 0:\n\t\tm.load_weights(load_weights)\n\n\n\tprint(\"Model output shape\" , m.output_shape)\n\n\toutput_height = m.outputHeight\n\toutput_width = m.outputWidth\n\n\tG = LoadBatches.imageSegmentationGenerator( train_images_path , train_segs_path , train_batch_size, n_classes , input_height , input_width , output_height , output_width )\n\n\n\tif validate:\n\t\tG2 = LoadBatches.imageSegmentationGenerator( val_images_path , val_segs_path , val_batch_size, n_classes , input_height , input_width , output_height , output_width )\n\n\tn_train_images = 367 # hardcoded n images in training dataset\n\tn_val_images = 101 # hardcoded n images in validation dataset\n\t\n\tif not 
validate:\n\t\tfor ep in range( epochs ):\n\t\t\tprint(\"Epoch %d / %d\\n\" % (ep+1, epochs))\n\t\t\tm.fit_generator( G , int(n_train_images/train_batch_size) , epochs=1 )\n\t\t\tm.save_weights( save_weights_path + \".\" + str( ep ) )\n\t\t\tm.save( save_weights_path + \".model.\" + str( ep ) )\n\telse:\n\t\tfor ep in range( epochs ):\n\t\t\tprint(\"Epoch %d / %d\\n\" % (ep+1, epochs))\n\t\t\tm.fit_generator( G , int(n_train_images/train_batch_size) , validation_data=G2 , validation_steps=int(n_val_images/val_batch_size) , epochs=1 )\n\t\t\tm.save_weights( save_weights_path + \".\" + str( ep ) )\n\t\t\tm.save( save_weights_path + \".model.\" + str( ep ) )\n\n\nif __name__ == '__main__':\n\n\targv = sys.argv\n\targs = parse(argv[1:])\n\n\ttrain(args)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"204117575","text":"import PySimpleGUI as sg\nimport os\nimport codigo.interfaz.visorAvatar as va\nfrom codigo.logica.jugador import Jugador\nimport codigo.interfaz.interfaz_puntaje as pun\nfrom codigo.interfaz.tema import *\nfrom codigo.interfaz.ayuda import ayuda\n\n\ndef check_apodo(apodo):\n '''esta función se encarga de evaluar el campo del APODO al momento\n decrear una partida. Los apodos deben tener mas de 3 caracteres, no poseer espacion en blanco\n no poseer caracteres especiales, y no ser solo numeros'''\n\n carEspecial = '!@#$%^&*()[]{};:,./<>?\\|`~-=_+'\n if len(apodo) < 3 or len(apodo) > 10:\n print('longitud')\n return False\n elif (apodo == '') or (apodo.isspace()== True) or (' ' in apodo):\n\n return False\n elif apodo.isdigit()==True:\n\n return False\n\n for c in apodo:\n\n if c in carEspecial:\n return False\n\n return True\n\ndef nivel(ventana):\n '''Esta función devolvera el nivel elegido por el usuario según el\n el estado de los elementos \"Radio\", que se usan en el layout de nueva partida'''\n if ventana.FindElement('rFacil').Get() == True:\n n = \"facil\"\n elif ventana.FindElement('rMedio').Get() == True:\n n = 'medio'\n else:\n n = 'dificil'\n return n\n\ndef jugar (avatar, value, ventana):\n '''Esta función crea una instancia del objeto jugador y lo retorna'''\n jugador = Jugador(nombre=value['apodo'], dificultad=nivel(ventana), avatar=avatar)\n return jugador\n\n\n\n\ndef actualizar_columnas(ventana, *columna):\n '''Esta función hará visible la columna que recibe como parámetro\n e invisible el resto de las columnas.\n Sirve para actualizar la interfaz principal segun la opción elegida'''\n #Busca en la lista de elementos hasta encontrar una columna\n for e in ventana.element_list():\n if e.Type == 'column':\n #Si la columna está invisible y tiene la key que busco, la hace visible\n if e.Visible == False and e.Key in columna:\n ventana.FindElement(e.Key).update(visible=True)\n elif e.Key in columna:\n ventana.FindElement(e.Key).update(visible=True)\n #Si no, la oculta\n else:\n ventana.FindElement(e.Key).update(visible=False)\n\n\ndef cargando():\n layout=[\n [sg.popup_animated(image_source='blue_blocks.gif', message=\"Cargando...\", alpha_channel=1,time_between_frames=2,no_titlebar=True)],\n ]\n return layout\n\n\ndef nueva_partida(avatar):\n layout = [\n [sg.Text('Apodo:', font=('Italic 24'),key='jugador')],\n [sg.InputText('',size=(20,20),font=('Italic 24'),background_color='#ece6eb',key='apodo')],\n [sg.Frame(\n layout= [[sg.Radio('Fácil', \"dificultad\",font=('Italic 24'), default=True, size=(10,3), key='rFacil')],\n 
[sg.Radio('Medio', \"dificultad\",font=('Italic 24'), default=False, size=(10,3), key='rMedio')],\n [sg.Radio('Difícil', \"dificultad\",font=('Italic 24'), default=False, size=(10,3), key='rDificil')],],\n\n title='Dificultad' ,title_color='black', relief=sg.RELIEF_SUNKEN,font=('Italic 24'),\n element_justification='center',key='contenedor'),\n sg.Column(avatar.getAvatarLayout(), visible=False,key='colAvatar')],\n [sg.Button('Jugar', size=(10, 2),button_color=('black','#afad71'),font=('Arial', 18),border_width=1, key='confirmar'),sg.Button('cancelar', size=(10, 2),font=('Arial', 18),border_width=1,button_color=('black','#afad71'), key='cancelar')],\n ]\n return layout\n\n\ndef jugar_interfaz(img_boton_largo):\n layout = [\n [sg.Button('Nueva Partida',image_filename=img_boton_largo, border_width=0,font=('Italic 24'),size = (20,3), key='nueva')],\n [sg.Button('Cargar Partida',image_filename=img_boton_largo, border_width=0,font=('Italic 24'), size=(20,3), key='cargar')],\n [sg.Button('Volver',image_filename=img_boton_largo, border_width=0,font=('Italic 24'), size=(20,3), key='volver')],\n ]\n return layout\n\ndef inicio(img_boton_madera):\n layout = [[sg.Button(image_filename=img_boton_madera, border_width=0,button_text='Ayuda', size=(200, 200), font=('Impact', 40), key='ayuda'),\n sg.Button(image_filename=img_boton_madera, border_width=0, button_text='Jugar', size=(200, 200), font=('Impact', 40),key='jugar'),\n sg.Button(image_filename=img_boton_madera, border_width=0,button_text='Puntajes', size=(200, 200), font=('Impact', 30),key='puntajes')],\n [sg.Button('Salir', button_color=('black','#f75404'),size=(10, 2),font=('Arial Black', 20),border_width=1, key='salir')]]\n return layout\n\n\ndef interfaz_principal(img_logo, img_boton_largo, img_boton_madera, avatar):\n colInicial = inicio(img_boton_madera)\n columnaCen2 = jugar_interfaz(img_boton_largo)\n columnaNueva = nueva_partida(avatar)\n #Al ejecutarse, sólo será visible la columna inicial\n layout = [\n [sg.Image(filename=img_logo)],\n [sg.Column(colInicial,justification='center',element_justification='center',key= 'colInicial'),\n sg.Column(columnaCen2,visible=False,justification='cener',element_justification='center',key='colJugar2'),\n sg.Column(columnaNueva, visible=False,justification='cener',element_justification='center', key='colPartida'),\n ],\n ]\n\n return layout\n\ndef lazo_principal():\n #Asigno las rutas de las imagenes a usar.\n #La idea es tener un modulo que cargue al iniciar el programa todas las imagenes necesarias\n #y usar excepciones si no encuentra los archivos\n #-----------------------------------------------\n directorio_partidas = os.path.join('guardados', 'juego_guardado.pckl')\n directorio_avatares = os.path.join('media','media_ii','avatars', '') # sg.popup_get_folder('Image folder to open', default_path='')\n img_boton_largo = os.path.join('media','media_ii','botonlargo.png')\n img_boton_madera = os.path.join('media','media_ii','botonMadera.png')\n img_logo = os.path.join('media','media_ii','logo.png')\n #-----------------------------------------------\n\n #Crea un jugador vacío. 
Si se cierra la ventana sin comenzar una partida, se retorna al final\n jugador = Jugador('', -1, '')\n avatar = va.Visor(directorio_avatares)\n\n cargar_partida = False\n\n ANCHO = 900 # solo de prueba\n ALTO = 700 # solo de prueba\n mi_tema()\n ventana = sg.Window('ScrabbleAr', interfaz_principal(img_logo, img_boton_largo, img_boton_madera, avatar), size = (ANCHO,ALTO),resizable=True,element_justification='center',no_titlebar=False)\n ventana.Finalize()\n while True:\n\n event, value = ventana.read()\n\n if (event == None) or (event == 'salir'):\n break\n elif event == 'jugar':\n actualizar_columnas(ventana, 'colJugar2')\n elif event == 'cargar':\n if (os.path.isfile(directorio_partidas)):\n #Se asigna un nombre al jugador para que el main pueda validarlo\n jugador.setNombre('Jugador guardado')\n cargar_partida = True\n break\n else:\n sg.popup('¡No hay ninguna partida guardada!')\n elif event == 'volver':\n actualizar_columnas(ventana, 'colInicial')\n elif event == 'nueva':\n actualizar_columnas(ventana, 'colPartida','colAvatar')\n elif event == 'cancelar':\n actualizar_columnas(ventana, 'colJugar2')\n elif event == 'confirmar':\n if check_apodo(ventana.FindElement('apodo').Get()):\n avatarSelec = avatar.getActualRuta()\n decision = sg.popup_yes_no(f'¿Confirmar los datos?\\nNombre: {value[\"apodo\"]}\\nDificultad: {nivel(ventana)}',background_color='#ece6eb',text_color='black', button_color=('black','#f75404'),font=('Arial',14), no_titlebar=True, keep_on_top=True)\n if decision == 'Yes':\n jugador = jugar(avatarSelec, value, ventana)\n break\n else:\n sg.popup_ok('Debe ingresar un Apodo (debe tener entre 3 y 10 caracteres,puede ser alfanumerico, pero no debe contener caracteres especiales)',background_color='#ece6eb',text_color='black', button_color=('black','#f75404'),font=('Arial',14), no_titlebar=True, keep_on_top=True)\n elif event in ('<<<', '>>>'):\n avatarSelec = avatar.controles(event, ventana.FindElement('avatarVisor'))\n\n elif event == 'puntajes':\n pun.puntajes()\n elif event == 'ayuda':\n ayuda()\n\n ventana.Close()\n return jugador, cargar_partida\n\nif __name__ == '__main__':\n lazo_principal()\n","sub_path":"codigo/interfaz/interfaz_inicial.py","file_name":"interfaz_inicial.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36937134","text":"#为表格中的所有列添加列标题行\n#method1 基础python语句\n\n#!/usr/bin/env python3\nimport sys\nimport csv\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\nwith open(input_file,'r',newline='') as csv_in_data:\n with open(output_file,'w',newline='') as csv_out_data:\n filereader = csv.reader(csv_in_data)\n filewriter = csv.reader(csv_out_data)\n header = ['Supplier Name','Invoice Number','Part Number','Cost','Purchase Date'] #手动将需要的列属性添加至标题行\n filewriter.writerow(header)\n for row in filereader:\n filewriter.writerow(row)","sub_path":"read_csv_file/12csv_reader_add_header_row.py","file_name":"12csv_reader_add_header_row.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"242399060","text":"\"\"\"\nUse a pre-trained model to classify an image\n\"\"\"\n\nfrom view_classification_functions import *\n# import argparse\nimport pickle\nimport numpy\nimport json\nimport cv2\nimport os\n# ap = argparse.ArgumentParser()\n# ap.add_argument(\"-m\", \"--model\", required=True, help=\"path to model\")\n# ap.add_argument(\"-i\", \"--image\", required=True, help=\"path to 
image to classify\")\n# ap.add_argument(\"-j\", \"--json\", default='blank', help=\"path to class matching file\")\n# args = vars(ap.parse_args())\n\n\n\ndef classify_view(model_path, img, json_path='blank'):\n try:\n # if True:\n try:\n model = pickle.load(open(model_path, 'rb'))\n except Exception as e:\n print('[ERROR CLASSIFY VIEW]',e)\n return -1\n the_camera = os.path.basename(model_path).split('.')[0]\n # make sure that image is of right size and content (e.g. not gray or erroneous\n # if check_if_image_is_ok(image_path)[0]:\n\n # img = cv2.imread(image_path)\n # prepare the features from images\n features = extract_hog(img)\n features = numpy.asarray(features)\n features = features.reshape(1, -1)\n\n prediction = predict(model, features)[0]\n prob = predict_prob(model, features)[0].tolist()\n\n # go through the contents of json file\n # looking for match on the camera name\n # and if there are 'match' and 'merge' keys\n # use them to match prediciton to\n # previous classifier and merge classes\n found_matching = 0\n found_merge = 0\n if json_path != 'blank' and os.path.isfile(json_path) and os.stat(json_path).st_size > 0:\n with open(json_path, \"r\") as jsonFile:\n datas = json.load(jsonFile)\n for data in datas['cameras']:\n if data['camera'] == the_camera:\n if 'match' in data:\n matching = data['match']\n found_matching = 1\n if 'merge' in data:\n merge = data['merge']\n found_merge = 1\n if found_matching:\n for k, v in matching.items():\n if k == prediction:\n prediction = v\n break\n if found_merge:\n for k, v in merge.items():\n if k == prediction:\n prediction = v\n break\n print(prediction, prob)\n return prediction\n else:\n print(prediction, prob)\n return prediction\n\n except Exception as e:\n print(e)\n return -1","sub_path":"app/classify_view.py","file_name":"classify_view.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623707450","text":"import cposguf_cluster_actions as cca\nfrom cposguf_run import sql_connection, fetch_query\nimport graph_objects as go\nimport toric_error as te\nimport toric_code as tc\nimport unionfind as uf\nfrom collections import defaultdict as dd\nimport pickling as pk\nimport os\n\n\nlimit = None\nplotnum = 25\nmaxfetch = 5000\nL = [8 + 4 * i for i in range(10)]\nP = [(90 + i)/1000 for i in range(21)]\nminl, maxl = 1, 6\nfile = \"sim4_realr1c_data_gauss8_44-c1_6\"\n\n##############################################\n# definitions:\n\ndef d0(): return [0,0]\ndef d1(): return [[0,0], [0,0]]\ndef d2(): return dd(d1)\n\n\ndef get_count(clusters, data_p, data_n, size, p, n, round, type):\n \"\"\"\n returns a defaultdict of lists of clusters countaining the tuples of the qubits\n \"\"\"\n clusters = cca.listit(clusters)\n\n # Return to principal cluster\n for i, cluster in enumerate(clusters):\n clusters[i] = cca.principal(cluster, ydim=size, xdim=size)\n for cluster in clusters:\n augs, cmid = [], []\n for rot_i in range(4):\n dim = cca.max_dim(cluster)\n augs.append(cluster)\n cmid.append(dim[1])\n fcluster = frozenset(cluster)\n if fcluster in data_p:\n data_p[fcluster][(size, p)][round][type] += 1\n data_n[fcluster][(size, n)][round][type] += 1\n break\n mcluster = cca.mirror(cluster, dim[0])\n mdim = cca.max_dim(mcluster)\n augs.append(mcluster)\n cmid.append(mdim[1])\n fmcluster = frozenset(mcluster)\n if fmcluster in data_p:\n data_p[fmcluster][(size, p)][round][type] += 1\n data_n[fmcluster][(size, n)][round][type] += 1\n break\n cluster = 
cca.rotate(cluster, dim[0])\n else:\n ftupcluster = frozenset(augs[cmid.index(min(cmid))])\n data_p[ftupcluster][(size, p)][round][type] += 1\n data_n[ftupcluster][(size, n)][round][type] += 1\n\n return data_p, data_n\n\n###########################################################\n\nif os.path.exists(file + \".pkl\"): # Load data_base if pickled file exists\n print(\"loading data \" + file)\n data = pk.load_obj(file)\n data_p, data_n = data[\"data_p\"], data[\"data_n\"]\n countp, countn = data[\"countp\"], data[\"countn\"]\nelse: # Initate database\n data_p, data_n = [dd(d2) for _ in range(2)]\n countp, countn = [dd(d0) for _ in range(2)]\n\nfor lattice in L:\n for p in P:\n\n con, cur = sql_connection()\n print(\"\\nGetting count of L{}, p{}...\".format(lattice, p))\n cur.execute(\"SELECT tree_wins, list_wins FROM cases WHERE lattice = {} and p = {}\".format(lattice, p))\n tlcount = cur.fetchone()\n countp[(lattice, p)] = [tlcount[0], tlcount[1]]\n\n cur.execute(fetch_query(\"COUNT(*)\", lattice, p))\n num = cur.fetchone()[0]\n print(\"fetching {} simulations...\".format(num))\n\n cur.execute(fetch_query(\"ftree_tlist, seed\", lattice, p))\n sims = [cur.fetchone()]\n graph = go.init_toric_graph(lattice)\n\n fetched = 1\n while sims != [None]:\n\n print(\"{:0.1f}%\".format(fetched/num*100))\n sims += cur.fetchmany(maxfetch-1)\n fetched += maxfetch\n\n for type, seed in sims:\n\n # Get errors from seed\n te.apply_random_seed(seed)\n te.init_pauli(graph, pX=float(p))\n\n n = len([\n graph.E[(0, y, x, td)].qID[1:]\n for y in range(lattice) for x in range(lattice) for td in range(2)\n if graph.E[(0, y, x, td)].state\n ])\n countn[(lattice, n)][type] += 1\n\n tc.measure_stab(graph)\n ufg = uf.cluster_farmer(graph)\n ufg.find_clusters(plot_step=0)\n grow_bucket = {\n 0: ufg.tree_grow_bucket,\n 1: ufg.list_grow_bucket\n }\n\n # Analyze clusters after bucket 0 growth\n grow_bucket[type](graph.buckets[0], 0)\n clusters = cca.get_support2clusters(graph, lattice, minl, maxl)\n data_p, data_n = get_count(clusters, data_p, data_n, lattice, p, n, 0, type)\n\n # Analyze clusters after bucket 1 growth\n grow_bucket[type](graph.buckets[1], 1)\n clusters = cca.get_support2clusters(graph, lattice, minl, maxl)\n data_p, data_n = get_count(clusters, data_p, data_n, lattice, p, n, 1, type)\n\n graph.reset()\n sims = [cur.fetchone()]\n else:\n print(\"100%\")\n cur.close()\n con.close()\n\n print(\"Saving data...\")\n data = { # Save to single data file\n \"data_p\": data_p,\n \"data_n\": data_n,\n \"countp\": countp,\n \"countn\": countn\n }\n pk.save_obj(data, file)\n","sub_path":"old/cposguf/cposguf_analyze_sim4_round1clusters.py","file_name":"cposguf_analyze_sim4_round1clusters.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"286769594","text":"import glob\nimport os\nimport random\nimport torch\n\nimport numpy as np\n\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nclass ImageDataset(Dataset):\n def __init__(self, root, transforms_=None, random_transforms_=None, mode='train'):\n self.random_transform = transforms.Compose(random_transforms_)\n self.transform = transforms.Compose(transforms_)\n\n self.files_A = sorted(glob.glob(os.path.join(root, \"%s/A\" % mode) + \"/*.*\"))\n self.files_B = sorted(glob.glob(os.path.join(root, \"%s/B\" % mode) + \"/*.*\"))\n\n def __getitem__(self, index):\n\n image_A = Image.open(self.files_A[index % 
len(self.files_A)])\n image_B = Image.open(self.files_B[index % len(self.files_B)])\n\n # Convert grayscale images to rgb\n if image_A.mode != \"RGB\":\n image_A = to_rgb(image_A)\n if image_B.mode != \"RGB\":\n image_B = to_rgb(image_B)\n\n item_A = self.random_transform(image_A)\n item_B = self.random_transform(image_B)\n\n item_A = self.transform(image_A)\n item_B = self.transform(image_B)\n\n return {\"A\": item_A, \"B\": item_B}\n\n def __len__(self):\n return min(len(self.files_A), len(self.files_B))\n\n\nclass ImageDataset_Pixellated(Dataset):\n def __init__(self, root, transforms_=None, random_transforms_=None, mode=\"train\"):\n self.transform = transforms.Compose(transforms_)\n self.random_transform = transforms.Compose(random_transforms_)\n self.files = sorted(glob.glob(os.path.join(root % mode, \"*.jpg\")))\n print(\n \"{} images files found for {}.\".format(\n len(self.files),\n mode,\n )\n )\n\n def __getitem__(self, index):\n\n img = Image.open(self.files[index % len(self.files)])\n # Convert grayscale images to rgb\n if img.mode != \"RGB\":\n img = to_rgb(img)\n w, h = img.size\n img = self.random_transform(img)\n img_A = img.copy()\n img_B = img.copy()\n img_M = img.copy()\n\n # Paste pixellated patch\n patch_size_w = random.randrange(16, w)\n patch_size_h = random.randrange(16, h)\n x1 = random.randint(1, w-patch_size_w)\n y1 = random.randint(1, h-patch_size_h)\n x2 = x1+patch_size_w\n y2 = y1+patch_size_h\n img_M = img_M.crop((x1, y1, x2, y2))\n pixel_size = max(4, random.randint(int(patch_size_w/24), int(patch_size_w/4)))\n img_M = img_M.resize(\n (\n patch_size_w//pixel_size + 1,\n patch_size_h//pixel_size + 1,\n ),\n resample=Image.BILINEAR\n )\n img_M = img_M.resize((patch_size_w, patch_size_h), Image.NEAREST)\n img_B.paste(img_M, (x1, y1))\n\n img_A = self.transform(img_A)\n img_B = self.transform(img_B)\n\n return {\"A\": img_A, \"B\": img_B}\n\n def __len__(self):\n return len(self.files)\n","sub_path":"implementations/discogan/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"464396492","text":"from django.shortcuts import render\nfrom .models import Product,Contact,Orders\nfrom math import ceil\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom PayTm import Checksum\n# Create your views here.\nfrom django.http import HttpResponse\nMERCHANT_KEY = 'tRuuR9l0V6e%!0UM'\n\n# Create your views here.\nfrom django.http import HttpResponse\n\ndef index(request):\n # products = Product.objects.all()\n # print(products)\n # n = len(products)\n # nSlides = n//4 + ceil((n/4)-(n//4))\n\n allProds = []\n catprods = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catprods}\n for cat in cats:\n prod = Product.objects.filter(category=cat)\n n = len(prod)\n nSlides = n // 4 + ceil((n / 4) - (n // 4))\n allProds.append([prod, range(1, nSlides), nSlides])\n\n # params = {'no_of_slides':nSlides, 'range': range(1,nSlides),'product': products}\n # allProds = [[products, range(1, nSlides), nSlides],\n # [products, range(1, nSlides), nSlides]]\n params = {'allProds':allProds}\n return render(request, 'shop/index.html', params)\n\ndef searchMatch(query,item):\n if query in item.desc.lower() or query in item.product_name.lower() or query in item.category.lower() or query in item.subcategory.lower() or query in item.desc or query in item.product_name or query in item.category or query in item.subcategory: 
\n return True \n else:\n return False \n\n\ndef search(request):\n query=request.GET.get('search')\n allProds = []\n catprods = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catprods}\n for cat in cats:\n prodtemp = Product.objects.filter(category=cat)\n prod=[item for item in prodtemp if searchMatch(query,item)]\n n = len(prod)\n nSlides = n // 4 + ceil((n / 4) - (n // 4))\n if len(prod)!=0:\n allProds.append([prod, range(1, nSlides), nSlides])\n params = {'allProds':allProds,'msg':''}\n if len(allProds)==0 or len(query)<2:\n params={'msg':'Search Not Found!!! Try Again'}\n return render(request, 'shop/search.html', params)\n\n\n\ndef about(request):\n return render(request, 'shop/about.html')\n\ndef contact(request):\n if request.method==\"POST\":\n name=request.POST.get('name','')\n email=request.POST.get('email','')\n phone=request.POST.get('phone','')\n desc=request.POST.get('desc','')\n contact = Contact(name=name, email=email, phone=phone, desc=desc)\n contact.save()\n x=True\n return render(request, 'shop/contact.html',{'x':True,'name':name})\n return render(request,'shop/contact.html')\n\ndef productView(request,myid):\n product = Product.objects.filter(id=myid)\n return render(request, 'shop/prodView.html',{'product':product[0]})\ndef checkout(request):\n if request.method==\"POST\":\n items_json = request.POST.get('itemsJson', '')\n amount = request.POST.get('amount', '')\n name = request.POST.get('name', '')\n email = request.POST.get('email', '')\n address = request.POST.get('address1', '') + \" \" + request.POST.get('address2', '')\n city = request.POST.get('city', '')\n state = request.POST.get('state', '')\n zip_code = request.POST.get('zip_code', '')\n phone = request.POST.get('phone', '')\n order = Orders(items_json=items_json, name=name, email=email, address=address, city=city,\n state=state, zip_code=zip_code, phone=phone,amount=amount)\n order.save()\n x= True\n id = order.order_id\n # return render(request, 'shop/checkout.html', {'thank':thank, 'id': id})\n # Request paytm to transfer the amount to your account after payment by user\n param_dict = {\n\n 'MID': 'MqyctV73637902395722',\n 'ORDER_ID': str(order.order_id),\n 'TXN_AMOUNT': str(amount),\n 'CUST_ID': email,\n 'INDUSTRY_TYPE_ID': 'Retail',\n 'WEBSITE': 'WEBSTAGING',\n 'CHANNEL_ID': 'WEB',\n 'CALLBACK_URL':'http://127.0.0.1:8000/shop/handlerequest/',\n\n }\n param_dict['CHECKSUMHASH'] = Checksum.generate_checksum(param_dict, MERCHANT_KEY)\n return render(request, 'shop/paytm.html', {'param_dict': param_dict})\n\n return render(request, 'shop/checkout.html')\n\n\n@csrf_exempt\ndef handlerequest(request):\n # paytm will send you post request here\n form = request.POST\n response_dict = {}\n for i in form.keys():\n response_dict[i] = form[i]\n if i == 'CHECKSUMHASH':\n checksum = form[i]\n\n verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum)\n if verify:\n if response_dict['RESPCODE'] == '01':\n print('order successful')\n else:\n print('order was not successful because' + response_dict['RESPMSG'])\n return render(request, 'shop/paymentstatus.html', {'response': response_dict})","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"460461342","text":"import os\nimport sys\nimport getopt\n\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras.utils import multi_gpu_model\nfrom tensorflow.keras.callbacks 
import ModelCheckpoint, EarlyStopping\n\nimport utils\nimport Invincea\n\ndef train(train_csv_path, model_path, batch_size, epochs):\n try:\n train_df = pd.read_csv(train_csv_path, header=None)\n train_data, train_label = train_df[0].values, train_df[1].values\n except Exception as e:\n print(e)\n sys.exit(1)\n model = Invincea.Invincea()\n if model_path == None:\n model_path = 'model.ckpt'\n if os.path.isfile(model_path):\n model.load_weights(model_path)\n\n ear = EarlyStopping(monitor='acc', patience=4)\n mcp = ModelCheckpoint(model_path,\n monitor=\"acc\",\n save_best_only=True,\n save_weights_only=False)\n train_generator = utils.DataSequence(train_data, train_label, batch_size, True)\n number_of_gpu = len(tf.config.experimental.list_physical_devices('GPU'))\n if number_of_gpu >= 2:\n parallel_model = multi_gpu_model(model, gpus=number_of_gpu)\n parallel_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\n try:\n parallel_model.fit_generator(\n train_generator,\n epochs=epochs,\n callbacks=[ear, mcp],\n workers=os.cpu_count(),\n use_multiprocessing=True,\n verbose=1,\n )\n except KeyboardInterrupt:\n model.save(model_path)\n model.save(model_path)\n else:\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\n model.fit_generator(\n train_generator,\n epochs=epochs,\n callbacks=[ear, mcp],\n workers=os.cpu_count(),\n use_multiprocessing=True,\n verbose=1,\n )\n\ndef main(argv):\n try:\n train_csv_path = None\n model_path = None\n batch_size = 128\n epochs = 100\n optlist, args = getopt.getopt(argv[1:], '', ['help', 'train=', 'model=', 'batch_size=', 'epochs=',])\n for opt, arg in optlist:\n if opt == '--help':\n utils.help()\n sys.exit(0)\n elif opt == '--train':\n train_csv_path = arg\n elif opt == '--model':\n model_path = arg\n elif opt == '--batch_size':\n batch_size = int(arg)\n elif opt == '--epochs':\n epochs = int(arg)\n if train_csv_path == None:\n print('The following values must be input')\n print('train')\n utils.help()\n sys.exit(1)\n train(train_csv_path, model_path, batch_size, epochs)\n except Exception as e:\n print(e)\n sys.exit(1)\n\nif __name__ == '__main__':\n main(sys.argv)","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"223554095","text":"\"\"\"\napplication factory \n* a global Flask instance \n* this is a package \n\n@wenlong 2019-03-11 \n\"\"\"\n\nimport os\n\nfrom flask import Flask\n#from flask_socketio import SocketIO\n\n\n#app = Flask(__name__)\n\n#import flask.views\n\ndef create_app(test_config=None):\n # config an instance of Flask\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY='dev',\n # database\n DATABASE=os.path.join(app.instance_path, 'ispeech.sqlite'),\n )\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n\n #socketio = SocketIO(app)\n \n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n \"\"\"\n @app.route('/')\n def index():\n return 'Index Page'\n \"\"\"\n\n @app.route('/hello', methods=['GET', 'POST'])\n def hello():\n return \"Hello, World!\"\n\n # database\n from . import db\n db.init_app(app)\n\n \n # apply the blueprints to the app\n from . 
import auth, ispeech\n app.register_blueprint(auth.bp)\n app.register_blueprint(ispeech.bp)\n #app.register_blueprint(audio.bp)\n\n app.add_url_rule('/', endpoint='index')\n \n\n return app\n","sub_path":"speech/ispeech_0415/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"650470613","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# sym.py - High level API to play with the Symbol API\n# Copyright (C) 2012 Axel \"0vercl0k\" Souchet - http://www.twitter.com/0vercl0k\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\nimport sys\nfrom sym_wrappers import *\nimport threads\n\ndef GetSymbolFromAddressMS(address):\n \"\"\"\n Retrieve symbol information from an address via the Windows Symbol API\n \n Example:\n GetSymbolFromAddressMS(0x778de752) = ntdll!RtlAnsiStringToUnicodeString+0x0000007d\n \"\"\"\n handle_process = threads.GetProcessHandle()\n address_info = SymFromAddr(handle_process, address)\n s = None\n \n if address_info != None:\n symbol_name, offset = address_info['s'], address_info['displacement'].value\n module_info = SymGetModuleInfo64(handle_process, address)\n\n if module_info != None:\n s = '%s.%s+%#.8x' % (module_info.ModuleName, symbol_name, offset)\n else:\n s = '%s+%#.8x' % (symbol_name, offset)\n\n return s\n\ndef GetSymbolFromAddressOlly(address):\n \"\"\"\n Retrieve symbol information from an address via the OllyDBG API\n \n Example:\n GetSymbolFromAddressOlly(0x778de752) =\n \"\"\"\n s = DecodeRelativeOffset(address)\n if s == None:\n s = DecodeAddress(address)\n\n return s\n\ndef GetSymbolFromAddress(address):\n \"\"\"\n Try to obtain a symbol via, first the MS API,\n and if it didn't succeed via the OllyDBG API\n \"\"\"\n s = GetSymbolFromAddressMS(address)\n if s == None:\n s = GetSymbolFromAddressOlly(address)\n\n return s\n","sub_path":"ollyapi/sym.py","file_name":"sym.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"594770465","text":"#!/usr/bin/env python\n\n#stdlib imports\nimport os.path\nimport sys\nimport io\n\n#hack the path so that I can debug these functions if I need to\nhomedir = os.path.dirname(os.path.abspath(__file__)) #where is this script?\nshakedir = os.path.abspath(os.path.join(homedir,'..','..'))\nsys.path.insert(0,shakedir) #put this at the front of the system path, ignoring any installed shakemap stuff\n\n#local imports\nfrom shakemap.utils.exception import ShakeMapException\nfrom shakemap.grind.source import Source\n\n#third party\nfrom openquake.hazardlib.gsim import base,abrahamson_2014\nfrom openquake.hazardlib.geo.mesh import Mesh\nimport numpy as np\n\ndef test():\n fault_text = \"\"\"30.979788 103.454422 1\n31.691615 104.419160 1\n31.723569 104.374760 1\n32.532213 105.220821 1\n32.641450 105.135050 20\n31.846790 104.246202 
20\n31.942158 104.205286 20\n31.290105 103.284388 20\n30.979788 103.454422 1\"\"\"\n event_text = \"\"\"\n\n \"\"\"\n print('Testing creation of source object...')\n source_text = \"\"\"mech=RS\"\"\"\n ffile = io.StringIO(fault_text)\n efile = io.StringIO(event_text)\n sfile = io.StringIO(source_text)\n source = Source.readFromFile(efile,faultfile=ffile,sourcefile=sfile)\n print('Passed creation of source object.')\n\n print('Testing creation of RuptureContext object...')\n gmpe = abrahamson_2014.AbrahamsonEtAl2014()\n rupture = source.getRuptureContext([gmpe])\n testdict = {'mag':7.9,\n 'strike': -133.083550974,\n 'dip': 49.8524115024,\n 'rake': 45.0,\n 'ztor':0.999999999995,\n 'hypo_lon':103.3639,\n 'hypo_lat':30.9858,\n 'hypo_depth':19.0,\n 'width':27.8623813381}\n for key in testdict.keys():\n value = eval('rupture.%s' % key)\n np.testing.assert_almost_equal(testdict[key],value)\n print('Passed creation of RuptureContext object...')\n \n print('Test setting mechanism and rake/dip...')\n mech = 'RS'\n exp_dip = 40\n exp_rake = 90\n source.setMechanism(mech)\n assert source.getEventParam('dip') == exp_dip\n assert source.getEventParam('rake') == exp_rake\n source.setMechanism('ALL',dip=45,rake=315)\n assert source.getEventParam('rake') == -45\n #this should raise an exception\n try:\n source.setMechanism('ALL',dip=110)\n except ShakeMapException as sme:\n print('Exception raised appropriately for dip greater than 90.')\n #this should raise an exception\n try:\n source.setMechanism('ALL',rake=370)\n except ShakeMapException as sme:\n print('Exception raised appropriately for rake greater than 360.')\n print('Test setting mechanism and rake/dip...')\n\ndef _test_northridge():\n fault_text = \"\"\"\n # Source: Wald, D. J., T. H. Heaton, and K. W. Hudnut (1996). The Slip History of the 1994 Northridge, California, Earthquake Determined from Strong-Motion, Teleseismic, GPS, and Leveling Data, Bull. Seism. Soc. Am. 
86, S49-S70.\n 34.315 -118.421 5.000\n 34.401 -118.587 5.000\n 34.261 -118.693 20.427\n 34.175 -118.527 20.427\n 34.315 -118.421 5.000\n \"\"\"\n event_text = \"\"\"\n\n \"\"\"\n source_text = \"\"\"mech=RS\"\"\"\n ffile = io.StringIO(fault_text)\n efile = io.StringIO(event_text)\n sfile = io.StringIO(source_text)\n source = Source.readFromFile(efile,faultfile=ffile,sourcefile=sfile)\n gmpe = abrahamson_2014.AbrahamsonEtAl2014()\n rupture = source.getRuptureContext(gmpe)\n mapwidth = 2.0\n latmin = rupture.hypo_lat - mapwidth\n latmax = rupture.hypo_lat + mapwidth\n lonmin = rupture.hypo_lon - mapwidth\n lonmax = rupture.hypo_lon + mapwidth\n dim = 0.02\n lats = np.arange(latmin,latmax,dim)\n lons = np.arange(lonmin,lonmax,dim)\n lon,lat = np.meshgrid(lons,lats)\n dep = np.zeros_like(lon)\n mesh = Mesh(lon,lat,dep)\n distances = source.getDistanceContext(gmpe,mesh)\n rupture = source.getRuptureContext(gmpe)\n for key in rupture._slots_:\n try:\n value = eval('rupture.%s' % key)\n except:\n print('No value set for %s' % key)\n continue\n print('%s = %s' % (key,str(value))) \n\n cbuf = io.StringIO(fault_text)\n fault = Fault.readFaultFile(cbuf)\n \nif __name__ == '__main__':\n test()\n \n \n","sub_path":"test/grind/source_test.py","file_name":"source_test.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255622297","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nno_chains = int(input(\"No of chains: \"))\r\nno_samples=int(input(\"Enter no of samples per chain: \"))\r\nburnin = 0.5\r\nno_samples_b = int(no_samples-no_samples*burnin)\r\nweight0 = np.zeros((no_chains, no_samples_b))\r\nweight100 = np.zeros((no_chains, no_samples_b))\r\nweight5000 = np.zeros((no_chains, no_samples_b))\r\nweight10000 = np.zeros((no_chains, no_samples_b))\r\n\r\nweight2000 = np.zeros((no_chains, no_samples_b))\r\nweight3000 = np.zeros((no_chains, no_samples_b))\r\nweight4000 = np.zeros((no_chains, no_samples_b))\r\nweight6000 = np.zeros((no_chains, no_samples_b))\r\nweight7000 = np.zeros((no_chains, no_samples_b))\r\nweight8000 = np.zeros((no_chains, no_samples_b))\r\nweight9000 = np.zeros((no_chains, no_samples_b))\r\nweight11000 = np.zeros((no_chains, no_samples_b))\r\nlikelihood_value = np.zeros((no_chains, no_samples))\r\nfile1 = open(\"gelman_reuben/rhat.txt\",\"w\")\r\n\r\n\r\n\r\n\r\ntemp = [1.0, 1.640670712015276, 1.1040895136738123, 1.2190136542044754, 1.3459001926323562, 1.4859942891369484, 1.8114473285278132, 2.0]\r\n\r\n\r\nfor i in range(no_chains):\r\n\r\n file_name = 'weight[0]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin*no_samples):]\r\n weight0[i, :] = dat\r\n\r\n file_name = 'weight[100]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight100[i, :] = dat\r\n\r\n file_name = 'weight[5000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight5000[i, :] = dat\r\n\r\n file_name = 'weight[10000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight10000[i, :] = dat\r\n\r\n file_name = 'weight[2000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin*no_samples):]\r\n weight2000[i, :] = dat\r\n\r\n file_name = 'weight[3000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight3000[i, :] = dat\r\n\r\n file_name 
= 'weight[4000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight4000[i, :] = dat\r\n\r\n file_name = 'weight[6000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight6000[i, :] = dat\r\n\r\n file_name = 'weight[7000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight7000[i, :] = dat\r\n\r\n file_name = 'weight[8000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight8000[i, :] = dat\r\n\r\n file_name = 'weight[9000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight9000[i, :] = dat\r\n\r\n file_name = 'weight[11000]_'+str(temp[i]) + '.txt'\r\n dat = np.loadtxt(file_name)\r\n dat = dat[int(burnin * no_samples):]\r\n weight11000[i, :] = dat\r\n\r\n file_name = 'likelihood_value_'+str(temp[i]) + '.txt'\r\n likelihood_value[i, :] = np.loadtxt(file_name)\r\n\r\n file_name = 'acc_test_chain_1.0.txt'\r\n acc_test = np.loadtxt(file_name)\r\n\r\n file_name = 'acc_train_chain_1.0.txt'\r\n acc_train = np.loadtxt(file_name)\r\n\r\n x1 = np.linspace(0, no_samples_b, num=no_samples_b)\r\n\r\n plt.plot(x1, weight0[i], label='Weight[0]')\r\n plt.legend(loc='upper right')\r\n plt.title(\"Weight[0]_Chain\"+str(temp[i]) + \" Trace\")\r\n #plt.ylim(-1,1)\r\n plt.savefig(\r\n 'gelman_reuben/weight[0]_Chain' + str(temp[i])+'_samples.png')\r\n plt.clf()\r\n\r\n\r\n plt.hist(weight0[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[0]_Chain' + str(temp[i])+'_hist.png')\r\n plt.clf()\r\n\r\n plt.plot(x1, weight100[i], label='Weight[100]')\r\n plt.legend(loc='upper right')\r\n plt.title(\"Weight[100]_Chain\" + str(temp[i]) + \" Trace\")\r\n #plt.ylim(-1,1)\r\n plt.savefig(\r\n 'gelman_reuben/weight[100]_Chain' + str(temp[i]) + '_samples.png')\r\n plt.clf()\r\n\r\n plt.hist(weight100[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[100]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.plot(x1, weight5000[i], label='Weight[5000]')\r\n plt.legend(loc='upper right')\r\n plt.title(\"Weight[5000]_Chain\" + str(temp[i]) + \" Trace\")\r\n #plt.ylim(-1,1)\r\n plt.savefig(\r\n 'gelman_reuben/weight[5000]_Chain' + str(temp[i]) + '_samples.png')\r\n plt.clf()\r\n\r\n plt.hist(weight5000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[5000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.plot(x1, weight10000[i], label='Weight[10000]')\r\n plt.legend(loc='upper right')\r\n plt.title(\"Weight[10000]_Chain\" + str(temp[i]) + \" Trace\")\r\n #plt.ylim(-1,1)\r\n plt.savefig(\r\n 'gelman_reuben/weight[10000]_Chain' + str(temp[i]) + '_samples.png')\r\n plt.clf()\r\n\r\n plt.hist(weight10000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[10000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight10000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior 
Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[10000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight2000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[2000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight3000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[3000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight4000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[4000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight6000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[6000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight7000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[7000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight8000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[8000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight9000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[9000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\n plt.hist(weight11000[i], bins=20, color=\"blue\", alpha=0.7)\r\n plt.title('Posterior Distribution')\r\n plt.ylabel('Frequency')\r\n plt.xlabel('Parameter Values')\r\n plt.savefig(\r\n 'gelman_reuben/weight[11000]_Chain' + str(temp[i]) + '_hist.png')\r\n plt.clf()\r\n\r\nx1 = np.linspace(0, no_samples_b, num=no_samples_b)\r\nx2 = np.linspace(0, no_samples, num=no_samples)\r\n\r\nplt.plot(x1, weight0[0], label=str(temp[0]))\r\nplt.plot(x1, weight0[1], label=str(temp[1]))\r\nplt.plot(x1, weight0[2], label=str(temp[2]))\r\nplt.plot(x1, weight0[3], label=str(temp[3]))\r\n#plt.legend(loc='upper left')\r\nplt.plot(x1, weight0[4], label=str(temp[4]))\r\nplt.plot(x1, weight0[5], label=str(temp[5]))\r\nplt.plot(x1, weight0[6], label=str(temp[6]))\r\nplt.plot(x1, weight0[7], label=str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[0]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight100[0], label=str(temp[0]))\r\nplt.plot(x1, weight100[1], label=str(temp[1]))\r\nplt.plot(x1, weight100[2], label=str(temp[2]))\r\nplt.plot(x1, weight100[3], label=str(temp[3]))\r\nplt.plot(x1, weight100[4], label=str(temp[4]))\r\nplt.plot(x1, weight100[5], label=str(temp[5]))\r\nplt.plot(x1, weight100[6], label=str(temp[6]))\r\nplt.plot(x1, weight100[7], 
label=str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[100]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight5000[0], label=str(temp[0]))\r\nplt.plot(x1, weight5000[1], label=str(temp[1]))\r\nplt.plot(x1, weight5000[2], label=str(temp[2]))\r\nplt.plot(x1, weight5000[3], label=str(temp[3]))\r\nplt.plot(x1, weight5000[4], label=str(temp[4]))\r\nplt.plot(x1, weight5000[5], label=str(temp[5]))\r\nplt.plot(x1, weight5000[6], label=str(temp[6]))\r\nplt.plot(x1, weight5000[7], label=str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[5000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight10000[0], label='Weight[10000]'+str(temp[0]))\r\nplt.plot(x1, weight10000[1], label='Weight[10000]'+str(temp[1]))\r\nplt.plot(x1, weight10000[2], label='Weight[10000]'+str(temp[2]))\r\nplt.plot(x1, weight10000[3], label='Weight[10000]'+str(temp[3]))\r\nplt.plot(x1, weight10000[4], label='Weight[10000]'+str(temp[4]))\r\nplt.plot(x1, weight10000[5], label='Weight[10000]'+str(temp[5]))\r\nplt.plot(x1, weight10000[6], label='Weight[10000]'+str(temp[6]))\r\nplt.plot(x1, weight10000[7], label='Weight[10000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[10000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight0[0], label=str(temp[0]))\r\nplt.plot(x1, weight0[1], label=str(temp[1]))\r\nplt.plot(x1, weight0[2], label=str(temp[2]))\r\nplt.plot(x1, weight0[3], label=str(temp[3]))\r\nplt.plot(x1, weight0[4], label=str(temp[4]))\r\nplt.plot(x1, weight0[5], label=str(temp[5]))\r\nplt.plot(x1, weight0[6], label=str(temp[6]))\r\nplt.plot(x1, weight0[7], label=str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[0]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight100[0], label=str(temp[0]))\r\nplt.plot(x1, weight100[1], label=str(temp[1]))\r\nplt.plot(x1, weight100[2], label=str(temp[2]))\r\nplt.plot(x1, weight100[3], label=str(temp[3]))\r\nplt.plot(x1, weight100[4], label=str(temp[4]))\r\nplt.plot(x1, weight100[5], label=str(temp[5]))\r\nplt.plot(x1, weight100[6], label=str(temp[6]))\r\nplt.plot(x1, weight100[7], label=str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[100]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight5000[0], label=str(temp[0]))\r\nplt.plot(x1, weight5000[1], label=str(temp[1]))\r\nplt.plot(x1, weight5000[2], label=str(temp[2]))\r\nplt.plot(x1, weight5000[3], label=str(temp[3]))\r\nplt.plot(x1, weight5000[4], label=str(temp[4]))\r\nplt.plot(x1, weight5000[5], label=str(temp[5]))\r\nplt.plot(x1, weight5000[6], label=str(temp[6]))\r\nplt.plot(x1, weight5000[7], label=str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter 
Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[5000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight10000[0], label='Weight[10000]'+str(temp[0]))\r\nplt.plot(x1, weight10000[1], label='Weight[10000]'+str(temp[1]))\r\nplt.plot(x1, weight10000[2], label='Weight[10000]'+str(temp[2]))\r\nplt.plot(x1, weight10000[3], label='Weight[10000]'+str(temp[3]))\r\nplt.plot(x1, weight10000[4], label='Weight[10000]'+str(temp[4]))\r\nplt.plot(x1, weight10000[5], label='Weight[10000]'+str(temp[5]))\r\nplt.plot(x1, weight10000[6], label='Weight[10000]'+str(temp[6]))\r\nplt.plot(x1, weight10000[7], label='Weight[10000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[10000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight2000[0], label='Weight[2000]'+str(temp[0]))\r\nplt.plot(x1, weight2000[1], label='Weight[2000]'+str(temp[1]))\r\nplt.plot(x1, weight2000[2], label='Weight[2000]'+str(temp[2]))\r\nplt.plot(x1, weight2000[3], label='Weight[2000]'+str(temp[3]))\r\nplt.plot(x1, weight2000[4], label='Weight[2000]'+str(temp[4]))\r\nplt.plot(x1, weight2000[5], label='Weight[2000]'+str(temp[5]))\r\nplt.plot(x1, weight2000[6], label='Weight[2000]'+str(temp[6]))\r\nplt.plot(x1, weight2000[7], label='Weight[2000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[2000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight3000[0], label='Weight[3000]'+str(temp[0]))\r\nplt.plot(x1, weight3000[1], label='Weight[3000]'+str(temp[1]))\r\nplt.plot(x1, weight3000[2], label='Weight[3000]'+str(temp[2]))\r\nplt.plot(x1, weight3000[3], label='Weight[3000]'+str(temp[3]))\r\nplt.plot(x1, weight3000[4], label='Weight[3000]'+str(temp[4]))\r\nplt.plot(x1, weight3000[5], label='Weight[3000]'+str(temp[5]))\r\nplt.plot(x1, weight3000[6], label='Weight[3000]'+str(temp[6]))\r\nplt.plot(x1, weight3000[7], label='Weight[3000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[3000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight4000[0], label='Weight[4000]'+str(temp[0]))\r\nplt.plot(x1, weight4000[1], label='Weight[4000]'+str(temp[1]))\r\nplt.plot(x1, weight4000[2], label='Weight[4000]'+str(temp[2]))\r\nplt.plot(x1, weight4000[3], label='Weight[4000]'+str(temp[3]))\r\nplt.plot(x1, weight4000[4], label='Weight[4000]'+str(temp[4]))\r\nplt.plot(x1, weight4000[5], label='Weight[4000]'+str(temp[5]))\r\nplt.plot(x1, weight4000[6], label='Weight[4000]'+str(temp[6]))\r\nplt.plot(x1, weight4000[7], label='Weight[4000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[4000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight6000[0], label='Weight[6000]'+str(temp[0]))\r\nplt.plot(x1, weight6000[1], label='Weight[6000]'+str(temp[1]))\r\nplt.plot(x1, weight6000[2], label='Weight[6000]'+str(temp[2]))\r\nplt.plot(x1, weight6000[3], 
label='Weight[6000]'+str(temp[3]))\r\nplt.plot(x1, weight6000[4], label='Weight[6000]'+str(temp[4]))\r\nplt.plot(x1, weight6000[5], label='Weight[6000]'+str(temp[5]))\r\nplt.plot(x1, weight6000[6], label='Weight[6000]'+str(temp[6]))\r\nplt.plot(x1, weight6000[7], label='Weight[6000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[6000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight7000[0], label='Weight[7000]'+str(temp[0]))\r\nplt.plot(x1, weight7000[1], label='Weight[7000]'+str(temp[1]))\r\nplt.plot(x1, weight7000[2], label='Weight[7000]'+str(temp[2]))\r\nplt.plot(x1, weight7000[3], label='Weight[7000]'+str(temp[3]))\r\nplt.plot(x1, weight7000[4], label='Weight[7000]'+str(temp[4]))\r\nplt.plot(x1, weight7000[5], label='Weight[7000]'+str(temp[5]))\r\nplt.plot(x1, weight7000[6], label='Weight[7000]'+str(temp[6]))\r\nplt.plot(x1, weight7000[7], label='Weight[7000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[7000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight8000[0], label='Weight[8000]'+str(temp[0]))\r\nplt.plot(x1, weight8000[1], label='Weight[8000]'+str(temp[1]))\r\nplt.plot(x1, weight8000[2], label='Weight[8000]'+str(temp[2]))\r\nplt.plot(x1, weight8000[3], label='Weight[8000]'+str(temp[3]))\r\nplt.plot(x1, weight8000[4], label='Weight[8000]'+str(temp[4]))\r\nplt.plot(x1, weight8000[5], label='Weight[8000]'+str(temp[5]))\r\nplt.plot(x1, weight8000[6], label='Weight[8000]'+str(temp[6]))\r\nplt.plot(x1, weight8000[7], label='Weight[8000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[8000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight9000[0], label='Weight[9000]'+str(temp[0]))\r\nplt.plot(x1, weight9000[1], label='Weight[9000]'+str(temp[1]))\r\nplt.plot(x1, weight9000[2], label='Weight[9000]'+str(temp[2]))\r\nplt.plot(x1, weight9000[3], label='Weight[9000]'+str(temp[3]))\r\nplt.plot(x1, weight9000[4], label='Weight[9000]'+str(temp[4]))\r\nplt.plot(x1, weight9000[5], label='Weight[9000]'+str(temp[5]))\r\nplt.plot(x1, weight9000[6], label='Weight[9000]'+str(temp[6]))\r\nplt.plot(x1, weight9000[7], label='Weight[9000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[9000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x1, weight11000[0], label='Weight[11000]'+str(temp[0]))\r\nplt.plot(x1, weight11000[1], label='Weight[11000]'+str(temp[1]))\r\nplt.plot(x1, weight11000[2], label='Weight[11000]'+str(temp[2]))\r\nplt.plot(x1, weight11000[3], label='Weight[11000]'+str(temp[3]))\r\nplt.plot(x1, weight11000[4], label='Weight[11000]'+str(temp[4]))\r\nplt.plot(x1, weight11000[5], label='Weight[11000]'+str(temp[5]))\r\nplt.plot(x1, weight11000[6], label='Weight[11000]'+str(temp[6]))\r\nplt.plot(x1, weight11000[7], label='Weight[11000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Trace 
Plot\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Parameter Values\")\r\nplt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/weight[11000]_Chain' +'_samples.png')\r\nplt.clf()\r\n\r\nplt.plot(x2, likelihood_value[0], label='Likelihood')\r\n#plt.plot(x2, likelihood_value[1], label='Weight[11000]'+str(temp[1]))\r\n#plt.plot(x2, likelihood_value[2], label='Weight[11000]'+str(temp[2]))\r\n#plt.plot(x2, likelihood_value[3], label='Weight[11000]'+str(temp[3]))\r\n#plt.plot(x2, likelihood_value[4], label='Weight[11000]'+str(temp[4]))\r\n#plt.plot(x2, likelihood_value[5], label='Weight[11000]'+str(temp[5]))\r\n#plt.plot(x2, likelihood_value[6], label='Weight[11000]'+str(temp[6]))\r\nplt.plot(x2, likelihood_value[7], label='Weight[11000]'+str(temp[7]))\r\n#plt.legend(loc='upper left')\r\nplt.title(\"Likelihood Function Trace\")\r\nplt.xlabel(\"Samples\")\r\nplt.ylabel(\"Likelihood Value\")\r\n#plt.ylim(-4, 4)\r\nplt.tight_layout()\r\nplt.savefig(\r\n'gelman_reuben/likelihood_value' +'_samples.png')\r\nplt.clf()\r\n\r\ncolor = 'tab:red'\r\nplt.plot(x2, acc_train, label=\"Train\", color=color)\r\ncolor = 'tab:blue'\r\nplt.plot(x2, acc_test, label=\"Test\", color=color)\r\nplt.xlabel('Samples')\r\nplt.ylabel('Reconstruction Accuracy')\r\nplt.title('Accuracy Value')\r\nplt.legend()\r\nplt.savefig('gelman_reuben/accuracy_value' +'_samples.png')\r\nplt.clf()\r\n\r\n\r\n\r\n\r\n\r\nweight0 = weight0.T\r\nweight100 = weight100.T\r\nweight5000 = weight5000.T\r\nweight10000 = weight10000.T\r\nweight3000 = weight3000.T\r\nweight2000 = weight2000.T\r\nweight4000 = weight4000.T\r\nweight6000 = weight6000.T\r\nweight7000 = weight7000.T\r\nweight8000 = weight8000.T\r\nweight9000 = weight9000.T\r\nweight11000 = weight11000.T\r\n\r\nprint(weight0.shape)\r\nprint(weight100.shape)\r\nprint(weight5000.shape)\r\nprint(weight10000.shape)\r\nprint(weight3000.shape)\r\nprint(weight2000.shape)\r\nprint(weight4000.shape)\r\nprint(weight6000.shape)\r\nprint(weight7000.shape)\r\nprint(weight8000.shape)\r\nprint(weight9000.shape)\r\ndata = np.stack((weight0, weight100, weight2000, weight3000, weight4000, weight5000, weight6000, weight7000, weight8000, weight9000, weight10000, weight11000), axis=2)\r\n#data = np.array([weight0,weight100,weight5000,weight10000])\r\nNchains, Nsamples, Npars = data.shape\r\n\r\nB_on_n = data.mean(axis=1).var(axis=0) # variance of in-chain means\r\nW = data.var(axis=1).mean(axis=0) # mean of in-chain variances\r\n\r\n#print(B_on_n, ' B_on_n mean')\r\n\r\n#print(W, ' W variance ')\r\n\r\n# simple version, as in Obsidian\r\nsig2 = (Nsamples/(Nsamples-1))*W + B_on_n\r\nVhat = sig2 + B_on_n/Nchains\r\nRhat = Vhat/W\r\n\r\nprint(Rhat, ' Rhat')\r\nfile1.write(str(Rhat))\r\n\r\n\r\n# advanced version that accounts for ndof\r\nm, n = np.float(Nchains), np.float(Nsamples)\r\nsi2 = data.var(axis=1)\r\nxi_bar = data.mean(axis=1)\r\nxi2_bar = data.mean(axis=1)**2\r\nvar_si2 = data.var(axis=1).var(axis=0)\r\nallmean = data.mean(axis=1).mean(axis=0)\r\ncov_term1 = np.array([np.cov(si2[:,i], xi2_bar[:,i])[0,1]\r\n for i in range(Npars)])\r\ncov_term2 = np.array([-2*allmean[i]*(np.cov(si2[:,i], xi_bar[:,i])[0,1])\r\n for i in range(Npars)])\r\nvar_Vhat = ( ((n-1)/n)**2 * 1.0/m * var_si2\r\n + ((m+1)/m)**2 * 2.0/(m-1) * B_on_n**2\r\n + 2.0*(m+1)*(n-1)/(m*n**2)\r\n * n/m * (cov_term1 + cov_term2))\r\ndf = 2*Vhat**2 / var_Vhat\r\n\r\n#print(df, ' df ')\r\n#print(var_Vhat, ' var_Vhat')\r\n#print \"gelman_rubin(): var_Vhat = {}, df = {}\".format(var_Vhat, df)\r\n\r\n\r\nRhat *= 
df/(df-2)\r\n\r\nprint(Rhat, ' Rhat Advanced')\r\nfile1.write(str(Rhat))\r\n","sub_path":"EXTRAS/Gelman Reuben.py","file_name":"Gelman Reuben.py","file_ext":"py","file_size_in_byte":22609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423801682","text":"# Databricks notebook source\n# MAGIC %md \n# MAGIC ## Training ML models with Databricks & Mlflow\n# MAGIC We will:\n# MAGIC - Train some Machine Learning models with scikit-learn and XGBoost\n# MAGIC - Get some custom performance visualisations with MatplotLib \n# MAGIC - Do manual hyperparameter tuning\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Run the utils notebook\n# MAGIC \n# MAGIC The notebook contains the ML wrapper function `run_algo` that we will be using throughout this notebook. \n# MAGIC \n# MAGIC At a high level it does the following steps:\n# MAGIC 1. Train the model using the train dataset\n# MAGIC 2. Score the train and test dataset\n# MAGIC 3. Run Model Diagnostics\n# MAGIC 4. Log the parameters, metrics, artifacts and model into MLFlow\n\n# COMMAND ----------\n\n# MAGIC %run ./utils\n\n# COMMAND ----------\n\n# DBTITLE 1,Look at the Dataset\n# MAGIC %sql\n# MAGIC \n# MAGIC select * from boston_house_price\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ###Create your MLFlow Experiment\n# MAGIC \n# MAGIC Copy and paste the experiment ID to the cell below\n\n# COMMAND ----------\n\nexperiment_id = '8805954';\n\ndisplayHTML(f\"

Make sure you can see your experiment on #mlflow/experiments/{experiment_id}
\")\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC ### Define some global variables\n\n# COMMAND ----------\n\nTABLE_NAME = 'boston_house_price'\n\nFEATURES = [\n 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT'\n]\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC ### Using Elastic Net as a baseline algorithm\n# MAGIC \n# MAGIC Elastic net is a regularized regression method that linearly combines the L1 and L2 penalties of the lasso and ridge methods (https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html)\n\n# COMMAND ----------\n\nalgo = ElasticNet\n\nparams = {\n 'pca_params': {'n_components': 12},\n 'algo_params': {'alpha': 0.05, 'l1_ratio': 0.1, 'normalize': False}\n}\n\nrun_name = \"Elastic Net\"\n\ndf_train, df_test, version = prep_data(TABLE_NAME)\nrun_info, model_stats = run_algo(df_train, df_test, version, FEATURES, algo, params, experiment_id, run_name=run_name, do_shap=True)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC ### When in doubt, use XGBoost\n# MAGIC \n# MAGIC XGBoost is an implementation of gradient boosted decision trees designed for speed and performance. It has been dominating applied machine learning and Kaggle competitions for structured or tabular data. (https://xgboost.readthedocs.io/en/latest/index.html)\n\n# COMMAND ----------\n\nalgo = xgb.XGBRegressor\n\nparams = {\n 'pca_params': {'n_components': 12},\n 'algo_params': {'max_depth': 3, 'learning_rate': 0.1, 'n_estimators': 100, 'objective': 'reg:squarederror'}\n}\n\nrun_name = \"XGBoost\"\ndf_train, df_test, version = prep_data(TABLE_NAME)\nrun_info, model_stats = run_algo(df_train, df_test, version, FEATURES, algo, params, experiment_id, run_name=run_name, do_shap=True)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC Now you have tested 2 different models. 
Check the Experiment and compare the results.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Hyperparameter Tuning\n# MAGIC User HyperOpt with Spark trials to run distributed hyperparameters tuning across workers in parallel\n\n# COMMAND ----------\n\nspark.conf.set(\"spark.databricks.mlflow.trackHyperopt.enabled\", False)\n\n# COMMAND ----------\n\nfrom functools import partial\nfrom hyperopt import SparkTrials, hp, fmin, tpe, STATUS_FAIL, STATUS_OK\n\nspark_trials = SparkTrials()\nhyperopt_algo = tpe.suggest\n\nn_components_range = np.arange(4, 12, 1, dtype=int)\nmax_depth_range = np.arange(1, 4, 1, dtype=int)\nlearning_rate_range = np.arange(0.01, 0.15, 0.01)\nn_estimators_range = np.arange(100, 500, 1, dtype=int)\n\nparams = {\n 'pca_params': {\n 'n_components': hp.choice('n_components', n_components_range)\n },\n 'algo_params': {\n 'max_depth': hp.choice('max_depth', max_depth_range), \n 'learning_rate': hp.choice('learning_rate', learning_rate_range), \n 'n_estimators': hp.choice('n_estimators', n_estimators_range), \n 'objective': 'reg:squarederror',\n 'eval_metric': 'rmse',\n 'early_stopping_rounds': 100,\n 'verbose': False\n }\n}\n\ndef rmse(y, pred): \n return np.sqrt(mean_squared_error(y, pred))\n\ndef fn(params, df_train, df_test, version, features, algo, experiment_id, run_name):\n run_info, model_stats = run_algo(df_train, df_test, version, features, algo, params, experiment_id=experiment_id, run_name=run_name, do_shap=True, nested=True)\n \n loss = rmse(model_stats['df']['PRICE'].to_numpy(), model_stats['y_pred'])\n return {'loss': loss, 'status': STATUS_OK}\n\nrun_name = \"HyperOpt - XGB\"\n\nalgo = xgb.XGBRegressor\ndf_train, df_test, version = prep_data(TABLE_NAME)\nfmin_objective = partial(fn, df_train=df_train, df_test=df_test, version=version, features=FEATURES, algo=algo, experiment_id=experiment_id, run_name=run_name)\n\nbest_param = fmin(fn=fmin_objective, space=params, algo=hyperopt_algo, max_evals=8, trials=spark_trials) \n\n\n# COMMAND ----------\n\nprint(\"best hyperparameters:\")\nprint(f\"learning_rate: {learning_rate_range[best_param['learning_rate']]}\")\nprint(f\"max_depth: {max_depth_range[best_param['max_depth']]}\")\nprint(f\"n_estimators: {n_estimators_range[best_param['n_estimators']]}\")\nprint(f\"n_components: {n_components_range[best_param['n_components']]}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC Now you have found the best parameters for your XGBoost model, we can deploy it using the next [notebook]($./03_deployment).","sub_path":"Databricks Bootcamp/Data Science Workshop/02_machine_learning.py","file_name":"02_machine_learning.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"236231839","text":"'''\nzstack node test class\n\n@author: Youyk\n'''\nimport zstackwoodpecker.test_util as test_util\nimport zstackwoodpecker.setup_actions as setup_actions\nimport zstackwoodpecker.operations.node_operations as node_ops\nimport zstacklib.utils.linux as linux\n\nSTOPPED = 'stopped'\nRUNNING = 'running'\nUNKNOWN = 'unknown'\n\nclass ZstackTestNode(object):\n def __init__(self):\n self.node_option = test_util.NodeOption()\n self.state = UNKNOWN\n self.node = None\n self.test_node = None\n\n def is_docker_node(self):\n return self.node_option.get_docker_image()\n\n def add(self, deploy_config):\n if self.state == RUNNING:\n return\n\n if self.is_docker_node():\n self.test_node = setup_actions.DockerNode(deploy_config)\n 
self.test_node.set_docker_image(self.node_option.get_docker_image())\n else:\n self.test_node = setup_actions.HostNode(deploy_config)\n\n self.test_node.set_username(self.node_option.get_username())\n self.test_node.set_password(self.node_option.get_password())\n self.test_node.set_node_ip(self.node_option.get_management_ip())\n\n self.test_node.start_node()\n if self.wait_for_node_start():\n self.state = RUNNING\n self.node = node_ops.get_management_node_by_host_ip(self.node_option.get_management_ip())[0]\n\n def wait_for_node_start(self, timeout=120):\n if not linux.wait_callback_success(node_ops.is_management_node_start, \\\n self.node_option.get_management_ip(), timeout=timeout, \\\n interval=0.5):\n test_util.test_logger('multi node does not startup on host: %s' \\\n % self.node_option.get_management_ip())\n return False\n test_util.test_logger('multi node startup on host: %s' \\\n % self.node_option.get_management_ip())\n return True\n\n def stop(self):\n self.test_node.stop_node()\n self.state = STOPPED\n\n def check(self):\n import zstackwoodpecker.zstack_test.checker_factory as checker_factory\n checker = checker_factory.CheckerFactory().create_checker(self)\n checker.check()\n\n def set_node_option(self, node_option):\n self.node_option = node_option\n\n def get_node_option(self):\n return self.node_option\n\n def get_node(self):\n return self.node\n\n def get_state(self):\n return self.state\n\n def get_test_node(self):\n return self.test_node\n","sub_path":"zstackwoodpecker/zstackwoodpecker/zstack_test/zstack_test_node.py","file_name":"zstack_test_node.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"339725536","text":"#!/usr/bin/env python\n\"\"\"\nA Python implementation of some of the ideas in the SR-Tesseler paper.\nBasically this does is calculate the area (in pixels) of the Voroni\nregion around a localization and stores that in the localizations fit\narea field.\n\nNote: This ignores the localization category.\n\nNote: This will handle up to on the order of 1M localizations. 
Analysis\n of files with a lot more localizations than this will likely\n take a long time to analyze.\n\nHazen 09/16\n\"\"\"\n\nimport numpy\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\nfrom shapely.geometry import Polygon\n\nimport storm_analysis.sa_library.readinsight3 as readinsight3\nimport storm_analysis.sa_library.writeinsight3 as writeinsight3\n\n\ndef voronoi(mlist_name, clist_name, density_factor, min_size, verbose = True):\n\n i3_data_in = readinsight3.loadI3GoodOnly(mlist_name)\n n_locs = i3_data_in['xc'].size\n points = numpy.column_stack((i3_data_in['xc'], i3_data_in['yc']))\n\n print(\"Creating Voronoi object.\")\n vor = Voronoi(points)\n\n print(\"Calculating 2D region sizes.\")\n for i, region_index in enumerate(vor.point_region):\n if ((i%10000) == 0):\n print(\"Processing point\", i)\n\n vertices = []\n for vertex in vor.regions[region_index]:\n \n # I think these are edge regions?\n if (vertex == -1):\n vertices = []\n break\n\n vertices.append(vor.vertices[vertex])\n \n if (len(vertices) > 0):\n area = Polygon(vertices).area\n i3_data_in['a'][i] = 1.0/area\n\n # Used median density based threshold.\n ave_density = numpy.median(i3_data_in['a'])\n if verbose:\n print(\"Min density\", numpy.min(i3_data_in['a']))\n print(\"Max density\", numpy.max(i3_data_in['a']))\n print(\"Median density\", ave_density)\n\n # Record the neighbors of each point.\n max_neighbors = 40\n neighbors = numpy.zeros((n_locs, max_neighbors), dtype = numpy.int32) - 1\n neighbors_counts = numpy.zeros((n_locs), dtype = numpy.int32)\n\n print(\"Calculating neighbors\")\n for ridge_p in vor.ridge_points:\n\n p1 = ridge_p[0]\n p2 = ridge_p[1]\n\n # Add p2 to the list for p1\n neighbors[p1,neighbors_counts[p1]] = p2\n neighbors_counts[p1] += 1\n\n # Add p1 to the list for p2\n neighbors[p2,neighbors_counts[p2]] = p1\n neighbors_counts[p2] += 1\n\n if False:\n n1 = neighbors[0,:]\n print(n1)\n print(neighbors[n1[0],:])\n\n # Mark connected points that meet the minimum density criteria.\n print(\"Marking connected regions\")\n i3_data_in['lk'] = -1\n min_density = density_factor * ave_density\n visited = numpy.zeros((n_locs), dtype = numpy.int32)\n\n def neighborsList(index):\n nlist = []\n for i in range(neighbors_counts[index]):\n loc_index = neighbors[index,i]\n if (visited[loc_index] == 0):\n nlist.append(neighbors[index,i])\n visited[loc_index] = 1\n return nlist\n\n cluster_id = 2\n for i in range(n_locs):\n if (visited[i] == 0):\n if (i3_data_in['a'][i] > min_density):\n cluster_elt = [i]\n c_size = 1\n visited[i] = 1\n to_check = neighborsList(i)\n while (len(to_check) > 0):\n\n # Remove last localization from the list.\n loc_index = to_check[-1]\n to_check = to_check[:-1]\n\n # If the localization has sufficient density add to cluster and check neighbors.\n if (i3_data_in['a'][loc_index] > min_density):\n to_check += neighborsList(loc_index)\n cluster_elt.append(loc_index)\n c_size += 1\n\n # Mark as visited.\n visited[loc_index] = 1\n\n # Mark the cluster if there are enough localizations in the cluster.\n if (c_size > min_size):\n print(\"cluster\", cluster_id, \"size\", c_size)\n for elt in cluster_elt:\n i3_data_in['lk'][elt] = cluster_id\n cluster_id += 1\n visited[i] = 1\n\n print(cluster_id, \"clusters\")\n \n # Save the data.\n print(\"Saving results\")\n i3_data_out = writeinsight3.I3Writer(clist_name)\n i3_data_out.addMolecules(i3_data_in)\n 
i3_data_out.close()\n","sub_path":"storm_analysis/voronoi/voronoi.py","file_name":"voronoi.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"362924929","text":"import json\n\nfrom flask import Flask, request\n\nfrom utilities.config import config\nfrom utilities.directcom import DirectCom\nfrom utilities.runner import runScript\nfrom utilities.utils import (SUCCESS, Utils, direct_responder, registerEvent,\n service_authorized)\n\napp = Flask(__name__)\nservername = config[\"runserv\"][\"url\"]\ndirectcom = DirectCom(app, servername)\nutils = Utils(app, directcom)\n\n\ndef parseFile(dbFileResult):\n return {\"name\": dbFileResult[0],\n \"contents\": dbFileResult[1]}\n\n\ndef parseFiles(dbFileResults):\n return [parseFile(x) for x in dbFileResults]\n\n\ndef saferun(attemptid, executorFile, fileList):\n fileList = [file for file in fileList if len(file[0]) > 0]\n return runScript(attemptid,\n parseFile(executorFile[0]),\n parseFiles(fileList))\n\n\n@app.route(\"/grade\", methods=[\"POST\"])\n@service_authorized\ndef grade():\n \"\"\"Run the grade server on an attempt\"\"\"\n message = json.loads(request.form.get(\"message\"))\n attemptid = message[\"attemptid\"]\n print(attemptid)\n\n # Double callback to actually run the code: it needs the files\n # from the db, first.\n utils.dbcallback(\n lambda executorFile: utils.dbcallback(\n lambda fileList: saferun(\n attemptid,\n executorFile,\n fileList\n ),\n \"get_attempt_files\",\n attemptid=attemptid\n ),\n \"get_executor_file\",\n attemptid=attemptid\n )\n\n return SUCCESS\n\n\ndef grade_result(data):\n \"\"\"Get the grade of the attempt\"\"\"\n correct = 0\n total = 0\n for datum in data:\n if \"weight\" in datum:\n total += datum[\"weight\"]\n else:\n total += 1\n if datum[\"passed\"]:\n if \"weight\" in datum:\n correct += datum[\"weight\"]\n else:\n correct += 1\n\n return correct * 1.0 / total\n\n\n@app.route(\"/run-result\", methods=[\"POST\"])\n@service_authorized\ndef run_result():\n \"\"\"Called when a program has finished running\"\"\"\n message = request.form.get(\"message\")\n message = json.loads(message)\n\n # If the run was a success and actually tested...\n if message.get(\"success\", False):\n results = json.loads(message[\"data\"])\n grade = grade_result(results)\n\n # Record this in the database\n utils.dbcallback(lambda res:\n utils.dbcallback(lambda res: res,\n \"update_best_attempt\",\n attemptid=message[\"attemptid\"]),\n \"update_attempt\",\n attemptid=message[\"attemptid\"],\n results=json.dumps({\"success\": True,\n \"results\": results}),\n grade=grade)\n return SUCCESS\n else:\n grade = 0\n utils.dbcallback(lambda res:\n utils.dbcallback(lambda res: res,\n \"update_best_attempt\",\n attemptid=message[\"attemptid\"]),\n \"update_attempt\",\n attemptid=message[\"attemptid\"],\n results=json.dumps({\"success\": False,\n \"results\":\n message.get(\"error\",\n \"Unknown error\")}),\n grade=grade)\n\n return \"\"\n\n\n@app.route(\"/has-result\", methods=[\"POST\"])\n@direct_responder\ndef has_result():\n \"\"\"Returns whether or not the server has finished grading an attempt\"\"\"\n message = json.loads(request.form.get(\"message\"))\n attemptid = message[\"attemptid\"]\n res = utils.dbcall(\"get_attempt\", attemptid=attemptid)\n return json.dumps(len(res) > 0 and res[0][3])\n\n\nregisterEvent(\"grade\", \"runserv\", servername + \"grade\")\nregisterEvent(\"run-result\", \"runserv\", servername + \"run-result\")\n# 
app.run(port=config[\"runserv\"][\"port\"], threaded=True,\n# debug=config[\"debug\"])\n","sub_path":"services/runserv.py","file_name":"runserv.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139392498","text":"__author__ = 'haocheng'\n\nfrom commons.log import Logger\nfrom send_mail import SendMail\nfrom config.config import config\n\n\nserver = config.server\nfro = config.fro\nto = config.to\nsubject = config.subject\nfiles = config.files\n\n\n\nmail = SendMail(server, fro, to, subject, '', files)\n\nlogger = Logger('report/print_log.log')","sub_path":"commons/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354821044","text":"import re, urlparse, mechanize\n\nfrom neekanee.jobscrapers.jobscraper import JobScraper\nfrom neekanee.htmlparse.soupify import soupify, get_all_text\n\nfrom neekanee_solr.models import *\n\nCOMPANY = {\n 'name': 'Vishay',\n 'hq': 'Malvern, PA',\n\n 'home_page_url': 'http://www.vishay.com',\n 'jobs_page_url': 'http://hr.vishay.com',\n\n 'empcnt': [10001]\n}\n\nclass VishayJobScraper(JobScraper):\n def __init__(self):\n super(VishayJobScraper, self).__init__(COMPANY)\n\n def scrape_job_links(self, url):\n jobs = []\n\n self.br.open(url)\n self.br.select_form('webform')\n self.br.submit()\n\n while True:\n s = soupify(self.br.response().read())\n t = s.find('table', id='jobs')\n r = re.compile(r'^job_details\\.aspx\\?j=\\d+$')\n \n for a in t.findAll('a', href=r):\n tr = a.findParent('tr')\n td = tr.findAll('td')\n \n l = self.parse_location(td[-2].text)\n if not l:\n continue\n\n job = Job(company=self.company)\n job.title = a.text\n job.url = urlparse.urljoin(self.br.geturl(), a['href'])\n job.location = l\n jobs.append(job)\n\n try:\n self.br.find_link(text='Next')\n self.br.select_form('webform')\n self.br.form.new_control('hidden', '__EVENTTARGET', {'value': 'Nextbutton'})\n self.br.form.new_control('hidden', '__EVENTARGUMENT', {'value': ''})\n self.br.form.new_control('hidden', '__LASTFOCUS', {'value': ''})\n self.br.form.fixup()\n\n ctl = self.br.form.find_control('btnSearch')\n self.br.form.controls.remove(ctl)\n\n self.br.submit()\n except mechanize.LinkNotFoundError:\n break\n\n return jobs\n\n def scrape_jobs(self):\n job_list = self.scrape_job_links(self.company.jobs_page_url)\n self.prune_unlisted_jobs(job_list)\n new_jobs = self.new_job_listings(job_list)\n\n for job in new_jobs:\n self.br.open(job.url)\n\n s = soupify(self.br.response().read())\n f = s.find('form', id='webform')\n\n job.desc = get_all_text(f)\n job.save()\n\ndef get_scraper():\n return VishayJobScraper()\n\nif __name__ == '__main__':\n job_scraper = get_scraper()\n job_scraper.scrape_jobs()\n","sub_path":"neekanee/job_scrapers/plugins/com/link/vishay.py","file_name":"vishay.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"595973812","text":"\n\nfrom util import *\nfrom read import readAtoms\nimport math\nfrom write import *\n\n\nclass FindDefects:\n\n # 返回最近的顶点\n def findNeareastPoint(self, oppo_site):\n vertex = [0] * 3 # 保存与哪个顶点最接近\n\n for i in range(3):\n if oppo_site[i] >= 0.8:\n vertex[i] = 1\n\n return vertex\n\n\n\n def atom2point(self, atom_xyz, edge):\n vertex_xyz = [[[[0, 0, 0] for _ in range(edge)] for _ in range(edge)] for _ in 
range(edge)]\n center_xyz = [[[[0, 0, 0] for _ in range(edge)] for _ in range(edge)] for _ in range(edge)]\n\n time = int(edge / 10)\n time1 = time\n index = 0\n\n for k in range(time1):\n for t in range(time):\n for z in range(80):\n for y in range(t*10, t*10 + 10):\n for x in range(k*10, k*10 + 10):\n vertex_xyz[x][y][z] = atom_xyz[index]\n index += 1\n center_xyz[x][y][z] = atom_xyz[index]\n index += 1\n\n return vertex_xyz, center_xyz\n\n\n\n def isExecute(self, num1, num2, array1, array2):\n for i in range(3):\n if array1[i] == num1 and array2[i] == num2:\n return False\n\n return True\n\n\n def findDefects(self, origin_file, step_file, latConst, edge):\n origin_xyz = readAtoms(origin_file) # 完好晶体的xyz原子坐标\n step_xyz = readAtoms(step_file) # 级联碰撞后的某个时间步的xyz原子坐标\n\n vertex_xyz, center_xyz = self.atom2point(origin_xyz, edge)\n\n flag_v = [[[0] * edge for _ in range(edge) ] for _ in range(edge)] # 顶点处标记是否是缺陷\n flag_c = [[[0] * edge for _ in range(edge)] for _ in range(edge)] # 标记中心点处是否是缺陷\n inter_v = [[[[0, 0, 0] for _ in range(edge)] for _ in range(edge) ] for _ in range(edge)] # 记录位于顶点的坐标\n inter_c = [[[[0, 0, 0] for _ in range(edge)] for _ in range(edge) ] for _ in range(edge)] # 记录位于中心点的坐标\n\n step_coord = step_xyz[0]\n\n interstitials = [] # 保存间隙\n vacancies = [] # 保存空位\n\n\n for xyz in step_xyz:\n\n site = [0, 0, 0] # 位于晶体的哪个晶胞上\n # oppo_site = [0, 0, 0] # 位于晶胞的相对位置\n\n for i in range(3):\n temp_coord = math.fabs(xyz[i] - step_coord[i])\n div = int(temp_coord / latConst)\n site[i] = div\n # oppo_site[i] = round(temp_coord / latConst - div, 6)\n\n\n min_dist = float('inf')\n flag = 0 # 标志是间隙还是空位\n vector_list = [[0,0,0],[0,0,1],[0,1,0],[1,0,0],\n [0,1,1],[1,0,1],[1,1,0],[1,1,1]]\n\n site2 = [0, 0, 0]\n\n for vector in vector_list:\n\n if not self.isExecute(edge - 1, 1, site, vector):\n continue\n\n\n site1 = AandB(site, vector)\n v = vertex_xyz[site1[0]][site1[1]][site1[2]]\n vv = distA2B(v, xyz)\n c = center_xyz[site1[0]][site1[1]][site1[2]]\n cc = distA2B(c, xyz)\n if vv > cc:\n if cc < min_dist:\n min_dist = cc\n site2 = site1\n flag = 0\n else:\n if vv < min_dist:\n min_dist = vv\n site2 = site1\n flag = 1\n\n\n\n if flag == 0:\n if flag_c[site2[0]][site2[1]][site2[2]] == 0:\n flag_c[site2[0]][site2[1]][site2[2]] = 1\n inter_c[site2[0]][site2[1]][site2[2]] = xyz\n else:\n # ii = inter_c[site2[0]][site2[1]][site2[2]] + [1, False]\n # if ii not in interstitials:\n # interstitials.append(ii)\n\n interstitials.append(xyz + [1, True])\n else:\n if flag_v[site2[0]][site2[1]][site2[2]] == 0:\n flag_v[site2[0]][site2[1]][site2[2]] = 1\n inter_v[site2[0]][site2[1]][site2[2]] = xyz\n else:\n # ii = inter_v[site2[0]][site2[1]][site2[2]] + [1, False]\n # if ii not in interstitials:\n # interstitials.append(ii)\n\n interstitials.append(xyz + [1, True])\n\n\n # 遍历flag数组,找出为0的位置,则为空位\n for i in range(edge):\n for j in range(edge):\n for k in range(edge):\n if flag_c[i][j][k] == 0:\n vacancies.append(center_xyz[i][j][k] + [0, True])\n\n if flag_v[i][j][k] == 0:\n vacancies.append(vertex_xyz[i][j][k] + [0, True])\n\n print(len(interstitials))\n # print(interstitials)\n print(len(vacancies))\n # print(vacancies)\n\n return vacancies\n\n\nif __name__ == \"__main__\":\n fd = FindDefects()\n origin_file = \"F:\\\\download\\\\xyz-only-5kev-135-1\\\\5kev-135-1\\\\output_xyz\\\\crystal_mdl.origin.xyz\"\n step_file = \"F:\\\\download\\\\xyz-only-5kev-135-1\\\\5kev-135-1\\\\output_xyz\\\\crystal_mdl.40000.xyz\"\n\n # origin_file = \"E:\\\\xyz\\\\origin.xyz\"\n # step_file = \"E:\\\\xyz\\\\50000.xyz\"\n\n 
latConst = 2.87\n edge = 80\n\n\n defects = fd.findDefects(origin_file, step_file, latConst, edge)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"findDefects.py","file_name":"findDefects.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483992399","text":"#!/usr/bin/python3\n# coding=utf-8\n\nimport os\nimport re\nimport json\nimport pandas as pd\nimport xlwt\n\n# 功能:比较两中Java文件,确定代码调用的方法中的接口名,\n# 即获取所有controller中的接口方法,找出调用统一接口工具B类方法中的接口url; 绝大部分有规则,小部分特殊处理\n\niaFilePath = '/Users/jyxioakai/develop/test/ServiceClient.java'\ndicPath = '/Users/jyxioakai/develop/test/controller/'\njson_file = '/Users/jyxioakai/develop/test/calculate_1.json'\n# 调用方式\nfindKey = 'ServiceClient.'\n# 方法名\nmethodKey = 'public static '\n# 接口形式\nfaceKey = '/Server/'\n# 列出所有Controller文件\nfile_list = os.listdir(dicPath)\nia_file = open(iaFilePath)\nfo = open(json_file, 'w', encoding='UTF-8')\n\nmethod_list = []\nface_list = []\ntemp_name = ''\ntemp_face = ''\n# 遍历所有方法名及获取其中的接口名\nfor line in ia_file.readlines():\n methodName = re.search(methodKey + '.*', line)\n interFace = re.search(faceKey + '.*', line)\n simple_name = ''\n if methodName is not None:\n format_str = methodName.group()\n simple_name = format_str.split(' ')[3].split('(')[0]\n if simple_name == 'getToken' or simple_name == temp_name or simple_name == 'getConceptByKeyword' \\\n or simple_name == 'getHttpClient':\n continue\n # print(simple_name)\n method_list.append(simple_name)\n temp_name = simple_name\n if interFace is not None:\n format_face = interFace.group().split('\\\"')[0]\n if format_face == temp_face:\n continue\n # print(format_face)\n face_list.append(format_face)\n temp_face = format_face\n\nprint(len(method_list))\nprint(len(face_list))\n\nmethod_face = {}\nfor index in range(0, len(method_list)):\n method_face[method_list[index]] = face_list[index]\n\n# print(method_face)\n\n# 遍历所有Controller文件,并找出调用上述Java文件中的方法\ncall_array = []\nlist_call_face = {}\nfor java_file in file_list:\n # print(java_file)\n f = open(dicPath + java_file)\n for line in f.readlines():\n result = re.search(findKey + '.*', line)\n if result is not None:\n res_str = result.group().split('(')[0]\n if res_str not in call_array and res_str != 'IaServiceClient;':\n call_array.append(res_str)\n simple_method = res_str.split('.')[1]\n if simple_method in method_face:\n face = method_face[simple_method]\n list_call_face[simple_method] = face\n print(simple_method)\n\n# print(len(call_array))\nprint(len(list_call_face))\n\n# 写到json文件中\njsObj = json.dumps(list_call_face, indent=2)\nfo.write(jsObj)\nfo.close()\n\n# 写入Excel\n# df = pd.DataFrame()\n# for line in jsObj:\n# print(line)\n# for i in line:\n# df1 = pd.DataFrame([i])\n# df = df.append(df1)\n\n# 在excel表格的第1列写入, 不写入index\n# df.to_excel('./files/calculate_1.xls', sheet_name='sheet1', startcol=0, index=False)\n","sub_path":"fileProcessor/FindInterface.py","file_name":"FindInterface.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"419281118","text":"import sqlite3,os\nfrom contextlib import closing\n\nimport python_func\n\nconn = None\nupdated = []\nadded = []\nnot_connect = []\ndeleted = []\n\nclass Computer:\n \"\"\"Builds Computer Object Params are (computerid, name, username, windows, cpu)\"\"\"\n def __init__(self, computerid=0, name=None, username=None, windows=None, cpu=None,\n totalslots=0, currentamount=None, lastlogon=None, 
ipaddress=None,\n location=None):\n self.computerid = computerid\n self.name = name\n self.username = username\n self.windows = windows\n self.cpu = cpu\n self.currentamount = currentamount\n self.totalslots = totalslots\n self.lastlogon = lastlogon\n self.ipaddress = ipaddress\n self.location = location\n\n\ndef remove_duplicates(x):\n return list(dict.fromkeys(x))\n\ndef logs():\n os.chdir(r\"F:\\Work_Python_Scripts\\Windows_info\\powershell\")\n\n with open (\"CouldNotConnectNoBom.txt\", 'r') as f:\n text = f.read().splitlines()\n for line in text:\n not_connect.append(line.rstrip())\n\n updated_nd = remove_duplicates(updated)\n added_nd = remove_duplicates(added)\n deleted_nd = remove_duplicates(deleted)\n python_func.log_main(added_nd, updated_nd, not_connect, deleted_nd)\n\n\ndef connect():\n \"\"\"Connects to Sqlite Database\"\"\"\n global conn\n if not conn:\n conn = sqlite3.connect(r\"F:\\Databases\\SystemInfo.sqlite3\") # Change path here to where your database is\n conn.row_factory = sqlite3.Row\n\n\ndef close():\n \"\"\"Closes connection to database\"\"\"\n if conn:\n conn.close()\n\ndef make_computer(row):\n \"\"\"Returns List of Computer object use when pulling data from database\"\"\"\n return Computer(\n row[\"computerid\"],\n row[\"name\"],\n row[\"username\"],\n row[\"windows\"],\n row[\"cpu\"],\n row[\"currentamount\"],\n row[\"totalslots\"],\n row[\"lastlogon\"],\n row[\"ipaddress\"],\n row[\"location\"]\n )\n\ndef update_data(computer):\n \"\"\"\n Updates Data Into relevent tables in Database. Params: Pass variables into the two classes and then just pass those into this function\n Used by misc_func.add_computer()\n \"\"\"\n log = \"F:\\Work_Python_Scripts\\logs\\main_logs.txt\"\n sql_updateinfo = \"UPDATE Computers SET username = ?, lastlogon = ?, ipaddress = ?, location = ? 
WHERE name = ?\"\n sql_newinfo = \"INSERT INTO Computers (name, username, windows, cpu, currentamount, totalslots, lastlogon, ipaddress, location) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n with closing(conn.cursor()) as cursor:\n cursor.execute('SELECT name FROM Computers WHERE name=?', (computer.name,))\n result = cursor.fetchone()\n\n if result:\n updated.append(computer.name)\n cursor.execute(sql_updateinfo, (computer.username,\n computer.lastlogon,\n computer.ipaddress,\n computer.location,\n computer.name))\n\n else:\n try:\n added.append(computer.name)\n except TypeError:\n pass\n cursor.execute(sql_newinfo, (computer.name, computer.username,\n computer.windows, computer.cpu,\n computer.currentamount,\n computer.totalslots, computer.lastlogon,\n computer.ipaddress, computer.location))\n\n conn.commit()\n\n\ndef delete_computer():\n \"\"\"\n Deletes disabled computer accounts from database, will first check if\n if the computer exists in the table\n \"\"\"\n\n file = 'F:\\Work_Python_scripts\\Windows_info\\powershell\\DisabledAccountNoBOM.txt'\n log = \"F:\\Work_Python_scripts\\Windows_info\\powershell\\Deleted.txt\"\n\n with open(file, 'r') as f:\n\n name_list = []\n text = f.read().splitlines()\n\n # Checks if the text file is empty, if not appends to name_list\n if text:\n count = len(text)\n for line in text:\n name_list.append(line)\n\n with closing(conn.cursor()) as cursor:\n for name in name_list:\n # Checks if the computer name is in the database\n cursor.execute(f\"SELECT name FROM Computers WHERE name = ('{name}')\")\n result = cursor.fetchone()\n\n if result:\n deleted.append(result)\n # if it exists delete it\n cursor.execute(f\"DELETE FROM Computers WHERE name = ('{name}')\")\n print(f\"Deleted {name}\")\n else:\n # if not exists display a message\n print(f\"{name} not found\")\n conn.commit()\n","sub_path":"Windows_info/python/database_func.py","file_name":"database_func.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439507863","text":"from flask import Flask, render_template, request, json, redirect, session\nfrom flask import Markup\nfrom flask_login import LoginManager, login_user, logout_user, login_required, UserMixin\nimport requests\napp = Flask(__name__)\napp.config[\"DEBUG\"] = False\napp.config['SECRET_KEY'] = \"JutzX21JOBqOdxlCV8xqqnxD\"\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n@login_manager.user_loader\ndef load_user(user_id):\n return User(user_id)\nclass User(UserMixin):\n def __init__(self,id):\n self.id = id\n\n\n@app.route(\"/\")\n@login_required\ndef index1():\n r = requests.get('https://api.airtable.com/v0/appUnfcnf7SLuanVf/Imported%20table?api_key=keyJngv7iT0m6kzo2&sortField=_createdTime&sortDirection=desc')\n dict = r.json()\n dataset = []\n for i in dict['records']:\n dict = i['fields']\n dataset.append(dict)\n return render_template('index.html', entries=dataset)\n\n@app.route(\"/templates/about.html\")\n@login_required\ndef chart():\n r = requests.get('https://api.airtable.com/v0/appqTr1WsjkmoPWSY/user_login%20copy?api_key=keyJngv7iT0m6kzo2&sortField=_createdTime&sortDirection=desc')\n dict1 = r.json()\n dict2 = {}\n dataset = []\n name_list = []\n cred_list = []\n\n for i in dict1['records']:\n dict2 = i['fields']\n dataset.append(dict2)\n for item in dataset:\n name_list.append(item.get('Name'))\n cred_list.append(item.get('cred-sum'))\n\n return render_template('about.html', entries = 
zip(name_list, cred_list))\n\n@app.route(\"/templates/about.html\")\n@login_required\ndef table():\n headers = {\n 'Authorization': 'Bearer keyJngv7iT0m6kzo2',\n }\n\n params = (\n ('maxRecords', '25'),\n ('view', 'Grid View'),\n )\n r = requests.get('https://api.airtable.com/v0/appqTr1WsjkmoPWSY/ref?api_key=keyJngv7iT0m6kzo2&sortField=_createdTime&sortDirection=desc', headers=headers, params=params)\n dict = r.json()\n dataset = []\n for i in dict['records']:\n dict = i['fields']\n\n dataset.append(dict)\n return render_template('about.html', data=dataset)\n\n\n@app.route(\"/templates/products.html\")\n@login_required\ndef map():\n headers = {\n 'Authorization': 'Bearer keyJngv7iT0m6kzo2',\n }\n params = (\n ('view', 'Grid view'),\n )\n r = requests.get('https://api.airtable.com/v0/appgIi4oLnDe3vGNm/%E5%9C%B0%E5%9B%BE?api_key=keyJngv7iT0m6kzo2&sortField=_createdTime&sortDirection=desc',headers=headers, params=params)\n dict = r.json()\n dataset = []\n for i in dict['records']:\n dict = i['fields']\n dataset.append(dict)\n return render_template('products.html', entries = dataset)\n\n@app.route(\"/login\")\ndef login():\n message = 'Please login in first.'\n return render_template('login.html', message=message)\n@app.route(\"/process\",methods=['POST'])\ndef process():\n username = request.form['username']\n password = request.form['password']\n r = requests.get('https://api.airtable.com/v0/appba5G7mQq2Pr8eR/Table%201?api_key=keyJngv7iT0m6kzo2&sortField=_createdTime&sortDirection=desc')\n dict = r.json()\n dataset = []\n user_list = []\n pwd_list = []\n for i in dict['records']:\n dict = i['fields']\n dataset.append(dict)\n for item in dataset:\n user_list = item.get('UserName')\n pwd_list = item.get('Pwd')\n if username == user_list and password == pwd_list:\n login_user(User(1))\n message = \"Dear \" + username + \", welcome to Sharon's pages. 
Your login has been granted.\"\n return render_template('products.html', message=message)\n message = 'wrong password!'\n return render_template('login.html',message=message)\n@app.route('/logout/')\n@login_required\ndef logout():\n logout_user()\n message = 'Thanks for logging out.'\n return render_template('login.html',message=message)\nif __name__ == '__main__':\n app.run(debug = True)","sub_path":"com5940/problem set 5/flask_app (airtable login update).py","file_name":"flask_app (airtable login update).py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47984205","text":"# -*- coding: utf-8 -*-\nimport gc\n\nimport scrapy\nfrom selenium import webdriver\nfrom pydispatch import dispatcher\nfrom scrapy import signals\n\nfrom scrapy.cmdline import execute\nimport sys\nimport os\n\nfrom chacha.tutorial.areaList import Area\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom twisted.internet import reactor\n\nfrom scrapy import Request\n\nimport json\nimport urllib\nimport urlparse\nfrom openpyxl import Workbook\nimport xlrd\nimport xlwt\nimport os\nimport time\n\nimport logging\n\nfrom chacha.tutorial.items import ChaXunItem\n\n\nclass QichaSpider(scrapy.Spider):\n name = \"chaxun\"\n allowed_domains = [\"http://www.chacha.top/\"]\n start_urls = [ # 只是做为样例, 在start_requests中, 默认初次只返回第一个计算出的url结果\n 'https://www.chacha.top/sup?tmp=1', # 扶持\n 'https://www.chacha.top/notice?obj_type=7', # 公示\n 'https://www.chacha.top/notice?obj_type=4', # 申报'\n 'https://www.chacha.top/origin?obj_type=1', # 指导性文件'\n 'https://www.chacha.top/origin?obj_type=2', # 扶持政策'\n 'https://www.chacha.top/origin?obj_type=3' # 实施细则'\n ]\n\n def __init__(self, *args, **kwargs):\n self.browser = webdriver.Chrome(executable_path=\"C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe\")\n self.browser.implicitly_wait(10)\n\n self.hyperBrowser = webdriver.Chrome(executable_path=\"C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe\")\n self.hyperBrowser.implicitly_wait(10)\n\n self.itemLength = 0 # 搜索时 列表中list的长度\n self.currentUrlIndex = 1 # 当前进行parse的start_urls中的url 42 浦江县开始 18\n\n self.needLogin = True\n self.urlTarget = {}\n\n self.hyperIndexMap = {}\n self.fromUrl = {} # 标记最终的item, 来自哪个查询url, 便于分别保存请求.\n self.hasDownFiles = {} # 标记下载地址pdf 防止重复下载\n\n currentDayFile = time.strftime(\"%Y-%m-%d\", time.localtime())\n\n # parent = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\n parent = 'D:/pydemo/qichacha/chacha'\n self.location = parent + '/download/' + currentDayFile + '/'\n if os.path.exists(self.location):\n pass\n else:\n os.makedirs(self.location)\n\n self.startUrlList = []\n self.initStartUrls()\n\n # os.path.abspath(os.path.join(os.getcwd(), \"../..\"))\n # self.location = os.path.abspath(os.path.join(os.getcwd(), \"../..\")) + '/download/' + currentDayFile + '/'\n super(QichaSpider, self).__init__()\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n # 一个index 标记 每次完结之后+1\n # 终止条件 可以换为 后续的url是的继续爬取, 这样就可以保证只有一个url, 然后循环列表, 也是一个爬虫, 然后等完成之后, 后续再进行.\n def initStartUrls(self):\n\n area = Area()\n self.codeName = area.codeName\n for value in area.codeList:\n provinceCode = value['provinceCode']\n cityCode = value['cityCode']\n distCode = value['distCode']\n for url in self.start_urls:\n if distCode != 0:\n url += '&district_code=' + str(distCode)\n if provinceCode != 0:\n url += '&province_code=' + str(provinceCode)\n if cityCode != 0:\n url += '&city_code=' + str(cityCode)\n\n # 
如果指定更新区域, 参数传来的city province dist 在这里做过滤\n self.startUrlList.append(url)\n\n\n def getSaveName(self, targetUrl):\n query = str(urlparse.urlparse(targetUrl).query)\n queryParams = dict([(k,v[0]) for k,v in urlparse.parse_qs(query).items()])\n if queryParams.has_key('district_code'):\n self.distCode = queryParams['district_code']\n\n if queryParams.has_key('province_code'):\n self.provinceCode = queryParams['province_code']\n\n if queryParams.has_key('city_code'):\n self.cityCode = queryParams['city_code']\n\n if self.provinceCode == 0:\n provinceName = '全国'\n else:\n provinceName = self.codeName[self.provinceCode]\n\n if self.cityCode == 0:\n cityName = provinceName\n else:\n cityName = self.codeName[self.cityCode]\n\n if self.distCode == 0:\n name = cityName\n else:\n name = self.codeName[self.distCode]\n\n\n traget = self.urlTarget[targetUrl]\n traget['filelocation'] = self.location + provinceName + '/' + cityName + '/' + name + '/'\n if os.path.exists(traget['filelocation']):\n pass\n else:\n os.makedirs(traget['filelocation'])\n\n if targetUrl.find('tmp=1') != -1:\n name += \"_扶持\"\n elif targetUrl.find('obj_type=7') != -1:\n name += \"_公示\"\n elif targetUrl.find('obj_type=4') != -1:\n name += \"_申报\"\n elif targetUrl.find('obj_type=1') != -1:\n name += \"_指导性文件\"\n elif targetUrl.find('obj_type=2') != -1:\n name += \"_扶持政策\"\n elif targetUrl.find('obj_type=3') != -1:\n name += \"_实施细则\"\n\n return name + '.xlsx'\n\n def start_requests(self):\n targetUrl = self.getStartUrl()\n self.initAttrs(targetUrl)\n yield self.make_requests_from_url(targetUrl)\n\n def initAttrs(self, targetUrl):\n traget = {}\n self.urlTarget[targetUrl] = traget\n self.itemLength = 0\n\n traget['saveName'] = self.getSaveName(targetUrl)\n\n wb = Workbook()\n ws = wb.active\n ws.append(['标题', '发文体系', '文号', '序号',\n '公示类型', '进度', '类型', '适用地区',\n '发文时间', '扶持金额', '有效期限', '适用行业',\n '政策分类', '详情', '政策轨迹', '文章地址',\n '数据来源'\n ]) # 设置表头\n traget['wb'] = wb\n traget['ws'] = ws\n\n\n def getStartUrl(self):\n index = self.currentUrlIndex\n length = len(self.startUrlList)\n targetUrl = self.startUrlList[index % length]\n\n return targetUrl\n\n\n def getFileLocation(self, title, targetUrl): # 每个区 重复文件太多. 
所以向上一级存储 减少重复文件下载.\n target = self.urlTarget[targetUrl]\n parent = os.path.abspath(os.path.join(target['filelocation'], \"..\"))\n location = parent.replace('\\\\', '/') + '/' + 'files/' + title + '/' # 附件对应地址\n if os.path.exists(location):\n pass\n else:\n os.makedirs(location)\n\n return location\n\n\n def parse(self, response):\n startLen = self.itemLength\n\n logging.info('parse start len ----> ' + str(startLen))\n time.sleep(5)\n\n searchList = response.xpath('//li[@class=\"list-item\"]')\n if len(searchList) == 0: # 某些细则, list样式\n searchList = response.xpath('//li[@class=\"sup-list-item m-b-md\"]')\n\n self.itemLength = len(searchList)\n noEnd = True\n for index, items in enumerate(searchList):\n if index < startLen:\n continue\n\n if index % 100 == 0:\n if self.isInDataBase(items):\n noEnd = False\n logging.info(\" new data has in database ----> \")\n break\n\n progressSpan = items.xpath('.//span[@class=\"policy-label m-l-sm\"]')\n if len(progressSpan) == 0: # 某些细则, list样式\n progressSpan = items.xpath('.//span[@class=\"policy-label\"]')\n for progress in progressSpan: #\n value = self.decodeStr(progress.extract())\n if value.find('申报已截止') != -1:\n noEnd = False\n break\n\n href = items.xpath('.//a/@href')\n if len(href) > 0:\n hyper = href[0].extract()\n hyperUrl = 'https://www.chacha.top' + hyper + '&justindex=' + str(self.currentUrlIndex) # 为了使hyuperUrl不一样\n\n self.hyperIndexMap[hyperUrl] = index\n self.fromUrl[hyperUrl] = response.url\n\n logging.info('current index ----------> ' + str(index) + ' ---- ' + str(startLen))\n\n time.sleep(10)\n\n if self.isHyperAnn(hyperUrl):\n yield Request(url=hyperUrl,callback=self.parseHyperAnn, dont_filter=True)\n elif self.isHyperPolicy(hyperUrl):\n yield Request(url=hyperUrl,callback=self.parseHyperPolicy, dont_filter=True)\n else:\n yield Request(url=hyperUrl,callback=self.parseHyperItem, dont_filter=True)\n\n if noEnd == False:\n logging.info(\" refresh data is end in no more to sub \")\n break # 中断, 已经截至\n\n if self.itemLength > startLen and noEnd: # 继续迭代爬取数据\n logging.info('parse move on ----> ' + response.url)\n yield Request(url=response.url,callback=self.parse, dont_filter=True)\n else:\n time.sleep(60) # sleep 等待前序的一些process Item转换完成\n self.currentUrlIndex = self.currentUrlIndex + 1\n gc.collect()\n if self.currentUrlIndex < len(self.startUrlList):\n targetUrl = self.getStartUrl()\n self.initAttrs(targetUrl)\n logging.info('parse move next url ----> ' + targetUrl)\n yield Request(url=targetUrl,callback=self.parse, dont_filter=True)\n else:\n logging.info('parse end spider----> ' + str(startLen) + '--' + str(self.itemLength) + '---' + response.url)\n\n\n def isHyperlink(self, url):\n return self.isHyperAnn(url) or self.isHyperItem(url) or self.isHyperPolicy(url)\n\n def isHyperItem(self, url):\n return str(url).find('sup_item') != -1\n\n def isHyperAnn(self, url):\n return str(url).find('announce') != -1 or str(url).find('publicity') != -1\n\n def isHyperPolicy(self, url):\n return str(url).find('sup_policy') != -1 or str(url).find('macro_policy') != -1 or str(url).find('imple_regu') != -1\n\n # 需要获取标题 检测是否在数据库中\n def isInDataBase(self, items): # font-18 bold m-r-sm\n titleSpan = items.xpath('.//span[@class=\"font-20 bold m-r-sm\"]/text()')\n if len(titleSpan) == 0:\n titleSpan = items.xpath('.//span[@class=\"font-18 bold m-r-sm\"]/text()')\n for title in titleSpan:\n value = self.decodeStr(title.extract())\n if value.find('is in database ') != -1: # 检测是否在数据库中\n return True\n\n return False\n\n # 搜索 通知公示项\n # 通知申报 announce\n # 公示 
publicity\n def parseHyperAnn(self, response): # https://www.chacha.top/announce?id=274b53d9e06f3cc04c95\n logging.info('parse hyper announce publicity : ---> ' + response.url)\n\n if response.url != self.start_urls[0]:\n com = self.initItem()\n com['type'] = self.getType(response.url)\n com['url'] = response.url\n com['index'] = self.hyperIndexMap[response.url]\n com['fromUrl'] = self.fromUrl[response.url]\n\n topDiv = response.xpath('//div[@class=\"policy-item-top m-t-md m-b-md\"]')\n\n titleDiv = topDiv.xpath('.//div[@class=\"policy-title pull-left\"]')\n title = titleDiv.xpath('.//span[@class=\"bold font-24\"]/text()')[0].extract()\n com['title'] = self.decodeStr(title)\n\n progressDiv = titleDiv.xpath('.//span[@class=\"policy-label\"]/text()')\n if len(progressDiv) > 0: # 某些公示性文件 没有进度\n titleLabel = progressDiv[0].extract()\n com['progress'] = self.decodeStr(titleLabel)\n\n infos = topDiv.xpath('p') # 直接按照固定格式\n # 适用地区 发文时间\n # 扶持金额 有效期\n # 适用行业\n for info in infos:\n for infoSpan in info.xpath('span'):\n value = self.decodeStr(infoSpan.extract())\n if value.find('适用地区') != -1:\n areaSpan = infoSpan.xpath('./text()')[0].extract()\n com['area'] = self.decodeStr(areaSpan).replace(\"适用地区\", \"\").replace(\":\",\"\").rstrip().lstrip() # 奇怪的分号\n elif value.find('发文时间') != -1:\n updateTime = infoSpan.xpath('.//span/text()')[0].extract()\n com['updateTime'] = self.decodeStr(updateTime).replace(\"发文时间\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('扶持金额') != -1:\n moneySpan = infoSpan.xpath('.//span//span/text()')\n if len(moneySpan) > 0:\n money = moneySpan[0].extract() #金额\n com['money'] = self.decodeStr(money).replace(\"扶持金额\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('有效期限') != -1:\n validSpan = infoSpan.xpath('./text()')\n if len(validSpan) > 0:\n validTime = validSpan[0].extract()\n com['validTime'] = self.decodeStr(validTime).replace(\"有效期限\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('公示类型') != -1:\n public = infoSpan.xpath('./text()')[0].extract()\n com['notetype'] = self.decodeStr(public).replace(\"公示类型\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('行业') != -1:\n industry = infoSpan.xpath('.//span/text()')[0].extract()\n com['industry'] = self.decodeStr(industry).rstrip().lstrip()\n else:\n pass\n\n\n #申报 内容详情\n leftBox = response.xpath('//div[@class=\"pull-left content-left policy-content-box bg-white m-b-md\"]')\n leftContent = leftBox.xpath('div')\n for left in leftContent:\n value = self.decodeStr(left.extract())\n if value.find('申报详情') != -1 or value.find('公示详情') != -1:\n content = left.xpath('.//div[@class=\"detail-content\"]').extract()\n com['content'] = self.decodeStr(content).rstrip().lstrip()\n elif value.find('资料下载') != -1:\n location = self.getFileLocation(title, self.fromUrl[response.url])\n\n urls = left.xpath('.//li[@class=\"ev-download m-b-sm\"]')\n for url in urls:\n fileUrl = 'http:' + str(url.xpath('@data-href')[0].extract())\n name = location + url.xpath('.//a/text()')[0].extract()\n self.downFileWithName(fileUrl, name)\n elif value.find('政策时间轨迹') != -1:\n content = left.xpath('.//div[@class=\"detail-content\"]').extract()\n com['policyTrail'] = self.decodeStr(content).rstrip().lstrip()\n elif value.find('数据来源') != -1:\n text = left.xpath('.//div/text()')\n if len(text) > 0:\n dataSourceDiv = text[0].extract()\n com['dataSource'] = self.decodeStr(dataSourceDiv).replace(\"数据来源\", \"\").replace(\":\",\"\").rstrip().lstrip()\n else:\n pass\n\n yield com\n\n # 搜索 扶持\n def 
parseHyperItem(self, response): # https://www.chacha.top/sup_item?id=aad86a5a974c55c59aaf\n logging.info('parse hyper sup_item : ---> ' + response.url)\n if response.url != self.start_urls[0]:\n com = self.initItem()\n com['type'] = self.getType(response.url)\n com['url'] = response.url\n com['index'] = self.hyperIndexMap[response.url]\n com['fromUrl'] = self.fromUrl[response.url]\n\n topDiv = response.xpath('//div[@class=\"policy-item-top m-t-md m-b-md\"]')\n\n titleDiv = topDiv.xpath('.//div[@class=\"policy-title pull-left\"]')\n title = titleDiv.xpath('.//span[@class=\"bold font-24\"]/text()')[0].extract()\n com['title'] = self.decodeStr(title)\n\n progressDiv = titleDiv.xpath('.//span[@class=\"policy-label\"]/text()')\n if len(progressDiv) > 0: # 某些公示性文件 没有进度\n titleLabel = progressDiv[0].extract()\n com['progress'] = self.decodeStr(titleLabel)\n\n infos = topDiv.xpath('p') # 直接按照固定格式\n\n # 适用地区 发文时间 文号\n # 政策分类 扶持金额 有效期限\n for info in infos:\n for infoSpan in info.xpath('span'):\n value = self.decodeStr(infoSpan.extract())\n\n if value.find('适用地区') != -1:\n areaSpan = infoSpan.xpath('./text()')[0].extract()\n com['area'] = self.decodeStr(areaSpan).replace(\"适用地区\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('行业') != -1:\n industry = infoSpan.xpath('.//span/text()')[0].extract() #行业\n com['industry'] = self.decodeStr(industry).rstrip().lstrip()\n elif value.find('有效期限') != -1:\n validSpan = infoSpan.xpath('.//span/text()')\n if len(validSpan) > 0:\n validTime = validSpan[0].extract()\n com['validTime'] = self.decodeStr(validTime).replace(\"有效期限\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('扶持金额') != -1:\n moneySpan = infoSpan.xpath('.//span//span/text()')\n if len(moneySpan) > 0:\n money = moneySpan[0].extract() #金额\n com['money'] = self.decodeStr(money).replace(\"扶持金额\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('发文时间') != -1:\n updateSpan = infoSpan.xpath('./text()')\n if len(updateSpan) > 0:\n updateTime = updateSpan[0].extract()\n com['updateTime'] = self.decodeStr(updateTime).replace(\"发文时间\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('政策分类') != -1:\n types = ''\n for type in infoSpan.xpath('span'):\n value = type.xpath('./text()')[0].extract()\n if types == '':\n types += str(self.decodeStr(value))\n else:\n types += ',' + str(self.decodeStr(value))\n\n com['policyType'] = types\n else:\n pass\n\n\n #申报 扶持详情\n leftBox = response.xpath('//div[@class=\"pull-left content-left policy-content-box bg-white m-b-md\"]')\n leftContent = leftBox.xpath('div')\n for left in leftContent:\n value = self.decodeStr(left.extract())\n if value.find('申报详情') != -1 or value.find('扶持详情') != -1:\n content = left.xpath('.//div[@class=\"detail-content\"]').extract()\n com['content'] = self.decodeStr(content).rstrip().lstrip()\n elif value.find('资料下载') != -1:\n location = self.getFileLocation(title, self.fromUrl[response.url])\n urls = left.xpath('.//li[@class=\"ev-download m-b-sm\"]')\n for url in urls:\n fileUrl = 'http:' + str(url.xpath('@data-href')[0].extract())\n name = location + url.xpath('.//a/text()')[0].extract()\n self.downFileWithName(fileUrl, name)\n elif value.find('政策时间轨迹') != -1:\n content = left.xpath('.//div[@class=\"detail-content\"]').extract()\n com['policyTrail'] = self.decodeStr(content).rstrip().lstrip()\n elif value.find('数据来源') != -1:\n text = left.xpath('.//div/text()')\n if len(text) > 0:\n dataSourceDiv = text[0].extract()\n com['dataSource'] = self.decodeStr(dataSourceDiv).replace(\"数据来源\", 
\"\").replace(\":\",\"\").rstrip().lstrip()\n else:\n pass\n\n yield com\n\n\n # 搜索 文件 相关政府政策文件 需要登录获取文件\n # 指导性文件 macro_policy\n # 扶持政策 sup_policy\n # 实施细则 imple_regu\n def parseHyperPolicy(self, response): # https://www.chacha.top/sup_policy?id=d0c7431587332fef3a27\n logging.info('parse hyper macro_policy sup_policy imple_regu: ---> ' + response.url)\n if response.url != self.start_urls[0]:\n com = self.initItem()\n com['type'] = self.getType(response.url)\n com['url'] = response.url\n com['index'] = self.hyperIndexMap[response.url]\n com['fromUrl'] = self.fromUrl[response.url]\n\n topDiv = response.xpath('//div[@class=\"policy-item-top m-t-md m-b-md\"]')\n\n titleDiv = topDiv.xpath('.//div[@class=\"policy-title pull-left\"]')\n title = titleDiv.xpath('.//span[@class=\"bold font-24\"]/text()')[0].extract()\n com['title'] = self.decodeStr(title)\n\n progressDiv = titleDiv.xpath('.//span[@class=\"policy-label\"]/text()')\n if len(progressDiv) > 0: # 某些公示性文件 没有进度\n titleLabel = progressDiv[0].extract()\n com['progress'] = self.decodeStr(titleLabel)\n\n infos = topDiv.xpath('p') # 直接按照固定格式\n\n # 适用地区 发文时间 文号\n # 发文体系 有效时间\n for info in infos:\n for infoSpan in info.xpath('span'):\n value = self.decodeStr(infoSpan.extract())\n if value.find('适用地区') != -1:\n areaSpan = infoSpan.xpath('./text()')[0].extract()\n com['area'] = self.decodeStr(areaSpan).replace(\"适用地区\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('发文时间') != -1:\n updateTime = infoSpan.xpath('./text()')[0].extract()\n com['updateTime'] = self.decodeStr(updateTime).replace(\"发文时间\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('文号') != -1:\n number = infoSpan.xpath('./text()')[0].extract()\n com['number'] = self.decodeStr(number).replace(\"文号\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('发文体系') != -1:\n system = infoSpan.xpath('.//span/text()')\n if len(system) > 0:\n money = system[0].extract()\n com['system'] = self.decodeStr(money).replace(\"发文体系\", \"\").replace(\":\",\"\").rstrip().lstrip()\n elif value.find('有效期限') != -1:\n validSpan = infoSpan.xpath('./text()')\n if len(validSpan) > 0:\n validTime = validSpan[0].extract()\n com['validTime'] = self.decodeStr(validTime).replace(\"有效期限\", \"\").replace(\":\",\"\").rstrip().lstrip()\n else:\n pass\n\n # 政策原文\n leftBox = response.xpath('//div[@class=\"pull-left content-left bg-white m-b-md\"]') #特殊的class\n leftContent = leftBox.xpath('div')\n for left in leftContent:\n value = self.decodeStr(left.extract())\n if value.find('政策原文') != -1:\n content = left.xpath('.//div[@class=\"detail-content\"]').extract()\n com['content'] = self.decodeStr(content).rstrip().lstrip()\n elif value.find('资料下载') != -1:\n location = self.getFileLocation(title, self.fromUrl[response.url])\n urls = left.xpath('.//li[@class=\"m-b-sm\"]') #特殊的li\n for url in urls:\n value = url.xpath('.//a').xpath('@href')\n if len(value) > 0:\n fileUrl = 'http:' + str(value[0].extract())\n name = location + url.xpath('.//a/text()')[0].extract()\n self.downFileWithName(fileUrl, name)\n elif value.find('政策时间轨迹') != -1:\n content = left.xpath('.//div[@class=\"detail-content\"]').extract()\n com['policyTrail'] = self.decodeStr(content).rstrip().lstrip()\n elif value.find('数据来源') != -1:\n text = left.xpath('.//div/text()')\n if len(text) > 0:\n dataSourceDiv = text[0].extract()\n com['dataSource'] = self.decodeStr(dataSourceDiv).replace(\"数据来源\", \"\").replace(\":\",\"\").rstrip().lstrip()\n\n else:\n pass\n\n yield com\n\n def downFileWithName(self, fileUrl, name):\n if 
os.path.exists(name) or self.hasDownFiles.has_key(name):\n pass\n else:\n self.hasDownFiles[name] = 'in' # 可能的重复下载error\n if fileUrl.find('http://') != -1 or fileUrl.find('https://') != -1:\n urllib.urlretrieve(fileUrl, filename=name)\n\n def getType(self, url):\n if url.find('sup_policy') != -1: #文件 扶持\n return 'sup_policy'\n elif url.find('macro_policy') != -1:# 文件 指导性文件\n return 'macro_policy'\n elif url.find('imple_regu') != -1: #文件 实施细则\n return 'imple_regu'\n\n elif url.find('announce') != -1: # 通知 通知\n return 'announce'\n elif url.find('publicity') != -1: # 通知 公示\n return 'publicity'\n\n elif url.find('sup_item') != -1: # 扶持\n return 'sup_item'\n\n def initItem(self):\n com = ChaXunItem()\n com['title'] = ''\n com['progress'] = ''\n com['type'] = ''\n com['area'] = ''\n com['updateTime'] = ''\n com['money'] = ''\n com['validTime'] = ''\n com['industry'] = ''\n com['url'] = ''\n com['policyType'] = ''\n com['content'] = ''\n com['index'] = ''\n com['number'] = ''\n com['system'] = ''\n com['notetype'] = ''\n com['policyTrail'] = ''\n com['dataSource'] = ''\n com['fromUrl'] = ''\n return com\n\n def decodeStr(self, value):\n return json.dumps(value).decode('unicode_escape')\n\n\n def spider_closed(self, spider): #当爬虫退出的时候 关闭chrome\n\n # index = self.index + 1 # 根据index 判断urls的数组长度, 再决定是否后续的添加\n # if index < len(self.start_urls):\n # sys.path.append(os.path.dirname(os.path.abspath(__file__))) # ide调试\n # execute([\"scrapy\",\"crawl\",\"chacha\", \"-a\", \"index=\" + str(index)])\n self.browser.close()\n self.hyperBrowser.close()\n logging.info('close-------------------------> ' + spider.name)\n\n\n\n\n\n\n\n\n","sub_path":"chacha/tutorial/spiders/chaxun.py","file_name":"chaxun.py","file_ext":"py","file_size_in_byte":28067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422987752","text":"import logging\nimport time\n\nfrom selenium.common.exceptions import ElementClickInterceptedException, ElementNotInteractableException, ElementNotVisibleException, TimeoutException, WebDriverException\nfrom selenium.webdriver.common.by import By\n\nfrom helper.browser import Browser\n\nclass MSRPunchCard:\n _browser: Browser\n\n def do_punch_card(self, link, max_attempts=3):\n for i in range(max_attempts):\n try:\n self._browser.open_in_new_tab(link)\n self._click_through_punch_card()\n except TimeoutException:\n logging.exception(msg='Explore Daily Timeout Exception.')\n except (ElementNotVisibleException, ElementClickInterceptedException, ElementNotInteractableException):\n logging.exception(msg='Element not clickable or visible.')\n except WebDriverException:\n logging.exception(msg='Error.')\n finally:\n if self._verify_punch_card_completion():\n logging.info(msg='Punch Card is completed')\n self._browser.goto_main_window_close_others()\n return\n logging.debug(msg=f'Punch Card did not complete. Attempt: {i}/{max_attempts}')\n self._browser.goto_main_window_close_others()\n logging.info(msg='Punch Card is incomplete. 
Max number of attempts reached.')\n\n def _click_through_punch_card(self, max_attempts=10):\n for _ in range(max_attempts):\n try:\n if not self._browser.click_element(By.XPATH, '//a[@class= \"offer-cta\"]/child::button[contains(@class, \"btn-primary\")]'):\n break\n time.sleep(1)\n self._browser.goto_latest_window()\n self._browser.close()\n self._browser.goto_latest_window()\n logging.debug(msg='Clicked one punch card quest.')\n except WebDriverException:\n logging.exception(msg='Error occurred when clicking a punch card.')\n\n def _verify_punch_card_completion(self):\n return not self._browser.find_elements(By.XPATH, '//a[@class= \"offer-cta\" and ./button[contains(@class, \"btn-primary\")]]')\n \n ","sub_path":"msreward/worker/dashboard/punchcard.py","file_name":"punchcard.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"175156766","text":"############################################################################################################\n## Description: Calculates (not implomented yet) and displays LapTime values\n## Values displayed: Last, current, Best laptimes\n## Units: seconds\n## Written for: BOLT Senior Design Team\n## Author: Henry Trease\n## Written: Fall 2017\n## Modified: Fall 2017\n############################################################################################################\n\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QLCDNumber, QLabel, QAction, QFrame\nfrom PyQt5.QtGui import QIcon, QPainter, QColor, QPen\nfrom PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal, Qt\nfrom args import Arg_Class\n\nclass LastLapTime(QWidget):\n def __init__(self, parent):\n super(LastLapTime, self).__init__(parent)\n \n self.lastLapTimeMin = 0\n self.lastLapTimeSec = 0\n self.lastLapTimeMsec = 0\n \n self.lastLapTimeLCD = QLCDNumber(self)\n self.lastLapTimeLCD.setDigitCount(9)\n self.lastLapTimeLCD.display(str(self.lastLapTimeMin).zfill(2)+\":\"+str(self.lastLapTimeSec).zfill(2)+\":\"+str(self.lastLapTimeMsec).zfill(3))\n self.lastLapTimeLCD.move(0,20)\n self.lastLapTimeLCD.resize(170,40)\n self.lastLapTimeLCD.setFrameShape(QFrame.NoFrame)\n self.lastLapTimeLCD.setSegmentStyle(QLCDNumber.Flat)\n \n self.lastLapTimeLabel = QLabel(self)\n self.lastLapTimeLabel.setText(\"Last Lap Time:\")\n self.lastLapTimeLabel.move(0,0)\n \n @pyqtSlot(int, int, int)\n def lastLapTime_update(self, min, sec, msec):\n self.lastLapTimeLCD.display(str(min).zfill(2) + ':' + str(sec).zfill(2) + ':' + str(msec).zfill(3))\n \n\nclass CurrentLapTime(QWidget):\n def __init__(self, parent):\n super(CurrentLapTime, self).__init__(parent)\n \n self.arguments = Arg_Class() \n self.currentLapTimeValue = \"00:00:000\"\n \n self.currentLapTimeLCD = QLCDNumber(self)\n self.currentLapTimeLCD.setDigitCount(9)\n self.currentLapTimeLCD.display(self.currentLapTimeValue)\n self.currentLapTimeLCD.move(0, 20)\n\n self.currentLapTimeLCD.resize(270,140)\n \n self.currentLapTimeLCD.setFrameShape(QFrame.NoFrame)\n self.currentLapTimeLCD.setSegmentStyle(QLCDNumber.Flat)\n \n self.currentLapTimeLabel = QLabel(self)\n self.currentLapTimeLabel.setText(\"Current Lap Time:\")\n self.currentLapTimeLabel.move(0,0)\n self.currentLapTimeLabel.hide()\n if arguments.Args.demo:\n self.currentLapTimeLabel.show()\n \n @pyqtSlot(int, int, int)\n def currentLapTime_update(self, min, sec, msec):\n self.currentLapTimeLCD.display(str(min).zfill(2) + ':' + str(sec).zfill(2) + ':' + str(msec).zfill(3))\n \n\n def 
paintEvent(self, event):\n qp = QPainter(self)\n if self.arguments.Args.demo:\n qp.setPen(Qt.white)\n qp.drawRect(40,70, 90, 50)\n qp.drawRect(50, 80, 70, 30)\n qp.setBrush(Qt.green)\n qp.drawRect(60,70,5,10)\n \nclass BestLapTime(QWidget):\n def __init__(self, parent):\n super(BestLapTime, self).__init__(parent)\n\n self.bestLapTimeValue = \"00:00:000\"\n \n self.bestLapTimeLCD = QLCDNumber(self)\n self.bestLapTimeLCD.setDigitCount(9)\n self.bestLapTimeLCD.display(self.bestLapTimeValue)\n self.bestLapTimeLCD.move(0,20)\n self.bestLapTimeLCD.resize(170,40)\n self.bestLapTimeLCD.setFrameShape(QFrame.NoFrame)\n self.bestLapTimeLCD.setSegmentStyle(QLCDNumber.Flat)\n \n self.bestLapTimeLabel = QLabel(self)\n self.bestLapTimeLabel.setText(\"Best Lap Time: \")\n self.bestLapTimeLabel.move(0, 0)\n\n @pyqtSlot(int, int, int)\n def bestLapTime_update(self, min, sec, msec):\n self.bestLapTimeLCD.display(str(min).zfill(2) + ':' + str(sec).zfill(2) + ':' + str(msec).zfill(3))\n","sub_path":"lapTimePannel.py","file_name":"lapTimePannel.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"310996231","text":"import json\nimport sys\n\n# returns SORTED list of deleted sections by (start, end) times in seconds\ndef parse_ruffcut_json_file(fn):\n lst = []\n with open(fn) as f:\n deletedSelections = json.load(f)\n for e in deletedSelections:\n start = float(e['startSec'])\n end = float(e['endSec'])\n try:\n options = e['options']\n except KeyError:\n options = None\n lst.append({'start': start, 'end': end, 'options': options})\n lst.sort(key=lambda e:e['start'])\n # checks!\n for prev, cur in zip(lst, lst[1:]):\n assert prev['start'] < prev['end'] < cur['start'] < cur['end']\n\n return lst\n\nif __name__ == '__main__':\n lst = parse_ruffcut_json_file(sys.argv[1])\n print(lst)\n","sub_path":"ruffcut_json.py","file_name":"ruffcut_json.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"472531572","text":"# -*- coding: utf-8 -*-\n# @Author: Weijia Sun\n# @Date: 2020-03-26 12:54:15\n# @Last Modified by: Weijia Sun\n# @Last Modified time: 2020-03-26 12:54:36\n\n\"\"\"\nI/O modules\n\"\"\"\nimport os\nimport commentjson\nimport glob\nfrom obspy import read\nfrom obspy.taup import TauPyModel\nfrom obspy.geodetics import gps2dist_azimuth\nfrom obspy.geodetics import kilometers2degrees\n\nfrom tqdm import tqdm\nfrom functools import partial\nfrom multiprocessing import Pool\nimport multiprocessing\n\nimport logging\n\nlogging.basicConfig(level=logging.NOTSET, format='%(asctime)s %(filename)s[line:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=\"acc.io.log\", filemode=\"a\"\n )\n\n\ndef _load_json(jsonfile):\n \"\"\"\n Load parameters from the json formatted file\n\n :param str jsonfile: json file containing parameters\n :return dict kwargs: dictionary containing parameters\n \"\"\"\n with open(jsonfile, \"r\") as fp:\n kwargs = commentjson.load(fp)\n\n if kwargs[\"njobs\"] is None:\n kwargs[\"njobs\"] = multiprocessing.cpu_count()\n\n return kwargs\n\n\ndef _acc_read(file, outpath, acc_type, phase, force=True, tt_model=\"ak135\", depth_unit=\"km\"):\n \"\"\"\n Read seismic data.\n\n :param file: file name to be read\n :param outpath: where to save the data\n :param acc_type: event or noise\n :param phase: if acc_type='event', then calculate the traveltime of the phase\n :param force: force to overwrite 
when file exists if force is True\n :param tt_model: 1-d theoretical model, e.g., ak135 used to calculate traveltime of a specific phase\n :param depth_unit: unit of depth, general one is km, but in some cases it could be m.\n :return:\n \"\"\"\n\n tr = read(file)[0]\n\n station_id = _get_station_id(tr)\n trace_id = tr.id\n\n valid = 0\n if acc_type == \"event\":\n # tt_model = data_selection[\"tt_model\"]\n event_id = _get_event_id(tr)\n # depth_unit = data_selection[\"depth_unit\"]\n tr = _get_event_data(tr, tt_model, phase=phase, acc_type=acc_type, depth_unit=depth_unit)\n if tr is None:\n logging.warn(\"%s no valid arrival for phase %s\", file, phase)\n return 0\n else:\n valid = 1\n elif acc_type == \"noise\":\n event_id = _get_noise_id(tr)\n tr = _get_noise_data(tr, acc_type=acc_type)\n valid = 1\n\n filename = trace_id + \"_\" + event_id + \".pkl\"\n filepath = \"/\".join([outpath, station_id])\n # if not os.path.exists(filepath):\n try:\n os.makedirs(filepath)\n except:\n pass\n\n filen = filepath + \"/\" + filename\n if not force and os.path.exists(filen):\n pass\n else:\n tr.write(filen, format=\"PICKLE\")\n return valid\n\n\ndef _get_sac_origin(tr):\n \"\"\"\n Get the origin time of an event trace in sac format.\n\n :param tr: A event trace\n :return origin: origin time of an event\n\n .. Note::\n The trace should be sac formatted.\n\n \"\"\"\n try:\n origin = tr.stats.starttime - tr.stats.sac.b + tr.stats.sac.o\n except AttributeError:\n logging.critical(\"no valid event origin found in header: tr.stats.sac.o\")\n logging.critical(\"Please check acc_type='event' or 'noise'\")\n raise AttributeError\n return origin\n\n\ndef _get_event_id(tr):\n \"\"\"\n Get event id from a sac-formatted trace.\n\n :param tr:\n :return str event_id: event id\n \"\"\"\n\n origin = _get_sac_origin(tr)\n event_id = origin.datetime.strftime(\"%Y%m%d%H%M%S\")\n return event_id\n\n\ndef _get_event_id_tr(tr):\n \"\"\"\n Get event id from a obspy trace. The obspy trace should have the event_time keyword.\n\n :param tr: a obspy trace\n :return str event_id: event id in \"%Y%m%d%H%M%S\", e.g., '20191019200909'\n \"\"\"\n origin = tr.stats.event_time\n event_id = origin.datetime.strftime(\"%Y%m%d%H%M%S\")\n return event_id\n\n\ndef _get_noise_id(tr):\n \"\"\"\n Get trace id for noise type data. 
e.g., 'starttime-endtime'\n\n :param tr: an obspy trace\n :return str event_id: noise id\n \"\"\"\n starttime = tr.stats.starttime\n endtime = tr.stats.endtime\n event_id = \"-\".join([starttime.strftime(\"%Y%m%d%H%M%S\"), endtime.strftime(\"%Y%m%d%H%M%S\")])\n return event_id\n\n\ndef _get_station_id(tr):\n \"\"\"\n Get station id of a given trace.\n\n :param tr: trace\n :return station_id: station id formatted as '{newwork}.{station}.{location}'.\n \"\"\"\n\n s = tr.id\n s = s.split(\".\")\n if s[2] == \"\":\n station_id = \".\".join(s[:2])\n else:\n station_id = \".\".join(s[:3])\n\n return station_id\n\n\ndef _get_noise_data(tr, acc_type):\n \"\"\"\n Get update sac-trace header to obspy trace header station_latitude etc.\n\n :param tr:\n :param acc_type:\n :return tr:\n \"\"\"\n\n station_longitude = tr.stats.sac.stlo\n station_latitude = tr.stats.sac.stla\n station_elevation = tr.stats.sac.stel\n\n header = {\"type\": acc_type,\n \"station_latitude\": station_latitude, \"station_longitude\": station_longitude,\n \"station_elevation\": station_elevation\n }\n\n tr.stats.update(header)\n\n return tr\n\n\ndef _get_event_data(tr, tt_model, phase, acc_type, depth_unit=\"km\"):\n \"\"\"\n Update a sac trace to a obspy trace and update trace header,\n and calculate theoretical traveltime of a specific model and phase\n\n :param tr:\n :param tt_model:\n :param phase:\n :param acc_type:\n :param depth_unit:\n :return:\n\n .. Note::\n The input trace should be read from sac-formatted files.\n\n depth_unit is not used. if depth>1000 then unit should be meter,\n since no events deeper than 700 km on the earth.\n\n \"\"\"\n\n model = TauPyModel(model=tt_model)\n\n event_longitude = tr.stats.sac.evlo\n event_latitude = tr.stats.sac.evla\n event_depth = tr.stats.sac.evdp\n try:\n event_magnitude = tr.stats.sac.mag\n except:\n event_magnitude = 6.66\n\n # if depth_unit == \"m\":\n # event_depth /= 1000.0\n # in this case, the depth_unit is considered to be m.\n if event_depth > 1000:\n event_depth /= 1000\n\n station_longitude = tr.stats.sac.stlo\n station_latitude = tr.stats.sac.stla\n station_elevation = tr.stats.sac.stel\n\n try:\n component_azimuth = tr.stats.sac.cmpaz\n component_inclination = tr.stats.sac.cmpinc\n except:\n # print(tr.stats)\n if tr.stats.channel[-1] == \"Z\":\n component_azimuth = 0\n component_inclination = 0\n elif tr.stats.channel[-1] == \"N\":\n component_azimuth = 0\n component_inclination = 90\n elif tr.stats.channel[-1] == \"E\":\n component_azimuth = 90\n component_inclination = 90\n else:\n print(\"component is not ZNE. 
\", tr.stats.channel)\n os._exit(0)\n\n\n\n event_time = _get_sac_origin(tr)\n\n distance, azimuth, back_azimuth = gps2dist_azimuth(lat1=event_latitude, lon1=event_longitude,\n lat2=station_latitude, lon2=station_longitude,\n a=6378137.0, f=0.0033528106647474805)\n distance = kilometers2degrees(kilometer=distance / 1000.0)\n\n # travel time, slowness, inclinations\n arrivals = model.get_travel_times(source_depth_in_km=event_depth,\n distance_in_degree=distance,\n phase_list=[phase])\n if len(arrivals) < 1:\n return None\n\n arr = arrivals[0]\n\n onset = event_time + arr.time\n phase = phase\n inclination = arr.incident_angle\n slowness = arr.ray_param\n\n # pierce points\n # pp_latitude\n # pp_longitude\n # pp_depth\n\n # ray paths\n # arrivals = model.get_travel_times(source_depth_in_km=event_depth,\n # distance_in_degree=distance,\n # phase_list=[phase])\n header = {\"model\": tt_model, \"type\": acc_type,\n \"event_latitude\": event_latitude, \"event_longitude\": event_longitude, \"event_depth\": event_depth,\n \"event_time\": event_time, \"event_magnitude\": event_magnitude,\n \"station_latitude\": station_latitude, \"station_longitude\": station_longitude,\n \"station_elevation\": station_elevation,\n \"component_azimuth\": component_azimuth, \"component_inclination\":component_inclination,\n \"onset\": onset, \"phase\": phase, \"inclination\": inclination, \"slowness\": slowness,\n \"distance\": distance, \"azimuth\": azimuth, \"back_azimuth\": back_azimuth\n }\n\n tr.stats.update(header)\n\n return tr\n\n\ndef import_data(jsonfile):\n \"\"\"\n Import data from external media\n\n :param jsonfile: parameter filename\n :return:\n \"\"\"\n\n kwargs = _load_json(jsonfile)\n io = kwargs[\"io\"]\n # data_selection = kwargs[\"data_selection\"]\n njobs = kwargs[\"njobs\"]\n\n if njobs > multiprocessing.cpu_count():\n njobs = multiprocessing.cpu_count()\n\n files = glob.glob(io[\"data\"])\n outpath = io[\"outpath\"] + \"/0_raw\"\n acc_type = kwargs[\"acc_type\"]\n phase = kwargs[\"phase\"]\n force = io[\"force\"]\n tt_model = kwargs[\"tt_model\"]\n depth_unit = kwargs[\"depth_unit\"]\n\n if acc_type not in [\"event\", 'noise']:\n print(\"Invalid acc_type: %s. Aborted.\" % acc_type)\n logging.error(\"Invalid acc_type: %s. 
Aborted.\", acc_type)\n exit(-1)\n\n do_work = partial(_acc_read, outpath=outpath, acc_type=acc_type,\n phase=phase, force=force, tt_model=tt_model, depth_unit=depth_unit)\n numbers = []\n if njobs == 1:\n logging.info('do work sequential (%d cores)', njobs)\n for file in tqdm(files, total=len(files)):\n num = do_work(file)\n numbers.append(num)\n else:\n logging.debug('do work parallel (%d cores)', njobs)\n pool = multiprocessing.Pool(njobs)\n for num in tqdm(pool.imap_unordered(do_work, files), total=len(files)):\n numbers.append(num)\n pool.close()\n pool.join()\n\n logging.info(\"%d/%d files imported.\", sum(numbers), len(files))\n","sub_path":"acc/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":10133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127768065","text":"\"\"\"\nBase module for the imSim package.\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\nimport os\nimport sys\nimport warnings\nfrom collections import namedtuple, defaultdict\nimport logging\nimport gc\n\n# python_future no longer handles configparser as of 0.16.\n# This is needed for PY2/3 compatabiloty.\ntry:\n import configparser\nexcept ImportError:\n # python 2 backwards-compatibility\n import ConfigParser as configparser\n\nimport numpy as np\nimport pandas as pd\nimport lsst.log as lsstLog\nimport lsst.utils as lsstUtils\nfrom lsst.sims.photUtils import LSSTdefaults, PhotometricParameters\nfrom lsst.sims.utils import ObservationMetaData, radiansFromArcsec\nfrom lsst.sims.utils import applyProperMotion, ModifiedJulianDate\n\n__all__ = ['parsePhoSimInstanceFile', 'PhosimInstanceCatalogParseError',\n 'photometricParameters', 'phosim_obs_metadata',\n 'validate_phosim_object_list',\n 'read_config', 'get_config', 'get_logger']\n\n\nclass PhosimInstanceCatalogParseError(RuntimeError):\n \"Exception class for instance catalog parser.\"\n\n\nPhoSimInstanceCatalogContents = namedtuple('PhoSimInstanceCatalogContents',\n ('commands', 'objects'))\n\n_required_commands = set(\"\"\"rightascension\ndeclination\nmjd\naltitude\nazimuth\nfilter\nrotskypos\nrottelpos\ndist2moon\nmoonalt\nmoondec\nmoonphase\nmoonra\nnsnap\nobshistid\nseed\nseeing\nsunalt\nvistime\nrawSeeing\nFWHMeff\nFWHMgeom\"\"\".split())\n\n\ndef parsePhoSimInstanceFile(fileName, numRows=None):\n \"\"\"\n Read a PhoSim instance catalog into a Pandas dataFrame. Then use\n the information that was read-in to build and return a command\n dictionary and object dataFrame.\n\n Parameters\n ----------\n fileName : str\n The instance catalog filename.\n numRows : int, optional\n The number of rows to read from the instance catalog.\n If None (the default), then all of the rows will be read in.\n\n Returns\n -------\n namedtuple\n This contains the PhoSim commands, the objects, and the\n original DataFrames containing the header lines and object\n lines which were parsed with pandas.read_csv.\n \"\"\"\n\n # Read the text instance file into Pandas. Note that the top of the file\n # has commands in it, followed by one line per object.\n #\n # Note: I have chosen to use pandas here (as opposed to straight numpy e.g.)\n # because Pandas gracefully handles missing values including at the end\n # of lines. Not every line is the same length in the instance file since\n # different classes of objects have different numbers of parameters. 
The\n # other table reading options do not handle this situation well.\n columnNames = ['STRING', 'VALUE', 'RA', 'DEC', 'MAG_NORM', 'SED_NAME',\n 'REDSHIFT', 'GAMMA1', 'GAMMA2', 'KAPPA',\n 'DELTA_RA', 'DELTA_DEC',\n 'SOURCE_TYPE',\n 'PAR1', 'PAR2', 'PAR3', 'PAR4',\n 'PAR5', 'PAR6', 'PAR7', 'PAR8', 'PAR9', 'PAR10']\n\n dataFrame = pd.read_csv(fileName, names=columnNames, nrows=numRows,\n delim_whitespace=True, comment='#')\n\n # Any missing items from the end of the lines etc were turned into NaNs by\n # Pandas to represent that they were missing. This causes problems later\n # with the checks in the SED calculations in the GalSim interface. So,\n # convert them into 0.0 instead.\n dataFrame.fillna('0.0', inplace=True)\n\n # Split the dataFrame into commands and sources.\n phoSimHeaderCards = dataFrame.query(\"STRING != 'object'\")\n phoSimSources = dataFrame.query(\"STRING == 'object'\")\n\n # Check that the required commands are present in the instance catalog.\n command_set = set(phoSimHeaderCards['STRING'])\n missing_commands = _required_commands - command_set\n if missing_commands:\n message = \"\\nRequired commands that are missing from the instance catalog %s:\\n \" \\\n % fileName + \"\\n \".join(missing_commands)\n raise PhosimInstanceCatalogParseError(message)\n\n # Report on commands that are not part of the required set.\n extra_commands = command_set - _required_commands\n if extra_commands:\n message = \"\\nExtra commands in the instance catalog %s that are not in the required set:\\n \" \\\n % fileName + \"\\n \".join(extra_commands)\n warnings.warn(message)\n\n # Turn the list of commands into a dictionary.\n commands = extract_commands(phoSimHeaderCards)\n\n # This dataFrame will contain all of the objects to return.\n phoSimObjectList = extract_objects(phoSimSources, commands)\n return PhoSimInstanceCatalogContents(commands, phoSimObjectList)\n\n\ndef extract_commands(df):\n \"\"\"\n Extract the phosim commands and repackage as a simple dictionary,\n applying appropriate casts.\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame containing the instance catalog command data.\n\n Returns\n -------\n dict\n A dictionary with the phosim command values.\n \"\"\"\n my_dict = df[['STRING', 'VALUE']].set_index('STRING').T.to_dict('list')\n commands = dict(((key, value[0]) for key, value in my_dict.items()))\n commands['filter'] = int(commands['filter'])\n commands['nsnap'] = int(commands['nsnap'])\n commands['obshistid'] = int(commands['obshistid'])\n commands['seed'] = int(commands['seed'])\n commands['mjd'] = float(commands['mjd'])\n # Add bandpass for convenience\n commands['bandpass'] = 'ugrizy'[commands['filter']]\n return commands\n\n\ndef extract_objects(df, header):\n \"\"\"\n Extract the object information needed by the sims code\n and pack into a new dataframe.\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame containing the instance catalog object data.\n\n header : dictionary\n dictionary containing the PhoSim header cards as output\n by extract_commands()\n (necessary for correctly applying proper motion to stars)\n\n Returns\n -------\n pandas.DataFrame\n A DataFrame with the columns expected by the sims code.\n \"\"\"\n # Check for unhandled source types and emit warning if any are present.\n valid_types = dict(point='pointSource',\n sersic2d='sersic')\n invalid_types = set(df['SOURCE_TYPE']) - set(valid_types)\n if invalid_types:\n warnings.warn(\"Instance catalog contains unhandled source types:\\n%s\\nSkipping these.\"\n % 
'\\n'.join(invalid_types))\n\n columns = ('uniqueId', 'galSimType',\n 'magNorm', 'sedFilepath', 'redshift',\n 'raJ2000', 'decJ2000',\n 'halfLightRadius',\n 'minorAxis',\n 'majorAxis',\n 'positionAngle', 'sindex',\n 'properMotionRa', 'properMotionDec',\n 'parallax', 'radialVelocity')\n\n # Process point sources and galaxies separately.\n source_type = 'point'\n stars = df.query(\"SOURCE_TYPE=='%s'\" % source_type)\n phosim_stars = pd.DataFrame(np.zeros((len(stars), len(columns))),\n index=stars.index,\n columns=columns)\n phosim_stars['uniqueId'] = pd.to_numeric(stars['VALUE']).tolist()\n phosim_stars['galSimType'] = valid_types[source_type]\n phosim_stars['magNorm'] = pd.to_numeric(stars['MAG_NORM']).tolist()\n phosim_stars['sedFilepath'] = stars['SED_NAME'].tolist()\n phosim_stars['redshift'] = pd.to_numeric(stars['REDSHIFT']).tolist()\n phosim_stars['raJ2000'] = pd.to_numeric(stars['RA']).tolist()\n phosim_stars['decJ2000'] = pd.to_numeric(stars['DEC']).tolist()\n phosim_stars['properMotionRa'] = pd.to_numeric(stars['PAR5']).tolist()\n phosim_stars['properMotionDec'] = pd.to_numeric(stars['PAR6']).tolist()\n phosim_stars['parallax'] = pd.to_numeric(stars['PAR7']).tolist()\n phosim_stars['radialVelocity'] = pd.to_numeric(stars['PAR8']).tolist()\n if len(phosim_stars) > 0:\n phosim_stars = extract_extinction(stars, phosim_stars, 1)\n\n mjd = ModifiedJulianDate(TAI=header['mjd'])\n raICRS, decICRS = applyProperMotion(phosim_stars.raJ2000.values,\n phosim_stars.decJ2000.values,\n phosim_stars.properMotionRa.values,\n phosim_stars.properMotionDec.values,\n phosim_stars.parallax.values,\n phosim_stars.radialVelocity.values,\n mjd=mjd)\n\n phosim_stars = phosim_stars.assign(raICRS=raICRS, decICRS=decICRS)\n\n source_type = 'sersic2d'\n galaxies = df.query(\"SOURCE_TYPE == '%s'\" % source_type)\n phosim_galaxies = pd.DataFrame(np.zeros((len(galaxies), len(columns))),\n index=galaxies.index,\n columns=columns)\n phosim_galaxies['uniqueId'] = pd.to_numeric(galaxies['VALUE']).tolist()\n phosim_galaxies['galSimType'] = valid_types[source_type]\n phosim_galaxies['magNorm'] = pd.to_numeric(galaxies['MAG_NORM']).tolist()\n phosim_galaxies['sedFilepath'] = galaxies['SED_NAME'].tolist()\n phosim_galaxies['redshift'] = pd.to_numeric(galaxies['REDSHIFT']).tolist()\n phosim_galaxies['raJ2000'] = pd.to_numeric(galaxies['RA']).tolist()\n phosim_galaxies['decJ2000'] = pd.to_numeric(galaxies['DEC']).tolist()\n phosim_galaxies['majorAxis'] = \\\n radiansFromArcsec(pd.to_numeric(galaxies['PAR1'])).tolist()\n phosim_galaxies['minorAxis'] = \\\n radiansFromArcsec(pd.to_numeric(galaxies['PAR2'])).tolist()\n phosim_galaxies['halfLightRadius'] = phosim_galaxies['majorAxis']\n phosim_galaxies['positionAngle'] = \\\n (np.pi/180.*pd.to_numeric(galaxies['PAR3'])).tolist()\n phosim_galaxies['sindex'] = pd.to_numeric(galaxies['PAR4']).tolist()\n n_gal = len(phosim_galaxies.raJ2000.values)\n phosim_galaxies = phosim_galaxies.assign(raICRS=phosim_galaxies.raJ2000,\n decICRS=phosim_galaxies.decJ2000,\n properMotionRa=np.zeros(n_gal),\n properMotionDec=np.zeros(n_gal),\n parallax=np.zeros(n_gal),\n radialVelocity=np.zeros(n_gal))\n\n if len(phosim_galaxies) > 0:\n phosim_galaxies = extract_extinction(galaxies, phosim_galaxies, 5)\n\n return pd.concat((phosim_stars, phosim_galaxies), ignore_index=True)\n\n\ndef extract_extinction(raw_df, object_df, ext_par_start):\n \"\"\"\n Extract the extinction parameters for the 4 possible cases as\n described in\n https://bitbucket.org/phosim/phosim_release/wiki/Instance%20Catalog\n\n 
Parameters\n ----------\n raw_df : pandas.DataFrame\n The data frame containing the raw column data for the object\n entries in the instance catalog.\n object_df : pandas.DataFrame\n The data frame containing the processed column data, but lacking\n the extinction parameters.\n ext_par_start : int\n The starting parameter number such that the column labeled\n \"PAR%i\" % ext_par_start is the column in the raw_df\n corresponding to the first extinction parameter. For point\n sources, ext_par_start=1 (where PAR1 would be 'CCM' or\n 'none').\n\n Returns\n -------\n pandas.DataFrame\n The data frame resulting from adding the extinction parameters to\n the object_df data frame.\n \"\"\"\n dfs = []\n\n selection = raw_df.query(\"PAR%i=='CCM' and PAR%i=='CCM'\"\n % (ext_par_start, ext_par_start+3))\n if len(selection) > 0:\n iAv = 'PAR%i' % (ext_par_start+1)\n iRv = 'PAR%i' % (ext_par_start+2)\n gAv = 'PAR%i' % (ext_par_start+4)\n gRv = 'PAR%i' % (ext_par_start+5)\n assignments = dict(internalAv=pd.to_numeric(selection[iAv]).tolist(),\n internalRv=pd.to_numeric(selection[iRv]).tolist(),\n galacticAv=pd.to_numeric(selection[gAv]).tolist(),\n galacticRv=pd.to_numeric(selection[gRv]).tolist())\n dfs.append(object_df.loc[selection.index].assign(**assignments))\n\n selection = raw_df.query(\"PAR%i=='CCM' and PAR%i=='none'\"\n % (ext_par_start, ext_par_start+3))\n if len(selection) > 0:\n iAv = 'PAR%i' % (ext_par_start+1)\n iRv = 'PAR%i' % (ext_par_start+2)\n assignments = dict(internalAv=pd.to_numeric(selection[iAv]).tolist(),\n internalRv=pd.to_numeric(selection[iRv]).tolist(),\n galacticAv=0,\n galacticRv=0)\n dfs.append(object_df.loc[selection.index].assign(**assignments))\n\n selection = raw_df.query(\"PAR%i=='none' and PAR%i=='CCM'\"\n % (ext_par_start, ext_par_start+1))\n if len(selection) > 0:\n gAv = 'PAR%i' % (ext_par_start+2)\n gRv = 'PAR%i' % (ext_par_start+3)\n assignments = dict(internalAv=0,\n internalRv=0,\n galacticAv=pd.to_numeric(selection[gAv]).tolist(),\n galacticRv=pd.to_numeric(selection[gRv]).tolist())\n dfs.append(object_df.loc[selection.index].assign(**assignments))\n\n selection = raw_df.query(\"PAR%i=='none' and PAR%i=='none'\"\n % (ext_par_start, ext_par_start+1))\n if len(selection) > 0:\n assignments = dict(internalAv=0,\n internalRv=0,\n galacticAv=0,\n galacticRv=0)\n dfs.append(object_df.loc[selection.index].assign(**assignments))\n\n result = pd.concat(dfs)\n gc.collect()\n return result\n\n\ndef validate_phosim_object_list(phoSimObjects):\n \"\"\"\n Remove rows with column values that are known to cause problems with\n the sim_GalSimInterface code.\n\n Parameters\n ----------\n phoSimObjects : pandas.DataFrame\n DataFrame of parsed object lines from the instance catalog.\n\n Returns\n -------\n namedtuple\n A tuple of DataFrames containing the accepted and rejected objects.\n \"\"\"\n bad_row_queries = ('(galSimType==\"sersic\" and majorAxis < minorAxis)',\n '(magNorm > 50)',\n '(galacticAv==0 and galacticRv==0)')\n\n rejected = dict((query, phoSimObjects.query(query))\n for query in bad_row_queries)\n all_rejected = \\\n pd.concat(rejected.values(), ignore_index=True).drop_duplicates()\n accepted = phoSimObjects.query('not (' + ' or '.join(bad_row_queries) + ')')\n if len(all_rejected) != 0:\n message = \"\\nOmitted %i suspicious objects from\" % len(all_rejected)\n message += \" the instance catalog satisfying:\\n\"\n for query, objs in rejected.items():\n message += \"%i %s\\n\" % (len(objs), query)\n message += \"Some rows may satisfy more than one 
condition.\\n\"\n warnings.warn(message)\n checked_objects = namedtuple('checked_objects', ('accepted', 'rejected'))\n return checked_objects(accepted, all_rejected)\n\n\ndef photometricParameters(phosim_commands):\n \"\"\"\n Factory function to create a PhotometricParameters object based on\n the instance catalog commands.\n\n Parameters\n ----------\n dict\n The phosim commands provided by parsePhoSimInstanceFile.\n\n Returns\n -------\n lsst.sims.photUtils.PhotometricParameters\n The object containing the photometric parameters.\n\n Notes\n -----\n The gain is set to unity so that the resulting eimage has units of\n electrons/pixel. Read noise and dark current are set to zero.\n The effects from all three of those will be added by the\n electronics chain readout code.\n \"\"\"\n config = get_config()\n nsnap = phosim_commands['nsnap']\n vistime = phosim_commands['vistime']\n readout_time = config['electronics_readout']['readout_time']\n exptime = (vistime - (nsnap-1)*readout_time)/float(nsnap)\n return PhotometricParameters(exptime=exptime,\n nexp=nsnap,\n gain=1,\n readnoise=0,\n darkcurrent=0,\n bandpass=phosim_commands['bandpass'])\n\n\ndef phosim_obs_metadata(phosim_commands):\n \"\"\"\n Factory function to create an ObservationMetaData object based\n on the PhoSim commands extracted from an instance catalog.\n\n Parameters\n ----------\n phosim_commands : dict\n Dictionary of PhoSim physics commands.\n\n Returns\n -------\n lsst.sims.utils.ObservationMetaData\n\n Notes\n -----\n The seeing from the instance catalog is the value at 500nm at\n zenith. Do we need to do a band-specific calculation?\n \"\"\"\n bandpass = phosim_commands['bandpass']\n obs_md = ObservationMetaData(pointingRA=phosim_commands['rightascension'],\n pointingDec=phosim_commands['declination'],\n mjd=phosim_commands['mjd'],\n rotSkyPos=phosim_commands['rotskypos'],\n bandpassName=bandpass,\n m5=LSSTdefaults().m5(bandpass),\n seeing=phosim_commands['FWHMeff'])\n # Set the OpsimMetaData attribute with the obshistID info.\n obs_md.OpsimMetaData = {'obshistID': phosim_commands['obshistid']}\n obs_md.OpsimMetaData['FWHMgeom'] = phosim_commands['FWHMgeom']\n obs_md.OpsimMetaData['FWHMeff'] = phosim_commands['FWHMeff']\n obs_md.OpsimMetaData['rawSeeing'] = phosim_commands['rawSeeing']\n obs_md.OpsimMetaData['altitude'] = phosim_commands['altitude']\n return obs_md\n\n\nclass ImSimConfiguration(object):\n \"\"\"\n Configuration parameters for the simulation. 
All parameters are\n set in a class-level dictionary to ensure that they are the same\n across all class instances.\n\n Individual parameter access is via section name:\n\n >>> config = get_config()\n >>> config['electronics_readout']['readout_time']\n 3.\n \"\"\"\n imsim_sections = defaultdict(dict)\n\n def __getitem__(self, section_name):\n return self.imsim_sections[section_name]\n\n def set_from_config(self, section_name, key, value):\n \"Set the parameter value with the cast from a string applied.\"\n self[section_name][key] = self.cast(value)\n\n @staticmethod\n def cast(value):\n \"\"\"\n Try to do sensible default casting of string representations\n of the parameters that are read from the config file.\n\n Parameters\n ----------\n value : str\n The string value returned, e.g., by ConfigParser.items(...).\n\n Returns\n -------\n None, int, float, str\n Depending on the first workable cast, in that order.\n \"\"\"\n if value == 'None':\n return None\n try:\n if value.find('.') == -1 and value.find('e') == -1:\n return int(value)\n else:\n return float(value)\n except ValueError:\n # Return as the original string.\n return value\n\n\ndef get_config():\n \"\"\"\n Get an ImSimConfiguration object with the current configuration.\n\n Returns\n -------\n ImSimConfiguration object\n \"\"\"\n return ImSimConfiguration()\n\n\ndef read_config(config_file=None):\n \"\"\"\n Read the configuration parameters for the simulation that are not\n given in the instance catalogs.\n\n Parameters\n ----------\n config_file : str, optional\n The file containing the configuration parameters. If None\n (the default), then read the default parameters from\n data/default_imsim_configs.\n\n Returns\n -------\n dict ImSimConfiguration object\n An instance of ImSimConfiguration filled with the parameters from\n config_file.\n \"\"\"\n my_config = ImSimConfiguration()\n cp = configparser.ConfigParser()\n cp.optionxform = str\n if config_file is None:\n config_file = os.path.join(lsstUtils.getPackageDir('imsim'),\n 'data', 'default_imsim_configs')\n cp.read(config_file)\n for section in cp.sections():\n for key, value in cp.items(section):\n my_config.set_from_config(section, key, value)\n return my_config\n\n\ndef get_logger(log_level):\n \"\"\"\n Set up standard logging module and set lsst.log to the same log\n level.\n\n Parameters\n ----------\n log_level : str\n This is converted to logging. and set in the logging\n config.\n \"\"\"\n # Setup logging output.\n logging.basicConfig(format=\"%(message)s\", stream=sys.stdout)\n logger = logging.getLogger()\n logger.setLevel(eval('logging.' 
+ log_level))\n\n # Set similar logging level for Stack code.\n if log_level == \"CRITICAL\":\n log_level = \"FATAL\"\n lsstLog.setLevel(lsstLog.getDefaultLoggerName(),\n eval('lsstLog.%s' % log_level))\n\n return logger\n","sub_path":"python/desc/imsim/imSim.py","file_name":"imSim.py","file_ext":"py","file_size_in_byte":21199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"500679138","text":"# -*- coding=utf-8 -*-\nimport pytest\n\nfrom requester.base_requester import check_fields\nfrom requester.web_requester import WebRequester\nfrom service.optometry_service import copy_goods\nfrom env_constants import constants_yot_test\n\n\"\"\"\n页面操作:视光运营-商品目录-选择商品详情-复制新增商品\n\"\"\"\n\nREQUESTER = WebRequester()\n\n\n@pytest.mark.smoke\ndef test_goods_copy():\n # resp = copy_goods(132398028257624820)\n resp = copy_goods(constants_yot_test.GOODS_ID)\n ages = ('good_unitid', 'good_unitname', 'status', 'good_bidprice', 'good_price', 'good_class_id',\n 'good_class_name', 'good_brand_id', 'good_brand', 'good_variety_id', 'good_variety_code', 'good_variety',\n 'good_factory_id', 'good_factory', 'good_supplier_id', 'good_supplier', 'good_give')\n check_fields(ages, resp['data'].keys())\n ages = ('prop_prop_id', 'prop_prop_name', 'choi_id', 'choi_code', 'choi_value')\n check_fields(ages, resp['data']['list'][0].keys())\n assert resp['data']['good_unitid'] == constants_yot_test.GOODS_UNIT_ID\n assert resp['data']['good_class_name'] == constants_yot_test.GOODS_CLASS_NAME\n assert resp['data']['list'][0]['prop_prop_name'] == u\"颜色\"\n","sub_path":"optometry/cases/1.3.3/test_goods_copy.py","file_name":"test_goods_copy.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"385248460","text":"#!/usr/bin/env python\n# coding: utf-8\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n name='cryptoString',\n version='1.0.1',\n author='andy6804tw',\n author_email='andy6804tw@yahoo.com.tw',\n url='https://github.com/1010code/cryptoString',\n description='A module that returns alphanumeric strings.',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n install_requires=[],\n entry_points={\n 'consoleScripts': [\n 'cryptoString=cryptoString:version'\n ]\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.5',\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"500986877","text":"#!/usr/bin/python\n\nimport requests\nimport random\nimport time\nimport socket\nimport http.client\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup\n\nclient = MongoClient('127.0.0.1', 27017)\n# client.spider.authenticate('baiyang', 'baiyang')\ndb = client.spider\ncollection = db.job51_4\n\n\ndef get_content(url, data = None):\n header = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116'\n }\n timeout = 
random.choice(range(80, 180))\n while True:\n try:\n rep = requests.get(url, headers=header, timeout=timeout)\n rep.encoding = 'gbk'\n break\n except socket.timeout as e:\n print('3:', e)\n time.sleep(random.choice(range(8, 15)))\n except socket.error as e:\n print('4:', e)\n time.sleep(random.choice(range(20, 60)))\n\n except http.client.BadStatusLine as e:\n print('5:', e)\n time.sleep(random.choice(range(30, 80)))\n\n except http.client.IncompleteRead as e:\n print('6:', e)\n time.sleep(random.choice(range(5, 15)))\n return rep.text\n\n\ndef get_data(html_text):\n final = []\n bs = BeautifulSoup(html_text, \"html.parser\") # 创建BeautifulSoup对象\n content = bs.find(class_='tCompany_center')\n li = content.find(id='tHeader_mk')\n if str(li) == 'None':\n print('none', url)\n else:\n bid_sh = {}\n pass\n name = content.find('h1').string\n detail = content.find(class_='ltype')\n summary = content.find(class_='con_msg')\n position = content.find(class_='table_list')\n bid_sh['name'] = name\n print(url)\n bid_sh['link'] = url\n # bid_sh['detail'] = str(detail)\n # bid_sh['summary'] = str(summary)\n if str(position) == 'None':\n print('no position', url)\n else:\n list_ul = position.find(id='joblistdata')\n list_item = list_ul.find_all(class_='el')\n position_arr = []\n for item in list_item:\n position_detail = {}\n pass\n position_detail['position_name'] = item.find('a').string\n position_detail['position_need'] = item.find(class_='t2').string\n position_detail['position_addr'] = item.find(class_='t3').string\n position_detail['position_money'] = item.find(class_='t4').string\n position_detail['position_time'] = item.find(class_='t5').string\n position_arr.append(position_detail)\n bid_sh['position'] = str(position_arr)\n collection.insert(bid_sh)\n return final\n\n\nif __name__ == '__main__':\n for i in range(4000001, 5000000):\n url = 'http://jobs.51job.com/all/co'+str(i)+'.html'\n html = get_content(url)\n result = get_data(html)\n","sub_path":"job51_4.py","file_name":"job51_4.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"613402781","text":"\n# coding: utf-8\n\n# In[2]:\n\n#From:\n# http://stackoverflow.com/questions/37144423/all-possible-maximum-matchings-of-a-bipartite-graph\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\ndef checkAll(G,m):\n b = nx.bipartite.eppstein_matching(G) # Finds first match\n c = list(b.keys())\n for y in c[int(len(c)/2):]: # Reduces to one occurrence per line\n b.pop(y)\n if len(b) != m: # If new size, break\n return 0\n return b # Add to list of possibilities\n\ndef maximal_matching(G):\n edges = G.edges()\n A = []\n m = len(nx.bipartite.eppstein_matching(G))/2 # Create an expected maximum\n for x in range(len(edges)):\n b = checkAll(G,m)\n if b:\n A += [b]\n else:\n break\n keys = list(b.keys())\n cache = (keys[0],b[keys[0]])\n removed = []\n while 1:\n removed += [(keys[1],b[keys[1]])]\n G.remove_edge(keys[1],b[keys[1]]) # Remove first option\n b = checkAll(G,m)\n if b and cache == (keys[0],b[keys[0]]):\n A += [b]\n else:\n break\n G.add_edges_from(removed)\n G.remove_edge(*edges[x])\n\n return A\n\ndef print_matching(A):\n print(list(eval(x) for x in set(str(x) for x in A)))\n\n","sub_path":"Maximal_matching.py","file_name":"Maximal_matching.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"216496096","text":"from . 
import DATA_DIR\nimport csv\n\nREMIND_TO_ECOINVENT_EMISSION_FILEPATH = (DATA_DIR / \"remind_to_ecoinvent_emission_mappping.csv\")\n\n\nclass InventorySet:\n \"\"\"\n Hosts different filter sets to for ecoinvent activities and exchanges.\n\n It stores:\n * material_filters: filters for activities related to materials.\n * powerplant_filters: filters for activities related to power generation technologies.\n * emissions_map: REMIND emission labels as keys, ecoinvent emission labels as values\n\n The functions :func:`generate_material_map` and :func:`generate_powerplant_map` can\n be used to extract the actual activity objects as dictionaries.\n These functions return the result of applying :func:`act_fltr` to the filter dictionaries.\n \"\"\"\n\n material_filters = {\n \"steel\": {\"fltr\": \"market for steel,\", \"mask\": \"hot rolled\"},\n \"concrete\": {\"fltr\": \"market for concrete,\"},\n \"copper\": {\"fltr\": \"market for copper\", \"filter_exact\": True},\n \"aluminium\": {\n \"fltr\": [\"market for aluminium, primary\", \"market for aluminium alloy,\"]\n },\n \"electricity\": {\"fltr\": \"market for electricity\"},\n \"gas\": {\"fltr\": \"market for natural gas,\", \"mask\": [\"network\", \"burned\"]},\n \"diesel\": {\"fltr\": \"market for diesel\", \"mask\": [\"burned\", \"electric\"]},\n \"petrol\": {\"fltr\": \"market for petrol,\", \"mask\": \"burned\"},\n \"freight\": {\"fltr\": \"market for transport, freight\"},\n \"cement\": {\"fltr\": \"market for cement,\"},\n \"heat\": {\"fltr\": \"market for heat,\"},\n }\n\n powerplant_filters = {\n \"Biomass IGCC CCS\": {\n \"fltr\": [\n \"Electricity, from CC plant, 100% SNG, truck 25km, post, pipeline 200km, storage 1000m/2025\",\n \"Electricity, at wood burning power plant 20 MW, truck 25km, post, pipeline 200km, storage 1000m/2025\",\n \"Electricity, at BIGCC power plant 450MW, pre, pipeline 200km, storage 1000m/2025\",\n ]\n },\n \"Biomass IGCC\": {\n \"fltr\": \"Electricity, at BIGCC power plant 450MW, no CCS/2025\"\n },\n \"Coal IGCC\": {\n \"fltr\": [\n \"Electricity, at power plant/hard coal, IGCC, no CCS/2025\",\n \"Electricity, at power plant/lignite, IGCC, no CCS/2025\",\n ]\n },\n \"Coal IGCC CCS\": {\n \"fltr\": [\n \"Electricity, at power plant/hard coal, pre, pipeline 200km, storage 1000m/2025\",\n \"Electricity, at power plant/lignite, pre, pipeline 200km, storage 1000m/2025\",\n ]\n },\n \"Coal PC CCS\": {\n \"fltr\": [\n \"Electricity, at power plant/hard coal, post, pipeline 200km, storage 1000m/2025\",\n \"Electricity, at power plant/lignite, post, pipeline 200km, storage 1000m/2025\",\n ]\n },\n \"Gas CCS\": {\n \"fltr\": [\n \"Electricity, at power plant/natural gas, pre, pipeline 200km, storage 1000m/2025\",\n \"Electricity, at power plant/natural gas, post, pipeline 200km, storage 1000m/2025\",\n ]\n },\n \"Biomass CHP\": {\n \"fltr\": {\n \"name\":[\n \"heat and power co-generation, wood chips\",\n \"heat and power co-generation, biogas\",\n ],\n \"reference product\": \"electricity\"\n }\n },\n \"Coal PC\": {\n \"fltr\": [\n \"electricity production, hard coal\",\n \"electricity production, lignite\",\n ],\n \"mask\": \"mine\",\n },\n \"Coal CHP\": {\n \"fltr\": {\n \"name\": [\n \"heat and power co-generation, hard coal\",\n \"heat and power co-generation, lignite\",\n ],\n \"reference product\": \"electricity\"\n }\n },\n \"Gas OC\": {\n \"fltr\": \"electricity production, natural gas, conventional power plant\"\n },\n \"Gas CC\": {\n \"fltr\": \"electricity production, natural gas, combined cycle power 
plant\"\n },\n \"Gas CHP\": {\n \"fltr\": {\n \"name\": [\n \"heat and power co-generation, natural gas, combined cycle power plant, 400MW electrical\",\n \"heat and power co-generation, natural gas, conventional power plant, 100MW electrical\",\n ],\n \"reference product\": \"electricity\"\n }\n },\n \"Geothermal\": {\"fltr\": \"electricity production, deep geothermal\"},\n \"Hydro\": {\n \"fltr\": [\n \"electricity production, hydro, reservoir\",\n \"electricity production, hydro, run-of-river\",\n ]\n },\n \"Nuclear\": {\"fltr\": \"electricity production, nuclear\", \"mask\": \"aluminium\"},\n \"Oil\": {\n \"fltr\": {\n \"name\": [\n \"electricity production, oil\",\n \"heat and power co-generation, oil\",\n ],\n \"reference product\": \"electricity\"\n },\n \"mask\": \"aluminium\",\n },\n \"Solar CSP\": {\n \"fltr\": [\n \"electricity production, solar thermal parabolic trough, 50 MW\",\n \"electricity production, solar tower power plant, 20 MW\",\n ]\n },\n \"Solar PV\": {\"fltr\": \"electricity production, photovoltaic\"},\n \"Wind\": {\"fltr\": \"electricity production, wind\"},\n }\n\n def __init__(self, db):\n self.db = db\n\n def generate_material_map(self):\n \"\"\"\n Filter ecoinvent processes related to different material demands.\n\n :return: dictionary with materials as keys (see below) and\n sets of related ecoinvent activities as values.\n :rtype: dict\n\n \"\"\"\n\n return self.generate_sets_from_filters(self.material_filters)\n\n def generate_powerplant_map(self):\n \"\"\"\n Filter ecoinvent processes related to electricity production.\n\n :return: dictionary with el. prod. techs as keys (see below) and\n sets of related ecoinvent activities as values.\n :rtype: dict\n\n \"\"\"\n return self.generate_sets_from_filters(self.powerplant_filters)\n\n def get_remind_to_ecoinvent_emissions(self):\n \"\"\"\n Retrieve the correspondence between REMIND and ecoinvent emission labels.\n :return: REMIND emission labels as keys and ecoinvent emission labels as values\n :rtype: dict\n \"\"\"\n\n if not REMIND_TO_ECOINVENT_EMISSION_FILEPATH.is_file():\n raise FileNotFoundError(\n \"The dictionary of emission labels correspondences could not be found.\"\n )\n\n csv_dict = {}\n\n with open(REMIND_TO_ECOINVENT_EMISSION_FILEPATH) as f:\n input_dict = csv.reader(f, delimiter=\";\")\n for row in input_dict:\n csv_dict[row[0]] = row[1]\n\n return csv_dict\n\n def act_fltr(self, db, fltr={}, mask={}, filter_exact=False, mask_exact=False):\n \"\"\"Filter `db` for activities matching field contents given by `fltr` excluding strings in `mask`.\n `fltr`: string, list of strings or dictionary.\n If a string is provided, it is used to match the name field from the start (*startswith*).\n If a list is provided, all strings in the lists are used and results are joined (*or*).\n A dict can be given in the form : to filter for in .\n `mask`: used in the same way as `fltr`, but filters add up with each other (*and*).\n `filter_exact` and `mask_exact`: boolean, set `True` to only allow for exact matches.\n\n :param db: A lice cycle inventory database\n :type db: brightway2 database object\n :param fltr: value(s) to filter with.\n :type fltr: Union[str, lst, dict]\n :param mask: value(s) to filter with.\n :type mask: Union[str, lst, dict]\n :param filter_exact: requires exact match when true.\n :type filter_exact: bool\n :param mask_exact: requires exact match when true.\n :type mask_exact: bool\n :return: list of activity data set names\n :rtype: list\n\n \"\"\"\n result = []\n\n # default field is name\n 
if type(fltr) == list or type(fltr) == str:\n fltr = {\"name\": fltr}\n if type(mask) == list or type(mask) == str:\n mask = {\"name\": mask}\n\n def like(a, b):\n if filter_exact:\n return a == b\n else:\n return a.startswith(b)\n\n def notlike(a, b):\n if mask_exact:\n return a != b\n else:\n return b not in a\n\n assert len(fltr) > 0, \"Filter dict must not be empty.\"\n for field in fltr:\n condition = fltr[field]\n if type(condition) == list:\n for el in condition:\n # this is effectively connecting the statements by *or*\n result.extend([act for act in db if like(act[field], el)])\n else:\n result.extend([act for act in db if like(act[field], condition)])\n\n for field in mask:\n condition = mask[field]\n if type(condition) == list:\n for el in condition:\n # this is effectively connecting the statements by *and*\n result = [act for act in result if notlike(act[field], el)]\n else:\n result = [act for act in result if notlike(act[field], condition)]\n return result\n\n def generate_sets_from_filters(self, filtr):\n \"\"\"\n Generate a dictionary with sets of activity names for\n technologies from the filter specifications.\n\n :param fltr: A dictionary with labels and filter conditions as given to\n :func:`activity_maps.InventorySet.act_fltr`.\n :return: dictionary with the same keys as provided in filter\n and a set of activity data set names as values.\n :rtype: dict\n \"\"\"\n techs = {tech: self.act_fltr(self.db, **fltr) for tech, fltr in filtr.items()}\n return {\n tech: set([act[\"name\"] for act in actlst]) for tech, actlst in techs.items()\n }\n","sub_path":"rmnd_lca/activity_maps.py","file_name":"activity_maps.py","file_ext":"py","file_size_in_byte":10335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"308875588","text":"\ndef main():\n res = set('123456789')\n m = 0\n for i in range(int(1e6)):\n s = ''\n j = 1\n while len(s+str(i*j)) < 10:\n s += str(i*j)\n j+=1\n if set(s) == res and int(s) > m:\n m = int(s)\n print(m)\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/p38.py","file_name":"p38.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"388892524","text":"# Copyright 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tests.utils\n\n\nclass CommonPathTest(tests.utils.TestCase):\n includes = [(\"@fbcode_macros//build_defs/lib:common_paths.bzl\", \"common_paths\")]\n\n @tests.utils.with_project()\n def test_returns_correct_paths_with_default(self, root):\n result = root.runUnitTests(\n self.includes,\n [\"common_paths.get_buck_out_path()\", \"common_paths.get_gen_path()\"],\n )\n self.assertSuccess(result, \"buck-out\", \"buck-out/gen\")\n\n @tests.utils.with_project()\n def test_returns_correct_paths_with_config(self, root):\n root.updateBuckconfig(\"project\", \"buck_out\", \"buck-out/dev\")\n result = root.runUnitTests(\n self.includes,\n [\"common_paths.get_buck_out_path()\", \"common_paths.get_gen_path()\"],\n )\n self.assertSuccess(result, \"buck-out/dev\", \"buck-out/dev/gen\")\n\n @tests.utils.with_project()\n def test_returns_correct_current_directory(self, root):\n result = root.runUnitTests(self.includes, [\"common_paths.CURRENT_DIRECTORY\"])\n self.assertSuccess(result, \".\")\n","sub_path":"infra_macros/fbcode_macros/tests/lib/common_paths_test.py","file_name":"common_paths_test.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"192633662","text":"import random\nsuits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')\nranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')\nvalues = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,'Queen':10, 'King':10, 'Ace':11}\nplaying = True\nclass Card():\n\n def __init__(self, suit, rank):\n self.suit = suit\n self.rank = rank\n def __str__(self):\n return self.rank + \" of \" + self.suit\n\nclass Deck():\n\n def __init__(self):\n self.deck = []\n for suit in suits:\n for rank in ranks:\n self.deck.append(Card(suit, rank))\n def __str__(self):\n deck_comp=\"\"\n for card in self.deck:\n deck_comp+=\"\\n\" + card.__str__()\n return \"The deck has \" + deck_comp\n def shuffle(self):\n random.shuffle(self.deck)\n def deal(self):\n sin_card=self.deck.pop()\n return sin_card\n\nclass Hand():\n\n def __init__(self):\n self.value = 0\n self.aces = 0\n self.cards = []\n def add_card(self,card):\n self.cards.append(card)\n self.value += values[card.rank]\n if card.rank == \"Ace\":\n self.aces += 1\n def adjust_for_aces(self):\n while self.value > 21 and self.aces:\n self.value -= 1\n self.aces -= 1\n\nclass Chips():\n def __init__(self,total=100):\n self.total=total\n self.bet=0\n def win(self):\n self.total += self.bet\n def lose(self):\n self.total -= self.bet\n\ndef take_chips(chips):\n while True:\n try:\n chips.bet = int(input(\"Enter your bet!\"))\n except:\n print(\"Enter an Integer\")\n else:\n if chips.bet > chips.total:\n print(f\"you are entering more than total chips the total is {chips.total}\")\n else:\n break\n\ndef hit(deck,hand):\n sin_card = deck.deal()\n hand.add_card(sin_card)\n hand.adjust_for_aces()\n\ndef hit_or_stand(deck,hand):\n global playing\n while True:\n x = input(\"choose Hit or Stand or enter h or s\")\n if x[0] == \"h\":\n hit(deck, hand)\n elif x[0] == \"s\":\n print(\"Dealer's turn\")\n playing = False\n else:\n print(\"Sorry i don't understand enter a valid choice\")\n continue\n break\n\n\ndef player_busts(player, dealer, chips):\n 
print(\"player lose\")\n chips.lose()\n\ndef player_wins(player, dealer, chips):\n print(\"player wins\")\n chips.win()\n\ndef dealer_busts(player, dealer, chips):\n print(\"dealer busts\")\n chips.lose()\n\ndef dealer_wins(player, dealer, chips):\n print(\"dealer wins\")\n chips.win()\n\ndef push(player, dealer):\n print(\"Both Tied\")\n\n\ndef show_some(player, dealer):\n print(\"Dealers Hand:\")\n print(\"One card hidden!\")\n print(dealer.cards[1])\n print(\"\\n\")\n print(\"Player cards:\")\n for card in player.cards:\n print(card)\n\n\ndef show_all(player, dealer):\n print(\"Dealers Hand:\")\n for card in dealer.cards:\n print(card)\n print(\"\\n\")\n print(\"Player cards:\")\n for card in player.cards:\n print(card)\n\nwhile True:\n print(\"welcome to BlackJack\")\n deck = Deck()\n deck.shuffle()\n player_hand = Hand()\n player_hand.add_card(deck.deal())\n player_hand.add_card(deck.deal())\n dealer_hand = Hand()\n dealer_hand.add_card(deck.deal())\n dealer_hand.add_card(deck.deal())\n player_chips = Chips()\n take_chips(player_chips)\n show_some(player_hand, dealer_hand)\n\n while playing:\n hit_or_stand(deck, player_hand)\n show_some(player_hand, dealer_hand)\n if player_hand.value > 21:\n player_busts(player_hand, dealer_hand, player_chips)\n break\n if player_hand.value <= 21:\n while dealer_hand.value < player_hand.value:\n hit_or_stand(deck, dealer_hand)\n show_all(player_hand, dealer_hand)\n if dealer_hand.value > 21:\n dealer_busts(player_hand, dealer_hand, player_chips)\n elif dealer_hand.value > player_hand.value:\n dealer_wins(player_hand, dealer_hand, player_chips)\n elif player_hand.value > dealer_hand.value:\n dealer_wins(player_hand, dealer_hand, player_chips)\n else:\n push(player_hand, dealer_hand)\n print(\"\\n\")\n print(f\"total player chips are {player_chips.total}\")\n new_game = input(\"wanna play more? 
yes or no\")\n if new_game[0].lower == \"y\":\n playing = True\n continue\n else:\n print(\"Thank you for playing\")\n break\n\n\n\n\n\n\n\n\n","sub_path":"python_problems/Black_jack.py","file_name":"Black_jack.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"235935285","text":"import requests as webRequest\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport re\r\nimport time\r\nimport json\r\nimport threadpool\r\n\r\nname = ['test']\r\n\r\ndef grabdata(webID):\r\n global name\r\n # print(webID)\r\n webstr = 'http://www.win4000.com/mobile_detail_' + str(webID) + '.html'\r\n # webstr = 'http://www.win4000.com/wallpaper_detail_' + str(webID) + '.html'\r\n # webstr = 'http://www.win4000.com/meinv' + str(webID) + '.html'\r\n webResponse = webRequest.get(webstr)\r\n stra = webResponse.content\r\n stra = stra.decode('utf-8')\r\n soup = BeautifulSoup(stra, \"html.parser\")\r\n strb = soup.find('title')\r\n print (strb)\r\n \r\n # print (strc)\r\n for singleName in name:\r\n if (re.search(singleName, str(strb)) != None):\r\n print (str(webID) + \" \" + str(strb))\r\n # print (webstr)\r\n strc = soup.find('img', attrs={'class': 'pic-large'})\r\n # print (strc)\r\n imgSrc = re.search('src=\".*jpg\"', str(strc)).group()\r\n imgSrc = imgSrc[5:]\r\n imgSrc = imgSrc[:-1]\r\n print (imgSrc)\r\n imgReponse = webRequest.get(imgSrc)\r\n open('.\\\\pics\\\\'+ singleName + '\\\\' + str(webID) + '.jpg', 'wb').write(imgReponse.content)\r\n\r\nif __name__ == \"__main__\":\r\n for singleName in name:\r\n if (os.path.exists('.\\\\pics\\\\' + singleName) == False):\r\n os.mkdir('.\\\\pics\\\\' + singleName) \r\n pool = threadpool.ThreadPool(5)\r\n idSet = [160000]\r\n for i in range(210001, 217916):\r\n idSet.append(i)\r\n requests = threadpool.makeRequests(grabdata, idSet)\r\n [pool.putRequest(req) for req in requests]\r\n pool.wait()\r\n\r\n # grabdata(177921)","sub_path":"test_win4000.py","file_name":"test_win4000.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"200075908","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.dont_write_bytecode = True\n\nimport glob\nimport yaml\nimport json\nimport os\nimport sys\nimport time\nimport logging\nfrom argparse import ArgumentParser\nimport traceback\n\nfrom slackclient import SlackClient\nfrom slackclient._channel import Channel\nfrom websocket._exceptions import WebSocketConnectionClosedException\n\n\ndef dbg(debug_string):\n if debug:\n logging.info(debug_string)\n\n\nclass MySlackClient (SlackClient):\n pass\n\n\nclass MyChannel(Channel):\n \n def post_message(self, message, username, emoji):\n \n as_user = 'false'\n \n if username == '' and emoji == '':\n as_user = 'true'\n\n message_json = {\n \"channel\": self.id, \n \"text\": message, \n \"username\": username, \n \"as_user\": as_user,\n \"icon_url\": '',\n \"icon_emoji\": emoji\n }\n\n if debug:\n print('MESSAGE JSON: ', message_json)\n \n self.server.api_call(\"chat.postMessage\", **message_json)\n\n\nclass RtmBot(object):\n\n def __init__(self, token):\n self.last_ping = 0\n self.token = token\n self.bot_plugins = []\n self.slack_client = None\n\n def connect(self):\n \"\"\"Convenience method that creates Server instance\"\"\"\n self.slack_client = MySlackClient(self.token)\n self.slack_client.rtm_connect()\n\n def reconnect(self):\n del self.slack_client\n self.slack_client = 
MySlackClient(self.token)\n self.slack_client.rtm_connect()\n\n def start(self):\n self.connect()\n self.load_plugins()\n while True:\n try:\n for reply in self.slack_client.rtm_read():\n self.input(reply)\n self.crons()\n self.output()\n self.autoping()\n time.sleep(.1)\n# except Exception as e:\n# print(e)\n except (ConnectionResetError, TimeoutError, WebSocketConnectionClosedException):\n self.reconnect()\n\n def autoping(self):\n #hardcode the interval to 3 seconds\n now = int(time.time())\n if now > self.last_ping + 3:\n self.slack_client.server.ping()\n self.last_ping = now\n\n def input(self, data):\n if \"type\" in data:\n try:\n print(\"input: \" + str(data))\n if data[\"type\"] == \"message\":\n #if \"team\" in data and \"user\" in data:\n if \"user\" in data:\n #team_id = data[\"team\"]\n #team_id = 'rtmbot'\n user_id = data[\"user\"]\n #if team_id not in profiles:\n #if 'rtmbot' not in profiles:\n #profiles[team_id] = dict()\n #profiles['rtmbot'] = dict()\n if user_id not in profiles: #[team_id]:\n json_res = json.dumps(self.slack_client.api_call(\"users.info\", user=data[\"user\"]), ensure_ascii=False)\n \n if debug:\n print(data)\n print(type(json_res))\n print(json_res)\n print(' ^^^ Try to get json.dumps of user info ^^^ ')\n \n res = json.loads(json_res)\n #profiles[team_id][user_id] = {\n profiles[user_id] = {\n \"name\": res[\"user\"][\"name\"], \n \"tz\": res[\"user\"][\"tz\"],\n \"is_bot\": res[\"user\"][\"is_bot\"],\n \"real_name\": res[\"user\"][\"real_name\"],\n \"tz_offset\": res[\"user\"][\"tz_offset\"],\n \"tz_label\": res[\"user\"][\"tz_label\"],\n }\n #data.update(profiles[team_id][user_id])\n data.update(profiles[user_id])\n print('profiles: ', profiles)\n\n else:\n if debug:\n print(data) # print data to stdout about any other events \n except:\n print(\"Parsing of message data didn't quite work as expected\")\n print(traceback.print_exc())\n \n function_name = \"process_\" + data[\"type\"]\n dbg(\"got {}\".format(function_name))\n for plugin in self.bot_plugins:\n plugin.register_jobs()\n plugin.do(function_name, data)\n\n\n def output(self):\n for plugin in self.bot_plugins:\n limiter = False\n for output in plugin.do_output():\n channel = self.slack_client.server.channels.find(output[0])\n if channel is not None and output[1] is not None:\n if limiter == True:\n time.sleep(.1)\n limiter = False\n message = output[1]\n if len(output) > 3:\n username = output[2]\n emoji = output[3]\n else:\n username = ''\n emoji = ''\n channel.__class__ = MyChannel\n #channel.send_message(\"{}\".format(message)) # send message to channel\n channel.post_message(\"{}\".format(message), username, emoji)\n channel.__class__ = Channel\n limiter = True\n\n def crons(self):\n for plugin in self.bot_plugins:\n plugin.do_jobs()\n\n def load_plugins(self):\n time.sleep(1)\n for plugin in glob.glob(directory+'/plugins/*'):\n sys.path.insert(0, plugin)\n sys.path.insert(0, directory+'/plugins/')\n for plugin in glob.glob(directory+'/plugins/*.py') + glob.glob(directory+'/plugins/*/*.py'):\n logging.info(plugin)\n name = plugin.split('/')[-1][:-3]\n# try:\n self.bot_plugins.append(Plugin(name))\n# except:\n# print \"error loading plugin %s\" % name\n\n\nclass Plugin(object):\n\n def __init__(self, name, plugin_config={}):\n self.name = name\n self.jobs = []\n self.module = __import__(name)\n self.register_jobs()\n self.outputs = []\n if name in config:\n logging.info(\"config found for: \" + name)\n self.module.config = config[name]\n if 'setup' in dir(self.module):\n 
self.module.setup()\n\n def register_jobs(self):\n if 'crontable' in dir(self.module):\n for interval, function in self.module.crontable:\n self.jobs.append(Job(interval, eval(\"self.module.\"+function)))\n logging.info(self.module.crontable)\n self.module.crontable = []\n else:\n self.module.crontable = []\n\n def do(self, function_name, data):\n if function_name in dir(self.module):\n #this makes the plugin fail with stack trace in debug mode\n if not debug:\n try:\n eval(\"self.module.\"+function_name)(data)\n except:\n dbg(\"problem in module {} {}\".format(function_name, data))\n else:\n eval(\"self.module.\"+function_name)(data)\n if \"catch_all\" in dir(self.module):\n try:\n self.module.catch_all(data)\n except:\n dbg(\"problem in catch all\")\n\n def do_jobs(self):\n for job in self.jobs:\n job.check()\n\n def do_output(self):\n output = []\n while True:\n if 'outputs' in dir(self.module):\n if len(self.module.outputs) > 0:\n logging.info(\"output from {}\".format(self.module))\n output.append(self.module.outputs.pop(0))\n else:\n break\n else:\n self.module.outputs = []\n return output\n\n\nclass Job(object):\n\n def __init__(self, interval, function):\n self.function = function\n self.interval = interval\n self.lastrun = 0\n\n def __str__(self):\n return \"{} {} {}\".format(self.function, self.interval, self.lastrun)\n\n def __repr__(self):\n return self.__str__()\n\n def check(self):\n if self.lastrun + self.interval < time.time():\n if not debug:\n try:\n self.function()\n except:\n dbg(\"problem\")\n else:\n self.function()\n self.lastrun = time.time()\n pass\n\n\nclass UnknownChannel(Exception):\n pass\n\n\ndef main_loop():\n if \"LOGFILE\" in config:\n logging.basicConfig(filename=config[\"LOGFILE\"], level=logging.INFO, format='%(asctime)s %(message)s')\n logging.info(directory)\n try:\n bot.start()\n except KeyboardInterrupt:\n sys.exit(0)\n except:\n logging.exception('OOPS')\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument(\n '-c',\n '--config',\n help='Full path to config file.',\n metavar='path'\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n directory = os.path.dirname(sys.argv[0])\n if not directory.startswith('/'):\n directory = os.path.abspath(\"{}/{}\".format(os.getcwd(),\n directory\n ))\n\n config = yaml.load(open(args.config or 'rtmbot.conf', 'r'))\n debug = config[\"DEBUG\"]\n bot = RtmBot(config[\"SLACK_TOKEN\"])\n site_plugins = []\n files_currently_downloading = []\n job_hash = {}\n profiles = {}\n\n if \"DAEMON\" in config:\n if config[\"DAEMON\"]:\n import daemon\n with daemon.DaemonContext():\n main_loop()\n main_loop()\n\n","sub_path":"rtmbot.py","file_name":"rtmbot.py","file_ext":"py","file_size_in_byte":10281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"326455158","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n '''\n This function loads data as from given files.\n Inputs :\n path of files\n messages_filepath :this is message \n categories_filepath:this is Answer file path\n \n OUTPUT: \n \n return :dataframe as df\n '''\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = pd.merge(messages, categories, left_on='id', right_on='id', how='outer')\n return df\n\n\ndef clean_data(df):\n '''\n This function will clean data.drop unwanted dublicate columns and dat. 
\n \n INPUT: dataframe \n \n \n OUTPUT: Clean Dataframe\n '''\n categories = df.categories.str.split(';', expand =True)\n row = categories.loc[0]\n category_colnames = row.apply(lambda x: x[:-2]).values.tolist()\n categories.columns = category_colnames\n categories.related.loc[categories.related == 'related-2'] = 'related-1'\n df.drop('categories', axis = 1, inplace = True)\n df = pd.concat([df, categories], axis = 1)\n df.drop_duplicates(subset = 'id', inplace = True)\n return df\n\n\n\ndef save_data(df, database_filename):\n \n '''\n Input : dataframe and database file name\n \n Output :save in database \n '''\n engine=create_engine('sqlite:///' + database_filename)\n df.to_sql('disaster_data',engine,index=False,if_exists = 'replace')\n \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"184325058","text":"#!/usr/bin/env python3\nfrom simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nimport os\n\ntry:\n platform = Platform.getPlatformByName(\"CUDA\")\nexcept Exception:\n platform = Platform.getPlatformByName(\"OpenCL\")\n\nprotein_pdb = \"proteins/villin/1vii.pdb\"\npdb = PDBFile(protein_pdb)\nforcefield = ForceField('amber99sb.xml', 'tip3p.xml')\n\nmodeller = Modeller(pdb.topology, pdb.positions)\nprint(modeller.topology)\n\nsystem = forcefield.createSystem(modeller.topology, nonbondedMethod=NoCutoff, nonbondedCutoff=1*nanometer, constraints=HBonds)\nintegrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)\nsimulation = Simulation(modeller.topology, system, integrator, platform)\nsimulation.context.setPositions(modeller.positions)\nsimulation.minimizeEnergy()\nsimulation.reporters.append(PDBReporter('/tmp/output.pdb', 25))\nsimulation.reporters.append(StateDataReporter(stdout, 100, step=True, potentialEnergy=True, temperature=True))\nsimulation.step(5000)\n\n","sub_path":"fold.py","file_name":"fold.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"632513881","text":"import sys, pygame, util\r\nfrom pygame.locals import *\r\nfrom heroe import *\r\n\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 400\r\n\r\ndef game():\r\n pygame.init()\r\n screen = pygame.display.set_mode( (SCREEN_WIDTH,SCREEN_HEIGHT) )\r\n pygame.display.set_caption( \"Flappy\" )\r\n background_image = util.cargar_imagen('fondo.jpg');\r\n pygame.mouse.set_visible( False )\r\n heroe = Heroe()\r\n \r\n while True: 
\r\n heroe.update((SCREEN_WIDTH,SCREEN_HEIGHT)) \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n screen.blit(background_image, (0,0))\r\n screen.blit(heroe.image, heroe.rect) \r\n pygame.display.update()\r\n pygame.time.delay(10)\r\n\r\n \r\nif __name__ == '__main__':\r\n game()\r\n\r\n","sub_path":"ptython_1012019/flappy/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"317660788","text":"\n# -*- coding: utf-8 -*-\n\n__author__ = 'Farsheed Ashouri'\n\n\"\"\"\n ___ _ _\n / __\\_ _ _ __ ___| |__ ___ ___ __| |\n / _\\/ _` | '__/ __| '_ \\ / _ \\/ _ \\/ _` |\n/ / | (_| | | \\__ \\ | | | __/ __/ (_| |\n\\/ \\__,_|_| |___/_| |_|\\___|\\___|\\__,_|\n\nJust remember: Each comment is like an apology!\nClean code is much better than Cleaner comments!\n'''\n\n\n'''\n@desc: utils/parsers/alfred.py\n@author: F.Ashouri\n\"\"\"\n\nimport re\n\n\ndef parse(alfredJob):\n '''Regex parser\n based on: http://regexr.com/3bg9a\n '''\n #pat = r'Task \\-title \\{Render ([\\w ]+)\\} [\\w \\- \\{ \\n : %]+\"%D\\(([\\w:/\\-]+)\\)\"[ ]+\"%D\\(([\\w\\d . /]+)\\)\"[\\} \\- \\n \\w \\{:]+sho[ \\n]+\"([\\w :\\d/\\.\\-]+)\"'\n pat = r'Task \\-title \\{(.*)}[\\w \\- & \\{ \\n : % \\.]*\"%D\\((.*)\\)\"[& ]*\"%D\\((.*)\\)\"\\} [\\n \\- & \\w\\d\\{:\\}]*sho [\\n]*\"([\\w\\d \\. \\/ & \\-]+)\"'\n data = re.findall(re.compile(pat), alfredJob)\n # namePat = re.compile(r'[-+]?\\d+[\\.]?\\d*') ## extract digits\n result = {'tasks': [{\n 'name': '%s-%s' % (str(data.index(i) + 1).zfill(4), i[0]),\n 'cwd':i[1],\n 'filepath':i[2], # command will act upon this file\n 'target':i[3] # command will produce this file\n } for i in data]\n }\n return result\n","sub_path":"utils/parsers/alfred.py","file_name":"alfred.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433792337","text":"from ex2 import *\nimport pytest # type: ignore\nimport secrets\nimport hashlib\nfrom typing import Callable, List, Any\nfrom unittest.mock import Mock\n\nEvilNodeMaker = Callable[[List[Block]], Mock]\nKeyFactory = Callable[[], PublicKey]\n\n\ndef test_wallet_functionality_at_init(alice: Node) -> None:\n assert alice.get_address()\n assert alice.get_balance() == 0\n assert alice.create_transaction(alice.get_address()) is None\n\n\ndef test_node_functionality_at_init(alice: Node) -> None:\n assert alice.get_utxo() == []\n assert alice.get_latest_hash() == GENESIS_BLOCK_PREV\n assert alice.get_mempool() == []\n\n\ndef test_mine_single_block_and_generate_coin(alice: Node) -> None:\n block_hash = alice.mine_block()\n assert block_hash != GENESIS_BLOCK_PREV\n assert alice.get_latest_hash() == block_hash\n assert len(alice.get_utxo()) == 1\n assert alice.get_mempool() == []\n assert alice.get_balance() == 1\n\n block = alice.get_block(block_hash)\n assert block.get_block_hash() == block_hash\n assert block.get_prev_block_hash() == GENESIS_BLOCK_PREV\n transactions = block.get_transactions()\n assert transactions[0] == alice.get_utxo()[0]\n assert transactions[0].input is None\n assert transactions[0].output == alice.get_address()\n\n\ndef test_retreive_block_fails_on_junk_hash(alice: Node) -> None:\n with pytest.raises(ValueError):\n alice.get_block(GENESIS_BLOCK_PREV)\n bogus_hash = BlockHash(hashlib.sha256(b\"no_such_block\").digest())\n with pytest.raises(ValueError):\n 
alice.get_block(bogus_hash)\n h = alice.mine_block()\n with pytest.raises(ValueError):\n alice.get_block(bogus_hash)\n assert alice.get_block(h)\n\n\ndef test_transaction_creation(alice: Node, bob: Node, charlie: Node) -> None:\n alice.mine_block()\n assert alice.get_balance() == 1\n tx = alice.create_transaction(bob.get_address())\n assert tx is not None\n assert tx.input == alice.get_utxo()[0].get_txid()\n assert tx.output == bob.get_address()\n assert bob.get_balance() == 0\n assert charlie.get_balance() == 0\n\n\ndef test_node_updates_when_notified(alice: Node, evil_node_maker: EvilNodeMaker,\n make_key: KeyFactory) -> None:\n block1 = Block(GENESIS_BLOCK_PREV, [Transaction(make_key(), None, Signature(secrets.token_bytes(48)))])\n block_chain = [block1]\n eve = evil_node_maker(block_chain)\n alice.notify_of_block(eve.get_latest_hash(), eve)\n assert alice.get_latest_hash() != GENESIS_BLOCK_PREV\n\n\ndef test_node_updates_when_notified_two_blocks(alice: Node, evil_node_maker: EvilNodeMaker,\n make_key: KeyFactory) -> None:\n tx1 = Transaction(make_key(), None, Signature(secrets.token_bytes(48)))\n block1 = Block(GENESIS_BLOCK_PREV, [tx1])\n tx2 = Transaction(make_key(), None, Signature(secrets.token_bytes(48)))\n block2 = Block(block1.get_block_hash(), [tx2])\n\n block_chain = [block1, block2]\n eve = evil_node_maker(block_chain)\n alice.notify_of_block(eve.get_latest_hash(), eve)\n assert alice.get_latest_hash() == block2.get_block_hash()\n assert tx1 in alice.get_utxo()\n assert tx2 in alice.get_utxo()\n assert len(alice.get_utxo()) == 2\n assert alice.get_block(block1.get_block_hash()) == block1\n assert alice.get_block(block2.get_block_hash()) == block2\n\n\ndef test_node_does_not_update_when_alternate_chain_does_not_lead_to_genesis(alice: Node, evil_node_maker: EvilNodeMaker,\n make_key: KeyFactory) -> None:\n block1 = Block(BlockHash(hashlib.sha256(b\"Not Genesis\").digest()),\n [Transaction(make_key(), None, Signature(secrets.token_bytes(48)))])\n block2 = Block(block1.get_block_hash(), [Transaction(make_key(), None, Signature(secrets.token_bytes(48)))])\n block3 = Block(block2.get_block_hash(), [Transaction(make_key(), None, Signature(secrets.token_bytes(48)))])\n\n evil_node = evil_node_maker([block1, block2, block3])\n\n alice.notify_of_block(block3.get_block_hash(), evil_node)\n assert alice.get_latest_hash() == GENESIS_BLOCK_PREV\n\n\ndef test_node_does_not_fully_update_when_some_transaction_is_bad(alice: Node, bob: Node, evil_node_maker: EvilNodeMaker,\n make_key: KeyFactory) -> None:\n bob.mine_block()\n tx0 = bob.create_transaction(alice.get_address())\n assert tx0 is not None\n\n tx1 = Transaction(make_key(), None, Signature(secrets.token_bytes(48)))\n block1 = Block(GENESIS_BLOCK_PREV, [tx1])\n tx2 = Transaction(make_key(), None, Signature(secrets.token_bytes(48)))\n tx3 = Transaction(make_key(), tx1.get_txid(), tx0.signature) # the sig here is wrong!\n\n block2 = Block(block1.get_block_hash(), [tx2, tx3])\n mock_node = evil_node_maker([block1, block2])\n alice.notify_of_block(mock_node.get_latest_hash(), mock_node)\n assert alice.get_latest_hash() == block1.get_block_hash()\n\n\ndef test_node_does_not_update_when_creating_too_much_money(alice: Node, evil_node_maker: EvilNodeMaker,\n make_key: KeyFactory) -> None:\n tx1 = Transaction(make_key(), None, Signature(secrets.token_bytes(48)))\n tx2 = Transaction(make_key(), None, Signature(secrets.token_bytes(48)))\n block = Block(GENESIS_BLOCK_PREV, [tx1, tx2])\n mock_node = evil_node_maker([block])\n 
alice.notify_of_block(mock_node.get_latest_hash(), mock_node)\n assert alice.get_latest_hash() == GENESIS_BLOCK_PREV\n assert alice.get_utxo() == []\n\n\ndef test_node_double_spends_when_mempool_clears(alice: Node, bob: Node) -> None:\n alice.mine_block()\n tx1 = alice.create_transaction(bob.get_address())\n assert tx1 is not None\n tx2 = alice.create_transaction(bob.get_address())\n assert tx2 is None\n alice.clear_mempool()\n assert alice.get_mempool() == []\n tx3 = alice.create_transaction(bob.get_address())\n assert tx3 is not None\n\n\ndef test_transactions_to_different_targets_are_different(alice: Node, bob: Node, charlie: Node) -> None:\n alice.mine_block()\n tx1 = alice.create_transaction(bob.get_address())\n alice.clear_mempool()\n tx2 = alice.create_transaction(charlie.get_address())\n assert tx1 is not None and tx2 is not None\n assert tx1.get_txid() != tx2.get_txid()\n\n\ndef test_transaction_rejected_if_we_change_output(alice: Node, bob: Node, charlie: Node) -> None:\n alice.mine_block()\n tx = alice.create_transaction(bob.get_address())\n assert tx is not None\n tx2 = Transaction(charlie.get_address(), tx.input, tx.signature)\n alice.clear_mempool()\n assert alice.add_transaction_to_mempool(tx)\n alice.clear_mempool()\n assert not alice.add_transaction_to_mempool(tx2)\n\n\ndef test_transaction_not_propagated_if_it_double_spends_a_mempool_tx(alice: Node, bob: Node, charlie: Node) -> None:\n alice.connect(bob)\n alice.mine_block()\n tx1 = alice.create_transaction(bob.get_address())\n assert tx1 is not None\n alice.clear_mempool()\n assert tx1 in bob.get_mempool()\n bob.connect(charlie)\n tx2 = alice.create_transaction(charlie.get_address())\n assert tx2 is not None\n assert tx2 in alice.get_mempool()\n assert tx2 not in bob.get_mempool()\n assert tx2 not in charlie.get_mempool()\n\n\ndef test_connections_exist(alice: Node, bob: Node, charlie: Node) -> None:\n assert alice.get_connections() == set()\n alice.connect(bob)\n assert bob in alice.get_connections()\n assert alice in bob.get_connections()\n\n bob.connect(charlie)\n bob.disconnect_from(alice)\n assert bob not in alice.get_connections()\n assert alice not in bob.get_connections()\n assert charlie in bob.get_connections()\n\n\ndef test_connect_to_self_fails(alice: Node) -> None:\n with pytest.raises(Exception):\n alice.connect(alice)\n\n\ndef test_connections_propagate_blocks(alice: Node, bob: Node, charlie: Node) -> None:\n alice.connect(bob)\n alice.mine_block()\n assert len(bob.get_utxo()) == 1\n assert alice.get_latest_hash() == bob.get_latest_hash()\n assert charlie.get_latest_hash() == GENESIS_BLOCK_PREV\n\n\ndef test_connections_propagate_txs(alice: Node, bob: Node, charlie: Node) -> None:\n alice.connect(bob)\n alice.mine_block()\n\n tx = alice.create_transaction(bob.get_address())\n assert tx in bob.get_mempool()\n assert tx not in charlie.get_mempool()\n\n\ndef test_block_hash(alice: Node) -> None:\n block_hash1 = alice.mine_block()\n block1 = alice.get_block(block_hash1)\n assert block1.get_block_hash() == block_hash1\n\n transactions = block1.get_transactions()\n prev = block1.get_prev_block_hash()\n bogus_hash = BlockHash(hashlib.sha256(b\"no_such_block\").digest())\n block2 = Block(bogus_hash, transactions)\n block3 = Block(prev, transactions * 2)\n block4 = Block(prev, [])\n assert block2.get_block_hash() != block_hash1\n assert block3.get_block_hash() != block_hash1\n assert block4.get_block_hash() != block_hash1\n\n\ndef test_catching_up_after_disconnect(alice: Node, bob: Node) -> None:\n 
alice.connect(bob)\n alice.mine_block()\n alice.disconnect_from(bob)\n h2 = alice.mine_block()\n alice.connect(bob)\n assert bob.get_latest_hash() == h2\n\n\ndef test_longer_chain_overtake(alice: Node, bob: Node) -> None:\n h1 = alice.mine_block()\n h2 = alice.mine_block()\n bob.mine_block()\n alice.connect(bob)\n assert bob.get_latest_hash() == h2\n assert bob.get_block(h2).get_prev_block_hash() == h1\n assert bob.get_block(h1).get_prev_block_hash() == GENESIS_BLOCK_PREV\n assert set(bob.get_utxo()) == set(alice.get_utxo())\n\n\ndef test_tx_surives_in_mempool_if_not_included_in_block(alice: Node, bob: Node) -> None:\n alice.connect(bob)\n bob.mine_block()\n bob.create_transaction(alice.get_address())\n bob.disconnect_from(alice)\n\n alice.clear_mempool()\n block_hash = alice.mine_block()\n bob.connect(alice)\n assert bob.get_latest_hash() == block_hash\n assert len(bob.get_mempool()) == 1\n\n\ndef test_tx_replaced_in_blockchain_when_double_spent(alice: Node, bob: Node, charlie: Node) -> None:\n alice.connect(bob)\n alice.connect(charlie)\n alice.mine_block()\n alice.disconnect_from(charlie)\n tx1 = alice.create_transaction(bob.get_address())\n alice.mine_block()\n alice.disconnect_from(bob)\n\n assert tx1 in bob.get_utxo()\n assert tx1 in alice.get_utxo()\n\n charlie.mine_block()\n charlie.mine_block()\n\n alice.connect(charlie)\n alice.clear_mempool() # in case you restore transactions to mempool\n assert tx1 not in alice.get_utxo()\n assert tx1 not in alice.get_mempool()\n tx2 = alice.create_transaction(charlie.get_address())\n assert tx2 is not None\n assert tx2 in alice.get_mempool()\n alice.mine_block()\n alice.mine_block()\n assert tx2 in alice.get_utxo()\n alice.connect(bob)\n assert tx2 in bob.get_utxo()\n assert tx1 not in bob.get_utxo()\n assert tx1 not in bob.get_mempool()\n\n\ndef test_bob_serves_wrong_block(alice: Node, bob: Node, charlie: Node, monkeypatch: Any) -> None:\n # we ask charlie to create a block\n h1 = charlie.mine_block()\n block = charlie.get_block(h1)\n\n h2 = bob.mine_block()\n\n # now we make bob respond to block requests with charlie's block\n monkeypatch.setattr(bob, \"get_block\", lambda block_hash: block)\n\n alice.connect(bob)\n assert alice.get_latest_hash() == GENESIS_BLOCK_PREV\n assert alice.get_utxo() == []\n","sub_path":"exercise2/ex2_original_files/test_ex2_basic.py","file_name":"test_ex2_basic.py","file_ext":"py","file_size_in_byte":11461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447378290","text":"#!/usr/bin/env python\nimport logging\n\nimport connexion\nfrom connexion.resolver import MethodViewResolver\n\nlogging.basicConfig(level=logging.INFO)\n\nif __name__ == '__main__':\n app = connexion.FlaskApp(__name__, specification_dir='openapi/', debug=True)\n\n options = {\"swagger_ui\": True}\n app.add_api('pets-api.yaml',\n options=options,\n arguments={'title': 'MethodViewResolver Example'},\n resolver=MethodViewResolver('api'), strict_validation=True, validate_responses=True )\n app.run(port=9090)\n","sub_path":"examples/openapi3/methodresolver/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"114156851","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n\"\"\"\r\nimport BaiduMap.addrGet as addrget\r\n#import headItem.collectBuild as colBuild\r\nfrom headItem.collectBuild import collectbuild\r\nfrom headItem.headleClass import headleClass\r\n\r\n#第一部分 --> 
调用百度API查询地址\r\naddrGet=addrget.Baidu();\r\ncityname='佛山市'\r\ncity='fs'\r\ndb='address'\r\nbatch='1'\r\ndistance='2000'\r\ntablename='b_building_zong_real' #固定形式\r\npath ='E:\\str2num.txt' #构建数字字典需要读取的内容\r\n#path = '/Users/yaochunling/Downloads/work/chiese2num.txt'\r\n#将所有的数据导入BD_info表格中\r\naddrGet.get_BD(db,batch,distance,tablename,cityname)\r\n\r\n#第二部分 --> 数据整理\r\n\r\n#根据批次和城市,将所有数据通过去重之后导入ALL_building表格中\r\n\r\ncollectbuild( db ).building_clarify( db, batch, path,cityname)\r\n\r\n#sss = colBuild.collectbuild( db )\r\n\r\n#sss.building_clarify(db,batch,path)\r\n\r\n\r\n#第三部分数据处理 --> id匹配\r\n# 初始化数据\r\n#headleClass(db).initial_data( db,'c_all_building',batch,cityname );\r\n\r\n# 执行分类逻辑\r\nid_tablename='c_building_id'\r\ntablename1='c_all_building'\r\n\r\nheadleClass(db).area_classify(db,tablename1,id_tablename,city,batch ,cityname);\r\n","sub_path":"main_3.py","file_name":"main_3.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"410958592","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 28 18:26:15 2019\n\n@author: 683898\n\"\"\"\n\nclass itemImage:\n \n def __init__(self,index=0,className=None,ImagePath=None,MaskPath=None,\n Height=0,Weidth=0,Xmin=0,Ymin=0,Xmax=0,Ymax=0):\n self.id=index\n self.className=className\n self.ImagePath=ImagePath\n self.MaskPath=MaskPath\n self.Height=Height\n self.Weidth=Weidth\n self.Xmin=Xmin\n self.Ymin=Ymin\n self.Xmax=Xmax\n self.Ymax=Ymax\n \n @property\n def returnData(self):\n return {\n \"id\": self.id,\n \"className\": self.className,\n \"ImagePath\": self.ImagePath,\n \"MaskPath\" : self.MaskPath,\n \"Height\" : self.Height,\n \"Weidth\" : self.Weidth,\n \"Xmin\" : self.Xmin,\n \"Ymin\" : self.Ymin,\n \"Xmax\" : self.Xmax,\n \"Ymax\" : self.Ymax\n }\n \n ","sub_path":"utility/itemImage.py","file_name":"itemImage.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"640137416","text":"import json\r\nimport pandas as pd\r\n\r\n\r\ndef load_session_json_data(filename):\r\n df = pd.read_json(filename, orient='rows')\r\n return df\r\n\r\n\r\nsessions = load_session_json_data('test3.json')\r\n# print(sessions.head(10))\r\nlookup = input(\"Add page: \")\r\nx = sessions[\"Page\"] == lookup\r\nprint(sessions[x])\r\n# data = sessions[lookup].to_json()\r\n\r\n\r\n# lookup = sessions.sessionID == session_id\r\n\r\n# print(len(sessions))\r\n\r\n\"\"\"\r\ncnt = len(csv[csv['Age'] == 22])\r\nprint(cnt) # outputs number of rows where age is 22\r\n\r\ncnt = len(csv[(csv['Age'] == 22) & (csv['Sex'] == 'female')])\r\nprint(cnt) # outputs number of rows where age is 22 and sex is female\r\n\r\ncnt = len(csv[(csv['Age'] < 10) | (csv['Age'] > 30)])\r\nprint(cnt) # outputs number of rows where age is less than 10 or greater than 30\r\n\r\nlookup = sessions.sessionID == session_id\r\ndata = sessions[lookup].to_json()\r\nreturn data\"\"\"\r\n\r\n\r\n","sub_path":"3-APIs/SessExplore/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"565155551","text":"import copy\nimport random\n# Consider using the modules imported above.\n\nclass Hat:\n def __init__(self,**kwargs):\n temp = \"\"\n for color,total in kwargs.items():\n temp += (color + \" \")*total\n \n self.contents = temp.strip().split(\" \")\n\n def draw(self, num_balls):\n if num_balls > 
len(self.contents):\n return self.contents\n randomChoice = list()\n for i in range(num_balls):\n deleteTheBall = random.choice(self.contents)\n self.contents.remove(deleteTheBall)\n randomChoice.append(deleteTheBall)\n \n return randomChoice\n \n \n\n \ndef experiment(hat, expected_balls, num_balls_drawn, num_experiments):\n solve = 0\n for i in range(num_experiments):\n cp_hat = copy.deepcopy(hat)\n ans = []\n luck = cp_hat.draw(num_balls_drawn)\n\n for color,tot in expected_balls.items():\n if(luck.count(color) >= tot):\n ans.append(True)\n else:\n ans.append(False)\n\n if False not in ans:\n solve +=1\n\n return solve / num_experiments\n\n ","sub_path":"ScientificComputingPython/Project5_probability-calculator/prob_calculator.py","file_name":"prob_calculator.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"553806700","text":"def two_sum(nums, target):\n \"\"\"\n 给定一个整数数组 nums 和一个目标值 target,\n 请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。\n 你可以假设每种输入只会对应一个答案。但是,你不能重复利用这个数组中同样的元素。\n Difficulty: easy\n :param nums: 给定的数组\n :param target: 目标值\n :return:\n \"\"\"\n # 利用哈希表\n # a_dict = {} # 定义一个字典\n # for i, num in enumerate(nums): # 把给定的数组,用enumerate函数提取出值和下标\n # a_dict[num] = i\n # for j, num in enumerate(nums): # 因为不能重复使用同一个值,所以必须再用一个enumerate\n # if (target-num) in a_dict and j != a_dict[target-num]: # 对其进行判断\n # return [j, a_dict[target-num]] # 返回目标结果的下标\n \"\"\"\n analyse: time complexity is O(2n), space complexity is O(n)\n \"\"\"\n\n # 哈希表改进版\n mapping = {}\n for i in range(len(nums)): # 改进核心思路:一边添加一边查找\n num = nums[i] # 将当前遍历到的下标的值取出保存\n if (target - num) in mapping: # 如果存在,则直接返回,由于当前num还没入字典,所以不会重复。\n return mapping[target-num], i # 返回结果值的下标\n mapping[num] = i # 如果当前没有找到,则将当前num加入字典\n \"\"\"\n analyse: time complexity is O(n), space complexity is O(n). 改进后,代码更短,时间复杂度更好。\n 一旦找到,就提前返回。\n \"\"\"\n\n # double pointer\n new_nums = nums[:] # 浅拷贝,为了保留原数组的下标\n new_nums.sort() # 对新数组进行排序\n left = 0\n right = len(new_nums) - 1 # 定义left和right指针分别指向新数组的开头和结尾\n while left < right: # 当left和right不重叠时,不断循环\n if new_nums[left] + new_nums[right] == target: # 当前后两个相加等于目标时\n return nums.index(new_nums[left]), nums.index(new_nums[right])\n elif new_nums[left] + new_nums[right] < target:\n left += 1 # 当前后两个相加小于目标值时,left += 1,右移\n continue\n else: # 当前后两个相加大于目标值时,right -= 1,左移\n right -= 1\n \"\"\"\n analyse: time complexity is O(n^2), space complexity is O(n). 
时间复杂度太大,还是用哈希表比较好。\n 但是其中的双指针思想很有用,排序,相加,比较大小,太大,则右边变小,太小,则左边变大。\n \"\"\"\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"array/04-两数之和.py","file_name":"04-两数之和.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328168895","text":"import time\nfrom os import makedirs, path\nfrom pathlib import Path\n\nfrom keras.optimizers import Adagrad\nfrom numpy import *\n\nfrom corpus_provider import CorpusProvider\nfrom net import Wav2Letter\nfrom spectrogram_batch import LabeledSpectrogramBatchGenerator\n\nbase_directory = Path(path.expanduser('~')) / \"speechless-data\"\nbase_spectrogram_directory = base_directory / \"spectrograms\"\n\n# not Path.mkdir() for compatibility with Python 3.4\nmakedirs(str(base_spectrogram_directory), exist_ok=True)\ntensorboard_log_base_directory = base_directory / \"logs\"\nnets_base_directory = base_directory / \"nets\"\n\n\ndef timestamp() -> str:\n return time.strftime(\"%Y%m%d-%H%M%S\")\n\n\ncorpus = CorpusProvider(base_directory / \"corpus\")\n\n\ndef first_20_examples_sorted_by_length():\n labels = [\n \"thank you dorcas dear\",\n \"yes rachel i do love you\",\n \"dorcas in her strange way was moved\",\n \"you have been so ill my poor rachel\",\n \"thank you rachel my cousin rachel my only friend\",\n \"well she was better though she had had a bad night\",\n \"i like you still rachel i'm sure i'll always like you\",\n \"ill and troubled dear troubled in mind and miserably nervous\",\n \"you resemble me rachel you are fearless and inflexible and generous\",\n \"this transient spring and lighting up are beautiful a glamour beguiling our senses\",\n \"and she threw her arms round her cousin's neck and brave rachel at last burst into tears\",\n \"i have very few to love me now and i thought you might love me as i have begun to love you\",\n \"it is an antipathy an antipathy i cannot get over dear dorcas you may think it a madness but don't blame me\",\n \"yes something everything said rachel hurriedly looking frowningly at a flower which she was twirling in her fingers\",\n \"women can hide their pain better than we men and bear it better too except when shame drops fire into the dreadful chalice\",\n \"and the wan oracle having spoken she sate down in the same sort of abstraction again beside dorcas and she looked full in her cousin's eyes\",\n \"but poor rachel lake had more than that stoical hypocrisy which enables the tortured spirits of her sex to lift a pale face through the flames and smile\",\n \"so there came a step and a little rustling of feminine draperies the small door opened and rachel entered with her hand extended and a pale smile of welcome\",\n \"chelford had a note from mister wylder this morning another note his coming delayed and something of his having to see some person who is abroad continued dorcas after a little pause\",\n \"there was something of sweetness and fondness in her tones and manner which was new to rachel and comforting and she returned the greeting as kindly and felt more like her former self\"]\n\n return sorted([example for example in corpus.examples if example.label.lower() in labels],\n key=lambda x: len(x.label))\n\n\nlabeled_spectrogram_batch_generator = LabeledSpectrogramBatchGenerator(\n examples=corpus.examples[:int(len(corpus.examples) * .95)],\n spectrogram_cache_directory=base_directory / \"spectrogram-cache\" / \"mel\")\n\n\ndef train_wav2letter() -> None:\n wav2letter = 
Wav2Letter(input_size_per_time_step=labeled_spectrogram_batch_generator.input_size_per_time_step(),\n optimizer=Adagrad(lr=1e-3))\n name = timestamp() + \"-adagrad-complete-95\"\n\n wav2letter.train(labeled_spectrogram_batch_generator.training_batches(),\n tensor_board_log_directory=tensorboard_log_base_directory / name,\n net_directory=nets_base_directory / name,\n test_labeled_spectrogram_batch=labeled_spectrogram_batch_generator.test_batch(),\n samples_per_epoch=labeled_spectrogram_batch_generator.batch_size * 100)\n\n\ntrain_wav2letter()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636315320","text":"from dataclasses import replace\nfrom typing import Any, Iterator\nfrom unittest.mock import patch\n\nimport pytest\n\nimport black\nfrom tests.util import (\n DEFAULT_MODE,\n PY36_VERSIONS,\n THIS_DIR,\n assert_format,\n dump_to_stderr,\n read_data,\n)\n\nSIMPLE_CASES = [\n \"beginning_backslash\",\n \"bracketmatch\",\n \"class_blank_parentheses\",\n \"class_methods_new_line\",\n \"collections\",\n \"comments\",\n \"comments2\",\n \"comments3\",\n \"comments4\",\n \"comments5\",\n \"comments6\",\n \"comments_non_breaking_space\",\n \"comment_after_escaped_newline\",\n \"composition\",\n \"composition_no_trailing_comma\",\n \"docstring\",\n \"empty_lines\",\n \"expression\",\n \"fmtonoff\",\n \"fmtonoff2\",\n \"fmtonoff3\",\n \"fmtonoff4\",\n \"fmtskip\",\n \"fmtskip2\",\n \"fmtskip3\",\n \"fmtskip4\",\n \"fmtskip5\",\n \"fmtskip6\",\n \"fstring\",\n \"function\",\n \"function2\",\n \"function_trailing_comma\",\n \"import_spacing\",\n \"remove_parens\",\n \"slices\",\n \"string_prefixes\",\n \"tricky_unicode_symbols\",\n \"tupleassign\",\n]\n\nSIMPLE_CASES_PY2 = [\n \"numeric_literals_py2\",\n \"python2\",\n \"python2_unicode_literals\",\n]\n\nEXPERIMENTAL_STRING_PROCESSING_CASES = [\n \"cantfit\",\n \"comments7\",\n \"long_strings\",\n \"long_strings__edge_case\",\n \"long_strings__regression\",\n \"percent_precedence\",\n]\n\nPY310_CASES = [\n \"pattern_matching_simple\",\n \"pattern_matching_complex\",\n \"pattern_matching_extras\",\n \"parenthesized_context_managers\",\n]\n\nSOURCES = [\n \"src/black/__init__.py\",\n \"src/black/__main__.py\",\n \"src/black/brackets.py\",\n \"src/black/cache.py\",\n \"src/black/comments.py\",\n \"src/black/concurrency.py\",\n \"src/black/const.py\",\n \"src/black/debug.py\",\n \"src/black/files.py\",\n \"src/black/linegen.py\",\n \"src/black/lines.py\",\n \"src/black/mode.py\",\n \"src/black/nodes.py\",\n \"src/black/numerics.py\",\n \"src/black/output.py\",\n \"src/black/parsing.py\",\n \"src/black/report.py\",\n \"src/black/rusty.py\",\n \"src/black/strings.py\",\n \"src/black/trans.py\",\n \"src/blackd/__init__.py\",\n \"src/black_primer/cli.py\",\n \"src/black_primer/lib.py\",\n \"src/blib2to3/pygram.py\",\n \"src/blib2to3/pytree.py\",\n \"src/blib2to3/pgen2/conv.py\",\n \"src/blib2to3/pgen2/driver.py\",\n \"src/blib2to3/pgen2/grammar.py\",\n \"src/blib2to3/pgen2/literals.py\",\n \"src/blib2to3/pgen2/parse.py\",\n \"src/blib2to3/pgen2/pgen.py\",\n \"src/blib2to3/pgen2/tokenize.py\",\n \"src/blib2to3/pgen2/token.py\",\n \"setup.py\",\n \"tests/test_black.py\",\n \"tests/test_blackd.py\",\n \"tests/test_format.py\",\n \"tests/test_primer.py\",\n \"tests/optional.py\",\n \"tests/util.py\",\n \"tests/conftest.py\",\n]\n\n\n@pytest.fixture(autouse=True)\ndef patch_dump_to_file(request: Any) -> 
Iterator[None]:\n with patch(\"black.dump_to_file\", dump_to_stderr):\n yield\n\n\ndef check_file(filename: str, mode: black.Mode, *, data: bool = True) -> None:\n source, expected = read_data(filename, data=data)\n assert_format(source, expected, mode, fast=False)\n\n\n@pytest.mark.parametrize(\"filename\", SIMPLE_CASES_PY2)\n@pytest.mark.python2\ndef test_simple_format_py2(filename: str) -> None:\n check_file(filename, DEFAULT_MODE)\n\n\n@pytest.mark.parametrize(\"filename\", SIMPLE_CASES)\ndef test_simple_format(filename: str) -> None:\n check_file(filename, DEFAULT_MODE)\n\n\n@pytest.mark.parametrize(\"filename\", EXPERIMENTAL_STRING_PROCESSING_CASES)\ndef test_experimental_format(filename: str) -> None:\n check_file(filename, black.Mode(experimental_string_processing=True))\n\n\n@pytest.mark.parametrize(\"filename\", SOURCES)\ndef test_source_is_formatted(filename: str) -> None:\n path = THIS_DIR.parent / filename\n check_file(str(path), DEFAULT_MODE, data=False)\n\n\n# =============== #\n# Complex cases\n# ============= #\n\n\ndef test_empty() -> None:\n source = expected = \"\"\n assert_format(source, expected)\n\n\ndef test_pep_572() -> None:\n source, expected = read_data(\"pep_572\")\n assert_format(source, expected, minimum_version=(3, 8))\n\n\ndef test_long_first_line() -> None:\n source, expected = read_data(\"long_first_line\")\n assert_format(source, expected, mode=black.Mode(use_tabs=True))\n\n\ndef test_docstring_tabs() -> None:\n source, expected = read_data(\"docstring_tabs\")\n assert_format(source, expected, mode=black.Mode(use_tabs=True))\n\n\ndef test_pep_572_remove_parens() -> None:\n source, expected = read_data(\"pep_572_remove_parens\")\n assert_format(source, expected, minimum_version=(3, 8))\n\n\ndef test_pep_572_do_not_remove_parens() -> None:\n source, expected = read_data(\"pep_572_do_not_remove_parens\")\n # the AST safety checks will fail, but that's expected, just make sure no\n # parentheses are touched\n assert_format(source, expected, fast=True)\n\n\n@pytest.mark.parametrize(\"major, minor\", [(3, 9), (3, 10)])\ndef test_pep_572_newer_syntax(major: int, minor: int) -> None:\n source, expected = read_data(f\"pep_572_py{major}{minor}\")\n assert_format(source, expected, minimum_version=(major, minor))\n\n\ndef test_pep_570() -> None:\n source, expected = read_data(\"pep_570\")\n assert_format(source, expected, minimum_version=(3, 8))\n\n\n@pytest.mark.parametrize(\"filename\", PY310_CASES)\ndef test_python_310(filename: str) -> None:\n source, expected = read_data(filename)\n mode = black.Mode(target_versions={black.TargetVersion.PY310})\n assert_format(source, expected, mode, minimum_version=(3, 10))\n\n\ndef test_docstring_no_string_normalization() -> None:\n \"\"\"Like test_docstring but with string normalization off.\"\"\"\n source, expected = read_data(\"docstring_no_string_normalization\")\n mode = replace(DEFAULT_MODE, string_normalization=False)\n assert_format(source, expected, mode)\n\n\ndef test_long_strings_flag_disabled() -> None:\n \"\"\"Tests for turning off the string processing logic.\"\"\"\n source, expected = read_data(\"long_strings_flag_disabled\")\n mode = replace(DEFAULT_MODE, experimental_string_processing=False)\n assert_format(source, expected, mode)\n\n\ndef test_numeric_literals() -> None:\n source, expected = read_data(\"numeric_literals\")\n mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)\n assert_format(source, expected, mode)\n\n\ndef test_numeric_literals_ignoring_underscores() -> None:\n source, expected = 
read_data(\"numeric_literals_skip_underscores\")\n mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)\n assert_format(source, expected, mode)\n\n\n@pytest.mark.python2\ndef test_python2_print_function() -> None:\n source, expected = read_data(\"python2_print_function\")\n mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY27})\n assert_format(source, expected, mode)\n\n\ndef test_stub() -> None:\n mode = replace(DEFAULT_MODE, is_pyi=True)\n source, expected = read_data(\"stub.pyi\")\n assert_format(source, expected, mode)\n\n\ndef test_python38() -> None:\n source, expected = read_data(\"python38\")\n assert_format(source, expected, minimum_version=(3, 8))\n\n\ndef test_python39() -> None:\n source, expected = read_data(\"python39\")\n assert_format(source, expected, minimum_version=(3, 9))\n","sub_path":"tests/test_format.py","file_name":"test_format.py","file_ext":"py","file_size_in_byte":7120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"394389376","text":"from socket import *\n\nserver_port = 12000\nserver_socket = socket(AddressFamily.AF_INET, SocketKind.SOCK_DGRAM)\nserver_socket.bind(('', server_port))\nprint('THe server is ready to receive')\n\nwhile 1:\n message, client_address = server_socket.recvfrom(2048)\n modified_message = message.upper()\n server_socket.sendto(modified_message, client_address)\n\n","sub_path":"UDPServer.py","file_name":"UDPServer.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508773086","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n异或运算\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# 1. 加载数据\n\nx_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)\ny_data = np.array([[0], [1], [1], [0]], dtype=np.float32)\n\n# 2. 构建结构\n\n# 2输入节点,2隐藏节点,1输出节点\nx_in = tf.placeholder(tf.float32, [None, 2]) # 站位,定义输入层两个节点\ny_in = tf.placeholder(tf.float32, [None, 1]) # 站位,定义输出层一个节点\n\n# 隐藏层,\nb_h1 = tf.Variable(tf.zeros([2])) # 2表示本层有两个节点\nW_h1 = tf.Variable(tf.random_uniform([2, 2], -1.0, 1.0)) # [2, 2] 表示上层有2个节点,本层有2个节点\ny_h1 = tf.matmul(x_in, W_h1) + b_h1 # 注意x_in与W_h1的顺序,x_in为上层的输出,也就是输入层\nx_h1 = tf.sigmoid(y_h1) # 激活函数tf.sigmoid\n\n# 输出层\nb = tf.Variable(tf.zeros([1])) # 1表示下层有1个节点(下层其实没有,也定义为1)\nW = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0)) # [2, 1] 表示上层有2个节点,本层有1个节点\ny = tf.matmul(x_h1, W) + b # 注意x_h1与W的顺序,x_h1为上层的输出,对上层输出加权求和,然后加上阈值\nhypothesis = tf.sigmoid(y) # 激活函数tf.sigmoid(双弯曲函数) hypothesis为整个网络的输出,也就是预测值\n\n# 训练目标\n# loss函数 lost(y',y)=-(y*log(y')-(1-y)*log(1-y')) y为真实值,y'为预测值\nloss = tf.reduce_mean(((y_in * tf.log(hypothesis)) + ((1 - y_in) * tf.log(1.0 - hypothesis))) * -1)\noptimizer = tf.train.GradientDescentOptimizer(0.01) # 梯度下降优化器,学习率0.01,每次调整w与b的幅度\ntrain = optimizer.minimize(loss)\n\n# 3. 
训练\n\n# 初始化所有变量\ninit = tf.global_variables_initializer()\n# 启动图 (graph)\nsession = tf.Session()\nsession.run(init)\n\n# 训���\nfor step in range(0, 100000):\n session.run(train, feed_dict={x_in: x_data, y_in: y_data}) # 每次训练为计算train,这样就运行了整个网络\n if step % 1000 == 0:\n # 查看值\n print('step ', step)\n print('Hypothesis ', session.run(hypothesis, feed_dict={x_in: x_data, y_in: y_data}))\n print('W_h1 ', session.run(W_h1))\n print('b_h1 ', session.run(b_h1))\n print('W ', session.run(W))\n print('b ', session.run(b))\n print('loss ', session.run(loss, feed_dict={x_in: x_data, y_in: y_data}))\n print(\"---------------------------------------------------------------------------------\")\n\n# 4. 预测\nprint(\"预测\")\nprint(session.run(hypothesis, {x_in: x_data}))\n\n# 5. 保存\nsaver = tf.train.Saver()\nsaver.save(session, \"./xor_model/xor.model\")\n\n# 6. 结束\nsession.close()\n","sub_path":"tensorflow_learn/tf_base/tf_helloworld/xor.py","file_name":"xor.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354827571","text":"######################################################################\n#\n# File: test/unit/v1/test_version_utils.py\n#\n# Copyright 2019 Backblaze Inc. All Rights Reserved.\n#\n# License https://www.backblaze.com/using_b2_code.html\n#\n######################################################################\nfrom __future__ import annotations\n\nimport warnings\n\nfrom b2sdk.v1 import rename_argument, rename_function\n\nfrom ..test_base import TestBase\n\n\nclass TestRenameArgument(TestBase):\n VERSION = '0.1.10'\n\n def test_warning(self):\n @rename_argument('aaa', 'bbb', '0.1.0', '0.2.0', current_version=self.VERSION)\n def easy(bbb):\n \"\"\" easy docstring \"\"\"\n return bbb\n\n # check that warning is not emitted too early\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n assert easy(5) == 5\n assert easy(bbb=5) == 5\n assert easy.__name__ == 'easy'\n assert easy.__doc__ == ' easy docstring '\n assert len(w) == 0\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n assert easy(aaa=5) == 5\n assert len(w) == 1\n assert issubclass(w[-1].category, DeprecationWarning)\n assert str(\n w[-1].message\n ) == \"'aaa' is a deprecated argument for 'easy' function/method - it was renamed to 'bbb' in version 0.1.0. Support for the old name is going to be dropped in 0.2.0.\", str(\n w[-1].message\n )\n\n def test_outdated_replacement(self):\n with self.assertRaises(\n AssertionError,\n msg=\n f\"rename_argument decorator is still used in version {self.VERSION} when old argument name 'aaa' was scheduled to be dropped in 0.1.2. It is time to remove the mapping.\",\n ):\n\n @rename_argument('aaa', 'bbb', '0.1.0', '0.1.2', current_version=self.VERSION)\n def late(bbb):\n return bbb\n\n assert late # make linters happy\n\n def test_future_replacement(self):\n with self.assertRaises(\n AssertionError,\n msg=\n \"rename_argument decorator indicates that the replacement of argument 'aaa' should take place in the future version 0.2.0, while the current version is 0.2.2. It looks like should be _discouraged_ at this point and not _deprecated_ yet. 
Consider using 'discourage_argument' decorator instead.\"\n ):\n\n @rename_argument('aaa', 'bbb', '0.2.0', '0.2.2', current_version=self.VERSION)\n def early(bbb):\n return bbb\n\n assert early # make linters happy\n\n def test_inverted_versions(self):\n with self.assertRaises(\n AssertionError,\n msg=\n \"rename_argument decorator is set to start renaming argument 'aaa' starting at version 0.2.2 and finishing in 0.2.0. It needs to start at a lower version and finish at a higher version.\"\n ):\n\n @rename_argument('aaa', 'bbb', '0.2.2', '0.2.0', current_version=self.VERSION)\n def backwards(bbb):\n return bbb\n\n assert backwards # make linters happy\n\n\nclass TestRenameFunction(TestBase):\n VERSION = '0.1.10'\n\n def test_rename_function(self):\n def new(bbb):\n return bbb\n\n for i in ('new', new):\n\n @rename_function(i, '0.1.0', '0.2.0', current_version=self.VERSION)\n def old(bbb):\n return bbb\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n assert old(5) == 5\n assert len(w) == 1\n assert issubclass(w[-1].category, DeprecationWarning)\n assert str(\n w[-1].message\n ) == \"'old' is deprecated since version 0.1.0 - it was moved to 'new', please switch to use that. The proxy for the old name is going to be removed in 0.2.0.\", str(\n w[-1].message\n )\n","sub_path":"test/unit/v1/test_version_utils.py","file_name":"test_version_utils.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409861004","text":"import io\nimport sys\nimport json\nimport unittest\nimport unittest.mock as mock\nfrom gradio import tunneling, networking, Interface\nimport threading\nimport paramiko\nimport os\n\n\nos.environ[\"GRADIO_ANALYTICS_ENABLED\"] = \"False\"\n\n\nclass TestTunneling(unittest.TestCase):\n def test_create_tunnel(self):\n response = networking.url_request(networking.GRADIO_API_SERVER)\n payload = json.loads(response.read().decode(\"utf-8\"))[0]\n io = Interface(lambda x: x, \"text\", \"text\")\n _, path_to_local_server, _ = io.launch(prevent_thread_lock=True, share=False)\n _, localhost, port = path_to_local_server.split(\":\")\n threading.Thread.start = mock.MagicMock(return_value=None)\n paramiko.SSHClient.connect = mock.MagicMock(return_value=None)\n tunneling.create_tunnel(payload, localhost, port)\n threading.Thread.start.assert_called_once()\n paramiko.SSHClient.connect.assert_called_once()\n io.close()\n\n\nclass TestVerbose(unittest.TestCase): \n \"\"\"Not absolutely needed but just including them for the sake of completion.\"\"\" \n \n def setUp(self):\n self.message = \"print test\"\n self.capturedOutput = io.StringIO() # Create StringIO object\n sys.stdout = self.capturedOutput # and redirect stdout.\n\n def test_verbose_debug_true(self):\n tunneling.verbose(self.message, debug_mode=True)\n self.assertEqual(self.capturedOutput.getvalue().strip(), self.message)\n\n def test_verbose_debug_false(self):\n tunneling.verbose(self.message, debug_mode=False)\n self.assertEqual(self.capturedOutput.getvalue().strip(), '')\n\n def tearDown(self):\n sys.stdout = sys.__stdout__\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_tunneling.py","file_name":"test_tunneling.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570791659","text":"import pandas as p\nimport numpy as n\nimport matplotlib.pyplot as plt\n\nclass KNN:\n \n def 
__init__(self,k=10):\n self.k=k\n \n def _distance(self,x1,x2):\n return n.sqrt(sum((x1-x2)**2))\n \n def predict(self,x,y,test):\n val=n.array(sorted([(self._distance(x[i],test),y[i]) for i in range(x.shape[0])])[:self.k])\n j=n.unique(val[:,1],return_counts=True)\n index=j[1].argmax()\n return j[0][index].astype(\"int\")\n \n def plotFreq(self,x,y):\n f=x\n f[\"Labels\"]=y\n classes=n.unique(y)\n class_=[]\n for i in classes:\n class_.append(f.loc[f[\"Labels\"]==i].shape[0])\n plt.bar(classes,class_,width=.1)\n for a,b in zip(classes,class_):\n plt.text(a,b,str(b))\n plt.show()\n \nd_x=p.read_csv(\"Diabetes_XTrain.csv\")\nd_y=p.read_csv(\"Diabetes_YTrain.csv\")\nd_x_test=p.read_csv(\"Diabetes_Xtest.csv\")\nk=KNN()\nans=p.DataFrame([k.predict(d_x.values,d_y.values,d_x_test.values[i]) for i in range(d_x_test.shape[0])])\nk.plotFreq(d_x_test,ans)","sub_path":"Ai Mafia Projects and Assignments/KNN Library/Diabetes_classification.py","file_name":"Diabetes_classification.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"165463935","text":"\"\"\"\nproduct.py contains the class product(), which is at the center of productionizing \nyour ML model. The product() class takes an API script and allows you to easily\ncontainerize the API. You can then deploy the container, export the image or call\nthe deployed API.\n\nSlots:\n--------\nwd : string\n Current working directory\ncurrent_status : string\n Current status of the product\npy_version : string\n Python version in use in the current session\nproduct_name : string\n Name of your product to deploy\nproject_name : string\n Name of your project on the workbench\napi_file : string\n Contains the path to the api_file that should be used\nrequirements_file : string\n Contains the path to the requirements_file that should be used\nport : string\n Contains the port the API should be exposed to\nservice_url : string\n Contains the url to reach your service\nlocal : boolean\n If True, the product will be deployed on localhost\n\"\"\"\n\n# import libs\nimport subprocess\nimport os\nimport sys\n\n# setup the class\nclass product:\n\n # describe the class\n def __init__(self, name = None, project = None):\n\n # store working directory\n self.wd = os.getcwd()\n\n # store status of product\n self.current_status = 'initialized'\n\n # check python version\n major_v = str(sys.version_info[0])\n minor_v = str(sys.version_info[1])\n micro_v = str(sys.version_info[2])\n\n # write to slot\n self.py_version = str(major_v + '.' + minor_v + '.' + micro_v)\n\n # check if product name was given\n if name is None:\n\n # give default name\n name = 'my-product'\n \n # store name in self\n self.product_name = name.replace('_', '-').replace('/', '-')\n\n # check if project name was given\n if project is None:\n\n # give default name\n project = 'my-project'\n\n # store project name\n self.project_name = project.replace('_', '-').replace('/', '-')\n\n # store path to api file\n self.api_file = None\n\n # store path to requirements file\n self.requirements_file = None\n\n # store port to deploy\n self.port = None\n\n # store service url\n self.service_url = None\n\n # store if local deployment\n self.local = None\n \n # build report\n report = \"\"\"\n\n Product Report:\n ---------------\n\n This is an automatically generated report on the status of your product \n deployment. 
productionize considers a product to be a deployable API.\n\n Your Product\n -----------------------\n Name: {product}\n Project: {project}\n Status: {status}\n\n With the product class you can easily deploy your python API, without\n leaving python. You first have to prepare the deployment, which will\n trigger the build of a Dockerfile in your current working directory:\n\n # initialize the class\n your_api = product()\n\n # prepare the deployment\n your_api.prepare_deployment(api_file = \"/path/api.py\",\n requirements_file = \"/path/requirements.txt\",\n port = \"8000\")\n \n Then you can simply deploy your API using your_api.deploy(). I split\n the deployment process in two parts, so that you are able to adjust\n the Dockerfile, if you need to.\n\n If you want to delete the a deployment, you can just use delete_deployment().\n In case you want to update a deployment, just prepare and then deploy again.\n\n \"\"\".format(product = self.product_name,\n project = self.project_name,\n status = self.current_status)\n\n # print report\n print (report)\n\n # helper method to build Dockerfile\n def __build_dockerfile(self):\n\n \"\"\"\n Private method to build a Dockerfile.\n This function first builds a Docker file from a python script and \n a requirements.txt.\n \"\"\"\n\n # try create a dockerfile\n try:\n\n # store dk_file_path\n dk_file_path = str(self.wd + '/Dockerfile')\n\n # open a new file\n subprocess.call(str('touch ' + dk_file_path).split(), stdout=subprocess.DEVNULL)\n\n # write content\n content = \"\"\"\\\n FROM python:{version}\n RUN mkdir -p /api\n COPY {api_file} /api/api.py\n COPY {requirements_file} /api/requirements.txt\n RUN python -m pip install -r /api/requirements.txt\n EXPOSE {port}\n ENTRYPOINT [\"python\", \"api/api.py\"]\\\n \"\"\".format(version=self.py_version,\n api_file = self.api_file,\n requirements_file = self.requirements_file,\n port = int(self.port))\n\n # open Dockerfile\n file = open(dk_file_path, \"w\")\n\n # write content to Dockerfile\n file.write(content)\n\n # close connection\n file.close()\n\n # write file path to self\n self.dk_file_path = dk_file_path\n\n # check if it worked\n if self.dk_file_path is None:\n\n # raise Exception\n raise Exception('I could not create the Dockerfile in your current working directory: ' + self.wd)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception(str('I could not create the Dockerfile in your current working directory: ' + self.wd))\n\n # main function to deploy API\n def prepare_deployment(self, api_file, requirements_file, port):\n\n \"\"\"\n Main method to prepare the deployment.\n\n This function builds a dockerfile out of the script and the require-\n ments file provided. Together with the system python version, the\n container is defined. 
After this function, the user can take a look\n to the Dockerfile and even adjust it, however, at the users own risk.\n\n Parameters\n ----------\n api_file : string\n String with the path to the python api file\n requirements_file : string\n String with the path to the requirements file\n port : string\n String with the port number to expose\n name : string\n String with the name of your deployment\n \"\"\"\n\n # check the api file\n if isinstance(api_file, str):\n\n # try to read in first lines\n try:\n\n # read in file\n with open(api_file) as file:\n first_line_api = file.readline()\n\n # store to self, but ensure there is no tilde\n self.api_file = api_file.replace('~','')\n\n # exception handling\n except:\n\n # raise Exception\n raise Exception(str('I could not find your file: ' + api_file))\n \n # if it is not a string\n else:\n\n # raise Exception\n raise Exception('api_file arg should be a path anf a filename to your python file that defines the api: e.g. your_folder/your_script.py')\n \n # check the requirements file\n if isinstance(requirements_file, str):\n\n # try to read in first lines\n try:\n\n # read in file\n with open(requirements_file) as file:\n first_line_req = file.readline()\n\n # store to self, but ensure there is no tilde\n self.requirements_file = requirements_file.replace('~', '')\n\n # exception handling\n except:\n\n # raise Exception\n raise Exception(str('I could not find your file: ' + requirements_file))\n \n # if it is not a string\n else:\n\n # raise Exception\n raise Exception('requirements_file arg should be a path anf a filename to your requirements.txt: e.g. your_folder/your_reqs.txt')\n\n # check if port is string\n if isinstance(port, str):\n\n # store to self\n self.port = str(port)\n\n # if it is not a string\n else:\n\n # raise exception\n raise Exception('port arg should be a string with the desired exposing port: e.g. port = \"8000\"')\n \n # build Dockerfile\n self.__build_dockerfile()\n\n # change status\n self.current_status = 'ready to deploy'\n\n # build report\n report = \"\"\"\n\n Deployment Report:\n ------------------\n\n This is an automatically generated report on the preparation of your\n deployment. The Dockerfile was built using the following files:\n\n {api_file}:\n ----------------\n {first_line_api}\n ----------------\n\n and\n\n {requirements_file}:\n ----------------\n {first_line_req}\n ----------------\n\n The Dockerfile is ready to be used. You can inspect your Dockerfile: \n \n {dk_file_path}\n\n You can of course edit the file, however, this is at your own risk. The\n processes in the container currently run as root. 
You can change that\n of course, as long as your API allows you to.\n\n Your Product\n -----------------------\n Name: {name}\n Project: {project}\n Status: {status}\n\n You can now deploy your product using the deploy() method.\n\n\n \"\"\".format(api_file = self.api_file,\n first_line_api = first_line_api,\n requirements_file = self.requirements_file,\n first_line_req = first_line_req,\n dk_file_path = self.dk_file_path,\n name = self.product_name,\n project = self.project_name,\n status = self.current_status)\n \n # print report\n print (report)\n\n # helper method to create Dockerfile\n def __build_image(self, local):\n \"\"\"\n Private method to build a Docker image.\n\n This function takes the Dockerfile and creates an image on the Minikube\n internal registry.\n\n Parameters\n ----------\n local : boolean\n If True, the image is build locally\n \"\"\"\n\n # try to create the image on the minikube registry\n try:\n \n # make sure this is run from wd\n os.chdir(self.wd)\n\n # check if local build\n if not local:\n \n # build image from Dockerfile\n command = str('eval $(minikube -p minikube docker-env) && docker build -t ' + self.product_name + '-image:latest .')\n os.system(command)\n\n # if local build\n else:\n\n # build image from Dockerfile\n command = str('docker build -t ' + self.product_name + '-image:latest .')\n os.system(command)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not build the Docker image from the Dockerfile. In case you edited the file, please check if that was correct.')\n\n # helper method to run a deployment\n def __run_deployment(self, local):\n \"\"\"\n Private method to run a deployment.\n\n This function uses kubectl to run a deployment on minikube.\n\n Parameters\n ----------\n local : boolean\n If True, the product is deployed locally instead of on the workbench\n \"\"\"\n\n # try to run deployment\n try:\n \n # check if local deployment\n if not local:\n\n # run deployment\n command = str('kubectl run ' + self.product_name + ' --image=' + self.product_name + \"-image:latest --image-pull-policy='Never' -n \" + self.project_name)\n os.system(command)\n \n # if local true\n else:\n\n # run container\n command = str('docker run -p ' + self.port + ':' + self.port + ' -d --name ' + self.product_name + ' ' + self.product_name + '-image')\n os.system(command)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not run the deployment from the Docker image, make sure the Dockerfile is working.')\n\n # helper method to expose pod\n def __expose_pod(self):\n\n \"\"\"\n Private method to expose a deployment.\n\n This function exposes the pod that was just deployed.\n \"\"\"\n\n # try to expose the pod\n try:\n\n # expose the pod\n command = str('kubectl expose pod ' + self.product_name + ' --port=' + self.port + ' --type=NodePort -n ' + self.project_name)\n subprocess.call(command.split(), stdout=subprocess.DEVNULL)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not expose the service, make sure your workbench is setup properly')\n\n # helper method to get the url\n def __get_url(self):\n\n \"\"\"\n Private method to get the url of a deployment.\n\n This function exposes the service on a minikube level an retreives\n the url.\n \"\"\"\n\n # try to expose service\n try:\n\n # expose the service on minikube\n command = str('minikube service ' + self.product_name + ' -n ' + self.project_name + ' --url')\n service_url = subprocess.check_output(command.split())\n\n # 
decode url\n service_url = str(service_url.decode(\"utf-8\")).replace(\"\\n\", \"\")\n \n # add route warning\n service_url = service_url + str('/')\n\n # write the url to self\n self.service_url = service_url\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not expose the service to your host machine. Make sure the workbench is properly setup.')\n\n # helper function to check if deployment exists\n def __check_pods(self, product, project):\n\n \"\"\"\n Private method to check if a pod exists.\n\n This function checks, if a pod already exists on Minikube.\n\n Parameters\n ----------\n product : string\n String with the name of the product\n project : string\n String with the name of the project\n \"\"\"\n\n # try to check if service exists\n try:\n\n # check for service\n command = str('kubectl get pod ' + product + ' -n' + project)\n exists = subprocess.call(command.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n\n # check result\n if exists == 0:\n\n # return False\n return True\n \n # if not 0, then False\n else:\n\n # return False\n return False\n \n # if it breaks, it doesn't exist\n except:\n\n # return False\n return False\n\n # helper function to check if service exists\n def __check_svcs(self, product, project):\n\n \"\"\"\n Private method to check if a svc exists.\n\n This function checks, if a svc already exists on Minikube.\n\n Parameters\n ----------\n product : string\n String with the name of the product\n project : string\n String with the name of the project\n \"\"\"\n\n # try to check if service exists\n try:\n\n # check for service\n command = str('kubectl get services ' + product + ' -n' + project)\n exists = subprocess.call(command.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n\n # check result\n if exists == 0:\n\n # return False\n return True\n \n # if not 0, then False\n else:\n\n # return False\n return False\n \n # if it breaks, it doesn't exist\n except:\n\n # return False\n return False\n\n # helper function to check if container exists\n def __check_container(self, product):\n\n \"\"\"\n Private method to check if a container exists.\n\n This function checks, if a container already exists on localhost.\n\n Parameters\n ----------\n product : string\n String with the name of the product\n \"\"\"\n\n # try to check if container exists\n try:\n\n # check for service\n command = str('docker container inspect ' + product)\n exists = subprocess.call(command.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n\n # check result\n if exists == 0:\n\n # return False\n return True\n \n # if not 0, then False\n else:\n\n # return False\n return False\n \n # if it breaks, it doesn't exist\n except:\n\n # return False\n return False\n\n # helper function to delete pod\n def __delete_pod(self, product, project):\n\n \"\"\"\n Main method to delete pod.\n\n This function deletes the pods of specific products and all\n Minikube artifacts with it.\n\n Parameters\n ----------\n product : string\n String that gives the name of the product deployment that should be deleted\n project : string\n String that gives the name of the project in which the product should be deleted\n \"\"\"\n\n # try to delete pod\n try:\n\n # delete the pod\n command = str('kubectl delete pod ' + product + ' -n ' + project)\n subprocess.call(command.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not delete the pod for deployment: ' 
+ product)\n\n # helper function to delete service\n def __delete_services(self, product, project):\n\n \"\"\"\n Main method to delete services.\n\n This function deletes the services of specific products and all\n Minikube artifacts with it.\n\n Parameters\n ----------\n product : string\n String that gives the name of the product deployment that should be deleted\n project : string\n String that gives the name of the project in which the product should be deleted\n \"\"\"\n\n # try to delete service\n try:\n\n # delete the service\n command = str('kubectl delete service ' + product + ' -n ' + project)\n subprocess.call(command.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not delete the service for deployment: ' + product) \n\n # helper function to delete docker container\n def __delete_container(self, product):\n\n \"\"\"\n Main method to delete container.\n\n This function deletes the a locally running Docker container\n\n Parameters\n ----------\n product : string\n String that gives the name of the product deployment that should be deleted\n \"\"\"\n\n # try to delete a container\n try:\n\n # stop the container\n command = str('docker stop ' + product)\n subprocess.call(command.split(), stdout=subprocess.DEVNULL)\n\n # rm container\n command = str('docker rm ' + product)\n subprocess.call(command.split(), stdout=subprocess.DEVNULL)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception('I could not delete the container for deployment: ' + product)\n\n # main method to delete products\n def delete_deployment(self, product, project):\n\n \"\"\"\n Main method to delete deployments.\n\n This function deletes the deployment of specific products and all\n Minikube artifacts with it.\n\n Parameters\n ----------\n product : string\n String that gives the name of the product deployment that should be deleted\n project : string\n String that gives the name of the project in which the product should be deleted\n \"\"\"\n\n # try to delete the product deployment\n try:\n\n # check if local deployment\n if not self.local:\n\n # check if pod exists\n if self.__check_pods(product = product, project = project):\n\n # delete the pod\n self.__delete_pod(product = product, project = project)\n\n # if it does not exist\n else:\n\n # print message\n print ('There is no pod for your deployment: ' + product)\n\n # check if service exists\n if self.__check_svcs(product = product, project = project):\n\n # delete the pod\n self.__delete_services(product = product, project = project)\n\n # if it does not exist\n else:\n\n # print message\n print ('There is no service for your deployment: ' + product)\n\n # if local\n else:\n\n # check if the container exists\n if self.__check_container(product = product):\n\n # delete the container\n self.__delete_container(product = product)\n\n # if it does not exist\n else:\n\n # print message\n print ('There is no container for your deployment: ' + product)\n\n # handle exception\n except:\n\n # raise exception\n raise Exception(str('I could not delete the deployment of your product: ' + product))\n\n # main method to deploy product\n def deploy(self, local = False):\n\n \"\"\"\n Main method to deploy the product.\n\n This function takes the Dockerfile and deploys it to the workbench. 
The\n user can also choose to just run the container locally.\n\n Parameters\n ----------\n local : boolean\n if set to True, the product is build locally and not deployed to\n the workbench.\n \"\"\"\n # check if product is already prepared\n if self.dk_file_path is None:\n\n # raise Exception\n raise Exception('You first need to run prepare_deployment() before deploying your product.')\n\n # store local in self\n self.local = local\n\n # check if local build requested\n if not self.local:\n\n # build docker image on Minikube registry\n self.__build_image(local = self.local)\n\n # check if already exists\n pod_exists_already = self.__check_pods(product = self.product_name, project = self.project_name)\n svc_exists_already = self.__check_svcs(product = self.product_name, project = self.project_name)\n\n # if pod already exists delete it\n if pod_exists_already:\n\n # delete product \n self.__delete_pod(product = self.product_name,\n project = self.project_name)\n\n # if service already exists delete it\n if svc_exists_already:\n\n # delete service\n self.__delete_services(product = self.product_name,\n project = self.project_name)\n\n # run deployment\n self.__run_deployment(local = local)\n\n # expose deployment\n self.__expose_pod()\n\n # get url\n self.__get_url()\n\n # change the status\n self.current_status = 'deployed and healthy'\n\n # build report\n report = \"\"\"\n\n Deployment Report:\n ------------------\n\n This is an automatically generated report on the status of your deployed\n product. Your API is now containerized and hosted on the workbench. You\n can access the API using:\n\n {service_url}\n\n You can call the API in whatever way it is designed. If you want to get\n rid of it, just use the delete_deployment() method. If you just want to\n update the API, you can just use prepare_deployment() to create a new\n Dockerfile and then deploy() again.\n\n Your Product\n -----------------------\n Name: {name}\n Project: {project}\n Status: {status}\n Access: {service_url}\n\n You are not forced to stay on your workbench though. You can use\n the push_product() method to push the image of your product to any\n registry you want.\n \"\"\".format(service_url = self.service_url,\n name = self.product_name,\n project = self.project_name,\n status = self.current_status)\n\n # print report\n print (report)\n \n # if local build is requested\n else:\n\n # build the Docker image locally\n self.__build_image(local = self.local)\n\n # run the docker container locally\n self.__run_deployment(local = self.local)\n\n # change the status\n self.current_status = 'deployed and healthy'\n\n # construct the url\n self.service_url = str('localhost:' + self.port + '/')\n\n # build report\n report = \"\"\"\n\n Deployment Report:\n ------------------\n\n This is an automatically generated report on the status of your deployed\n product. Your API is now containerized and hosted on your local machine.\n You can access the API using:\n\n {service_url}\n\n You can call the API in whatever way it is designed. If you want to get\n rid of it, just use the delete_deployment() method. If you just want to\n update the API, you can just use prepare_deployment() to create a new\n Dockerfile and then deploy() again.\n\n Your Product\n -----------------------\n Name: {name}\n Project: {project}\n Status: {status}\n Access: {service_url}\n\n You are not forced to stay on your local machine though. 
You can use\n the push_product() method to push the image of your product to any\n registry you want.\n \"\"\".format(service_url = self.service_url,\n name = self.product_name,\n project = self.project_name,\n status = self.current_status)\n\n # print report\n print (report)\n\n # main method to push product to other registry\n def push_product(self, registry):\n \"\"\"\n Main method to push your product.\n\n This function pushes the docker image to any other registry. Depending\n on the privacy setting, the user will need to login to the registry and\n create a token first.\n\n Parameters\n ----------\n registry : string\n Gives the url to the target registry, if you just push to Dockerhub, \n just pass your DockerHub user name to the registry arg\n \"\"\"\n\n # try to push to the registry\n try:\n\n # tag the image to a remote registry\n command = str('eval $(minikube -p minikube docker-env) && docker tag ' + self.product_name + '-image ' + registry + '/' + self.product_name + '-image')\n os.system(command)\n\n # print message\n print (str('> Successfully tagged the image as ' + registry + '/' + self.product_name + '-image'))\n\n # tag the image to a remote registry\n command = str('eval $(minikube -p minikube docker-env) && docker push ' + registry + '/' + self.product_name + '-image')\n os.system(command)\n\n # print message\n print (str('> Successfully pushed image to ' + registry))\n\n # handle exception \n except:\n\n # raise exception\n raise Exception('I could not push the product to another registry. Make sure you have the proper registry url and the correct credentials in case it is a private registry.') ","sub_path":"productionize/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":28720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"596571140","text":"\n\nimport numpy as np\nimport crocoddyl\nimport torch \nfrom utils import solve_crocoddyl, random_array\n\"\"\"\nBase crocoddyl data\n\n\"\"\"\n\n\ndef dataGen(size:int = 100, theta:float = 0.):\n \"\"\"\n Returns position and cost\n \"\"\"\n\n\n x = random_array(size)\n y = []\n\n \n for state in x: \n ddp = solve_crocoddyl(state)\n y.append([ddp.cost])\n \n positions = torch.tensor(x, dtype = torch.float32)\n cost = torch.tensor(y, dtype = torch.float32)\n del ddp,x, y \n return positions, cost\n\n\n\n\n\n\n\n\n\n","sub_path":"Experiments 1/Squared Network Regression/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"295519915","text":"\"\"\"\nAlgorithms :: Practice - recursion\n\"\"\"\n\n# %%\ndef foo1(x=3):\n def foo2(y=2):\n def foo3(z=1):\n return z\n\n return foo3() + y\n\n return foo2() + x\n\n\nprint(foo1())\n\n\n# %%\ndef summ(n: int) -> int:\n if n == 1:\n return n\n else:\n return n + summ(n - 1)\n\n\nprint(summ(4))\n\n\n# %%\ndef factorial(n: int) -> int:\n # Base case\n if n == 1:\n print(f\"Base case frame: n = {n}\")\n return n\n print(f\"Pre-recursive: n = {n} | Calling `{n} * factorial({n - 1})`\")\n # Recursive case\n r = factorial(n - 1)\n print(f\"Post-recursive: r = {r}\")\n return n * r\n\n\nprint(factorial(7))\n\n\n# %%\ndef dec_to_bin(n):\n if n == 0:\n print(f\"Base case frame: n == {n}\")\n return 0\n else:\n print(f\"Pre-recursive: n = {n}\")\n r = 10 * dec_to_bin(int(n / 2))\n print(f\"Post-recursive: r = {r}\")\n return n % 2 + r\n\n\nprint(dec_to_bin(7))\n\n# %%\n# Greatest common denominator - 
iterative solution\ndef gcd_iter(a, b):\n counter = 1\n print(f\"step = {counter}\")\n while b:\n print(f\"before: a = {a}, b = {b}, a % b = {a % b}\")\n a, b = b, a % b\n print(f\"after : a = {a}, b = {b}\")\n counter += 1\n return a\n\n\nprint(gcd_iter(12, 20))\n\n\n# %%\n# Greatest common denominator - recursive solution\ndef gcd_recur(a, b):\n if b == 0:\n return a\n else:\n return gcd_recur(b, a % b)\n\n\nprint(gcd_recur(12, 20))\n\n\n# %%\n\"\"\"\nImplement a recursive algorithm itos() that converts a number, digit by digit, to a string.\n\n- Don’t convert the entire integer to a string and return it - that’s cheating!\n- The final returned result should be a single string representing the entire number.\n - For example, if we passed the integer 1234 to itos(), the function would return\n '1234' such that type('1234') == str.\n- You can break this problem down into three parts.\n - How do you identify your base case?\n - The pre-recursive work:\n - How do you get to that base case?\n - How do you need to seed your frames on the way to the base case?\n - The post-recursive work:\n - What would you add to the base case as it works its way back through the\n recursed calls?\n - Does the order of what is returned and what is added matter?\n- Annotate your solution with print statements that show, at each frame:\n - the state of the function, specifying what is being passed and\n what is being returned\n - a counter that tracks the frames as they are opened and closed\n\"\"\"\n# %%\nframe = 1\nprint(f\"At global frame = {frame}\")\n\n\ndef itos(n: int) -> str:\n global frame\n frame += 1\n # TODO: Base case\n if n % 10 == n:\n print(f\"Base case frame = {frame}, n = {n}\")\n return str(n)\n # TODO: Recursive case\n print(f\"pre-recursive: n = {n}\")\n digit = n % 10 # Extract last digit\n print(f\"Last digit: {digit}\")\n rest = n // 10 # `Pop` last digit off of number\n print(f\"Rest of number: {rest}\")\n r = itos(rest) # recurse into rest of number\n frame -= 1\n print(f\"post-recursive: r = {r}\")\n\n return r + str(digit)\n\n\nprint(f\"Result: {itos(1234)} (type of {type(itos(1234))})\")\n\n\n# %%\ndef itos(n: int) -> str:\n # Base case: one digit number\n if n % 10 == n: # Clever way of saying `if n < 10`\n return str(n)\n # Recursive case\n digit = n % 10 # Extract last digit\n rest = n // 10 # `Pop` last digit off of number\n print(f\"{n} -> itos({rest}) + str({digit})\")\n r = itos(rest) # recurse into rest of number\n\n return r + str(digit)\n\n\nresult = itos(54321)\nprint(f\"Result: {result} (type of {type(result)})\")\n\n\n# %%\n","sub_path":"cs/lambda_cs/02_algorithms/Algorithms/notes/recursion_practice.py","file_name":"recursion_practice.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"461041080","text":"__author__ = 'koohyh'\nimport numpy as np\nimport pandas as pd\nimport pybedtools as pybt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\n\n\ndef app():\n print('get the common elements')\n down_genes = get_down_genes(which_fraction='FrBC')\n \n\n atac_down_regions_fname = '/bi/home/koohyh/SeqMonk_Projects/Figures/New_Figs_and_Data_ISP_Manuscripts/ATAC_FrBC_Aged_Down_pval01_No_padj.txt'\n h3k4me3_down_regions_fname = '/bi/home/koohyh/SeqMonk_Projects/Figures/New_Figs_and_Data_ISP_Manuscripts/H3K4ME3_FrBC_Aged_Down_pval05_with_padj.txt'\n atac_downs_df = get_features(atac_down_regions_fname)\n h3k4me3_downs_df = get_features(h3k4me3_down_regions_fname)\n 
\n\n\n down_atac = set(atac_downs_df['Feature'].dropna())\n down_h3k4me3 = set(h3k4me3_downs_df['Feature'].dropna())\n get_common_elements(down_genes, down_atac, down_h3k4me3)\n\ndef get_down_genes(which_fraction):\n # this it read a file that contains these genes, but for now I have the\n #list of genes.\n # This ilst if from Ribo0_mm10 that I have copy in dropbox\n if which_fraction == 'FrD':\n\n down_genes = [\n 'Rftn2','Irs1', 'Gm9747', 'Fcgr4', 'BC094916', 'Fmn2', 'Plxna2', 'Nespas',\\\n 'Rps8', 'Tpk1', 'Exoc6b', 'Tmtc1', 'Rps4y2', 'Fam189a1', 'Mcf2l', 'Large', 'Inpp4b', 'Dcbld1', 'Ddc',\\\n '2610035D17Rik', 'Snx29', 'Ms4a6c', 'Atrnl1'\n ]\n elif which_fraction == 'FrBC':\n down_genes = ['Rplp2', 'Snhg7', 'Rpl10', 'Lgals1', 'Eif4g2', 'Igf1r', 'Scn2b', 'Heyl', 'Tceal8', 'Lrrc17',\n 'Sgce', '4933403G14Rik', 'Rftn2', 'Plxdc2', 'Pcdh9', 'Gm10561', 'Irs1', 'Rps4y2'\n ]\n\n return(set(down_genes))\n\n\ndef get_features(features_fname):\n # will read the features_fname and import it as pd.df and extract the corresponding gene names\n df = pd.read_csv(features_fname, sep='\\t', usecols=[1, 2, 3, 6])\n return(df)\n\ndef get_common_elements(set1, set2, set3):\n union = set1.union(set2.union(set3))\n values_to_sets = {a: ( a in set1, a in set2, a in set3) for a in union}\n sets_to_values = {}\n for a, s in values_to_sets.items():\n if s not in sets_to_values:\n sets_to_values[s] = []\n sets_to_values[s].append(a)\n for k in sets_to_values.keys():\n print(k, ' :==>' ,len(sets_to_values[k]))\n print(sets_to_values[k])\n #print(values_to_sets)\n\n #return(union)\n\n\n#\n# def get_common_elements(set1, set2, set3):\n# # takes three sets and gets at each sub-regions eg in all three sets, in any combination of both, in only one and so on\n# union = set1.union(set2.union(set3))\n# #values_to_sets = {a: (a in set1, a in set2, a in set3) for a in union}\n# values_to_sets = {a: (a in set1, a in set2, a in set3) for a in union}\n# sets_to_values = {}\n# for a, s in values_to_sets.items():\n# if s not in sets_to_values:\n# sets_to_values[s] = []\n# sets_to_values[s].append(a)\n# for k in sets_to_values.keys():\n# print(k, ' :==>' ,len(sets_to_values[k]))\n\n# #print(values_to_sets)\n#\n# #return(union)\n\n\nif __name__ == '__main__':\n sns.set_style('whitegrid')\n sns.set(font_scale=1.5)\n app()\n","sub_path":"pyMiscellaneous/get_common_elements.py","file_name":"get_common_elements.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"132403361","text":"# SF bug #476138: tempfile behavior across platforms\n# Ensure that a temp file can be closed any number of times without error.\n\nimport tempfile\n\nf = tempfile.TemporaryFile(\"w+b\")\nf.write('abc\\n')\nf.close()\nf.close()\nf.close()\n","sub_path":"lib/rubyfox/server/data/lib/Lib/test/test_tempfile.py","file_name":"test_tempfile.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597442438","text":"from django.contrib import admin\n\nfrom webapp.models import Record\n\n\nclass RecordAdmin(admin.ModelAdmin):\n list_display = ['id', 'author', 'author_email','status','created_at']\n list_display_links = ['id','author']\n list_filter = ['author', 'status']\n search_fields = ['text', 'author_email']\n fields = ['author', 'author_email', 'text', 'status','created_at', 'updated_at']\n readonly_fields = ['created_at', 'updated_at']\n\n\nadmin.site.register(Record, 
RecordAdmin)\n","sub_path":"source/webapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"42664568","text":"import loggers\n\nclass Constants(object):\n ACKNOWLEDGE_SUCCEEDED = 0\n ACKNOWLEDGE_FAILED = 1\n ACKNOWLEDGE_DISABLED = 2\n\n IN_MEMORY_LOGGER = loggers.InMemoryLogger.__name__\n CSV_LOGGER = loggers.CsvLogger.__name__\n POSTGRES_DATABASE_LOGGER = loggers.PostgresDatabaseLogger.__name__\n\n LOGGER_CLASSES = [\n IN_MEMORY_LOGGER,\n CSV_LOGGER,\n POSTGRES_DATABASE_LOGGER\n ]\n","sub_path":"flexbot/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"404402353","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 24 14:09:05 2016\n\n@author: Andy\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pytz\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\n\n#Load events\nevents = pd.read_csv(\"D:\\Projects\\Kaggle\\Brain\\events.csv\", index_col=0)\n\n#Change datatype to save some memory\nevents.document_id = events.document_id.astype(np.int32)\n\n#Import training clicks data and merge with events\ntrain = pd.merge(pd.read_csv(\"D:\\Projects\\Kaggle\\Brain\\clicks_train.csv\", dtype=np.int32, index_col=0),\n events, left_index=True, right_index=True)\n\n#Now that we have merged we can get rid of events to save on some memory\ndel events\n\n#Lets check out the hour and the day people be doing things\ntrain[\"hour\"] = (train.timestamp // (3600 * 1000)) % 24\ntrain[\"day\"] = train.timestamp // (3600 * 24 * 1000)\n\n#Drop geo_location info except for country code\ntrain.geo_location = train.geo_location.str[:2]\n\n#Get the names of the top 5 countries from geo_location\ncntrys = train.geo_location.value_counts()[:5]\n\n#Remove all entries not in the top 5 most common countries\ntrain = train.loc[train['geo_location'].isin(cntrys.index)]\n \nplt.figure(figsize=(12,4))\ntrain.loc[train['platform'].isin([1])].geo_location.value_counts().hist(bins = 5, label=\"Desktop\", alpha = 0.7, normed = True)\ntrain.loc[train['platform'].isin([2])].geo_location.value_counts().hist(bins = 5, label=\"Phone\", alpha = 0.7, normed = True)\ntrain.loc[train['platform'].isin([3])].geo_location.value_counts().hist(bins = 5, label=\"Tablet\", alpha = 0.7, normed = True)\nplt.xlim(1, 5)\nplt.legend(loc=\"best\")\nplt.xlabel(\"Platform\")\nplt.ylabel(\"Fraction of users\")\n\nplt.figure(figsize=(12,4))\ntrain.loc[train['platform'].isin([1])].geo_location.value_counts().plot(kind = 'bar', label=\"Dekstop\", alpha = 0.6, normed=True)\ntrain.loc[train['platform'].isin([2])].geo_location.value_counts().plot(kind = 'bar', label=\"Dekstop\", alpha = 0.6, normed=True)\ntrain.loc[train['platform'].isin([3])].geo_location.value_counts().plot(kind = 'bar', label=\"Dekstop\", alpha = 0.6, normed=True)\nplt.legend(loc=\"best\")\nplt.xlabel(\"Platform\")\nplt.ylabel(\"Fraction of users\")","sub_path":"Kaggle_brain_cmp_cnt.py","file_name":"Kaggle_brain_cmp_cnt.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"200063909","text":"##Script (Python) \"query_rdf\"\n##Title: Return a given query as RDF\n##parameters=REQUEST\n\nREQUEST.RESPONSE.setHeader('Content-Type', 
'application/xml')\n\nkw = REQUEST.form\n\nquery = kw.copy()\nquery['portal_type'] = 'Collector Issue'\n\nif 'sort_on' not in query:\n query['sort_on'] = 'getId'\n\nif 'supporters' in query:\n query['assigned_to'] = query['supporters']\n del query['supporters']\n\ncatalog = context.get_internal_catalog()\nfound = catalog.search(query_request=query, sort_index='modified', reverse=True)\n\nitems = []\nfor item in found:\n info = {'url': item.getURL(),\n # dc namespace\n 'title': item.Title,\n 'description': item.Description,\n 'subjects': (),\n 'creators': (item.submitter_id,),\n 'contributors': item.assigned_to,\n 'date': item.modified.HTML4(),\n # cmfcollector namespace\n 'number': int(item.getId),\n 'responses': item.action_number,\n 'status': item.status,\n 'importance': item.importance,\n 'topic': item.topic,\n 'classification': item.classification,\n }\n items.append(info)\n\noptions = {'collector_url': context.absolute_url(),\n 'issues': tuple(items),\n }\n\nreturn context.issues_as_rdf(**options)\n","sub_path":"CMF_Extras/trunk/CMFCollector/skins/collector/query_rdf.py","file_name":"query_rdf.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"186875741","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QSizePolicy\n\nfrom Buttons import BoldButton, ItalicButton, UnderlineButton\nfrom Commands import BoldCommand, ItalicCommand, UnderlineCommand\nfrom Commands.Triggers import TextEditCommandTrigger\nfrom EditorUtilities import EditorBoldUtil, EditorItalicUtil, EditorUnderlineUtil\nfrom Observers import Observable, Observer, TextEditButtonObserver\n\n\nclass Toolbar(QtWidgets.QWidget, Observable):\n \"\"\"\n Widget linked with TextEdit.\n Responsible for triggering when to change something in text.\n \"\"\"\n\n def __init__(self, editor, parent_container=None):\n super(Toolbar, self).__init__(parent_container)\n\n # UI\n\n # Layout\n self.container = QtWidgets.QHBoxLayout()\n\n # Buttons\n self.bold_button = BoldButton()\n self.italic_button = ItalicButton()\n self.underline_button = UnderlineButton()\n\n # Editor\n self.editor = editor\n\n # COMMANDS\n\n # Triggers\n self.bold_command_trigger = TextEditCommandTrigger(\n BoldCommand(self.editor),\n button=self.bold_button)\n self.italic_command_trigger = TextEditCommandTrigger(\n ItalicCommand(self.editor),\n button=self.italic_button)\n self.underline_command_trigger = TextEditCommandTrigger(\n UnderlineCommand(self.editor),\n button=self.underline_button)\n\n # OBSERVERS\n self.observers = list()\n self.setup_observers()\n\n self.setup_ui()\n self.setup_styles()\n\n def setup_ui(self):\n \"\"\"\n Sets up UI structure.\n \"\"\"\n # INITIALIZE\n\n # Spacer\n spacer = QtWidgets.QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Maximum)\n\n # STRUCTURE\n\n self.container.addWidget(self.bold_button)\n self.container.addWidget(self.italic_button)\n self.container.addWidget(self.underline_button)\n self.container.addSpacerItem(spacer)\n\n # Layout\n\n self.setLayout(self.container)\n\n def setup_styles(self):\n \"\"\"\n Sets up UI styles.\n \"\"\"\n self.container.setContentsMargins(0, 0, 0, 0)\n\n # OBSERVER\n\n def setup_observers(self):\n \"\"\"\n Sets up observers for text states.\n \"\"\"\n self.attach(TextEditButtonObserver(EditorBoldUtil.state_ok, self.editor, self.bold_button))\n self.attach(TextEditButtonObserver(EditorItalicUtil.state_ok, self.editor, self.italic_button))\n 
self.attach(TextEditButtonObserver(EditorUnderlineUtil.state_ok, self.editor, self.underline_button))\n\n def detach(self, observer):\n \"\"\"\n Detaches observer.\n :param observer: Observer to be detached.\n \"\"\"\n self.observers.remove(observer)\n\n def attach(self, observer):\n \"\"\"\n Attaches observer.\n :param observer: Observer to be attached.\n \"\"\"\n self.observers.append(observer)\n\n def notify(self):\n \"\"\"\n Notifies all observers of event that occurred.\n \"\"\"\n for observer in self.observers:\n assert isinstance(observer, Observer)\n observer.update()\n\n def connect_observer_event(self, event):\n \"\"\"\n Connects (Qt) event to notify observers.\n :param event: Event to be connected.\n \"\"\"\n event.connect(self.notify)\n","sub_path":"Toolbar.py","file_name":"Toolbar.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"429463119","text":"def inversions_count(A, left, mid, right):\n L = A[left:mid+1]\n R = A[mid+1:right+1]\n i = j = 0\n k = left\n count = 0\n while i < len(L) and j < len(R):\n if L[i] > R[j]:\n count += len(L) - i\n A[k] = R[j]\n j += 1\n else:\n A[k] = L[i]\n i += 1\n k += 1\n while i < len(L):\n A[k] = L[i]\n i += 1\n k += 1\n\n while j < len(R):\n A[k] = R[j]\n j += 1\n k += 1\n return count\n\n\ndef inversions_helper(A, low, high):\n count = 0\n if low < high:\n mid = (low + high) // 2\n count += inversions_helper(A, low, mid)\n count += inversions_helper(A, mid+1, high)\n count += inversions_count(A, low, mid, high)\n return count\n\n\ndef inversions(A):\n return inversions_helper(A, 0, len(A)-1)\n\n\narr = [2, 3, 8, 6, 1]\n# arr = [5, 4, 3, 2, 1]\nprint(inversions(arr))\n","sub_path":"2_4_inversions.py","file_name":"2_4_inversions.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"325841586","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport telegram\nimport requests\nimport json\nimport pytz\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler, BaseFilter\nfrom telegram.ext import MessageHandler, Filters, CallbackQueryHandler\nfrom telegram.ext.dispatcher import run_async\nimport wr\nimport logging\nimport time\nimport totable\nimport random\nimport datetime\nimport os\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n#TOKEN = '707090914:AAFOupGmBjkNIkaZp81IEflkHuDiZgbqOWk'\nTOKEN = '754744500:AAHMdrn9dFwzMkddLOcDTk-3Ertqf7qAZeY'\nREQUEST_KWARGS = {'proxy_url': 'socks5://orbtl.s5.opennetwork.cc:999', 'urllib3_proxy_kwargs': {'username': '298465764', 'password': '56tsGvzP'}}\nupdater = Updater(token=TOKEN, request_kwargs=REQUEST_KWARGS, use_context=False)\n#updater = Updater(token=TOKEN)\nupdates = updater\ndispatcher = updater.dispatcher\nFR = telegram.ForceReply()\ntz = pytz.timezone(\"Europe/Moscow\")\n\n\n@run_async\ndef confirmation(bot, updater):\n time.sleep(random.uniform(0, 0.7))\n players = wr.read_results()\n if str(updater.message.chat.id) in players:\n bot.send_message(chat_id=updater.message.chat.id,\n text='Йо, ты уже в системе. 
Просто используй команды.')\n else:\n btnlist = [\n telegram.InlineKeyboardButton('Согласен', callback_data='agree')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id,\n text='Для начала необходимо принять Соглашение на обработку персональный данных.')\n bot.send_message(chat_id=updater.message.chat.id,\n text='Я даю своё согласие на обработку и публикацию моих персональных данных, таких как: результат участия в Экономической карусели, никнейм в Экономической карусели, никнейм в Телеграме.',\n reply_markup=markup)\n\n\n@run_async\ndef get_nick(bot, updater):\n time.sleep(random.uniform(0, 0.7))\n players = wr.read_results()\n id = updater.message.chat.id\n players[str(id)] = [id, '@' + str(updater.message.chat.username)]\n name = updater.message.text\n part_list = wr.read_part()\n if name not in part_list:\n players[str(id)] = players[str(id)]+[name, {}, {}]\n wr.write_results(players)\n btnlist = [\n telegram.InlineKeyboardButton('Меню', callback_data='menu')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id, text='Привет, {}!'.format(name), reply_markup=markup)\n else:\n bot.send_message(chat_id=updater.message.chat.id, text='Тебя нет в списках!'.format(name))\n\n\n@run_async\ndef query_h(bot, updater,):\n call = updater.callback_query\n if call.message:\n if call.data == 'agree':\n time.sleep(random.uniform(0, 0.7))\n players = wr.read_results()\n id = call.message.chat.id\n message_id = call.message.message_id\n bot.edit_message_text(chat_id=id, message_id=message_id,\n text=call.message.text)\n if str(id) not in players:\n\n bot.send_message(chat_id=id,\n text='А теперь давайте познакомимся. 
Под каким никнеймом отображать Вас в таблице результатов?',\n reply_markup=FR)\n else:\n bot.send_message(chat_id=id, text='Ты уже в системе, вот меню!')\n show_menu(bot, updater)\n wr.write_results(players)\n if call.data =='contest':\n print_list(bot, updater)\n if call.data == 'past':\n list_past(bot, updater)\n if call.data == 'other':\n feedback(bot, updater)\n if call.data == 'rules':\n chat_id = call.message.chat.id\n message_id = call.message.message_id\n btnlist = [\n telegram.InlineKeyboardButton('Общие Правила', callback_data='general'),\n telegram.InlineKeyboardButton('Назад', callback_data='menu')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.edit_message_text(chat_id=chat_id, message_id=message_id,\n text='Правила:', reply_markup=markup)\n if call.data == 'general':\n print_rules(bot, updater, 0)\n if call.data == 'menu':\n show_menu(bot, updater)\n if call.data == 'fb':\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=call.message.text)\n bot.send_message(chat_id=call.message.chat.id, text='Оставьте свой отзыв ответом на это сообщение.', reply_markup=FR)\n if call.data == 'donate':\n donate(bot, updater)\n if call.data == 'probs':\n problems_list(bot, updater)\n if call.data == 'admin':\n admin(bot, updater)\n if call.data == 'addadmin':\n id = call.message.chat.id\n message_id = call.message.message_id\n bot.edit_message_text(chat_id=id, message_id=message_id, text=call.message.text)\n bot.send_message(chat_id=id, text='Напишите ник нового админа ответом на это сообщение.', reply_markup=FR)\n if call.data[:3] == 'pr_':\n problems = wr.read_problems()\n id = call.message.chat.id\n message_id = call.message.message_id\n grouped = []\n dates = 'c '+problems[call.data[3:]][2][0]+' по '+problems[call.data[3:]][2][1]\n btnlist = [\n telegram.InlineKeyboardButton('Удалить', callback_data='del_{}'.format(call.data[3:])),\n telegram.InlineKeyboardButton('Назад', callback_data='sc_{}'.format(call.data[3:call.data.find('.')]))\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.edit_message_text(chat_id=id, message_id=message_id,\n text='Задача {}\\n'.format(call.data[3:])+problems[call.data[3:]][0]+'\\nОтвет: '+problems[call.data[3:]][1]+'\\nДаты: '+dates, reply_markup=markup)\n # if call.data[:5] == 'hide_':\n # id = call.message.chat.id\n # message_id = call.message.message_id\n # btnlist = [\n # telegram.InlineKeyboardButton('Показать текст задачи.', callback_data='pr_{}'.format(call.data[5:]))\n # ]\n # markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n # bot.edit_message_text(chat_id=id, message_id=message_id, text=call.data[5:], reply_markup=markup)\n\n if call.data == 'list':\n part_list(bot, updater)\n if call.data[:4] == 'add_':\n problems = wr.read_problems()\n btnlist = []\n for problem in problems:\n btnlist.append(telegram.InlineKeyboardButton(problem, callback_data='ch_pr_{}'.format(problem)))\n footer = telegram.InlineKeyboardButton('Назад', callback_data='list')\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=4, footer_buttons=[footer]))\n id = call.message.chat.id\n message_id = call.message.message_id\n players = wr.read_results()\n bot.edit_message_text(chat_id=id, message_id=message_id, text='Участник {}'.format(players[str(id)][2]), reply_markup=markup)\n if call.data[:6] == 'ch_pr_':\n id = call.message.chat.id\n message_id = call.message.message_id\n players = wr.read_results()\n btnlist = 
[]\n for date in range(5, 32):\n if date not in range(10,28):\n date = '-{}-'.format(date)\n if call.data[6:] in players[str(id)][3]:\n if date in players[str(id)][3][call.data[6:]]:\n date = '+{}+'.format(date)\n btnlist.append(telegram.InlineKeyboardButton(date, callback_data='date_{}'.format(date)))\n btnlist.append(telegram.InlineKeyboardButton('-1-', callback_data='aaaaa'))\n btnlist.append(telegram.InlineKeyboardButton('Назад', callback_data='add_{}'.format(call.data[6:])))\n btnlist.append(telegram.InlineKeyboardButton('Обновить', callback_data='ch_pr_{}'.format(call.data[6:])))\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=7))\n bot.edit_message_text(chat_id=id, message_id=message_id,\n text='Участник {}'.format(players[str(id)][2])+'\\nЗадача {}'.format(call.data[6:]), reply_markup=markup)\n if call.data[:5] == 'date_':\n id = call.message.chat.id\n message_id = call.message.message_id\n players = wr.read_results()\n text = call.message.text\n problem = text[text.find('Задача')+7:]\n date = call.data[5:]\n if date[0] == '-':\n bot.send_message(chat_id=id, text='Неправильная дата')\n else:\n if date[0] == '+':\n players[str(id)][3][problem].pop(players[str(id)][3][problem].index(int(date[1:-1])))\n\n else:\n try:\n players[str(id)][3][problem].append(int(date))\n except KeyError:\n players[str(id)][3][problem] = [int(date)]\n wr.write_results(players)\n# bot.send_message(chat_id=id, text='Для обновления данных нажмите кнопку \\\"Обновить.\\\"')\n if call.data[:3] == 'sh_':\n num = call.data[3:]\n print_problem(bot, updater, num)\n if call.data == 'again':\n problems = wr.read_problems()\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=problems[call.message.text[25:-2]][0])\n bot.send_message(chat_id=call.message.chat.id, text='Ваш ответ к задаче {} :'.format(call.message.text[25:-2]), reply_markup=FR)\n # if call.data == 'solved':\n # players = wr.read_results()\n # names = wr.read_names()\n # btnlist = []\n # for pr in players[str(call.message.chat.id)][4]:\n # btnlist.append(telegram.InlineKeyboardButton(names[pr[:pr.find('.')]]+'—'+pr, callback_data='s_{}'.format(pr)))\n # footer = telegram.InlineKeyboardButton('Назад.', callback_data='contest')\n # markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=2, footer_buttons=[footer]))\n # bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Ваши решенные задачи!', reply_markup=markup)\n if call.data[:2] == 's_':\n problems = wr.read_problems()\n num = call.data[2:]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='shc_{}'.format(num[:num.find('.')]))], n_cols=1))\n markup = telegram.InlineKeyboardMarkup(wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='shc_{}'.format(num[:num.find('.')]))], n_cols=1))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='Задача {}\\n'.format(num)+str(problems[num][0])+'\\n'+'Ответ: {}'.format(problems[num][1]),\n reply_markup=markup)\n if call.data == 'error':\n markup = telegram.InlineKeyboardMarkup(wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='contest')], n_cols=1))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='Вы пока не можете решать эту задачу.',\n reply_markup=markup)\n if call.data == 'addtask':\n bot.edit_message_text(chat_id=call.message.chat.id, 
message_id=call.message.message_id,\n text='Читай ниже.')\n bot.send_message(chat_id=call.message.chat.id, text='Отправьте текст задачи и ответ ответом на это сообщение.', reply_markup=FR)\n if call.data[:4] == 'del_':\n problems = wr.read_problems()\n problem = call.data[4:]\n del(problems[problem])\n wr.write_problems(problems)\n if int(problem[:problem.find('.')]) not in list(int(pr[:pr.find('.')]) for pr in problems.keys()):\n names = wr.read_names()\n del(names[problem[:problem.find('.')]])\n wr.write_names(names)\n markup = telegram.InlineKeyboardMarkup(\n wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='probs')], n_cols=1))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='Задача успешно удалена.',\n reply_markup=markup)\n if call.data == 'send_results':\n btnlist = [telegram.InlineKeyboardButton('JSON', callback_data='send_json'),\n telegram.InlineKeyboardButton('XLXS', callback_data='send_xlsx')]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=2))\n bot.send_message(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Выберите резы:', reply_markup=markup)\n if call.data == 'send_json':\n doc = open('results.json', 'rb')\n bot.send_document(chat_id=updater.callback_query.message.chat.id, document=doc)\n if call.data == 'send_xlsx':\n totable.totable()\n doc = open('res.xlsx', 'rb')\n bot.send_document(chat_id=updater.callback_query.message.chat.id, document=doc)\n if call.data == 'send_fb':\n send_fb(bot, updater)\n if call.data == 'repost':\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Смотрите ниже.')\n bot.send_message(chat_id=call.message.chat.id, text='Ответьте на это сообщение тем, что хотите всем разослать.', reply_markup=FR)\n if call.data == 'setnames':\n names = wr.read_names()\n btnlist = []\n for i in names:\n btnlist.append(telegram.InlineKeyboardButton(i+'-'+names[i], callback_data='set_name_{}'.format(i)))\n footer = telegram.InlineKeyboardButton('Назад', callback_data='probs')\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=2, footer_buttons=[footer]))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Выберите карусель:', reply_markup=markup)\n if call.data[:9] == 'set_name_':\n i = call.data[9:]\n names = wr.read_names()\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='Выбранная карусель:\\n{}'.format(i+'-'+names[i]))\n bot.send_message(chat_id=call.message.chat.id,\n text='Ответьте на это сообщение, чтобы назвать карусель {}.'.format(i), reply_markup=FR)\n if call.data[:4] == 'shc_':\n problems = wr.read_problems()\n car = call.data[4:]\n btnlist = []\n for pr in list(pr for pr in problems if pr[:pr.find('.')] == car):\n btnlist.append(telegram.InlineKeyboardButton(pr[pr.find('.')+1:], callback_data='s_{}'.format(pr)))\n footer = [telegram.InlineKeyboardButton('Отправить PDF', callback_data='pdf_{}'.format(car)),\n telegram.InlineKeyboardButton('Назад', callback_data='past')]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=2, footer_buttons=footer))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Выберите задачу:', reply_markup=markup)\n if call.data[:3] == 'sc_':\n problems = wr.read_problems()\n car = call.data[3:]\n btnlist = []\n for pr in list(pr for pr in problems if pr[:pr.find('.')] == car):\n 
btnlist.append(telegram.InlineKeyboardButton(pr[pr.find('.')+1:], callback_data='pr_{}'.format(pr)))\n footer = [telegram.InlineKeyboardButton('Назад', callback_data='probs')]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=4, footer_buttons=footer))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Выберите задачу:', reply_markup=markup)\n if call.data[:4] == 'pdf_':\n if '{}.pdf'.format(call.data) in os.listdir():\n bot.send_document(chat_id=call.message.chat.id, document=open('{}.pdf'.format(call.data), 'rb'))\n else:\n bot.send_message(chat_id=call.message.chat.id, text='PDF для данной Карусели не существует.')\n\n\n@run_async\ndef print_rules(bot, updater, *version):\n chat_id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n btnlist = [\n telegram.InlineKeyboardButton('Общие Правила', callback_data='general'),\n telegram.InlineKeyboardButton('Назад', callback_data='menu')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n if version[0] == 0:\n bot.edit_message_text(chat_id=chat_id, message_id=message_id, text='''*Как сдавать ответ:*\nВо время тура необходимо ответить боту на сообщение вида:\\n\\\"Ваш ответ к задаче # :\\\"\\n*сообщинием с только ответом на задачу*, которым будет являться десятичное число. *Числа записываются в виде десятичной дроби с математическим округлением до двух знаков после запятой, через точку*.\n\n*Ход тура и подведение его итогов:*\n\nВремя, которое даётся на решение задач, ограничено временем проведения тура.\n\nВопросы по условию можно задавать на протяжении всего тура в ВК нашей группы: https://vk.com/economic.carousel\n\nВо время тура Вы получаете задание, решаете его и даете только ответ. Независимо от результата (верный ответ или нет), Вы получаете следующее задание. \n\nВремя на решение каждого задания не ограничено, определено только общее время проведения тура.\n\nПроцесс решения заканчивается, если Вы прошли все задачи или если закончилось время на решение.\n\nМеста распределяются согласно количеству набранных баллов. Если кто-то набирает равное количество баллов, то выше ставится тот, у которого больше верных ответов.\n\n*Начисление баллов:*\nПервая задача стоит 3 балла.\n\nЕсли к задаче дан верный ответ, то Вы получает её полную стоимость, а следующая задача будет стоить на 1 балл больше. 
\n\nЕсли на задачу дан неверный ответ, то команда получает за решение 0 баллов, а следующая задача будет стоить на 3 балла меньше (но не менее 3 баллов она стоить не может).\n\nПо всем техническим вопросам - vk.com/ooodnakov, @ooodnakov''', parse_mode=telegram.ParseMode.MARKDOWN,\n reply_markup=markup)\n else:\n\n bot.edit_message_text(chat_id=chat_id, message_id=message_id, text='''Coming soon...''', reply_markup=markup)\n\n\n@run_async\ndef show_menu(bot, updater):\n time.sleep(random.uniform(0, 0.7))\n btnlist = [\n telegram.InlineKeyboardButton('Начать Контест!', callback_data='contest'),\n telegram.InlineKeyboardButton('Задания прошлых Каруселей', callback_data='past'),\n telegram.InlineKeyboardButton('Правила', callback_data='rules'),\n telegram.InlineKeyboardButton('Поддержать проект/Оставить отзыв', callback_data='other')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n players = wr.read_results()\n if 'callback_query' in str(updater):\n chat_id = updater.callback_query.message.chat.id\n if str(chat_id) in players.keys():\n message_id = updater.callback_query.message.message_id\n bot.edit_message_text(chat_id=chat_id, message_id=message_id, text='Меню:', reply_markup=markup)\n else:\n bot.send_message(chat_id=chat_id, text='Вы не нажали старт!')\n else:\n chat_id = updater.message.chat.id\n if str(chat_id) in players.keys():\n bot.send_message(chat_id=chat_id, text='Меню', reply_markup=markup)\n else:\n bot.send_message(chat_id=chat_id, text='Вы не нажали старт!')\n\n\n\n@run_async\ndef print_problem(bot, updater, num):\n problems = wr.read_problems()\n id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n if num == 'end':\n #results\n pass\n else:\n bot.edit_message_text(chat_id=id, message_id=message_id, text='Задача {}\\n'.format(num)+str(problems[num][0]))\n bot.send_message(chat_id=id, text='Ваш ответ к задаче {} :'.format(num), reply_markup=FR)\n\n\nclass FilterNick(BaseFilter):\n def filter(self, message):\n try:\n return 'А теперь давайте познакомимся. Под каким никнеймом отображать Вас в таблице результатов?' == message.reply_to_message.text\n except AttributeError:\n return False\nfilter_nick = FilterNick()\n\n\nclass FilterFB(BaseFilter):\n def filter(self, message):\n try:\n return 'Оставьте свой отзыв ответом на это сообщение.' == message.reply_to_message.text\n except AttributeError:\n return False\n\nfilter_fb = FilterFB()\n\n\nclass FilterAns(BaseFilter):\n def filter(self, message):\n try:\n return 'Ваш ответ к задаче' == message.reply_to_message.text[:18]\n except AttributeError:\n return False\n\nfilter_ans = FilterAns()\n\nclass FilterAA(BaseFilter):\n def filter(self, message):\n try:\n return 'Напишите ник нового админа ответом на это сообщение.' == message.reply_to_message.text\n except AttributeError:\n return False\n\nfilter_aa = FilterAA()\n\n\nclass FilterAT(BaseFilter):\n def filter(self, message):\n try:\n return 'Отправьте текст задачи и ответ ответом на это сообщение.' == message.reply_to_message.text\n except AttributeError:\n return False\n\nfilter_at = FilterAT()\n\n\nclass FilterRep(BaseFilter):\n def filter(self, message):\n try:\n return 'Ответьте на это сообщение тем, что хотите всем разослать.' 
== message.reply_to_message.text\n except AttributeError:\n return False\n\nfilter_rep = FilterRep()\n\n\nclass FilterName(BaseFilter):\n def filter(self, message):\n try:\n return 'Ответьте на это сообщение, чтобы назвать карусель ' == message.reply_to_message.text[:50]\n except AttributeError:\n return False\n\nfilter_name = FilterName()\n\n\n@run_async\ndef feedback(bot, updater):\n time.sleep(random.uniform(0, 0.7))\n id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n btnlist = [\n telegram.InlineKeyboardButton('Поддержать проект', callback_data='donate'),\n telegram.InlineKeyboardButton('Через бота', callback_data='fb'),\n telegram.InlineKeyboardButton('Через Google Forms', url='https://forms.gle/UyPgMpSs31WPcPwQ7')\n ]\n footer = telegram.InlineKeyboardButton('Назад', callback_data='menu')\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1, footer_buttons=[footer]))\n bot.edit_message_text(\n chat_id=id, message_id=message_id,\n text='Выберите, как вы хотите оставить отзыв, или хотите поддержать проект?',\n reply_markup=markup)\n\n\n@run_async\ndef thx_fb(bot, updater):\n time.sleep(random.uniform(0, 0.7))\n fb = wr.read_feedback()\n if str(updater.message.from_user.id) in fb:\n fb[str(updater.message.from_user.id)].append(updater.message.text)\n else:\n fb[str(updater.message.from_user.id)] = [updater.message.text]\n bot.send_message(chat_id=updater.message.chat.id, text='Спасибо за отзыв! ')\n wr.write_feedback(fb)\n show_menu(bot, updater)\n\n\n@run_async\ndef donate(bot, updater):\n time.sleep(random.uniform(0, 0.7))\n id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n btnlist = [\n telegram.InlineKeyboardButton('Назад', callback_data='other')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.edit_message_text(chat_id=id,\n message_id=message_id,\n text='Поддержать проект можно через нашу страничку ВК \\n(https://vk.com/economic.carousel) или переводом в Сбербанк Онлайн.', reply_markup=markup)\n bot.send_message(chat_id=id, text='2202 2011 4263 4639')\n\n\n@run_async\ndef rest(bot, updater):\n bot.send_message(chat_id=updater.message.chat.id, text='Вы написали боту просто так. 
Либо вам нужно было ответить на сообщение, либо пользуйтесь меню.')\n\n\n@run_async\ndef clear(bot, updater):\n try:\n wr.clear(str(updater.message.chat.id))\n except KeyError:\n pass\n bot.send_message(chat_id=updater.message.chat.id, text='Чисто.')\n\n\n@run_async\ndef admin(bot, updater):\n players = wr.read_results()\n btnlist = [\n telegram.InlineKeyboardButton('Показать список участников', callback_data='list'),\n telegram.InlineKeyboardButton('Показать задачи', callback_data='probs'),\n telegram.InlineKeyboardButton('Добавить админа', callback_data='addadmin'),\n telegram.InlineKeyboardButton('Отправить результаты', callback_data='send_results'),\n telegram.InlineKeyboardButton('Отправить отзывы', callback_data='send_fb'),\n telegram.InlineKeyboardButton('Отправить всем сообщение через бота', callback_data='repost')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n if 'callback_query' in str(updater):\n id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n bot.edit_message_text(chat_id=id, message_id=message_id,\n text='Мeню Админа.',\n reply_markup=markup)\n elif players[str(updater.message.chat.id)][1] not in wr.read_admins():\n id = updater.message.chat.id\n bot.send_message(chat_id=id, text='У вас нет доступа к данным функциям!')\n else:\n id = updater.message.chat.id\n bot.send_message(chat_id=id,\n text='Мeню Админа.',\n reply_markup=markup)\n\n\n@run_async\ndef problems_list(bot, updater):\n problems = wr.read_problems()\n names = wr.read_names()\n btnlist = []\n for name in dict((name, names[name]) for name in names if name != '0' and name != '00'):\n btnlist.append(telegram.InlineKeyboardButton(names[name], callback_data='sc_{}'.format(name)))\n footer = [telegram.InlineKeyboardButton('Добавить задачу', callback_data='addtask'),\n telegram.InlineKeyboardButton('Назвать карусели', callback_data='setnames'),\n telegram.InlineKeyboardButton('Назад', callback_data='admin')]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1, footer_buttons=footer))\n bot.edit_message_text(chat_id=updater.callback_query.message.chat.id,\n message_id=updater.callback_query.message.message_id,\n text='Карусели:', reply_markup=markup)\n\n\n\n@run_async\ndef part_list(bot, updater):\n players = wr.read_results()\n btnlist = []\n for id in players:\n btnlist.append(telegram.InlineKeyboardButton(players[id][2], callback_data='add_{}'.format(updater.callback_query.message.chat.id)))\n footer = telegram.InlineKeyboardButton('Назад', callback_data='admin')\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=2, footer_buttons=[footer]))\n bot.edit_message_text(chat_id=updater.callback_query.message.chat.id, message_id=updater.callback_query.message.message_id, text='Список участников.', reply_markup=markup)\n\n\n@run_async\ndef add_admin(bot, updater):\n admins = wr.read_admins()\n admins.append(updater.message.text)\n wr.write_admins(admins)\n btnlist = [\n telegram.InlineKeyboardButton('Меню админа.', callback_data='admin')\n ]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id, text='Админ {} добавлен!'.format(updater.message.text), reply_markup=markup)\n\n\n@run_async\ndef print_list(bot, updater):\n problems = wr.read_problems()\n names = wr.read_names()\n id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n players = wr.read_results()\n car = 
list(int(pr[:pr.find('.')]) for pr in problems.keys() if pr[:pr.find('.')] != '0' and pr[:pr.find('.')] != '00')\n for i in list(dict.fromkeys(car)):\n ran = problems[\"{}.1\".format(i)][2]\n if \"{}.1\".format(i) not in players[str(id)][4]:\n players[str(id)][3][\"{}.1\".format(i)] = ran\n btnlist = []\n for pr in problems:\n if pr in players[str(id)][3] and names[pr[:pr.find('.')]] != '':\n# grouped = []\n# for d in wr.group_consecutives(players[str(id)][3][pr]):\n# if type(d) == list:\n# grouped.append('с {} по {}'.format(d[0], d[-1]))\n# else:\n# grouped.append(str(d))\n# dates = ', '.join(grouped)\n# if datetime.datetime.today().day in players[str(id)][3][pr]:\n# btnlist.append(telegram.InlineKeyboardButton('К: {}, З: *{}* на даты {}'.format(names[pr[:pr.find('.')]],pr, dates),\n# callback_data='sh_{}'.format(pr)))\n# else:\n# btnlist.append(telegram.InlineKeyboardButton('К: {}, З: -{}- на даты {}'.format(names[pr[:pr.find('.')]],pr, dates),\n# callback_data='error'))\n f_d = tz.localize(datetime.datetime.strptime(players[str(id)][3][pr][0], \"%Y-%m-%d %H:%M\"))\n s_d = tz.localize(datetime.datetime.strptime(players[str(id)][3][pr][1], \"%Y-%m-%d %H:%M\"))\n if f_d < datetime.datetime.now(tz=tz) < s_d:\n btnlist.append(telegram.InlineKeyboardButton(names[pr[:pr.find('.')]]+' [Доступно]', callback_data='sh_{}'.format(pr)))\n else:\n btnlist.append(telegram.InlineKeyboardButton(names[pr[:pr.find('.')]]+'[Недоступно]', callback_data='error'))\n\n footer = [telegram.InlineKeyboardButton('Назад', callback_data='menu')]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1, footer_buttons=footer))\n bot.edit_message_text(chat_id=id, message_id=message_id, text='Выберите Карусель:', reply_markup=markup)\n wr.write_results(players)\n\n\n@run_async\ndef answer_problem(bot, updater):\n problems = wr.read_problems()\n answer = updater.message.text\n problem = updater.message.reply_to_message.text[19:-2]\n pr_1 = problem[:problem.find('.')+1]\n pr_2 = problem[problem.find('.')+1:]\n try:\n float(answer)\n players = wr.read_results()\n btnlist = [\n telegram.InlineKeyboardButton('Назад', callback_data='contest')\n ]\n del (players[str(updater.message.chat.id)][3][problem])\n if answer == problems[problem][1]:\n rep = 'Ответ верный!'\n if pr_2 == '1':\n players[str(updater.message.chat.id)][4][problem] = 3\n elif players[str(updater.message.chat.id)][4][pr_1 + str(int(pr_2) - 1)] == 0:\n players[str(updater.message.chat.id)][4][problem] = 3\n else:\n players[str(updater.message.chat.id)][4][problem] = players[str(updater.message.chat.id)][4][pr_1 + str(int(pr_2) - 1)]+1\n else:\n players[str(updater.message.chat.id)][4][problem] = 0\n wr.write_results(players)\n rep = 'Ответ неверный!'\n if int(pr_2) < len(list(key for key in problems if key[:2] == pr_1)):\n players[str(updater.message.chat.id)][3][pr_1 + str(int(pr_2) + 1)] = problems[pr_1 + str(int(pr_2) + 1)][2]\n s_d = tz.localize(\n datetime.datetime.strptime(players[str(updater.message.chat.id)][3][pr_1 + str(int(pr_2) + 1)][1],\n \"%Y-%m-%d %H:%M\"))\n f_d = tz.localize(\n datetime.datetime.strptime(players[str(updater.message.chat.id)][3][pr_1 + str(int(pr_2) + 1)][0],\n \"%Y-%m-%d %H:%M\"))\n if f_d < datetime.datetime.now(tz=tz) < s_d:\n pr_num = pr_1 + str(int(pr_2) + 1)\n btnlist.insert(0, telegram.InlineKeyboardButton('Следующая задача {}'.format(pr_num), callback_data='sh_{}'.format(pr_num)))\n else:\n rep += '\\nСледующая задача пока не доступна.'\n else:\n sum = 0\n for num in 
players[str(updater.message.chat.id)][4]:\n sum+=players[str(updater.message.chat.id)][4][num]\n rep += '\\nЭто последняя задача из данной Карусели.\\nВаш результат: {} б.'.format(sum)\n\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id, text=rep, reply_markup=markup)\n wr.write_results(players)\n except ValueError:\n bot.send_message(chat_id=updater.message.chat.id, text='Неправильный формат ответа!')\n bot.send_message(chat_id=updater.message.chat.id, text=updater.message.reply_to_message.text, reply_markup=FR)\n\n\n@run_async\ndef add_task(bot, updater):\n problems = wr.read_problems()\n problem = updater.message.text\n answer_ind = problem.find('Ответ: ')\n dates_ind = problem.find('Даты:')\n num_ind = problem.find('\\n')\n num = problem[7:num_ind]\n if dates_ind != -1:\n answer = problem[answer_ind + 7:dates_ind - 1]\n if problem[dates_ind:].find('с') != -1:\n if problem[dates_ind:].find('по') != -1:\n f_date = problem[problem[dates_ind:].find('с')+dates_ind+2:problem[dates_ind:].find('по')+dates_ind-1]\n else:\n f_date = problem[problem[dates_ind:].find('с') + dates_ind + 2:]\n else:\n f_date = \"2020-03-01 00:00\"\n if problem[dates_ind:].find('по') != -1:\n s_date = problem[problem[dates_ind:].find('по')+dates_ind+3:]\n else:\n s_date = \"2020-05-01 00:00\"\n fin_dates = [f_date, s_date]\n else:\n fin_dates = [\"2020-03-01 00:00\", \"2020-05-01 00:00\"]\n answer = problem[answer_ind + 7:]\n problem = problem[num_ind+1:answer_ind-1]\n problems[num] = [problem, answer, fin_dates]\n names = wr.read_names()\n if num[:num.find('.')] not in names:\n names[num[:num.find('.')]] = \"\"\n wr.write_names(names)\n wr.write_problems(problems)\n players = wr.read_results()\n for id in players:\n if num in players[id][3]:\n players[id][3][num]=fin_dates\n wr.write_results(players)\n markup = telegram.InlineKeyboardMarkup(\n wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='probs')], n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id, text='Задача успешно добавлена.', reply_markup=markup)\n\n\n@run_async\ndef send_res(bot, updater):\n doc = open('results.json', 'rb')\n bot.send_document(chat_id=updater.callback_query.message.chat.id, document=doc)\n\n\n@run_async\ndef send_fb(bot, updater):\n doc = open('feedback.json', 'rb')\n bot.send_document(chat_id=updater.callback_query.message.chat.id, document=doc)\n\n\n@run_async\ndef repost(bot, updater):\n mess = updater.message.text\n for id in wr.read_results():\n try:\n bot.send_message(chat_id=id, text=mess)\n except telegram.error.Unauthorized:\n pass\n markup = telegram.InlineKeyboardMarkup(\n wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='admin')], n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id, text='Успешно!', reply_markup=markup)\n\n\n@run_async\ndef set_name(bot, updater):\n name = updater.message.text\n names = wr.read_names()\n reply = updater.message.reply_to_message.text\n i = reply[reply.find('карусель')+9:-1]\n names[i] = name\n wr.write_names(names)\n markup = telegram.InlineKeyboardMarkup(\n wr.build_menu([telegram.InlineKeyboardButton('Назад', callback_data='setnames')], n_cols=1))\n bot.send_message(chat_id=updater.message.chat.id, text='Успешно!', reply_markup=markup)\n\n\n@run_async\ndef list_past(bot, updater):\n problems = wr.read_problems()\n names = wr.read_names()\n id = updater.callback_query.message.chat.id\n message_id = updater.callback_query.message.message_id\n players = 
wr.read_results()\n btnlist = [telegram.InlineKeyboardButton(names['0'], callback_data='shc_{}'.format('0')),\n telegram.InlineKeyboardButton(names['00'], callback_data='shc_{}'.format('00'))]\n for name in dict((name, names[name]) for name in names if name != '0' and name != '00'):\n ls = set(pr for pr in problems if pr[:pr.find('.')] == name)\n if set.issubset(ls, set(players[str(id)][4])):\n btnlist.append(telegram.InlineKeyboardButton(names[name], callback_data='shc_{}'.format(name)))\n footer = [telegram.InlineKeyboardButton('Назад', callback_data='menu')]\n markup = telegram.InlineKeyboardMarkup(wr.build_menu(btnlist, n_cols=1, footer_buttons=footer))\n bot.edit_message_text(chat_id = id, message_id=message_id, text='Выберите прошлую Карусель:', reply_markup=markup)\n\n\ndispatcher.add_handler(CallbackQueryHandler(query_h))\ndispatcher.add_handler(CommandHandler('start', confirmation))\ndispatcher.add_handler(CommandHandler('admin', admin))\ndispatcher.add_handler(CommandHandler('menu', show_menu))\ndispatcher.add_handler(CommandHandler('pidr_cl', clear))\ndispatcher.add_handler(MessageHandler(filter_ans, answer_problem))\ndispatcher.add_handler(MessageHandler(filter_nick, get_nick))\ndispatcher.add_handler(MessageHandler(filter_fb, thx_fb))\ndispatcher.add_handler(MessageHandler(filter_aa, add_admin))\ndispatcher.add_handler(MessageHandler(filter_at, add_task))\ndispatcher.add_handler(MessageHandler(filter_rep, repost))\ndispatcher.add_handler(MessageHandler(filter_name, set_name))\ndispatcher.add_handler(MessageHandler(Filters.update, rest))\nupdater.start_polling()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":42005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337841518","text":"from numpy import genfromtxt\nimport numpy as np\nimport os\nfrom collections import defaultdict\nfrom itertools import permutations, repeat\nimport random\nimport math\nimport sys\nfrom numpy import random as numpy_random\nfrom Racetrack import RaceTrack\n\n\nclass OffPolicyMonteCarloAgent:\n def __init__(self, track, n_episodes=10000, gamma=0.9, epsilon=0.9):\n\n self.track = track\n self.gamma = gamma\n self.n_episodes = n_episodes\n self.epsilon = epsilon\n\n # Initialize Q values and C values\n y_range = track.track.shape[0]\n x_range = track.track.shape[1]\n self.yvel_range = track.max_vel - track.min_vel + 1\n self.xvel_range = track.max_vel - track.min_vel + 1\n self.yacc_range = 3 # -1, 0, +1\n self.xacc_range = 3 # -1, 0, +1\n\n # Initialize state-action values\n self.Q = np.zeros((y_range, x_range, self.yvel_range,\n self.xvel_range, self.yacc_range, self.xacc_range))\n\n # Initialize rewards dictionary\n self.Returns = {}\n\n # Initial Policy\n # For each state: assign equal probability of selecting each valid action from the state\n self.pi = np.zeros(self.Q.shape, dtype=float)\n for y_coord in range(self.Q.shape[0]):\n for x_coord in range(self.Q.shape[1]):\n for y_vel in range(track.min_vel, track.max_vel + 1):\n for x_vel in range(track.min_vel, track.max_vel + 1):\n valid_actions = self.track.possible_actions(\n (y_coord, x_coord, y_vel, x_vel))\n for y_acc, x_acc in valid_actions:\n self.pi[y_coord, x_coord, y_vel, x_vel,\n y_acc, x_acc] = 1/len(valid_actions)\n\n def policy_iteration(self):\n \"\"\"\n \"\"\"\n\n policy_improvement = False\n\n k = 0\n while not policy_improvement:\n print('Iteration {}'.format(k))\n\n # Generate an episode\n G = self.generate_episode()\n\n # Append G values to 
Returns\n for s_a in G.keys():\n # Create key if not in Returns\n if s_a not in self.Returns.keys():\n self.Returns[s_a] = []\n self.Returns[s_a].append(G[s_a])\n\n # Replace q-values with average returns.\n for s_a in self.Returns.keys():\n self.Q[eval(s_a)[0],\n eval(s_a)[1],\n eval(s_a)[2],\n eval(s_a)[3],\n eval(s_a)[4],\n eval(s_a)[5]] = np.average(self.Returns[s_a])\n print('\\nUpdated Q-values')\n print('Q-value [0, 8, 0, 0, 1, 0]:', self.Q[0, 8, 0, 0, 1, 0])\n print('Q-value [0, 8, 0, 0, 1, 1]:', self.Q[0, 8, 0, 0, 1, 1])\n print('Q-value [0, 8, 0, 0, 0, 1]:', self.Q[0, 8, 0, 0, 0, 1])\n print('\\n')\n\n # Old policy\n old_policy = self.pi.copy()\n\n # Update pi(a | s)\n self.update_policy(initialize=False)\n\n # Check if convergence\n if np.allclose(old_policy, self.pi, atol=0.0001):\n print('Policy iteration converged.')\n policy_improvement = True\n\n # Counter and update epsilon\n self.epsilon = 1/(np.sqrt(k + 1.1))\n\n k += 1\n\n def generate_episode(self):\n \"\"\"\n \"\"\"\n\n crossed_finishing_line = False\n position = list(self.track.random_start_state())\n first_occurence = defaultdict(int)\n\n step = 0\n while not crossed_finishing_line:\n\n # Sample action\n action = self.sample_action_from_state(position)\n\n # Initiate s, a pair if not already in dict\n if str(position + action) not in first_occurence.keys():\n first_occurence[str(position + action)] = step\n\n # New position\n position = list(self.track.apply_action(\n state=position, action=action)[0])\n\n # Update step\n step += 1\n\n # Get projected path\n projected_path = self.track.projected_path(\n state=[position[0], position[1]], speed=[position[2], position[3]])\n\n # Check if goal if reached (is it in the projected reactangle)\n if self.track.crossed_finish_line(projected_path=projected_path):\n print('-- Goal Reached. 
Terminating Episode.')\n break\n\n # Check if car hits boundery or wall cells\n if self.track.crossed_track_boundary(projected_path=projected_path):\n position = self.random_start_position()\n# print('Outside track cells!')\n continue\n\n print('Steps {}'.format(step))\n\n G = self._get_G_values(\n first_occurence_dict=first_occurence, total_steps=step)\n\n return G\n\n def sample_action_from_state(self, state):\n \"\"\"\n \"\"\"\n\n # Sample action according to our eps-greedy policy\n # Ensure that probabilities we sample from sum to 1\n y_coord, x_coord, y_vel, x_vel = state\n\n actionprobs = self.pi[y_coord, x_coord, y_vel, x_vel]\n total_prob = np.sum(actionprobs)\n if not math.isclose(total_prob, 1, abs_tol=0.01):\n print(\n 'Action probabilities must sum to 1.0, but summed to {}, state: {}, actionprobs: {}'.format(total_prob, state, self.pi[tuple(state)]))\n sys.exit(1)\n\n linear_idx = np.random.choice(\n actionprobs.size, p=actionprobs.ravel())\n a = np.unravel_index(linear_idx, actionprobs.shape)\n # In case the value is greater than the max allowed action we need to translate it back into\n # negative coordinates\n a = [acc if acc <= 1 else 1 - acc for acc in a]\n\n return a\n\n def greedy_action(self, state):\n\n # Find greedy action according to state-action values Q\n Q_state = self.Q[tuple(state)].copy()\n if not (Q_state == 0).all():\n Q_state[Q_state == 0] = np.nan\n a = np.unravel_index(np.nanargmax(Q_state, axis=None), Q_state.shape)\n # In case the value is greater than the max allowed action we need to translate it back into\n # negative coordinates\n a = [acc if acc <= 1 else 1 - acc for acc in a]\n\n return a\n\n def epsilon_soft_policy(self, action, greedy_action, all_state_actions):\n \"\"\"\n \"\"\"\n if greedy_action:\n if action == greedy_action:\n return 1 - self.epsilon + self.epsilon/len(all_state_actions)\n\n return self.epsilon/len(all_state_actions)\n\n return 1/len(all_state_actions)\n\n def random_start_position(self):\n \"\"\"\n \"\"\"\n\n grid_position = list(random.choice(self.track.start_cells))\n velocity = [0, 0]\n\n return grid_position + velocity\n\n def _get_G_values(self, first_occurence_dict, total_steps):\n \"\"\"\n \"\"\"\n\n # Dict. 
w/ G for first occurence for each s, a pair.\n G = {}\n\n for key, val in first_occurence_dict.items():\n number_rewards = total_steps - val\n\n discounted_rewards = []\n for k in range(number_rewards):\n discounted_rewards.append(self.gamma**k * (-1))\n\n G[key] = sum(discounted_rewards)\n\n return G\n\n def update_policy(self, initialize):\n \"\"\"\n \"\"\"\n\n # Initialize with equal probabilties for all possible actions\n for y_coord in range(self.pi.shape[0]):\n for x_coord in range(self.pi.shape[1]):\n for y_vel in range(self.xvel_range):\n for x_vel in range(self.xvel_range):\n possible_actions = self.track.possible_actions(\n [y_coord, x_coord, y_vel, x_vel])\n for a in possible_actions:\n\n self.pi[y_coord, x_coord, y_vel, x_vel, a[0], a[1]] = self.epsilon / \\\n len(possible_actions)\n\n # Get index of best action\n a_ys, a_xs = tuple(zip(*possible_actions))\n actionvals = self.Q[y_coord, x_coord, y_vel,\n x_vel, a_ys, a_xs]\n a_max_idx = np.argmax(actionvals)\n a_max_y, a_max_x = a_ys[a_max_idx], a_xs[a_max_idx]\n\n self.pi[y_coord, x_coord, y_vel, x_vel, a_max_y,\n a_max_x] += 1 - self.epsilon\n\n actionprobs = self.pi[y_coord,\n x_coord, y_vel, x_vel]\n total_prob = np.sum(actionprobs)\n if not math.isclose(total_prob, 1, abs_tol=0.01):\n print(\n 'Action probabilities must sum to 1.0, but summed to {}, state: {}, actionprobs: {}'.format(total_prob, [y_coord, x_coord, y_vel, x_vel], self.pi[y_coord, x_coord, y_vel, x_vel]))\n sys.exit(1)\n\n @classmethod\n def from_csv(cls, file_path):\n\n file_path = os.path.join(os.getcwd(), file_path)\n\n track = genfromtxt(file_path, delimiter=',')\n track = np.flip(track, axis=0)\n\n return cls(track)\n\n\n# Load map\nrt = RaceTrack.from_csv(\"../racetracks/map1.csv\")\n\n# Run agent\nagent = OffPolicyMonteCarloAgent(rt, epsilon=0.5, gamma=0.9)\nagent.policy_iteration()\n","sub_path":"week4/joakim/OffPolicyMonteCarloAgent.py","file_name":"OffPolicyMonteCarloAgent.py","file_ext":"py","file_size_in_byte":9605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"298906021","text":"\"\"\"\nLow-level module containing various mathematical functions\n\"\"\"\n\n# standard libraries\nfrom math import sqrt, pi, exp\nimport warnings\n\n# external libraries\nimport numpy as np\nfrom scipy import optimize, integrate\n\n# internal libraries\nimport config\n\n\ndef normalize_orbs(eigfuncs_x, xgrid):\n \"\"\"\n Normalizes the KS orbitals within the chosen sphere\n\n Parameters\n ----------\n eigfuncs : ndarray\n The radial KS eigenfunctions :math: 'X_{nl}^{\\sigma}(x)'\n xgrid : ndarray\n The logarithmic grid over which normalization is performed\n\n Returns\n -------\n eigfuncs_x_norm : ndarray\n The radial KS eigenfunctions normalized over the chosen sphere\n \"\"\"\n\n # initialize the normalized eigenfunctions\n eigfuncs_x_norm = eigfuncs_x\n\n # loop over the eigenfunctions\n for n in range(np.shape(eigfuncs_x)[0]):\n # compute the mod squared eigenvalues\n eigfuncs_sq = eigfuncs_x[n].real ** 2 + eigfuncs_x[n].imag ** 2\n # compute the intergal ampsq=4*pi*\\int_dr r^2 |R(r)|^2\n exp_x = np.exp(-xgrid)\n ampsq = int_sphere(exp_x * eigfuncs_sq, xgrid)\n # normalize eigenfunctions\n eigfuncs_x_norm[n] = eigfuncs_x[n] / sqrt(ampsq)\n\n return eigfuncs_x_norm\n\n\ndef int_sphere(fx, xgrid):\n \"\"\"\n Computes integrals over the sphere defined by the logarithmic\n grid provided as input\n\n Parameters\n ----------\n fx : array_like\n The function (array) to be integrated\n xgrid : ndarray\n The 
logarithmic radial grid\n\n Returns\n -------\n I_sph : float\n The value of the integrand\n\n Notes\n -----\n The integral formula is given by\n .. math:: I = 4 \\pi \\int \\dd{x} e^{3x} f(x)\n \"\"\"\n\n func_int = 4.0 * pi * np.exp(3.0 * xgrid) * fx\n I_sph = np.trapz(func_int, xgrid)\n\n return I_sph\n\n\ndef laplace(y, x, axis=-1):\n \"\"\"\n Computes the second-order derivative d^2 y(x) / dx^2\n over the chosen axis of the input array\n\n Parameters\n ----------\n y : ndarray\n array y(x) on which laplacian is computed\n x : ndarray\n x array\n axis: int, optional\n axis over which derivatives are taken\n default : -1\n\n Returns\n -------\n grad2_y : ndarray\n the laplacian of y\n \"\"\"\n\n # first compute the first-order gradient\n grad1_y = np.gradient(y, x, edge_order=2, axis=axis)\n\n # now compute the second-order gradient\n grad2_y = np.gradient(grad1_y, x, edge_order=2, axis=axis)\n\n return grad2_y\n\n\ndef fermi_dirac(eps, mu, beta, n=0):\n \"\"\"\n Computes the Fermi-Dirac function, see notes\n\n Parameters\n ----------\n mu : array_like\n the chemical potential\n beta : float\n the inverse potential\n eps : array_like\n the energies\n n : int\n energy is raised to power n/2 in the numerator (see notes)\n\n Returns\n -------\n f_fd : array_like\n the fermi dirac occupation(s)\n\n Notes\n -----\n The FD function is defined as:\n .. math:: f^{(n)}_{fd}(\\epsilon, \\mu, \\beta) = \\frac{\\epsilon^{(n/2)}{1+\\exp(1+\\beta(\\epsilon - \\mu))}\n \"\"\"\n\n # dfn the exponential function\n # ignore warnings here\n with np.errstate(over=\"ignore\"):\n fn_exp = np.minimum(np.exp(beta * (eps - mu)), 1e12)\n\n # fermi_dirac dist\n f_fd = (eps) ** (n / 2.0) / (1 + fn_exp)\n\n return f_fd\n\n\ndef fd_int_complete(mu, beta, n):\n \"\"\"\n Computes complete Fermi-Dirac integrals (see notes)\n\n Parameters\n ----------\n mu : float\n chemical potential\n beta: float\n inverse temperature\n n : int\n order of Fermi-Dirac integral (see notes)\n\n Returns\n -------\n I_n : float\n the complete fermi-dirac integral\n\n Notes\n -----\n Complete Fermi-Dirac integrals are of the form\n .. 
math:: I_(n)(\\mu,\\beta) = \\int_0^\\inf \\dd{\\epsilon} \\epsilon^(n/2) f_fd(\\mu,\\epsilon,\\beta)\n where n is the order of the integral\n \"\"\"\n\n # use scipy quad integration routine\n limup = np.inf\n\n # ignore integration warnings (omnipresent because of inf upper limit)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n I_n, err = integrate.quad(fermi_dirac, 0, limup, args=(mu, beta, n))\n\n return I_n\n\n\ndef chem_pot(orbs):\n \"\"\"\n Determines the chemical potential by enforcing charge neutrality (see notes)\n Uses scipy.optimize.root_scalar with brentq implementation\n\n Parameters\n ----------\n orbs : object(staticKS.Orbitals)\n the orbitals object\n\n Returns\n -------\n mu : array_like\n chemical potential (spin-dependent)\n\n Notes\n -----\n Finds the roots of the eqn:\n ..math:: \\sum_{nl} (2l+1) f_{fd}(\\epsilon_{nl},\\beta,\\mu) + N_{ub}(\\beta,\\mu) - N_e = 0\n The number of unbound electrons N_{ub} depends on the implementation choice\n \"\"\"\n\n mu = config.mu\n mu0 = mu # set initial guess to existing value of chem pot\n\n # so far only the ideal treatment for unbound electrons is implemented\n if config.unbound == \"ideal\":\n for i in range(config.spindims):\n if config.nele[i] != 0:\n soln = optimize.root_scalar(\n f_root_id,\n x0=mu0[i],\n args=(orbs.eigvals[i], orbs.lbound[i], config.nele[i]),\n method=\"brentq\",\n bracket=[-40, 40],\n options={\"maxiter\": 100},\n )\n mu[i] = soln.root\n # in case there are no electrons in one spin channel\n else:\n mu[i] = np.inf\n\n return mu\n\n\ndef f_root_id(mu, eigvals, lbound, nele):\n \"\"\"\n Functional input for the chemical potential root finding function\n with the ideal approximation for unbound electrons (see notes)\n\n Parameters\n ----------\n mu : array_like\n chemical potential\n eigvals : ndarray\n the energy eigenvalues\n lbound : ndarray\n the lbound [(2l+1)*Theta(e)] matrix\n nele : union(int, float)\n the number of electrons for given spin\n\n Returns\n -------\n f_root : float\n the difference of the predicted electron number with given mu\n and the actual electron number\n\n Notes\n -----\n The returned function is\n ..math:: f = \\sum_{nl} (2l+1) f_{fd}(\\epsilon_{nl},\\beta,\\mu) + N_{ub}(\\beta,\\mu) - N_e\n \"\"\"\n\n # caluclate the contribution from the bound electrons\n if nele != 0:\n occnums = lbound * fermi_dirac(eigvals, mu, config.beta)\n contrib_bound = occnums.sum()\n else:\n contrib_bound = 0.0\n\n # now compute the contribution from the unbound electrons\n # this function uses the ideal approximation\n\n prefac = (2.0 / config.spindims) * config.sph_vol / (sqrt(2) * pi ** 2)\n contrib_unbound = prefac * fd_int_complete(mu, config.beta, 1.0)\n\n # return the function whose roots are to be found\n f_root = contrib_bound + contrib_unbound - nele\n\n return f_root\n","sub_path":"atoMEC/mathtools.py","file_name":"mathtools.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"516218035","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler Problem 152\n=======================\n\nThere are several ways to write the number 1/2 as a sum of inverse squares\nusing distinct integers.\n\nFor instance, the numbers {2,3,4,5,7,12,15,20,28,35} can be used:\n\nIn fact, only using integers between 2 and 45 inclusive, there are exactly\nthree ways to do it, the remaining two being:\n{2,3,4,6,7,9,10,20,28,35,36,45} and 
{2,3,4,6,7,9,12,15,28,30,35,36,45}.\n\nHow many ways are there to write the number 1/2 as a sum of inverse\nsquares using distinct integers between 2 and 80 inclusive?\n\n\"\"\"\n\n\ndef main():\n return \"unimplemented\"\n\n\nif __name__ == \"__main__\":\n import ntpath\n import time\n from common.shared_functions import verify_solution\n\n problem_number = int(ntpath.basename(__file__).replace(\"euler\", \"\").replace(\".py\", \"\"))\n print(\"Retrieving my answer to Euler Problem {0} ...\".format(problem_number))\n\n ts = time.time()\n my_answer = main()\n te = time.time()\n\n print(\"My answer: {1}\".format(problem_number, my_answer))\n\n verification_type = verify_solution(problem_number, my_answer)\n print(\"Verification: {0}\".format(verification_type.name))\n print(\"Took {0} seconds.\".format(te - ts))\n","sub_path":"project-euler/solvers/euler152.py","file_name":"euler152.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"522212268","text":"import PySimpleGUI as sg\nimport os\n\nfirst_column = [\n [\n sg.Text(\"Folder name\"),\n sg.In(size=(25,1), enable_events=True, key='abc'),\n sg.FolderBrowse()\n ],\n [\n sg.Listbox(\n values=[], enable_events=True, size=(40, 20), key='abb'\n )\n ]\n]\n\nsecond_column = [\n [sg.T(\"Choose an image from list on left to display here:\")],\n [sg.Text(size=(40, 1), key='tout')],\n [sg.Image(key=\"-image-\")]\n]\n\nlayout = [\n [\n sg.Column(first_column),\n sg.VSep(),\n sg.Column(second_column)\n ]\n]\n\nwindow = sg.Window(\"Image viewer\", layout)\n\nwhile True:\n event, value = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n break\n if event == \"abc\":\n folder = value[\"abc\"]\n try:\n\n file_list = os.listdir(folder)\n except:\n file_list = []\n\n fnames = [\n f\n for f in file_list\n if os.path.isfile(os.path.join(folder, f))\n and f.lower().endswith((\".png\", \".gif\"))\n ]\n window['abb'].update(fnames)\n elif event == 'abb':\n try:\n filename = os.path.join(\n value['abc'], value['abb'][0]\n )\n window['tout'].update(filename)\n window['-image-'].update(filename = filename)\n except:\n pass\n\nwindow.close()\n","sub_path":"Image-Viewer/imageviewer.py","file_name":"imageviewer.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"540426211","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/4/14 0:06\r\n# @Author : 史哲磊\r\n# @Email : sidian305@163.com\r\n# @File : 寻找素数\r\n# @Software: PyCharm\r\nfor i in range(2,100):\r\n flag=True\r\n for j in range(2,i):\r\n if i//j==i/j:\r\n flag=False\r\n break\r\n if(flag):\r\n print(\"{}是素数。\".format(i))","sub_path":"小天才_学Python。/第六课/寻找素数.py","file_name":"寻找素数.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"212484938","text":"from acounts import Bd\r\nfrom acounts import Conta\r\nfrom acounts import Loguin\r\nfrom feed import Perfil\r\nbanco = Bd()\r\nlog = Loguin()\r\nperfil = Perfil()\r\nclass Sistem:\r\n def __init__(self):\r\n pass\r\n\r\n def menu(self):\r\n print('1 - Cadastrar conta')\r\n print('2 - Loguin')\r\n print('x - Sair')\r\n opcao = input('Digite a opção: ')\r\n\r\n if opcao == '1':\r\n usuario = Conta\r\n usuario.nome = input('Digite seu nome: ')\r\n usuario.idade = input('Digite sua idade: ')\r\n usuario.telefone = input('Digite seu telefone: ')\r\n usuario.endereco = 
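The 1019.py record above iterates the Kaprekar routine: rearrange a four-digit number's digits into descending and ascending order, subtract, and repeat until 6174 appears. The self-contained sketch below only illustrates that arithmetic with a made-up starting value; it is not part of any dataset record, and repdigit inputs (which the record screens out with num % 1111 == 0) are deliberately not handled here.

def kaprekar_step(num):
    digits = sorted("%04d" % num)
    small = int("".join(digits))            # 3524 -> 2345
    large = int("".join(reversed(digits)))  # 3524 -> 5432
    return large - small

n = 3524
while n != 6174:
    n = kaprekar_step(n)  # 3087, then 8352, then 6174
print(n)                  # reaches 6174 after three steps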
input('Digite seu endereço: ')\r\n log.email = input('Digite seu email: ')\r\n log.senha = input('Digite sua senha: ')\r\n banco.insert_user(usuario,log)\r\n if opcao == '2':\r\n email = input('Digite seu email: ')\r\n senha = input('Digite sua senha: ')\r\n if email in log.email and senha in log.senha:\r\n opc = ''\r\n while opc != 'x':\r\n opc = perfil.menu_feed(None)\r\n return(opcao)\r\n","sub_path":"sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"435783345","text":"import plotly.graph_objects as go\nimport pandas as pd\n\ndf = pd.read_csv('/home/arjun/Documents/SFU_Course_Work/Spring2020/cmpt733/blog/blog_git/blog-733/updated.csv')\n\nfig = go.Figure()\n\n# Trial comment for git\n\nfig.add_trace(go.Scattermapbox(\n lat=df.latitude,\n lon=df.longitude,\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=3,\n color='rgb(255, 0, 0)',\n opacity=0.7\n ),\n text=df.description,\n hoverinfo='text'\n ))\n\nfig.update_layout(\n title='UFO Sightings Location and Shape',\n autosize=True,\n hovermode='closest',\n showlegend=False,\n geo = dict(\n lataxis = dict(showgrid = True),\n lonaxis = dict(showgrid = True)),\n mapbox=go.layout.Mapbox(\n accesstoken=\"pk.eyJ1Ijoia2Rlc2FpMTciLCJhIjoiY2s1a2ZzYnlsMGRxcDNrcWxuY245N3M4aiJ9.c1k9nyK3is0jBi9USem_GQ\",\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=38,\n lon=-94\n ),\n pitch=0,\n zoom=-3,\n style='light'\n ),\n)\n\nfig.show()\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"sightings.py","file_name":"sightings.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"498065309","text":"from collect_data.my_twitter import twitter_operations as trd\nfrom collect_data.my_twitter.object import twitter_access_data as tad\nfrom collect_data.data import file_data as df\n\n\ndef main():\n api = init_twitter_api()\n status_list = get_timeline_of_user(api)\n list_of_tweets = trd.get_list_of_tweets(status_list)\n write_tweets_to_file(list_of_tweets)\n\n\ndef write_tweets_to_file(list_of_tweets):\n print('Specify the location you want to save tweets on your machine:')\n path_to_file = str(input())\n df.write_tweets_to_file(path_to_file, list_of_tweets)\n\n\ndef get_timeline_of_user(api):\n print(\"Preferred screen name:\")\n status_list = trd.get_user_timeline(str(input()),\n api,\n 200,\n False)\n return status_list\n\n\ndef init_twitter_api():\n print(\"Twitter Access Data - Path to json-file:\")\n path_to_file = str(input())\n t = tad.TwitterAccessData(path_to_file)\n api = trd.init_api_object(t.get_api_key(),\n t.get_api_secret_key(),\n t.get_access_token(),\n t.get_access_token_secret(),\n \"extended\")\n return api\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"collect_data/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"512232743","text":"# same as described in dictionary. 
py but it only translate\nfrom googletrans import Translator\n\ntranslator = Translator()\n\ndef Translator_expression(parameters):\n try:\n print(parameters)\n word= parameters.get('any')\n language= parameters.get('language', 'en')\n if(language.lower()=='english'):\n langcode= 'en'\n elif(language.lower()=='chinese'):\n langcode= 'ja'\n elif(language.lower()=='hindi'):\n langcode= 'hi'\n elif(language.lower()=='russian'):\n langcode= 'ru'\n elif(language.lower()=='french'):\n langcode= 'fr'\n elif(language.lower()=='japanese'):\n langcode= 'ja'\n elif(language.lower()=='kenya'):\n langcode= 'sw'\n else:\n langcode= 'en'\n data= translator.translate(word, dest=langcode)\n print(data)\n return word,data.text\n except Exception as e:\n return 'Sorry', 'Please try again'+' or write *Translate cat into hindi*'","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25148627","text":"import operator\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom blogs.models import Posts, PageView\nfrom .utils import top_word_counts\n\ndef posts(request):\n # Based on Pankaj Mishra's SO answere here: https://stackoverflow.com/a/45411928/6095646\n # This is Pankaj Mishra's hit counter:\n if (PageView.objects.count()<=0):\n x = PageView.objects.create()\n x.save()\n else:\n x = PageView.objects.all()[0]\n x.hits = x.hits+1\n x.save()\n context = {'pages':x.hits}\n \n # The posts content:\n posts = Posts.objects.all().order_by('-pub_date')\n context.update({'posts':posts})\n post_string = ''\n for post in posts:\n post_string += post.body\n \n # Counting words of Bible.txt + posts content:\n post_words = top_word_counts(post_string.lower())\n alice_words = top_word_counts(\n open(\"wordcounters/Alice.txt\", \"r\").read().lower())\n\n context.update({\n 'post_words': post_words,\n 'alice_words': alice_words\n })\n\n return render(request, 'alls/landings.html', context)\n\n","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"326754524","text":"import networkx as nx\nimport pickle\nimport os\nimport os.path\nimport numpy as np\n\nfrom concurrent.futures import ThreadPoolExecutor,as_completed\n\n\ndef nameText(i):\n return 'text_%08d' % (i,)\n\n\ndef couP(args):\n G, s, t, prevs, path, log = args\n if len(path) == 0:\n if t in G.adj[s]:\n return 1\n return 0\n ans = 0\n aim = path[0]\n adj = [node for node in G.adj[s] if node[:2]\n == aim[:2] and node not in prevs]\n for a in adj:\n ans += couP((G, a, t, prevs+[a], path[1:],None))\n if log is not None:\n i,x,y,total = log\n print('Get Coup(%s,%s)=%s. [total=%s,layer=%s]' % (x, y, ans, total, i))\n return ans\n\n\ndef calAbyIndex(args):\n index, N, train_ids, G, path = args\n executor = ThreadPoolExecutor()\n A = np.zeros((N, N))\n i = index+1\n # tasks = [ executor.submit(couP,args=(G,nameText(train_ids[x]),nameText(train_ids[y]),[], path[1:-1],(i, x, y, N))) for x in range(N) for y in range(0,x+1) ]\n tasks = []\n for x in range(N):\n for y in range(0,x+1):\n # print('submit %s,%s out of %s.' 
% (x,y,N))\n tasks.append(executor.submit(couP,args=(G,nameText(train_ids[x]),nameText(train_ids[y]),[], path[1:-1],(i, x, y, N))))\n ens = [ task.result() for task in as_completed(tasks) ]\n t = 0\n for x in range(N):\n for y in range(0,x+1):\n A[x,y] = A[y,x] = ens[t]\n t += 1\n np.save(os.path.join(baseDir, 'output', 'A-%s.pkl' % i), A)\n\n'''\ndef calAbyIndex(args):\n index, N, train_ids, G, path = args\n # executor = ProcessPoolExecutor(max_workers=32)\n A = np.zeros((N, N))\n i = index+1\n for x in range(N):\n for y in range(0, x+1):\n tx, ty = nameText(train_ids[x]), nameText(train_ids[y])\n p = path[1:-1]\n print('Calculating CouP < path=%s , x=%s ,y=%s | total=%s >...' % (i, tx, ty, N))\n c = couP(G, tx, ty, [], p)\n print('Get Coup(%s,%s)=%s.' % (x, y, c))\n A[x, y] = c\n np.save(os.path.join(baseDir, 'output', 'A-%s.pkl' % i), A)\n'''\n\nif __name__ == \"__main__\":\n # baseDir = 'C:/Users/croxx/Desktop/rcv1'\n baseDir = '/home/LAB/penghao/croxx/HIN_PGCN'\n\n G = pickle.load(open(os.path.join(baseDir, 'output', 'G.pkl'), 'rb'))\n paths = [['text', 'entity', 'text'], ['text', 'keyword', 'text'], ['text', 'entity', 'entity', 'text'], ['text', 'entity', 'keyword', 'text'], ['text', 'keyword', 'entity', 'text'], ['text', 'keyword', 'keyword', 'text'], ['text', 'entity', 'entity', 'entity', 'text'], ['text', 'entity', 'entity', 'keyword', 'text'], [\n 'text', 'entity', 'keyword', 'entity', 'text'], ['text', 'entity', 'keyword', 'keyword', 'text'], ['text', 'keyword', 'entity', 'entity', 'text'], ['text', 'keyword', 'entity', 'keyword', 'text'], ['text', 'keyword', 'keyword', 'entity', 'text'], ['text', 'keyword', 'keyword', 'keyword', 'text']]\n\n N = 23194\n '''\n train_ids = []\n _cs = pickle.load(\n open(os.path.join(baseDir, 'output', '_codes.pkl'), 'rb'))\n\n while len(train_ids) < N:\n for c, ts in _cs.items():\n if len(ts) == 0:\n continue\n while ts[0] in train_ids:\n del ts[0]\n train_ids.append(ts[0])\n print('add train id %s' % ts[0])\n del ts[0]\n\n train_ids = sorted(train_ids)\n for i in train_ids:\n print('train id %s' % i)\n pickle.dump(train_ids, open(os.path.join(baseDir, 'output', 'train_ids.pkl'),'wb'))\n '''\n train_ids = pickle.load(\n open(os.path.join(baseDir, 'output', 'train_ids.pkl'), 'rb'))\n print('load finish.')\n executor = ThreadPoolExecutor()\n '''\n for index, path in enumerate(paths):\n A = np.zeros((14,N, N))\n i = index+1\n for x in range(N):\n for y in range(0, x+1):\n tx, ty = nameText(train_ids[x]), nameText(train_ids[y])\n p = path[1:-1]\n print('Calculating CouP < path=%s , x=%s ,y=%s | total=%s >...' %\n (i, tx, ty, N))\n c = couP(G, tx, ty, [], p)\n print('Get Coup(%s,%s)=%s.' 
% (x, y, c))\n A[i, x, y] = c\n np.save(os.path.join(baseDir, 'output', 'A-%s.pkl' % i), A)\n '''\n # index = input('Please input layer:')\n # index = int(index)\n for index, path in enumerate(paths):\n executor.submit(calAbyIndex,(index, N, train_ids, G, path))\n executor.shutdown(wait=True)\n # calAbyIndex((index,N,train_ids,G,paths[index]))\n \n # pickle.dump(A, open(os.path.join(baseDir, 'output', 'A.pkl'), 'wb'))\n","sub_path":"calSim.py","file_name":"calSim.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"576230869","text":"import streamlit as st \nimport streamlit.components.v1 as stc \n\n# EDA Pkgs\nimport pandas as pd \n\n# NLP Pkgs\nimport spacy\nfrom spacy import displacy\nnlp = spacy.load('en_core_web_sm') # Fixes Error For Deployment for shortlink\nfrom textblob import TextBlob\nfrom collections import Counter\n\n\n# Data Viz Pkgs\nimport matplotlib.pyplot as plt \nimport matplotlib\nmatplotlib.use('Agg')\nimport altair as alt \n\nimport nltk_utils\n\n\n\n\n\nHTML_RANDOM_TEMPLATE = \"\"\"\n
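The calSim.py record above counts instances of typed meta-paths (text-entity-text and so on) between document nodes by recursing over neighbours whose names start with the required type prefix. The snippet below restates that counting idea on a tiny hand-built networkx graph purely for illustration; every node name and edge is invented and nothing here belongs to the dataset record.

import networkx as nx

def count_meta_paths(G, s, t, inner_path, visited=()):
    # inner_path lists the node types between the two endpoints,
    # matched on the first two characters as in the record's couP.
    if not inner_path:
        return 1 if t in G.adj[s] else 0
    aim = inner_path[0]
    total = 0
    for node in G.adj[s]:
        if node[:2] == aim[:2] and node not in visited:
            total += count_meta_paths(G, node, t, inner_path[1:], visited + (node,))
    return total

G = nx.Graph()
G.add_edges_from([
    ("text_1", "entity_a"), ("text_1", "entity_b"),
    ("text_2", "entity_a"), ("text_2", "entity_b"),
])
print(count_meta_paths(G, "text_1", "text_2", ["entity"]))  # 2: via entity_a and entity_b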
\n
Verse of the Day
\n

{}

\n\"\"\"\n\n\nHTML_WRAPPER = \"\"\"
{}
\"\"\"\nHTML_BANNER = \"\"\"\n
\n

StreamBible App

\n
\n \"\"\"\n\n\n\ndef render_entities(raw_text):\n\tdocx = nlp(raw_text)\n\thtml = displacy.render(docx,style='ent')\n\thtml = html.replace(\"\\n\\n\",\"\\n\")\n\tresult = HTML_WRAPPER.format(html)\n\tstc.html(result,height=1000)\n\n\n\ndef plot_mendelhall_curve(docx):\n\tword_length = [ len(token) for token in docx.split()]\n\tword_length_count = Counter(word_length)\n\tsorted_word_length_count = sorted(dict(word_length_count).items())\n\tx,y = zip(*sorted_word_length_count)\n\tfig = plt.figure(figsize=(20,10))\n\tplt.plot(x,y)\n\tplt.title(\"Plot of Word Length Distribution\")\n\tplt.show()\n\tst.pyplot(fig)\n\ndef get_most_common_tokens(docx,num=2):\n\tword_freq = Counter(docx.split())\n\tmost_common_tokens = word_freq.most_common(num)\n\treturn dict(most_common_tokens)\n\n\n\ndef plot_word_freq_with_altair(docx,num=10):\n\tword_freq = Counter(docx.split())\n\tmost_common_tokens = dict(word_freq.most_common(num))\n\tword_freq_df = pd.DataFrame({'tokens':most_common_tokens.keys(),'counts':most_common_tokens.values()})\n\tc = alt.Chart(word_freq_df).mark_bar().encode(\n\t\tx='tokens',y='counts')\n\tst.altair_chart(c,use_container_width=True)\n\n\n\ndef get_tags(docx):\n\ttagged_docx = TextBlob(docx).tags \n\treturn tagged_docx\n\n\nTAGS = {\n 'NN' : 'green',\n 'NNS' : 'green',\n 'NNP' : 'green',\n 'NNPS' : 'green',\n 'VB' : 'blue',\n 'VBD' : 'blue',\n 'VBG' : 'blue',\n 'VBN' : 'blue',\n 'VBP' : 'blue',\n 'VBZ' : 'blue',\n 'JJ' : 'red',\n 'JJR' : 'red',\n 'JJS' : 'red',\n 'RB' : 'cyan',\n 'RBR' : 'cyan',\n 'RBS' : 'cyan',\n 'IN' : 'darkwhite',\n 'POS' : 'darkyellow',\n 'PRP$' : 'magenta',\n 'PRP$' : 'magenta',\n 'DET' : 'black',\n 'CC' : 'black',\n 'CD' : 'black',\n 'WDT' : 'black',\n 'WP' : 'black',\n 'WP$' : 'black',\n 'WRB' : 'black',\n 'EX' : 'yellow',\n 'FW' : 'yellow',\n 'LS' : 'yellow',\n 'MD' : 'yellow',\n 'PDT' : 'yellow',\n 'RP' : 'yellow',\n 'SYM' : 'yellow',\n 'TO' : 'yellow',\n 'None' : 'off'\n }\n\n\n\ndef mytag_visualizer(tagged_docx):\n\tcolored_text = []\n\tfor i in tagged_docx:\n\t\tif i[1] in TAGS.keys():\n\t\t token = i[0]\n\t\t # print(token)\n\t\t color_for_tag = TAGS.get(i[1])\n\t\t result = '{}'.format(color_for_tag,token)\n\t\t colored_text.append(result)\n\tresult = ' '.join(colored_text)\n\t# print(result)\n\treturn result\n\n\n","sub_path":"Streamlit - Bible/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"91547147","text":"#encoding = utf-8\n\ndef max(num):\n strnum = \"%04d\" % num\n return int(''.join(map(str, sorted([int(s) for s in strnum], reverse=True))))\ndef min(num):\n strnum = \"%04d\" % num\n return int(''.join(map(str, sorted([int(s) for s in strnum], reverse=False))))\n\ndef heidong(num):\n mx = max(num)\n mn = min(num)\n rst = mx - mn\n print(\"%04d - %04d = %04d\" % (mx, mn, rst))\n \n if rst == 6174:\n return\n else:\n heidong(rst)\n\nnum = int(input())\nif num % 1111 == 0:\n print(\"%04d - %04d = %04d\" % (num, num, 0))\nelse:\n heidong(num)\n\n","sub_path":"Basic Level/q1019/1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"304742565","text":"from flask import (Flask, request, Response, jsonify)\nfrom flask_cors import CORS\nimport json\nimport http.client, urllib.parse\nfrom gensim.models import FastText\nfrom seq2seq.evaluate.seq2seq import evaluate\nfrom seq2seq.merger.merger import 
composer\nimport kss\n# import seq2seq.merger.unicode\n\napp = Flask(__name__)\nCORS(app, send_wildcard=True)\n\n@app.route('/')\ndef home():\n return 'test'\n\n@app.route('/extractverbphrase', methods=['POST'])\n \ndef extractVerbPhrase():\n \"call the NLP API to extract sentence-final verb-phrase and attendant form data from given sourceText\"\n if request.is_json:\n sourceText = request.json.get('sourceText')\n conjugation = request.json.get('conjugation')\n options = request.json.get('options')\n if options is None:\n options = dict(phraseForm='verbPhrase')\n else:\n return jsonify(success=False, error=\"Expected application/json POST data\")\n # set up the call\n body = json.dumps(dict(sourceText=sourceText, options=options))\n headers = {\"Content-Type\": \"application/json; charset=utf-8\",\n \"Accept\": \"application/json; charset=utf-8\",\n \"Cache-Control\": \"no-cache\",\n \"Content-Length\": str(len(body))\n }\n try:\n conn = http.client.HTTPSConnection(\"alpha.mirinae.io\")\n # local test: \n # conn = http.client.HTTPConnection(\"localhost:2000\")\n conn.request(\"POST\", \"/api/nlp/extractverbphrase\", body, headers)\n response = conn.getresponse()\n except:\n # server down?\n return dict(success=False, error=\"Server not responding\")\n #\n if response.status != 200:\n failReason = response.reason\n return dict(success=False, status=response.status, error=response.reason)\n else:\n try:\n # reading data from verbphrase extraction\n data = response.read()\n data = json.loads(data.decode('utf-8'))\n\n # front part of sentence ex. 나는 자전거를\n frontpart = data['response'][0]['sentence']\n verbpart = data['response'][0]['verbPhrase'].split('~ ')[1]\n frontpart = frontpart.split(verbpart)[0]\n\n # morpheme analysis of verbphrase ex. 타:VV ㄹ 수 있:VMOD 다:SEF\n text = data['response'][0]['morphemeString'].replace(' ','_').replace(';', ' ')\n\n # generate sequence for seq2seq = morpheme array + target conjugation mode\n text = text + conjugation\n\n # seq2seq evaluate calculation\n decoded_words = evaluate(text)\n\n # re-compose output morpheme sequence into complete verb phrase\n renewed_words =[]\n for word in decoded_words:\n if word != '':\n word = word.split(':')[0]\n word = word.replace('_', ' ')\n renewed_words.append(word)\n renewed_words = composer(renewed_words)\n\n # put front part and complete verb phase together\n renewed_words = frontpart + renewed_words\n\n # return\n return jsonify(success=True, response=dict(reconjugation=renewed_words))\n except:\n return dict(success=False, error=\"Illegal JSON response\")\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000, debug=True)","sub_path":"masterserver.py","file_name":"masterserver.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"426146634","text":"import PhysicsTools.HeppyCore.framework.config as cfg\nfrom PhysicsTools.Heppy.analyzers.core.all import *\nfrom PhysicsTools.Heppy.analyzers.objects.all import *\nfrom PhysicsTools.Heppy.analyzers.gen.all import *\nfrom CMGTools.HToZZ4L.analyzers.FourLeptonAnalyzer import *\nfrom CMGTools.HToZZ4L.analyzers.FourLeptonAnalyzer2P2F import *\nfrom CMGTools.HToZZ4L.analyzers.FourLeptonAnalyzerRelaxIdIso import *\nfrom CMGTools.HToZZ4L.analyzers.FourLeptonAnalyzer3P1F import *\nfrom CMGTools.HToZZ4L.analyzers.FourLeptonAnalyzerSS import *\nfrom CMGTools.HToZZ4L.analyzers.FourLeptonEventSkimmer import *\n\nfrom CMGTools.HToZZ4L.analyzers.FSRPhotonMaker import *\nfrom 
CMGTools.HToZZ4L.analyzers.GenFSRAnalyzer import *\nfrom CMGTools.HToZZ4L.analyzers.fourLeptonTree import *\nfrom CMGTools.HToZZ4L.samples.samples_13TeV_Spring15 import triggers_mumu, triggers_ee, triggers_mue, triggers_3e, triggers_3mu, triggers_2mu1e, triggers_2e1mu, triggers_mumu_run1, triggers_ee_run1, triggers_mue_run1, triggers_1mu_iso\nfrom CMGTools.RootTools.samples.triggers_8TeV import triggers_1mu_8TeV, triggers_mumu_8TeV, triggers_mue_8TeV, triggers_ee_8TeV\n\nimport os\n\n\nPDFWeights = []\n\ngenAna = cfg.Analyzer(\n GeneratorAnalyzer, name=\"GeneratorAnalyzer\",\n # BSM particles that can appear with status <= 2 and should be kept\n stableBSMParticleIds = [ 1000022 ],\n # Particles of which we want to save the pre-FSR momentum (a la status 3).\n # Note that for quarks and gluons the post-FSR doesn't make sense,\n # so those should always be in the list\n savePreFSRParticleIds = [ 1,2,3,4,5, 11,12,13,14,15,16, 21 ],\n # Make also the list of all genParticles, for other analyzers to handle\n makeAllGenParticles = True,\n # Make also the splitted lists\n makeSplittedGenLists = True,\n allGenTaus = False,\n # Save LHE weights from LHEEventProduct\n makeLHEweights = True,\n # Print out debug information\n verbose = False,\n )\n\n\ngenFSRAna = cfg.Analyzer(\n GenFSRAnalyzer, name=\"GenFSRAnalyzer\"\n )\n\n\n# Find the initial events before the skim\nskimAnalyzer = cfg.Analyzer(\n SkimAnalyzerCount, name='skimAnalyzerCount',\n useLumiBlocks = False,\n )\n\n# Pick individual events (normally not in the path)\neventSelector = cfg.Analyzer(\n EventSelector,name=\"EventSelector\",\n toSelect = [] # here put the event numbers (actual event numbers from CMSSW)\n )\n# Apply json file (if the dataset has one)\njsonAna = cfg.Analyzer(\n JSONAnalyzer, name=\"JSONAnalyzer\",\n )\n\n# Filter using the 'triggers' and 'vetoTriggers' specified in the dataset\ntriggerAna = cfg.Analyzer(\n TriggerBitFilter, name=\"TriggerBitFilter\",\n )\n\n# Create flags for trigger bits\ntriggerFlagsAna = cfg.Analyzer(\n TriggerBitAnalyzer, name=\"TriggerFlags\",\n processName = 'HLT',\n triggerBits = {\n # Doubles\n 'DoubleMu' : triggers_mumu,\n 'DoubleEl' : triggers_ee,\n 'MuEG' : triggers_mue,\n # Triples\n 'TripleEl' : triggers_3e,\n 'TripleMu' : triggers_3mu,\n 'DoubleMuEl' : triggers_2mu1e,\n 'DoubleElMu' : triggers_2e1mu,\n # Singles\n 'SingleMu' : triggers_1mu_iso,\n # 8 TeV (and closest equivalent in spring 15 mc)\n 'SingleMu_8TeV' : triggers_1mu_8TeV + triggers_1mu_iso,\n 'DoubleMu_8TeV' : triggers_mumu_8TeV + triggers_mumu_run1,\n 'MuEG_8TeV' : triggers_mue_8TeV + triggers_mue_run1,\n 'DoubleEl_8TeV' : triggers_ee_8TeV + triggers_ee_run1,\n }\n )\n\n\n# Select a list of good primary vertices (generic)\nvertexAna = cfg.Analyzer(\n VertexAnalyzer, name=\"VertexAnalyzer\",\n vertexWeight = None,\n fixedWeight = 1,\n verbose = False\n )\n\n\n# This analyzer actually does the pile-up reweighting (generic)\npileUpAna = cfg.Analyzer(\n PileUpAnalyzer, name=\"PileUpAnalyzer\",\n true = True, # use number of true interactions for reweighting\n makeHists=False\n )\n\npdfwAna = cfg.Analyzer(\n PDFWeightsAnalyzer, name=\"PDFWeightsAnalyzer\",\n PDFWeights = [ pdf for pdf,num in PDFWeights ]\n )\n\n\n\nlepAna = cfg.Analyzer(\n LeptonAnalyzer, name=\"leptonAnalyzer\",\n # input collections\n muons='slimmedMuons',\n electrons='slimmedElectrons',\n rhoMuon= 'fixedGridRhoFastjetAll',\n rhoElectron = 'fixedGridRhoFastjetAll',\n # energy scale corrections and ghost muon suppression (off by default)\n 
doMuScleFitCorrections=False, # \"rereco\"\n doRochesterCorrections=False,\n doElectronScaleCorrections=False, # \"embedded\" in 5.18 for regression\n doSegmentBasedMuonCleaning=True,\n notCleaningElectrons=True, # no deltaR(ele,mu) cleaning at this step\n # inclusive very loose muon selection\n inclusive_muon_id = \"POG_Global_OR_TMArbitrated\",\n inclusive_muon_pt = 5,\n inclusive_muon_eta = 2.4,\n inclusive_muon_dxy = 0.5,\n inclusive_muon_dz = 1.0,\n muon_dxydz_track = \"muonBestTrack\",\n # loose muon selection\n loose_muon_id = \"POG_Global_OR_TMArbitrated\",\n loose_muon_pt = 5,\n loose_muon_eta = 2.4,\n loose_muon_dxy = 0.5,\n loose_muon_dz = 1,\n loose_muon_isoCut = lambda muon : muon.sip3D() < 4 and muon.muonBestTrackType() != 2,\n # inclusive very loose electron selection\n inclusive_electron_id = \"\",\n inclusive_electron_pt = 7,\n inclusive_electron_eta = 2.5,\n inclusive_electron_dxy = 0.5,\n inclusive_electron_dz = 1.0,\n inclusive_electron_lostHits = 1.0,\n # loose electron selection\n loose_electron_id = \"\",\n loose_electron_pt = 7,\n loose_electron_eta = 2.5,\n loose_electron_dxy = 0.5,\n loose_electron_dz = 1.0,\n loose_electron_isoCut = lambda x: x.sip3D() < 4,\n loose_electron_lostHits = 1.0,\n # muon isolation correction method (can be \"rhoArea\" or \"deltaBeta\")\n mu_isoCorr = \"deltaBeta\" ,\n mu_effectiveAreas = \"Phys14_25ns_v1\", #(can be 'Data2012' or 'Phys14_25ns_v1')\n mu_tightId = \"POG_ID_Loose\",\n # electron isolation correction method (can be \"rhoArea\" or \"deltaBeta\")\n ele_isoCorr = \"rhoArea\" ,\n el_effectiveAreas = \"Phys14_25ns_v1\" , #(can be 'Data2012' or 'Phys14_25ns_v1')\n ele_tightId = \"MVA_ID_NonTrig_Phys14Fix_HZZ\",\n # Mini-isolation, with pT dependent cone: will fill in the miniRelIso, miniRelIsoCharged, miniRelIsoNeutral variables of the leptons (see https://indico.cern.ch/event/368826/ )\n doMiniIsolation = False, # off by default since it requires access to all PFCandidates \n packedCandidates = 'packedPFCandidates',\n miniIsolationPUCorr = 'rhoArea', # Allowed options: 'rhoArea' (EAs for 03 cone scaled by R^2), 'deltaBeta', 'raw' (uncorrected), 'weights' (delta beta weights; not validated)\n miniIsolationVetoLeptons = None, # use 'inclusive' to veto inclusive leptons and their footprint in all isolation cones\n # minimum deltaR between a loose electron and a loose muon (on overlaps, discard the electron)\n min_dr_electron_muon = 100.0,\n # do MC matching \n do_mc_match = True, # note: it will in any case try it only on MC, not on data\n match_inclusiveLeptons = False, # match to all inclusive leptons\n )\n\nfrom CMGTools.HToZZ4L.analyzers.ElectronMuonCleaner import ElectronMuonCleaner\neleMuClean = cfg.Analyzer(\n ElectronMuonCleaner, name='eleMuClean',\n selectedMuCut = lambda mu : mu.tightId(), #isPFMuon() or mu.isGlobalMuon(),\n otherMuCut = lambda mu : False, # (mu.isPFMuon() or mu.isGlobalMuon()) and muon.muonBestTrackType() != 2, # uncomment to include also muons with sip > 4\n mustClean = lambda ele, mu, dr: dr < 0.05\n)\n\n## Jets Analyzer (generic)\njetAna = cfg.Analyzer(\n JetAnalyzer, name='jetAnalyzer',\n jetCol = 'slimmedJets',\n copyJetsByValue = False, #Whether or not to copy the input jets or to work with references (should be 'True' if JetAnalyzer is run more than once)\n genJetCol = 'slimmedGenJets',\n rho = ('fixedGridRhoFastjetAll','',''),\n jetPt = 30.,\n jetEta = 4.7,\n jetEtaCentral = 4.7,\n jetLepDR = 0.4,\n jetLepArbitration = (lambda jet,lepton : lepton), # you can decide which to keep in case of 
overlaps; e.g. if the jet is b-tagged you might want to keep the jet\n cleanSelectedLeptons = True, #Whether to clean 'selectedLeptons' after disambiguation. Treat with care (= 'False') if running Jetanalyzer more than once\n minLepPt = 0,\n lepSelCut = lambda lepton : lepton.tightId() and lepton.relIso04 < (0.4 if abs(lepton.pdgId())==13 else 0.5),\n relaxJetId = False, \n doPuId = True,\n recalibrateJets = False, # True, False, 'MC', 'Data'\n recalibrationType = \"AK4PFchs\",\n mcGT = \"Summer15_V5_p6_MC\",\n jecPath = \"${CMSSW_BASE}/src/CMGTools/RootTools/data/jec/\",\n shiftJEC = 0, # set to +1 or -1 to get +/-1 sigma shifts\n addJECShifts = False,\n smearJets = False,\n shiftJER = 0, # set to +1 or -1 to get +/-1 sigma shifts \n alwaysCleanPhotons = False,\n cleanJetsFromFirstPhoton = False,\n cleanJetsFromTaus = False,\n cleanJetsFromIsoTracks = False,\n doQG = False,\n cleanGenJetsFromPhoton = False,\n )\n\n\nmetAna = cfg.Analyzer(\n METAnalyzer, name=\"metAnalyzer\",\n metCollection = \"slimmedMETs\",\n noPUMetCollection = \"slimmedMETs\", \n copyMETsByValue = False,\n doTkMet = False,\n doMetNoPU = False,\n doMetNoMu = False,\n doMetNoEle = False,\n doMetNoPhoton = False,\n recalibrate = False,\n jetAnalyzerCalibrationPostFix = \"\",\n candidates='packedPFCandidates',\n candidatesTypes='std::vector',\n dzMax = 0.1,\n collectionPostFix = \"\",\n )\n\n\n\nfsrPhotonMaker = cfg.Analyzer(\n FSRPhotonMaker, name=\"fsrPhotonMaker\",\n leptons=\"selectedLeptons\",\n electronID = lambda x: True, #x.electronID(\"POG_MVA_ID_Run2_NonTrig_HZZ\")\n electronVeto = \"superclusterEta\", # alternatives: \"electronEta\" and in the future \"pfCandReference\"\n)\n\n\nfourLeptonAnalyzerSignal = cfg.Analyzer(\n FourLeptonAnalyzer, name=\"fourLeptonAnalyzerSignal\",\n tag = \"Signal\",\n attachFsrToGlobalClosestLeptonOnly = True\n)\n\nfourLeptonAnalyzer2P2F = cfg.Analyzer(\n FourLeptonAnalyzer2P2F, name=\"fourLeptonAnalyzer2P2F\",\n tag = \"2P2F\",\n attachFsrToGlobalClosestLeptonOnly = True\n)\n\nfourLeptonAnalyzer3P1F = cfg.Analyzer(\n FourLeptonAnalyzer3P1F, name=\"fourLeptonAnalyzer3P1F\",\n tag = \"3P1F\",\n attachFsrToGlobalClosestLeptonOnly = True\n)\n\nfourLeptonAnalyzerSS = cfg.Analyzer(\n FourLeptonAnalyzerSS, name=\"fourLeptonAnalyzerSS\",\n tag = \"SS\",\n attachFsrToGlobalClosestLeptonOnly = True\n)\n\nfourLeptonAnalyzerRelaxIdIso = cfg.Analyzer(\n FourLeptonAnalyzerRelaxIdIso, name=\"fourLeptonAnalyzerRelaxIdIso\",\n tag = \"RelaxIdIso\",\n maxCand = 999, # save all, not just the best one\n attachFsrToGlobalClosestLeptonOnly = True\n)\n\nfourLeptonEventSkimmer = cfg.Analyzer(\n FourLeptonEventSkimmer, name=\"fourLeptonEventSkimmer\",\n required = ['bestFourLeptonsSignal','bestFourLeptons2P2F','bestFourLeptons3P1F','bestFourLeptonsSS', 'bestFourLeptonsRelaxIdIso' ]\n\n)\n\ntreeProducer = cfg.Analyzer(\n AutoFillTreeProducer, name='fourLeptonTreeProducer',\n vectorTree = False,\n saveTLorentzVectors = False, # can set to True to get also the TLorentzVectors, but trees will be bigger\n globalVariables = hzz_globalVariables,\n globalObjects = hzz_globalObjects,\n collections = hzz_collections,\n defaultFloatType = 'F',\n)\n\n\n\n\n# Core sequence of all common modules\nhzz4lCoreSequence = [\n skimAnalyzer,\n genAna,\n# genFSRAna,\n #eventSelector,\n jsonAna,\n triggerAna,\n pileUpAna,\n vertexAna,\n lepAna,\n eleMuClean,\n jetAna,\n metAna,\n triggerFlagsAna,\n fsrPhotonMaker,\n fourLeptonAnalyzerSignal, \n fourLeptonAnalyzer2P2F,\n fourLeptonAnalyzer3P1F,\n fourLeptonAnalyzerSS,\n 
fourLeptonAnalyzerRelaxIdIso,\n fourLeptonEventSkimmer,\n treeProducer\n]\n","sub_path":"CMGTools/HToZZ4L/python/analyzers/hzz4lCore_modules_cff.py","file_name":"hzz4lCore_modules_cff.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"384831701","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport torch\nimport math\nfrom flask import jsonify\nfrom flask import Flask\nfrom flask import request\nimport random\n\napp = Flask(__name__) # 创建1个Flask实例\nquata_degree = 12\nquata_school = 2596\nquata_major = 1703\nquata_province = 33\n\n# train_type 可取值为min和max【min表示使用的是简历中的salary_min训练的模型,max表示使用的是简历中的salary_max训练的模型】\nmin_train_type = 'min'\nmax_train_type = 'max'\n\n# 读取模型\nmin_model_path = 'model_' + min_train_type + '.ckpt'\nmin_model = torch.load(min_model_path)\n# 转化为测试模式\nmin_model.eval()\n\n# 读取模型\nmax_model_path = 'model_' + max_train_type + '.ckpt'\nmax_model = torch.load(max_model_path)\n# 转化为测试模式\nmax_model.eval()\n\n\ndef load_model(train_type):\n if train_type == max_train_type:\n return max_model\n return min_model\n\n\n# 传入的参数:\n# 学校【school_code,_other,_zhuan,_ben,_2_first_rate,_211,_c9,_top_2,_985,province_code】\n# 专业【major_code】\n# 学历【degree】\n# 职么力【zhimeli】\n\n# 返回值:预测的薪资【salary】\nschool_info = pd.read_csv('./datas/university.csv', encoding='utf8', index_col='sid')\nschool_dict = school_info.to_dict()\n\nmajor_info = pd.read_csv('./datas/major.csv', encoding='utf8', index_col='name')\nmajor_info.index = major_info.index.map(lambda name: name.strip())\nmajor_dict = major_info.to_dict()\n\ndegree_info = pd.read_csv('./datas/degree.csv', encoding='utf8', index_col='name')\ndegree_info.index = degree_info.index.map(lambda name: name.strip())\ndegree_dict = degree_info.to_dict()\n\nprovince_info = pd.read_csv('./datas/province.csv', encoding='utf8', index_col='name')\nprovince_info.index = province_info.index.map(lambda name: name.strip())\nprovince_dict = province_info.to_dict()\n\n\n@app.route('/get_salary', methods=['POST', 'GET']) # 路由系统生成 视图对应url,1. decorator=app.route() 2. 
decorator(first_flask)\ndef first_flask(): # 视图函数\n parameter = request.args.get('P')\n print(parameter)\n res = get_salary(parameter)\n if res > 35000.:\n res = 15000 + random.uniform(-5000, 5000)\n if res < 2000:\n res = 2000 + random.uniform(1000, 2000)\n return jsonify(res)\n\n\ndef transform(pos, quata):\n # 传入向量的位置pos和向量的��度quata,返回长度为quata且pos位置为1其余位置为0的向量\n array = [0] * quata\n array[pos - 1] = 1\n return array\n\n\ndef get_salary(features):\n # parameter = \"西华大学,软件工程,本科,60,min\"\n keys = features.split(',')\n if len(keys) == 5:\n school_name = keys[0].strip()\n major = keys[1].strip()\n degree = keys[2].strip()\n zhimeli = float(keys[3])\n train_type = keys[4].strip()\n\n # 学历\n if degree_dict['id'].get(degree):\n degree_id = degree_dict['id'].get(degree)\n else:\n degree_id = degree_dict['id'].get('unknown')\n\n dict_school = dict(zip(school_dict['name'].values(), school_dict['name'].keys()))\n if dict_school.get(school_name):\n school_id = dict_school.get(school_name)\n else:\n school_id = dict_school.get('unknown')\n\n # 专业\n if major_dict['id'].get(major):\n major_id = major_dict['id'].get(major)\n else:\n major_id = major_dict['id'].get('unknown')\n # 学校的各个维度信息\n # _other,_zhuan,_ben,_2_first_rate,_211,_c9,_top_2,_985 顺序不要变\n _other = school_dict['_other'][school_id]\n _zhuan = school_dict['_zhuan'][school_id]\n _ben = school_dict['_ben'][school_id]\n _2_first_rate = school_dict['_2_first_rate'][school_id]\n _211 = school_dict['_211'][school_id]\n _c9 = school_dict['_c9'][school_id]\n _top_2 = school_dict['_top_2'][school_id]\n _985 = school_dict['_985'][school_id]\n\n # 省份\n if school_dict['province'].get(school_id):\n province_name = school_dict['province'].get(school_id)\n else:\n province_name = 'unknown'\n # 城市\n if school_dict['city'].get(school_id):\n school_city = school_dict['city'].get(school_id)\n else:\n school_city = 'unknown'\n\n province_id = province_dict['id'].get(province_name)\n\n features = []\n\n features.extend(transform(degree_id, quata_degree))\n features.extend(transform(school_id, quata_school))\n features.extend(transform(major_id, quata_major))\n features.append(_other)\n features.append(_zhuan)\n features.append(_ben)\n features.append(_2_first_rate)\n features.append(_211)\n features.append(_c9)\n features.append(_top_2)\n features.append(_985)\n features.extend(transform(province_id, quata_province))\n if zhimeli >= 80:\n zhimeli = 80\n if zhimeli <= 30:\n zhimeli = 30\n # 职么力用log放缩处理\n features.append(math.log(zhimeli))\n # major_high,major_hot,degree_w,city_w,area_w\n # major_high\n if major_dict['high'].get(major):\n major_high = int(major_dict['high'].get(major))\n else:\n major_high = int(major_dict['high'].get('unknown'))\n # major_hot\n if major_dict['hot'].get(major):\n major_hot = int(major_dict['hot'].get(major))\n else:\n major_hot = int(major_dict['hot'].get('unknown'))\n # degree_w\n if degree_dict['w'].get(degree):\n degree_w = int(degree_dict['w'].get(degree))\n else:\n degree_w = int(degree_dict['w'].get('unknown'))\n\n # city_w 3,1,1\n if province_dict['center'].get(province_name):\n center_city = province_dict['center'].get(province_name)\n if center_city == school_city:\n city_w = 3\n else:\n city_w = 1\n else:\n city_w = 1\n # area_w 3,2,1\n area_w = 1\n for area in ['east', 'middle', 'west']:\n if province_dict[area].get(province_name):\n tmp = province_dict[area].get(province_name)\n if tmp > 0:\n area_w = tmp\n break\n features.append(major_high)\n features.append(major_hot)\n features.append(degree_w)\n 
features.append(city_w)\n features.append(area_w)\n\n features = torch.tensor(features)\n print('features length:{},model type:{}'.format(len(features), train_type))\n model = load_model(train_type)\n out = model(features)\n print('预测毕业薪资为:{:.2f}'.format(math.e ** float(out)))\n res = math.e ** float(out)\n # 过大过小的数据,进行截断处理\n if res > 35000.:\n res = 35000\n if res < 2000:\n res = 3500\n return res\n else:\n print('arguments is error!')\n raise RuntimeError('参数错误')\n\n\n# 训练数据字段顺序\n# degree_code,school_code,major_code,_other,_zhuan,_ben,_2_first_rate,_211,_c9,_top_2,_985,province_code,salary_min,salary_max,zhimeli_min,zhimeli_max\n# 调用模型需要使字段顺序和训练模型时的顺序一致\nif __name__ == '__main__':\n app.config['JSON_AS_ASCII'] = False\n app.run(host='127.0.0.1', port=5000)\n","sub_path":"大数据/pyspark/zml_salary_predict/model_v2/nn_predict_restful.py","file_name":"nn_predict_restful.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"34068072","text":"import json\n\nimport numpy as np\nfrom textblob import TextBlob\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\nclass Tweet:\n def __init__(self, text, date):\n self.text = text\n self.date = date\n self.vader_pos = None\n self.vader_neg = None\n self.vader_neu = None\n self.vader_compound = None\n self.textblob_polarity = None\n self.textblob_subjectivity = None\n self.textblob_p_pos = None\n self.textblob_p_neg = None\n\n def calculate_vader_score(self):\n ps = SentimentIntensityAnalyzer().polarity_scores(self.text)\n self.vader_pos = np.round(ps[\"pos\"], 2)\n self.vader_neg = np.round(ps[\"neg\"], 2)\n self.vader_neu = np.round(ps[\"neu\"], 2)\n self.vader_compound = np.round(ps[\"compound\"], 2)\n\n def calculate_textblob_score(self, naive):\n tb_pattern = TextBlob(self.text).sentiment\n tb_naive = naive(self.text).sentiment\n self.textblob_polarity = np.round(tb_pattern.polarity, 2)\n self.textblob_subjectivity = np.round(tb_pattern.subjectivity, 2)\n self.textblob_p_pos = np.round(tb_naive.p_pos, 2)\n self.textblob_p_neg = np.round(tb_naive.p_neg, 2)\n\n def calculate_all_scores(self, naive):\n self.calculate_vader_score()\n self.calculate_textblob_score(naive)\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n","sub_path":"package/entity/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593095239","text":"from db import nova_conexao\nfrom mysql.connector import ProgrammingError\n\nwith nova_conexao() as conexao:\n try:\n cursor = conexao.cursor()\n cursor.execute(\"SHOW TABLES\")\n\n for i, tables in enumerate(cursor, start=1):\n print(f'Table {i}: {tables[0]}')\n except ProgrammingError as e:\n print(f'Error: {e.msg}')\n","sub_path":"banco_dados/listar_tabelas.py","file_name":"listar_tabelas.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"94148704","text":"import math\n\nimport click\nimport pandas as pd\n\nfrom . 
import download_images\n\n\ndef getPointLatLng(x, y, lat, lng, size_x, size_y, zoom):\n parallelMultiplier = math.cos(lat * math.pi / 180)\n degreesPerPixelX = 360 / math.pow(2, zoom + 8)\n degreesPerPixelY = 360 / math.pow(2, zoom + 8) * parallelMultiplier\n pointLat = lat - degreesPerPixelY * (y - size_y / 2)\n pointLng = lng + degreesPerPixelX * (x - size_x / 2)\n\n return (pointLat, pointLng)\n\n\nimage_size = 0.01\n\n\ndef get_patch(row, padding=0.06):\n ne = getPointLatLng(row.size_x, 0, row.center_lat_image, row.center_lon_image, row.size_x, row.size_y, row.zoom)\n nw = getPointLatLng(0, 0, row.center_lat_image, row.center_lon_image, row.size_x, row.size_y, row.zoom)\n se = getPointLatLng(row.size_x, row.size_y, row.center_lat_image, row.center_lon_image, row.size_x, row.size_y,\n row.zoom)\n size_lat = ne[0] - se[0]\n size_lon = ne[1] - nw[1]\n return pd.Series(dict(\n y_min=(0.5 - (row.min_lat - row.center_lat_image) / size_lat) + padding, # add a small buffer\n y_max=(0.5 - (row.max_lat - row.center_lat_image) / size_lat) - padding,\n x_min=(0.5 + (row.min_lon - row.center_lon_image) / size_lon) - padding,\n x_max=(0.5 + (row.max_lon - row.center_lon_image) / size_lon) + padding,\n osm_id=row.osm_id\n ))\n\n\n@click.command()\n@click.option('--input-image-csv', help='CSV of image location, zoom and size information', required=True, type=str)\n@click.option('--input-object-csv', help='CSV of object location data: should be called object_location_data_clustered.csv', required=True, type=str)\n@click.option('--output-csv', help='Path to output bbox CSV', required=True, type=str)\ndef cli(input_image_csv, input_object_csv, output_csv):\n image_df = pd.read_csv(input_image_csv)\n image_df[\"image_id\"] = image_df.apply(\n lambda row: download_images.get_image_id(row.center_lat, row.center_lon, row.zoom, row.size_x, row.size_y),\n axis=1\n )\n object_df = pd.read_csv(input_object_csv).merge(\n image_df, how=\"inner\", on=\"cluster_id\", suffixes=(\"\", \"_image\")\n )\n\n bboxes = object_df.apply(get_patch, axis=1)\n bboxes[\"osm_id\"] = bboxes[\"osm_id\"].astype(int)\n bboxes[\"image_id\"] = object_df[\"image_id\"]\n bboxes.to_csv(output_csv, index=False)\n","sub_path":"osm_ai_tools/scripts/generate_bboxes.py","file_name":"generate_bboxes.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"615063199","text":"\"\"\"\nKellyServices spider created on the top of ATSSpider\n\nscrapy crawl kelly_services -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://kelly.secure.force.com/CandidateExperience/CandExpJobSearch?keywords=&location=&jobCategoryList=\"\n\nSample URL:\n https://kelly.secure.force.com/CandidateExperience/CandExpJobSearch?keywords=&location=&jobCategoryList=\n\"\"\"\n\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import NormalizedJoin, Prefix, ShrinkURL, RemoveBadElements\nfrom brightcorp.lib.utils import extract_first\n\n\nclass KellyServices(ATSSpider):\n\n name = 'kelly_services'\n ref_re = r'id=(.*?)&'\n\n def parse(self, response):\n sel = Selector(response)\n jobcategories = sel.xpath(\n '//div[@class=\"search\"]//select[@class=\"ddljobcategory\"]/option[not(@value=\"null\")]/@value'\n ).extract()\n form_data = {\n 'AJAXREQUEST': '_viewRoot',\n 
'JobSearchResults:j_id114:j_id115:searchForm': 'JobSearchResults:j_id114:j_id115:searchForm',\n 'JobSearchResults:j_id114:j_id115:searchForm:txtkeywords': 'Keywords',\n 'JobSearchResults:j_id94:j_id95:searchForm:ddljobcategory': '',\n 'JobSearchResults:j_id114:j_id115:searchForm:txtlocation': 'Postal Code or City, State',\n 'JobSearchResults:j_id114:j_id115:searchForm:ddlRadius': '40',\n 'JobSearchResults:j_id114:j_id115:searchForm:txtExcludeKeywords': 'Exclude Keywords',\n 'JobSearchResults:j_id114:j_id115:searchForm:txtJobReferenceNumber': 'Job Reference #',\n 'com.salesforce.visualforce.ViewState': extract_first(sel.xpath(\n '//input[@id=\"com.salesforce.visualforce.ViewState\"]/@value'\n )),\n 'com.salesforce.visualforce.ViewStateVersion': extract_first(sel.xpath(\n '//input[@id=\"com.salesforce.visualforce.ViewStateVersion\"]/@value'\n )),\n 'com.salesforce.visualforce.ViewStateMAC': extract_first(sel.xpath(\n '//input[@id=\"com.salesforce.visualforce.ViewStateMAC\"]/@value'\n )),\n 'JobSearchResults:j_id114:j_id115:searchForm:btnFind': 'JobSearchResults:j_id114:j_id115:searchForm:btnFind',\n }\n for jobcategory in jobcategories:\n form_data.update({\n 'JobSearchResults:j_id114:j_id115:searchForm:ddljobcategory': jobcategory,\n })\n yield FormRequest(\n callback=self.parse_job_list,\n formdata=form_data,\n url=response.url\n )\n\n def parse_job_list(self, response):\n sel = Selector(text=response.body, type='html')\n for href in sel.xpath(\n '//span[contains(@id, \"JobSearchResults:\")]//div[@class=\"view\"]/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n\n # pagination\n next_page = sel.xpath(\n '//div[@id=\"pagerbar\"]/div[@class=\"pages\"]/a[@title=\"Next\"]/@id'\n ).extract()\n if next_page:\n form_data = {\n 'AJAXREQUEST': '_viewRoot',\n 'JobSearchResults:j_id94:j_id95:enhancedSearch': 'JobSearchResults:j_id94:j_id95:enhancedSearch',\n 'com.salesforce.visualforce.ViewState': extract_first(sel.xpath(\n '//input[@id=\"com.salesforce.visualforce.ViewState\"]/@value'\n )),\n 'com.salesforce.visualforce.ViewStateVersion': extract_first(sel.xpath(\n '//input[@id=\"com.salesforce.visualforce.ViewStateVersion\"]/@value'\n )),\n 'com.salesforce.visualforce.ViewStateMAC': extract_first(sel.xpath(\n '//input[@id=\"com.salesforce.visualforce.ViewStateMAC\"]/@value'\n )),\n 'next': 'next',\n next_page[0]: next_page[0],\n }\n yield FormRequest(\n callback=self.parse_job_list,\n formdata=form_data,\n dont_filter=True,\n headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},\n url=urljoin(response.url, '/CandidateExperience/CandExpJobSearch'),\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@class=\"jobheader\"]/div[contains(@class, \"title\")]/text()'\n )\n loader.add_xpath(\n 'location',\n '//div/div[contains(text(), \"Location\")]/following-sibling::div[1]/text()'\n )\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.ref_re\n )\n loader.add_value('url', response.url, ShrinkURL(['searchFlag']))\n loader.add_xpath(\n 'description',\n '//div/div[@class=\"jobdescription\"]',\n RemoveBadElements(['a', 'img'])\n )\n loader.add_xpath(\n 'jobtype',\n [\n '//div/div[contains(text(), \"Work Type\")]/following-sibling::div[1]/text()',\n '//div/div[contains(text(), \"Employment 
Type\")]/following-sibling::div[1]/text()',\n ],\n NormalizedJoin(', ')\n )\n loader.add_xpath(\n 'jobcategory',\n '//div/div[contains(text(), \"Category\")]/following-sibling::div[1]/text()'\n )\n loader.add_xpath(\n 'educationrequirements',\n '//div/div[contains(text(), \"Highest Education\")]/following-sibling::div[1]/text()'\n )\n loader.add_xpath(\n 'industry',\n '//div/div[contains(text(), \"Industry\")]/following-sibling::div[1]/text()'\n )\n loader.add_value('apply_url', response.url, ShrinkURL(['searchFlag']))\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/kelly_services.py","file_name":"kelly_services.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"336617503","text":"import numpy as np\r\n\r\ndef Buishand_U_change_point_detection(inputdata):\r\n inputdata1 = np.array(inputdata)\r\n inputdata_mean = np.mean(inputdata1)\r\n if isinstance(inputdata,list):\r\n n=len(inputdata)\r\n else:\r\n n = inputdata.shape[0]\r\n k = range(n)\r\n Sk = [np.sum(inputdata1[0:x+1] - inputdata_mean) for x in k]\r\n sigma = np.sqrt(np.sum((inputdata1-np.mean(inputdata1))**2)/(n-1))\r\n U = np.sum((Sk[0:(n - 2)]/sigma)**2)/(n * (n + 1))\r\n Ska = np.abs(Sk)\r\n S = np.max(Ska)\r\n K = list(Ska).index(S) + 1\r\n Skk = (Sk/sigma)\r\n# b=[]\r\n# for i in K:\r\n# tmp=inputdata.index[i]\r\n# b.append(tmp.strftime(\"%Y-%m-%d\"))\r\n #print(b)\r\n # plt.plot()\r\n return K\r\n","sub_path":"OutbreakPAD/Detection_Methods/Buishand_U_change_point_detection.py","file_name":"Buishand_U_change_point_detection.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"596376484","text":"\"\"\"create read_number\n\nRevision ID: 24c129ee1f62\nRevises: \nCreate Date: 2019-03-21 04:55:06.964484\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '24c129ee1f62'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('article_data', sa.Column('read_number', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
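The Buishand_U_change_point_detection record above returns K, the 1-based position of the largest absolute partial sum of deviations from the mean. The sketch below condenses just that returned statistic (it ignores the U value the record also computes internally) and runs it on synthetic numbers with an obvious mean shift; the data are invented purely for demonstration.

import numpy as np

def buishand_change_point(x):
    x = np.asarray(x, dtype=float)
    Sk = np.cumsum(x - x.mean())            # partial sums of deviations from the mean
    return int(np.argmax(np.abs(Sk))) + 1   # 1-based index of the most likely break

data = [0.0] * 20 + [5.0] * 20
print(buishand_change_point(data))  # 20 -> the level shifts right after the 20th sample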
###\n op.drop_column('article_data', 'read_number')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/24c129ee1f62_create_read_number.py","file_name":"24c129ee1f62_create_read_number.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"411659209","text":"import os\nfrom pycrunchbase import CrunchBase\n\n# when you store a key in your bash_profile it automatically loads to memory\n# everytime you open your bash shell so you can call local variables from it\nAPI_KEY = os.environ[\"crunchbaseapi_ACCESS_KEY_ID\"]\ncb = CrunchBase(API_KEY)\ncompany = cb.organization ('netflix')\n\nall_news = []\ndef scrape_page(company_news):\n for news in company_news:\n all_news.append({'title': news.title, 'news_url': news.url, 'uuid': news.uuid,\n 'author':news.author, 'posted_on': news.posted_on,\n 'created_at': news.created_at, 'updated_at': news.updated_at})\n\ncompany_news = company.news\nscrape_page(company_news)\nfor i in xrange(5):\n company_news = cb.more(company_news)\n scrape_page(company_news)\n break\n\n\n# # all_news_urls\n# for news in all_news:\n# INSERT (company, title, news_url, uuid, author, posted_on,\n# posted_on_trust_code,created_at, updated_at) INTO \n# db.insert(company, news_url)\n#\n#\n# CREATE TABLE crunchbase_news (\n# CompanyID string,\n# CompanyName string,\n# newa_url\n# );\n\n\n\n\n# select all urls with more than one\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591612191","text":"\"\"\"\nHelper functions for generating Concrete UUIDs\n\"\"\"\n\n# Force 'import uuid' to import the Python standard library module\n# named \"uuid\", and not the \"concrete.uuid\" module\nfrom __future__ import absolute_import\n\nimport uuid as python_uuid\n\nfrom thrift.Thrift import TType\nfrom concrete.uuid.ttypes import UUID\nfrom concrete.metadata.ttypes import AnnotationMetadata\nfrom concrete.util.mem_io import communication_deep_copy\n\nfrom inspect import isroutine\nimport random\nimport logging\n\n\ndef generate_UUID():\n \"\"\"Helper function for generating a Concrete UUID object\n \"\"\"\n return UUID(uuidString=str(python_uuid.uuid4()))\n\n\ndef hex_to_bin(h):\n return int(h, 16)\n\n\ndef bin_to_hex(b, n=None):\n h = hex(b)[2:]\n if n is None:\n n = len(h)\n elif len(h) > n:\n raise ValueError('hex string \"%s\" is longer than %d chars' % (h, n))\n return ('0' * (n - len(h))) + h\n\n\ndef split_uuid(u):\n p = u.split('-')\n\n valid_input = (\n len(p) == 5 and\n len(p[0]) == 8 and\n len(p[1]) == 4 and\n len(p[2]) == 4 and\n len(p[3]) == 4 and\n len(p[4]) == 12\n )\n if not valid_input:\n raise ValueError(\n 'uuid \"%s\" is not of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'\n % u\n )\n\n xs = p[0] + p[1]\n ys = p[2] + p[3]\n zs = p[4]\n\n return (xs, ys, zs)\n\n\ndef join_uuid(xs, ys, zs):\n valid_input = (\n len(xs) == 12 and\n len(ys) == 8 and\n len(zs) == 12\n )\n if not valid_input:\n raise ValueError('uuid pieces do not have lengths 12, 8, 12')\n\n u = '-'.join((xs[:8], xs[8:], ys[:4], ys[4:], zs))\n\n return u\n\n\ndef generate_hex_unif(n):\n return ''.join(random.choice('abcdef0123456789') for i in xrange(n))\n\n\ndef generate_uuid_unif():\n return join_uuid(generate_hex_unif(12),\n generate_hex_unif(8),\n generate_hex_unif(12))\n\n\nclass _AnalyticUUIDGenerator(object):\n '''\n UUID generator for a given analytic in a 
given communication.\n '''\n\n def __init__(self, u):\n (self._xs, ys, zs) = split_uuid(u)\n self._ys = generate_hex_unif(len(ys))\n self._z = hex_to_bin(generate_hex_unif(len(zs)))\n self._z_len = len(zs)\n self._z_bound = 2**(4 * len(zs))\n self.n = 0\n\n def __iter__(self):\n return self\n\n def next(self):\n '''\n Generate and return a new concrete UUID.\n StopIteration will never be raised.\n '''\n self._z = (self._z + 1) % self._z_bound\n self.n += 1\n return UUID(uuidString=join_uuid(\n self._xs, self._ys, bin_to_hex(self._z, self._z_len)\n ))\n\n\nclass AnalyticUUIDGeneratorFactory(object):\n '''\n Factory for a compressible UUID generator.\n\n One factory should be created per communication, and a new generator\n should be created from that factory for each analytic processing the\n communication. Usually each program represents a single analytic,\n so common usage is\n\n augf = AnalyticUUIDGeneratorFactory(comm)\n aug = augf.create()\n for :\n annotation. = aug.next()\n \n\n or if you're creating a new communication\n\n augf = AnalyticUUIDGeneratorFactory()\n aug = augf.create()\n comm = \n comm.uuid = aug.next()\n for :\n annotation. = aug.next()\n \n\n where the annotation objects might be objects of type\n Parse, DependencyParse, TokenTagging, CommunicationTagging, etc.\n '''\n\n def __init__(self, comm=None):\n if comm is None:\n self.comm_uuid = generate_uuid_unif()\n else:\n self.comm_uuid = comm.uuid.uuidString\n\n def create(self):\n '''\n Create and return a UUID generator for a new analytic.\n '''\n return _AnalyticUUIDGenerator(self.comm_uuid)\n\n\ndef _filtered_getmembers(obj):\n '''\n Generate key-value pairs of object members that may contain UUIDs.\n Over-generate, but filter the output enough that concrete objects\n can be traversed recursively using this function without leading to\n stack overflows or infinite loops.\n '''\n\n for k in dir(obj):\n if not (k[0] == '_' or k == 'thrift_spec' or k == 'read' or\n k == 'write' or k == 'validate'):\n v = getattr(obj, k)\n if not (isroutine(v) or\n isinstance(v, int) or isinstance(v, float) or\n isinstance(v, str) or isinstance(v, unicode)):\n yield (k, v)\n\n\n_FILTERED_TTYPES = set((TType.STRUCT, TType.LIST, TType.MAP, TType.SET))\n\n\ndef _fast_filtered_getmembers(obj):\n 'Fast thrift-specific implementation of filtered_getmembers.'\n\n if hasattr(obj, 'thrift_spec'):\n for s in obj.thrift_spec:\n if s is not None:\n t = s[1]\n if t in _FILTERED_TTYPES:\n k = s[2]\n yield (k, getattr(obj, k))\n\n\nclass UUIDClustering(object):\n '''\n Representation of the UUID instance clusters in a concrete\n communication (each cluster represents the set of nested members of\n the communication that reference or are identified by a given UUID).\n '''\n\n def __init__(self, comm):\n self._clusters = dict() # map: UUID -> set of nested members\n self._search(comm)\n\n def hashable_clusters(self):\n '''\n Return the set of unlabeled UUID clusters in a unique and\n hashable format. 
Two UUIDClusterings c1 and c2 are equivalent\n (the two underlying communications' UUID structures are\n equivalent) if and only if:\n\n c1.hashable_clusters() == c2.hashable_clusters()\n '''\n return set(tuple(sorted(c)) for c in self._clusters.values())\n\n def _search(self, obj, prefix=()):\n '''\n Search obj for UUIDs, calling _add_uuid_field when UUIDs are\n found and calling _search on other object members.\n When _search calls itself, prefix is appended with the object\n member name, forming a uniquely identifiable tuple\n representation of the path from the root object to a nested\n object member.\n '''\n\n if isinstance(obj, UUID):\n self._add_uuid_field(obj.uuidString, prefix)\n elif isinstance(obj, list):\n for (i, v) in enumerate(obj):\n self._search(v, prefix + (('list', i),))\n elif isinstance(obj, set):\n raise ValueError('UUIDClustering does not support sets')\n elif isinstance(obj, dict):\n for (k, v) in obj.items():\n self._search(v, prefix + (('dict', k),))\n else:\n for (k, v) in _filtered_getmembers(obj):\n self._search(v, prefix + (k,))\n\n def _add_uuid_field(self, u, f):\n '''\n Add UUID field f (a unique, hashable representation of the path\n from the root communication to a nested UUID object) to the UUID\n cluster indexed by UUID string u.\n '''\n if u in self._clusters:\n self._clusters[u].add(f)\n else:\n self._clusters[u] = set([f])\n\n\nclass UUIDCompressor(object):\n\n def __init__(self, single_analytic=False):\n self.single_analytic = single_analytic\n\n def compress(self, comm):\n 'Return a deep copy of comm with compressed UUIDs.'\n\n cc = communication_deep_copy(comm)\n self.augf = AnalyticUUIDGeneratorFactory(cc)\n self.augs = dict()\n self.uuid_map = dict()\n\n self._compress_uuids(cc)\n self._compress_uuid_refs(cc)\n\n return cc\n\n def _compress_uuids(self, obj, name_is_uuid=False, tool=None):\n 'Generate new UUIDs in \"uuid\" fields and save mapping'\n\n tool = self._get_tool(obj, tool)\n\n if name_is_uuid:\n if isinstance(obj, UUID):\n obj.uuidString = self._gen_uuid(obj, tool)\n else:\n logging.warning('uuid not instance of UUID')\n\n if not isinstance(obj, UUID): # we already took care of \"uuid\"\n self._apply(\n lambda elt, elt_name_is_uuid: self._compress_uuids(\n elt, name_is_uuid=elt_name_is_uuid, tool=tool\n ),\n obj\n )\n\n def _compress_uuid_refs(self, obj, name_is_uuid=False, tool=None):\n 'Update UUID references (not in \"uuid\" fields) using saved mapping'\n\n tool = self._get_tool(obj, tool)\n\n if isinstance(obj, UUID):\n if not name_is_uuid:\n obj.uuidString = self.uuid_map[obj.uuidString]\n else:\n self._apply(\n lambda elt, elt_name_is_uuid: self._compress_uuid_refs(\n elt, name_is_uuid=elt_name_is_uuid, tool=tool\n ),\n obj\n )\n\n def _get_tool(self, obj, tool=None):\n '''\n Return tool for this object, given the parent tool;\n update self.augs\n '''\n\n if hasattr(obj, 'metadata'):\n if isinstance(obj.metadata, AnnotationMetadata):\n tool = obj.metadata.tool\n else:\n logging.warning('metadata not instance of AnnotationMetadata')\n if self.single_analytic:\n tool = None\n if tool not in self.augs:\n self.augs[tool] = self.augf.create()\n return tool\n\n def _gen_uuid(self, old_uuid, tool):\n '''\n Return a new UUID for the provided tool, using self.augs;\n update self.uuid_map\n '''\n\n aug = self.augs[tool]\n new_uuid = aug.next()\n if old_uuid.uuidString in self.uuid_map:\n raise ValueError('encountered UUID %s twice, aborting' %\n old_uuid.uuidString)\n self.uuid_map[old_uuid.uuidString] = new_uuid.uuidString\n return 
new_uuid.uuidString\n\n @classmethod\n def _apply(cls, f, x):\n '''\n Apply f to the members of x if it is a basic container type,\n otherwise apply f to x directly.\n '''\n\n if isinstance(x, list):\n for elt in x:\n f(elt, False)\n elif isinstance(x, set):\n for elt in x:\n f(elt, False)\n elif isinstance(x, dict):\n for elt in x.values():\n f(elt, False)\n else:\n for (k, v) in _fast_filtered_getmembers(x):\n f(v, k == 'uuid')\n\n\ndef compress_uuids(comm, verify=False, single_analytic=False):\n '''\n Create a copy of communication comm with UUIDs converted according\n to the compressible UUID scheme. Return a 2-tuple containing that\n new communication and the UUIDCompressor object used to perform\n the conversion.\n\n If verify is True, use a heuristic to verify the UUID link structure\n is preserved in the new communication. If single_analytic is True,\n use a single analytic prefix for all UUIDs in comm.\n\n If verify is True and comm has references added, throw a ValueError\n because verification would cause an infinite loop.\n '''\n\n if verify and hasattr(comm, 'tokenizationForUUID'):\n raise ValueError('cannot verify communication with references')\n\n uc = UUIDCompressor(single_analytic=single_analytic)\n\n new_comm = uc.compress(comm)\n\n num_old_uuids = len(set(uc.uuid_map.keys()))\n num_new_uuids = len(set(uc.uuid_map.values()))\n\n if verify:\n c1 = UUIDClustering(comm).hashable_clusters()\n c2 = UUIDClustering(new_comm).hashable_clusters()\n\n # Verification is c1 == c2;\n # also check UUID map lengths are the same as a sanity-check\n if num_old_uuids == num_new_uuids and c1 == c2:\n logging.info('verified %s (%d uuid instances, %d uuids)'\n % (comm.id, sum(len(c) for c in c1), len(c1)))\n else:\n logging.error('%s failed verification' % comm.id)\n logging.error('uuid counts: %d -> %d'\n % (num_old_uuids, num_new_uuids))\n logging.error('verified number of uuids: %d -> %d'\n % (len(c1), len(c2)))\n logging.error('verified number of uuid instances: %d -> %d'\n % (sum(map(len, c1)), sum(map(len, c2))))\n raise Exception('%s failed verification' % comm.id)\n\n else:\n if num_old_uuids != num_new_uuids:\n logging.warning('uuid counts are not the same (%d -> %d)'\n % (num_old_uuids, num_new_uuids))\n\n return (new_comm, uc)\n","sub_path":"concrete/util/concrete_uuid.py","file_name":"concrete_uuid.py","file_ext":"py","file_size_in_byte":12690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556390634","text":"from os import sched_get_priority_max\nfrom PySide2.QtWidgets import QWidget, QOpenGLWidget\nfrom PySide2.QtCore import QRect, QSize, Qt, QPoint, QMargins, QLine, QTimer, QRectF, QPointF, Signal\nfrom PySide2.QtGui import QColor, QPen, QPainter, QMouseEvent, QPolygon, QPainterPath, QVector2D, QPainterPathStroker, QPixmap\nfrom llvmlite.ir.values import Value\nfrom descriptors import *\nfrom simulator import Simulator\nfrom elements import *\n\n\nclass WireMap:\n def __init__(self):\n self._map = defaultdict(set)\n self._conns = defaultdict(set)\n\n def add_wire(self, l):\n if l.dy() == 0:\n sx = min(l.x1(), l.x2())\n ex = max(l.x1(), l.x2())\n y = l.y1()\n for x in range(sx + 1, ex + 1):\n self._map[(x, y)].add((x - 1, y))\n self._map[(x - 1, y)].add((x, y))\n else:\n sy = min(l.y1(), l.y2())\n ey = max(l.y1(), l.y2())\n x = l.x1()\n for y in range(sy + 1, ey + 1):\n self._map[(x, y)].add((x, y - 1))\n self._map[(x, y - 1)].add((x, y))\n\n def remove_wire(self, l):\n if l.dy() == 0:\n sx = min(l.x1(), l.x2())\n ex = 
max(l.x1(), l.x2())\n y = l.y1()\n for x in range(sx + 1, ex + 1):\n self._map[(x, y)].discard((x - 1, y))\n self._map[(x - 1, y)].discard((x, y))\n else:\n sy = min(l.y1(), l.y2())\n ey = max(l.y1(), l.y2())\n x = l.x1()\n for y in range(sy + 1, ey + 1):\n self._map[(x, y)].discard((x, y - 1))\n self._map[(x, y - 1)].discard((x, y))\n\n def _rebuild(self):\n pass\n\n def get_connected_pins(self, desc, pin):\n return self._conns[desc][pin]\n\n\nclass Schematic:\n def __init__(self, root):\n self.root = root\n self.wires = set()\n self.junctions = set()\n self.elements = list()\n self._wire_map = WireMap()\n\n def add_element(self, element):\n self.elements.append(element)\n\n def remove_element(self, element):\n self.elements.remove(element)\n\n def _check_wire(self, l):\n if l.dx() != 0 and l.dy() != 0:\n raise ValueError('a wire should be either horizontal or vertical')\n\n def add_wire(self, l):\n self._check_wire(l)\n self.wires.add(l)\n self._wire_map.add_wire(l)\n\n def remove_wire(self, l):\n self._check_wire(l)\n self._wire_map.remove_wire(l)\n self.wires.discard(l)\n\n def add_junction(self, p):\n self.junctions.add(p)\n\n\ndef _construct_grid_pixmap():\n pixmap = QPixmap(128, 128)\n\n w, h = pixmap.width(), pixmap.height()\n lines = list()\n\n for x in range(0, w, TILE):\n lines.append(QPointF(x, 0))\n lines.append(QPointF(x, h))\n\n for y in range(0, h, TILE):\n lines.append(QPointF(0, y))\n lines.append(QPointF(w, y))\n\n painter = QPainter(pixmap)\n painter.fillRect(pixmap.rect(), Qt.white)\n painter.setPen(QPen(Qt.gray, 0.3))\n painter.drawLines(lines)\n\n return pixmap\n\n\n_GRID_PIXMAP = None\n\n\nclass SchematicEditor(QWidget):\n\n selection_changed = Signal(list)\n\n def __init__(self, simulator: Simulator, parent=None):\n super().__init__(parent)\n\n self.simulator = simulator\n self._schematic = None\n\n self.setMouseTracking(True)\n self.setFocusPolicy(Qt.StrongFocus)\n\n self._antialiased = True\n self._grid_shown = True\n self._grid_lines = list()\n\n global _GRID_PIXMAP\n if _GRID_PIXMAP is None:\n _GRID_PIXMAP = _construct_grid_pixmap()\n\n def add_element(self, element):\n self.schematic.add_element(element)\n self._observe_element(element)\n self.update()\n\n def remove_element(self, element):\n self.schematic.remove_element(element)\n self._unobserve_element(element)\n self.update()\n\n def _observe_element(self, element):\n for pin in element.pins:\n self.simulator.observe(pin.path, self.update)\n\n def _unobserve_element(self, element):\n for pin in element.pins:\n self.simulator.observe(pin.path, self.update)\n\n @property\n def schematic(self):\n return self._schematic\n\n @schematic.setter\n def schematic(self, schematic):\n if self.schematic is not None:\n for element in self.schematic.elements:\n self._unobserve_element(element)\n\n self._schematic = schematic\n\n for element in self.schematic.elements:\n self._observe_element(element)\n\n self.update()\n\n @property\n def grid_shown(self):\n return self._grid_shown\n\n @grid_shown.setter\n def grid_shown(self, value):\n self._grid_shown = bool(value)\n self.update()\n\n @property\n def antialiased(self):\n return self._antialiased\n\n @antialiased.setter\n def antialiased(self, value):\n self._antialiased = bool(value)\n self.update()\n\n def _draw_wire(self, painter, p1, p2):\n painter.setPen(QPen(Qt.black, 2))\n painter.drawLine(p1, p2)\n\n def _draw_grid(self, painter):\n painter.setBrush(_GRID_PIXMAP)\n painter.drawRect(self.rect())\n\n def paintEvent(self, event):\n painter = QPainter(self)\n\n if 
self.antialiased:\n painter.setRenderHint(QPainter.Antialiasing)\n\n if self.grid_shown:\n self._draw_grid(painter)\n else:\n painter.fillRect(self.rect(), Qt.white)\n\n for element in self.schematic.elements:\n bb = QRectF(element.bb)\n bb.translate(element.pos * TILE)\n\n painter.translate(bb.center())\n painter.rotate(90 * element.rotation)\n painter.translate(-bb.center() + bb.topLeft())\n element.paint(painter)\n for pin in element.pins:\n val = self.simulator.get_pin_value(pin.path)\n if val is None:\n painter.setPen(QPen(Qt.blue, 6))\n elif val != 0:\n painter.setPen(QPen(Qt.green, 6))\n else:\n painter.setPen(QPen(Qt.black, 6))\n painter.drawPoint(pin.pos)\n painter.translate(-bb.topLeft())\n\n for wire in self.schematic.wires:\n self._draw_wire(painter, wire.p1() * TILE, wire.p2() * TILE)\n","sub_path":"schematic.py","file_name":"schematic.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417035722","text":"# This file is part of the Blockchain Data Trading Simulator\n# https://gitlab.com/MatthiasLohr/bdtsim\n#\n# Copyright 2020 Matthias Lohr \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom typing import List, Tuple, Type\n\nfrom web3 import Web3\n\nfrom bdtsim.util.xor import xor_crypt\nfrom .merkle import MerkleTreeNode, MerkleTreeLeaf, MerkleTreeHashLeaf, from_leaves\n\n\nB032 = b'\\x00' * 32\n\n\nclass DecodingError(Exception):\n pass\n\n\nclass NodeDigestMismatchError(DecodingError):\n def __init__(self, in1: MerkleTreeLeaf, in2: MerkleTreeLeaf, out: MerkleTreeLeaf, index_in: int,\n index_out: int, expected_digest: bytes, actual_digest: bytes) -> None:\n self.in1 = in1\n self.in2 = in2\n self.out = out\n self.index_in = index_in\n self.index_out = index_out\n self.expected_digest = expected_digest\n self.actual_digest = actual_digest\n\n\nclass LeafDigestMismatchError(NodeDigestMismatchError):\n pass\n\n\ndef crypt(value: bytes, index: int, key: bytes) -> bytes:\n return xor_crypt(value, Web3.solidityKeccak(['uint256', 'bytes32'], [index, key]))\n\n\ndef encode(root: MerkleTreeNode, key: bytes) -> MerkleTreeNode:\n leaves_enc = [crypt(leaf.data, index, key) for index, leaf in enumerate(root.leaves)]\n digests_enc = [crypt(digest, 2 * len(leaves_enc) + index, key) for index, digest in enumerate(root.digests_pack)]\n return from_leaves([MerkleTreeLeaf(x) for x in leaves_enc]\n + [MerkleTreeHashLeaf(x) for x in digests_enc]\n + [MerkleTreeHashLeaf(B032)])\n\n\ndef encode_forge_first_leaf(root: MerkleTreeNode, key: bytes) -> MerkleTreeNode:\n leaf_data = [leaf.data for leaf in root.leaves]\n leaf_data[0] = b'\\0' * len(leaf_data[0])\n leaf_data_enc = [crypt(data, index, key) for index, data in enumerate(leaf_data)]\n digests_enc = [crypt(digest, 2 * len(leaf_data_enc) + index, key) for index, digest in enumerate(root.digests_pack)]\n return from_leaves([MerkleTreeLeaf(x) for x in leaf_data_enc]\n + [MerkleTreeHashLeaf(x) for x in digests_enc]\n + 
[MerkleTreeHashLeaf(B032)])\n\n\ndef encode_forge_first_leaf_first_hash(root: MerkleTreeNode, key: bytes) -> MerkleTreeNode:\n leaf_data = [leaf.data for leaf in root.leaves]\n leaf_data[0] = b'\\0' * len(leaf_data[0])\n leaf_data_enc = [crypt(data, index, key) for index, data in enumerate(leaf_data)]\n digests = root.digests_pack\n digests[0] = MerkleTreeNode(MerkleTreeLeaf(leaf_data[0]), MerkleTreeLeaf(leaf_data[1])).digest\n digests_enc = [crypt(digest, 2 * len(leaf_data_enc) + index, key) for index, digest in enumerate(digests)]\n return from_leaves([MerkleTreeLeaf(x) for x in leaf_data_enc]\n + [MerkleTreeHashLeaf(x) for x in digests_enc]\n + [MerkleTreeHashLeaf(B032)])\n\n\ndef decode(root: MerkleTreeNode, key: bytes) -> Tuple[MerkleTreeNode, List[NodeDigestMismatchError]]:\n leaf_bytes_enc = root.leaves\n if not math.log2(len(leaf_bytes_enc)).is_integer():\n raise ValueError('Merkle Tree must have 2^x leaves')\n if leaf_bytes_enc[-1] != B032:\n raise ValueError('The provided Merkle Tree does not appear to be encoded')\n\n errors: List[NodeDigestMismatchError] = []\n digest_start_index = int(len(leaf_bytes_enc) / 2)\n node_index = 0\n digest_index = digest_start_index\n nodes: List[MerkleTreeNode] = [MerkleTreeLeaf(crypt(leaf_bytes_enc[i].data, i, key))\n for i in range(0, digest_start_index)]\n while len(nodes) > 1:\n nodes_new = []\n for i in range(0, len(nodes), 2):\n node = MerkleTreeNode(nodes[i], nodes[i + 1])\n expected_digest = crypt(leaf_bytes_enc[digest_index].data, digest_start_index + digest_index, key)\n\n if node_index < digest_start_index:\n error_type: Type[NodeDigestMismatchError] = LeafDigestMismatchError\n actual_digest = node.digest\n else:\n error_type = NodeDigestMismatchError\n actual_digest = Web3.solidityKeccak(['bytes32', 'bytes32'], [\n crypt(leaf_bytes_enc[node_index].data, digest_start_index + node_index, key),\n crypt(leaf_bytes_enc[node_index + 1].data, digest_start_index + node_index + 1, key)\n ])\n\n if expected_digest != actual_digest:\n errors.append(error_type(\n in1=leaf_bytes_enc[node_index],\n in2=leaf_bytes_enc[node_index + 1],\n out=leaf_bytes_enc[digest_index],\n index_in=node_index,\n index_out=digest_index,\n expected_digest=expected_digest,\n actual_digest=actual_digest\n ))\n\n node_index += 2\n digest_index += 1\n nodes_new.append(node)\n\n nodes = nodes_new\n\n return nodes[0], errors\n","sub_path":"bdtsim/protocol/fairswap/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"198546837","text":"# -#- coding: utf-8 -*-\nimport hashlib\nimport os\nimport shutil\nfrom datetime import datetime\nimport mimetypes as mimes\n\nfrom django.core.cache import cache\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils._os import safe_join\n\nfrom elfinder.conf import settings as elfinder_settings\nfrom elfinder.volume_drivers.base import BaseVolumeDriver\n\n\nclass FileExists(IOError):\n pass\n\n\nclass WrapperBase(object):\n def __init__(self, root):\n self.root = root\n\n def rename(self, new_name):\n parent_dir = os.path.dirname(self.path)\n new_abs_path = safe_join(self.root, parent_dir, new_name)\n if not os.path.exists(new_abs_path):\n os.rename(self.path, new_abs_path)\n self.path = new_abs_path\n else:\n raise FileExists()\n\n def is_dir(self):\n return False\n\n def is_file(self):\n 
return False\n\n def get_hash(self):\n return '%s_%s' % (self._real_hash(self.root)[0:2], self._real_hash(self.path))\n\n def get_parent_hash(self):\n if os.path.abspath(self.path) == os.path.abspath(self.root):\n return ''\n parent_path = os.path.dirname(self.path)\n return DirectoryWrapper(parent_path, self.root).get_hash()\n\n def _real_hash(self, path):\n path = '%s' % path\n enc_path = path.encode('utf8')\n m = hashlib.md5()\n m.update(enc_path)\n return str(m.hexdigest())\n\n\nclass FileWrapper(WrapperBase):\n def __init__(self, file_path, root):\n if not os.path.isfile(file_path):\n raise ValueError(\"'%s' is not a valid file path\" % file_path)\n self._file = None\n self.path = file_path\n super(FileWrapper, self).__init__(root)\n\n def is_file(self):\n return True\n\n def get_path(self):\n return self._file_path\n\n def set_path(self, path):\n self._file_path = path\n if self._file is not None:\n self._file.close()\n self._file = None\n\n path = property(get_path, set_path)\n\n @property\n def name(self):\n return self._file.name\n\n def get_chunks(self):\n if self._file is None:\n self._file = File(open(self.path, 'rb'))\n return self._file.chunks()\n\n def get_contents(self):\n if self._file is None:\n self._file = File(open(self.path))\n self._file.seek(0)\n return self._file.read()\n\n def set_contents(self, data):\n if self._file is not None:\n self._file.close()\n self._file = None\n _file = File(open(self.path, \"ab\"))\n _file.write(data)\n _file.close()\n\n contents = property(get_contents, set_contents)\n\n def get_info(self):\n path = self.path\n info = {\n 'name': os.path.basename(path),\n 'hash': self.get_hash(),\n 'date': datetime.fromtimestamp(os.stat(path).st_mtime).strftime(\"%d %b %Y %H:%M\"),\n 'size': self.get_size(),\n 'read': os.access(path, os.R_OK),\n 'write': os.access(path, os.W_OK),\n 'rm': os.access(path, os.W_OK),\n 'url': self.get_url(),\n 'phash': self.get_parent_hash() or '',\n }\n if settings.DEBUG:\n info['abs_path'] = path\n\n # parent_hash = self.get_parent_hash()\n # if parent_hash:\n # info['phash'] = parent_hash\n\n mime, is_image = self.get_mime(path)\n # if is_image and self.imglib and False:\n # try:\n # import Image\n # l['tmb'] = self.get_thumb_url(f)\n # except ImportError:\n # pass\n # except Exception:\n # raise\n\n info['mime'] = mime\n\n return info\n\n def get_size(self):\n return os.lstat(self.path).st_size\n\n def get_url(self):\n rel_path = os.path.relpath(self.path, self.root).replace('\\\\', '/')\n user_path = '%s/' % (self.root.split('/')[-1],)\n return '%s%s%s' % (elfinder_settings.ELFINDER_FS_DRIVER_URL, user_path, rel_path)\n\n def get_mime(self, path):\n mime = mimes.guess_type(path)[0] or 'Unknown'\n if mime.startswith('image/'):\n return mime, True\n else:\n return mime, False\n\n def remove(self):\n os.remove(self.path)\n\n @classmethod\n def mkfile(cls, file_path, root):\n if not os.path.exists(file_path):\n f = open(file_path, \"w\")\n f.close()\n return cls(file_path, root)\n else:\n raise Exception(\"File '%s' already exists\" % os.path.basename(file_path))\n\n\nclass DirectoryWrapper(WrapperBase):\n def __init__(self, dir_path, root):\n if not os.path.isdir(dir_path):\n raise ValueError(\"'%s' is not a valid dir path\" % dir_path)\n self.path = dir_path\n super(DirectoryWrapper, self).__init__(root)\n\n def is_dir(self):\n return True\n\n def get_path(self):\n return self._dir_path\n\n def set_path(self, path):\n self._dir_path = path\n\n path = property(get_path, set_path)\n\n def get_info(self):\n path = 
self.path\n info = {\n 'name': os.path.basename(path),\n 'hash': self.get_hash(),\n 'date': datetime.fromtimestamp(os.stat(path).st_mtime).strftime(\"%d %b %Y %H:%M\"),\n 'mime': 'directory',\n 'size': self.get_size(),\n 'read': os.access(path, os.R_OK),\n 'write': os.access(path, os.W_OK),\n 'rm': os.access(path, os.W_OK),\n 'dirs': self.has_dirs(),\n 'phash': self.get_parent_hash() or ''\n }\n if settings.DEBUG:\n info['abs_path'] = path\n\n # parent_hash = self.get_parent_hash()\n # if parent_hash:\n # info['phash'] = parent_hash\n\n return info\n\n def get_size(self):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(self.path):\n for f in filenames:\n fp = safe_join(self.root, dirpath, f)\n if os.path.exists(fp):\n total_size += os.stat(fp).st_size\n return total_size\n\n def has_dirs(self):\n for item in os.listdir(self.path):\n if os.path.isdir(os.path.join(self.path, item)):\n return True\n return False\n\n def remove(self):\n shutil.rmtree(self.path)\n\n @classmethod\n def mkdir(cls, dir_path, root):\n recent_cache_key = f'recent-mkdir-{dir_path}'\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n cache.set(recent_cache_key, dir_path, 2)\n return cls(dir_path, root)\n else:\n # FIXME: Ugly hack\n if cache.get(recent_cache_key):\n return cls(dir_path, root)\n else:\n raise Exception(\"Directory '%s' already exists\" % os.path.basename(dir_path))\n\n\nclass FileSystemVolumeDriver(BaseVolumeDriver):\n def __init__(self, fs_driver_root=elfinder_settings.ELFINDER_FS_DRIVER_ROOT,\n *args, **kwargs):\n self.root = os.path.abspath(fs_driver_root)\n\n def get_volume_id(self):\n # return u\"fsvolume\"\n return DirectoryWrapper(self.root, self.root).get_hash().split(\"_\")[0]\n\n def get_info(self, target):\n path = self._find_path(target)\n return self._get_path_info(path)\n\n def get_tree(self, target, ancestors=False, siblings=False):\n path = self._find_path(target)\n\n tree = [self._get_path_info(path)]\n tree.extend([self._get_path_info(safe_join(self.root, path, child)) for child in os.listdir(path)])\n\n if ancestors:\n proc_path = path\n while proc_path != self.root:\n tree.append(self._get_path_info(proc_path))\n proc_path, head = os.path.split(proc_path)\n for ancestor_sibling in os.listdir(proc_path):\n ancestor_sibling_abs = safe_join(self.root, proc_path, ancestor_sibling)\n if os.path.isdir(ancestor_sibling_abs):\n tree.append(self._get_path_info(ancestor_sibling_abs))\n\n if siblings and not (path == self.root):\n parent_path, curr_dir = os.path.split(path)\n for sibling in os.listdir(parent_path):\n if sibling == curr_dir:\n continue\n sibling_abs = safe_join(self.root, parent_path, sibling)\n tree.append(self._get_path_info(sibling_abs))\n # print\n # print \"*******************************************\"\n # print\n # for t in tree:\n # print t\n # print\n return tree\n\n def read_file_view(self, request, hash):\n file_path = self._find_path(hash)\n from django.http import HttpResponse\n resp = HttpResponse(content_type='application/force-download')\n file = FileWrapper(file_path, self.root)\n for chunk in file.get_chunks():\n resp.write(chunk)\n\n return resp\n\n def mkdir(self, name, parent):\n parent_path = self._find_path(parent)\n new_abs_path = safe_join(self.root, parent_path, name)\n return DirectoryWrapper.mkdir(new_abs_path, self.root).get_info()\n\n def mkfile(self, name, parent):\n parent_path = self._find_path(parent)\n new_abs_path = safe_join(self.root, parent_path, name)\n return FileWrapper.mkfile(new_abs_path, 
self.root).get_info()\n\n def rename(self, name, target):\n obj = self._get_path_object(self._find_path(target))\n obj.rename(name)\n return {\n \"added\": [obj.get_info()],\n \"removed\": [target],\n }\n\n def list(self, target):\n dir_list = []\n for item in self.get_tree(target):\n dir_list.append(item['name'])\n return dir_list\n\n def paste(self, targets, source, dest, cut):\n \"\"\" Moves/copies target files/directories from source to dest. \"\"\"\n # source_dir = self._get_path_object(source)\n dest_dir = self._get_path_object(self._find_path(dest))\n added = []\n removed = []\n if dest_dir.is_dir():\n for target in targets:\n orig_abs_path = self._find_path(target)\n orig_obj = self._get_path_object(orig_abs_path)\n new_abs_path = safe_join(self.root, dest_dir.get_path(), os.path.basename(orig_abs_path))\n if cut:\n _fnc = shutil.move\n removed.append(orig_obj.get_info()['hash'])\n else:\n if orig_obj.is_dir():\n _fnc = shutil.copytree\n else:\n _fnc = shutil.copy\n _fnc(orig_abs_path, new_abs_path)\n added.append(self._get_path_info(new_abs_path))\n\n return {\"added\": added,\n \"removed\": removed}\n\n def remove(self, target):\n obj = self._get_path_object(self._find_path(target))\n obj.remove()\n return target\n\n def upload(self, files, parent):\n added = []\n parent = self._get_path_object(self._find_path(parent))\n if parent.is_dir():\n for upload in files.getlist('upload[]'):\n new_abs_path = safe_join(self.root, parent.path, upload.name)\n try:\n new_file = FileWrapper.mkfile(new_abs_path, self.root)\n new_file.contents = upload.read()\n added.append(new_file.get_info())\n except Exception:\n pass\n return {\"added\": added}\n\n # private methods\n\n def _find_path(self, fhash, root=None, resolution=False):\n if root is None:\n root = '%s' % self.root\n final_path = None\n\n if not fhash:\n return root\n\n for dirpath, dirnames, filenames in os.walk(root):\n for f in filenames:\n f = safe_join(self.root, dirpath, f)\n f_obj = FileWrapper(f, self.root)\n # rf = f\n #f = f.encode('utf8')\n if fhash == f_obj.get_hash():\n final_path = f\n if resolution:\n try:\n final_path = str(final_path, 'utf8')\n except:\n pass\n return final_path\n for d in dirnames:\n d = safe_join(self.root, dirpath, d)\n d_obj = DirectoryWrapper(d, self.root)\n # rd = d\n if fhash == d_obj.get_hash():\n final_path = d\n if resolution:\n try:\n final_path = str(final_path, 'utf8')\n except:\n pass\n return final_path\n d = os.path.abspath(dirpath)\n d_obj = DirectoryWrapper(d, self.root)\n # rd = d\n if fhash == d_obj.get_hash():\n final_path = d\n if resolution:\n try:\n final_path = str(final_path, 'utf8')\n except:\n pass\n return final_path\n\n return final_path\n\n def _get_path_object(self, path):\n if os.path.isdir(path):\n return DirectoryWrapper(path, root=self.root)\n else:\n return FileWrapper(path, root=self.root)\n\n def _get_path_info(self, path):\n return self._get_path_object(path).get_info()\n","sub_path":"elfinder/volume_drivers/fs_driver.py","file_name":"fs_driver.py","file_ext":"py","file_size_in_byte":13672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"67178897","text":"import urllib\nimport urllib.request\nimport http.cookiejar\nimport time\nfrom urllib.parse import urlparse\n\n\ndef GetNowTime():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n\nhosturl = 'https://quest.pecs.uwaterloo.ca/psp/SS/?cmd=login&languageCd=ENG'\nposturl = hosturl\ncook = 
http.cookiejar.CookieJar()\nopener=urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cook))\n\n\n\npostData = {'timezoneOffset' : '',\n 'userid' : '',\n 'pwd' : '',\n 'Submit' : 'Sign in',\n }\n\nheaders = ['User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36'],\nprint(headers)\n\nopener.addheaders = headers\n\nr = opener.open(hosturl,urllib.parse.urlencode(postData).encode())\n\nr = opener.open(\"https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/SA/c/UW_SS_MENU.UW_SS_ADMAPP.GBL?Page=UW_SS_ADMAPP_STAT&Action=U&ACAD_CAREER=UG&ADMIT_TERM=1169&ADM_APPL_NBR=00605795&APPL_PROG_NBR=0&EMPLID=20653407&STDNT_CAR_NBR=0&TargetFrameName=None\")\n\nsa = r.read().decode('utf-8')\nfile = open('log.txt','w')\nfile.write(sa)\nfile.close()\nfile = open('log.txt','r')\nlist1 = file.readlines()\nn = 0\nresult1 = ''\nfor n in list1:\n if(n.find('
')!=-1):\n print(n)\n result1 = n\n\nfile.close()\nstatus = 'Error'\ntimeup = GetNowTime()\nif(result1.find('Application')!=-1):\n status = 'No Updates...Still waiting...'\nelif(result1.find('Admit')!=-1):\n status = 'Admitted!!!!!!'\n if(result1.find('Conditional')!=1):\n status = 'Got Conditional Admitted'\nelse:\n status = 'Got declined'\n\nfile = open('log.txt','w+')\nfile.write(status+'\\n')\nfile.write(timeup)\nfile.close()\n\n\n\n","sub_path":"Admitornot.py","file_name":"Admitornot.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"227201591","text":"import sys\nsys.stdin = open('input_5186.txt', 'r')\n\nfor test_case in range(int(input())):\n N = float(input())\n result = ''\n while True:\n value = N * 2\n under_num_list = [int(num) for num in str(value)[2:]]\n if sum(under_num_list) != 0:\n result += str(int(N * 2))\n N = float('0.' + str(value)[2:]) if int(N * 2) == 0 else N * 2 - 1\n else:\n result += '1'\n print('#{} {}'.format(test_case + 1, result))\n break\n if len(result) > 12:\n print('#{} overflow'.format(test_case + 1))\n break\n ","sub_path":"02_algorithm/sw_expert_academy/AD_Start/5186.py","file_name":"5186.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"115785050","text":"import ROOT\n\n# global parameters\nintLumi = 5.0e+06 #in pb-1\nana_tex = \"e^{+}e^{-} #rightarrow Z/#gamma^{*} #rightarrow #mu^{+}#mu^{-}\"\ndelphesVersion = \"3.4.3pre04\"\nenergy = 91.2\ncollider = \"FCC-ee\"\ninputDir = \"FCCee/Z_Zmumu/\"\nformats = ['png','pdf']\nyaxis = ['lin','log']\nstacksig = ['stack','nostack']\noutdir = 'FCCee/Z_Zmumu/plots/'\n\nvariables = ['mz','mz_zoom','mz_zoom2','mz_zoom3']\n\n###Dictonnary with the analysis name as a key, and the list of selections to be plotted for this analysis. The name of the selections should be the same than in the final selection\nselections = {}\nselections['Z'] = [\"sel0\",\"sel1\"]\n\nextralabel = {}\nextralabel['sel0'] = \"Selection: N_{Z} = 1\"\nextralabel['sel1'] = \"Selection: N_{Z} = 1; 80 GeV < m_{Z} < 100 GeV\"\n\n\ncolors = {}\ncolors['Z_Pythia8'] = ROOT.kRed\ncolors['Z_Whizard'] = ROOT.kBlue+1\n\nplots = {}\nplots['Z'] = {'signal':{'Z_Pythia8':['p8_ee_Z_Zmumu_ecm91']},\n 'backgrounds':{'Z_Whizard':['wizhardp8_ee_Z_Zmumu_ecm91']\n }\n }\n\n\nlegend = {}\nlegend['Z_Pythia8'] = 'Z Pythia8'\nlegend['Z_Whizard'] = 'Z Whizard'\n\n\n\n\n\n","sub_path":"FCCeeAnalyses/Z_Zmumu/dataframe/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"565257589","text":"\"\"\"\nFile: network.py\nAuthor Vu Nguyen\nDate: 12/7/2020\nSection: 31\nDescription: This is a file contain a main program and network class\n that'll execute all the basic function of both Phone and\n Switchboard\n\"\"\"\n\n\"\"\"\nnetwork.py is both the definition file for the Network class as well as the driver for the program.\n\nIn network you need to implement the functions which the driver will call for the all the different commands.\n\"\"\"\n\nfrom phone import Phone\nfrom switchboard import Switchboard\nimport json\n\"\"\"\nimport json\nimport csv (you can do either if you choose, or just use the regular file io)\n\nSome constants below are for the driver, don't remove them unless you mean to. 
\n\"\"\"\n\nHYPHEN = \"-\"\nQUIT = 'quit'\nSWITCH_CONNECT = 'switch-connect'\nSWITCH_ADD = 'switch-add'\nPHONE_ADD = 'phone-add'\nNETWORK_SAVE = 'network-save'\nNETWORK_LOAD = 'network-load'\nSTART_CALL = 'start-call'\nEND_CALL = 'end-call'\nDISPLAY = 'display'\nTRUNK_CONNECT = 'Trunk_Connect'\nPHONE = 'Phone'\n\n\nclass Network:\n def __init__(self):\n \"\"\"\n Construct a network by creating the switchboard container object\n\n You are free to create any additional data/members necessary to maintain this class.\n \"\"\"\n\n # extra\n self.switch_board = [] # object switchboard\n self.network_info = {} # This is where I'll dump everything into a json file\n self.connectable = False\n\n def load_network(self, filename):\n \"\"\"\n :param filename: the name of the file to be loaded. Assume it exists and is in the right format.\n If not, it's ok if your program fails.\n :return: updated the new self.network_info and self.switch_board\n \"\"\"\n with open(filename, 'r') as json_file:\n json_reader = json.load(json_file)\n\n area_code_info = {}\n the_entire_file = {}\n\n # This reset and re-append the switch_board from file and add the info the the phone and trunk_line.\n self.switch_board = []\n for a_c in json_reader:\n switch_board = Switchboard(a_c)\n self.switch_board.append(switch_board)\n\n # The sub info of the self.network (includes Phone and Trunk Connect)\n area_code_info[PHONE] = [Phone(phone_num, switch_board) for phone_num in json_reader[a_c][PHONE]]\n area_code_info[TRUNK_CONNECT] = [trunk_line for trunk_line in json_reader[a_c][TRUNK_CONNECT]]\n\n the_entire_file[a_c] = area_code_info\n\n area_code_info = {}\n\n # Go through each one of the area_code from a new dictionary\n for area_c in the_entire_file:\n\n # This set up the switch_board for checking.\n checking_area_code = None\n for sw_bo in self.switch_board:\n if sw_bo.area_code == area_c:\n checking_area_code = sw_bo\n\n # Checking through other connect area_code and adding it to the checking_area_code.\n for trunk_code in the_entire_file[area_c][TRUNK_CONNECT]:\n for sw_bo in self.switch_board:\n if sw_bo.area_code == trunk_code:\n checking_area_code.add_trunk_connection(sw_bo)\n\n for phone_num in the_entire_file[area_c][PHONE]:\n checking_area_code.add_phone(phone_num.number)\n\n self.network_info = the_entire_file\n\n def save_network(self, filename):\n \"\"\"\n the format: {'area_code': {'Phone': [strings of phone number w/o area_code], 'Trunk Connect':\n [strings of other area_code that connect through a trunk line]}}\n\n :param filename: the name of your file to save the network. 
Remember that you need to save all the\n connections, but not the active phone calls (they can be forgotten between save and load).\n You must invent the format of the file, but if you wish you can use either json or csv libraries.\n :return: put all of the infomation from the_entire_info into a json file.\n \"\"\"\n with open(filename, 'w') as json_file:\n\n the_entire_info = {}\n for switch_board in self.switch_board:\n the_entire_info.update(switch_board.jsonify())\n\n json.dump(the_entire_info, json_file)\n\n def add_switchboard(self, area_code):\n \"\"\"\n add switchboard should create a switchboard and add it to your network.\n\n By default it is not connected to any other boards and has no phone lines attached.\n :param area_code: the area code for the new switchboard\n :return: updated the self.network_info and self.switch_board\n \"\"\"\n self.switch_board.append(Switchboard(area_code))\n self.network_info[area_code] = {PHONE: [], TRUNK_CONNECT: []}\n\n def adding_phone(self, a_code, phone_num):\n \"\"\"\n :param a_code: The area code of the phone number that user want to add\n :param phone_num: the number w/o the area code\n :return: print the info and update the self.network_info\n \"\"\"\n\n # This condition checks to see if the area code exist\n if a_code not in self.network_info:\n print(a_code, \"doesn't exist\")\n else:\n sw_bo = None\n\n # This condition find the switch board in the list.\n for switch_board in self.switch_board:\n if switch_board.area_code == a_code:\n sw_bo = switch_board\n\n # This condition checks for duplication of phone number in same local switchboard\n if phone_num in sw_bo.phone_list:\n print(\"\\tCan't add duplicated phone number in the same area code\")\n else:\n self.network_info[a_code][PHONE].append(Phone(phone_num, sw_bo)) # Change\n sw_bo.add_phone(phone_num)\n print('\\t', a_code + '-' + phone_num, 'successfully added')\n\n def connect_switchboards(self, area_1, area_2):\n \"\"\"\n Connect switchboards should connect the two switchboards (creates a trunk line between them)\n so that long distance calls can be made.\n\n :param area_1: area-code 1\n :param area_2: area-code 2\n :return: success/failure\n \"\"\"\n if area_1 not in self.network_info:\n print(\"\\tCan't connect because\", area_1, \"doesn't exist\")\n elif area_2 not in self.network_info:\n print(\"\\tCan't connect because\", area_2, \"doesn't exist\")\n else:\n\n # This loops through all the sb stores in the network to retrieve the two sb that needed to be connect.\n sw_bo_one = None\n sw_bo_two = None\n for switch_board in self.switch_board:\n if switch_board.area_code == area_1:\n sw_bo_one = switch_board\n\n elif switch_board.area_code == area_2:\n sw_bo_two = switch_board\n\n # This add the switchboard to the others trunk_connect list as an object.\n sw_bo_one.add_trunk_connection(sw_bo_two)\n sw_bo_two.add_trunk_connection(sw_bo_one)\n self.network_info[area_1][TRUNK_CONNECT].append(area_2)\n self.network_info[area_2][TRUNK_CONNECT].append(area_1)\n\n print(\"\\tSuccessfully connected\", area_1, 'with', area_2)\n\n def connecting_call(self, src_a_code, src_phone_num, dest_a_code, dest_phone_num):\n \"\"\"\n This is a conditional helper function that checks whether if it's possible to connect\n two phone number to each other.\n\n :param src_a_code: the caller phone number's area code\n :param src_phone_num: the caller phone number w/o area code\n :param dest_a_code: the receiver phone number's area code\n :param dest_phone_num: the receiver phone number w/o area code\n 
:return: True or False\n \"\"\"\n # Checks for the right start switch_board.\n for switch_board in self.switch_board:\n if switch_board.area_code == src_a_code:\n if switch_board.connect_call(switch_board, dest_phone_num, []):\n self.connectable = True\n\n # This updated the phone busy attribute\n if self.connectable:\n first_phone = None\n second_phone = None\n # Assign phone object to two phone number.\n for a_c in self.network_info:\n if a_c in [src_a_code, dest_a_code]:\n\n # This loop through all of the phone number a correct area_code\n for phone_num in self.network_info[a_c][PHONE]:\n if phone_num.number == src_phone_num:\n first_phone = phone_num\n elif phone_num.number == dest_phone_num:\n second_phone = phone_num\n\n first_phone.connect(second_phone)\n second_phone.connect(first_phone)\n\n return self.connectable\n\n def disconnecting_call(self, area_code, phone):\n \"\"\"\n :param area_code: the area_code of the phone that that want to disconnect\n :param phone: the phone number without area code\n :return: True or False\n \"\"\"\n for a_c in self.network_info:\n if a_c == area_code:\n for phone_num in self.network_info[a_c][PHONE]:\n if phone_num.number == phone:\n return phone_num.disconnect()\n\n def display(self):\n \"\"\"\n Display should output the status of the phone network as described in the project.\n \"\"\"\n for a_code in self.network_info:\n print('Switchboard with area code: ', a_code)\n\n print('\\tTrunk lines are: ')\n for trunk_line in self.network_info[a_code][TRUNK_CONNECT]:\n print('\\t\\tTrunk line connection to: ', trunk_line)\n\n print('\\tLocal phone numbers are: ')\n for phone_num in self.network_info[a_code][PHONE]:\n print('\\t\\tPhone with number:', phone_num.number, end=' ')\n if not phone_num.busy:\n print('is not in use')\n else:\n print('is connected to', HYPHEN.join([phone_num.busy.switchboard.area_code, phone_num.busy.number]))\n\n\nif __name__ == '__main__':\n the_network = Network()\n s = input('Enter command: ')\n\n while s.strip().lower() != QUIT:\n split_command = s.split()\n if len(split_command) == 3 and split_command[0].lower() == SWITCH_CONNECT:\n area_1 = split_command[1]\n area_2 = split_command[2]\n the_network.connect_switchboards(area_1, area_2)\n\n elif len(split_command) == 2 and split_command[0].lower() == SWITCH_ADD:\n the_network.add_switchboard(split_command[1])\n\n elif len(split_command) == 2 and split_command[0].lower() == PHONE_ADD:\n number_parts = split_command[1].split(HYPHEN)\n a_code = number_parts[0]\n phone_number = ''.join(number_parts[1:])\n the_network.adding_phone(a_code, phone_number)\n\n elif len(split_command) == 2 and split_command[0].lower() == NETWORK_SAVE:\n the_network.save_network(split_command[1])\n print('\\tNetwork saved to {}.'.format(split_command[1]))\n\n elif len(split_command) == 2 and split_command[0].lower() == NETWORK_LOAD:\n the_network.load_network(split_command[1])\n print('\\tNetwork loaded from {}.'.format(split_command[1]))\n\n elif len(split_command) == 3 and split_command[0].lower() == START_CALL:\n src_number_parts = split_command[1].split(HYPHEN)\n src_area_code = src_number_parts[0]\n src_number = ''.join(src_number_parts[1:])\n\n dest_number_parts = split_command[2].split(HYPHEN)\n dest_area_code = dest_number_parts[0]\n dest_number = ''.join(dest_number_parts[1:])\n\n if the_network.connecting_call(src_area_code, src_number, dest_area_code, dest_number):\n print('\\t', HYPHEN.join(src_number_parts), 'and', HYPHEN.join(dest_number_parts),\n 'are now connected.')\n else:\n 
print('\\t', HYPHEN.join(src_number_parts), 'and', HYPHEN.join(dest_number_parts),\n 'were not connected.')\n\n elif len(split_command) == 2 and split_command[0].lower() == END_CALL:\n number_parts = split_command[1].split('-')\n area_code = number_parts[0]\n number = ''.join(number_parts[1:])\n\n if the_network.disconnecting_call(area_code, number):\n print('\\tHanging up...\\n\\tConnection Terminated.')\n else:\n print(\"\\tUnable to disconnect\")\n\n elif len(split_command) >= 1 and split_command[0].lower() == DISPLAY:\n the_network.display()\n\n s = input('Enter command: ')\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":13040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"340962071","text":"from bs4 import BeautifulSoup\n\nfrom mwscanner import BASE_URL\nfrom mwscanner.Mixins import TableReaderMixin, UrlLoaderMixin\nfrom mwscanner.Department import Department\nfrom mwscanner.builders.DisciplinesBuilder import DisciplinesBuilder\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\nclass DepartmentBuilder(TableReaderMixin, UrlLoaderMixin):\n\n def getDisciplineListURL(self, code):\n # This method take the url of the\n # disciplines from the department code\n return BASE_URL + 'graduacao/oferta_dis.aspx?cod={}'.format(code)\n\n def buildFromHtml(self, code, name):\n # This method builds the list of disciplines that belongs\n # to this department. This list will be later used to\n # process the creation of the Discipline object.\n\n response = self.getFromUrl(self.getDisciplineListURL(code))\n\n if response.status_code != 200:\n return\n\n # Make the parse for html\n raw_html = BeautifulSoup(response.content, 'html.parser')\n table_data = self.readDatatableTableFromHTML(raw_html)\n\n # in table there are 3 types of data:\n # 'Código': the code of a discipline that belongs to the\n # current department\n # 'Denominação': name of the discipline\n # 'Ementa': garbage (it was a icon with a link on\n # the table, but those information where\n # ignored when scrapping)\n\n # the table_data can be empty\n\n disciplines = []\n\n if table_data is not None:\n\n def createCourses(data):\n\n discipline = DisciplinesBuilder().buildDiscipline(\n data['Código'], data['Denominação'], code)\n\n disciplines.append(\n discipline\n )\n\n return discipline\n\n pool = ThreadPool(16)\n c = pool.map(createCourses, table_data)\n pool.close()\n pool.join()\n\n print(\"[Department {}] Finished\".format(name))\n return disciplines\n\n def buildDepartment(self, campus, code, name, initials):\n\n disciplines = self.buildFromHtml(code, name)\n\n department = Department()\n department.setCampus(campus)\n department.setCode(code)\n department.setDisciplines(disciplines)\n department.setInitials(initials)\n department.setName(name)\n\n return department\n","sub_path":"mwscanner/builders/DepartmentBuilder.py","file_name":"DepartmentBuilder.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"402393432","text":"# Fuzz Tests for _pickle\n\nimport test.fuzzhelper as fh\nimport _pickle\n\ndef dir_func(i):\n d = dir(i)\n if 'memo' in d and fh.bool():\n setattr(i, 'persistent_id', fh.dict())\n if 'persistent_id' in d and fh.bool():\n setattr(i, 'persistent_id', fh.function())\n return d\n\ndef test():\n for _ in range(9999):\n fh.check(_pickle,\n valid_inputs = [\n ((),{}),\n ((fh.object,),{}),\n ((fh.bytes, fh.bytes),{}),\n 
((fh.file_object, fh.bool, fh.bytes, fh.bytes),{}),\n ((fh.object, fh.int, fh.bool),{}),\n ((fh.object, fh.file_object, fh.int, fh.bool),{}),\n ((fh.bytes, fh.bool, fh.bytes, fh.bytes),{}),\n ((fh.file_object, fh.bool, fh.bytes, fh.bytes),{})\n ],\n expected_exceptions = (TypeError, ValueError, AttributeError),\n dir_func = dir_func)\n\nif __name__ == '__main__': fh.do_test_loop(test)\n","sub_path":"Lib/test/fuzz_pickle.py","file_name":"fuzz_pickle.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"169731045","text":"from __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_setting import *\n\nfrom stp_steady import *\nfrom lif_act_funct import *\n\nN=400\n\ntau = 0.02\n\nJ_ee = 1.\nJ_ei = -1.5\n\nJ_ie = 1.5\nJ_ii = -1.5\n\nstd_J_ee = 2.661\nstd_J_ei = 0.81\n\nstd_J_ie = 0.48\nstd_J_ii = 0.5\n\nC_ee = 40\nC_ei = 8\nC_ie = 40\nC_ii = 40\n\np_ee = 0.1\np_ei = 0.1\np_ie = 0.1\np_ii = 0.5\n\n\n","sub_path":"plotting_scripts/fir_dist_self_consist.py","file_name":"fir_dist_self_consist.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405815496","text":"import os, dialog\n\nfrom database import DataBase\n\nclass Service:\n def __init__(self, dbfile, user):\n self.dbfile = dbfile\n self.user = user\n self.step = 0\n\n def run(self, bot, update):\n raise NotImplementedError\n\nclass ServiceAdd(Service):\n def __init__(self, dbfile, user):\n super().__init__(dbfile, user)\n\n self.first_name = None\n self.last_name = None\n self.photo = None\n\n def run(self, bot, update):\n if self.step == 0:\n bot.send_message(self.user.chat_id, dialog.add_last_name)\n elif self.step == 1:\n self.last_name = update.message.text\n bot.send_message(self.user.chat_id, dialog.add_first_name)\n elif self.step == 2:\n self.first_name = update.message.text\n bot.send_message(self.user.chat_id, dialog.add_photo)\n elif self.step == 3:\n self.photo = update.message.photo[-1].file_id\n\n DataBase(self.dbfile).add_gotoer(self.photo, self.first_name, self.last_name)\n\n bot.send_message(self.user.chat_id, dialog.add_success)\n\n self.step = None\n return\n\n self.step += 1\n","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644949510","text":"#!/usr/bin/env python\n\"\"\"\n\nCopyright (c) 2020 Alex Forencich\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\nimport itertools\nimport logging\nimport os\n\nimport cocotb_test.simulator\n\nimport cocotb\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import RisingEdge, Timer\nfrom cocotb.regression import TestFactory\n\nfrom cocotbext.axi import AxiStreamSink, AxiStreamBus\nfrom cocotbext.uart import UartSource\n\n\nclass TB:\n def __init__(self, dut, baud=3e6):\n self.dut = dut\n\n self.log = logging.getLogger(\"cocotb.tb\")\n self.log.setLevel(logging.DEBUG)\n\n cocotb.start_soon(Clock(dut.clk, 8, units=\"ns\").start())\n\n self.source = UartSource(dut.rxd, baud=baud, bits=len(dut.m_axis_tdata), stop_bits=1)\n\n self.sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, \"m_axis\"), dut.clk, dut.rst)\n\n dut.prescale.setimmediatevalue(int(1/8e-9/baud/8))\n\n async def reset(self):\n self.dut.rst.setimmediatevalue(0)\n await RisingEdge(self.dut.clk)\n await RisingEdge(self.dut.clk)\n self.dut.rst.value = 1\n await RisingEdge(self.dut.clk)\n await RisingEdge(self.dut.clk)\n self.dut.rst.value = 0\n await RisingEdge(self.dut.clk)\n await RisingEdge(self.dut.clk)\n\n\nasync def run_test(dut, payload_lengths=None, payload_data=None):\n\n tb = TB(dut)\n\n await tb.reset()\n\n for test_data in [payload_data(x) for x in payload_lengths()]:\n\n await tb.source.write(test_data)\n\n rx_data = bytearray()\n\n while len(rx_data) < len(test_data):\n rx_data.extend(await tb.sink.read())\n\n tb.log.info(\"Read data: %s\", rx_data)\n\n assert tb.sink.empty()\n\n await Timer(2, 'us')\n\n await RisingEdge(dut.clk)\n await RisingEdge(dut.clk)\n\n\ndef prbs31(state=0x7fffffff):\n while True:\n for i in range(8):\n if bool(state & 0x08000000) ^ bool(state & 0x40000000):\n state = ((state & 0x3fffffff) << 1) | 1\n else:\n state = (state & 0x3fffffff) << 1\n yield state & 0xff\n\n\ndef size_list():\n return list(range(1, 16)) + [128]\n\n\ndef incrementing_payload(length):\n return bytearray(itertools.islice(itertools.cycle(range(256)), length))\n\n\ndef prbs_payload(length):\n gen = prbs31()\n return bytearray([next(gen) for x in range(length)])\n\n\nif cocotb.SIM_NAME:\n\n factory = TestFactory(run_test)\n factory.add_option(\"payload_lengths\", [size_list])\n factory.add_option(\"payload_data\", [incrementing_payload, prbs_payload])\n factory.generate_tests()\n\n\n# cocotb-test\n\ntests_dir = os.path.abspath(os.path.dirname(__file__))\nrtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))\n\n\ndef test_uart_rx(request):\n dut = \"uart_rx\"\n module = os.path.splitext(os.path.basename(__file__))[0]\n toplevel = dut\n\n verilog_sources = [\n os.path.join(rtl_dir, f\"{dut}.v\"),\n ]\n\n parameters = {}\n\n parameters['DATA_WIDTH'] = 8\n\n extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}\n\n sim_build = os.path.join(tests_dir, \"sim_build\",\n request.node.name.replace('[', '-').replace(']', ''))\n\n cocotb_test.simulator.run(\n python_search=[tests_dir],\n verilog_sources=verilog_sources,\n toplevel=toplevel,\n module=module,\n parameters=parameters,\n sim_build=sim_build,\n extra_env=extra_env,\n )\n","sub_path":"tb/uart_rx/test_uart_rx.py","file_name":"test_uart_rx.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"625566712","text":"try: import simplejson as json\nexcept: import json\n\nfrom django import template\nfrom django.template import Library\nfrom django.template import RequestContext\nfrom django.template import resolve_variable\n\nfrom hackerhouses.core.misc import get_recent_houses\nfrom hackerhouses.core.models import House, HouseSpirit\nfrom django.conf import settings\n\nregister = Library()\n\nINCLUDE_TEMPLATE = \"\"\"\n\n\n\"\"\" % settings.GOOGLE_API_KEY\n\n\nBASIC_TEMPLATE = \"\"\"\n
\n\n\n\"\"\"\n\n\nclass GMapNode (template.Node):\n def __init__(self, params, nodelist):\n self.params = params\n self.nodelist = nodelist\n \n def render (self, context):\n locations = []\n uid = self.nodelist.render(context) \n self.params['detail'] = 'false'\n \n if uid and self.params['object'] == 'house':\n objects = House.objects.filter(id=int(uid))\n self.params['detail'] = 'true'\n elif self.params['object'] == 'spirit':\n objects = HouseSpirit.objects.filter(id=int(uid))\n self.params['detail'] = 'true'\n else:\n objects = get_recent_houses(num=100)\n \n for obj in objects:\n locations.append(obj.get_map_detail())\n self.params['locations'] = json.dumps(locations)\n \n for k,v in self.params.items():\n try:\n self.params[k] = resolve_variable(v, context)\n except:\n pass \n return BASIC_TEMPLATE % self.params\n\ndef do_gmap(parser, token):\n items = token.split_contents()\n\n nodelist = parser.parse(('endgmap',))\n parser.delete_first_token()\n \n #Default values \n parameters={\n 'name' : \"default\",\n 'width' : \"300px\",\n 'height' : \"300px\",\n 'object' : \"house\",\n }\n \n for item in items[1:]:\n param, value = item.split(\":\")\n param = param.strip()\n value = value.strip()\n if parameters.has_key(param):\n if value[0]==\"\\\"\":\n value = value[1:-1]\n parameters[param] = value\n return GMapNode(parameters, nodelist)\n\n\n\n\nclass GMapScriptNode (template.Node):\n def __init__(self):\n pass \n def render (self, context):\n return INCLUDE_TEMPLATE\n\ndef do_gmap_script(parser, token):\n try:\n tag_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"Arguments required\" % token.contents[0])\n return GMapScriptNode()\n\n\n\n\nregister.tag('gmap', do_gmap)\nregister.tag('gmap-script', do_gmap_script)\n\n","sub_path":"trunk/hackerhouses/core/templatetags/gmaps.py","file_name":"gmaps.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"358407422","text":"import logging\nfrom crawler.items import NewEvents\nimport scrapy\nimport re\nfrom datetime import datetime\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass NewEventSpider(scrapy.Spider):\n name = 'new_event'\n allowed_domains = [\"vsd.vn\"]\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name)\n self.cookies, self.token = '', ''\n self.domain = \"https://vsd.vn\"\n self.origin_page = \"https://vsd.vn/vi//alo/ISSUER\"\n self.page = 1\n self.current_date = datetime.today().strftime('%d/%m/%Y')\n # self.current_date = '20/08/2021'\n self.events = []\n\n def start_requests(self):\n request = scrapy.Request(url=self.origin_page, callback=self.parse)\n yield request\n\n def parse(self, response):\n logging.info(\"Start parse response\")\n\n # get cookies\n self.detectToken(response)\n\n message_events = NewEvents()\n list_new = response.css('.list-news')\n if len(list_new) == 0:\n return\n news = list_new[0].css('li')\n\n is_current_date = False\n for new_event in news:\n event = {'title': new_event.css('a::text').get(),\n 'link': self.domain + new_event.css('a').attrib['href'],\n 'date': new_event.css('.time-news::text').get()\n }\n\n is_current_date = self.filter_events(event)\n if not is_current_date:\n break\n\n has_error = False\n if is_current_date:\n self.page += 1\n try:\n has_error = self.update_list_news_data()\n except:\n has_error = True\n message_events['events'] = self.format_message_to_mattermost(has_error)\n yield message_events\n\n @staticmethod\n def 
extract_date(date):\n get_date = re.search(\"([0-9]{2}/[0-9]{2}/[0-9]{4})\", date)\n return get_date[0]\n\n def format_message_to_mattermost(self, has_error):\n message = f' ##### New events. Time: {self.current_date}. Total records: {len(self.events)}\\n'\n for i, event in enumerate(self.events, 1):\n title = event['title']\n link = event['link']\n message += f'{i}: [{title}]({link}) \\n'\n if has_error:\n message += f'Check missing events in [Page {self.page}](https://vsd.vn/vi//alo/ISSUER{self.page})\\n'\n return message\n\n def update_list_news_data(self):\n\n cookies = {\n '__VPToken': self.cookies,\n }\n\n headers = {\n 'Connection': 'keep-alive',\n 'sec-ch-ua': '\"Chromium\";v=\"92\", \" Not A;Brand\";v=\"99\", \"Google Chrome\";v=\"92\"',\n 'Accept': '*/*',\n 'X-Requested-With': 'XMLHttpRequest',\n 'sec-ch-ua-mobile': '?0',\n '__VPToken': self.token,\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Origin': 'https://vsd.vn',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Dest': 'empty',\n 'Accept-Language': 'en-US,en;q=0.9',\n }\n data = '{\"CurrentPage\":' + str(self.page) + ' }'\n\n response = requests.post(\n self.origin_page, data=data, cookies=cookies, headers=headers)\n if response.status_code != 200:\n return True\n html_file = BeautifulSoup(response.text, \"html.parser\")\n news = html_file.select_one('ul.list-news').select('li')\n is_current_date = False\n for new in news:\n\n link_element = new.select_one('a')\n event = {'title': link_element.get_text(),\n 'link': self.domain + link_element['href'],\n 'date': new.select_one('.time-news').get_text()\n }\n is_current_date = self.filter_events(event)\n if not is_current_date:\n break\n\n if is_current_date:\n self.page += 1\n self.update_list_news_data()\n return False\n\n def filter_events(self, event):\n date_str = self.extract_date(event['date'])\n if date_str >= self.current_date:\n self.events.append(event)\n return True\n return False\n\n def detectToken(self, response):\n # get cookies\n cookies = response.headers.getlist('Set-Cookie')[0].decode(\"utf-8\")\n has_token = re.search(\"__VPToken=.+?;\", cookies)\n if has_token:\n self.cookies = cookies.split(';')[0].split('=')[1]\n self.token = response.css('head').css('meta')[-1].attrib['content']\n\n # get token\n metas = response.css('head').css('meta')\n for meta in metas:\n if 'name' in meta.attrib and meta.attrib['name'] == '__VPToken':\n self.token = meta.attrib['content']\n","sub_path":"crawler/spiders/new_event.py","file_name":"new_event.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"382266598","text":"import unittest\nfrom unittest import TestCase\nfrom myhdl import (Signal, Simulation, delay, instance, now, StopSimulation,\n traceSignals)\nfrom fpgaedu.hdl import ClockGen\n\nclass ClockGenTestCase(TestCase):\n\n def test_clockgen_basic(self):\n\n clk = Signal(False)\n half_period = 10\n clockgen = ClockGen(clk, half_period)\n\n @instance\n def test():\n for i in range(0, 100, 2):\n self.assertEquals(now(), i*half_period)\n yield clk.posedge\n self.assertEquals(now(), (i+1)*half_period)\n yield clk.negedge\n\n raise StopSimulation()\n\n sim = Simulation(clockgen, test)\n sim.run()\n\n\nif __name__ == '__main__':\n unittest.main()\n 
\n","sub_path":"tests/hdl/test_clockgen.py","file_name":"test_clockgen.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"216186000","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom HTMLParser import HTMLParser\r\nimport os\r\nimport codecs\r\nclass MyHTMLParser(HTMLParser):\r\n num = False\r\n lec = False\r\n fullList = \"\"\r\n def handle_starttag(self, tag, attrs):\r\n if tag == \"th\" and attrs == [(\"bgcolor\",\"#eceaeb\"),(\"align\",\"right\")]:\r\n self.num = True\r\n if tag == \"td\" and attrs == [(\"colspan\",\"2\")]:\r\n self.lec = True\r\n def handle_endtag(self, tag):\r\n if self.num and tag ==\"th\":\r\n self.num = False\r\n if self.lec and tag == \"td\":\r\n self.lec = False\r\n \r\n def handle_data(self, data):\r\n if self.num:\r\n self.fullList = self.fullList[:-1] +'\\n' + data[:4] + data[5:] +\"~\"\r\n if self.lec:\r\n if (\" \".join(data.split()))[::-1] == \"\":\r\n self.fullList = self.fullList + \" +\"\r\n else:\r\n self.fullList = self.fullList + (\" \".join(data.split()))[::-1]+\"+\"\r\n \r\nif __name__ == '__main__':\r\n parser = MyHTMLParser()\r\n fn = os.path.join(os.getcwd(), 'Data_Dumps\\\\TAU\\\\Bid\\\\2012')\r\n for file1 in os.listdir(fn):\r\n with codecs.open(fn + '\\\\' + file1, 'r',encoding = 'utf-8') as f:\r\n while(True):\r\n r = f.read()\r\n if r == \"\":\r\n break\r\n else :\r\n parser.feed(r)\r\n \r\n with codecs.open(fn + '\\\\2012.txt', 'w',encoding = 'utf-8') as f:\r\n f.write(parser.fullList)\r\n","sub_path":"DB/coursesParser.py","file_name":"coursesParser.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"54292783","text":"# Specifies the adapter class to use, allowing you to alter certain\n# default behavior\n# ACCOUNT_ADAPTER = \"allauth.account.adapter.DefaultAccountAdapter\"\n\n# Specifies the login method to use - whether the user logs in by\n# entering their username, email address, or either one of both.\n# Setting this to email requires ACCOUNT_EMAUIL_REQUIRED=TRUE\nACCOUNT_AUTHENTICATION_METHOD = 'username'\n\n# Controls the life time of the session. Set to None to ask the user (\"Remember me?\"). \n# False to not remember, and True to always remember.\n# ACCOUNT_SESSION_REMEMBER (=None)\n\n# How long before the session cookie expires in seconds. Defaults to 1814400 seconds.\n# ACCOUNT_SESSION_COOKIE_AGE (=1814400)\n\n# Specifies the adapter class to user, allowing you to alter certain default behavior.\nSOCIALACCOUNT_ADAPTER = \"social_capxv2.users.adapter.MySocialAccountAdapter\"\n\n# Request email addres from 3rd party account provider? E.g. using OpenID AX, or the \n# Facebook \"email\" permission. \n# SOCIALACCOUNT_QUERY_EMAIL(=ACCOUNT_EMAIL_REQUIRED)\n\n# Attempt to bypass the signup form by using fields (e.g. username, email) retrieved\n# from the social account provider. 
If a conflict arises due to duplicate email address\n# the signup form will still kick in.\n# SOCIALACCOUNT_AUTO_SIGNUP = True\n\n# The user is required to hand over an email address when signing up using a social account.\n# SOCIALACCOUNT_EMAIL_REQUIRED = ACCOUNT_EMAIL_REQUIRED\n\n# As ACCOUNT_EMAIL_VERIFICATION, but for social accounts.\n# SOCIALACCOUNT_EMAIL_VERIFICATION = ACCOUNT_EMAIL_VERIFICATION\n\n# Used to override forms, for example: ['signup': 'myapp_forms.SignupForm']\n# SOCIALACCOUNT_FORMS = []\n\n# Dictionary containing provider specific settings.\nSOCIALACCOUNT_PROVIDERS = {\n 'facebook':\n {'METHOD': 'oauth2',\n 'SCOPE': ['email', 'public_profile', 'user_friends'],\n 'AUTH_PARAMS': {'auth_type': 'reauthenticate'},\n 'FIELDS': [\n 'id',\n 'email',\n 'name',\n 'first_name',\n 'last_name',\n 'verified',\n 'locale',\n 'timezone',\n 'link',\n 'gender',\n 'updated_time'],\n 'EXCHANGE_TOKEN': True,\n 'VERIFIED_EMAIL': False,\n 'VERSION': 'v2.4'\n },\n 'instagram':\n {'METHOD': 'OAUTH2',\n 'SCOPE': ['basic'],\n 'AUTH_PARAMS': {'auth_type': 'reauthenticate'},\n 'FIELDS': [\n 'media',\n 'feed',\n 'recent_media',\n 'locations',\n 'caption',\n 'comments',\n 'likes',\n 'user',\n ]\n }\n }\n\n# Indicates whether or not the access tokens are sotred in the database.\nSOCIALACCOUNT_STORE_TOKENS = True","sub_path":"config/settings/allauth_settings.py","file_name":"allauth_settings.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"200915082","text":"# -*- coding: utf-8 -*-\n\nimport math\n\nimport numpy as np\nimport pytest\n\nimport amrex\n\n\ndef test_mfab_loop(make_mfab):\n mfab = make_mfab()\n ngv = mfab.nGrowVect\n print(f\"\\n mfab={mfab}, mfab.nGrowVect={ngv}\")\n\n for mfi in mfab:\n bx = mfi.tilebox().grow(ngv)\n marr = mfab.array(mfi)\n\n # print(mfab)\n # print(mfab.num_comp)\n # print(mfab.size)\n # print(marr.size)\n # print(marr.nComp)\n\n # index by index assignment\n # notes:\n # - this is AMReX Array4, F-order indices\n # - even though we iterate by fastest varying index,\n # such loops are naturally very slow in Python\n three_comps = mfab.num_comp == 3\n if three_comps:\n for i, j, k in bx:\n # print(i,j,k)\n marr[i, j, k, 0] = 10.0 * i\n marr[i, j, k, 1] = 10.0 * j\n marr[i, j, k, 2] = 10.0 * k\n else:\n for i, j, k in bx:\n # print(i,j,k)\n marr[i, j, k] = 10.0 * i\n\n # note: offset from index space in numpy\n # in numpy, we start indices from zero, not small_end\n\n # numpy representation: non-copying view, including the\n # guard/ghost region\n # note: in numpy, indices are in C-order!\n marr_np = np.array(marr, copy=False)\n\n # check the values at start/end are the same: first component\n assert marr_np[0, 0, 0, 0] == marr[bx.small_end]\n assert marr_np[0, -1, -1, -1] == marr[bx.big_end]\n # same check, but for all components\n for n in range(mfab.num_comp):\n small_end_comp = list(bx.small_end) + [n]\n big_end_comp = list(bx.big_end) + [n]\n assert marr_np[n, 0, 0, 0] == marr[small_end_comp]\n assert marr_np[n, -1, -1, -1] == marr[big_end_comp]\n\n # now we do some faster assignments, using range based access\n # this should fail as out-of-bounds, but does not\n # does Numpy not check array access for non-owned views?\n # marr_np[24:200, :, :, :] = 42.\n\n # all components and all indices set at once to 42\n marr_np[:, :, :, :] = 42.0\n\n # values in start & end still match?\n assert marr_np[0, 0, 0, 0] == marr[bx.small_end]\n assert marr_np[-1, -1, -1, -1] == 
marr[bx.big_end]\n\n # all values for all indices match between multifab & numpy view?\n for n in range(mfab.num_comp):\n for i, j, k in bx:\n assert marr[i, j, k, n] == 42.0\n\n # separate test: cupy assignment & reading\n # TODO\n\n\ndef test_mfab_simple(make_mfab):\n mfab = make_mfab()\n assert mfab.is_all_cell_centered\n # assert(all(not mfab.is_nodal(i) for i in [-1, 0, 1, 2])) # -1??\n assert all(not mfab.is_nodal(i) for i in [0, 1, 2])\n\n for i in range(mfab.num_comp):\n mfab.set_val(-10 * (i + 1), i, 1)\n mfab.abs(0, mfab.num_comp)\n for i in range(mfab.num_comp):\n assert mfab.max(i) == (10 * (i + 1)) # Assert: None == 10 for i=0\n assert mfab.min(i) == (10 * (i + 1))\n\n mfab.plus(20.0, 0, mfab.num_comp)\n for i in range(mfab.num_comp):\n np.testing.assert_allclose(mfab.max(i), 20.0 + (10 * (i + 1)))\n np.testing.assert_allclose(mfab.min(i), 20.0 + (10 * (i + 1)))\n\n mfab.mult(10.0, 0, mfab.num_comp)\n for i in range(mfab.num_comp):\n np.testing.assert_allclose(mfab.max(i), 10.0 * (20.0 + (10 * (i + 1))))\n np.testing.assert_allclose(mfab.min(i), 10.0 * (20.0 + (10 * (i + 1))))\n mfab.invert(10.0, 0, mfab.num_comp)\n for i in range(mfab.num_comp):\n np.testing.assert_allclose(mfab.max(i), 1.0 / (20.0 + (10 * (i + 1))))\n np.testing.assert_allclose(mfab.min(i), 1.0 / (20.0 + (10 * (i + 1))))\n\n\n@pytest.mark.parametrize(\"nghost\", [0, 1])\ndef test_mfab_ops(boxarr, distmap, nghost):\n src = amrex.MultiFab(boxarr, distmap, 3, nghost)\n dst = amrex.MultiFab(boxarr, distmap, 1, nghost)\n\n src.set_val(10.0, 0, 1)\n src.set_val(20.0, 1, 1)\n src.set_val(30.0, 2, 1)\n dst.set_val(0.0, 0, 1)\n\n # dst.add(src, 2, 0, 1, nghost)\n # dst.subtract(src, 1, 0, 1, nghost)\n # dst.multiply(src, 0, 0, 1, nghost)\n # dst.divide(src, 1, 0, 1, nghost)\n\n dst.add(dst, src, 2, 0, 1, nghost)\n dst.subtract(dst, src, 1, 0, 1, nghost)\n dst.multiply(dst, src, 0, 0, 1, nghost)\n dst.divide(dst, src, 1, 0, 1, nghost)\n\n print(dst.min(0))\n np.testing.assert_allclose(dst.min(0), 5.0)\n np.testing.assert_allclose(dst.max(0), 5.0)\n\n # dst.xpay(2.0, src, 0, 0, 1, nghost)\n # dst.saxpy(2.0, src, 1, 0, 1, nghost)\n dst.xpay(dst, 2.0, src, 0, 0, 1, nghost)\n dst.saxpy(dst, 2.0, src, 1, 0, 1, nghost)\n np.testing.assert_allclose(dst.min(0), 60.0)\n np.testing.assert_allclose(dst.max(0), 60.0)\n\n # dst.lin_comb(6.0, src, 1,\n # 1.0, src, 2, 0, 1, nghost)\n dst.lin_comb(dst, 6.0, src, 1, 1.0, src, 2, 0, 1, nghost)\n np.testing.assert_allclose(dst.min(0), 150.0)\n np.testing.assert_allclose(dst.max(0), 150.0)\n\n\ndef test_mfab_mfiter(make_mfab):\n mfab = make_mfab()\n assert iter(mfab).is_valid\n assert iter(mfab).length == 8\n\n cnt = 0\n for mfi in mfab:\n cnt += 1\n\n assert iter(mfab).length == cnt\n\n\n@pytest.mark.skipif(\n amrex.Config.gpu_backend != \"CUDA\", reason=\"Requires AMReX_GPU_BACKEND=CUDA\"\n)\ndef test_mfab_ops_cuda_numba(make_mfab_device):\n mfab_device = make_mfab_device()\n # https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html\n from numba import cuda\n\n ngv = mfab_device.nGrowVect\n\n # assign 3: define kernel\n @cuda.jit\n def set_to_three(array):\n i, j, k = cuda.grid(3)\n if i < array.shape[0] and j < array.shape[1] and k < array.shape[2]:\n array[i, j, k] = 3.0\n\n # assign 3: loop through boxes and launch kernels\n for mfi in mfab_device:\n bx = mfi.tilebox().grow(ngv)\n marr = mfab_device.array(mfi)\n marr_numba = cuda.as_cuda_array(marr)\n\n # kernel launch\n threadsperblock = (4, 4, 4)\n blockspergrid = tuple(\n [math.ceil(s / b) for s, b in 
zip(marr_numba.shape, threadsperblock)]\n )\n set_to_three[blockspergrid, threadsperblock](marr_numba)\n\n # Check results\n shape = 32**3 * 8\n sum_threes = mfab_device.sum_unique(comp=0, local=False)\n assert sum_threes == shape * 3\n\n\n@pytest.mark.skipif(\n amrex.Config.gpu_backend != \"CUDA\", reason=\"Requires AMReX_GPU_BACKEND=CUDA\"\n)\ndef test_mfab_ops_cuda_cupy(make_mfab_device):\n mfab_device = make_mfab_device()\n # https://docs.cupy.dev/en/stable/user_guide/interoperability.html\n import cupy as cp\n import cupy.profiler\n\n # AMReX -> cupy\n ngv = mfab_device.nGrowVect\n print(f\"\\n mfab_device={mfab_device}, mfab_device.nGrowVect={ngv}\")\n\n # assign 3\n with cupy.profiler.time_range(\"assign 3 [()]\", color_id=0):\n for mfi in mfab_device:\n bx = mfi.tilebox().grow(ngv)\n marr = mfab_device.array(mfi)\n marr_cupy = cp.array(marr, copy=False)\n # print(marr_cupy.shape) # 1, 32, 32, 32\n # print(marr_cupy.dtype) # float64\n\n # write and read into the marr_cupy\n marr_cupy[()] = 3.0\n\n # verify result with a .sum_unique\n with cupy.profiler.time_range(\"verify 3\", color_id=0):\n shape = 32**3 * 8\n # print(mfab_device.shape)\n sum_threes = mfab_device.sum_unique(comp=0, local=False)\n assert sum_threes == shape * 3\n\n # assign 2\n with cupy.profiler.time_range(\"assign 2 (set_val)\", color_id=1):\n mfab_device.set_val(2.0)\n with cupy.profiler.time_range(\"verify 2\", color_id=1):\n sum_twos = mfab_device.sum_unique(comp=0, local=False)\n assert sum_twos == shape * 2\n\n # assign 5\n with cupy.profiler.time_range(\"assign 5 (ones-like)\", color_id=2):\n\n def set_to_five(mm):\n xp = cp.get_array_module(mm)\n assert xp.__name__ == \"cupy\"\n mm = xp.ones_like(mm) * 10.0\n mm /= 2.0\n return mm\n\n for mfi in mfab_device:\n bx = mfi.tilebox().grow(ngv)\n marr = mfab_device.array(mfi)\n marr_cupy = cp.array(marr, copy=False)\n\n # write and read into the marr_cupy\n fives_cp = set_to_five(marr_cupy)\n marr_cupy[()] = 0.0\n marr_cupy += fives_cp\n\n # verify\n with cupy.profiler.time_range(\"verify 5\", color_id=2):\n sum = mfab_device.sum_unique(comp=0, local=False)\n assert sum == shape * 5\n\n # assign 7\n with cupy.profiler.time_range(\"assign 7 (fuse)\", color_id=3):\n\n @cp.fuse(kernel_name=\"set_to_seven\")\n def set_to_seven(x):\n x[...] 
= 7.0\n\n for mfi in mfab_device:\n bx = mfi.tilebox().grow(ngv)\n marr = mfab_device.array(mfi)\n marr_cupy = cp.array(marr, copy=False)\n\n # write and read into the marr_cupy\n set_to_seven(marr_cupy)\n\n # verify\n with cupy.profiler.time_range(\"verify 7\", color_id=3):\n sum = mfab_device.sum_unique(comp=0, local=False)\n assert sum == shape * 7\n\n # TODO: @jit.rawkernel()\n\n\n@pytest.mark.skipif(\n amrex.Config.gpu_backend != \"CUDA\", reason=\"Requires AMReX_GPU_BACKEND=CUDA\"\n)\ndef test_mfab_ops_cuda_pytorch(make_mfab_device):\n mfab_device = make_mfab_device()\n # https://docs.cupy.dev/en/stable/user_guide/interoperability.html#pytorch\n import torch\n\n # assign 3: loop through boxes and launch kernel\n for mfi in mfab_device:\n marr = mfab_device.array(mfi)\n marr_torch = torch.as_tensor(marr, device=\"cuda\")\n marr_torch[:, :, :] = 3\n\n # Check results\n shape = 32**3 * 8\n sum_threes = mfab_device.sum_unique(comp=0, local=False)\n assert sum_threes == shape * 3\n\n\n@pytest.mark.skipif(\n amrex.Config.gpu_backend != \"CUDA\", reason=\"Requires AMReX_GPU_BACKEND=CUDA\"\n)\ndef test_mfab_ops_cuda_cuml(make_mfab_device):\n mfab_device = make_mfab_device()\n # https://github.com/rapidsai/cuml\n # https://github.com/rapidsai/cudf\n # maybe better for particles as a dataframe test\n # import cudf\n # import cuml\n\n # AMReX -> RAPIDSAI cuML\n # arr_cuml = ...\n # assert(arr_cuml.__cuda_array_interface__['data'][0] == arr.__cuda_array_interface__['data'][0])\n # TODO\n","sub_path":"tests/test_multifab.py","file_name":"test_multifab.py","file_ext":"py","file_size_in_byte":10360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"596228482","text":"import os, sys\nimport cv2\nfrom functools import cmp_to_key\n\nFPS = 15.0\nDATA_NAME = 'raw1'\n\ncur_dir = sys.path[0]\nprint(f\"INFO - Current reference: {cur_dir}\")\n\nimg_dir = os.path.join(cur_dir, '../data/raw/img/' + DATA_NAME)\nvideo_dir = os.path.join(cur_dir, '../data/processed/video/')\nprint(f\"INFO - Input image directory: {img_dir}\")\nprint(f\"INFO - Output video directory: {video_dir}\")\n\nif not os.path.exists(video_dir):\n os.makedirs(video_dir)\n\nvideo_name = f\"{video_dir}{DATA_NAME}.mp4\"\nimages = [img for img in os.listdir(img_dir) if img.endswith(\".png\")]\n\nimages.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n\nframe = cv2.imread(os.path.join(img_dir, images[0]))\nheight, width, layers = frame.shape\n\nfourcc = cv2.VideoWriter_fourcc(*'MP4V')\nvideo = cv2.VideoWriter(video_name, fourcc, FPS, (width, height))\n\nfor image in images:\n print(f\"INFO - Reading image: {image}\")\n video.write(cv2.imread(os.path.join(img_dir, image)))\n\ncv2.destroyAllWindows()\nvideo.release()\n","sub_path":"Jams/utilities/form_video.py","file_name":"form_video.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"185261514","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/playlist/m3u.py\n# Compiled at: 2009-03-30 14:23:58\nimport os, itertools\n\nclass NotM3uError(Exception):\n\n def __init__(self, fname):\n self.msg = \"File '%s' is not an M3U format file.\" % fname\n\n\nclass M3uException(Exception):\n\n def __str__(self):\n return self.msg\n\n\nclass BadM3uEntryFormat(M3uException):\n\n def __init__(self, line):\n 
self.msg = \"Not a correct M3U line:\\n'%s'\"\n\n\nclass MalformattedM3uEntry(M3uException):\n\n def __init__(self, lineno, line):\n self.msg = \"Malformed M3uEntry at line %d: '%s'\" % (lineno, line)\n\n\nclass M3uEntryLacksPath(M3uException):\n\n def __init__(self, lineno):\n self.msg = 'M3u entry at line %d lacks media URI' % lineno\n\n\nclass M3uEntry(object):\n\n def __init__(self, length, title, path, lineno):\n self.length = length\n self.title = title\n self.path = path\n self.infoline = lineno\n self.pathline = lineno + 1\n self._existsfunc = os.path.exists\n\n @property\n def filename(self):\n raise NotImplementedError\n\n @property\n def exists(self):\n return self._existsfunc(self.path)\n\n\nclass M3uFileReader(object):\n\n def __init__(self, fp):\n self.fp = fp\n self.start()\n\n def start(self):\n self.fp.seek(0)\n self.lc = itertools.count(1)\n first = self.fp.readline().strip()\n if first != '#EXTM3U':\n raise NotM3uError(getattr(self.fp, 'name', self.fp.__class__.__name__))\n self.lc.next()\n\n def next(self):\n infoln, pathln = self.lc.next(), self.lc.next()\n infoline, path = self.fp.readline().strip().split(':'), self.fp.readline().strip()\n if infoline == ['']:\n raise StopIteration\n if path == ['']:\n raise M3uEntryLacksPath(infoln)\n if infoline[0] == '#EXTINF':\n (length, title) = infoline[1].split(',')\n return M3uEntry(length, title, path, infoln)\n else:\n raise MalformattedM3uEntry(1, infoline)\n\n\nclass M3u(object):\n\n def __init__(self, pathOrFile):\n if hasattr(pathOrFile, 'close'):\n self._fp = pathOrFile\n else:\n self.path = pathOrFile\n\n @property\n def fp(self):\n fp = getattr(self, '_fp', False)\n if fp:\n return fp\n else:\n self._fp = open(self.path, 'r')\n return self._fp\n\n def __iter__(self):\n self.filereader = M3uFileReader(self.fp)\n return self.filereader","sub_path":"pycfiles/playlist-0.1.0-py2.5/m3u.py","file_name":"m3u.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"647387570","text":"#! /usr/bin/python\n\n# ESIM Copyright (C) 2016 Michael Stensby, Mate Verunica\n#\n# This software may be modified and distributed under the terms\n# of the MIT license. 
See the LICENSE file for details.\n\nfrom __future__ import print_function\n\nimport sys, math, random\n\n# helper function for HSBC\ndef powers_of_2(number):\n\tpower_of_2 = 2**(number-1).bit_length()\n\tpower_of_2 /= 2\n\tpower_of_2 = int(math.log(number,2))\n\treturn power_of_2\n\n# independent error model\ndef independent(time,block_size, num_blocks, err_prob, correction_flag):\n\ttime_used = 0\n\tsuccess_frames = 0\n\tsent_frames = 0\n\twhile(time_used < time):\n\t\t# send a frame as K blocks\n\t\twhile True:\n\t\t\tframe_errors = 0\n\t\t\tfor block in range (num_blocks):\n\t\t\t\tblock_errors = 0\n\t\t\t\tfor bit in range (block_size):\n\t\t\t\t\ttime_used+=1\n\t\t\t\t\tif (random.random() <= err_prob) :\n\t\t\t\t\t\tblock_errors+=1\n\t\t\t\tif block_errors > correction_flag:\n\t\t\t\t\t# frame has error\n\t\t\t\t\tframe_errors+=1\n\t\t\tsent_frames+=1\n\n\t\t\t# frame sent correctly?\n\t\t\tif frame_errors > 0:\n\t\t\t\t# failed sending, resend whole frame\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tsuccess_frames+=1\n\t\t\t\tbreak\n\treturn time_used, success_frames, sent_frames\n\n# burst error model\ndef burst(time, feedback_time, block_size, num_blocks, err_prob, burst_len, non_burst_len, correction_flag):\n\ttime_used = 0\n\tsuccess_frames = 0\n\tsent_frames = 0\n\tburst_remain = burst_len\n\tnon_burst_remain = 0\n\twhile(time_used < time):\n\t\t# send a frame as K blocks\n\t\twhile True:\n\t\t\tframe_errors = 0\n\t\t\t# send each block of frame\n\t\t\tfor block in range (num_blocks):\n\t\t\t\tblock_errors = 0\n\t\t\t\t# send each bit of block\n\t\t\t\tfor bit in range (block_size):\n\t\t\t\t\ttime_used+=1\n\t\t\t\t\tif (burst_remain > 0): # in burst\n\t\t\t\t\t\tburst_remain-=1\n\t\t\t\t\t\tif (random.random() <= err_prob):\n\t\t\t\t\t\t\tblock_errors+=1\n\t\t\t\t\t\tif burst_remain == 0: # end of burst, starting non-burst\n\t\t\t\t\t\t\tnon_burst_remain = non_burst_len\n\t\t\t\t\telif (non_burst_remain > 0): # non-burst\n\t\t\t\t\t\tnon_burst_remain-=1\n\t\t\t\t\t\tif non_burst_remain == 0: #end of non-burst, starting burst\n\t\t\t\t\t\t\tburst_remain = burst_len\n\n\t\t\t\tif block_errors > correction_flag:\n\t\t\t\t\t# frame has error\n\t\t\t\t\tframe_errors+=1\n\n\t\t\tsent_frames+=1\n\t\t\ttime_used+= feedback_time\n\n\t\t\t# frame sent correctly?\n\t\t\tif frame_errors > 0:\n\t\t\t\t# failed sending, resend whole frame\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tsuccess_frames+=1\n\t\t\t\tbreak\n\treturn time_used, success_frames, sent_frames\n\ndef main():\n\t# check args\n\tif (len(sys.argv) < 10):\n\t\traise ValueError('Invalid Number of Arguments')\n\n\t# assign variables\n\tmodel = sys.argv[1]\n\tfeedback_time = int(sys.argv[2])\n\tnum_blocks = int(sys.argv[3])\n\tframe_size = int(sys.argv[4])\n\terr_prob = float(sys.argv[5])\n\tburst_len = int(sys.argv[6])\n\tnon_burst_len = int(sys.argv[7])\n\ttime = int(sys.argv[8])\n\tnum_trials = int(sys.argv[9])\n\n\t# calculate r (num blocks)\n\tif (num_blocks > 0):\n\t\t# calculate num of hamming bits needed (num of powers of 2 less than size)\n\t\tblock_size = frame_size / num_blocks\n\t\tblock_size += powers_of_2(block_size)\n\t\terr_correction = 1\n\n\telif (num_blocks == 0):\n\t\t# no error correction needed\n\t\tblock_size = frame_size\n\t\terr_correction = 0\n\t\tnum_blocks = 1\n\n\t# initialize variables and lists\n\ttime_used = []\n\tsuccess_frames = []\n\tsent_frames = []\n\tthroughput = []\n\tavg_throughput = 0.000000\n\ttransmissions = []\n\tavg_transmissions = 0.000000\n\tstd_dev_trans = 0.0\n\tstd_dev_through = 
0.0\n\tif (model == \"B\") or (model == \"b\"):\n\t\terr_prob = err_prob * ((non_burst_len+burst_len)/burst_len)\n\n\t# run trials\n\tfor trial in range(num_trials):\n\t\trandom.seed(sys.argv[9+trial])\n\t\ttime_used.append(0)\n\t\tsuccess_frames.append(0)\n\t\tsent_frames.append(0.000000)\n\t\tthroughput.append(0.0000000)\n\t\ttransmissions.append(0)\n\t\tif (model == \"I\") or (model == \"i\"): # independent\n\t\t\ttime_used[trial], success_frames[trial], sent_frames[trial] = independent(time,block_size,num_blocks,err_prob,err_correction)\n\t\telif (model == \"B\") or (model == \"b\"): # burst\n\t\t\ttime_used[trial], success_frames[trial], sent_frames[trial] = burst(time,feedback_time,block_size,num_blocks,err_prob,burst_len, non_burst_len,err_correction)\n\t\t# debug print statement: print(time_used[trial], success_frames[trial], sent_frames[trial])\n\t\tthroughput[trial] = ((frame_size*success_frames[trial]/float(time_used[trial])))\n\t\ttransmissions[trial] = (sent_frames[trial] / float(success_frames[trial]))\n\n\t# calculate avg transmissions and throughput\n\tfor trial in range(num_trials):\n\t\tavg_transmissions += float(transmissions[trial])\n\t\tavg_throughput += float(throughput[trial])\n\tavg_transmissions /= float(num_trials)\n\tavg_throughput /= float(num_trials)\n\n\t# calculate std dev for each\n\tfor trial in range(num_trials):\n\t\tstd_dev_trans += ((transmissions[trial]-avg_transmissions)**2)\n\t\tstd_dev_through += ((throughput[trial]-avg_throughput)**2)\n\tstd_dev_trans = (std_dev_trans/(num_trials - 1))** 0.5\n\tstd_dev_through = (std_dev_through/(num_trials -1)) ** 0.5\n\n\t# calculate confidence intervals\n\tcon_trans = (avg_transmissions-(2.776*std_dev_trans/(5**0.5)),avg_transmissions+(2.776*std_dev_trans/(5**0.5)))\n\tcon_through = (avg_throughput-(2.776*std_dev_through/(5**0.5)),avg_throughput+(2.776*std_dev_through/(5**0.5)))\n\n\t# print results\n\tfor i in range (1,(10+num_trials)):\n\t\tprint(sys.argv[i], end=\" \")\n\tprint(end =\"\\n\")\n\tprint(avg_transmissions,\" \",con_trans,\"\\n\",avg_throughput,\" \",con_through,end =\"\\n\")\n\treturn\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ESIM/esim.py","file_name":"esim.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"648854086","text":"from app import db\nfrom app.models.planet import Planet\nfrom flask import request, Blueprint, make_response, jsonify\n\nplanets_bp = Blueprint(\"planets\", __name__, url_prefix=\"/planets\")\n\n\n# accessing/getting all planets\n\n@planets_bp.route(\"\", methods=[\"GET\", \"POST\"])\ndef handle_planets():\n if request.method == \"GET\":\n planets = Planet.query.all()\n planets_response = []\n for planet in planets:\n planets_response.append({\n \"id\": planet.id,\n \"name\": planet.name,\n \"description\": planet.description,\n \"planet_moons\": planet.planet_moons\n })\n return jsonify(planets_response)\n elif request.method == \"POST\":\n request_body = request.get_json()\n new_planet = Planet(name=request_body[\"name\"],\n description=request_body[\"description\"],\n planet_moons=request_body[\"planet_moons\"])\n\n\n db.session.add(new_planet) \n db.session.commit() \n return make_response(f\"Planet {new_planet.name} successfully created\", 201)\n\n\n# accessing a direct planet using their ID \n# By \"getting\" we can then -- > Put, Delete\n\n@planets_bp.route(\"/\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef handle_planet(planet_id):\n planets = 
Planet.query.get(planet_id)\n\n if planets is None:\n return make_response(\"\", 404)\n elif request.method == \"GET\":\n return {\n \"id\": planets.id,\n \"name\": planets.name,\n \"description\": planets.description,\n \"planet_moons\": planets.planet_moons \n }\n elif request.method == \"PUT\":\n planet_data = request.get_json()\n\n planets.name = planet_data[\"name\"]\n planets.description = planet_data[\"description\"]\n\n db.session.commit()\n\n return make_response(f\"Planet #{planets.id} successfully updated\")\n\n elif request.method == \"DELETE\":\n db.session.delete(planets)\n db.session.commit()\n return make_response(f\"Planet #{planets.id} successfully deleted\")\n\n\n\n\n\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"245625104","text":"# Code your solution here\nmonth = input()\n\nmonth_dict = {\n \"january\" : \"31\",\n \"february\" : \"28\",\n \"march\" : \"30\",\n \"april\" : \"31\",\n \"may\" : \"30\",\n \"june\" : \"31\",\n \"july\" : \"30\",\n \"august\" : \"31\",\n \"september\" : \"30\",\n \"october\" : \"31\",\n \"november\" : \"30\",\n \"december\" : \"31\"\n}\n\nif month in month_dict.keys():\n data = month_dict.get(month)\n","sub_path":"introduction_and_environment/data_types_and_control_flow/3_month_days/Solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"2653889","text":"'''\n70. Climbing Stairs\n\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\nNote: Given n will be a positive integer.\n\n\nExample 1:\n\nInput: 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n\n1. 1 step + 1 step\n2. 2 steps\nExample 2:\n\nInput: 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 
2 steps + 1 step\n'''\n\nclass Solution:\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n # 2 ways to step in i-th stair:\n # from (i-1) with a single step\n # form (i-2) with a step of 2\n # so the number of ways to step in i-th stair is \n # dp[i] = dp[i-1]+dp[i-2]\n #\n # which are Fibonacci numbers\n\n dp = [1,2]\n for i in range(2,n):\n dp.append(dp[i-1]+dp[i-2])\n return dp[n-1]\n\n\n\n\n\n","sub_path":"problem70.py","file_name":"problem70.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312704638","text":"import logging\nimport os\nimport queue\nimport threading\n\nfrom telegram.ext import CommandHandler, Filters, MessageHandler, Updater\n\nfrom client import chats, connect_ws, contexts\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG\n)\n\nBOT_TOKEN = os.environ[\"BOT_TOKEN\"]\n\n\ndef start(update, context):\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"{update.effective_user.first_name}, just talk to me\",\n )\n\n\ndef talk(update, context):\n incoming_message = update.message.text\n chat_id = update.effective_chat.id\n if chat_id not in chats:\n chats[chat_id] = queue.Queue()\n user = update.effective_user\n username = f\"{user.first_name} {user.last_name}\"\n threading.Thread(target=connect_ws, args=(chat_id, username)).start()\n chats[chat_id].put(incoming_message)\n contexts[chat_id] = context\n\n\ndef unknown(update, context):\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"sorry, I don't have any command. just talk to me\",\n )\n\n\nif __name__ == \"__main__\":\n updater = Updater(token=BOT_TOKEN, use_context=True)\n dispatcher = updater.dispatcher\n\n start_handler = CommandHandler(\"start\", start)\n message_handler = MessageHandler(Filters.text & (~Filters.command), talk)\n unknown_handler = MessageHandler(Filters.command, unknown)\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(message_handler)\n dispatcher.add_handler(unknown_handler)\n\n updater.start_polling()\n","sub_path":"parlai/chat_service/services/telegram/tg.py","file_name":"tg.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327685107","text":"import unittest\n\nfrom ..common_neon.pythnetwork import PythNetworkClient\n\nfrom ..common_neon.solana_tx import SolPubKey\nfrom ..common_neon.solana_interactor import SolInteractor\nfrom ..common_neon.config import Config\n\nfrom ..mempool.gas_price_calculator import GasPriceCalculator\n\nfrom unittest.mock import patch, call\nfrom decimal import Decimal\n\n\nclass FakeConfig(Config):\n @property\n def pyth_mapping_account(self) -> SolPubKey:\n return SolPubKey.from_string('BmA9Z6FjioHJPpjT39QazZyhDRUdZy2ezwx4GiDdE2u2') # only for devnet\n\n @property\n def min_gas_price(self) -> int:\n return 0\n\n\nclass TestGasPriceCalculator(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n config = FakeConfig()\n solana_url = \"https://api.devnet.solana.com\" # devnet\n solana = SolInteractor(config, solana_url)\n testee = GasPriceCalculator(config, solana)\n testee.update_mapping()\n cls.testee = testee\n cls.config = config\n\n def setUp(self) -> None:\n # reset time on test begins\n self.testee.recent_sol_price_update_time = None\n\n @patch.object(PythNetworkClient, 'get_price')\n def 
test_success_update_price(self, mock_get_price):\n \"\"\"\n Should succesfully calculate gas price on first attempt\n \"\"\"\n sol_price = Decimal('156.3')\n neon_price = Decimal('0.25')\n\n mock_get_price.side_effect = [{'status': 1, 'price': neon_price}, {'status': 1, 'price': sol_price}]\n\n self.testee.update_gas_price()\n gas_price = self.testee.min_gas_price\n expected_price = (sol_price / self.testee.neon_price_usd) * (1 + self.testee.operator_fee) * pow(Decimal(10), 9)\n self.assertEqual(gas_price, expected_price)\n\n mock_get_price.assert_has_calls([call('Crypto.NEON/USD'), call('Crypto.SOL/USD')])\n\n @patch.object(PythNetworkClient, 'get_price')\n def test_success_update_price_after_retry_due_to_wrong_price_status(self, mock_get_price):\n \"\"\"\n Should retry get_price after wrong price status\n \"\"\"\n sol_price = Decimal('156.3')\n\n mock_get_price.side_effect = [\n None,\n {'status': 0, 'price': sol_price}, # <--- Wrong price status\n None,\n {'status': 1, 'price': sol_price}\n ]\n\n for i in range(2):\n self.testee.update_gas_price()\n\n gas_price = self.testee.min_gas_price\n expected_price = (sol_price / self.testee.neon_price_usd) * (1 + self.testee.operator_fee) * pow(Decimal(10), 9)\n self.assertEqual(gas_price, expected_price)\n\n mock_get_price.assert_has_calls([call('Crypto.NEON/USD'), call('Crypto.SOL/USD')] * 2)\n\n @patch.object(PythNetworkClient, 'get_price')\n def test_success_update_price_after_retry_due_to_get_price_exception(self, mock_get_price):\n \"\"\"\n Should retry get_price after exception\n \"\"\"\n sol_price = Decimal('156.3')\n\n mock_get_price.side_effect = [\n None,\n Exception(\"Test exception happened\"),\n None,\n {'status': 1, 'price': sol_price}\n ]\n\n for i in range(2):\n self.testee.update_gas_price()\n\n gas_price = self.testee.min_gas_price\n expected_price = (sol_price / self.testee.neon_price_usd) * (1 + self.testee.operator_fee) * pow(Decimal(10), 9)\n self.assertEqual(gas_price, expected_price)\n\n mock_get_price.assert_has_calls([call('Crypto.NEON/USD'), call('Crypto.SOL/USD')] * 2)\n","sub_path":"proxy/testing/test_gas_price_calculator.py","file_name":"test_gas_price_calculator.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"485325389","text":"# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nimport astropy as ap\nimport matplotlib.pyplot as plt\nimport math as m\nimport argparse\nfrom astropy.cosmology import WMAP9 as cosmo\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filein\",help=\"File containing magnitudes and redshifts\")\nparser.add_argument(\"-d\",\"--diagnostic\",action=\"store_true\",help=\"Include diagnostic plots. Vmax vs Magnitude, Number of Galaxies per M bin and Density (Sum of Luminosity Function) vs Redshift\")\nparser.add_argument(\"-z\",\"--zbin\",type=int,help=\"Number of redshift bins. Default=19\",default=1)\nparser.add_argument(\"-m\",\"--mbin\",type=int,help=\"Number of Absolute Magnitude bins. 
Default=19\",default=19)\nparser.add_argument(\"-p\",\"--zmin\",type=float,help=\"Minimum redshift to consider in luminosity function, default=0.5\", default=0.35)\nparser.add_argument(\"-q\",\"--zmax\",type=float,help=\"Maximum redshift to consider in luminosity function, default=1.3\", default=0.55)\nparser.add_argument(\"-r\",\"--Mmin\",type=float,help=\"Minimum redshift to consider in luminosity function, default=--26.7\", default=-24)\nparser.add_argument(\"-s\",\"--Mmax\",type=float,help=\"Minimum redshift to consider in luminosity function, default=-18.5\", default=-15)\nparser.add_argument(\"-ama\",\"--appmax\",type=float,help='Maximum apparent magnitude to consider part of the survey',default=30)\nparser.add_argument(\"-ami\",\"--appmin\",type=float,help='Maximum apparent magnitude to consider part of the survey',default=12)\nparser.add_argument(\"-om\",\"--OmegaMatter\",type=float,help=\"Omega Matter, if you want to define your own cosmology\", default=-1)\nparser.add_argument(\"-ov\",\"--OmegaVacuum\",type=float,help=\"Omega Vacuum, if you want to define your own cosmology\",default=-1)\nparser.add_argument(\"-ho\",\"--HubbleConstant\",type=float,help=\"Hubble Constant if you want to define your own cosmology\",default=-1)\nparser.add_argument(\"-sh\",\"--shrink\",type=int,help=\"Testing an incompleteness correction\",default=-1)\nparser.add_argument(\"-rc\",\"--randcorr\",action=\"store_true\",help=\"Include random incompleteness. This will invoke the random incompleteness part of the code and will remove a random number of sources per each magnitude bin\") \nparser.add_argument(\"-ps\",\"--phistar\",type=float,help=\"Phi* value for input Schechter Function\",default=0.00715)\nparser.add_argument(\"-ms\",\"--mstar\",type=float,help=\"M* value for input Schechter Fucntion\",default=-21.17)\nparser.add_argument(\"-al\",\"--schalpha\",type=float,help=\"alpha value for input Schechter Function\",default=-1.03)\nparser.add_argument(\"-fo\",\"--fileout\",help=\"Filename of PDF you want to generate\",default='LuminosityFunctionPlot.pdf')\n\n\nargs=parser.parse_args()\n\noldabsmag,oldzarr=np.loadtxt(args.filein,unpack=True)\n\nprint(args.zbin,args.mbin,args.zmin,args.zmax,args.Mmin,args.Mmax)\n\nappmag=oldabsmag+5*(np.log10(100000*cosmo.luminosity_distance(oldzarr).value))\nabsmag=np.array([])\nzarr=np.array([])\nif args.shrink>0:\n\tfor i in range(0,len(oldabsmag)):\n\t\tif i%args.shrink==0:\n\t\t\tabsmag=np.append(absmag,oldabsmag[i])\n\t\t\tzarr=np.append(zarr,oldzarr[i])\nelse:\n\tfor i in range(0,len(oldabsmag)):\n\t\tif np.logical_and(appmag[i]<29.37,appmag[i]>17):\n\t\t\tabsmag=np.append(absmag,oldabsmag[i])\n\t\t\tzarr=np.append(zarr,oldzarr[i])\n\n\nappmax=args.appmax\nappmin=args.appmin\n\nprint(zarr)\nzdif=args.zmax-args.zmin\nzbinsize=zdif/args.zbin\nMdif=args.Mmax-args.Mmin\nMbinsize=float(Mdif)/float(args.mbin)\nprint(Mdif,Mbinsize)\nzVariable=args.zmin\nMPopsByz=[]\nzBinWhere=[]\ni=0\nzbinup=np.arange(args.zbin)*1.\nzbinlow=np.arange(args.zbin)*1.\n\nappmag=absmag+5*(np.log10(100000*cosmo.luminosity_distance(zarr).value))\n\nif args.randcorr:\n\tappmagbins=np.linspace(appmin,appmax,num=args.mbin)\n\tappmagbinsize=(appmax-appmin)/args.mbin\n\tgood_z=np.random.random(len(zarr))\n\tappmagbins_include_good_z=np.zeros_like(appmagbins)\n\tfor i in range(0,len(appmagbins)):\n\t\tif 
appmagbins[i]<23.5:\n\t\t\tappmagbins_include_good_z[i]=0.97\n\t\telse:\n\t\t\tappmagbins_include_good_z[i]=0.97-i*0.03*good_z[i]\n\t\t\tprint(good_z[i])\n\tspec_success_rate=np.zeros_like(zarr)\n\tfor i in range(0,len(zarr)):\n\t\tfor j in range(0,len(appmagbins)-1):\n\t\t\tif appmag[i]appmagbins[j+1]:\n\t\t\t\tif good_z[i]=zVariable) & (zarrMVariable))[0]])\n\t\tMBinUp[j]=MVariable\n\t\tMVariable=MVariable+Mbinsize\n\t\tMBinLow[j]=MVariable\n\tzVariable=zVariable+zbinsize\n\nzVariable=args.zmin\nMVariable=args.Mmin\n\nif args.randcorr:\n\tWeightArray=np.zeros_like(zarr)\n\tfor i in range(0,args.zbin):\n\t\tfor j in range(0,args.mbin):\n\t\t\tzuse1=float(len(np.where(spec_success_rate[LumFuncWhere[i][j]]==1)[0]))\n\t\t\tzuse0=float(len(np.where(spec_success_rate[LumFuncWhere[i][j]]==0)[0]))\n\t\t\tif zuse1!=0:\n\t\t\t\tspecweight=(zuse1+zuse0)/zuse1\n\t\t\telse:\n\t\t\t\tspecweight=0\n\t\t\tWeightArray[LumFuncWhere[i][j]]=specweight\n\tprint(WeightArray)\n\nzupper=[]\nzlower=[]\nfor i in range(0,len(LumFuncWhere)):\n\tzupper.append([])\n\tzlower.append([])\n\tfor j in range(0,len(LumFuncWhere[i])):\n\t\tzupper[i].append(np.arange(len(LumFuncWhere[i][j])*1.))\n\t\tzlower[i].append(np.arange(len(LumFuncWhere[i][j])*1.))\n\t\tfor k in range(0,len(LumFuncWhere[i][j])):\n\t\t\ttempm=appmag[LumFuncWhere[i][j][k]]\n\t\t\ttempz=zarr[LumFuncWhere[i][j][k]]\n\t\t\twhile np.logical_and(tempm=zbinup[i]:\n\t\t\t\ttempz=zbinup[i]\n\t\t\tzupper[i][j][k]=tempz\n\t\t\ttempm=appmag[LumFuncWhere[i][j][k]]\n\t\t\ttempz=zarr[LumFuncWhere[i][j][k]]\n\t\t\twhile np.logical_and(tempm>appmin,tempz>=zbinlow):\n\t\t\t\ttempm=absmag[LumFuncWhere[i][j][k]]+5*(np.log10(100000*cosmo.luminosity_distance(tempz).value))\n\t\t\t\ttempz=tempz-0.01\n\t\t\ttempz=tempz+0.01\n\t\t\twhile np.logical_and(tempm>appmin,tempz>=zbinlow):\n\t\t\t\ttempm=absmag[LumFuncWhere[i][j][k]]+5*(np.log10(100000*cosmo.luminosity_distance(tempz).value))\n\t\t\t\ttempz=tempz-0.001\n\t\t\ttempz=tempz+0.001\n\t\t\twhile np.logical_and(tempm>appmin,tempz>=zbinlow):\n\t\t\t\ttempm=absmag[LumFuncWhere[i][j][k]]+5*(np.log10(100000*cosmo.luminosity_distance(tempz).value))\n\t\t\t\ttempz=tempz-0.0001\n\t\t\ttempz=tempz+0.0001\n\t\t\tif tempz<=zbinlow[i]:\n\t\t\t\ttempz=zbinlow[i]\n\t\t\tzlower[i][j][k]=tempz\n\ntotvol=cosmo.comoving_volume(zbinup).value/(4*m.pi/0.000312)-cosmo.comoving_volume(zbinlow).value/(4*m.pi/(0.000312))\n\nCMV=[]\np=0\nq=0\ns=0\n\nfor p in range(0,len(LumFuncWhere)):\n\tCMV.append([])\n\tfor q in range(0,len(LumFuncWhere[p])):\n\t\tCMV[p].append(np.arange(len(LumFuncWhere[p][q])*1.))\n\t\tif len(CMV[p][q])!=0:\n\t\t\tCMV[p][q]=cosmo.comoving_volume(zupper[p][q]).value/(4*m.pi/0.000312)-cosmo.comoving_volume(zlower[p][q]).value/(4*m.pi/(0.000312))\t\t#Comoving volume from astropy calculates full comoving volume at z. Multiply by ratio of solid angle of full sky and solid angle of COSMOS field. Take difference of comoving volume at zmax and comoving volume at zmin for each source to find Max comoving volume the source could fall in and still be part of its bin. 
\nLumFunc=[]\nLumFuncErr=[]\nLogErr=[]\nNGal=[]\nDensity=np.arange(len(CMV)*1.)\n\ndef schechter_fit(sample_M, phi=0.4*np.log(10)*args.phistar, M_star=args.mstar, alpha=args.schalpha, e=2.718281828):\n schechter = phi*(10**(0.4*(alpha+1)*(M_star-sample_M)))*(e**(-np.power(10,0.4*(M_star-sample_M))))\n return schechter\n\nAveCMV=[]\n\nif args.randcorr:\n\tfor i in range(0,len(CMV)):\n\t\tAveCMV.append(np.arange(len(CMV[i]))*1.)\n\t\tLumFunc.append(np.arange(len(CMV[i]))*1.)\n\t\tLumFuncErr.append(np.arange(len(CMV[i]))*1.)\n\t\tLogErr.append(np.arange(len(CMV[i]))*1.)\n\t\tNGal.append(np.arange(len(CMV[i]))*1.)\n\t\tfor j in range(0,len(CMV[i])):\n\t\t\tval=0.0\n\t\t\terr=0.0\n\t\t\tTOTCMV=0.0\n\t\t\tNGalparam=0\n\t\t\tfor k in range(0,len(CMV[i][j])):\n\t\t\t\tif spec_success_rate[LumFuncWhere[i][j][k]]==1:\n\t\t\t\t\tif args.shrink>0:\n\t\t\t\t\t\tval=val+(args.shrink*WeightArray[LumFuncWhere[i][j][k]])/(CMV[i][j][k])\t#LumFunc=Sum(1/Vmaxi) from i=0 to N \n\t\t\t\t\telse:\n\t\t\t\t\t\tval=val+WeightArray[LumFuncWhere[i][j][k]]/(CMV[i][j][k])\t#LumFunc=Sum(1/Vmaxi) from i=0 to N\n\t\t\t\t\tif CMV[i][j][k]==0:\n\t\t\t\t\t\terr=err+0\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tif args.shrink>1:\n\t\t\t\t\t\t\terr=err+np.power(args.shrink*WeightArray[LumFuncWhere[i][j][k]],2)/(np.power((CMV[i][j][k]*Mbinsize),2))\t\t#Poission Error for Wilmer 2006 is sqrt(sum(1/Vmax**2))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\terr=err+np.power(WeightArray[LumFuncWhere[i][j][k]],2)/(np.power((CMV[i][j][k]*Mbinsize),2))\t\t#Poission Error for Wilmer 2006 is sqrt(sum(1/Vmax**2))\n\t\t\t\t\tNGalparam=NGalparam+1\n\t\t\t\t\tTOTCMV=TOTCMV+CMV[i][j][k]\n\t\t\tLumFunc[i][j]=val/Mbinsize\n\t\t\tprint(LumFunc[i][j],schechter_fit((MBinUp[j]+MBinLow[j])/2),schechter_fit((MBinUp[j]+MBinLow[j])/2)-LumFunc[i][j])\n\t\t\tNGal[i][j]=NGalparam\n\t\t\tAveCMV[i][j]=TOTCMV/NGal[i][j]\n\t\t\tif err==0:\n\t\t\t\tLumFuncErr[i][j]=1\n\t\t\t\tLogErr[i][j]=1\n\t\t\telse:\n\t\t\t\tLumFuncErr[i][j]=m.sqrt(err)\n\t\t\t\tLogErr[i][j]=LumFuncErr[i][j]/(LumFunc[i][j]*np.log(10)) #Calculating log(err)\n\t\tDensity[i]=sum(LumFunc[i])\nelse:\n\tfor i in range(0,len(CMV)):\n\t\tAveCMV.append(np.arange(len(CMV[i]))*1.)\n\t\tLumFunc.append(np.arange(len(CMV[i]))*1.)\n\t\tLumFuncErr.append(np.arange(len(CMV[i]))*1.)\n\t\tLogErr.append(np.arange(len(CMV[i]))*1.)\n\t\tNGal.append(np.arange(len(CMV[i]))*1.)\n\t\tfor j in range(0,len(CMV[i])):\n\t\t\tval=0.0\n\t\t\terr=0.0\n\t\t\tTOTCMV=0.0\n\t\t\tfor k in range(0,len(CMV[i][j])):\n\t\t\t\tif args.shrink>0:\n\t\t\t\t\tval=val+args.shrink/(CMV[i][j][k])\t#LumFunc=Sum(1/Vmaxi) from i=0 to N \n\t\t\t\telse:\n\t\t\t\t\tval=val+1/(CMV[i][j][k])\t#LumFunc=Sum(1/Vmaxi) from i=0 to N\n\t\t\t\tif CMV[i][j][k]==0:\n\t\t\t\t\terr=err+0\t\t\n\t\t\t\telse:\n\t\t\t\t\tif args.shrink>0:\n\t\t\t\t\t\terr=err+float(args.shrink)/((CMV[i][j][k]*Mbinsize)**2)\t\t#Poission Error for Wilmer 2006 is sqrt(sum(1/Vmax**2))\n\t\t\t\t\telse:\n\t\t\t\t\t\terr=err+1./((CMV[i][j][k]*Mbinsize)**2)\n\t\t\t\tTOTCMV=TOTCMV+CMV[i][j][k]\n\t\t\tLumFunc[i][j]=val/Mbinsize\n\t\t\tprint(LumFunc[i][j],schechter_fit((MBinUp[j]+MBinLow[j])/2),schechter_fit((MBinUp[j]+MBinLow[j])/2)-LumFunc[i][j])\n\t\t\tNGal[i][j]=len(CMV[i][j])\n\t\t\tAveCMV[i][j]=TOTCMV/NGal[i][j]\n\t\t\tif err==0:\n\t\t\t\tLumFuncErr[i][j]=1\n\t\t\t\tLogErr[i][j]=1\n\t\t\telse:\n\t\t\t\tLumFuncErr[i][j]=m.sqrt(err)\n\t\t\t\tLogErr[i][j]=LumFuncErr[i][j]/(LumFunc[i][j]*np.log(10)) #Calculating 
log(err)\n\t\tDensity[i]=sum(LumFunc[i])\n\n\nschechter_range=np.linspace(-24,-15,1000)\nZBinMid=(zbinup+zbinlow)/2.\nMBinMid=(MBinUp+MBinLow)/2.\nMRange=(args.Mmax-args.Mmin)\n\t#Plotting! These commands create plots based on the number of z and M bins. Last few plots could be the same if the number of z bins cannot be easily split. (ex. if you only want 3 z bins,\n\t#program will create 4 plots, but the bottom two will be the same. \n\n\ndef autolabel(rects):\n\tfor rect in rects:\n\t\theight=rect.get_height()\n\t\tif not m.isinf(height):\n\t\t\taxes[1].text(rect.get_x() + rect.get_width()/2.,1.05*height,'%d' % int(np.power(10,height)),ha='center',va='bottom',fontsize='x-small')\n\n\nwith PdfPages(args.fileout) as pdf:\n\tf,axes=plt.subplots(nrows=2,ncols=1,sharex=True,gridspec_kw={'height_ratios':[3,1]})\n\tcode=axes[0].errorbar(MBinMid,np.log10(LumFunc[0]),yerr=LogErr[0],fmt='.',label='1/V$_{MAX}$ code')\n\tschech_func=axes[0].plot(schechter_range,np.log10(schechter_fit(schechter_range)),label='Schechter Function used to make parent sample')\n\tngallabel=axes[0].plot(MBinMid,np.log10(NGal[0]/(Mbinsize*totvol)),label='Number of galaxies divided by max volume')\n\tf.text(0.04,0.65,'Log$_{10}(\\Phi_{M}$) (Mpc$^{-3} mag^{-1}$)',va='center',rotation='vertical')\n\taxes[0].legend(loc=4)\n\tplt.xlim((args.Mmin,args.Mmax))\n\tndist=axes[1].bar(MBinMid,np.log10(NGal[0]),Mbinsize,label='Number of sources per Absolute Magnitude Bin')\n\tautolabel(ndist)\n\tplt.xlabel('Absolute Magnitude (M)')\n\tplt.ylabel('Log$_{10}$(Number of Galaxies)')\n\tplt.subplots_adjust(hspace=0)\n\tpdf.savefig(orientation='landscape')\n\tif args.diagnostic:\n\t\tfig2=plt.figure(2)\n\t\tfor i in range(0,len(CMV[0])):\n\t\t\tplt.plot(MBinMid[i],AveCMV[0][i],'.')\n\t\tplt.ylabel('Comoving Volume (Mpc$^{-3}$)')\n\t\tplt.xlabel('Absolute Magnitude (M)')\n\t\tplt.xlim((args.Mmin,args.Mmax))\n\t\tpdf.savefig(orientation='landscape')\n","sub_path":"TEST_LUM_FUNC.py","file_name":"TEST_LUM_FUNC.py","file_ext":"py","file_size_in_byte":12916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"228821730","text":"#Copyright: Rip Lyster 2016\n\n#Barebones.py taken from the barebones example on the 112 website\n\nfrom tkinter import *\nfrom DrawToPrintBackEnd import *\nimport copy\nimport random\n\ndef init(data):\n data.profiles = []\n data.numProfiles = 0\n data.points = []\n data.width = 800\n data.height = 800\n data.outlineColor = \"#990033\"\n data.bgColor = \"#666699\"\n data.bBgColor = \"white\"\n data.shapeColor = \"black\"\n data.gridColor = \"grey\"\n data.gridPos = (0,0)\n data.gridSpacing = 60\n data.dBoxX = 20\n data.dBoxY = 20\n data.dBoxW = 600\n data.dBoxH = 600\n data.pointRad = 4\n data.mBoxX = 640\n data.mBoxY = 20\n data.mBoxW = 180\n data.mBoxH = 200\n data.message = \"\"\n data.coordinates = []\n data.sliceButtX = 640\n data.sliceButtY = 640\n data.sliceButtW = 140\n data.sliceButtH = 140\n data.settingH = 125\n data.settingW = 125\n data.objHX = 40\n data.objHY = 650\n data.objHeight = 1.0\n data.layerHX = 185\n data.layerHY = 650\n data.layerHeight = 0.2\n data.bedHeight = 200\n data.profileX = 650\n data.profileY = 480\n data.extrusionX = 330\n data.extrusionY = 650\n data.extrusion = 1\n data.bufferX = 475\n data.bufferY = 650\n data.buffer = 1.4\n data.splash = 1\n data.help = 0\n data.splashClickSize = 35\n data.splashClickSizeDS = 1\n data.splashFunTexts = [FunText(),FunText(),FunText(),FunText(),FunText(),FunText(),FunText()]\n\nclass FunText(object):\n 
text1 = [\"so much\",\n \"such\",\n \"very\",\n \"much\",\n \"good\",\n \"wow\"]\n\n text2 = [\"3d\",\n \"print\",\n \"draw\",\n \"generative\",\n \"doge\",\n \"112\",\n \"polygon\",\n \"layers\",\n \"additive\"]\n\n color = [\"red\",\"orange\",\"blue\",\"indigo\",\"violet\",\"pink\"]\n\n def __init__(self):\n self.x = random.randint(0, 800)\n self.y = random.randint(0, 800)\n self.dx = random.randint(-9, 9)\n self.dy = random.randint(-9, 9)\n self.id1 = random.randint(0,5)\n self.id2 = random.randint(0,8)\n self.color = random.randint(0,5)\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def getDx(self):\n self.x += self.dx\n\n def getDy(self):\n self.y += self.dy\n\n def getText(self):\n return FunText.text1[self.id1] + \" \" + FunText.text2[self.id2]\n\n def getColor(self):\n return FunText.color[self.color]\n\ndef drawSplashScreen(canvas,data):\n canvas.create_rectangle(0,0,data.width+10,data.height+10,fill=\"#e6ffcc\")\n\n for text in data.splashFunTexts:\n x1 = text.getX()\n # print(x1)\n y1 = text.getY()\n # print(y1)\n if(x1 < data.width and x1 > 0 and\n y1 < data.height and y1 > 0):\n text.getDx()\n text.getDy()\n canvas.create_text(text.getX(),text.getY(),fill=text.getColor(),\n text=text.getText(),\n font=\"Haettenschweiler 30\",\n justify=CENTER,anchor=CENTER)\n else:\n text.__init__()\n\n canvas.create_text(data.width/2,data.height/3,fill=\"#248f24\",\n text=\"Draw2Print\",font=\"Haettenschweiler 90\",\n justify=CENTER,anchor=CENTER)\n canvas.create_text(data.width/2,data.height-30,fill=\"#248f24\",\n text=\"A project by Rip Lyster\",font=\"Haettenschweiler 25\",\n justify=CENTER,anchor=CENTER)\n\n clickFont = \"Haettenschweiler %d\" %data.splashClickSize\n canvas.create_text(data.width/2,2*data.height/3,fill=\"#248f24\",\n text=\"Click Anywhere To Start\",font=clickFont,\n justify=CENTER,anchor=CENTER)\n\n if(data.splashClickSize > 40):\n data.splashClickSizeDS = -1\n elif(data.splashClickSize < 30):\n data.splashClickSizeDS = 1\n data.splashClickSize += data.splashClickSizeDS\n\ndef drawHelpScreen(canvas,data):\n canvas.create_rectangle(0,0,data.width+10,data.height+10,fill=\"LightSalmon2\")\n canvas.create_rectangle(20,150,data.width-20,data.height-20,\n fill=\"white\",width=5,outline=\"grey\")\n canvas.create_text(data.width/2,75,text=\"Help\",fill=\"grey\",\n font=\"Haettenschweiler 80\")\n helpText = \"\"\" Draw2Print allows you to take any 2d image and create a 3d printable file from it. You\n can create profiles and build up object complexity using simple drawing techniques.\n To create objects, use the following steps:\n\n 1. Click on the canvas to draw the desired shape\n\n 2. Select the height of that profile using the profile height setting\n\n 3. If you want to add another profile, click the add profile button and continue drawing\n the next profile\n\n 4. When you're satisfied with the object, modify the layer height and buffer options to\n the desired values\n\n 5. When you're completely satisfied with all of the settings select the SLICE OBJECT\n button\n\n 6. Open the folder that you run this program from and find the gcode.txt file\n\n 7. Use this gcode.txt file as input into your 3d printer to print your object\n\n Click anywhere to return to the canvas\n \"\"\"\n canvas.create_text(40,170,anchor=NW,font=\"Haettenschweiler 19\",\n justify=LEFT,width=720,text=helpText,\n fill=\"grey25\")\n\ndef initMessage(data):\n data.message = \"\"\"\n Instructions:\\n\n 1. Draw your profile\\n\n 2. Save profiles\\n\n 3. Modify settings\\n\n 4. 
Slice your model\\n\n 5. Print\\n\\n\\n\\n\\n\n Press H for help\"\"\"\n\ndef drawPrevProfile(canvas,data):\n if(len(data.profiles) > 0):\n for profile in data.profiles:\n canvas.create_polygon(getPoints(profile[1]),fill=data.gridColor,\n stipple=\"gray50\")\n prevPoint=None\n for point in getPoints(profile[1]):\n canvas.create_oval(point[0]-data.pointRad,point[1]-data.pointRad,\n point[0]+data.pointRad,point[1]+data.pointRad,\n fill=data.gridColor)\n if(prevPoint!=None):\n canvas.create_line(prevPoint,point,fill=data.outlineColor)\n prevPoint = point\n canvas.create_line(getPoints(profile[1])[0],getPoints(profile[1])[-1],fill=data.outlineColor)\n\ndef drawSettings(canvas,data):\n def drawNumSetting(x,y,var,text,units):\n canvas.create_rectangle(x,y,\n x+data.settingW,\n y+data.settingH,\n fill = data.bBgColor,\n outline = data.gridColor,\n width=2)\n canvas.create_text(x+data.settingW-10,\n y+4*data.settingH/5,\n anchor=E,justify=\"center\",\n text = \"+\", font = \"Sans 40\",\n fill=\"grey\")\n canvas.create_text(x+12,\n y+4*data.settingH/5-6,\n anchor=W,justify=\"center\",\n text = \"-\", font = \"Sans 40\",\n fill=\"grey\")\n canvas.create_text(x+data.settingW/2,\n y-2, anchor=S,\n justify=\"center\",text=\"%s\" %(text),\n font = \"Sans 11\")\n canvas.create_text(x+data.settingW/2,\n y+2*data.settingH/5,\n anchor=CENTER,justify=\"center\",\n text=(\"%.2f%s\" %(var,units)) if isinstance(var,float) else (\"%d%s\" %(var,units)),\n font=\"Sans 22\" if units != \"\" else \"Sans 27\")\n drawNumSetting(data.objHX,data.objHY,data.objHeight,\"Object Height\",\"mm\")\n drawNumSetting(data.layerHX,data.layerHY,data.layerHeight,\"Layer Height\",\"mm\")\n drawNumSetting(data.profileX,data.profileY,data.numProfiles,\"Profiles\",\"\")\n drawNumSetting(data.extrusionX,data.extrusionY,data.extrusion,\"Extrude Per MM\",\"\")\n drawNumSetting(data.bufferX,data.bufferY,data.buffer,\"Stroke Buffer\",\"mm\")\n\ndef drawSliceButt(canvas,data):\n canvas.create_rectangle(data.sliceButtX,data.sliceButtY,\n data.sliceButtX+data.sliceButtW,data.sliceButtY+data.sliceButtH,\n fill = data.bBgColor,outline = data.gridColor,width=2)\n canvas.create_text(data.sliceButtX+data.sliceButtW/2,\n data.sliceButtY+data.sliceButtH/2,\n justify = \"center\",text = \"SLICE\\nOBJECT\",\n font = \"Sans 24\")\n\ndef drawDBox(canvas,data):\n canvas.create_rectangle(data.dBoxX,data.dBoxY,\n data.dBoxX+data.dBoxW,\n data.dBoxY+data.dBoxH,\n outline=data.gridColor,width=2,\n fill=data.bBgColor)\n for lineX in range(data.gridSpacing,data.dBoxW+60,data.gridSpacing):\n canvas.create_line(data.dBoxX+lineX, data.dBoxY,\n data.dBoxX+lineX, data.dBoxH+data.dBoxY,\n fill=data.gridColor)\n canvas.create_text(data.dBoxX+lineX-12,data.dBoxY+10,\n text=str(int(lineX/3)),\n fill=data.gridColor)\n for lineY in range(data.gridSpacing,data.dBoxH+60,data.gridSpacing):\n canvas.create_line(data.dBoxX,data.dBoxY+lineY,\n data.dBoxW+data.dBoxX,data.dBoxX+lineY,\n fill=data.gridColor)\n canvas.create_text(data.dBoxX+12,data.dBoxY+lineY-10,\n text=str(int(lineY/3)),\n fill=data.gridColor)\n\ndef drawShape(canvas, data):\n if(len(data.points) > 0):\n canvas.create_polygon(data.points,fill=data.shapeColor)\n prevPoint=None\n for point in data.points:\n canvas.create_oval(point[0]-data.pointRad,point[1]-data.pointRad,\n point[0]+data.pointRad,point[1]+data.pointRad,\n fill=data.outlineColor)\n if(prevPoint!=None):\n canvas.create_line(prevPoint,point,fill=data.outlineColor)\n prevPoint = point\n 
canvas.create_line(data.points[0],data.points[-1],fill=data.outlineColor)\n\ndef drawMessageBox(canvas,data):\n initMessage(data)\n canvas.create_text(625, 20, text=data.message, anchor=NW,\n font=\"Sans 12\")\n\ndef createCoords(points):\n coords = []\n for point in points:\n coord = ((point[0]-20)/3,(point[1]-20)/3)\n coords.append(coord)\n return coords\n\ndef getPoints(coords):\n points = []\n for coord in coords:\n point = ((coord[0]*3+20),(coord[1]*3+20))\n points.append(point)\n return points\n\ndef pressedArea(event,x,y,h,w):\n return(event.y > y and event.x > x and\n event.y < y + h and event.x < x + w)\n\ndef mousePressed(event, data):\n if(data.splash == 0 and data.help == 0):\n if(pressedArea(event,data.dBoxX,data.dBoxY,data.dBoxH,data.dBoxW)):\n data.points.append((event.x,event.y))\n data.coordinates = createCoords(data.points)\n if(pressedArea(event,data.sliceButtX,data.sliceButtY,data.sliceButtH,data.sliceButtW)):\n if(data.numProfiles == 0):\n writeFile(\"gcode.txt\",getGCode(data.coordinates,data.extrusion,data.layerHeight,data.buffer,\n 200,data.objHeight))\n elif(data.numProfiles > 0):\n writeFile(\"gcode.txt\",getProfilesGCode(data.profiles,data.extrusion,data.layerHeight,data.buffer,\n 200))\n print(\"Written to file gcode.txt\")\n\n #Object Height\n if(pressedArea(event,data.objHX,data.objHY,data.settingH,data.settingW/4)):\n if(data.objHeight > 0.2): data.objHeight -= 1.0\n if(pressedArea(event,data.objHX+3*data.settingW/4,data.objHY,data.settingH,data.settingW/4)):\n data.objHeight += 1.0\n\n #Layer Height\n if(pressedArea(event,data.layerHX,data.layerHY,data.settingH,data.settingW/4)):\n if(data.layerHeight > 0.1): data.layerHeight -= 0.1\n if(pressedArea(event,data.layerHX+3*data.settingW/4,data.layerHY,data.settingH,data.settingW/4)):\n data.layerHeight += 0.1\n\n #Extrusion\n if(pressedArea(event,data.extrusionX,data.extrusionY,data.settingH,data.settingW/4)):\n if(data.extrusion > 0.1): data.extrusion -= 0.1\n if(pressedArea(event,data.extrusionX+3*data.settingW/4,data.extrusionY,data.settingH,data.settingW/4)):\n data.extrusion += 0.1\n\n #Buffer\n if(pressedArea(event,data.bufferX,data.bufferY,data.settingH,data.settingW/4)):\n if(data.buffer > 0.2): data.buffer -= 0.2\n if(pressedArea(event,data.bufferX+3*data.settingW/4,data.bufferY,data.settingH,data.settingW/4)):\n data.buffer += 0.2\n\n\n #Profiles\n if(pressedArea(event,data.profileX,data.profileY,data.settingH,data.settingW/4)):\n if(data.numProfiles > 0):\n data.numProfiles -= 1\n data.points = getPoints(data.profiles.pop()[1])\n if(pressedArea(event,data.profileX+3*data.settingW/4,data.profileY,data.settingH,data.settingW/4) and\n len(data.points) > 2):\n data.numProfiles += 1\n data.profiles.append((data.objHeight,copy.deepcopy(data.coordinates)))\n data.points = []\n data.coordinates = []\n data.objHeight = 1.0\n\n if(data.splash == 1 and data.help == 0):\n data.splash = 0\n if(data.splash == 0 and data.help == 1):\n data.help = 0\n\ndef keyPressed(event, data):\n if(data.splash == 0 and data.help == 0):\n if(event.keysym == \"Delete\"):\n data.points.pop()\n data.coordinates.pop()\n if(event.keysym == \"h\"):\n data.help = 1\n\ndef timerFired(data):\n pass\n\ndef redrawAll(canvas, data):\n if(data.splash == 0 and data.help == 0):\n canvas.create_rectangle(0,0,data.width+10,data.height+10,fill=data.bgColor)\n drawDBox(canvas,data)\n drawPrevProfile(canvas,data)\n drawShape(canvas,data)\n drawMessageBox(canvas,data)\n drawSliceButt(canvas,data)\n drawSettings(canvas,data)\n if(data.splash == 1 and 
data.help == 0):\n drawSplashScreen(canvas,data)\n if(data.splash == 0 and data.help == 1):\n drawHelpScreen(canvas,data)\n\n#Run function taken from Barebones.py on the course website\ndef run(width=300, height=300):\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n # pause, then call timerFired again\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n init(data)\n # create the root and the canvas\n root = Tk()\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n # set up events\n root.bind(\"\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"\", lambda event:\n keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n print(\"Good bye!\")\n\nrun(800, 800)","sub_path":"DrawToPrintGUI.py","file_name":"DrawToPrintGUI.py","file_ext":"py","file_size_in_byte":16240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"516959324","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef main():\n s = sys.argv[1]\n pair1 = 1\n pair2 = 0\n ans = \"\"\n while pair1 < len(s):\n ans += s[pair1] + s[pair2]\n pair2 += 2\n pair1 += 2\n if len(s) % 2 == 1:\n ans += s[-1]\n print(ans)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ca117/lab-051/q1_051.py","file_name":"q1_051.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"525956013","text":"#!/usr/bin/env python3\n\nimport time\nimport speech_recognition as sr\nfrom speech_recognition import Recognizer, Microphone\n\nclass TriggerListener():\n def __init__(self):\n self.recognizer = Recognizer()\n self.microphone = Microphone(device_index=0, sample_rate=16000, chunk_size=256)\n self.is_trigger_listener_active = None\n self.start_listener()\n\n def stop_listen_for_trigger(self):\n print(\"Stopping listener for trigger\")\n self.stop_listening(wait_for_stop=False)\n self.is_trigger_listener_active = False\n\n def process_trigger(self, data):\n if \"computer\" in data.lower():\n print(\"computer found\")\n self.stop_listen_for_trigger()\n\n def listen_for_trigger(self, recognizer, audio):\n try:\n print(\"Sound detected...\")\n trigger = recognizer.recognize_sphinx(audio, keyword_entries=[('computer', 0.5)])\n if trigger:\n print(f\"Trigger detected! 
- {trigger}\")\n self.process_trigger(trigger)\n except sr.UnknownValueError:\n print(\"Sphinx could not understand audio\")\n except sr.RequestError as e:\n print(\"Sphinx error; {0}\".format(e))\n\n def start_listener(self):\n print(\"Starting trigger listener\")\n self.is_trigger_listener_active = True\n self.recognizer.pause_threshold = 0.5\n\n with self.microphone as source:\n self.recognizer.adjust_for_ambient_noise(source, duration=2.0)\n\n self.stop_listening = self.recognizer.listen_in_background(self.microphone, self.listen_for_trigger, phrase_time_limit=1.5)\n\n while self.is_trigger_listener_active:\n print(\"Listening...\")\n time.sleep(1.0)\n\n print(\"TriggerListener finished!\")\n\nif __name__ == \"__main__\":\n while True:\n TriggerListener()\n print(\"TriggerListener main thread finished! 1\")\n TriggerListener()\n print(\"TriggerListener main thread finished! 2\")\n\n\n\n\n\"\"\" r = sr.Recognizer()\nr.pause_threshold = 0.5\n\n# Setting chunk_size to a small value (e.g. 256) increases the sensitiy of the sound detection\nm = sr.Microphone(device_index=0, sample_rate=16000, chunk_size=256)\n\ndef process_trigger(data):\n print(data)\n if \"computer\" in data.lower():\n print(\"computer found\")\n\ndef stop_listen_for_trigger():\n print(\"Stopping listener for trigger\")\n stop_listening(wait_for_stop=False)\n\ndef listen_for_trigger(recognizer, audio):\n try:\n print(\"Sound detected...\")\n trigger = recognizer.recognize_sphinx(audio, keyword_entries=[('computer', 0.5)])\n if trigger:\n print(f\"Trigger detected! - {trigger}\")\n process_trigger(trigger)\n # stop_listen_for_trigger()\n except sr.UnknownValueError:\n print(\"Sphinx could not understand audio\")\n except sr.RequestError as e:\n print(\"Sphinx error; {0}\".format(e))\n\nwith m as source:\n r.adjust_for_ambient_noise(source, duration=2.0)\n\nstop_listening = r.listen_in_background(m, listen_for_trigger, phrase_time_limit=1.5)\n\ncounter = 0\nwhile counter < 500:\n counter += 1\n print(\"Listening...\")\n time.sleep(1.0)\n\n# stop_listening(wait_for_stop=False) \"\"\"\n","sub_path":"Python Tests/stt/stt_listener_class.py","file_name":"stt_listener_class.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"461947681","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom fake_useragent import UserAgent, FakeUserAgentError\nimport pyautogui\nimport time\n\n\noptions = Options()\ntry:\n ua = UserAgent()\n ua.update()\n userAgent = ua.random\nexcept FakeUserAgentError:\n userAgent = \"Mozilla / 5.0 (Windows NT 10.0; Win64; x64) AppleWebKit / 537.36 (KHTML, как Gecko) Chrome / \" \\\n \"72.0.3626.121 Safari / 537.36\"\n\noptions.add_argument(f'user-agent={userAgent}')\nbrowser = webdriver.Chrome(options=options, executable_path=\"../chromedriver.exe\")\n# browser = webdriver.Chrome()\n\n# Переходим на страницу, на которой нужно что-то сделать\nbrowser.get('https://www.instagram.com/')\n\ntime.sleep(5)\nprint('start')\nduration = 1\ninterval = 0.1\n\nimgXY = pyautogui.locateCenterOnScreen('../imgs/start.png')\npyautogui.moveTo(imgXY[0], imgXY[1]+85, duration=duration)\npyautogui.click()\npyautogui.typewrite(['k', 'r', 'o', 'n', 's', 'i', 'r', 'i', 'u', 's', '@', 'g', 'm', 'a', 'i',\n 'l', '.', 'c', 'o', 'm'], interval=interval)\ntime.sleep(1)\n\n# imgXY = pyautogui.locateCenterOnScreen('../imgs/name.png')\npyautogui.moveTo(imgXY[0], imgXY[1]+125, 
duration=duration)\npyautogui.click()\npyautogui.typewrite(['M', 'u', 'r', 'a', 'd', 'enter'], interval=interval)\ntime.sleep(1)\n\n# imgXY = pyautogui.locateCenterOnScreen('../imgs/username.png')\npyautogui.moveTo(imgXY[0], imgXY[1]+175, duration=duration)\npyautogui.click()\npyautogui.typewrite(['m', 'u', 'r', 'a', 'd', '4', '0', '1', 't', 'enter'], interval=interval)\ntime.sleep(1)\n\n# imgXY = pyautogui.locateCenterOnScreen('../imgs/password.png')\npyautogui.moveTo(imgXY[0], imgXY[1]+220, duration=duration)\npyautogui.click()\npyautogui.typewrite(['3', '2', '4', '4', 'd', 'm', 'k', 'enter'], interval=interval)\ntime.sleep(1)\n\n# imgXY = pyautogui.locateCenterOnScreen('../imgs/reg.png')\npyautogui.moveTo(imgXY[0], imgXY[1]+265, duration=duration)\npyautogui.click()\n\n# -----------------------------------------------------------------------------------------------------------\n# Получаем указатель на поле ввода текста в форме постинга\n# login = browser.find_element_by_name('login')\n# password = browser.find_element_by_name('password')\n# webdriver.ActionChains(browser).move_to_element(login).click(login).perform()\n# webdriver.ActionChains(browser).move_to_element(password).click(password).perform()\n# login.send_keys('deit91@yandex.ru')\n# password.send_keys('M3244dmk')\n\n# textarea1 = browser.find_elements_by_class_name('header-services-menu-link-fsJlE header-services-menu-link-\n# not-authenticated-3Uyu_')\n# webdriver.ActionChains(browser).move_to_element(textarea[0]).click(textarea[0]).perform()\n# webdriver.ActionChains(browser).key_down().send_keys('c').key_up().perform()\n# textarea[1].send_keys('909 484-31-21')\n# print(textarea[1].get_attribute('value'))\n\n# submit = browser.find_elements_by_tag_name('button')\n# print(submit)\n# for sub in submit:\n# if (sub.get_attribute('data-marker') == 'login-form/submit'):\n# print(sub.get_attribute('class'))\n# print('-------')\n# print(sub.text)\n# sub.click()\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"264649279","text":"# Copyright (C) 2019 The Raphielscape Company LLC.\n#\n# Licensed under the Raphielscape Public License, Version 1.b (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n\n\"\"\" Userbot module containing commands for keeping notes. \"\"\"\n\nfrom userbot import (BOTLOG, BOTLOG_CHATID, CMD_HELP,\n is_mongo_alive, is_redis_alive)\nfrom userbot.modules.dbhelper import (get_note, get_notes,\n add_note, delete_note)\nfrom userbot.events import register\nfrom asyncio import sleep\n\n\n@register(outgoing=True, pattern=\"^.saved$\")\nasync def notes_active(event):\n \"\"\" For .saved command, list all of the notes saved in a chat. 
\"\"\"\n cmd = event.text[0]\n if not cmd.isalpha() and cmd not in (\"/\", \"#\", \"@\", \"!\"):\n if not is_mongo_alive() or not is_redis_alive():\n await event.edit(\"`Database connections failing!`\")\n return\n\n message = \"`There are no saved notes in this chat`\"\n notes = await get_notes(event.chat_id)\n for note in notes:\n if message == \"`There are no saved notes in this chat`\":\n message = \"Notes saved in this chat:\\n\"\n message += \"🔹 **{}**\\n\".format(note[\"name\"])\n else:\n message += \"🔹 **{}**\\n\".format(note[\"name\"])\n\n await event.edit(message)\n\n\n@register(outgoing=True, pattern=r\"^.clear (\\w*)\")\nasync def remove_notes(event):\n \"\"\" For .clear command, clear note with the given name.\"\"\"\n cmd = event.text[0]\n if not cmd.isalpha() and cmd not in (\"/\", \"#\", \"@\", \"!\"):\n if not is_mongo_alive() or not is_redis_alive():\n await event.edit(\"`Database connections failing!`\")\n return\n notename = event.pattern_match.group(1)\n if await delete_note(event.chat_id, notename) is False:\n return await event.edit(\"`Couldn't find note:` **{}**\"\n .format(notename))\n else:\n return await event.edit(\"`Successfully deleted note:` **{}**\"\n .format(notename))\n\n\n@register(outgoing=True, pattern=r\"^.save (\\w*)\")\nasync def add_filter(event):\n \"\"\" For .save command, saves notes in a chat. \"\"\"\n cmd = event.text[0]\n if not cmd.isalpha() and cmd not in (\"/\", \"#\", \"@\", \"!\"):\n if not is_mongo_alive() or not is_redis_alive():\n await event.edit(\"`Database connections failing!`\")\n return\n\n notename = event.pattern_match.group(1)\n string = event.text.partition(notename)[2]\n if event.reply_to_msg_id:\n string = \" \" + (await event.get_reply_message()).text\n\n msg = \"`Note {} successfully. Use` #{} `to get it`\"\n\n if await add_note(event.chat_id, notename, string[1:]) is False:\n return await event.edit(msg.format('updated', notename))\n else:\n return await event.edit(msg.format('addded', notename))\n\n\n@register(outgoing=True, pattern=\"^.note (\\w*)\")\nasync def save_note(event):\n \"\"\" For .save command, saves notes in a chat. \"\"\"\n cmd = event.text[0]\n if not cmd.isalpha() and cmd not in (\"/\", \"#\", \"@\", \"!\"):\n if not is_mongo_alive() or not is_redis_alive():\n await event.edit(\"`Database connections failing!`\")\n return\n note = event.text[6:]\n note_db = await get_note(event.chat_id, note)\n if not await get_note(event.chat_id, note):\n return await event.edit(\"`Note` **{}** `doesn't exist!`\"\n .format(note))\n else:\n return await event.edit(\" 🔹 **{}** - `{}`\"\n .format(note, note_db[\"text\"]))\n\n\n@register(incoming=True, pattern=r\"#\\w*\", disable_edited=True)\nasync def note(event):\n \"\"\" Notes logic. \"\"\"\n try:\n if not (await event.get_sender()).bot:\n if not is_mongo_alive() or not is_redis_alive():\n return\n\n notename = event.text[1:]\n note = await get_note(event.chat_id, notename)\n if note:\n await event.reply(note[\"text\"])\n except:\n pass\n\n\n@register(outgoing=True, pattern=\"^.rmnotes (.*)\")\nasync def kick_marie_notes(kick):\n \"\"\" For .rmfilters command, allows you to kick all \\\n Marie(or her clones) filters from a chat. 
\"\"\"\n if not kick.text[0].isalpha() and kick.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n bot_type = kick.pattern_match.group(1)\n if bot_type not in [\"marie\", \"rose\"]:\n await kick.edit(\"`That bot is not yet supported!`\")\n return\n await kick.edit(\"```Will be kicking away all Notes!```\")\n await sleep(3)\n resp = await kick.get_reply_message()\n filters = resp.text.split(\"-\")[1:]\n for i in filters:\n if bot_type == \"marie\":\n await kick.reply(\"/clear %s\" % (i.strip()))\n if bot_type == \"rose\":\n i = i.replace('`', '')\n await kick.reply(\"/clear %s\" % (i.strip()))\n await sleep(0.3)\n await kick.respond(\n \"```Successfully purged bots notes yaay!```\\n Gimme cookies!\"\n )\n if BOTLOG:\n await kick.client.send_message(\n BOTLOG_CHATID, \"I cleaned all Notes at \" +\n str(kick.chat_id)\n )\n\nCMD_HELP.update({\n \"notes\": \"\\\n#\\\n\\nUsage: Gets the note with name notename\\\n\\n\\n.save \\\n\\nUsage: Saves notedata as a note with the name notename\\\n\\n\\n.clear \\\n\\nUsage: Deletes the note with name notename.\\\n\"\n})\n","sub_path":"userbot/modules/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357028052","text":"import os\n\nfrom flask import render_template, flash, redirect, url_for, request, current_app, Blueprint, send_from_directory\nfrom flask_login import login_required, current_user\nfrom flask_ckeditor import upload_success, upload_fail\nfrom vfinance.utils import lookup, redirect_back\nfrom vfinance.forms import QuoteForm, TradeForm\nfrom vfinance.models import User, TradeHistory, Portfolio, Watchlist\nfrom vfinance.extensions import db\nfrom vfinance.kchart import chart_plot_upload\n\n\nadmin_bp = Blueprint('admin', __name__)\n\n@admin_bp.route('/quote', methods=[\"GET\", \"POST\"])\n@login_required\ndef get_quote():\n form = QuoteForm()\n if form.validate_on_submit():\n symbol = form.symbol.data\n \n quote = lookup(symbol)\n \n if quote:\n \n return redirect(url_for(\".show_quote\", symbol = symbol))\n else:\n flash(\"Cannot find\", 'warning')\n return redirect_back()\n return render_template(\"admin/quote.html\", form = form)\n \n\n@admin_bp.route('/quote/', methods =[\"GET\",\"POST\"])\n@login_required\ndef show_quote(symbol):\n form = QuoteForm()\n quote = lookup(symbol)\n company = quote[\"name\"]\n price = quote[\"price\"]\n symbol = quote['symbol']\n \n # link = chart_plot_upload(symbol)\n # if form.validate_on_submit():\n # symbol = form.symbol.data\n # quote = lookup(symbol)\n # company = quote[\"name\"]\n # price = quote[\"price\"]\n # return redirect(url_for(\".show_quote\", symbol = symbol))\n return render_template(\"admin/show_quote.html\",form = form, company = company, price = price, symbol = symbol)\n\n\n@admin_bp.route(\"/trade/\", methods=['GET', 'POST'])\n@login_required\ndef get_trade(symbol):\n # symbol = request.args.get(\"symbol\")\n form = TradeForm()\n quote = lookup(symbol)\n if form.validate_on_submit():\n symbol = form.symbol.data\n quantity = form.quantity.data\n price = form.price.data\n action = form.action.data\n return redirect(url_for('.review_order', symbol = symbol, quantity= quantity, price = price, action = action))\n form.symbol.data = symbol\n form.price.data = quote['price']\n return render_template(\"admin/get_trade.html\", form = form)\n\n# @admin_bp.route('/trade/review///', methods=['GET', 'POST'])\n# @login_required\n# def review_order(symbol, quantity, price):\n \n# 
return render_template(\"admin/review_order.html\", symbol = symbol, price = price, quantity = quantity)\n\n@admin_bp.route('/trade/review', methods=['GET', 'POST'])\n@login_required\ndef review_order():\n cash = current_user.cash\n symbol = request.args.get('symbol')\n price = request.args.get('price')\n quantity = request.args.get('quantity')\n action = request.args.get('action')\n totalprice = float(price)*float(quantity)\n able_to_trade = True\n stocklist = []\n if action == 'Buy':\n cash_balance = float(cash) - totalprice\n if cash_balance < 0:\n able_to_trade = False\n flash(\"Your order be rejected if you do not have enough cash to cover this closing transaction.\", 'danger')\n return redirect_back()\n else:\n cash_balance = float(cash) + totalprice\n portfolio = Portfolio.query.with_parent(current_user).all()\n # check if any existed portfolio, if no, return direct_back\n if portfolio:\n \n for company in portfolio: \n stocklist.append(company.symbol)\n \n if symbol in stocklist:\n id = company.id\n stock = Portfolio.query.get_or_404(id)\n own_quantity = float(stock.quantity)\n if float(quantity) > own_quantity:\n able_to_trade = False\n flash(\"Your share isn't enough for the trade. the order was rejected\", 'danger')\n return redirect_back()\n else:\n flash(\"You dont have any share yet\", 'danger')\n return redirect_back()\n\n # if symbol in company.symbol:\n # id = company.id\n # stock = Portfolio.query.get_or_404(id) \n # own_quantity = float(stock.quantity)\n # if float(quantity) > own_quantity:\n # able_to_trade = False\n # flash(\"Your share isn't enough for the trade. the order was rejected\", 'danger')\n # return redirect_back()\n \n # flash(\"You dont have any share yet\", 'danger')\n # return redirect_back() \n else:\n able_to_trade = False\n flash(\"You portfolio is empty\", 'danger')\n return redirect_back()\n \n # if cash_balance < 0:\n # flash(\"You dont have enough money\", 'danger')\n # return redirect_back()\n \n return render_template(\"admin/review_order.html\",able_to_trade = able_to_trade, action = action, symbol = symbol, price= price, quantity = quantity, cash = cash, totalprice=totalprice, cash_balance=cash_balance)\n\n@admin_bp.route('/trade/placeorder', methods=['GET', 'POST'])\n@login_required\ndef place_order():\n # get trade condition\n able_to_trade = request.args.get('able_to_trade')\n action = request.args.get(\"action\")\n if able_to_trade:\n # get original cash and position\n cash = float(current_user.cash)\n # position = float(current_user.position)\n \n\n # update cash and position back to database\n if action ==\"Buy\":\n totalprice = float(request.args.get('totalprice'))\n current_user.cash = cash - totalprice\n # current_user.position = position + totalprice\n else:\n totalprice = float(request.args.get('totalprice'))\n current_user.cash = cash + totalprice\n # current_user.position = position - totalprice\n\n #update trade_history\n symbol = request.args.get('symbol')\n name = lookup(symbol)['name']\n price = request.args.get('price')\n quantity = request.args.get('quantity')\n \n \n \n trade_history = TradeHistory(symbol = symbol, name = name, price = price, action = action, quantity = quantity)\n db.session.add(trade_history)\n current_user.trade_history.append(trade_history)\n\n # update portfolio\n portfolio = Portfolio.query.with_parent(current_user).all()\n if portfolio:\n # check if the stock already existed\n for company in portfolio:\n # if exists update the averge pruchase price and quantity\n if symbol == company.symbol:\n # get 
purchase price and symbol\n id = company.id\n stock = Portfolio.query.get_or_404(id)\n purchase_price = float(stock.purchase_price)\n own_quantity = float(stock.quantity)\n\n # caculate new purchase price and quantity\n if action ==\"Buy\":\n new_purchase_price = (purchase_price * own_quantity + float(quantity) * float(price))/(own_quantity + float(quantity))\n new_own_quantity = own_quantity + float(quantity)\n else:\n new_own_quantity = own_quantity - float(quantity)\n if new_own_quantity == 0:\n db.session.delete(stock)\n db.session.commit()\n return redirect(url_for('home.index'))\n \n new_purchase_price = (purchase_price*own_quantity - float(quantity) * float(price))/(own_quantity-float(quantity))\n \n\n stock.purchase_price = new_purchase_price\n stock.quantity =new_own_quantity\n db.session.commit()\n return redirect(url_for(\"home.index\"))\n # if not exist in portfolio, create it \n new_portfolio = Portfolio(symbol = symbol, name = name, purchase_price = price, quantity = quantity)\n current_user.portfolio.append(new_portfolio)\n else:\n new_portfolio = Portfolio(symbol = symbol, name = name, purchase_price = price, quantity = quantity)\n current_user.portfolio.append(new_portfolio)\n\n db.session.commit()\n return redirect(url_for(\"home.index\"))\n\n@admin_bp.route(\"/trade_history\", methods=['GET', 'POST'])\n@login_required\ndef trade_history():\n # get data from database\n page = request.args.get('page', 1, type=int)\n per_page = 10\n pagination = TradeHistory.query.with_parent(current_user).order_by(TradeHistory.timestamp.desc()).paginate(page, per_page)\n trade_history = pagination.items\n return render_template(\"admin/trade_history.html\", trade_history = trade_history, pagination = pagination)\n\n\n\n\n\n@admin_bp.route('/watchlist')\n@login_required\ndef show_watchlist():\n form = QuoteForm()\n page = request.args.get('page',1, type= int)\n per_page = 10\n pagination = Watchlist.query.with_parent(current_user).order_by(Watchlist.symbol.asc()).paginate(page, per_page)\n watchlist = pagination.items\n\n symbols=[] \n prices =[] \n changes =[]\n changePercents =[]\n openprices =[]\n highs =[]\n lows =[]\n volumes =[]\n week52Highs =[]\n week52Lows =[]\n\n if watchlist:\n for stock in watchlist:\n quote = lookup(stock.symbol)\n \n price= quote[\"price\"]\n symbol= quote[\"symbol\"]\n change= quote[\"change\"]\n changePercent= quote[\"changePercent\"]\n volume= quote[\"volume\"]\n week52High= quote[\"week52High\"]\n week52Low= quote[\"week52Low\"]\n openprice =quote[\"open\"]\n high =quote['high']\n low = quote[\"low\"]\n\n symbols.append(symbol)\n prices.append(price) \n changes.append(change)\n changePercents.append(changePercent)\n openprices.append(openprice)\n highs.append(high)\n lows.append(low)\n volumes.append(volume)\n week52Highs.append(week52High)\n week52Lows.append(week52Low)\n return render_template('admin/watchlist.html', watchlist = watchlist, pagination = pagination, form = form,\n prices = prices, changes = changes, changePercents = changePercents, openprices = openprices,\n highs = highs, lows = lows, week52Highs = week52Highs, week52Lows = week52Lows, symbols = symbols,\n volumes = volumes)\n\n\n\n\n@admin_bp.route(\"/watchlist/new\", methods=['GET', 'POST'])\n@login_required\ndef add_to_watchlist():\n \n symbol = request.args.get(\"symbol\")\n symbol_in_watchlist = Watchlist.query.filter_by(symbol = symbol)\n for stock in symbol_in_watchlist:\n if stock.symbol == symbol:\n \n flash(\"Already existed in your watchlist\",'success')\n return 
redirect(url_for('admin.show_watchlist'))\n \n \n watchlist = Watchlist(symbol = symbol)\n db.session.add(watchlist)\n current_user.watchlist.append(watchlist)\n db.session.commit()\n \n return redirect(url_for('admin.show_watchlist'))\n\n@admin_bp.route(\"/watchlist//delete\", methods = [\"POST\"])\n@login_required\ndef delete_from_watchlist(symbol):\n stocks = Watchlist.query.filter_by(symbol = symbol)\n if stocks:\n id = stocks[0].id\n stock = Watchlist.query.get_or_404(id)\n db.session.delete(stock)\n db.session.commit()\n flash(\"Delete from your watchlist\",'success')\n return redirect_back()\n","sub_path":"vfinance/blueprints/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":11787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"138423350","text":"\n\n# test classalloc for messed class and process\nimport unittest\nimport random\nimport os\n\n#\nimport rlp\n\n# to tested\nfrom classalloc import (\n CmAfter,\n CmBefore,\n Cmstud,\n Cmclass,\n CmTarget,\n ClassAllocator,\n)\ndef datagen(n=120):\n # data gen for . more than 100..\n # return CmBefore assembled;\n assert(n>100)\n list_cmstud = [] \n for i in range(n):\n c = Cmstud(\n studid = i,\n subjcomb = random.randrange(1,7) if i %2 ==0 else random.randrange(1,4)\n #should make unevenly\n )\n # print(\"append %s\" % c.studid)\n list_cmstud.append(c)\n cmt = CmTarget(\n cmplan = b'00',\n cm_num = int(n/20),\n cm_subjs = 6,\n cm_max = 30,\n cm_min = 18,\n cm_stud = n\n )\n cmb = CmBefore(\n cmplan = b'00',\n cmstuds = list_cmstud,\n cmtarget = cmt\n )\n\n return cmb\n\nclass TestData(unittest.TestCase):\n # test the data structures;\n def test_init_datas(self):\n # generate many ob and stay in obj\n cma = Cmstud(\n studid = 1,\n subjcomb = 1\n )\n self.assertIsInstance(cma, Cmstud)\n\n def setUp(self):\n # init a classallocator\n # gen a file\n self.filename = \"test_cmbefore.file\"\n fn = self.filename\n cmb = datagen()\n self.cmb = cmb\n with open(fn, 'wb') as f:\n f.write(rlp.encode(cmb))\n \n ca = ClassAllocator(fn)\n self.ca = ca\n \n def test_allocate(self):\n if self.ca:\n self.ca.allocat()\n self.assertEqual(len(self.ca.cmsbjs[-1]), 0)\n self.assertEqual(len(self.ca.cmcs), 6)\n # self.assertGreater(len(self.ca.cmcs[-1]), 0)\n \n def test_export(self):\n self.assertTrue(self.ca.export_result())\n \n\n def tearDown(self):\n if os.path.exists(self.filename):\n os.remove(self.filename)\n pass\n \n\n\n\n\n","sub_path":"schecsite/scheduler/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"334868977","text":"import csv\nimport json\n\nimport pytest\n\nfrom instascrape import Post, Profile\n\n\nclass TestProfile:\n @pytest.fixture\n def page_instance(self):\n profile_url = \"https://www.instagram.com/chris_greening/\"\n profile_obj = Profile(profile_url)\n profile_obj.load()\n return profile_obj\n\n def test_to_dict(self, page_instance):\n assert type(page_instance.to_dict()) == dict\n\n # @pytest.mark.file_io\n # def test_to_json(self, page_instance, tmpdir):\n # file = tmpdir.join(\"data.json\")\n # page_instance.to_json(fp=str(file))\n # with open(str(file), \"r\") as injson:\n # json_dict = json.load(injson)\n # assert page_instance.to_dict() == json_dict\n\n # @pytest.mark.file_io\n # def test_to_csv(self, page_instance, tmpdir):\n\n # # write to CSV\n # file = tmpdir.join(\"data.csv\")\n # page_instance.to_csv(fp=str(file))\n\n # 
# reread the csv\n # with open(str(file), mode=\"r\") as infile:\n # reader = csv.reader(infile)\n # csv_dict = {row[0]: row[1] for row in reader}\n\n # # have to convert everything to str otherwise AssertionError will trip\n # # up comparing stuff like True == 'True'\n # str_dict = {}\n # for key, val in page_instance.to_dict().items():\n # if val is None:\n # val = \"\"\n # str_dict[key] = str(val)\n\n # assert str_dict == csv_dict\n\n def test_get_recent_posts(self, page_instance):\n posts = page_instance.get_recent_posts(amt=6)\n assert len(posts) == 6\n assert all([type(post) is Post for post in posts])\n assert all([hasattr(post, \"id\") for post in posts])\n","sub_path":"tests/scrapers/test_profile.py","file_name":"test_profile.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"545923473","text":"'''\nCreated on Apr 4, 2017\n@author: ltyrala\nflow control training\n'''\nimport unittest\nfrom enum import Enum\n\n\nclass PrimeGenerator:\n\n def __init__(self):\n pass\n\n @staticmethod\n def is_prime(number):\n '''\n Test if number is prime,\n use for else loop\n\n :param number: some numeric value, grater then 0\n :type number: int\n :returns: True if number is prime\n :rtype: bool\n '''\n if number < 2:\n return False\n\n for denominator in range(2, number):\n if (number % denominator) == 0:\n break\n else:\n return True\n return False\n\n def get_prime_larger_then(self, number):\n '''\n Find first prime larger then number\n use while loop\n\n :param number: some numeric value, grater then 0\n :type number: int\n :returns: prime number\n :rtype: int\n '''\n number += 1\n while not self.is_prime(number):\n number += 1\n\n return number\n\n def get_prime_smaller_then(self, number):\n '''\n Find first prime smaller then number\n use while loop\n\n :param number: some numeric value, grater then 0\n :type number: int\n :returns: prime number\n :rtype: int\n '''\n if number < 2:\n return 2\n\n number -= 1\n while not self.is_prime(number):\n number -= 1\n\n return number\n\n def get_range(self, start, stop):\n '''\n Get prime numbers between start and stop values\n\n :param start: some numeric value, grater then 0\n :type start: int\n :param stop: some numeric value, grater then 0\n :type stop: int\n :returns: prime number list\n :rtype: list(int)\n '''\n return [i for i in range(start, stop+1) if self.is_prime(i)]\n\n\nclass PKOQueue:\n '''\n PKO client queue,\n ServiceType - type of available service\n _service_dict defines number of workers\n _queue - clients on queue\n '''\n class ServiceType(Enum):\n C = 'Cash register'\n B = 'Bonds'\n L = 'Loans'\n\n def __init__(self, init_count=0):\n self._service_dict = {self.ServiceType.C: 0,\n self.ServiceType.B: 0,\n self.ServiceType.L: 0}\n self._queue = []\n self._count = init_count\n\n def open_service(self, service_type):\n '''\n open service type\n '''\n if service_type in self._service_dict:\n self._service_dict[service_type] += 1\n\n def close_service(self, service_type):\n if service_type in self._service_dict \\\n and self._service_dict > 0:\n self._service_dict[service_type] -= 1\n\n def queue_add(self, service_type):\n if service_type in self._service_dict \\\n and self._service_dict[service_type] > 0:\n self._count += 1\n self._queue.append((service_type, self._count))\n\n def queue_release(self, service_type):\n# return self._queue_release_fair(service_type)\n return self._queue_release_prime(service_type)\n\n def 
_queue_release_fair(self, service_type):\n if service_type in self._service_dict \\\n and self._service_dict[service_type] > 0:\n return self._queue.pop(0)\n else:\n return None\n\n def _queue_release_prime(self, service_type):\n if self._service_dict[service_type] > 0:\n for q in self._queue:\n if q[0] == service_type:\n if PrimeGenerator.is_prime(q[1]):\n self._queue.remove(q)\n return q\n else:\n return self._queue.pop(0)\n else:\n return None\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_is_prime_negative(self):\n prime_generator = PrimeGenerator()\n self.assertFalse(prime_generator.is_prime(-1))\n self.assertFalse(prime_generator.is_prime(-10))\n\n def test_is_prime(self):\n prime_generator = PrimeGenerator()\n self.assertTrue(prime_generator.is_prime(2))\n self.assertTrue(prime_generator.is_prime(3))\n self.assertTrue(prime_generator.is_prime(5))\n self.assertTrue(prime_generator.is_prime(13))\n self.assertTrue(prime_generator.is_prime(751))\n self.assertTrue(prime_generator.is_prime(997))\n\n self.assertFalse(prime_generator.is_prime(1))\n self.assertFalse(prime_generator.is_prime(4))\n self.assertFalse(prime_generator.is_prime(8))\n self.assertFalse(prime_generator.is_prime(9))\n self.assertFalse(prime_generator.is_prime(100))\n self.assertFalse(prime_generator.is_prime(999))\n self.assertFalse(prime_generator.is_prime(1000))\n self.assertFalse(prime_generator.is_prime(11111))\n\n def test_prime_larger(self):\n prime_generator = PrimeGenerator()\n self.assertEqual(prime_generator.get_prime_larger_then(1000000),\n 1000003)\n\n def test_prime_smaller(self):\n prime_generator = PrimeGenerator()\n self.assertEqual(prime_generator.get_prime_smaller_then(-10), 2)\n self.assertEqual(prime_generator.get_prime_smaller_then(0), 2)\n self.assertEqual(prime_generator.get_prime_smaller_then(1), 2)\n self.assertEqual(prime_generator.get_prime_smaller_then(47), 43)\n\n def test_get_prime_range(self):\n prime_generator = PrimeGenerator()\n self.assertEqual([41, 43, 47],\n prime_generator.get_range(40, 47))\n self.assertEqual([2, 3, 5, 7],\n prime_generator.get_range(-40, 7))\n\n def test_pko_one(self):\n pko_queue = PKOQueue()\n pko_queue.open_service(pko_queue.ServiceType.C)\n pko_queue.queue_add(pko_queue.ServiceType.C)\n pko_queue.queue_add(pko_queue.ServiceType.C)\n pko_queue.queue_add(pko_queue.ServiceType.B)\n self.assertEqual((pko_queue.ServiceType.C, 2),\n pko_queue.queue_release(pko_queue.ServiceType.C))\n self.assertEqual((pko_queue.ServiceType.C, 1),\n pko_queue.queue_release(pko_queue.ServiceType.C))\n self.assertEqual(None,\n pko_queue.queue_release(pko_queue.ServiceType.B))\n\n def test_pko_prime_priority(self):\n pko_queue = PKOQueue(10)\n pko_queue.open_service(pko_queue.ServiceType.C)\n pko_queue.queue_add(pko_queue.ServiceType.C)\n pko_queue.queue_add(pko_queue.ServiceType.C)\n pko_queue.queue_add(pko_queue.ServiceType.C)\n self.assertEqual((pko_queue.ServiceType.C, 11),\n pko_queue.queue_release(pko_queue.ServiceType.C))\n self.assertEqual((pko_queue.ServiceType.C, 13),\n pko_queue.queue_release(pko_queue.ServiceType.C))\n self.assertEqual((pko_queue.ServiceType.C, 12),\n pko_queue.queue_release(pko_queue.ServiceType.C))\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n 
unittest.main()\n","sub_path":"lab/03/test_flow_control_pass.py","file_name":"test_flow_control_pass.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"178230127","text":"import csv\nimport sys\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\nTEST_SIZE = 0.4\n\n\ndef main():\n\n # Check command-line arguments\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python shopping.py data\")\n\n # Load data from spreadsheet and split into train and test sets\n evidence, labels = load_data(sys.argv[1])\n X_train, X_test, y_train, y_test = train_test_split(\n evidence, labels, test_size=TEST_SIZE\n )\n\n # Train model and make predictions\n model = train_model(X_train, y_train)\n predictions = model.predict(X_test)\n sensitivity, specificity = evaluate(y_test, predictions)\n\n # Print results\n print(f\"Correct: {(y_test == predictions).sum()}\")\n print(f\"Incorrect: {(y_test != predictions).sum()}\")\n print(f\"True Positive Rate: {100 * sensitivity:.2f}%\")\n print(f\"True Negative Rate: {100 * specificity:.2f}%\")\n\n\ndef load_data(filename):\n \"\"\"\n Load shopping data from a CSV file `filename` and convert into a list of\n evidence lists and a list of labels. Return a tuple (evidence, labels).\n\n evidence should be a list of lists, where each list contains the\n following values, in order:\n 0- Administrative, an integer\n 1- Administrative_Duration, a floating point number\n 2- Informational, an integer\n 3- Informational_Duration, a floating point number\n 4- ProductRelated, an integer\n 5- ProductRelated_Duration, a floating point number\n 6- BounceRates, a floating point number\n 7- ExitRates, a floating point number\n 8- PageValues, a floating point number\n 9- SpecialDay, a floating point number\n 10- Month, an index from 0 (January) to 11 (December)\n 11- OperatingSystems, an integer\n 12- Browser, an integer\n 13- Region, an integer\n 14- TrafficType, an integer\n 15- VisitorType, an integer 0 (not returning) or 1 (returning)\n 16- Weekend, an integer 0 (if false) or 1 (if true)\n\n labels should be the corresponding list of labels, where each label\n is 1 if Revenue is true, and 0 otherwise.\n \"\"\"\n #raise NotImplementedError\n with open(filename) as f:\n reader = csv.reader(f)\n next(reader)\n\n evidence = []\n labels = []\n for row in reader:\n #evidence.append([cell for cell in row[:17]])\n temp = []\n for i in range(17):\n if i in (15,16,10):\n if i == 16:\n temp.append(1 if row[i]==\"TRUE\" else 0)\n if i == 15:\n #print(row[15])\n temp.append(1 if row[i] == 'Returning_Visitor' else 0)\n if i == 10:\n if row[i] == 'Jan':\n temp.append(0)\n if row[i] == 'Feb':\n temp.append(1)\n if row[i] == 'March':\n temp.append(2)\n if row[i] == 'April':\n temp.append(3)\n if row[i] == 'May':\n temp.append(4)\n if row[i] == 'June':\n temp.append(5)\n if row[i] == 'July':\n temp.append(6)\n if row[i] == 'Aug':\n temp.append(7)\n if row[i] == 'Sept':\n temp.append(8)\n if row[i] == 'Oct':\n temp.append(9)\n if row[i] == 'Nov':\n temp.append(10)\n if row[i] == 'Dec':\n temp.append(11)\n \n else:\n temp.append(row[i])\n \n evidence.append(temp)\n #labels.append(0 if row[17] == 'TRUE' else 1)\n if row[17] == \"FALSE\":\n labels.append(0)\n else:\n labels.append(1)\n #print(type(evidence))\n #print(labels)\n # print(evidence[0])\n # print(len(evidence[0]))\n # print(labels[0])\n return evidence,labels\n\n\n\ndef train_model(evidence, 
labels):\n \"\"\"\n Given a list of evidence lists and a list of labels, return a\n fitted k-nearest neighbor model (k=1) trained on the data.\n \"\"\"\n #raise NotImplementedError\n model = KNeighborsClassifier(n_neighbors = 1)\n model.fit(evidence,labels)\n return model\n\n\ndef evaluate(labels, predictions):\n \"\"\"\n Given a list of actual labels and a list of predicted labels,\n return a tuple (sensitivity, specificty).\n\n Assume each label is either a 1 (positive) or 0 (negative).\n\n `sensitivity` should be a floating-point value from 0 to 1\n representing the \"true positive rate\": the proportion of\n actual positive labels that were accurately identified.\n\n `specificity` should be a floating-point value from 0 to 1\n representing the \"true negative rate\": the proportion of\n actual negative labels that were accurately identified.\n \"\"\"\n #raise NotImplementedError\n length = len(labels)\n true1 = 0\n true2 = 0\n false1 = 0\n false2 = 0\n\n for i in range(length):\n if labels[i] == True:\n true2 += 1\n if predictions[i] == True:\n true1 += 1\n\n for i in range(length):\n if labels[i] == True:\n false2 += 1\n if predictions[i] == True:\n false1 += 1\n\n sensitivity = (true1//true2)\n specificity = (false1//false2)\n\n return sensitivity,specificity\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cs50 projects/project 4/shopping/shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"396870331","text":"from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\n\nsc = SparkContext()\nsqlContext = SQLContext(sc)\n\nimport numpy as np\nfrom scipy.ndimage import maximum_position\nfrom pyspark.sql import Row\n\ns = 32\ncrop_size = int(s*s)\nw = s//2\nn_rows = 170 # n_eta\nn_cols = 360 # n_phi\n\ndef crop_around_max(arr,r,c):\n #global n_rows, n_cols, w\n return np.array(arr, dtype=np.float32).reshape(n_rows,n_cols)[r-w:r+w,c-w:c+w].flatten()\n\ndef process_en(en):\n nonzero = (en > 0.)\n en[nonzero] = (np.log10(en[nonzero])+1.3)/4.\n return en\n\ndef process_t(b):\n return b/50.\n\ndef log_noise(lin_noise):\n nonzero = (lin_noise > 0.)\n lin_noise[nonzero] = np.log10(lin_noise[nonzero])\n return lin_noise\n\ndef process_evt(row):\n #global crop_size\n \n ### Get channel max ###\n arr_ref = np.array(row.EB_adc6, dtype=np.float32).reshape(n_rows,n_cols)\n r, c = maximum_position(arr_ref)\n \n ### Row object can be cast as python dict ###\n ### Note down out of range maxima ###\n row_dict = row.asDict()\n if c < w or c >= n_cols-w or r < w or r >= n_rows-w:\n evt_out = {k:np.full(crop_size, -999, dtype=np.float32).tolist() for k,arr in row_dict.iteritems()}\n evt_out['keep'] = False\n return Row(**evt_out)\n \n ### Initialize output dict as cropped input Row dict ###\n evt_out = {k:crop_around_max(arr,r,c) for k,arr in row_dict.iteritems()}\n #evt_out = {k:np.array(arr, dtype=np.float32).flatten() for k,arr in row_dict.iteritems()}\n '''\n ### Process Energy ###\n dict_en = ['EBenergy', 'EBenergyRed']\n for k in dict_en:\n evt_out[k] = process_en(evt_out[k])\n \n ### Process Time ###\n dict_t = ['EBtime', 'EBtimeRed']\n for k in dict_t:\n evt_out[k] = process_t(evt_out[k])\n '''\n ### Process Digis ###\n presample = np.mean([evt_out['EB_adc0'], evt_out['EB_adc1'], evt_out['EB_adc2']], axis=0)\n #presample = log_noise(presample)\n dict_adc = ['EB_adc%d'%sample for sample in range(10)]\n for k in dict_adc:\n evt_out[k] = 
process_digi(evt_out[k],presample)\n \n ### Keep event ###\n ### Pyspark only accepts list types ###\n evt_out = {k:arr.tolist() for k,arr in evt_out.iteritems()}\n evt_out['keep'] = True\n return Row(**evt_out)\n\n# Case 1\ndef process_digi(adc,_):\n nonzero = (adc > 0.)\n adc[nonzero] = np.log10(adc[nonzero])-2.3\n return adc\n\n\ndef concat(row):\n row_dict = row.asDict()\n evt_out = [row_dict['EBenergy'], row_dict['EBtime'], \\\n row_dict['EBenergyRed'], row_dict['EBtimeRed'], \\\n row_dict['EB_adc0'], row_dict['EB_adc1'], row_dict['EB_adc2'], \\\n row_dict['EB_adc3'], row_dict['EB_adc4'], row_dict['EB_adc5'], \\\n row_dict['EB_adc6'], row_dict['EB_adc7'], row_dict['EB_adc8'], row_dict['EB_adc9']]\n return Row(features=evt_out, labels=1)\n\ndf = sqlContext \\\n .read.format(\"org.dianahep.sparkroot\") \\\n .load(\"hdfs:/cms/bigdatasci/mandrews/SinglePhotonPt50_FEVTDEBUG_n250k_IMG.root\")\n\n#n_events = df.count()\n#branch_list = df.columns\n#print \" >> N of events:\", n_events\n#print \" >> Input branch list:\",branch_list\n\ndf_out = df.rdd.map(process_evt).toDF()\ndf_out = df_out.filter(df_out.keep == True).drop(df_out.keep)\ndf_out = df.rdd.map(concat).toDF()\n\ndf_out.write.save(\"hdfs:/cms/bigdatasci/mandrews/SinglePhotonPt50_IMGCROP_n250k.parquet\", format=\"parquet\")\n","sub_path":"crop_preprocess_EBcrops_bySpark.py","file_name":"crop_preprocess_EBcrops_bySpark.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526698161","text":"import cv2 as cv\nimport numpy as np\n\nimg = np.zeros((512,512,3),np.uint8) # 创建黑色的图像\n\ncv.line(img,(0,0),(511,511),(255,255,0),5)# 绘制一条厚度为5的蓝色对角线.需要传递线的开始和结束坐标\n\ncv.rectangle(img,(384,0),(510,128),(0,255,255),3)#在图像的右上角绘制一个黄色矩形。绘制矩形需要矩形的左上角和右下角\n\ncv.circle(img,(447,63),63,(0,0,255),-1)#绘制一个圆,需要其中心坐标和半径。我们将在上面绘制的矩形内绘制一个圆。\n\ncv.ellipse(img,(256,256),(100,50),0,90,180,(255,255,0),-1)\n#要绘制椭圆,我们需要传递几个参数。一个参数是中心位置(x,y)。\n# 下一个参数是轴长度(长轴长度,短轴长度)。angle是椭圆沿逆时针方向旋转的角度。\n# startAngle和endAngle表示从主轴沿顺时针方向测量的椭圆弧的开始和结束。即给出0和360给出完整的椭圆。\n\npts = np.array([[10,5],[20,30],[70,20],[50,10]],np.int32)\npts = pts.reshape((-1,1,2))\ncv.polylines(img,[pts],True,(0,255,255))\n#绘制多边形,需要顶点的坐标。将这些点组成形状为ROWSx1x2的数组,其中ROWS是顶点数,并且其类型应为int32\n#如果第三个参数为False,您将获得一条连接所有点的折线,而不是闭合形状。\n\n\nfont = cv.FONT_HERSHEY_COMPLEX\ncv.putText(img,'OpenCV',(10,400), font, 2,(255,255,255),1,cv.LINE_AA)#向图像添加文本\n\n\ncv.imshow('image',img) #窗口显示图像\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"04.draw.py","file_name":"04.draw.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47526897","text":"import numpy as np\nimport torch\nfrom torch.nn import Module, Linear\nfrom torch.nn.functional import relu\nfrom tqdm import tqdm\n\n\"\"\"Summary of Pytorch training\n\n Pros:\n very explicit\n speed 6s / epoch\n debuggeable\n\n Cons:\n new framework to learn\n more code for same work\n\n\"\"\"\n\n\n# Sanity check\nif torch.__version__ != '1.4.0':\n raise ValueError('This must be run with pytorch 1.4.0')\n\n\nclass Model(Module):\n \"\"\"Model creation: subclassing approach\"\"\"\n def __init__(self):\n super().__init__()\n\n # Layers instantiation (no need for Input layer)\n self.linear1 = Linear(in_features=(20), out_features=(20))\n self.linear2 = Linear(in_features=(20), out_features=(10))\n self.linear3 = Linear(in_features=(10), out_features=(1))\n\n def forward(self, x):\n x = self.linear1(x)\n x = 
relu(x)\n x = self.linear2(x)\n x = relu(x)\n x = self.linear3(x)\n return x\n\n # NOTE no need to implement backward\n # since it is already infered\n\n\ndef loss_compute(y_true, y_pred):\n return (y_true - y_pred)**2\n\n\ndef train():\n # Learn to sum 20 nums\n train_samples = torch.randn(size=(10000, 20))\n train_targets = torch.sum(train_samples, dim=-1)\n test_samples = torch.randn(size=(100, 20))\n test_targets = torch.sum(test_samples, dim=-1)\n\n # Model\n model = Model()\n\n # Training loop\n epochs = 10\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n\n for epoch in range(epochs):\n\n # Fancy progress bar\n pbar = tqdm(range(len(train_samples)))\n\n # Metrics\n loss_metric = []\n\n # Batches iteration, batch_size = 1\n for batch_id in pbar:\n\n # Getting sample target pair\n sample = train_samples[batch_id]\n target = train_targets[batch_id]\n\n # Adding batch dim since batch=1\n sample = sample.unsqueeze(0)\n target = target.unsqueeze(0)\n\n # Forward pass: needs to be recorded by gradient tape\n target_pred = model(sample)\n loss = loss_compute(target, target_pred)\n\n # Backward pass: \n # Init previous gradients to 0\n optimizer.zero_grad()\n # Running backward pass for computing gradients\n loss.backward()\n # Update weights\n optimizer.step()\n\n # Tracking progress\n loss_metric.append(loss.item())\n loss_metric_avg = sum(loss_metric) / (batch_id+1)\n pbar.set_description('Training Loss: %.3f' % loss_metric_avg)\n\n # At the end of the epoch test the model\n test_targets_pred = model(test_samples)\n test_targets_pred = test_targets_pred.squeeze()\n test_loss = loss_compute(test_targets, test_targets_pred)\n test_loss_avg = torch.mean(test_loss).item()\n print('Validation Loss: %.3f' % test_loss_avg)\n\n\nif __name__ == '__main__':\n train()","sub_path":"pytorch1.4/train_dense_module.py","file_name":"train_dense_module.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82378426","text":"'''\n215. Kth Largest Element in an Array\n\n\nFind the kth largest element in an unsorted array. 
Note that it is the kth largest element in the sorted order, not the kth distinct element.\n\nExample 1:\n\nInput: [3,2,1,5,6,4] and k = 2\nOutput: 5\nExample 2:\n\nInput: [3,2,3,1,2,4,5,5,6] and k = 4\nOutput: 4\nNote:\nYou may assume k is always valid, 1 ≤ k ≤ array's length.\n\n\n'''\n\n\n'''\nclass Solution(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n # QuickSelect idea: AC in 52 ms\n # ---------------------------\n # clear idea, but use too much memory\n pivot = nums[len(nums)//2]\n left = [l for l in nums if l < pivot]\n equal = [e for e in nums if e == pivot]\n right = [r for r in nums if r > pivot]\n\n if k <= len(right):\n return self.findKthLargest(right, k)\n elif (k - len(right)) <= len(equal):\n return equal[0]\n else:\n return self.findKthLargest(left, k - len(right) - len(equal)) \n \n'''\n\n'''\n> 类型:PriorityQueue + MinHeap\n> Time Complexity O(NlogK) 因为每次heap插入时间是log(1), 插入k个就是logk\n> Space Complexity O(N)\n关于heap的使用口诀:\n\nmaxheap: 更换heap[0]中的最大值,放入条件为:放入比heap[0]小的值,heap初始为float('inf')\nminheap: 更换heap[0]中的最小值,放入条件为:放入比heap[0]大的值,heap初始为-float('inf')\nimport heapq\n'''\n\nclass Solution(object):\n def findKthLargest(self, nums, k):\n min_heap = [-float('inf')] * k\n heapq.heapify(min_heap)\n for num in nums:\n if num > min_heap[0]:\n heapq.heappop(min_heap)\n heapq.heappush(min_heap, num)\n return min_heap[0]\n\n\n\n'''\n\n# quicksort\n\nclass Solution:\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\" \n return self.helper(nums, 0, len(nums) - 1, k)\n \n \n def helper(self, nums, low, high, k):\n left, right = low, high\n pivot = nums[(low + high)//2]\n i = left\n while i <= right:\n if nums[i] < pivot:\n self.swap(nums, i, right)\n right -= 1\n elif nums[i] > pivot:\n self.swap(nums, i, left)\n left += 1\n i += 1\n else:\n i += 1\n if k <= left: # k - 1 < left\n return self.helper(nums, low, left - 1, k)\n elif k - 1 > right:\n return self.helper(nums, right + 1, high, k)\n else:\n return pivot\n \n \n \n \n def swap(self, nums, i, j):\n tmp = nums[i]\n nums[i] = nums[j]\n nums[j] = tmp\n \n'''\n\n# 2020/03/26, similar to above\n\nclass Solution:\n def findKthLargest(self, nums: List[int], k: int) -> int:\n # kth larget = n - k th smallest\n return self.partition(nums, 0, len(nums) - 1, len(nums) - k)\n\n def partition(self, nums, start, end, k):\n # find kth smallest element in nums, here k is representing the index, and it starts from 0\n # only consider the index [start : end]\n # the following if ... 
return is necessary\n if start == end:\n return nums[k]\n l, r = start, end\n p = nums[(l + r) // 2]\n i = l\n while i <= r:\n if nums[i] > p:\n nums[i], nums[r] = nums[r], nums[i]\n r -= 1\n elif nums[i] < p:\n nums[i], nums[l] = nums[l], nums[i]\n l += 1;\n i += 1\n else:\n i += 1\n if k <= l:\n return self.partition(nums, start, l, k)\n # i is larger than or equal to r\n if k >= i:\n return self.partition(nums, i, end, k)\n return p\n\n\n# cpp, rewrite, priority queue\n\n'''\nclass Solution {\npublic:\n int findKthLargest(vector& nums, int k) {\n priority_queue, greater> pq(k, INT_MIN);\n for(int &num: nums){\n if (num > pq.top()){\n pq.pop();\n pq.push(num);\n }\n if (pq.size() > k) pq.pop();\n }\n return pq.top();\n \n }\n};\n'''\n\n\n'''\n2020/03/26, two pointer, jiuzhang template\n2020/05/23, revise, totally forget\n2021/04/14, revise, one pass\n\nRuntime: 68 ms, faster than 55.50% of Python3 online submissions for Kth Largest Element in an Array.\nMemory Usage: 13.7 MB, less than 60.00% of Python3 online submissions for Kth Largest Element in an Array.\n'''\n\n\nclass Solution:\n def findKthLargest(self, nums: List[int], k: int) -> int:\n # kth larget = n - k th smallest\n return self.partition(nums, 0, len(nums) - 1, len(nums) - k)\n\n def partition(self, nums, start, end, k):\n # find kth smallest element in nums, here k is representing the index, and it starts from 0\n # only consider the index [start : end]\n # the following if ... return is not necessary\n if start == end:\n return nums[k]\n l, r = start, end\n p = nums[(l + r) // 2]\n while l <= r:\n while l <= r and nums[l] < p:\n l += 1\n while l <= r and nums[r] > p:\n r -= 1\n if l <= r:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1;\n r -= 1\n # right <= left by stoping condition\n if k <= r:\n return self.partition(nums, start, r, k)\n if k >= l:\n return self.partition(nums, l, end, k)\n return nums[k]","sub_path":"0215. Kth Largest Element in an Array.py","file_name":"0215. 
Kth Largest Element in an Array.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"396346380","text":"from whales.modules.features_extractors.feature_extraction import FeatureExtraction\n\n\nclass Identity(FeatureExtraction):\n def __init__(self, logger=None):\n super().__init__(logger)\n self.description = \"Identity\"\n self.needs_fitting = False\n self.parameters = {}\n\n def method_transform(self):\n data = self.parameters[\"data\"]\n # Caution with nan values as they cannot go into the classifiers\n return data\n\n\nPipelineMethod = Identity\n","sub_path":"src/whales/modules/features_extractors/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372351547","text":"import math\nimport multiprocessing as mp\nimport random\nimport time\n\nimport tensorflow as tf\nimport numpy as np\n\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\n\nimport openea.modules.load.read as rd\nimport openea.modules.train.batch as bat\nfrom openea.modules.utils.util import load_session\nfrom openea.modules.finding.evaluation import valid, test, early_stop\nfrom openea.models.basic_model import BasicModel\nfrom openea.modules.base.optimizers import generate_optimizer\nfrom openea.modules.utils.util import merge_dic\nimport os\n\n'''\nRefactoring based on https://github.com/1049451037/GCN-Align\n'''\n_LAYER_UIDS = {}\n\n\n# ******************************inits************************\ndef uniform(shape, scale=0.05, name=None):\n \"\"\"Uniform init.\"\"\"\n initial = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef glorot(shape, name=None):\n \"\"\"Glorot & Bengio (AISTATS 2010) init.\"\"\"\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef zeros(shape, name=None):\n \"\"\"All zeros.\"\"\"\n initial = tf.zeros(shape, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef ones(shape, name=None):\n \"\"\"All ones.\"\"\"\n initial = tf.ones(shape, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef trunc_normal(shape, name=None, normalize=True):\n initial = tf.Variable(tf.truncated_normal(shape, stddev=1.0 / math.sqrt(shape[0])))\n if not normalize:\n return initial\n return tf.nn.l2_normalize(initial, 1)\n\n\n# *******************************layers**************************\ndef get_layer_uid(layer_name=''):\n \"\"\"Helper function, assigns unique layer IDs.\"\"\"\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]\n\n\ndef sparse_dropout(x, keep_prob, noise_shape):\n \"\"\"Dropout for sparse tensors.\"\"\"\n random_tensor = keep_prob\n random_tensor += tf.random_uniform(noise_shape)\n dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)\n pre_out = tf.sparse_retain(x, dropout_mask)\n return pre_out * (1. 
/ keep_prob)\n\n\ndef dot(x, y, sparse=False):\n \"\"\"Wrapper for tf.matmul (sparse vs dense).\"\"\"\n print(x)\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x, y)\n return res\n\n\ndef load_attr(ent_num, kgs):\n cnt = {}\n entity_attributes_dict = merge_dic(kgs.kg1.entity_attributes_dict, kgs.kg2.entity_attributes_dict)\n for _, vs in entity_attributes_dict.items():\n for v in vs:\n if v not in cnt:\n cnt[v] = 1\n else:\n cnt[v] += 1\n fre = [(k, cnt[k]) for k in sorted(cnt, key=cnt.get, reverse=True)]\n print(fre)\n attr2id = {}\n num = int(0.7 * len(cnt))\n for i in range(num):\n attr2id[fre[i][0]] = i\n attr = np.zeros((ent_num, num), dtype=np.float32)\n for ent, vs in entity_attributes_dict.items():\n for v in vs:\n if v in attr2id:\n attr[ent][attr2id[v]] = 1.0\n return attr\n\n\nclass Layer(object):\n \"\"\"Base layer class. Defines basic API for all layer objects.\n Implementation inspired by keras (http://keras.io).\n # Properties\n name: String, defines the variable scope of the layer.\n logging: Boolean, switches Tensorflow histogram logging on/off\n # Methods\n _call(inputs): Defines computation graph of layer\n (i.e. takes input, returns output)\n __call__(inputs): Wrapper for _call()\n _log_vars(): Log all variables\n \"\"\"\n\n def __init__(self, **kwargs):\n allowed_kwargs = {'name', 'logging'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n layer = self.__class__.__name__.lower()\n name = layer + '_' + str(get_layer_uid(layer))\n self.name = name\n self.vars = {}\n logging = kwargs.get('logging', False)\n self.logging = logging\n self.sparse_inputs = False\n\n def _call(self, inputs):\n return inputs\n\n def __call__(self, inputs):\n with tf.name_scope(self.name):\n if self.logging and not self.sparse_inputs:\n tf.summary.histogram(self.name + '/inputs', inputs)\n outputs = self._call(inputs)\n if self.logging:\n tf.summary.histogram(self.name + '/outputs', outputs)\n return outputs\n\n def _log_vars(self):\n for var in self.vars:\n tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])\n\n\nclass Dense(Layer):\n \"\"\"Dense layer.\"\"\"\n\n def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,\n act=tf.nn.relu, bias=False, featureless=False, **kwargs):\n super(Dense, self).__init__(**kwargs)\n\n if dropout:\n self.dropout = placeholders['dropout']\n else:\n self.dropout = 0.\n\n self.act = act\n self.sparse_inputs = sparse_inputs\n self.featureless = featureless\n self.bias = bias\n\n # helper variable for sparse dropout\n self.num_features_nonzero = placeholders['num_features_nonzero']\n\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weights'] = glorot([input_dim, output_dim],\n name='weights')\n if self.bias:\n self.vars['bias'] = zeros([output_dim], name='bias')\n\n if self.logging:\n self._log_vars()\n\n def _call(self, inputs):\n x = inputs\n\n # dropout\n if self.sparse_inputs:\n x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)\n else:\n x = tf.nn.dropout(x, 1 - self.dropout)\n\n # transform\n output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)\n\n # bias\n if self.bias:\n output += self.vars['bias']\n\n return self.act(output)\n\n\nclass GraphConvolution(Layer):\n \"\"\"Graph convolution layer. 
(featureless=True and transform=False) is not supported for now.\"\"\"\n\n def __init__(self, input_dim, output_dim, placeholders, dropout=0.,\n sparse_inputs=False, act=tf.nn.relu, bias=False,\n featureless=False, transform=True, init=glorot, **kwargs):\n super(GraphConvolution, self).__init__(**kwargs)\n\n if dropout:\n self.dropout = placeholders['dropout']\n else:\n self.dropout = 0.\n\n self.act = act\n self.support = placeholders['support']\n self.sparse_inputs = sparse_inputs\n self.featureless = featureless\n self.bias = bias\n self.transform = transform\n\n # helper variable for sparse dropout\n self.num_features_nonzero = placeholders['num_features_nonzero']\n\n with tf.variable_scope(self.name + '_vars'):\n for i in range(len(self.support)):\n if input_dim == output_dim and not self.transform and not featureless:\n continue\n self.vars['weights_' + str(i)] = init([input_dim, output_dim],\n name='weights_' + str(i))\n if self.bias:\n self.vars['bias'] = zeros([output_dim], name='bias')\n\n if self.logging:\n self._log_vars()\n\n def _call(self, inputs):\n x = inputs\n\n # dropout\n if self.dropout:\n if self.sparse_inputs:\n x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)\n else:\n x = tf.nn.dropout(x, 1 - self.dropout)\n\n # convolve\n supports = list()\n for i in range(len(self.support)):\n if 'weights_' + str(i) in self.vars:\n if not self.featureless:\n pre_sup = dot(x, self.vars['weights_' + str(i)], sparse=self.sparse_inputs)\n else:\n pre_sup = self.vars['weights_' + str(i)]\n else:\n pre_sup = x\n support = dot(self.support[i], pre_sup, sparse=True)\n supports.append(support)\n output = tf.add_n(supports)\n\n # bias\n if self.bias:\n output += self.vars['bias']\n\n return self.act(output)\n\n\n# *******************************************************************\n# ****************************metrics***********************************\ndef masked_softmax_cross_entropy(preds, labels, mask):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)\n\n\ndef masked_accuracy(preds, labels, mask):\n \"\"\"Accuracy with masking.\"\"\"\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))\n accuracy_all = tf.cast(correct_prediction, tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n accuracy_all *= mask\n return tf.reduce_mean(accuracy_all)\n\n\ndef get_placeholder_by_name(name):\n try:\n return tf.get_default_graph().get_tensor_by_name(name + \":0\")\n except:\n return tf.placeholder(tf.int32, name=name)\n\n\ndef align_loss(outlayer, ILL, gamma, k):\n left = ILL[:, 0]\n right = ILL[:, 1]\n t = len(ILL)\n left_x = tf.nn.embedding_lookup(outlayer, left)\n right_x = tf.nn.embedding_lookup(outlayer, right)\n A = tf.reduce_sum(tf.abs(left_x - right_x), 1)\n neg_left = get_placeholder_by_name(\"neg_left\") # tf.placeholder(tf.int32, [t * k], \"neg_left\")\n neg_right = get_placeholder_by_name(\"neg_right\") # tf.placeholder(tf.int32, [t * k], \"neg_right\")\n neg_l_x = tf.nn.embedding_lookup(outlayer, neg_left)\n neg_r_x = tf.nn.embedding_lookup(outlayer, neg_right)\n B = tf.reduce_sum(tf.abs(neg_l_x - neg_r_x), 1)\n C = - tf.reshape(B, [t, k])\n D = A + gamma\n L1 = tf.nn.relu(tf.add(C, tf.reshape(D, [t, 1])))\n neg_left = get_placeholder_by_name(\"neg2_left\") # tf.placeholder(tf.int32, [t * k], \"neg2_left\")\n 
neg_right = get_placeholder_by_name(\"neg2_right\") # tf.placeholder(tf.int32, [t * k], \"neg2_right\")\n neg_l_x = tf.nn.embedding_lookup(outlayer, neg_left)\n neg_r_x = tf.nn.embedding_lookup(outlayer, neg_right)\n B = tf.reduce_sum(tf.abs(neg_l_x - neg_r_x), 1)\n C = - tf.reshape(B, [t, k])\n L2 = tf.nn.relu(tf.add(C, tf.reshape(D, [t, 1])))\n return (tf.reduce_sum(L1) + tf.reduce_sum(L2)) / (2.0 * k * t)\n\n\n# ***************************models****************************************\nclass Model(object):\n def __init__(self, **kwargs):\n allowed_kwargs = {'name', 'logging'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n name = self.__class__.__name__.lower()\n self.name = name\n\n logging = kwargs.get('logging', False)\n self.logging = logging\n\n self.vars = {}\n self.placeholders = {}\n\n self.layers = []\n self.activations = []\n\n self.inputs = None\n self.outputs = None\n\n self.loss = 0\n self.accuracy = 0\n self.optimizer = None\n self.opt_op = None\n\n def _build(self):\n raise NotImplementedError\n\n def build(self):\n \"\"\" Wrapper for _build() \"\"\"\n with tf.variable_scope(self.name):\n self._build()\n\n # Build sequential layer model\n self.activations.append(self.inputs)\n for layer in self.layers:\n hidden = layer(self.activations[-1])\n self.activations.append(hidden)\n self.outputs = self.activations[-1]\n\n # Store model variables for easy access\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n self.vars = {var.name: var for var in variables}\n\n # Build metrics\n self._loss()\n self._accuracy()\n\n self.opt_op = self.optimizer.minimize(self.loss)\n\n def predict(self):\n pass\n\n def _loss(self):\n raise NotImplementedError\n\n def _accuracy(self):\n raise NotImplementedError\n\n def save(self, sess=None):\n if not sess:\n raise AttributeError(\"TensorFlow session not provided.\")\n saver = tf.train.Saver(self.vars)\n save_path = saver.save(sess, \"tmp/%s.ckpt\" % self.name)\n print(\"Model saved in file: %s\" % save_path)\n\n def load(self, sess=None):\n if not sess:\n raise AttributeError(\"TensorFlow session not provided.\")\n saver = tf.train.Saver(self.vars)\n save_path = \"tmp/%s.ckpt\" % self.name\n saver.restore(sess, save_path)\n print(\"Model restored from file: %s\" % save_path)\n\n\nclass MLP(Model):\n def __init__(self, args, placeholders, input_dim, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.args = args\n self.inputs = placeholders['features']\n self.input_dim = input_dim\n # self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions\n self.output_dim = placeholders['labels'].get_shape().as_list()[1]\n self.placeholders = placeholders\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n\n self.build()\n\n def _loss(self):\n # Weight decay loss\n for var in self.layers[0].vars.values():\n self.loss += self.args.weight_decay * tf.nn.l2_loss(var)\n\n # Cross entropy error\n self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],\n self.placeholders['labels_mask'])\n\n def _accuracy(self):\n self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],\n self.placeholders['labels_mask'])\n\n def _build(self):\n self.layers.append(Dense(input_dim=self.input_dim,\n output_dim=self.args.hidden1,\n placeholders=self.placeholders,\n act=tf.nn.relu,\n dropout=True,\n sparse_inputs=self.args.sparse_inputs,\n 
logging=self.logging))\n\n self.layers.append(Dense(input_dim=self.args.hidden1,\n output_dim=self.output_dim,\n placeholders=self.placeholders,\n act=lambda x: x,\n dropout=True,\n logging=self.logging))\n\n def predict(self):\n return tf.nn.softmax(self.outputs)\n\n\nclass GCN(Model):\n def __init__(self, args, placeholders, input_dim, **kwargs):\n super(GCN, self).__init__(**kwargs)\n self.args = args\n # *************add***************\n\n # ************************************\n self.inputs = placeholders['features']\n self.input_dim = input_dim\n # self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions\n self.output_dim = placeholders['labels'].get_shape().as_list()[1]\n self.placeholders = placeholders\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n\n self.build()\n\n def _loss(self):\n # Weight decay loss\n for var in self.layers[0].vars.values():\n self.loss += self.args.weight_decay * tf.nn.l2_loss(var)\n\n # Cross entropy error\n self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],\n self.placeholders['labels_mask'])\n\n def _accuracy(self):\n self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],\n self.placeholders['labels_mask'])\n\n def _build(self):\n self.layers.append(GraphConvolution(input_dim=self.input_dim,\n output_dim=self.args.hidden1,\n placeholders=self.placeholders,\n act=tf.nn.relu,\n dropout=True,\n sparse_inputs=True,\n logging=self.logging))\n\n self.layers.append(GraphConvolution(input_dim=self.args.hidden1,\n output_dim=self.output_dim,\n placeholders=self.placeholders,\n act=lambda x: x,\n dropout=True,\n logging=self.logging))\n\n def predict(self):\n return tf.nn.softmax(self.outputs)\n\n\nclass GCN_Align_Unit(Model):\n def __init__(self, args, placeholders, input_dim, output_dim, ILL, sparse_inputs=False, featureless=True, **kwargs):\n super(GCN_Align_Unit, self).__init__(**kwargs)\n self.args = args\n\n self.inputs = placeholders['features']\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.placeholders = placeholders\n self.ILL = ILL\n self.sparse_inputs = sparse_inputs\n self.featureless = featureless\n\n self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.args.learning_rate)\n self.build()\n\n\n def _loss(self):\n self.loss += align_loss(self.outputs, self.ILL, self.args.gamma, self.args.neg_triple_num)\n\n def _accuracy(self):\n pass\n\n def _build(self):\n self.layers.append(GraphConvolution(input_dim=self.input_dim,\n output_dim=self.output_dim,\n placeholders=self.placeholders,\n act=tf.nn.relu,\n dropout=False,\n featureless=self.featureless,\n sparse_inputs=self.sparse_inputs,\n transform=False,\n init=trunc_normal,\n logging=self.logging))\n\n self.layers.append(GraphConvolution(input_dim=self.output_dim,\n output_dim=self.output_dim,\n placeholders=self.placeholders,\n act=lambda x: x,\n dropout=False,\n transform=False,\n logging=self.logging))\n\n\nclass GCN_Utils:\n def __init__(self, args, kgs):\n self.args = args\n self.kgs = kgs\n\n @staticmethod\n def sparse_to_tuple(sparse_mx):\n def to_tuple(mx):\n if not sp.isspmatrix_coo(mx):\n mx = mx.tocoo()\n coords = np.vstack((mx.row, mx.col)).transpose()\n values = mx.data\n shape = mx.shape\n return coords, values, shape\n\n if isinstance(sparse_mx, list):\n for i in range(len(sparse_mx)):\n sparse_mx[i] = to_tuple(sparse_mx[i])\n else:\n sparse_mx = to_tuple(sparse_mx)\n\n return sparse_mx\n\n @staticmethod\n def 
normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n def preprocess_adj(self, adj):\n \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\n adj_normalized = self.normalize_adj(adj + sp.eye(adj.shape[0]))\n return self.sparse_to_tuple(adj_normalized)\n\n @staticmethod\n def construct_feed_dict(features, support, placeholders):\n \"\"\"Construct feed dictionary for GCN-Align.\"\"\"\n feed_dict = dict()\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})\n return feed_dict\n\n def chebyshev_polynomials(self, adj, k):\n \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).\"\"\"\n print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n\n adj_normalized = self.normalize_adj(adj)\n laplacian = sp.eye(adj.shape[0]) - adj_normalized\n largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n\n t_k = list()\n t_k.append(sp.eye(adj.shape[0]))\n t_k.append(scaled_laplacian)\n\n def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):\n s_lap = sp.csr_matrix(scaled_lap, copy=True)\n return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two\n\n for i in range(2, k + 1):\n t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))\n\n return self.sparse_to_tuple(t_k)\n\n @staticmethod\n def func(triples):\n head = {}\n cnt = {}\n for tri in triples:\n if tri[1] not in cnt:\n cnt[tri[1]] = 1\n head[tri[1]] = {tri[0]}\n else:\n cnt[tri[1]] += 1\n head[tri[1]].add(tri[0])\n r2f = {}\n for r in cnt:\n r2f[r] = len(head[r]) / cnt[r]\n return r2f\n\n @staticmethod\n def ifunc(triples):\n tail = {}\n cnt = {}\n for tri in triples:\n if tri[1] not in cnt:\n cnt[tri[1]] = 1\n tail[tri[1]] = {tri[2]}\n else:\n cnt[tri[1]] += 1\n tail[tri[1]].add(tri[2])\n r2if = {}\n for r in cnt:\n r2if[r] = len(tail[r]) / cnt[r]\n return r2if\n\n def get_weighted_adj(self, e, KG):\n r2f = self.func(KG)\n r2if = self.ifunc(KG)\n M = {}\n for tri in KG:\n if tri[0] == tri[2]:\n continue\n if (tri[0], tri[2]) not in M:\n M[(tri[0], tri[2])] = max(r2if[tri[1]], 0.3)\n else:\n M[(tri[0], tri[2])] += max(r2if[tri[1]], 0.3)\n if (tri[2], tri[0]) not in M:\n M[(tri[2], tri[0])] = max(r2f[tri[1]], 0.3)\n else:\n M[(tri[2], tri[0])] += max(r2f[tri[1]], 0.3)\n row = []\n col = []\n data = []\n for key in M:\n row.append(key[1])\n col.append(key[0])\n data.append(M[key])\n return sp.coo_matrix((data, (row, col)), shape=(e, e))\n\n def get_ae_input(self, attr):\n return self.sparse_to_tuple(sp.coo_matrix(attr))\n\n def load_data(self, attr):\n ae_input = self.get_ae_input(attr)\n triples = self.kgs.kg1.relation_triples_list + self.kgs.kg2.relation_triples_list\n adj = self.get_weighted_adj(self.kgs.entities_num, triples)\n train = np.array(self.kgs.train_links)\n return adj, ae_input, train\n\n\nclass GCN_Align(BasicModel):\n def __init__(self):\n super().__init__()\n self.attr = None\n self.opt = 'SGD'\n self.act_func = tf.nn.relu\n self.dropout = 0.0\n # *****************************add*******************************************************\n 
self.struct_loss = None\n self.struct_optimizer = None\n self.vec_ae = None\n self.vec_se = None\n self.num_supports = None\n self.utils = None\n self.adj = None\n self.ae_input = None\n self.train = None\n self.e = None\n self.support = None\n self.adj = None\n self.ph_ae = None\n self.ph_se = None\n self.model_ae = None\n self.model_se = None\n self.feed_dict_se = None\n self.feed_dict_ae = None\n\n def init(self):\n assert self.args.alignment_module == 'mapping'\n assert self.args.neg_triple_num > 1\n assert self.args.learning_rate >= 0.01\n\n self.num_supports = self.args.support_number\n self.utils = GCN_Utils(self.args, self.kgs)\n self.attr = load_attr(self.kgs.entities_num, self.kgs)\n self.adj, self.ae_input, self.train = self.utils.load_data(self.attr)\n self.e = self.ae_input[2][0]\n self.support = [self.utils.preprocess_adj(self.adj)]\n self.ph_ae = {\n \"support\": [tf.sparse_placeholder(tf.float32) for _ in range(self.args.support_number)],\n \"features\": tf.sparse_placeholder(tf.float32),\n \"dropout\": tf.placeholder_with_default(0., shape=()),\n \"num_features_nonzero\": tf.placeholder_with_default(0, shape=())\n }\n self.ph_se = {\n \"support\": [tf.sparse_placeholder(tf.float32) for _ in range(self.args.support_number)],\n \"features\": tf.placeholder(tf.float32),\n \"dropout\": tf.placeholder_with_default(0., shape=()),\n \"num_features_nonzero\": tf.placeholder_with_default(0, shape=())\n }\n self.model_ae = GCN_Align_Unit(self.args, self.ph_ae, input_dim=self.ae_input[2][1],\n output_dim=self.args.ae_dim, ILL=self.train,\n sparse_inputs=True, featureless=False, logging=False)\n self.model_se = GCN_Align_Unit(self.args, self.ph_se, input_dim=self.e, output_dim=self.args.se_dim,\n ILL=self.train, sparse_inputs=False,\n featureless=True, logging=False)\n\n self.session = load_session()\n tf.global_variables_initializer().run(session=self.session)\n\n def train_embeddings(self, loss, optimizer, output):\n # **t=train_number k=neg_num\n neg_num = self.args.neg_triple_num\n train_num = len(self.kgs.train_links)\n train_links = np.array(self.kgs.train_links)\n pos = np.ones((train_num, neg_num)) * (train_links[:, 0].reshape((train_num, 1)))\n neg_left = pos.reshape((train_num * neg_num,))\n pos = np.ones((train_num, neg_num)) * (train_links[:, 1].reshape((train_num, 1)))\n neg2_right = pos.reshape((train_num * neg_num,))\n neg2_left = None\n neg_right = None\n feed_dict_se = None\n feed_dict_ae = None\n\n for i in range(1, self.args.max_epoch + 1):\n start = time.time()\n if i % 10 == 1:\n neg2_left = np.random.choice(self.e, train_num * neg_num)\n neg_right = np.random.choice(self.e, train_num * neg_num)\n feed_dict_ae = self.utils.construct_feed_dict(self.ae_input, self.support, self.ph_ae)\n feed_dict_ae.update({self.ph_ae['dropout']: self.args.dropout})\n feed_dict_ae.update({'neg_left:0': neg_left, 'neg_right:0': neg_right,\n 'neg2_left:0': neg2_left, 'neg2_right:0': neg2_right})\n feed_dict_se = self.utils.construct_feed_dict(1., self.support, self.ph_se)\n feed_dict_se.update({self.ph_se['dropout']: self.args.dropout})\n feed_dict_se.update({'neg_left:0': neg_left, 'neg_right:0': neg_right,\n 'neg2_left:0': neg2_left, 'neg2_right:0': neg2_right})\n batch_loss1, _ = self.session.run(fetches=[self.model_ae.loss, self.model_ae.opt_op],\n feed_dict=feed_dict_ae)\n batch_loss2, _ = self.session.run(fetches=[self.model_se.loss, self.model_se.opt_op],\n feed_dict=feed_dict_se)\n\n batch_loss = batch_loss1 + batch_loss2\n print('epoch {}, avg. 
relation triple loss: {:.4f}, cost time: {:.4f}s'.format(i, batch_loss,\n time.time() - start))\n\n # ********************no early stop********************************************\n if i >= self.args.start_valid and i % self.args.eval_freq == 0:\n self.feed_dict_se = feed_dict_se\n self.feed_dict_ae = feed_dict_ae\n flag = self.valid_(self.args.stop_metric)\n self.flag1, self.flag2, self.early_stop = early_stop(self.flag1, self.flag2, flag)\n if self.early_stop or i == self.args.max_epoch:\n break\n vec_se = self.session.run(output, feed_dict=feed_dict_se)\n vec_ae = self.session.run(self.model_ae.outputs, feed_dict=feed_dict_ae)\n self.vec_se = vec_se\n self.vec_ae = vec_ae\n return vec_se, vec_ae\n\n def test(self, save=True):\n if self.args.test_method == \"sa\":\n beta = self.args.beta\n embeddings = np.concatenate([self.vec_se * beta, self.vec_ae * (1.0 - beta)], axis=1)\n else:\n embeddings = self.vec_se\n embeds1 = np.array([embeddings[e] for e in self.kgs.test_entities1])\n embeds2 = np.array([embeddings[e] for e in self.kgs.test_entities2])\n rest_12, _, _ = test(embeds1, embeds2, None, self.args.top_k, self.args.test_threads_num,\n metric=self.args.eval_metric, normalize=self.args.eval_norm, csls_k=0, accurate=True)\n test(embeds1, embeds2, None, self.args.top_k, self.args.test_threads_num,\n metric=self.args.eval_metric, normalize=self.args.eval_norm, csls_k=self.args.csls, accurate=True)\n if save:\n ent_ids_rest_12 = [(self.kgs.test_entities1[i], self.kgs.test_entities2[j]) for i, j in rest_12]\n rd.save_results(self.out_folder, ent_ids_rest_12)\n\n def save(self):\n if self.args.dropout == 0:\n ent_embeds = self.vec_se\n attr_embeds = self.vec_ae\n rd.save_embeddings(self.out_folder, self.kgs, ent_embeds, None, attr_embeds, mapping_mat=None)\n else:\n for i in range(10):\n ent_embeds = self.session.run(self.model_se.outputs, feed_dict=self.feed_dict_se)\n attr_embeds = self.session.run(self.model_ae.outputs, feed_dict=self.feed_dict_ae)\n sub_dir = os.path.join(self.out_folder, \"model%02d\"%i)\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n if not sub_dir.endswith(\"/\"):\n sub_dir += \"/\"\n rd.save_embeddings(sub_dir, self.kgs, ent_embeds, None, attr_embeds, mapping_mat=None)\n\n def valid_(self, stop_metric):\n se = self.session.run(self.model_se.outputs, feed_dict=self.feed_dict_se)\n if self.args.test_method == \"sa\":\n ae = self.session.run(self.model_ae.outputs, feed_dict=self.feed_dict_ae)\n beta = self.args.beta\n embeddings = np.concatenate([se*beta, ae*(1.0-beta)], axis=1)\n else:\n embeddings = se\n embeds1 = np.array([embeddings[e] for e in self.kgs.valid_entities1])\n embeds2 = np.array([embeddings[e] for e in self.kgs.valid_entities2 + self.kgs.test_entities2])\n hits1_12, mrr_12 = valid(embeds1, embeds2, None, self.args.top_k, self.args.test_threads_num,\n metric=self.args.eval_metric)\n if stop_metric == 'hits1':\n return hits1_12\n return mrr_12\n\n def run(self):\n t = time.time()\n self.train_embeddings(self.struct_loss, self.struct_optimizer, self.model_se.outputs)\n print(\"Training ends. 
Total time = {:.3f} s.\".format(time.time() - t))\n","sub_path":"openea/approaches/gcn_align.py","file_name":"gcn_align.py","file_ext":"py","file_size_in_byte":32257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"332786550","text":"#coding=utf-8\nfrom parse import parse_url\nimport re\n\nclass NeiHan:\n def __init__(self):\n self.start_url = \"http://neihanshequ.com/\" #1.url\n self.pattern = re.compile(r\"(.*?)

\",re.S)\n\n def get_content_list(self,html_str): #获取页面内容\n '''\n
        [sample of the scraped page's HTML structure appeared here in the docstring; the markup was lost in extraction]

上午去看牙,发现看牙的女医生长得挺漂亮,心想:现在女孩都喜欢有钱的,一定要找机会表现出来我的实力。 女医生问:牙坏了,拔么? 故作紧张地问道:拔了牙的话影响我开宾利么? 美女医生:不影响,就是吹牛逼的时候有点漏风!

\n\t\t\t\t

\n\t\t\t
\n '''\n content_list = self.pattern.findall(html_str)\n return content_list\n\n def save_content_list(self,content_list): #保存\n with open(\"neihan.txt\",\"a\") as f:\n for content in content_list:\n print(content)\n f.write(content[0])\n f.write(\"\\n\")\n f.write(content[1])\n f.write(\"\\n\")\n\n def run(self):\n #1.url\n #2.发送请求,获取响应\n html_str = parse_url(self.start_url)\n #3.提取数据\n content_list = self.get_content_list(html_str)\n #4.保存\n self.save_content_list(content_list)\n\nif __name__ == '__main__':\n neihan = NeiHan()\n neihan.run()","sub_path":"ch3/code/neihan.py","file_name":"neihan.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"470969510","text":"from pony import orm\nfrom vkwave.bots import Keyboard\n\nfrom jacob.database.utils import lists, students\nfrom jacob.services import keyboard as kbs\nfrom jacob.services.keyboard.common import Keyboards, StudentsNavigator\n\nJSONStr = str\n\n\nclass ListsKeyboards(Keyboards):\n \"\"\"Набор клавиатур для навигации в режиме редактирования Списков.\"\"\"\n\n def __init__(self, admin_id: int, return_to: str, list_id: int):\n super().__init__(admin_id)\n self.return_to = return_to\n self.list_id = list_id\n\n def menu(self) -> JSONStr:\n \"\"\"\n Главное меню Списков (половины алфавита, сохранить, отменить).\n\n Returns:\n JSONStr: Клавиатура\n \"\"\"\n kb = kbs.common.alphabet(self.admin_id)\n if len(kb.buttons[-1]):\n kb.add_row()\n kb.add_text_button(text=\"✅ Сохранить\", payload={\"button\": \"save\"})\n\n return kb.get_keyboard()\n\n def submenu(self, half: int) -> JSONStr:\n \"\"\"\n Подменю призыва (список букв в рамках половины алфавита).\n\n Args:\n half: индекс половины алфавита\n\n Returns:\n JSONStr: Клавиатура\n\n \"\"\"\n kb = super().submenu(half)\n return kb\n\n def students(self, letter: str) -> JSONStr:\n \"\"\"\n Список студентов на букву.\n\n Args:\n letter: Первая буква фамилии для поиска студентов\n\n Returns:\n JSONStr: Клавиатура\n\n \"\"\"\n with orm.db_session:\n data = students.get_list_of_students_by_letter(self.admin_id, letter)\n half_index = self._find_half_index_of_letter(letter)\n selected = lists.get_students_in_list(self.list_id)\n\n kb = Keyboard()\n for item in data:\n if len(kb.buttons[-1]) == 2:\n kb.add_row()\n label = \" \"\n if item in selected:\n label = \"✅ \"\n kb.add_text_button(\n text=f\"{label}{item.last_name} {item.first_name}\",\n payload={\n \"button\": \"student\",\n \"student_id\": item.id,\n \"letter\": letter,\n \"name\": f\"{item.last_name} {item.first_name}\",\n },\n )\n if kb.buttons[-1]:\n kb.add_row()\n kb.add_text_button(\n text=\"◀️ Назад\",\n payload={\"button\": \"half\", \"half\": half_index},\n )\n\n return kb.get_keyboard()\n\n\nclass ListNavigator(StudentsNavigator):\n def __init__(self, admin_id: int, list_id: int):\n super().__init__(admin_id)\n self.return_to = \"edit_students_in_list\"\n self.list_id = list_id\n\n def render(self) -> ListsKeyboards:\n return ListsKeyboards(self.admin_id, self.return_to, self.list_id)\n\n\ndef group_menu() -> JSONStr:\n kb = Keyboard()\n kb.add_text_button(\n \"👥 Студенты\",\n payload={\"button\": \"students\"},\n )\n kb.add_text_button(\"📃 Списки\", payload={\"button\": \"lists\"})\n kb.add_row()\n kb.add_text_button(\n text=\"◀️ Назад\",\n payload={\"button\": \"main_menu\"},\n )\n\n return kb.get_keyboard()\n\n\n@orm.db_session\ndef list_of_lists(group_id: int) -> JSONStr:\n kb = Keyboard()\n\n with orm.db_session:\n 
lists_of_group = lists.get_lists_of_group(group_id)\n\n for lst in lists_of_group:\n if len(kb.buttons[-1]) == 2:\n kb.add_row()\n kb.add_text_button(lst.name, payload={\"button\": \"list\", \"list\": lst.id})\n\n if kb.buttons[-1]:\n kb.add_row()\n\n kb.add_text_button(\n text=\"➕ Создать список\",\n payload={\"button\": \"create_list\"},\n )\n kb.add_row()\n kb.add_text_button(\n text=\"◀️ Назад\",\n payload={\"button\": \"group_mgmt\"},\n )\n\n return kb.get_keyboard()\n\n\ndef list_menu() -> JSONStr:\n kb = Keyboard()\n\n kb.add_text_button(\"✏ Переименовать\", payload={\"button\": \"rename_list\"})\n kb.add_text_button(\n \"👥 Список студентов\",\n payload={\"button\": \"edit_students_in_list\"},\n )\n kb.add_row()\n kb.add_text_button(\"🔥 Удалить список\", payload={\"button\": \"remove_list\"})\n kb.add_row()\n kb.add_text_button(\n text=\"◀️ Назад\",\n payload={\"button\": \"lists\"},\n )\n\n return kb.get_keyboard()\n","sub_path":"jacob/services/keyboard/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"478581589","text":"\"\"\"\n9. Palindrome Number\n\nGiven an integer x, return true if x is palindrome integer.\n\nAn integer is a palindrome when it reads the same backward as forward. For example, 121 is palindrome while 123 is not.\n\nExample 1:\n\nInput: x = 121\nOutput: true\n\nExample 2:\n\nInput: x = -121\nOutput: false\nExplanation: From left to right, it reads -121. From right to left, it becomes 121-. Therefore it is not a palindrome.\n\nExample 3:\n\nInput: x = 10\nOutput: false\nExplanation: Reads 01 from right to left. Therefore it is not a palindrome.\n\nExample 4:\n\nInput: x = -101\nOutput: false\n\nConstraints:\n\n-2^31 <= x <= 2^31 - 1\n \nFollow up: Could you solve it without converting the integer to a string?\n\"\"\"\n\n###############################################################################\n\"\"\"\nSolution 1: build up the integer in reverse, then compare\n\nO(log n) time, where n is the given integer\nO(1) extra space\n\"\"\" \nclass Solution:\n def isPalindrome(self, x: int) -> bool:\n if x < 0:\n return False\n \n y = x # copy of x to work with, so can preserve x to compare with at end\n z = 0 # build up to be palindrome of x\n\n while y > 0:\n z = z * 10 + y % 10\n y //= 10\n \n return z == x\n\n###############################################################################\n\"\"\"\nSolution 2: convert int to string, then compare string to its reversal\n\nO(n) time\nO(n) extra space: for string and its reversal\n\n\"\"\" \nclass Solution2:\n def isPalindrome(self, x: int) -> bool:\n s = str(x)\n \n return s == s[::-1]\n #return s == \"\".join(reversed(s)) # also works\n #return s == str(reversed(s)) # does NOT work for strings \n\n###############################################################################\n\"\"\"\nSolution 3: convert int to string, then use two pointers to compare\ncharacters in string\n\nO(n) time\nO(n) extra space: for string\n\"\"\" \nclass Solution3:\n def isPalindrome(self, x: int) -> bool:\n if x < 0:\n return False\n \n s = str(x)\n #s = f\"{x}\"\n #s = \"{}\".format(x)\n \n i = 0\n j = len(s) - 1\n \n while i < j:\n if s[i] != s[j]:\n return False\n \n i += 1\n j -= 1\n \n return True\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(x, comment=None):\n print(\"=\"*80)\n if comment:\n print(comment)\n\n print()\n 
print(f\"x = {x}\")\n\n res = sol.isPalindrome(x)\n\n print(f\"\\nres = {res}\\n\")\n\n\n sol = Solution()\n sol = Solution2()\n #sol = Solution3()\n\n comment = \"LC example 1; answer = True\"\n x = 121\n test(x, comment)\n\n comment = \"LC example 2; answer = False\"\n x = -121\n test(x, comment)\n\n comment = \"LC example 3; answer = False\"\n x = 10\n test(x, comment)\n\n comment = \"LC example 4; answer = False\"\n x = -101\n test(x, comment)\n","sub_path":"math/0009_palindrome_number.py","file_name":"0009_palindrome_number.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"345513554","text":"from collections import Counter\nfrom math import log10\nimport re\n\nclass Document():\n def __init__(self, query):\n self.query = query\n self.url = ''\n self.title = ''\n self.headers = []\n self.body_hits = {}\n self.anchors = []\n self.anchor_counts = []\n\n self.url_length = 0\n self.title_length = 0\n self.header_length = 0\n self.anchor_length = 0\n self.body_length = 0\n self.pagerank = 0\n \n self.avlen_url = 0\n self.avlen_title = 0\n self.avlen_header = 0\n self.avlen_body = 0\n self.avlen_anchor = 0\n \n self.query_terms = Counter(query.split())\n self.url_terms = Counter()\n self.title_terms = Counter()\n self.header_terms = Counter()\n self.anchor_terms = Counter()\n \n def set_title(self, title):\n title = title.lower()\n self.title = title\n title_tokens = title.split()\n self.title_length = len(title_tokens)\n self.title_terms = Counter(title_tokens)\n \n def set_url(self, url):\n self.url = url\n url_tokens = re.findall(r'[a-z0-9]+', url.lower())\n self.url_length = len(url_tokens)\n self.url_terms = Counter(url_tokens)\n \n def set_body_length(self, length):\n self.body_length = length\n \n def set_pagerank(self, rank):\n self.pagerank = rank\n \n def set_averages(self, avlen_url, avlen_title, avlen_header, avlen_body, avlen_anchor):\n self.avlen_url = avlen_url\n self.avlen_title = avlen_title\n self.avlen_header = avlen_header\n self.avlen_body = avlen_body\n self.avlen_anchor = avlen_anchor\n \n def add_header(self, header):\n header = header.lower()\n self.headers.append(header)\n header_tokens = header.split()\n self.header_length += len(header_tokens)\n self.header_terms.update(header_tokens)\n \n def add_body_hits(self, term, positions):\n term = term.lower()\n self.body_hits[term] = positions\n \n def add_anchor(self, anchor_text, count):\n anchor_text = anchor_text.lower()\n self.anchors.append(anchor_text)\n self.anchor_counts.append(count)\n for term in anchor_text.split():\n self.anchor_length += count\n self.anchor_terms[term] += count\n \n def make_vectors(self, doc_freq_dict):\n self.query_vec = []\n self.url_vec = []\n self.title_vec = []\n self.header_vec = []\n self.body_vec = []\n self.anchor_vec = []\n self.idf_vec = []\n \n for term in self.query_terms:\n self.query_vec.append(self.query_terms[term])\n self.url_vec.append(self.url_terms[term])\n self.title_vec.append(self.title_terms[term])\n self.header_vec.append(self.header_terms[term])\n self.body_vec.append(len(self.body_hits.get(term,[])))\n self.anchor_vec.append(self.anchor_terms[term])\n self.idf_vec.append(log10(98999/doc_freq_dict[term]))","sub_path":"rank/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570661716","text":"from sys import platform\r\nimport os, 
wget, urllib, sys\r\n\r\nkubectl_version = \"v1.21.0\"\r\n\r\n\r\n\r\ndef check_os_func():\r\n print(\"Checking your Operating System type\")\r\n if platform == \"linux\" or platform == \"linux2\":\r\n return \"You have Linux OS\"\r\n\r\n elif platform == \"win32\":\r\n return \"You have Windows OS\"\r\n\r\n else:\r\n return \"OS is neither Linux nor windows\"\r\n\r\ndef check_env_vars():\r\n pass\r\n\r\n\r\ndef download_file_from_web():\r\n\r\n remote_url = \"https://dl.k8s.io/release/\"+kubectl_version+\"/bin/windows/amd64/kubectl.exe\"\r\n \r\n local_file = \"kubectl.exe\"\r\n \r\n try:\r\n wget.download(remote_url, \"C:\\\\bin\\\\\")\r\n return \"Y\"\r\n except (urllib.error.HTTPError,urllib.error.URLError) as exception:\r\n return exception\r\n \r\n except:\r\n return (\"Error Occured\")\r\n\r\nl1 = sys.argv\r\n\r\n\r\n\r\nif len(l1) == 2 :\r\n if l1[1] == \"1\":\r\n print(check_os_func())\r\n elif l1[1] == \"2\": \r\n print(check_env_vars())\r\n elif l1[1] == \"3\":\r\n print(download_file_from_web())\r\n else:\r\n print(\"Please enter number between 1, 2 and 3\")\r\n\r\nelse:\r\n print(\"Enter Only One Argument\") ","sub_path":"needtochangethename.py","file_name":"needtochangethename.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217291595","text":"import xgboost as xgb\nfrom matplotlib import pylab as plt\nfrom sklearn.preprocessing import LabelEncoder\nimport operator\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\n\nfrom xgboost.sklearn import XGBClassifier\n\nfrom sklearn.grid_search import GridSearchCV, RandomizedSearchCV\nfrom sklearn.datasets import make_classification\nfrom sklearn.cross_validation import StratifiedKFold\n\n\nsns.set(font_scale = 1.5)\n\nparams_grid = {\n 'max_depth': [3, 4, 5, 6, 8, 10],\n 'n_estimators': [3, 4, 5, 6, 7],\n 'learning_rate': np.linspace(1e-16, 1, 3)\n}\n\nparams_fixed = {\n 'objective': 'binary:logistic',\n 'silent': 1\n}\nnum_rounds = 5\n\n\n\ntrain_df = pd.read_csv(\"./data/train.csv\", dtype={\"Age\": np.float64}, header=0)\ntest_df = pd.read_csv(\"./data/test.csv\", dtype={\"Age\": np.float64}, header=0)\nmean_age = np.mean(train_df['Age'])\ntrain_df['Age'].fillna(mean_age, inplace=True)\ntest_df['Age'].fillna(mean_age, inplace=True)\nids = test_df['PassengerId']\n\ntrain_df = train_df.drop(['Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)\ntest_df = test_df.drop(['Name', 'Ticket', 'Cabin', 'Embarked'],axis=1)\ndtrain = pd.get_dummies(train_df)\ndtest = pd.get_dummies(test_df)\nlabel = dtrain['Survived']\ndtrain = dtrain.drop(['Survived'],axis=1)\n\n\ncv = StratifiedKFold(label, n_folds=5)\n\nbst_grid = GridSearchCV(\n estimator=XGBClassifier(params_fixed),\n param_grid=params_grid,\n cv=cv,\n scoring='accuracy'\n)\n\nbst_grid.fit(dtrain, label)\nprint(\"Best accuracy obtained: {0}\".format(bst_grid.best_score_))\nprint(\"Parameters:\")\nfor key, value in bst_grid.best_params_.items():\n print(\"\\t{}: {}\".format(key, value))\n\n#bst = xgb.XGBClassifier(max_depth=4, n_estimators=5, learning_rate=0.5).fit(dtrain, label)\n#predictions = bst.predict(dtest)\n#submission = pd.DataFrame({ 'PassengerId': ids,'Survived': predictions })\n#submission.to_csv(\"submission.csv\", index=False)\n","sub_path":"xgb_forest.py","file_name":"xgb_forest.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"11249248","text":"#!/usr/bin/env 
python3\n\nfrom distutils.core import setup\n\nimport os\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n if not path.endswith('__pycache__') and not filename.endswith(\".pyc\"):\n paths.append(os.path.relpath(os.path.join(path, filename), directory))\n return paths\n\nextra_files = package_files('xaled_utils/')\n\n#print extra_files\n\nsetup(\n name='xaled_utils',\n version='0.8.1', # major.minor.fix: MAJOR incompatible API changes, MINOR add backwards-compatible functionality, FIX bug fixes\n description='Frequently used functions library for Python3 By Khalid Grandi (github.com/xaled).',\n long_description='Frequently used functions library for Python3 By Khalid Grandi (github.com/xaled).',\n long_description_content_type='text/x-rst',\n keywords='library utils common',\n author='Khalid Grandi',\n author_email='kh.grandi@gmail.com',\n url='https://github.com/xaled/xaled_utils/',\n install_requires=['requests', 'pycrypto', 'pyaml', 'lxml'],\n python_requires='>=3',\n packages=['xaled_utils'],\n package_data={'': extra_files},\n )\n","sub_path":"pypi_install_script/xaled_utils-0.8.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106063602","text":"import numpy as np\nimport logging\nfrom point_mass2d import PointMass2d\nfrom tensorflow.python.platform import flags\nFLAGS = flags.FLAGS\n\n\nclass EnvOperations:\n\n @classmethod\n def separate_parallel(cls, arr):\n assert arr.ndim >= 2, \"rank must be more than 2.\"\n if arr.ndim == 2:\n arr = np.expand_dims(arr, axis=2)\n axes = list(range(arr.ndim))\n axes[:2] = [1, 0]\n return np.squeeze(arr.transpose(axes))\n\n @classmethod\n def compute_adv(cls, observation, reward, gamma, baseline):\n def discount_cumsum(x, gamma):\n x = x * gamma\n return np.cumsum(x[:, ::-1], axis=-1)[:, ::-1] / gamma\n\n assert reward.ndim > 1, \"reward must greater than 2 dims.\"\n gammas = np.array([gamma ** i for i in range(reward.shape[1])])\n target = discount_cumsum(reward, gammas)\n baseline.fit(observation, target)\n values = np.concatenate(\n [np.array([baseline.predict(ob) for ob in observation]), np.zeros([observation.shape[0], 1])],\n axis=-1)\n advantage = reward + gamma * values[:, 1:] - values[:, :-1]\n advantage = discount_cumsum(advantage, gammas)\n return advantage\n\n @classmethod\n def rollout(cls, env, agent, no_reset_when_done=False):\n\n ob_sub_traj = []\n ac_sub_traj = []\n rw_sub_traj = []\n no_sub_traj = []\n dn_sub_traj = []\n\n mean_sub_traj = []\n log_std_sub_traj = []\n\n for _ in range(FLAGS.inner_batch_size):\n done_count = 0\n ob = env.reset()\n data = []\n state = (np.zeros([1, FLAGS.hidden_dim]), np.zeros([1, FLAGS.hidden_dim]))\n ct = np.zeros([1, FLAGS.hidden_dim])\n for time_step in range(FLAGS.inner_time_steps):\n # env.render()\n ac, mean, log_std = agent.act(ob, ct)\n next_ob, rw, dn, _ = env.step(ac)\n if done_count >= 4:\n dn = True\n data.append((ob, ac, rw, next_ob, dn, mean, log_std))\n ct, state = agent.next_context(state, (ob, ac, rw))\n if dn:\n assert (time_step+1) % 100 == 0\n ob = env.reset()\n done_count += 1\n else:\n ob = next_ob\n\n\n ob, ac, rw, next_ob, dn, mean, log_std = zip(*data)\n ob_sub_traj.append(np.array(ob))\n ac_sub_traj.append(np.array(ac))\n rw_sub_traj.append(np.array(rw))\n no_sub_traj.append(np.array(next_ob))\n dn_sub_traj.append(np.array(dn))\n 
mean_sub_traj.append(np.array(mean))\n log_std_sub_traj.append(np.array(log_std))\n\n # observation = cls.separate_parallel(np.array(ob_sub_traj))\n # action = cls.separate_parallel(np.array(ac_sub_traj))\n # reward = cls.separate_parallel(np.array(rw_sub_traj))\n # next_observation = cls.separate_parallel(np.array(no_sub_traj))\n # done = cls.separate_parallel(np.array(dn_sub_traj))\n #\n # mean = cls.separate_parallel(np.array(mean_sub_traj))\n # log_std = cls.separate_parallel(np.array(log_std_sub_traj))\n\n observation = np.array(ob_sub_traj)\n action = np.array(ac_sub_traj)\n reward = np.array(rw_sub_traj)\n next_observation = np.array(no_sub_traj)\n done = np.array(dn_sub_traj)\n mean = np.array(mean_sub_traj)\n log_std = np.array(log_std_sub_traj)\n\n returns = []\n # for i, j in zip(reward, done):\n # # end = np.where(j)[0][0] if np.where(j)[0].size > 0 else j.shape[0]\n # end = j.shape[0]-1\n # returns.append(sum(i[:end + 1]))\n avg_return = reward.reshape([-1, 100]).sum(-1).mean()#sum(returns) / len(returns)\n\n sub_traj = (\n observation,\n action,\n reward,\n next_observation,\n done,\n\n mean,\n log_std,\n avg_return,\n )\n\n return sub_traj\n\n\n\n\n\n @classmethod\n def perform_inner_trajectories(cls, env, agent, baseline, task_id, add_to_context=True, eval_q=False):\n agent.restore_weights()\n ob_inner_batch = []\n ac_inner_batch = []\n ad_inner_batch = []\n rw_inner_batch = []\n dn_inner_batch = []\n mean_inner_batch = []\n log_std_inner_batch = []\n avg_returns = []\n norm = np.nan\n for inner_step in range(FLAGS.num_inner_loop + 1):\n observation, action, reward, next_observation, done, mean, log_std, avg_return = cls.rollout(env, agent)\n if inner_step == 0:\n agent.replay_buffer.push_back(task_id, (observation, action, reward, next_observation, done))\n # advantage = np.array(agent.compute_adv(task_id, observation, action, reward))\n advantage = np.array(np.zeros_like(reward))\n agent.update_weights((observation, action, reward, advantage, done, mean, log_std), inner_step)\n else:\n advantage = cls.compute_adv(observation.reshape([-1, FLAGS.inner_time_steps // 4, env.observation_space.shape[0]]), reward.reshape([-1, FLAGS.inner_time_steps // 4]), FLAGS.gamma, baseline).reshape([FLAGS.inner_batch_size, FLAGS.inner_time_steps])\n ob_inner_batch.append(observation)\n ac_inner_batch.append(action)\n ad_inner_batch.append(advantage)\n rw_inner_batch.append(reward)\n dn_inner_batch.append(done)\n mean_inner_batch.append(mean)\n log_std_inner_batch.append(log_std)\n avg_returns.append(avg_return)\n\n inner_batch = (\n np.array(ob_inner_batch),\n np.array(ac_inner_batch),\n np.array(ad_inner_batch),\n np.array(rw_inner_batch),\n np.array(dn_inner_batch),\n np.array(mean_inner_batch),\n np.array(log_std_inner_batch),\n )\n\n return inner_batch, np.array(avg_returns), norm\n\n @classmethod\n def sample_meta_batch(cls, env, agent, baseline, eval_q=False):\n task_ids = list(env.get_all_task_idx())\n agent.save_weights()\n ob_meta_batch = []\n ac_meta_batch = []\n ad_meta_batch = []\n rw_meta_batch = []\n dn_meta_batch = []\n mean_meta_batch = []\n log_std_meta_batch = []\n meta_batch_returns = []\n eval_batch_returns = []\n\n for i in range(FLAGS.meta_batch_size):\n add2ctx = not i >= FLAGS.meta_batch_size - FLAGS.extra_rl_batch_size\n task_id = task_ids[i % len(task_ids)]\n # env.call_sync(\"reset_task\", idx=task_id)\n # env.set_task(task_id=task_id)\n env.reset_task(idx=task_id)\n\n (ob_inner_batch,\n ac_inner_batch,\n ad_inner_batch,\n rw_inner_batch,\n dn_inner_batch,\n 
mean_inner_batch,\n log_std_inner_batch), returns, norm = cls.perform_inner_trajectories(env, agent, baseline, task_id, add_to_context=add2ctx)\n if eval_q:\n _, lfb_returns, _ = cls.perform_inner_trajectories(env, agent, baseline, task_id, add_to_context=False, eval_q=eval_q)\n eval_batch_returns.append(lfb_returns)\n\n ob_meta_batch.append(ob_inner_batch)\n ac_meta_batch.append(ac_inner_batch)\n ad_meta_batch.append(ad_inner_batch)\n rw_meta_batch.append(rw_inner_batch)\n dn_meta_batch.append(dn_inner_batch)\n mean_meta_batch.append(mean_inner_batch)\n log_std_meta_batch.append(log_std_inner_batch)\n if i < FLAGS.meta_batch_size - FLAGS.extra_rl_batch_size:\n meta_batch_returns.append(returns)\n np.set_printoptions(precision=4)\n logging.info(\" task [{}/{}] id [{}] average return each update: {}, \"\n \"min/max mean {:.4f}/{:.4f}, min/max log_std {:.4f}/{:.4f} \"\n .format(i,\n FLAGS.meta_batch_size - 1,\n task_id, returns,\n np.min(mean_inner_batch),\n np.max(mean_inner_batch),\n np.min(log_std_inner_batch),\n np.max(log_std_inner_batch)))\n if eval_q:\n logging.info(\" EVAL [{}/{}] id [{}] average return each update: {}, \"\n .format(i,\n FLAGS.meta_batch_size - 1,\n task_id, lfb_returns))\n meta_batch = (\n np.array(ob_meta_batch),\n np.array(ac_meta_batch),\n np.array(ad_meta_batch),\n np.array(rw_meta_batch),\n np.array(dn_meta_batch),\n np.array(mean_meta_batch),\n np.array(log_std_meta_batch)\n )\n batch_returns = (np.array(meta_batch_returns),)\n if eval_q:\n batch_returns += (np.array(eval_batch_returns),)\n return meta_batch, batch_returns","sub_path":"model archives/dist-version/env_wrappers.py","file_name":"env_wrappers.py","file_ext":"py","file_size_in_byte":9148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"332949585","text":"import requests\nimport datetime\nfrom collections import Counter\nfrom bs4 import BeautifulSoup\n\n\ndef requestTo():\n url = \"http://time-rtu.ru/?group=БСБО-02-16\"\n r = requests.get(url)\n return r.text\n\n\ndef soupCreate(text):\n soup = BeautifulSoup(text, \"html.parser\")\n return soup\n\n\ndef getDate(soup):\n date = []\n card = []\n\n card = soup.findAll('div', id='card')\n for i in card:\n temp_date = i.find('div', id='date')\n temp_date = temp_date.text\n date.append(temp_date.encode('l1').decode())\n return date\n\n\ndef getListOfDays():\n url = \"http://time-rtu.ru/?group=БСБО-02-16\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n card = soup.findAll('div', id='day')\n\n\n list = []\n for i in card:\n lesson = i.find('td')\n if lesson != None:\n list.append(1)\n else:\n list.append(0)\n return list\n\n\n\ndef getCurrentDate():\n current = str(datetime.datetime.today())\n current = current.split(\" \")\n current = current[0].split(\"-\")\n return current\n\n\ndef getMaket(soup):\n maket = soup.findAll('div', id='day')\n\n return maket\n\n\ndef getTime(maket):\n time = []\n for i in maket:\n fullDay = i.findAll('tr')\n for lessonTime in fullDay:\n tempTime = lessonTime.find('td', id='time')\n time.append(tempTime.text.split()[0])\n\n return time\n\n\ndef getLessons(maket):\n finalLessons = []\n counter = 0\n for i in maket:\n fullDay = i.findAll('tr')\n\n for lessons in fullDay:\n\n lesson = lessons.findAll('td', id='lesson')\n for soloLesson in lesson:\n lessonName = soloLesson.find('div', id='dist')\n a = lessonName.text.encode('l1').decode().split()\n finalLessons.append(\" \".join(a))\n\n return finalLessons\n\n\ndef getDateId(current, date):\n id = 
0\n for i in date:\n tempDate = i.split()\n tempDate = tempDate[1].split(\"(\")\n tempDate = tempDate[1]\n tempDate = tempDate[0:-1]\n print(tempDate)\n finalDate = tempDate.split(\".\")\n checkVar = Counter(finalDate) == Counter(current)\n id += 1\n if checkVar == True:\n break\n return id\n\n\ndef getMessageToSend(day_id, time, finalLessons):\n i = 0\n finalList = getListOfDays()\n message_to_send = \"\"\n for y in finalList:\n print(y)\n if y == 1:\n while i < 7*2:\n message_to_send = message_to_send + time[i] + finalLessons[i] + \"\\n\"\n i += 1\n else:\n message_to_send = \"\"\n message_to_send = \"Выходной\"\n\n return message_to_send\n\n\ntext = requestTo()\nsoup = soupCreate(text)\ndate = getDate(soup)\n\ncurrent = getCurrentDate()\nday_id = getDateId(current, date)\n\nfullDay = getMaket(soup)\ntime = getTime(fullDay)\nfinalLessons = getLessons(fullDay)\n\nmessage_to_send = getMessageToSend(day_id, time, finalLessons)\nprint(message_to_send)","sub_path":"raspisanie.py","file_name":"raspisanie.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"56537597","text":"# Copyright (C) 2011-2018 Vas Vasiliadis\n# University of Chicago\n##\n__author__ = 'Vas Vasiliadis '\n\nimport sys\nimport time\nimport driver\nimport boto3\nimport os\nfrom gas import app, db\n\n\"\"\"A rudimentary timer for coarse-grained profiling\n\"\"\"\n\nclass Timer(object):\n def __init__(self, verbose=True):\n self.verbose = verbose\n\n def __enter__(self):\n self.start = time.time()\n return self\n\n def __exit__(self, *args):\n self.end = time.time()\n self.secs = self.end - self.start\n if self.verbose:\n print(\"Total runtime: {0:.6f} seconds\".format(self.secs))\n\n# copies the annotated results file and the associated log from AnnTools instance to the S3 gas-results bucket.\ndef upload_file(s3, directory_file, key):\n s3.meta.client.upload_file(directory_file + '.annot.vcf', 'gas-results', key + '.annot.vcf')\n s3.meta.client.upload_file(directory_file + '.vcf.count.log', 'gas-results', key + '.vcf.count.log')\n print(\"Update results and log files successfully.\")\n\ndef update_database(annotation_table, key, job_id):\n annotation_table.update_item(Key={'job_id': job_id},\n AttributeUpdates={'job_status': {'Value': 'COMPLETED', 'Action': 'PUT'},\n 's3_results_bucket': {'Value': 'gas-results', 'Action': 'PUT'},\n 's3_key_result_file': {'Value': key + '.annot.vcf', 'Action': 'PUT'},\n 's3_key_log_file': {'Value': key + '.vcf.count.log', 'Action': 'PUT'},\n 'complete_time': {'Value': int(time.time()), 'Action': 'PUT'}},\n Expected={'job_status': {'Value': 'RUNNING', 'ComparisonOperator': 'EQ'}})\n print('Update database successfully.')\n\n# delete files on the AnnTools instance\ndef remove_file(task_file, directory_file, task_dir):\n os.remove(task_file)\n os.remove(directory_file + '.annot.vcf')\n os.remove(directory_file + '.vcf.count.log')\n os.rmdir(task_dir)\n print('Delete task files successfully.')\n\nif __name__ == '__main__':\n # Call the AnnTools pipeline\n if len(sys.argv) > 3:\n with Timer():\n driver.run(sys.argv[1], 'vcf')\n print('Finish annotation successfully.')\n # upload results and logs\n try:\n s3 = boto3.resource('s3')\n task_file = sys.argv[1]\n directory_file = task_file.replace('.vcf', '')\n key = sys.argv[2].replace('.vcf', '')\n upload_file(s3, directory_file, key)\n except Exception as e:\n print(e)\n print('Cannot upload files.')\n # remove files on AnnTools instance\n try:\n task_file 
= sys.argv[1]\n directory_file = task_file.replace('.vcf', '')\n task_dir = os.path.dirname(task_file)\n remove_file(task_file, directory_file, task_dir)\n except Exception as e:\n print(e)\n print('Cannot remove files on AnnTools instance.')\n # update the database\n try:\n job_id = sys.argv[3]\n key = sys.argv[2].replace('.vcf', '')\n dynamodb = boto3.resource('dynamodb', region_name=app.config['AWS_REGION_NAME'])\n annotation_table = dynamodb.Table('huangxy_annotations')\n update_database(annotation_table, key, job_id)\n except Exception as e:\n print(e)\n print('Cannot update the database.')\n # publish a notification to this topic\n try:\n sns = boto3.client('sns', region_name=app.config['AWS_REGION_NAME']) \n sns_response = sns.publish(TopicArn=app.config['AWS_SNS_JOB_COMPLETE_TOPIC'],\n Message=json.dumps(annotation_table.get_item(Key={'job_id': job_id})['Item']))\n except Exception as e:\n print(e)\n else:\n print(\"A valid .vcf file must be provided as input to this program.\")\n\n\n\n","sub_path":"gas/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"352808727","text":"\"\"\"\nTest shap decomposition calculations\n\"\"\"\nimport logging\nfrom typing import Set, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom pytools.viz.dendrogram import LinkageTree\nfrom sklearndf.pipeline import RegressorPipelineDF\n\nfrom facet.crossfit import LearnerCrossfit\nfrom facet.inspection import LearnerInspector\n\nlog = logging.getLogger(__name__)\n\n\ndef test_shap_decomposition(regressor_inspector: LearnerInspector) -> None:\n\n # noinspection PyPep8Naming\n def _calculate_relative_syn_and_red(\n feature_x: str, feature_y: str, is_indirect_syn_valid: bool\n ) -> Tuple[float, float, float, float]:\n iv = regressor_inspector.shap_interaction_values(consolidate=None)\n # Get 3 components for each feature:\n # S = interaction SHAP\n # A, B = independent SHAP\n # U, V = sum of interactions with 3rd variables\n iv_x = iv.xs(feature_x, level=-1)\n iv_y = iv.xs(feature_y, level=-1)\n X = iv_x.sum(axis=1).rename(\"X\")\n Y = iv_y.sum(axis=1).rename(\"Y\")\n A = iv_x.loc[:, feature_x]\n B = iv_y.loc[:, feature_y]\n S = iv_x.loc[:, feature_y]\n U = X - A - S\n V = Y - B - S\n # calculate the \"indirect\" S, such that cov(U, S) == 0 and cov(V, S) == 0\n k_U = max(0.0, cov(S, U) / var(S)) if is_indirect_syn_valid else 0.0\n k_V = max(0.0, cov(S, V) / var(S)) if is_indirect_syn_valid else 0.0\n print_list(**{\"cov(U, S) / var(S)\": k_U, \"cov(V, S) / var(S)\": k_V})\n varS = var(S)\n Su = S if varS == 0 else S * k_U\n Sv = S if varS == 0 else S * k_V\n U_ = U - Su\n V_ = V - Sv\n print_list(\n stdS=std(S),\n stdSu=std(Su),\n stdSv=std(Sv),\n stdU=std(U),\n stdU_=std(U_),\n stdV=std(V),\n stdV_=std(V_),\n )\n # calculate the minimal shared vector R, such that cov(X_ - R, Y_ - R) == 0\n X_ = X - S - Su\n Y_ = Y - S - Sv\n AUT = X_ + Y_\n AUT_asym = X_\n R_ = AUT / 2\n dXY = std(X_ - Y_)\n dR = std(R_)\n R = R_ * (1 - dXY / (2 * dR))\n print_list(\n stdX=std(X),\n stdY=std(Y),\n stdX_=std(X_),\n stdY_=std(Y_),\n stdR=std(R),\n covX_R_Y_R=round(cov(X_ - R, Y_ - R), 15),\n )\n SYN = 2 * S + Su + Sv\n SYN_asym = S + Su\n RED = 2 * R\n RED_asym = R\n UNI = X + Y - RED\n UNI_asym = X - RED_asym\n syn = std(SYN)\n aut = std(AUT)\n red = std(RED)\n uni = std(UNI)\n syn_asym = std(SYN_asym)\n aut_asym = std(AUT_asym)\n red_asym = std(RED_asym)\n uni_asym = 
std(UNI_asym)\n print_list(syn=syn, aut=aut, red=red, uni=uni)\n return (\n syn / (syn + aut),\n red / (red + uni),\n syn_asym / (syn_asym + aut_asym),\n red_asym / (red_asym + uni_asym),\n )\n\n for i, j, indirect_syn in [\n (\"LSTAT\", \"RM\", False),\n (\"LSTAT\", \"DIS\", True),\n (\"LSTAT\", \"AGE\", False),\n (\"LSTAT\", \"NOX\", False),\n (\"LSTAT\", \"CRIM\", False),\n (\"RM\", \"DIS\", False),\n (\"RM\", \"AGE\", False),\n (\"RM\", \"NOX\", False),\n (\"RM\", \"CRIM\", False),\n ]:\n print(f\"\\ncomparing features X={i} and Y={j}\")\n\n syn_rel, red_rel, syn_rel_asym, red_rel_asym = _calculate_relative_syn_and_red(\n feature_x=i, feature_y=j, is_indirect_syn_valid=indirect_syn\n )\n\n syn_matrix = regressor_inspector.feature_synergy_matrix(symmetrical=True)\n red_matrix = regressor_inspector.feature_redundancy_matrix(symmetrical=True)\n syn_matrix_asym = regressor_inspector.feature_synergy_matrix()\n red_matrix_asym = regressor_inspector.feature_redundancy_matrix()\n\n print_list(\n syn_rel=syn_rel,\n red_rel=red_rel,\n syn_rel_asym=syn_rel_asym,\n red_rel_asym=red_rel_asym,\n syn_matrix=syn_matrix.loc[i, j],\n red_matrix=red_matrix.loc[i, j],\n syn_matrix_asym=syn_matrix_asym.loc[i, j],\n red_matrix_asym=red_matrix_asym.loc[i, j],\n percentage=True,\n )\n\n assert np.isclose(red_matrix.loc[i, j], red_rel)\n assert np.isclose(red_matrix.loc[j, i], red_rel)\n assert np.isclose(syn_matrix.loc[i, j], syn_rel)\n assert np.isclose(syn_matrix.loc[j, i], syn_rel)\n assert np.isclose(red_matrix_asym.loc[i, j], red_rel_asym)\n assert np.isclose(syn_matrix_asym.loc[i, j], syn_rel_asym)\n\n # check basic matrix properties\n\n n_features = len(regressor_inspector.features)\n\n for matrix in (syn_matrix, syn_matrix_asym, red_matrix, red_matrix_asym):\n # matrix shape is n_features x n_features\n assert matrix.shape == (n_features, n_features)\n\n # values on the diagonal are all 1.0\n for a in range(n_features):\n assert matrix.iloc[a, a] == 1.0\n\n # there are no nan values\n assert matrix.notna().all().all()\n\n\ndef test_shap_decomposition_matrices(\n best_lgbm_crossfit: LearnerCrossfit[RegressorPipelineDF],\n feature_names: Set[str],\n regressor_inspector: LearnerInspector,\n) -> None:\n # Shap decomposition matrices (feature dependencies)\n association_matrix: pd.DataFrame = regressor_inspector.feature_association_matrix(\n clustered=False, symmetrical=True\n )\n\n # check that dimensions of pairwise feature matrices are equal to # of features,\n # and value ranges:\n for matrix, matrix_name in zip(\n (\n association_matrix,\n regressor_inspector.feature_synergy_matrix(),\n regressor_inspector.feature_redundancy_matrix(),\n ),\n (\"association\", \"synergy\", \"redundancy\"),\n ):\n matrix_full_name = f\"feature {matrix_name} matrix\"\n n_features = len(feature_names)\n assert len(matrix) == n_features, f\"rows in {matrix_full_name}\"\n assert len(matrix.columns) == n_features, f\"columns in {matrix_full_name}\"\n\n # check values\n for c in matrix.columns:\n assert (\n 0.0\n <= matrix.fillna(0).loc[:, c].min()\n <= matrix.fillna(0).loc[:, c].max()\n <= 1.0\n ), f\"Values of [0.0, 1.0] in {matrix_full_name}\"\n\n # check actual values:\n assert association_matrix.values == pytest.approx(\n np.array(\n [\n [1.0, 0.043, 0.233, 0.0, 0.162, 0.078]\n + [0.192, 0.156, 0.009, 0.022, 0.035, 0.008, 0.07],\n [0.043, 1.0, 0.155, 0.0, 0.056, 0.055]\n + [0.017, 0.225, 0.024, 0.021, 0.049, 0.145, 0.034],\n [0.233, 0.155, 1.0, 0.0, 0.123, 0.207]\n + [0.15, 0.044, 0.069, 0.225, 0.241, 0.149, 0.209],\n 
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0] + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.162, 0.056, 0.123, 0.0, 1.0, 0.051]\n + [0.017, 0.156, 0.19, 0.08, 0.15, 0.025, 0.029],\n [0.078, 0.055, 0.207, 0.0, 0.051, 1.0]\n + [0.088, 0.005, 0.081, 0.14, 0.027, 0.058, 0.49],\n [0.192, 0.017, 0.15, 0.0, 0.017, 0.088]\n + [1.0, 0.128, 0.015, 0.269, 0.14, 0.096, 0.295],\n [0.156, 0.225, 0.044, 0.0, 0.156, 0.005]\n + [0.128, 1.0, 0.255, 0.158, 0.273, 0.132, 0.023],\n [0.009, 0.024, 0.069, 0.0, 0.19, 0.081]\n + [0.015, 0.255, 1.0, 0.223, 0.188, 0.035, 0.049],\n [0.022, 0.021, 0.225, 0.0, 0.08, 0.14]\n + [0.269, 0.158, 0.223, 1.0, 0.284, 0.182, 0.097],\n [0.035, 0.049, 0.241, 0.0, 0.15, 0.027]\n + [0.14, 0.273, 0.188, 0.284, 1.0, 0.027, 0.031],\n [0.008, 0.145, 0.149, 0.0, 0.025, 0.058]\n + [0.096, 0.132, 0.035, 0.182, 0.027, 1.0, 0.057],\n [0.07, 0.034, 0.209, 0.0, 0.029, 0.49]\n + [0.295, 0.023, 0.049, 0.097, 0.031, 0.057, 1.0],\n ]\n ),\n abs=0.02,\n )\n\n # cluster associated features\n association_linkage = regressor_inspector.feature_association_linkage()\n\n assert isinstance(association_linkage, LinkageTree)\n\n\n#\n# auxiliary functions\n#\n\n\ndef cov(a: np.ndarray, b: np.ndarray) -> float:\n \"\"\"\n covariance, assuming a population mean of 0\n :param a: array of floats\n :param b: array of floats\n :return: covariance of a and b\n \"\"\"\n return (a * b).mean()\n\n\ndef var(a: np.ndarray) -> float:\n \"\"\"\n variance, assuming a population mean of 0\n :param a: array of floats\n :return: variance of a\n \"\"\"\n return cov(a, a)\n\n\ndef std(a: np.ndarray) -> float:\n \"\"\"\n standard deviation, assuming a population mean of 0\n :param a: array of floats\n :return: standard deviation of a\n \"\"\"\n return np.sqrt(var(a))\n\n\ndef corr(a: np.ndarray, b: np.ndarray) -> float:\n \"\"\"\n pearson correlation, assuming a population mean of 0\n :param a: array of floats\n :param b: array of floats\n :return: pearson correlation of a and b\n \"\"\"\n return cov(a, b) / np.sqrt(var(a) * var(b))\n\n\ndef print_list(*args, percentage: bool = False, **kwargs):\n \"\"\"\n print all arguments, including their names\n :param args: the arguments to print (as their names, print integers indicating \\\n the position)\n :param percentage: if `true`, print all arguments as % values\n :param kwargs: the named arguments to print\n :return:\n \"\"\"\n\n def _prt(_value, _name: Union[str, int]):\n if percentage:\n _value *= 100\n print(f\"{_name}: {_value:.4g}{'%' if percentage else ''}\")\n\n for name, arg in enumerate(args):\n _prt(arg, _name=name)\n for name, arg in kwargs.items():\n _prt(arg, _name=name)\n","sub_path":"test/test/facet/test_shap_decomposition.py","file_name":"test_shap_decomposition.py","file_ext":"py","file_size_in_byte":9872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"35737965","text":"#imprimir los \"n\" primeros numeros divididos entre 4 y multiplicados por 5\r\nimport os\r\n\r\n#INPUT\r\nn=int(os.sys.argv[1])\r\n\r\n#ITERADOR\r\nfor i in range(n):\r\n print(i*(5/4))\r\n\r\n#FIN ITERADOR\r\nprint(\"fin del bucle\")\r\n","sub_path":"rango-6.py","file_name":"rango-6.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"398160055","text":"\n'''\n##########################\n#### HELPER FUNCTIONS ####\n##########################\n'''\n\n#FUNCTION: Read tweets into dictionary from JSON files\ndef read_tweets(tweet_files, json):\n\tprint(\"Creating 
dictionary...\")\n\t\n\t#Read the specified files\n\ttweets = []\n\tfor file in tweet_files:\n\t\twith open(file, 'r') as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\ttweets.append(json.loads(line))\n\treturn tweets\n\n#FUNCTION: Create a dataframe from the tweets dictionary\ndef create_df(tweets, pd):\n\t#Load key pieces of data in a dataframe\n\tprint(\"Creating data frame...\")\n\tdf = pd.DataFrame()\n\tdf['text'] = list(map(lambda tweet: tweet['text'], tweets))\n\tdf['favorite_count'] = list(map(lambda tweet: tweet['favorite_count'], tweets))\n\tdf['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'], tweets))\n\tdf['user_name'] = list(map(lambda tweet: tweet['user']['screen_name'],tweets))\n\tdf['user_followers'] = list(map(lambda tweet: tweet['user']['followers_count'],tweets))\n\n\treturn df\n\n#FUNCTION: Print key statistics about a dataframe of tweets\ndef print_stats(df):\n\t#Print key statistics\n\tprint(\"*********** Key Statistics ***********\")\n\tprint(\"Number of Tweets: \" + str(len(df.index)))\n\tprint(\"Number of Favorites: \" + str(sum(df['favorite_count'])))\n\tprint(\"Number of Retweets: \" + str(sum(df['retweet_count'])))\n\tprint(\"Number of Distinct Users: \" + str(len(set(df['user_name']))))\n\tprint(\"First-Order Reach (Followers of Users): \"+ str(sum(df['user_followers'])))\n\tprint(\"**************************************\")\n\n#FUNCTION: Creates horizontal bar chart based on dictionary data\ndef hbar_chart(data, title, x_axis):\n\timport matplotlib.pyplot as plt\n\timport numpy as np\n\n\tplt.rcdefaults()\n\tfig, ax = plt.subplots(figsize=(12,8))\n\tmyValues = list(data.values())[0:25]\n\tmyDimensions = list(data.keys())[0:25]\n\ty_pos = np.arange(len(myDimensions))\n\tax.barh(y_pos, myValues, color = 'green')\n\tax.set_yticks(y_pos)\n\tax.set_yticklabels(myDimensions)\n\tplt.xlabel(x_axis)\n\tplt.title(title)\n\tplt.gca().invert_yaxis()\n\tplt.show()\n\n\treturn myValues, myDimensions\n\n\n'''\n##########################\n### Analysis Functions ###\n##########################\n'''\n\n#FUNCTION: Get Frequency of Words by Prevalence\ndef getWordFreq(tweet_text, re, min=25):\n\t#Imports\n\tfrom collections import Counter\n\n\t#Create a new Text Blob that compiles all the tweets into a single block of text\n\ttext_string = ''\n\tfor each in tweet_text:\n\t\ttext_string = text_string + each\n\ttext_string = text_string.lower()\n\n\t#Get Word Frequencies\n\tfrequency = {}\n\tmatch_pattern = re.findall(r'\\b[a-z]{8,15}\\b', text_string)\n\tcounts = Counter(match_pattern)\n\tfrequency = dict(counts)\n\n\t#Sort the list\n\toutput_dict = {}\n\tsorted_list = sorted(frequency, key=frequency.__getitem__, reverse=True)\n\tfor word in sorted_list:\n\t\tif frequency[word] > min:\n\t\t\toutput_dict[word] = frequency[word]\n\n\treturn output_dict\n\n\n\n#FUNCTION: Get Frequency of Words by Prevalence for a set of users\ndef getWordFreqUsers(df, user_list, re):\n\t\n\t#Imports\n\tfrom collections import Counter\n\tfrequency = {}\n\twords_seen = []\n\tall_users = []\n\n\t#populate the all_users list\n\tfor each in df['user_name']:\n\t\tall_users.append(each.lower())\n\n\t#Loop through rows in dataframe and count words\n\tfor index,row in df.iterrows():\n\t\tif row['user_name'] in user_list:\n\t\t\t# Get the tweet text\n\t\t\ttweet_text = row['text']\n\t\t\ttweet_text = tweet_text.lower()\n\n\t\t\t#Separate into words\n\t\t\twords = re.findall(r'\\b[a-z]{8,15}\\b', tweet_text)\n\t\t\tfor word in words:\n\t\t\t\tif word not in all_users:\n\t\t\t\t\tif word not in 
words_seen:\n\t\t\t\t\t\tfrequency[word] = 1\n\t\t\t\t\t\twords_seen.append(word)\n\t\t\t\t\telse:\n\t\t\t\t\t\tfrequency[word] = frequency[word] + 1\n\n\t#Sort the list\n\toutput_dict = {}\n\tsorted_list = sorted(frequency, key=frequency.__getitem__, reverse=True)\n\tfor word in sorted_list:\n\t\toutput_dict[word] = frequency[word]\n\n\treturn output_dict\n\n\n\n#FUNCTION: Get Users by Frequency of tweet\ndef getUserFreq(df, min=10):\n\t#Imports\n\tfrom collections import Counter\n\n\t#Instance Variables\n\tfrequency = {}\n\toutput_dict = {}\n\tuser_list = df['user_name']\n\n\t#Get the counts\n\tcounts = Counter(user_list)\n\tfrequency = dict(counts)\n\n\t#Sort the dictionary\n\tsorted_list = sorted(frequency, key=frequency.__getitem__, reverse=True)\n\tfor user in sorted_list:\n\t\tif frequency[user] > min:\n\t\t\toutput_dict[user] = frequency[user]\n\n\treturn output_dict\n\n\n\n#FUNCTION: Get Users by Volume of Reach\ndef getUserReach(df, min=10):\n\n\t#Instance Variables\n\tfrequency = {}\n\toutput_dict = {}\n\n\t#Create a dataframe for lookups\n\tlookup = {}\n\tfor index, row in df.iterrows():\n\t\tlookup[row['user_name']] = row['user_followers']\n\n\t#Get the reach counts\n\tusers_seen = []\n\tfor user in df['user_name']:\n\t\tif user not in users_seen:\n\t\t\tfrequency[user] = lookup[user]\n\t\t\tusers_seen.append(user)\n\t\telse:\n\t\t\tfrequency[user] = frequency[user]+ lookup[user]\n\n\n\t#Sort the dictionary\n\tsorted_list = sorted(frequency, key=frequency.__getitem__, reverse=True)\n\tfor user in sorted_list:\n\t\tif frequency[user] > min:\n\t\t\toutput_dict[user] = frequency[user]\n\n\treturn output_dict\n\n\n#FUNCTION: Get Users by Word\ndef getUserWord(df, word, minV=10):\n\n\t#Instance Variables\n\tfrequency = {}\n\toutput_dict = {}\n\tusers_seen = []\n\tuser_name = ''\n\n\t#Create a dataframe for lookups\n\tfor index, row in df.iterrows():\n\t\tif word in row['text'] :\n\t\t\tuser_name = row['user_name']\n\t\t\tif user_name not in users_seen:\n\t\t\t\tfrequency[user_name] = 1\n\t\t\t\tusers_seen.append(user_name)\n\t\t\telse:\n\t\t\t\tfrequency[user_name] = frequency[user_name] + 1\n\n\n\t#Sort the dictionary\n\tsorted_list = sorted(frequency, key=frequency.__getitem__, reverse=True)\n\tfor user in sorted_list:\n\t\toutput_dict[user] = frequency[user]\n\n\treturn output_dict\n\n\n\n\n'''\nMAIN FUNCTION FOR EXECUTION OF CODE\n'''\ndef main():\n\t## SETUP ##\n\n\t#Imports\n\tprint(\"Importing tools...\")\n\timport pandas as pd\n\timport json as js\n\timport re as re\n\timport matplotlib as mpl\n\timport matplotlib.pyplot as plt\n\timport numpy as np\n\n\t#Instance Variables\n\ttweet_files = ['healthtech/ht.json', 'digitalhealth/dh.json']\n\n\t#Populate Data Frame & Print Stats\n\ttweets = read_tweets(tweet_files, js)\n\tdf = create_df(tweets, pd)\n\tprint_stats(df)\n\n\t## ANALYSIS ##\n\t\n\t'''## Get word frequency and plot ##\n\twordFrequency = getWordFreq(df['text'],re)\n\thbar_chart(wordFrequency, \"Top Words in Tweet Text\", \"Word Prevalence\")\n\n\t#Get user frequency and plot\n\tuserFrequency = getUserFreq(df)\n\thbar_chart(userFrequency, \"Number of Tweets by User\", \"Number of Tweets\")\n\t\n\t#User Reach\n\tuserReach = getUserReach(df)\n\ttopUserValues, topUsers = hbar_chart(userReach, \"Reach by User\", \"Reach (# of Followers x # of Tweets)\")\n\n\t#Word Frequency for Top Users\n\tuserWordFreqUsers = getWordFreqUsers(df, topUsers, re)\n\thbar_chart(userWordFreqUsers, \"Top Words for Top Users\", \"Word Prevalence\")'''\n\n\t#Word Frequency for Top 
Users\n\tUserWord = getUserWord(df, \"wearable\", re)\n\thbar_chart(UserWord, \"Top Users for Word\", \"Tweet Count\")\n\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"analyze_tweets.py","file_name":"analyze_tweets.py","file_ext":"py","file_size_in_byte":6888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"480579666","text":"from apis.BeautifulSoup import BeautifulSoup as bs, BeautifulStoneSoup as bss, Tag\r\nfrom apis import googl\r\nfrom urlparse import urlparse\r\nimport re\r\nimport urllib, urllib2\r\nimport httplib\r\n\r\nclass Plugin:\r\n\tdef __init__(self, controller):\r\n\t\tself.c = controller\r\n\t\tself.useragent = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.60 Safari/537.1'\r\n\r\n\r\n\tdef sizeof_fmt(self, num):\r\n\t\tfor x in ['bytes','KB','MB','GB','TB']:\r\n\t\t\tif num < 1024.0:\r\n\t\t\t\treturn \"%3.1f%s\" % (num, x)\r\n\t\t\tnum /= 1024.0\r\n\r\n\r\n\tdef on_incoming(self, msg):\r\n\t\tif not msg.type == msg.CHANNEL: return\r\n\r\n\t\turls = re.findall(r'\\(?\\bhttps?://[-A-Za-z0-9+&@#/%?=~_()|!:,.;]*[-A-Za-z0-9+&@#/%=~_()|]', msg.body)\r\n\t\tfor url in urls:\r\n\t\t\tif url.startswith('(') and url.endswith(')'):\r\n\t\t\t\turl = url[1:-1]\r\n\r\n\t\t\to = urlparse(url)\r\n\t\t\tconn = httplib.HTTPConnection(o.netloc)\r\n\t\t\tconn.request(\"HEAD\", o.path)\r\n\t\t\thead = conn.getresponse()\r\n\r\n\t\t\tif 'text/html' in head.getheader('content-type'):\r\n\t\t\t\tmessage = 'Title: '+bs(urllib.urlopen(url), convertEntities=bs.HTML_ENTITIES).title.string.strip().replace('\\n', '')\r\n\t\t\telse:\r\n\t\t\t\tmessage = '%s: %s (%s)' % (re.search(r'/([^/]+)$', url).groups(1)[0], head.getheader('content-type'), self.sizeof_fmt(int(head.getheader('content-length'))))\r\n\t\t\tself.c.privmsg(msg.channel, message)\r\n\r\n\r\n\tdef trigger_w(self, msg):\r\n\t\t\"Usage: w . Prints a short description of the corresponding wikipedia article.\"\r\n\t\tif len(msg.args) == 0:\r\n\t\t\tself.c.notice(msg.nick, \"Please specify a search term\")\r\n\t\t\treturn\r\n\r\n\t\tparams = {'action':'opensearch', 'format':'xml', 'limit':'2', 'search':urllib.quote_plus(' '.join(msg.args))}\r\n\t\t\r\n\t\tresp = bss(urllib.urlopen(\"http://en.wikipedia.org/w/api.php?%s\" % urllib.urlencode(params)), convertEntities=bs.HTML_ENTITIES)\r\n\r\n\t\tif resp.textTag:\r\n\t\t\tindex = 1 if 'may refer to:' in resp.descriptionTag.string else 0\r\n\t\t\tself.c.privmsg(msg.channel, resp.findAll('description')[index].string)\r\n\t\telse:\r\n\t\t\tself.c.privmsg(msg.channel, '%s: No articles were found.'%' '.join(msg.args))\r\n\t\t\r\n\r\n\tdef trigger_g(self, msg):\r\n\t\t\"Usage: g . 
Prints title & short description of first google result.\"\r\n\t\tif len(msg.args) == 0:\r\n\t\t\tself.c.notice(msg.nick, \"Please specify a search term\")\r\n\t\t\treturn\r\n\r\n\t\turl = \"https://www.google.com.au/search?q=%s\" % (urllib.quote_plus(' '.join(msg.args)),)\r\n\t\treq = urllib2.Request(url, None, {'User-agent':self.useragent})\r\n\t\tentry = bs(urllib2.urlopen(req), convertEntities=bs.HTML_ENTITIES).find('div', 'vsc')\r\n\r\n\t\tif not entry:\r\n\t\t\tself.c.privmsg(msg.channel, '%s: No entries were found.'%' '.join(msg.args))\r\n\t\t\treturn\r\n\r\n\t\turl = googl.get_short(entry.find('a','l')['href'], self.c.config)\r\n\t\tmessage = \"\\002\\0032G\\0034o\\0038o\\0032g\\0033l\\0034e\\003 ::\\002 %s \\002::\\002 %s \\002::\\002 %s\" % (\r\n\t\t\tself.tag2string(entry.find('a','l')),\r\n\t\t\tself.tag2string(entry.find('span','st')),\r\n\t\t\turl,)\r\n\t\tself.c.privmsg(msg.channel, message)\r\n\r\n\r\n\tdef trigger_yt(self, msg):\r\n\t\t\"Usage: yt . Prints title and link to first youtube result.\"\r\n\t\tif len(msg.args) == 0:\r\n\t\t\tself.c.notice(msg.nick, \"Please specify a search term\")\r\n\t\t\treturn\r\n\r\n\t\turl = \"http://www.youtube.com/results?search_query=%s\" % (urllib.quote_plus(' '.join(msg.args)),)\r\n\t\treq = urllib2.Request(url, None, {'User-agent':self.useragent})\r\n\t\tentry = bs(urllib2.urlopen(req), convertEntities=bs.HTML_ENTITIES).find('div', 'yt-lockup-content')\r\n\r\n\t\tif not entry:\r\n\t\t\tself.c.privmsg(msg.channel, '%s: No entries were found.'%' '.join(msg.args))\r\n\t\t\treturn\r\n\r\n\t\tmessage = \"\\002You\\0030,4Tube\\003 ::\\002 %s \\002::\\002 %s \\002::\\002 %s\" % (\r\n\t\t\tentry.find('a', 'yt-uix-contextlink').string,\r\n\t\t\tself.tag2string(entry.find('p', 'description')),\r\n\t\t\t\"www.youtube.com\"+entry.find('a', 'yt-uix-contextlink')['href'],)\r\n\t\tself.c.privmsg(msg.channel, message)\r\n\r\n\r\n\tdef trigger_ud(self, msg):\r\n\t\t\"Usage: ud . 
Prints first UrbanDictionary result.\"\r\n\r\n\t\turl = \"http://www.urbandictionary.com/define.php?term=\"+urllib.quote_plus(' '.join(msg.args))\r\n\t\tsoup = bs(urllib2.urlopen(url), convertEntities=bs.HTML_ENTITIES)\r\n\t\tword = soup.find('td', 'word')\r\n\t\tif not word:\r\n\t\t\tself.c.privmsg(msg.channel, '%s: No entries were found.'%' '.join(msg.args))\r\n\t\t\treturn\r\n\r\n\t\tword = self.tag2string(word).strip()\r\n\t\tdefi = self.tag2string(soup.find('div', 'definition')).split('(2**31-1):\n return 0\n return int(val[::-1])\n\nif __name__ == \"__main__\":\n a = Solution()\n print(a.reverse(-123))\n print(a.reverse(2324))","sub_path":"reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"188195645","text":"import json\nimport random\nimport util\n\ndef direction_to_food(food, you, occupied, height, width):\n head_pos = you[0]\n optimal_move_score = {}\n possible_move_score = {}\n\n if int(food[\"y\"]) < int(head_pos[\"y\"]) and not util.check_lane(head_pos, occupied, \"up\", width, height):\n print('f trying up')\n\n if util.is_possible_move(\"up\", you, occupied, height, width, spacing=2):\n score = util.score_move(head_pos, occupied, \"up\")\n optimal_move_score['up'] = score\n\n elif int(food[\"y\"]) > int(head_pos[\"y\"]) and not util.check_lane(head_pos, occupied, \"down\", width, height):\n print('f trying down')\n if util.is_possible_move(\"down\", you, occupied, height, width, spacing=2):\n score = util.score_move(head_pos, occupied, \"down\")\n optimal_move_score['down'] = score\n\n if int(food[\"x\"]) < int(head_pos[\"x\"]) and not util.check_lane(head_pos, occupied, \"left\", width, height):\n print('f trying left')\n\n if util.is_possible_move(\"left\", you, occupied, height, width, spacing=2):\n score = util.score_move(head_pos, occupied, \"left\")\n optimal_move_score['left'] = score\n\n elif int(food[\"x\"]) > int(head_pos[\"x\"]) and not util.check_lane(head_pos, occupied, \"up\", width, height):\n print('f trying right')\n if util.is_possible_move(\"right\", you, occupied, height, width, spacing=2):\n score = util.score_move(head_pos, occupied, \"right\")\n optimal_move_score['right'] = score\n\n if optimal_move_score:\n best_moves = util.minimums(optimal_move_score)\n print(\"best moves are {} with scores {}\".format(best_moves.keys(), best_moves.values()))\n move = random.choice(best_moves.keys())\n return move\n\n print('f I ended up in moveTried')\n if util.is_possible_move(\"up\", you, occupied, height, width):\n score = util.score_move(head_pos, occupied, \"up\")\n possible_move_score[\"up\"] = score\n\n if util.is_possible_move(\"down\", you, occupied, height, width):\n score = util.score_move(head_pos, occupied, \"down\")\n possible_move_score[\"down\"] = score\n\n if util.is_possible_move(\"left\", you, occupied, height, width):\n score = util.score_move(head_pos, occupied, \"left\")\n possible_move_score[\"left\"] = score\n\n if util.is_possible_move(\"right\", you, occupied, height, width):\n score = util.score_move(head_pos, occupied, \"right\")\n possible_move_score[\"right\"] = score\n\n if not possible_move_score:\n print(\"failure, no possible moves, trying to go towards tail\")\n move = 'left'\n\n else:\n best_moves = util.minimums(possible_move_score)\n print(\"best moves are {} with scores {}\".format(best_moves.keys(), best_moves.values()))\n move = random.choice(best_moves.keys())\n print(move)\n 
return move\n\n\ndef get_closest_food(foods, headPos):\n closest_distance = util.calculate_direction(foods[0], headPos)\n closest_food = foods[0]\n\n for food in foods:\n new_distance = util.calculate_direction(food, headPos)\n if closest_distance > new_distance:\n closest_distance = new_distance\n closest_food = food\n\n return closest_food\n","sub_path":"app/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"138716983","text":"# Default imports\nimport numpy as np\n\nipl_matches_array =np.genfromtxt(\"data/ipl_matches_small.csv\", dtype=\"|S50\", skip_header=1, delimiter=\",\")\n\ndef get_total_deliveries_played(batsmen):\n batsmen1 = batsmen\n #ipl_matches_array = np.genfromtxt(path,dtype='|S50', skip_header=1,delimiter = ',')\n del_batsman_array = ipl_matches_array[:,(11,13)]\n count = 0\n for i in del_batsman_array:\n if i[1] == batsmen1:\n count = count + 1\n\n return count\nget_total_deliveries_played('SR Tendulkar')\n","sub_path":"q01_get_total_deliveries_players/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383920258","text":"import jwt\n\nfrom src.errors import handleErr, InvalidMagicLinkError\n\nglobal db\nglobal secret\n\ndef doMagic(email, token, doInvalidate = True):\n try:\n searchQuery = {\n \"$and\": [\n {\n \"email\": email,\n \"token\": hash(token)\n },\n {\n \"$nor\": [ { \"token\": \"\" } ]\n }\n ]\n }\n invalidateQuery = { \"$set\": { \"token\": \"\" }}\n\n if doInvalidate:\n result = db.users.update_one(searchQuery, invalidateQuery)\n if (result.modified_count == 0):\n raise InvalidMagicLinkError()\n else:\n result = db.users.find_one(searchQuery)\n if result is None:\n raise InvalidMagicLinkError()\n\n return [ True, None ]\n\n except Exception as err:\n return [ None, handleErr(err) ]\n\ndef tokenIfMagic(email, token):\n data0, err0 = doMagic(email, token)\n if data0:\n token = jwt.encode(payload = { \"email\": email }, key = secret, algorithm = \"HS256\")\n return [ token, None ] \n else: \n return [ None, handleErr(err0) ]","sub_path":"src/endpoints/magic/get/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"143800817","text":"import socket\nimport re\nimport time\nimport click\nfrom pprint import pprint\nfrom prometheus_client import Gauge, Counter, Histogram, Summary, start_http_server\n\n\nSESSION_CNT = Gauge(\n \"haproxy_origin_session_counter\", \"Session counter from stick table\", [\"origin\"]\n)\nCONNECTION_RATE = Gauge(\"haproxy_origin_conn_rate\", \"Connection Rate\", [\"origin\"])\nCONNECTION_CNT = Gauge(\"haproxy_origin_conn_cur\", \"Current Connections\", [\"origin\"])\nREQUEST_RATE = Gauge(\"haproxy_origin_http_req_rate\", \"HTTP request rate\", [\"origin\"])\nBYTES_IN = Gauge(\"haproxy_origin_bytes_in_rate\", \"Bytes in rate\", [\"origin\"])\nBYTES_OUT = Gauge(\"haproxy_origin_bytes_out_rate\", \"Bytes out rate\", [\"origin\"])\n\n\ndef update(command, host=None, port=None, fid=None):\n \"\"\" Connect to socket\n \"\"\"\n if host and port:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n elif fid:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(fid)\n else:\n raise ValueError(\"Need socket 
details\")\n # Connect to server and send data\n sock.sendall(bytes(command + \"\\n\", \"utf-8\"))\n f = sock.makefile(\"r\")\n while True:\n line = f.readline()\n if line == \"\":\n break\n parse_line(line)\n sock.shutdown(socket.SHUT_WR)\n sock.recv(999999)\n sock.close()\n\n\ndef normalize_pair(key, value):\n obj = re.match(r\"(^[^()]*)(\\((.*)\\))?\", key)\n assert obj\n key = obj.group(1)\n # -> rates are in (average) bytes per \n # period is in miliseconds\n if obj.group(3):\n interval = int(obj.group(3) or 1)\n value = float(value) * (1000 / interval)\n return key, value\n\n\ndef parse_line(line):\n line = line.strip()\n if not line or line[0] == \"#\":\n return\n obj = dict()\n q = re.match(r\"^0x[0-9a-f]+: (.*)$\", line)\n if q:\n keyvaluepairs = q.group(1).strip().split(\" \")\n for item in keyvaluepairs:\n key, value = normalize_pair(*item.split(\"=\"))\n try:\n obj[key] = float(value)\n except:\n obj[key] = value\n origin = obj.get(\"key\")\n SESSION_CNT.labels(origin).set(obj.get(\"sess_cnt\"))\n CONNECTION_RATE.labels(origin).set(obj.get(\"conn_rate\"))\n CONNECTION_CNT.labels(origin).set(obj.get(\"conn_cur\"))\n REQUEST_RATE.labels(origin).set(obj.get(\"http_req_rate\"))\n BYTES_IN.labels(origin).set(obj.get(\"bytes_in_rate\"))\n BYTES_OUT.labels(origin).set(obj.get(\"bytes_out_rate\"))\n\n\n@click.command()\n@click.option(\"--socket\")\n@click.option(\"--host\")\n@click.option(\"--port\", type=int, default=9999)\n@click.option(\"--listen\", type=int, default=8085)\n@click.option(\"--table\", default=\"node_track_origin\")\ndef main(host, port, socket, listen, table):\n start_http_server(listen)\n # While loop\n while True:\n update(\"show table {}\".format(table), host=host, port=port, fid=socket)\n time.sleep(10)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"files/haproxy_exporter.py","file_name":"haproxy_exporter.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"220311302","text":"#!/usr/bin/python3\n\"\"\"\nFunction to print on the stdout\n\"\"\"\n\n\ndef say_my_name(first_name, last_name=\"\"):\n \"\"\"\n Print on the stdout a string.\n\n Args:\n first_name (str): name\n last_name (str): last name.\n\n Return:\n str: first_name and last_name concatenated.\n \"\"\"\n if not isinstance(first_name, str):\n raise TypeError(\"first_name must be a string\")\n if not isinstance(last_name, str):\n raise TypeError(\"last_name must be a string\")\n print(\"My name is {} {}\".format(first_name, last_name))\n","sub_path":"0x07-python-test_driven_development/3-say_my_name.py","file_name":"3-say_my_name.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"99702866","text":"\n\n\"\"\"\"\"\"\"\"\"\nEncoders basic description:\n\nSay that you have hidden vectors\nf1, f2, f3 and f4 (corresponding to \"the brown dog eats\") for the forward model and\nb1, b2, b3, b4 (corresponding to \"eats dog brown the\") for the backward model. 
\nThen you either:\n\n- take the last: \nyou use the concatenation of (f4 and b4).\n\n- take the maximum: \nyou compute max(f1,f2,f3,f4) component-wise\n(the same for the forward and for the backward),\nthus getting a vector of the same dimensionality of f_i, and also max(b1,b2,b3,b4) and,\nagain you concatenate them.\n\nThen you feed the vectors obtained in this way to a \nlinear classifier to train the probing tasks.\n\n\"\"\"\"\"\n\n##############################################\n\n# Imports\n\n#############################################\n\nfrom keras import Model\nfrom keras.layers import Input\nfrom keras.layers import LSTM\nfrom keras.layers import Dense\nfrom keras.layers import GlobalMaxPooling1D\nfrom keras.layers import Concatenate\nfrom keras.layers import Bidirectional\nfrom keras.layers import Reshape\nfrom pylab import *\nfrom corpora_tools import *\n\n\n##############################################\n\n# Corpus and vectorization\n\n#############################################\n\n# SpaCy lineBYline pipeline\nit_nlp = spacy.load('it_core_news_sm')\nen_nlp = spacy.load('en_core_web_sm')\n\nln = file_len(ep_en)\n\nnlp = en_nlp\nfiles = ep_en\ntrees = []\nsents = []\nfile = open(files, 'r')\nline = file.readline()\nfor line in file:\n doc = nlp(line)\n # tokenize sentences\n sents.append(line)\n # dependency trees sent)\n trees.append([list(to_nltk_tree(sn.root)) for sn in doc.sents])\n# transforms trees in integers sequence, eos is the highest integers\ndep_trees, eos = clean_tree(trees, get_eos=True)\n\n# visualizing obtained data\nmax_len(dep_trees, box_plot=True)\n\n# vectorization (with corpus cleaning and all)\nlang_model = sents2space(sents, 'en', 300, 0)\n\n\n# setting the network corpus\ns2t_network_corpus = s2t_trainig_set(cp, dep_trees, lang_model, eos)\n\n# Training data set\nnetwork_corpus = s2t_network_corpus\ntrain_size = int((len(network_corpus)*65)/100)\nclass_size = eos+1\nx_size = len(network_corpus[0][1])\ny_size = len(network_corpus[0][0])\nx = list(np.zeros(train_size))\ny = list(np.zeros(train_size))\nfor i in range(train_size):\n x[i] = [vec for vec in network_corpus[i][1]]\n y[i] = network_corpus[i][0]\n # reshape: sample, time steps, feature at each time step.\n # if I have 1000 sentences of 10 words, presented in a 3-dim vector:\n # is nb_samples = 1000, time steps = 10, input_dim = 3\nX = array(x).reshape(train_size, x_size, 300) # reshapes date into 3D matrix\nY = array(y).reshape(train_size, y_size, 1) # reshapes date into 3D matrix\n\n\n##############################################\n\n# Max/Lastencoders models\n\n#############################################\n\n# x_size = 3\n# y_size = 3\n# class_size = 3\n\n# shared input\ninputs = Input(shape=(None, 300))\n\n\n'# MAX_ENCODER #'\n# Forward LSTM\n# input_shape = (time_steps, features)\nf_lstm = LSTM(512, return_sequences=True)(inputs)\n\n# Forward Max-pooling component-wise\nmax_f = GlobalMaxPooling1D()(f_lstm)\n\n# backward LSTM\n# input_shape = (time_steps, features)\nb_lstm = LSTM(512, return_sequences=True, go_backwards=True)(inputs)\n\n# Backwards Max-pooling component-wise\nmax_b = GlobalMaxPooling1D()(b_lstm)\n\n# Concatenate (extract here the output after training, using command)\nconcatenation = Concatenate(axis=-1)([max_f, max_b])\n\n# to create a dim= representation\ndense = Dense(300)(concatenation)\n\n# reshape into a 3D tensor list (add time_steps=1)\nreshape = Reshape((1, 300), input_shape=(x_size, 300))(dense)\n\n# decoder (theoretically is a one-to-many, practically is a 
many-to-many)\noutputs = LSTM(y_size, return_sequences=True)(reshape)\n\nr2 = Reshape((y_size, 1), input_shape=(x_size, y_size))(outputs)\n\nout = Dense(class_size, activation='sigmoid')(r2)\n\n# d2 = TimeDistributed(Dense(101, activation='sigmoid'))(outputs)\n\n# Compiling the model\nmax_model = Model(inputs=inputs, outputs=out)\nmax_model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmax_model.summary()\nmax_model.fit(X, Y, epochs=3)\n\n\n'# LAST_ENCODER #'\n# Forward LSTM\n# input_shape = (time_steps, features)\n# bi_lstm = Bidirectional(LSTM(512, return_sequences=False),\n# merge_mode='concat')(inputs)\n\n# last_dense = Dense(300)(bi_lstm)\n\n# r1 = Reshape((1, 300), input_shape=(x_size, 300))(last_dense)\n\n# last_lstm = LSTM(y_size, return_sequences=True)(r1)\n\n# r2 = Reshape((y_size, 1), input_shape=(x_size, y_size))(last_lstm)\n\n# out = Dense(class_size, activation='sigmoid')(r2)\n\n# # Compiling the model\n# last_model = Model(inputs=inputs, outputs=out)\n# last_model.compile(optimizer='adam',\n# loss='sparse_categorical_crossentropy',\n# metrics=['accuracy'])\n# last_model.summary()\n# last_model.fit(X, Y, epochs=3)\n\n\n##############################################\n\n# Test/Training\n\n#############################################\n\n# Testing\nto_test = len(network_corpus)-train_size\npredictions = list(np.zeros(to_test))\ncorrect = list(np.zeros(to_test))\nvi_correct = 0\nfor e in range(to_test):\n print('Prediction'+str(e)+'/'+str(to_test))\n test = []\n test = [v for v in network_corpus[e+train_size][1]]\n correct[e] = network_corpus[e+train_size][0]\n X = array(test).reshape(1, x_size, 300)\n resp = max_model.predict(X, verbose=2) # gives probability distributions\n predictions[e] = resp.argmax(axis=-1) # extract the actual predicted sequence\n print('Completed')\n\n##############################################\n\n# Hidden representation extraction\n\n#############################################\n\n# Hidden max-representation extraction\nlayer_name = 'hidene_layer_name' # use .summary() to extract the name of the layer\nhidden_max = Model(inputs=max_model.input,\n outputs=max_model.get_layer(layer_name).output)\nintermediate_output = hidden_max.predict(X)\n\n\n\n\n","sub_path":"seq2tree_bilstm.py","file_name":"seq2tree_bilstm.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137472800","text":"import pytest\nimport torch\n\nfrom sbi.simulators.linear_gaussian import linear_gaussian\n\n\n@pytest.mark.parametrize(\"D, N\", ((1, 10000), (5, 100000)))\ndef test_linearGaussian_simulator(D: int, N: int):\n \"\"\"Test linear Gaussian simulator. 
\n \n Args:\n D: parameter dimension\n N: number of samples\n \"\"\"\n\n true_parameters = torch.zeros(D)\n num_simulations = N\n parameters = true_parameters.repeat(num_simulations).reshape(-1, D)\n observations = linear_gaussian(parameters)\n\n # Check shapes.\n assert parameters.shape == torch.Size(\n (N, D)\n ), f\"wrong shape of parameters: {parameters.shape} != {torch.Size((N, D))}\"\n assert observations.shape == torch.Size([N, D])\n\n # Chec mean and std.\n assert torch.allclose(\n observations.mean(axis=0), true_parameters, atol=5e-2\n ), f\"Expected mean of zero, obtained {observations.mean(axis=0)}\"\n assert torch.allclose(\n observations.std(axis=0), torch.ones(D), atol=5e-2\n ), f\"Expected std of one, obtained {observations.std(axis=0)}\"\n","sub_path":"tests/linearGaussian_simulator_test.py","file_name":"linearGaussian_simulator_test.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"620427094","text":"from lazyflow.graph import Operator, InputSlot, OutputSlot\n\nfrom lazyflow.operators.ioOperators import OpStreamingHdf5Reader, OpInputDataReader\n\nimport uuid\n\nclass DatasetInfo(object):\n \"\"\"\n Struct-like class for describing dataset info.\n \"\"\"\n class Location():\n FileSystem = 0\n ProjectInternal = 1\n \n def __init__(self):\n Location = DatasetInfo.Location\n self.location = Location.FileSystem # Whether the data will be found/stored on the filesystem or in the project file\n self._filePath = \"\" # The original path to the data (also used as a fallback if the data isn't in the project yet)\n self._datasetId = \"\" # The name of the data within the project file (if it is stored locally)\n self.allowLabels = True # Whether or not this dataset should be used for training a classifier.\n self.axisorder = None\n\n @property\n def filePath(self):\n return self._filePath\n \n @filePath.setter\n def filePath(self, newPath):\n self._filePath = newPath\n # Reset our id any time the filepath changes\n self._datasetId = str(uuid.uuid1())\n \n @property\n def datasetId(self):\n return self._datasetId \n\nclass OpDataSelection(Operator):\n \"\"\"\n The top-level operator for the data selection applet, implemented as a single-image operator.\n The applet uses an OperatorWrapper to make it suitable for use in a workflow.\n \"\"\"\n name = \"OpDataSelection\"\n category = \"Top-level\"\n \n SupportedExtensions = OpInputDataReader.SupportedExtensions\n\n # Inputs \n ProjectFile = InputSlot(stype='object') #: The project hdf5 File object (already opened)\n ProjectDataGroup = InputSlot(stype='string') #: The internal path to the hdf5 group where project-local datasets are stored within the project file\n WorkingDirectory = InputSlot(stype='filestring') #: The filesystem directory where the project file is located\n Dataset = InputSlot(stype='object') #: A DatasetInfo object\n\n # Outputs\n ImageName = OutputSlot(stype='string') #: The name of the output image\n Image = OutputSlot() #: The output image\n AllowLabels = OutputSlot(stype='bool') #: A bool indicating whether or not this image can be used for training\n \n def __init__(self, *args, **kwargs):\n super(OpDataSelection, self).__init__(*args, **kwargs)\n self._opReader = None\n \n def setupOutputs(self):\n datasetInfo = self.Dataset.value\n internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId\n\n # Data only comes from the project file if the user said so AND it exists in the project\n datasetInProject = 
(datasetInfo.location == DatasetInfo.Location.ProjectInternal)\n datasetInProject &= self.ProjectFile.connected() and \\\n internalPath in self.ProjectFile.value\n\n if self._opReader is not None:\n self.Image.disconnect()\n self._opReader.cleanUp()\n \n # If we should find the data in the project file, use a dataset reader\n if datasetInProject:\n self._opReader = OpStreamingHdf5Reader(parent=self)\n self._opReader.Hdf5File.setValue(self.ProjectFile.value)\n self._opReader.InternalPath.setValue(internalPath)\n providerSlot = self._opReader.OutputImage\n else:\n # Use a normal (filesystem) reader\n self._opReader = OpInputDataReader(parent=self)\n if datasetInfo.axisorder is not None:\n self._opReader.DefaultAxisOrder.setValue( datasetInfo.axisorder )\n self._opReader.WorkingDirectory.connect( self.WorkingDirectory )\n self._opReader.FilePath.setValue(datasetInfo.filePath)\n providerSlot = self._opReader.Output \n \n # Connect our external outputs to the internal operators we chose\n self.Image.connect(providerSlot)\n \n # Set the image name and usage flag\n self.AllowLabels.setValue( datasetInfo.allowLabels )\n self.ImageName.setValue(datasetInfo.filePath)\n\n def propagateDirty(self, slot, subindex, roi):\n # Output slots are directly connected to internal operators\n pass\n","sub_path":"ilastik/applets/dataSelection/opDataSelection.py","file_name":"opDataSelection.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"439211956","text":"import requests\nfrom lxml import etree\nimport re\nimport pymysql\nimport eventlet # 导入eventlet这个模块\nimport time\n\n\ndef get(num):\n conn = pymysql.connect(\n host=\"192.168.1.8\",\n port=3306,\n user=\"root\",\n password=\"123456\",\n database=\"goods\",\n charset=\"utf8\",\n )\n cursor = conn.cursor()\n url = \"http://bbs1.people.com.cn/quickSearch.do?field=title&threadtype=1&content=%E7%96%AB%E6%83%85&pageNo={}\".format(\n num, )\n headers = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"Cookie\": \"a4e1f7e77e424a2eac26a83e0ae9b40f=WyIyNzczMTM2NjA1Il0; sso_c=0; sfr=1; wdcid=767a0b7852881e25; wdses=6320ccb9c1bbb721; wdlast=1582112190\",\n \"Host\": \"bbs1.people.com.cn\",\n \"Referer\": \"http://bbs1.people.com.cn/quickSearch.do?field=title&threadtype=1&content=%E7%96%AB%E6%83%85&pageNo=766\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n }\n headers1 = {\n \"Accept\": \"text/html, */*; q=0.01\",\n # \"Referer\": \"http://bbs1.people.com.cn/post/1/1/1/174881539.html\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n response = requests.get(url, headers=headers).text\n html = etree.HTML(response)\n a_urls = html.xpath(\"//p[@class='treeTitle']/a[@class='treeReply']\")\n for a in a_urls:\n a_url = \"http://bbs1.people.com.cn\" + a.xpath(\"./@href\")[0]\n response1 = requests.get(a_url, headers=headers).text\n try:\n content_path = re.search(r'content_path=\"(.*?)\"', response1).group(1)\n html1 = etree.HTML(response1)\n title = \"\".join(a.xpath(\".//text()\")).strip()\n # 内容\n a1 = 
content_path.split(\"posts\")[0] + \"txt_new\" + content_path.split(\"posts\")[1]\n print(a1)\n eventlet.monkey_patch() # 必须加这条代码\n with eventlet.Timeout(5, False): # 设置超时时间为2秒\n time.sleep(1)\n print('没有跳过这条输出')\n response2 = requests.get(a1, headers=headers1).text\n html2 = etree.HTML(response2)\n content = \"\".join(html2.xpath(\"//text()\")).strip()\n # 评论\n comment = \"\"\n lis = html1.xpath(\"//ul[@class='subUL']/li\")\n if len(lis) > 0:\n for li in lis:\n t1 = \"\".join(li.xpath(\"./p/a[@class='treeReply']/text()\")).strip()\n comment = comment + \"\\n\" + t1\n new_content = (content + comment).strip()\n else:\n new_content = content.strip()\n # print(new_content.strip())\n source = \"强国论坛\"\n word = \"疫情\"\n if len(new_content) > 0:\n print(title)\n try:\n sql = \"insert into luntan(title, content, source, url, word) values(%s,%s, %s, %s, %s)\"\n cursor.execute(sql, (title, new_content, source, a_url, word))\n conn.commit()\n except Exception as e:\n print(e)\n\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n for i in range(707, 766):\n print(\"当前为第%s页\" % (i))\n get(i)","sub_path":"02/0220/强国论坛.py","file_name":"强国论坛.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"430290348","text":"\r\nclass Walltotalblocks:\r\n def __init__(self, totalwallm2=\"0\", blockhightcm=\"0\", blocklengthcm=\"0\"):\r\n self.totalwallm2 = totalwallm2\r\n self.blockhightcm = blockhightcm\r\n self.blocklengthcm= blocklengthcm\r\n \r\n @property\r\n def totalwallm2(self):\r\n print(\"Retrieving the total m2\")\r\n \r\n return self.__totalwallm2\r\n \r\n # This is the setter\r\n @totalwallm2.setter\r\n def totalwallm2(self, value):\r\n if value.isdigit():\r\n self.__totalwallm2 = value\r\n else:\r\n print(\"Please only enter numbers of centimeters in length\") \r\n \r\n @property\r\n def blockhightcm(self):\r\n print(\"Retrieving the masonary block hight in centimeters\")\r\n return self.__blockhightcm\r\n \r\n @blockhightcm.setter\r\n def blockhightcm(self, value):\r\n if value.isdigit():\r\n self.__blockhightcm = value\r\n else:\r\n print(\"Please only enter numbers of centimeters in hight\")\r\n \r\n @property\r\n def blocklengthcm(self):\r\n print(\"Retrieving the masonary block length in centimeters\")\r\n return self.__blocklengthcm\r\n \r\n @blocklengthcm.setter\r\n def blocklengthcm(self, value):\r\n if value.isdigit():\r\n self.__blocklengthcm = value\r\n else:\r\n print(\"Please only enter numbers of centimeters in length\")\r\n \r\n \r\n def gettotalmasonryblocks(self):\r\n return int(self.__totalwallm2) / (float(self.__blockhightcm) * float(self.__blocklengthcm) / 10000)\r\n\r\n \r\ndef main():\r\n aWalltotalblocks = Walltotalblocks()\r\n \r\n totalwallm2 = input(\"Total m2 to be build : \")\r\n blocklengthcm = input(\"Centimeters in hight of the masonry block : \")\r\n blockhightcm = input(\"Centimeters in lenght of the masonry block: \")\r\n \r\n aWalltotalblocks.totalwallm2 = totalwallm2\r\n aWalltotalblocks.blockhightcm = blockhightcm\r\n aWalltotalblocks.blocklengthcm = blocklengthcm\r\n \r\n print(\"Total m2 to be build :\", aWalltotalblocks.totalwallm2)\r\n print(\"Centimeters in hight :\", aWalltotalblocks.blockhightcm)\r\n print(\"Centimeters in lenght :\", aWalltotalblocks.blocklengthcm)\r\n print(\"The number of Masonry blocks for this construction is :\", aWalltotalblocks.gettotalmasonryblocks())\r\n 
\r\nmain()","sub_path":"masonryblock_calculation.py","file_name":"masonryblock_calculation.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468820077","text":"import unittest\nimport os\nimport pandas as pd \nfrom flood_forecast.preprocessing.process_usgs import process_intermediate_csv, create_csv\nfrom flood_forecast.preprocessing.interpolate_preprocess import fix_timezones, interpolate_missing_values\n\nclass DataQualityTests(unittest.TestCase):\n def setUp(self):\n self.test_data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"test_data\")\n\n def test_intermediate_csv(self): \n df = pd.read_csv(os.path.join(self.test_data_path, \"big_black_test_small.csv\"), sep=\"\\t\")\n result_df, max_flow, min_flow = process_intermediate_csv(df)\n self.assertEqual(result_df.iloc[1]['datetime'].hour, 6)\n self.assertGreater(max_flow, 2640)\n self.assertLess(min_flow, 1600)\n\n def test_tz_interpolate_fix(self):\n \"\"\"\n Additional function to test interpolation\n \"\"\"\n file_path = os.path.join(self.test_data_path, \"river_test_sm.csv\")\n revised_df = fix_timezones(file_path)\n self.assertEqual(revised_df.iloc[0]['cfs'], 0.0)\n self.assertEqual(revised_df.iloc[1]['tmpf'], 19.94)\n revised_df = interpolate_missing_values(revised_df)\n self.assertEqual(0, sum(pd.isnull(revised_df['cfs'])))\n self.assertEqual(0, sum(pd.isnull(revised_df['precip'])))\n\n def test_chunking(self):\n self.assertEqual(1,1)\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"tests/usgs_tests.py","file_name":"usgs_tests.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"502096114","text":"#Author: Rishab Kanwal\t\n#Calculates Power and energy used while cycling using command line arguments i.Mass of rider ii.Mass of bike iii.Velocity in m/s iv. Coefficient of draft v.km covered\n#CSCI 1300\n#Assignment 2\n#Problem 2\n#TA: Sina Aghil\nimport sys\nimport random\nif len(sys.argv) != 6:\n print(\"Use command line arguments as follows i.Mass of rider ii.Mass of bike iii.Velocity in m/s iv. 
Coefficient of draft v.km covered\")\nelse: \n k = 0.18 #k is a coefficient related to rider position (0.18)\n Cr = 0.001 #Cr is a coefficient related to the resistance of the bike on the road (0.001)\n g = 9.8 # g is gravity considered constant at 9.8 m/s^2\n Pa = k * float(sys.argv[4]) * float(sys.argv[3])**3 # Pa is the power required to overcome air resistance\n Pr = Cr * g * (float(sys.argv[1]) + float(sys.argv[2])) * float(sys.argv[3]) # Pr is the power required to overcome air resistance\n Ps = Pa + Pr #Ps is the power required per second\n print(str(round(Ps)) + \"W\")\n tT = (float(sys.argv[5]) * 1000) / float(sys.argv[3]) #Calculates time required in seconds to cover distance given in command line(in km)\n Et = Ps * tT #Calculates total energy \n print(\"Total energy \" + str(round(Et)) + \"kJ\")\n tTm = int(tT / 60) #Converts time required to minutes\n Emt = 0 # Introduces vaiable for energy used total\n x = 0\n while x < tTm:\n Cd = random.uniform(0.5, 1) #Generates a random number for drag between .5 and 1\n Pa = k * Cd * float(sys.argv[3])**3 #Pa is the power required to overcome air resistance\n Pr = Cr * g * (float(sys.argv[1]) + float(sys.argv[2])) * float(sys.argv[3]) ## Pa is the power required to overcome rolling resistance\n Ps = Pa + Pr #Ps is the power required per second\n Em = Ps * tTm #Calculates energy used in a minute\n Emt = Em + Emt #Sums current minutes energy and energy used earlier\n print(\"The total energy so far is \" + str(round(float(Emt))) + \"KJ\")\n x = x + 1\n AEt = Emt / tTm #Calculates the average energy used\n print(\"The average energy is \" + str(round(float(AEt))) + \"KJ\") \n","sub_path":"Assignment2/cycling.py","file_name":"cycling.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"609091947","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 11 19:35:44 2019\n\n@author: mushtu\n\"\"\"\n\n# Dictionary nested inside a dictionary nested inside a dictionary\nd = {'key1':{'nestkey':{'subnestkey':'value'}}}\n\n# Accessing values by calling the keys\nd['key1']['nestkey']['subnestkey']\n\n\n\n\n\n\n\n\n\n\n\n# Empty nested dictionary \nDict = { 'Dict1': { }, \n 'Dict2': { }} \nprint(Dict) \n \n\n\n\n\n\n\n\n\n\n\n\n\n \n# Nested dictionary having same keys \nDict = { 'Dict1': {'name': 'Ali', 'age': '19'}, \n 'Dict2': {'name': 'Bob', 'age': '25'}} \nprint(Dict) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n# Nested dictionary of mixed dictionary keys \nDict = { 'Dict1': {1: 'G', 2: 'F', 3: 'G'}, \n 'Dict2': {'Name': 'Geeks', 1: [1, 2]} } \nprint(Dict) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Create a typical dictionary\nd = {'key1':1,'key2':2,'key3':3}\n# Method to return a list of all keys \nd.keys()\n# Method to grab all values\nd.values()\n# Method to return tuples of all items\nd.items()\n\n\n\n\n\n\n\n\n\n\n\n# Python code to demonstrate working of \n# str() and items() \n \n# Initializing dictionary \ndict1 = { 'Name' : 'Sam', 'Age' : 21 } \n \n# using str() to display dict1 as string \nprint (\"The constituents of dictionary as string are : \") \nprint (str(dict1)) \n \n# using str() to display dict1 as list \nprint (\"The constituents of dictionary as list are : \") \nprint (dict1.items()) \n\n\n\n\n\n\n\n\n\n\n\n\n# Python code to demonstrate working of \n# len() and type() \n \n# Initializing dictionary \ndict1 = { 'Name' : 'Sam', 'Age' : 21, 'ID' : 2541997 } \n \n# using len() to display dic size \nprint (\"The size of dic is : \",end=\"\") \nprint 
(len(dict1)) \n \n# using type() to display data type \nprint (\"The data type of dic is : \",end=\"\") \nprint (type(dict1)) \n\n\n\n\n\n\n\n\n\n# Python code to demonstrate working of \n# clear() and copy() \n \n# Initializing dictionary \ndict1 = { 'Name' : 'Dan', 'Age' : 19 } \n \n# Initializing dictionary \ndict3 = {} \n \n# using copy() to make shallow copy of dictionary \ndict3 = dict1.copy() \n \n# printing new dictionary \nprint (\"The new copied dictionary is : \") \nprint (dict3.items()) \n \n# clearing the dictionary \ndict1.clear() \ndict1 \n# printing cleared dictionary \nprint (\"The contents of deleted dictionary is : \",end=\"\") \nprint (dict1.items()) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Python code to demonstrate working of update() \n# Initializing dictionary 1 \ndict1 = { 'Name' : 'Jon', 'Age' : 20 } \n \n# Initializing dictionary 2 \ndict2 = { 'ID' : 2541997 } \n \n# using update to add dict2 values in dict 1 \ndict1.update(dict2) \ndict1 \n\nw={\"house\":\"Haus\",\"cat\":\"Katze\",\"red\":\"rot\"}\nw1 = {\"red\":\"rouge\",\"blau\":\"bleu\"}\nw.update(w1)\nw\n\n\n\n\n\n\n\n\n\n# Initial Dictionary \nDict = { 5 : 'Welcome', 6 : 'To', 7 : 'RPI', \n 'A' : {1 : 'RPI', 2 : 'For', 3 : 'CS'}, \n 'B' : {1 : 'Great', 2 : 'Life'}} \nprint(\"Initial Dictionary: \") \nprint(Dict) \n \n\n\n\n\n\n\n\n \n# Deleting a Key value \ndel Dict[6] \nprint(\"\\nDeleting a specific key: \") \nprint(Dict) \n \n# Deleting a Key from \n# Nested Dictionary \ndel Dict['A'][2] \nprint(\"\\nDeleting a key from Nested Dictionary: \") \nprint(Dict) \n \n\n\n\n\n\n\n \n# Deleting a Key \n# using pop() \nDict.pop(5) \nprint(\"\\nPopping specific element: \") \nprint(Dict) \n \n# Deleting a Key-value pair \n# using popitem() \nDict.popitem() \nprint(\"\\nPops first element: \") \nprint(Dict) \n \n\n\n\n\n\n\n\n \n# Deleting entire Dictionary \nDict.clear() \nprint(\"\\nDeleting Entire Dictionary: \") \nprint(Dict) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##For loops\nd = {'Red': 1, 'Green': 2, 'Blue': 3}\nfor k in d:\n print(k,' corresponds to ',d[k])\n\n\n\n\n\n\n\n\n##another way\nfor color_key, value in d.items():\n print(color_key, 'corresponds to ', d[color_key]) \n\n\n\n\n##Only keys\nfor k in d.keys():\n\tprint (k)\n\n#Only values \nfor j in d.values():\n\tprint (j)\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n \n##Lists to Dictionary\n\ndishes = [\"pizza\", \"sauerkraut\", \"paella\", \"Hamburger\"]\ncountries = [\"Italy\", \"Germany\", \"Spain\", \"USA\"]\n##List comprehension\nname_to_value_dict = {key:value for key, value in zip(countries, dishes)}\nname_to_value_dict\n\n##Another method:for loop\nname_to_value_dict = {}\nfor key, value in zip(countries, dishes):\n name_to_value_dict[key] = value\nname_to_value_dict\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##Problem 1\nd1 = {'a': 100, 'b': 200}\nd2 = {'x': 300, 'y': 200}\nd = d1.copy()\nd.update(d2)\nprint(d)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##Problem 2\ndictionary={'one':5, 'two':1, 'three':6, 'four':10}\nval = list(dictionary.values()) \nval.sort() \nres = val[-2] \nprint(res) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##Problem 3\n# stores name and corresponding salaries \nsalary = {\"emp1\" : 50000, \"emp2\" : 60000, \"emp3\" : 5000} \n \n# stores the salaries only \nlist1 = salary.values() \nprint(sum(list1)) # prints the sum of all salaries \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##Problem 4\n\n# Python3 code to demonstrate \n# getting selective dictionary keys \n# using list comprehension \n \n# initializing dictionary 
\ntest_dict = {\"A\" : 1, \"B\" : 2, \"C\" : 3, \"D\" : 4} \n# initializing selective list keys \nselect_list = ['C', 'D'] \n# using list comprehension \n# getting selective dictionary keys \nres = [test_dict[i] for i in select_list if i in test_dict] \n \n# printing result \nprint (\"The selected values from list keys is : \" + str(res)) \n\n\n##Method 2\n\n# Python3 code to demonstrate \n# getting selective dictionary keys \n# using set.intersection() \n \n# initializing dictionary \ntest_dict = {\"A\" : 1, \"B\" : 2, \"C\" : 3, \"D\" : 4} \n# initializing selective list keys \nselect_list = ['C', 'D'] \n# using set.intersection() \n# getting selective dictionary keys \ntemp = list(set(select_list).intersection(test_dict)) \nres = [test_dict[i] for i in temp] \nres \n# printing result \nprint (\"The selected values from list keys is : \" + str(res)) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#Problem 5\n##Method 1\n# Python3 code to demonstrate \n# Element Occurrence in dictionary value list \n# using list comprehension + sum() \n# initializing dictionary \ntest_dict = { \"A\" : [1, 4, 5, 3], \n \"B\" : [4, 6], \n \"C\" : [5, 2, 1] } \n \n# initializing test list \ntest_list = [2, 1] \n \n# using list comprehension + sum() \n# Element Occurrence in dictionary value list \nres = {idx : sum(1 for i in j if i in test_list) \n for idx, j in test_dict.items()} \nres \n# print result \nprint(\"The summation of element occurrence : \" + str(res)) \n\n##Method 2\n\n# Python3 code to demonstrate \n# Element Occurrence in dictionary value list \n# using collections.Counter() \nfrom collections import Counter \n \n# initializing dictionary \ntest_dict = { \"A\" : [1, 4, 5, 3], \n \"B\" : [4, 6], \n \"C\" : [5, 2, 1] } \n \n# initializing test list \ntest_list = [2, 1] \n# using collections.Counter() \n# Element Occurrence in dictionary value list \n# omits the 0 occurrence word key \nres = (Counter(j for j in test_dict \n for i in test_list if i in test_dict[j])) \nres \n# print result \nprint(\"The summation of element occurrence : \" + str(res)) \n","sub_path":"CS1010Fall2019-master/Class Excercises/Dictionaries2.py","file_name":"Dictionaries2.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"287787361","text":"import cv2\nimport numpy as np\n\nfrom common.device.c_android import AndroidDevice\nfrom common.device.c_ios import IOSDevice\nfrom common.runtime import Config\nfrom games.azurelane.scenes.enemy_search import EnemySearch\n\n\ndef main():\n # 从设备获得屏幕截图\n cfg = Config()\n # cfg.device_type = 'local'\n print(cfg)\n width = 1136\n height = 640\n\n if cfg.device_type == 'ios':\n d = IOSDevice(cfg=cfg)\n width = d.screen_x\n height = d.screen_y\n screen = d.screen_capture_handler(gray=False)\n elif cfg.device_type == 'android':\n d = AndroidDevice(cfg=cfg)\n width = d.screen_x\n height = d.screen_y\n screen = d.screen_capture_handler(gray=False)\n else:\n # 读取本地磁盘的截屏文件\n screen = cv2.imread(cfg.screenshot_to_disk_file_name)\n\n # 灰度转换方法一: 读取RGB后转换\n screen_gray = cv2.cvtColor(screen, cv2.COLOR_RGB2GRAY)\n # 灰度转换方法二: 直接读取\n # screen_gray = cv2.imread(image_path, 0)\n\n feature_paths = [\n # ('./games/azurelane/assets/scenes_feature/zh-cn/continue_next_auto_fight_confirm.png', 0.7),\n ('./games/arknights/assets/scenes_feature/level_finish_detection.png', 0.7),\n ]\n for fp in feature_paths:\n feature = cv2.imread(fp[0], 0)\n feature_w, feature_h = feature.shape[::-1]\n 
res = cv2.matchTemplate(screen_gray, feature, cv2.TM_CCOEFF_NORMED)\n print(fp[0])\n possible_targets = []\n # 使用灰度图像中的坐标对原始RGB图像进行标记\n loc = np.where(res >= fp[1])\n for pt in zip(*loc[::-1]):\n x, y = pt[0] + feature_w / 2, pt[1] + feature_h / 2\n if len(possible_targets) > 0:\n last = possible_targets[len(possible_targets) - 1]\n if x - last[0] > 20:\n possible_targets.append((x, y))\n else:\n possible_targets.append((x, y))\n\n cv2.rectangle(screen, pt, (pt[0] + feature_w, pt[1] + feature_h), (7, 249, 151), 2)\n print(possible_targets)\n\n # 红色方框框出不可点击区域\n if cfg.game_name == 'azurelane':\n red_zones = EnemySearch.red_zones\n for i in range(len(red_zones)):\n zone = red_zones[i]\n cv2.rectangle(screen, zone[0], zone[1], (0, 0, 255), 3)\n\n # 显示图像\n window_name = \"detect-result\"\n cv2.namedWindow(window_name, 0)\n cv2.resizeWindow(window_name, width, height)\n cv2.moveWindow(window_name, 100, 100)\n cv2.imshow(window_name, screen)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"510677179","text":"# Implementation of game - Pong.\n\nimport simplegui\nimport random\n\n# initialize globals - pos and vel encode vertical info for paddles\nWIDTH = 600\nHEIGHT = 400 \nBALL_RADIUS = 20\nPAD_WIDTH = 8\nPAD_HEIGHT = 80\nHALF_PAD_WIDTH = PAD_WIDTH / 2\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\nLEFT = False\nRIGHT = True\n\n# initialize ball_pos and ball_vel for new bal in middle of table\n# if direction is RIGHT, the ball's velocity is upper right, else upper left\ndef spawn_ball(direction):\n global ball_pos, ball_vel # these are vectors stored as lists\n ball_pos = [300,200]\n if direction:\n ball_vel = [random.random()*3 + 1,-random.random() - 1]\n else:\n ball_vel = [-random.random()*3 - 1,-random.random() - 1]\n\n# define event handlers\ndef new_game():\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\n global score1, score2 # these are ints\n spawn_ball(random.choice([LEFT, RIGHT]))\n paddle1_pos = [HALF_PAD_WIDTH, HEIGHT/2]\n paddle2_pos = [WIDTH - HALF_PAD_WIDTH, HEIGHT/2]\n paddle1_vel = 0\n paddle2_vel = 0\n score1 = 0\n score2 = 0\n \ndef draw(canvas):\n global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel\n \n # draw mid line and gutters\n canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\n canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\n canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\n \n #draw scores\n canvas.draw_text(str(score1), [185,100], 50, 'Green')\n canvas.draw_text(str(score2), [385,100], 50, 'Green')\n \n # update ball\n ball_pos[0] += ball_vel[0]\n ball_pos[1] += ball_vel[1]\n \n # draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 1, 'Red', 'Red')\n \n # update paddle's vertical position, keep paddle on the screen\n if paddle1_pos[1] + paddle1_vel >= 40 and paddle1_pos[1] + paddle1_vel <= 360:\n paddle1_pos[1] += paddle1_vel\n \n if paddle2_pos[1] + paddle2_vel >= 40 and paddle2_pos[1] + paddle2_vel <= 360:\n paddle2_pos[1] += paddle2_vel\n \n # draw paddles\n canvas.draw_polygon([[paddle1_pos[0] - 4, paddle1_pos[1] - 40], [paddle1_pos[0] - 4, paddle1_pos[1] + 40], [paddle1_pos[0] + 4, paddle1_pos[1] + 40], [paddle1_pos[0] + 4, paddle1_pos[1] - 40]], 2, 'Purple', 'Purple')\n canvas.draw_polygon([[paddle2_pos[0] - 4, paddle2_pos[1] - 
40], [paddle2_pos[0] - 4, paddle2_pos[1] + 40], [paddle2_pos[0] + 4, paddle2_pos[1] + 40], [paddle2_pos[0] + 4, paddle2_pos[1] - 40]], 2, 'Orange', 'Orange')\n \n # determine whether paddle and ball collide\n if ball_pos[0] - 20 <= 8:\n if ball_pos[1] - paddle1_pos[1] <= 40 and ball_pos[1] - paddle1_pos[1] >= -40:\n ball_vel[0] = -(ball_vel[0] + 0.1 * ball_vel[0])\n else:\n score2 += 1\n spawn_ball(RIGHT)\n if ball_pos[0] + 20 >= 592:\n if ball_pos[1] - paddle2_pos[1] <= 40 and ball_pos[1] - paddle2_pos[1] >= -40:\n ball_vel[0] = -(ball_vel[0] + 0.1 * ball_vel[0])\n else:\n spawn_ball(LEFT)\n score1 += 1\n if ball_pos[1] - 20 <= 0 or ball_pos[1] + 20 >= 400:\n ball_vel[1] *= -1\n \n \ndef keydown(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP[\"w\"]:\n paddle1_vel -= 4\n elif key == simplegui.KEY_MAP[\"s\"]:\n paddle1_vel += 4\n if key == simplegui.KEY_MAP[\"up\"]:\n paddle2_vel -= 4\n elif key == simplegui.KEY_MAP[\"down\"]:\n paddle2_vel += 4\n \ndef keyup(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP[\"w\"]:\n paddle1_vel += 4\n elif key == simplegui.KEY_MAP[\"s\"]:\n paddle1_vel -= 4\n if key == simplegui.KEY_MAP[\"up\"]:\n paddle2_vel += 4\n elif key == simplegui.KEY_MAP[\"down\"]:\n paddle2_vel -= 4\n\n\n# create frame\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nframe.add_button(\"Restart\", new_game, 100)\n\n\n# start frame\nnew_game()\nframe.start()","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"18376874","text":"\n\nfrom xai.brain.wordbase.nouns._dogfight import _DOGFIGHT\n\n#calss header\nclass _DOGFIGHTS(_DOGFIGHT, ):\n\tdef __init__(self,): \n\t\t_DOGFIGHT.__init__(self)\n\t\tself.name = \"DOGFIGHTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"dogfight\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_dogfights.py","file_name":"_dogfights.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"386623138","text":"\"\"\"\nFunctions to load and dump yaml formatted descriptions of a demography.\nThe strictyaml schema defined here follows the DemeGraph construction API.\n\"\"\"\n# TODO: add symmetric_migration and subgraph schemas.\n\nfrom strictyaml import (\n Optional,\n Map,\n MapPattern,\n Float,\n Int,\n Seq,\n Str,\n dirty_load,\n as_document,\n)\n\nimport demes\n\nNumber = Int() | Float()\n\n_epoch_schema = Map(\n {\n \"start_time\": Number,\n Optional(\"end_time\"): Number,\n Optional(\"initial_size\"): Number,\n Optional(\"final_size\"): Number,\n }\n)\n\n_migration_schema = Map(\n {\n Optional(\"start_time\"): Number,\n Optional(\"end_time\"): Number,\n \"source\": Str(),\n \"dest\": Str(),\n \"rate\": Float(),\n }\n)\n\n_pulse_schema = Map(\n {\"time\": Number, \"source\": Str(), \"dest\": Str(), \"proportion\": Float()}\n)\n\n_deme_schema = Map(\n {\n Optional(\"ancestor\"): Str(),\n Optional(\"start_time\"): Number,\n Optional(\"end_time\"): Number,\n Optional(\"initial_size\"): Number,\n Optional(\"final_size\"): Number,\n Optional(\"epochs\"): Seq(_epoch_schema),\n }\n)\n\n_deme_graph_schema = Map(\n {\n \"description\": Str(),\n \"time_units\": Str(),\n \"generation_time\": Number,\n Optional(\"doi\"): Str(),\n Optional(\"default_Ne\"): Number,\n \"demes\": 
MapPattern(Str(), _deme_schema),\n Optional(\"migrations\"): Seq(_migration_schema),\n Optional(\"pulses\"): Seq(_pulse_schema),\n }\n)\n\n\ndef loads(string):\n yaml = dirty_load(string, schema=_deme_graph_schema, allow_flow_style=True)\n d = yaml.data # data dict\n g = demes.DemeGraph(\n description=d.get(\"description\"),\n time_units=d.get(\"time_units\"),\n generation_time=d.get(\"generation_time\"),\n doi=d.get(\"doi\"),\n default_Ne=d.get(\"default_Ne\"),\n )\n for deme_id, deme_dict in d.get(\"demes\", dict()).items():\n if \"epochs\" in deme_dict:\n deme_dict[\"epochs\"] = [\n demes.Epoch(**epoch_dict) for epoch_dict in deme_dict[\"epochs\"]\n ]\n g.deme(deme_id, **deme_dict)\n for migration_dict in d.get(\"migrations\", []):\n g.migration(**migration_dict)\n for pulse_dict in d.get(\"pulses\", []):\n g.pulse(**pulse_dict)\n return g\n\n\ndef load(filename):\n with open(filename) as f:\n return loads(f.read())\n\n\ndef dumps(demes_graph):\n d = demes_graph.asdict_compact()\n doc = as_document(d, schema=_deme_graph_schema)\n return doc.as_yaml()\n\n\ndef dump(demes_graph, filename):\n with open(filename, \"w\") as f:\n f.write(dumps(demes_graph))\n","sub_path":"demes/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"428026666","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom notification.models import Notification\n\n# Create your views here.\n@login_required\ndef notification_view(request, user_id):\n notifications = Notification.objects.filter(receiver__id=user_id)\n received_notification = []\n for notified in notifications:\n if not notified.viewed_at:\n received_notification.append(notified.tweet)\n notified.viewed_at = timezone.now()\n notified.save()\n \n \n return render(request, \"notification_view.html\",{\"rn\": received_notification[::-1]})","sub_path":"notification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"34624309","text":"__author__ = 'Guillermo Avendano-Franco'\n\nfrom _searcher import Searcher\n\n\nclass GreyWolf(Searcher):\n def __init__(self, population, params=None, fraction_evaluated=0.8, generation_size=32, stabilization_limit=10):\n self.population = population\n\n Searcher.__init__(self, self.population, fraction_evaluated, generation_size, stabilization_limit)\n self.a = None\n self.c = None\n self.set_params(params)\n\n def set_params(self, params):\n if params is None:\n self.a = 1\n self.c = 1\n return\n if 'a' not in params:\n self.a = 1\n else:\n self.a = params['a']\n if 'c' not in params:\n self.c = 1\n else:\n self.c = params['c']\n\n def run_one_cycle(self):\n pass\n","sub_path":"pychemia/searcher/_grey.py","file_name":"_grey.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"353104186","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime as dt\n\nfrom sklearn.decomposition import PCA\nimport sklearn as sk\nimport sklearn.ensemble\nimport sklearn.linear_model\nimport sklearn.naive_bayes\nimport sklearn.discriminant_analysis\nfrom xgboost import XGBClassifier\n\n\n############################################################\n\n\nDIR = 
'../input/'\ntrain_set = pd.read_csv(DIR + 'train.csv')\nprint('Import finished')\n\nX = train_set.drop('label', axis=1)\nX = X/255.0\nlabels = train_set['label']\ndel train_set\n\n\n############################################################\n\ndef reduce_byPCA(df, n):\n pca = PCA(n_components=n)\n pca.fit(df)\n df = pca.transform(df)\n\n columns = ['pca{}'.format(x) for x in range(n)]\n\n return pd.DataFrame(df, columns=columns)\n\n\n############################################################\n\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import ShuffleSplit\n\nMLA_AT_columns = ['pca','best1_score', 'best1_param', 'best2_score','best2_param', 'best3_score', 'best3_param']\nMLA_AT = pd.DataFrame(columns=MLA_AT_columns)\n\n\ndef GridSearch_method(model, model_name, params):\n\n n_pca = 150\n X_pca = reduce_byPCA(X, n_pca)\n\n cv_split = ShuffleSplit(n_splits=4, test_size=.25, train_size=.75, random_state=8)\n\n clf = GridSearchCV(model, params, cv=cv_split, return_train_score=False).fit(X_pca, labels)\n\n gs_results = pd.DataFrame(clf.cv_results_).loc[:, ['mean_test_score', 'rank_test_score', 'params']].sort_values \\\n (by='rank_test_score')\n\n MLA_AT.loc[model_name, 'pca'] = n_pca\n for rank in [1, 2, 3]:\n MLA_AT.loc[model_name, 'best{}_score'.format(rank)] = clf.cv_results_['mean_test_score'][rank -1]\n MLA_AT.loc[model_name, 'best{}_param'.format(rank)] = str(clf.cv_results_['params'][rank - 1])\n\n print(clf.best_params_)\n return gs_results\n\n############################################################\n\nparams = {'n_estimators' : [25, 50, 75, 125],\n 'base_estimator__max_depth' : [1, 5, 10, 20],\n 'max_features': [0.6, 0.8, 1.0],\n 'max_samples' : [0.05, 0.1, 0.2, 0.5]}\n\nBC = GridSearch_method(model=sk.ensemble.BaggingClassifier(),\n model_name='sk.ensemble.BaggingClassifier',\n params=params)\n\n\nBC.to_csv('BC.csv', index=False)\nMLA_AT.to_csv('MLA_AT_BC.csv', index=False)\n\n","sub_path":"Project4_Digit_Recognition/Separation_of_MLA_GridSearch/DR_BC.py","file_name":"DR_BC.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638474918","text":"import numpy as np\nimport theano as th\nimport theano.tensor as T\n\nimport thul\nDefault = thul.Default\n\n'''Common optimizers'''\n\nclass Optimizer(object):\n '''\n Abstract class for all optimizers\n '''\n pass\n\nclass ParamOptimizer(Optimizer):\n '''\n Abstract class for all optimizer that only optimizes parameters of specific model (There are other optimizers for model topo)\n\n Args:\n name_: string\n '''\n def __init__(self, name_):\n self.name = name_\n\nclass SGDOptimizer(ParamOptimizer):\n '''\n class for all SGD based optimizer\n\n Args:\n name_: name for this optimizer, will affect optimizer parameters\n '''\n def __init__(self, name_):\n super(SGDOptimizer, self).__init__(name_)\n\n def apply_grad(self, mdl_, v_params_, s_grads_, params_group_='params', oparams_group_='oparams'):\n '''\n Apply gradient to optimizer\n\n Args:\n mdl_: Model instance to put optimizer parameters\n v_params_: list of variables to apply gradient\n s_grads_: list of variables representing symbolic gradient\n params_group_: string, name of variable group of model contains parameters\n oparams_group_: string, name of variable group of model contains optimizer parameters\n\n Returns: list of symbolic updates\n\n Notes:\n Implementation of this method should always make sure the first updates\n in the list 
corresponds to v_params_\n '''\n raise NotImplementedError()\n return []\n\n\nclass VanillaSGD(SGDOptimizer):\n '''\n Just calculates gradients and applys.\n\n Optimizer parameters:\n lr: dtype is\n\n '''\n def __init__(self, name_):\n super(VanillaSGD, self).__init__(name_)\n\n def apply_grad(self, mdl_, v_params_, s_grads_, params_group_='params', oparams_group_='oparams', lr_=1e-3):\n '''\n Args:\n lr_: float, initial learn rate\n\n '''\n assert isinstance(lr_, float)\n with mdl_.get_group(oparams_group_):\n v_lr = mdl_.get_variable(name_=self.name + '_lr', init_=np.asarray(lr_, dtype=th.config.floatX))\n return [p - g * v_lr for (p, g) in zip(v_params_, s_grads_)]\n\n\nclass AdamSGD(SGDOptimizer):\n def __init__(self, name_='ozer_adam'):\n super(AdamSGD,self).__init__(name_)\n\n def apply_grad(\n self, mdl_,\n v_params_, s_grads_,\n params_group_='params', oparams_group_='oparams',\n lr_=1e-3,\n beta1_=.9,\n beta2_=.999,\n eps_=1e-9):\n def get_shared_shape(v_):\n return v_.get_value(return_internal_type=True).shape\n\n with mdl_.get_group(oparams_group_):\n v_lr = mdl_.get_variable(name_=self.name + '_lr', init_=np.asarray(lr_, dtype=th.config.floatX))\n v_beta = mdl_.get_variable(name_=self.name + '_beta', init_=np.asarray([beta1_, beta2_], dtype=th.config.floatX))\n v_eps = mdl_.get_variable(name_=self.name + '_eps', init_=np.asarray(eps_, dtype=th.config.floatX))\n v_timestep = mdl_.get_variable(name_=self.name + '_timestep', init_=np.asarray(eps_, dtype=th.config.floatX))\n v_m_li = [mdl_.get_variable(name_='adam_m_'+p.name, shape_=get_shared_shape(p), init_=0.) for p in v_params_]\n v_v_li = [mdl_.get_variable(name_='adam_v_'+p.name, shape_=get_shared_shape(p), init_=0.) for p in v_params_]\n s_bs = 1. / (1. - v_beta * v_timestep)\n s_b1, s_b2 = v_beta[0], v_beta[1]\n s_b1s, s_b2s = s_bs[0], s_bs[1]\n r_m = [(m, (m*s_b1 + (1.-s_b1)*g)) for m,g in zip(v_m_li,s_grads_)]\n r_v = [(v, (v*s_b2 + (1.-s_b2)*g*g)) for v,g in zip(v_v_li,s_grads_)]\n r_grad = [(p, p-(s_b1s*m*v_lr)/(T.sqrt(s_b2s*v)+v_eps)) for p,m,v in zip(v_params_,v_m_li,v_v_li)]\n return r_grad + r_m + r_v + [(v_timestep, v_timestep+1)]\n\n","sub_path":"thul/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"263264655","text":"\"\"\"Assignment 2: Trees for Treemap\n\n=== CSC148 Fall 2016 ===\nDiane Horton and David Liu\nDepartment of Computer Science,\nUniversity of Toronto\n\n=== Module Description ===\nThis module contains the basic tree interface required by the treemap\nvisualiser. 
You will both add to the abstract class, and complete a\nconcrete implementation of a subclass to represent files and folders on your\ncomputer's file system.\n\n\"\"\"\nimport os\nfrom random import randint\nimport math\n\n\nclass AbstractTree:\n \"\"\"A tree that is compatible with the treemap visualiser.\n\n This is an abstract class that should not be instantiated directly.\n\n You may NOT add any attributes, public or private, to this class.\n However, part of this assignment will involve you adding and implementing\n new public *methods* for this interface.\n\n === Public Attributes ===\n @type data_size: int\n The total size of all leaves of this tree.\n @type colour: (int, int, int)\n The RGB colour value of the root of this tree.\n Note: only the colours of leaves will influence what the user sees.\n\n === Private Attributes ===\n @type _root: obj | None\n The root value of this tree, or None if this tree is empty.\n @type _subtrees: list[AbstractTree]\n The subtrees of this tree.\n @type _parent_tree: AbstractTree | None\n The parent tree of this tree; i.e., the tree that contains this tree\n as a subtree, or None if this tree is not part of a larger tree.\n\n === Representation Invariants ===\n - data_size >= 0\n - If _subtrees is not empty, then data_size is equal to the sum of the\n data_size of each subtree.\n - colour's elements are in the range 0-255.\n\n - If _root is None, then _subtrees is empty, _parent_tree is None, and\n data_size is 0.\n This setting of attributes represents an empty tree.\n - _subtrees IS allowed to contain empty subtrees (this makes deletion\n a bit easier).\n\n - if _parent_tree is not empty, then self is in _parent_tree._subtrees\n \"\"\"\n def __init__(self, root, subtrees, data_size=0):\n \"\"\"Initialize a new AbstractTree.\n\n If is empty, is used to initialize this tree's\n data_size.\n Otherwise, the parameter is ignored, and this\n tree's data_size is computed from the data_sizes of the subtrees.\n\n If is not empty, should not be specified.\n\n This method sets the _parent_tree attribute for each subtree to self.\n\n A random colour is chosen for this tree.\n\n Preconditions:\n data_size >= 0\n\n Representation invariants:\n if is None, then parent is None, data_size is 0,\n and is an empty list.\n\n @type self: AbstractTree\n @type root: object\n @type subtrees: list[AbstractTree]\n @type data_size: int\n @rtype: None\n \"\"\"\n self._root = root\n self._subtrees = subtrees\n self._parent_tree = None\n\n # 1. Initialize self.colour and self.data_size, according to the\n # docstring.\n\n self.colour = (randint(0, 255), randint(0, 255), randint(0, 255))\n if self._subtrees == []:\n self.data_size = data_size\n else:\n self.data_size = 0\n for i in subtrees:\n self.data_size += i.data_size\n\n # 2. 
Properly set all _parent_tree attributes in self._subtrees\n\n for st in self._subtrees:\n st._parent_tree = self\n\n def is_empty(self):\n \"\"\"Return True if this tree is empty.\n\n @type self: AbstractTree\n AbstractTree which to check\n @rtype: bool\n Whether it's empty or not\n True if it is empty\n False if it isn't empty\n \"\"\"\n return self._root is None\n\n def generate_treemap(self, rect):\n \"\"\"Run the treemap algorithm on this tree and return the rectangles.\n\n Each returned tuple contains a pygame rectangle and a colour:\n ((x, y, width, height), (r, g, b)).\n\n One tuple should be returned per non-empty leaf in this tree.\n\n If the tree has size 0,\n return an empty list.\n\n If the tree is just a single leaf with positive size,\n return a list containing just the single rectangle that covers the\n whole display area, as well as the colour of that leaf.\n\n Otherwise,\n if the display area's width is greater than its height:\n divide the display area into vertical rectangles, one smaller\n rectangle per non-zero-sized subtree, in proportion to the sizes of\n the subtrees.\n\n If the height is greater than or equal to the width,\n then make horizontal rectangles instead of vertical\n ones, and do the analogous operations as above.\n\n @type self: AbstractTree\n @type rect: (int, int, int, int)\n Input is in the pygame format: (x, y, width, height)\n @rtype: list[((int, int, int, int), (int, int, int))]\n \"\"\"\n\n if self.is_empty():\n return []\n elif self.data_size == 0:\n return []\n elif self._subtrees == [] and self.data_size > 0:\n return [(rect, self.colour)]\n\n n_empty = self.get_non_empty_leaves()\n final_rect_pos = find_last(n_empty)\n lst = []\n x, y, width, height = rect\n if width > height:\n # divide the rectangle into vertical strips\n offset1 = 0\n for i in range(0, len(n_empty)):\n if n_empty[i].data_size > 0 and i != final_rect_pos:\n fraction = n_empty[i].data_size / self.data_size\n adj_width = int(math.floor(fraction * width))\n lst += [n_empty[i].generate_treemap((x + offset1, y,\n adj_width, height))]\n offset1 += adj_width\n elif i == final_rect_pos:\n # if it is the last one, make it cover the remaining area\n adj_width = width - offset1\n lst += [n_empty[i].generate_treemap((x + offset1, y,\n adj_width, height))]\n else:\n # width <= height\n # divide the rectangle into horizontal strips\n offset2 = 0\n for i in range(0, len(n_empty)):\n if n_empty[i].data_size > 0 and i != final_rect_pos:\n fraction = n_empty[i].data_size / self.data_size\n adj_height = int(math.floor(fraction * height))\n lst += [n_empty[i].generate_treemap((x, y + offset2,\n width, adj_height))]\n offset2 += adj_height\n elif i == final_rect_pos:\n # if it is the last one, make it cover the remaining area\n adj_height = height - offset2\n lst += [n_empty[i].generate_treemap((x, y + offset2,\n width, adj_height))]\n return extract_nested(lst)\n\n # Helpers used for treemap_visualizer =====================================\n def get_non_empty_leaves(self):\n \"\"\"\n Finds all the non-empty leaves in self._subtrees and returns them in a\n list\n @type self: AbstractTree\n @rtype: list[AbstractTree]\n \"\"\"\n result = []\n for i in self._subtrees:\n if not i.is_empty() and i.data_size != 0:\n result.append(i)\n return result\n\n def delete_leaf(self):\n \"\"\"\n Deletes the leaf from the tree (i.e. 
make the leaf an empty tree) and\n updates the data_size attributes of all ancestors\n\n === Preconditions: ===\n self is a leaf\n\n @type self: AbstractTree\n @rtype: None\n \"\"\"\n self.update_data_size(0 - self.data_size)\n self.data_size = 0\n self._parent_tree = None\n self._subtrees = []\n self._root = None\n\n def increase_data_size(self):\n \"\"\"\n Increases the data size of the selected leaf by 1%\n Also applies changes to the data sizes of the leaf's ancestors\n\n @type self: AbstractTree\n @rtype: None\n \"\"\"\n self.update_data_size(int(math.ceil(self.data_size * 0.01)))\n\n def decrease_data_size(self):\n \"\"\"\n Decreases the data size of the selected leaf by 1% to a minimum of 1\n Also applies changes to the data sizes of the leaf's ancestors\n\n @type self: AbstractTree\n @rtype: None\n \"\"\"\n new_data_size = self.data_size - math.ceil(self.data_size * 0.01)\n if new_data_size >= 1:\n self.update_data_size(0 - int(math.ceil(self.data_size * 0.01)))\n else:\n pass\n\n def update_data_size(self, size_increment):\n \"\"\"\n Updates the data size of self and all of its ancestors\n\n === Representation invariants ===\n size_increment < 0, decrease size\n size > 0, increase size\n =================================\n\n @type self: AbstractTree\n @type size_increment: int\n @rtype: None\n \"\"\"\n if self is None:\n pass\n elif self.is_empty():\n pass\n elif self._parent_tree is None:\n self.data_size += size_increment\n else:\n self.data_size += size_increment\n self._parent_tree.update_data_size(size_increment)\n\n def generate_leafmap(self, rect):\n \"\"\"Run the treemap algorithm on this tree and return the leaves.\n Note: The leaves are in the exact same order as the rectangles.\n\n @type self: AbstractTree\n @type rect: (int, int, int, int)\n Input is in the pygame format: (x, y, width, height)\n Rectangle which to draw the leaves onto\n @rtype: list[AbstractTree]\n List of leaves (in the same order as generate_treemap rectangles)\n \"\"\"\n if self.is_empty():\n return []\n elif self.data_size == 0:\n return []\n elif self._subtrees == [] and self.data_size > 0:\n return [self]\n\n n_empty = self.get_non_empty_leaves()\n lst = []\n x, y, width, height = rect\n if width > height:\n offset1 = 0\n for i in range(0, len(n_empty)):\n if i < len(n_empty) - 1:\n fraction = n_empty[i].data_size / self.data_size\n adj_width = math.floor(fraction * width)\n lst += [n_empty[i].generate_leafmap((x + offset1, y,\n adj_width, height))]\n offset1 += adj_width\n else:\n adj_width = width - offset1\n lst += [n_empty[i].generate_leafmap((x + offset1, y,\n adj_width, height))]\n else:\n offset2 = 0\n for i in range(0, len(n_empty)):\n if i < len(n_empty) - 1:\n fraction = self._subtrees[i].data_size / self.data_size\n adj_height = math.floor(fraction * height)\n lst += [n_empty[i].generate_leafmap((x, y + offset2,\n width, adj_height))]\n offset2 += adj_height\n else:\n adj_height = height - offset2\n lst += [n_empty[i].generate_leafmap((x, y + offset2,\n width, adj_height))]\n return extract_nested(lst)\n\n def find_leaf(self, coordinates, width, height):\n \"\"\"\n Finds the leaf containing the coordinates (specified by a tuple)\n\n @type self: AbstractTree\n @type coordinates: tuple(int, int)\n @type width: int\n @type height: int\n @rtype: AbstractTree\n \"\"\"\n map_ = self.generate_treemap((0, 0, width, height))\n leaves_map = self.generate_leafmap((0, 0, width, height))\n x, y = coordinates\n for i in range(0, len(map_)):\n if map_[i][0][0] <= x and (map_[i][0][0] + map_[i][0][2]) 
>= x and \\\n map_[i][0][1] <= y and \\\n (map_[i][0][1] + map_[i][0][3]) >= y:\n # check if x and y coordinates are both inside the rectangle\n # find the fraction of the total area that this represents, and\n # based on that, find the corresponding file/folder\n return leaves_map[i]\n\n def __eq__(self, other):\n \"\"\"\n Sees if two leaves are equal\n\n === Precondition: ===\n self is a leaf and self._subtrees is []\n other is a leaf and other._subtrees is []\n\n @type self: AbstractTree\n first leaf of two to be compared\n @type other: AbstractTree\n second leaf of two to be compared\n @rtype: bool\n \"\"\"\n return self._root == other.get_root() and \\\n self._parent_tree == other._parent_tree\n\n def get_root(self):\n \"\"\"\n Gets the root of a tree without having to explicity access the private\n attribute\n @type self: AbstractTree\n @rtype: None\n \"\"\"\n return self._root\n\n # ========================================================================\n\n # To be implemented in subclasses, do nothing here\n def get_separator(self):\n \"\"\"Return the string used to separate nodes in the string\n representation of a path from the tree root to a leaf.\n\n Used by the treemap visualiser to generate a string displaying\n the items from the root of the tree to the currently selected leaf.\n\n This is overridden by each AbstractTree subclass, to customize\n how these items are separated for different data domains.\n\n @type self: AbstractTree\n @rtype: str\n \"\"\"\n raise NotImplementedError\n\n\nclass FileSystemTree(AbstractTree):\n \"\"\"A tree representation of files and folders in a file system.\n\n The internal nodes represent folders, and the leaves represent regular\n files (e.g., PDF documents, movie files, Python source code files, etc.).\n\n The _root attribute stores the *name* of the folder or file, not its full\n path. 
E.g., store 'assignments', not '/Users/David/csc148/assignments'\n\n The data_size attribute for regular files as simply the size of the file,\n as reported by os.path.getsize.\n\n === Inherited attributes ===\n @type _root: str\n Name of dir/file\n @type _subtrees: list[FileSystemTree]\n Everything inside dir (empty list for files)\n @type _data_size: int\n Size of everything contained in dir (file size for files)\n @type _parent_tree: FileSystemTree\n The parent FileSystemTree, aka the parent directory (automatically set)\n \"\"\"\n def __init__(self, path):\n \"\"\"Store the file tree structure contained in the given file or folder.\n\n Precondition: is a valid path for this computer.\n\n @type self: FileSystemTree\n @type path: str\n @rtype: None\n \"\"\"\n # # os.path.isdir\n # # os.listdir\n # # os.path.join\n # # os.path.getsize\n # # os.path.basename\n\n if not os.path.isdir(path):\n AbstractTree.__init__(self, os.path.basename(path), [],\n os.path.getsize(path))\n else:\n # don't specify size (auto calculated)\n subtrees = []\n for d in os.listdir(path):\n subtrees += [FileSystemTree(os.path.join(path, d))]\n AbstractTree.__init__(self, os.path.basename(path), subtrees)\n\n def get_separator(self):\n \"\"\"Return the string used to separate nodes in the string\n representation of a path from the tree root to a leaf.\n\n Used by the treemap visualiser to generate a string displaying\n the items from the root of the tree to the currently selected leaf.\n\n === Preconditions: ===\n self is a leaf, self._subtrees = []\n\n @type self: FileSystemTree\n The leaf which to return the path of\n @rtype: str\n The path of the leaf\n \"\"\"\n if self is None:\n return ''\n elif self._parent_tree is None:\n return self._root\n else:\n p = os.path.join(self._parent_tree.get_separator(), self._root)\n return p\n\n\n# Helpers for AbstractTree ===================================================\n\ndef extract_nested(nested_lst):\n \"\"\"\n Extract a list from a nested list. Does not mutate the original list\n @type nested_lst: list[object|list]\n @rtype: list[object]\n \"\"\"\n if not isinstance(nested_lst, list):\n return nested_lst\n else:\n lst = []\n for i in range(0, len(nested_lst)):\n if isinstance(nested_lst[i], list):\n lst += extract_nested(nested_lst[i])\n else:\n lst += [nested_lst[i]]\n return lst\n\n\ndef find_last(lst):\n \"\"\"\n Find the last non-zero sized leaf in a list of subtrees and\n return its position\n @type lst: list[AbstractTree]\n @rtype: int\n \"\"\"\n result = len(lst)-1\n for i in range(len(lst)-1, -1):\n if lst[i].is_empty() or lst[i].data_size == 0:\n result = i\n else:\n return result\n return result\n\n# ============================================================================\n\nif __name__ == '__main__':\n import python_ta\n # Remember to change this to check_all when cleaning up your code.\n python_ta.check_errors(config='pylintrc.txt')\n python_ta.check_all(config='pylintrc.txt')\n","sub_path":"tree_data.py","file_name":"tree_data.py","file_ext":"py","file_size_in_byte":17913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"3071679","text":"from docopt import docopt\nimport geopandas as gpd\nimport pandas as pd\nimport random\n\n\ndef sample_rvps(cdb, STATE_CODE, DEM_RVPS, REP_RVPS, SAMPLE_SIZE):\n \"\"\"\n This function reads in three files: congressional data, and two RVP files.\n It finds all the RVPs within the state boundaries and takes a random\n sample. 
Returns a DataFrame of RVPs with geometry in equidistant projection.\n \"\"\"\n\n dems = gpd.read_file(DEM_RVPS)\n reps = gpd.read_file(REP_RVPS)\n\n dems[\"party\"] = \"D\"\n reps[\"party\"] = \"R\"\n\n # Append Democrats to Republicans to get all VRPs\n all_rvps = dems.append(reps)\n all_rvps[\"pd\"] = 0\n all_rvps.to_file(\"all_rvps_before_conversion.shp\")\n\n # Make a copy so we do not set values on a copy of a slice\n # We will set values of democrat and republican later\n cdb_state = cdb.query(f\"STATEFP == '{STATE_CODE}'\").copy()\n\n # Convert to WGS84\n # all_rvps = all_rvps.to_crs({\"init\": \"epsg:4326\"})\n all_rvps = all_rvps.to_crs(\"EPSG:4326\")\n all_rvps.to_file(\"all_rvps.shp\")\n # print(all_rvps[:10])\n\n # Do a spatial join to get all the RVPs that are in the state\n # All the RVPs should be in the state\n points_in_state = gpd.sjoin(all_rvps, cdb_state, how=\"inner\", op=\"within\")\n # print(points_in_state[:10])\n\n # Now that we've got all the points in the state, let's try and convert back\n # to an equidistant projection\n\n points_in_state_eqd = points_in_state.to_crs(\n {\n \"proj\": \"eqdc\",\n \"lat_0\": 0,\n \"lon_0\": 0,\n \"lat_1\": 20,\n \"lat_2\": 60,\n \"x_0\": 0,\n \"y_0\": 0,\n \"datum\": \"NAD83\",\n \"units\": \"m\",\n \"no_defs\": True,\n }\n )\n\n # Now that we have the equidistant projection, let's try to calculate\n # Euclidean distances to and from all points.\n points_in_state_eqd.to_file(\"points_before_downsampling.shp\")\n\n # Downsample and return\n points_downsampled = points_in_state_eqd.sample(SAMPLE_SIZE, random_state=0)\n points_downsampled = points_downsampled.sort_index()\n points_downsampled.reset_index(inplace=True)\n\n return points_downsampled\n","sub_path":"build/lib/sample_rvps/sample_rvps.py","file_name":"sample_rvps.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"505730863","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n # A simplified version of my last submission\n if len(strs) <= 1: return [strs]\n \n seen = {}\n \n for string in strs:\n sortStr = ''.join(sorted(string))\n if sortStr not in seen:\n seen[sortStr] = []\n seen[sortStr].append(string)\n \n return seen.values()\n \n \n # TC: O(n*klogk)\n # k is ave size of string in strs, klogk is .sort() TC\n # n is the length of the strs\n \n # SC: O(n*k)\n # worst case, dict will have n entries and each entry store a size k string\n # in this problem, dict{key:string} is different from key:int\n # the former one take O(nk) where the later takes O(n)\n # another angle to consider this problem, the SC of strs is O(nk)\n # as the dict stores the same amount of information, thus the SC should be the same \n","sub_path":"49_GroupAnagrams/49_GroupAnagrams_2.py","file_name":"49_GroupAnagrams_2.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257507934","text":"# Function to sort and merge two sorted lists and to calculate the number of \n# cross inversions\ndef merge(Half1,Half2):\n Numbers = []\n Half1len = len(Half1)\n count = 0\n crossInversions = 0\n while(True):\n # When one of the given lists is empty, add all the elements of the \n # other list to the end of the sorted list of numbers\n if len(Half1) == 0 or len(Half2) == 0:\n if len(Half1) == 0:\n Numbers.extend(Half2)\n break\n # Increment the number of cross inversion by the number 
of elements left\n # in the first list as each of them comes after the elements in the second\n # list in the sorted order\n else:\n crossInversions += len(Half1)\n Numbers.extend(Half1)\n break\n # If none of the lists are empty, pop the smallest element from the two lists\n # and append it to the sorted list of numbers\n else:\n # If the second list has the smallest element, add the number of number of \n # remaining elements in the first list to the cross inversions count\n if Half1[0] > Half2[0]:\n crossInversions += Half1len - count\n Numbers.append(Half2.pop(0))\n else:\n Numbers.append(Half1.pop(0))\n count += 1\n return Numbers,crossInversions\n\n# Function that sorts the given list using mergesort. Returns the sorted list and the \n# total number of inversions\ndef MergeSort(Numbers):\n # Base case: if the size of the list passed is 0\n if len(Numbers) == 1:\n return Numbers,0\n # If the list has two elements\n elif len(Numbers) == 2:\n # Return as it is if the numbers are already sorted\n if Numbers[0] <= Numbers[1]:\n return Numbers,0\n # Swap the elements if they are not sorted and return 1 as there is 1 inversion\n else:\n temp = Numbers.pop()\n Numbers.insert(0,temp)\n return Numbers,1\n # Break the list into smaller lists of roughly half the number of elements\n # and recursively sort them. Then using the merge function, merge and calculate\n # the number of cross inversions. Return the sorted list and the total number of \n # inversions\n else:\n UnsortedHalf1 = [Numbers[i] for i in range(0,int(len(Numbers)/2))]\n UnsortedHalf2 = [Numbers[i] for i in range(int(len(Numbers)/2),len(Numbers))]\n SortedHalf1,leftInversions = MergeSort(UnsortedHalf1)\n SortedHalf2,rightInversions = MergeSort(UnsortedHalf2)\n Numbers,crossInversions = merge(SortedHalf1,SortedHalf2)\n return Numbers, (leftInversions+rightInversions+crossInversions)","sub_path":"MergeSort/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"290329554","text":"'''\nCreated on 25 Apr 2012\n\n@author: eeaston\n'''\nimport os\nimport ConfigParser\nimport sys\nimport socket\n\nfrom path import path\n\nfrom server import HTTPTestServer\n\n\nclass PyramidTestServer(HTTPTestServer):\n port_seed = 65532\n\n def __init__(self, **kwargs):\n self.config = None\n self.original_config = None\n\n # Always print debug output for this process\n os.environ['DEBUG'] = '1'\n\n # Discover externally accessable hostname so selenium can get to it\n kwargs['hostname'] = kwargs.get('hostname', socket.gethostbyname(os.uname()[1]))\n\n super(PyramidTestServer, self).__init__(**kwargs)\n\n def pre_setup(self):\n \"\"\" Make a copy of the development and testing ini file and set the port number and host\n \"\"\"\n # We need the development.ini as well here as they are chained\n dev_cfg = path(os.getcwd()) / 'development.ini'\n dev_cfg_copy = self.workspace / 'development.ini'\n path.copy(dev_cfg, dev_cfg_copy)\n\n self.original_config = path(os.getcwd()) / 'testing.ini'\n self.config = self.workspace / 'testing.ini'\n path.copy(self.original_config, self.config)\n\n parser = ConfigParser.ConfigParser()\n parser.read(self.original_config)\n parser.set('server:main', 'port', self.port)\n parser.set('server:main', 'host', self.hostname)\n with self.config.open('w') as fp:\n parser.write(fp)\n\n # Set the uri to be the external hostname and the url prefix\n self._uri = \"http://%s:%s/%s\" % (os.uname()[1], 
self.port, parser.get('app:main', 'url_prefix'))\n\n @property\n def run_cmd(self):\n return [path(sys.exec_prefix) / 'bin' / 'pserve', self.config]\n","sub_path":"pkglib/testing/pyramid_server.py","file_name":"pyramid_server.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"244839576","text":"# coding=utf-8\nimport copy\nimport re\n\nimport jieba\nimport jieba.analyse\nimport numpy as np\nimport pymysql\nimport time\nfrom aip import AipNlp\nimport datetime\n\n\nclass Sentiment:\n def __init__(self):\n self.db = pymysql.connect(\"localhost\", \"xuanchuanbu\", \"xuanchuanbu\",\n \"xuanchuanbu\", 3306, charset='utf8mb4')\n self.cursor = self.db.cursor()\n\n def insert_into_user(self, monitor_user_list):\n sql = \"INSERT INTO monitor_user(`uid`, `mid`, `name`, `gender`, `type`, \" \\\n \"`follow_num`, `fan_num`, `level`, `address`, `school`, `introduction`, \" \\\n \"`v_flag`, `v_info`, `img_url`, `last_time`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,\" \\\n \"%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE`name`=VALUES(`name`),\" \\\n \"`gender`=VALUES(`gender`),`type`=VALUES(`type`),`follow_num`=VALUES(\" \\\n \"`follow_num`),`fan_num`=VALUES(`fan_num`),`level`=VALUES(`level`),\" \\\n \"`address`=VALUES(`address`),`school`=VALUES(`school`),`introduction`=VALUES(\" \\\n \"`introduction`),`v_flag`=VALUES(`v_flag`),`v_info`=VALUES(`v_info`),\" \\\n \"`img_url`=VALUES(`img_url`),`last_time`=VALUES(`last_time`)\"\n self.cursor.executemany(sql, monitor_user_list)\n self.db.commit()\n\n def insert_into_user(self, articles):\n print(u'正在插入articles表')\n sql = \"INSERT INTO test_article(`aid`, `uid`, `rdate`, `likenum`, `retweet`, \" \\\n \"`comment`, \" \\\n \"`full_text`, `url`,`img`,`video`, `tool`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,\" \\\n \"%s,%s,%s)\"\n self.cursor.executemany(sql, articles)\n self.db.commit()\n\n def insert_into_articles(self):\n print(u'正在插入articles表')\n sql = \"INSERT INTO articles(`aid`, `uid`, `mid`, `title`, `rdate`, `summary`, \" \\\n \"`full_text`, `url`, `relate_tju`,`tool`, `p_or_n`) VALUES (%s,%s,%s,%s,%s,%s,\" \\\n \"%s,%s,%s,%s,%s)ON DUPLICATE KEY UPDATE`title`=VALUES(`title`),`rdate`=VALUES(\" \\\n \"`rdate`),`summary`=VALUES(`summary`),`full_text`=VALUES(`full_text`),\" \\\n \"`url`=VALUES(`url`),`relate_tju`=VALUES(`relate_tju`),`tool`=VALUES(`tool`),\" \\\n \"`p_or_n`=VALUES(`p_or_n`)\"\n self.cursor.executemany(sql, self.article_list)\n self.db.commit()\n\n def insert_into_article_keywords(self):\n print(u'正在插入articles_keywords表')\n sql = \"REPLACE INTO articles_keywords(aid, word, frequency) VALUES (%s,%s,%s)\"\n self.cursor.executemany(sql, self.aid_keyword_list)\n self.db.commit()\n\n # 提取文本关键字计算权重\n def extract_keyword(self):\n self.aid_keyword_list = []\n jieba.analyse.set_stop_words(\"./spider/spiders/停用词.txt\")\n for i in range(len(self.aid_text_list_process)):\n tags = jieba.analyse.extract_tags(self.aid_text_list[i][1], topK=10,\n withWeight=True,\n allowPOS=('ns', 'n', 'vn', 'v'))\n for j in range(len(tags)):\n temp = [self.aid_text_list[i][0], tags[j][0], tags[j][1]]\n aid_keyword = copy.copy(temp)\n self.aid_keyword_list.append(aid_keyword)\n\n # 读取文件,生成三个列表,urls代表生成的全文列表,用作情感分析\n # total_data代表转换格式后的数据(日期,relate_tju)\n # aid_text_list代表文章id和文本的列表,为生成关键字的数据列表\n def output(self, article_list):\n self.article_list = article_list\n self.urls = []\n self.total_data = []\n self.aid_text_list = []\n for row in article_list:\n temp = [row[0], row[6]]\n aid_text = 
copy.copy(temp)\n self.aid_text_list.append(aid_text)\n self.urls.append(row[6])\n # print(type(row[6]).__name__)\n # str_date = re.search(\"[0-9]+.*[0-9]$\", row[4]).group()\n # row[4] = datetime.strptime(str_date, \"%Y-%m-%d %H:%M\") # 转换日期格式\n list1 = copy.copy(row)\n self.total_data.append(list1)\n\n def sentimentClassify(self):\n \"\"\" 你的 APPID AK SK \"\"\"\n # 利用百度云提供的API接口实现情感分析\n APP_ID = '16021158' # 用自己申请的百度极性分析api\n API_KEY = 'mHcRxtN067mbZ4fqybaCVtUW'\n SECRET_KEY = 'ElorKISgn0dCP5q2SQjeDe2Ozg5TmO89'\n client = AipNlp(APP_ID, API_KEY, SECRET_KEY)\n\n self.pp = []\n self.total_data_process = []\n self.aid_text_list_process = []\n for i in range(0, len(self.urls)):\n text = self.urls[i]\n # print(text)\n # 通过百度提供的接口方法进行情感倾向提取\n try:\n result = client.sentimentClassify(text)\n self.total_data_process.append(self.total_data[i])\n self.aid_text_list_process.append(self.aid_text_list[i])\n except:\n print(\"数据格式出错,这条数据内容为:\")\n self.pp.append(0)\n # print(result)\n else:\n # 如果解析错误则填写上空值,使得程序不会出错而停止运行\n if \"error_code\" in result.keys():\n print(result)\n pp_sentences = []\n np_sentences = []\n sentences_list = re.split(\"。\", text) # 减小句子长度,防止解析错误\n sentences_size = len(sentences_list) - 1\n flag = 0\n for j in range(sentences_size):\n # print(text)\n # 通过百度提供的接口方法进行情感倾向提取\n result = client.sentimentClassify(text[j])\n if \"error_code\" in result.keys():\n flag = 1 # 解析错误直接中断循环\n break\n else:\n data = result['items']\n items = data[0]\n pp_sentences.append(items['positive_prob'])\n np_sentences.append(items['negative_prob'])\n if flag == 0:\n positive_prob_result = np.mean(pp_sentences)\n negative_prob_result = np.mean(np_sentences)\n if positive_prob_result > negative_prob_result:\n self.pp.append(1)\n else:\n self.pp.append(-1)\n else:\n self.pp.append(0)\n else:\n data = result['items']\n items = data[0]\n positive_prob = items['positive_prob']\n negative_prob = items['negative_prob']\n if positive_prob > negative_prob:\n self.pp.append(1)\n else:\n self.pp.append(-1)\n time.sleep(0.2)\n\n def re_combine_data(self):\n for i in range(0, len(self.pp)):\n # total_data_process[i].insert(0, i + 1) # 向数据列表中插入主键,1,2,3,。。。\n j = self.pp[i]\n self.article_list[i].append(j) # 向数据列表中插入极性分析的结果\n\n def getWeiboUsers(self):\n sql = \"\"\"\n SELECT\n\t\tt.uid,\n\t\tt.mid,\n\t\t`name`,\n\t\tgender,\n\t\t`type`,\n\t\tfollow_num,\n\t\tfan_num,\n\t\t`level`,\n\t\taddress,\n\t\tschool,\n\t\tintroduction,\n\t\tv_flag,\n\t\tv_info,\n\t\timg_url,\n t.last_time,\n\t\tff + af + IFNULL(`if`, 0) AS activity\n\t\tFROM\n\t\t(\n\t\tSELECT\n\t\tmonitor_user.id,\n\t\tmonitor_user.uid,\n\t\tmonitor_user.mid,\n\t\tmonitor_user.name,\n\t\tmname,\n\t\tmonitor_user.gender,\n\t\tmonitor_user.type,\n\t\tmonitor_user.follow_num,\n\t\tmonitor_user.fan_num,\n\t\tmonitor_user.level,\n\t\tmonitor_user.address,\n\t\tmonitor_user.school,\n\t\tmonitor_user.introduction,\n\t\tmonitor_user.v_flag,\n\t\tmonitor_user.v_info,\n\t\tmonitor_user.img_url,\n monitor_user.last_time,\n\t\tCOUNT(aid) AS article_num,\n\t\tIFNULL(monitor_user.fan_num - temp_monitor_user.fan_num,0) AS `ff`,\n\t\tIFNULL(monitor_user.follow_num - temp_monitor_user.follow_num,0) AS `af`,\n\t\tmonitor_user.follow_num + monitor_user.fan_num AS `influence`\n\t\tFROM\n\t\tmonitor_user\n\t\tJOIN medias ON monitor_user.mid = medias.mid\n\t\tLEFT JOIN articles ON monitor_user.uid = articles.uid AND monitor_user.mid =\n\t\tarticles.mid\n\t\tLEFT JOIN temp_monitor_user ON monitor_user.uid = temp_monitor_user.uid AND\n\t\tmonitor_user.mid = 
temp_monitor_user.mid\n\t\tGROUP BY\n\t\tmonitor_user.uid,\n\t\tmonitor_user.mid\n\t\t) t\n\t\tLEFT JOIN(\n\t\tSELECT\n\t\tmonitor_user.id,\n\t\tCOUNT(aid) AS `if`\n\t\tFROM\n\t\tmonitor_user\n\t\tLEFT JOIN articles ON monitor_user.uid = articles.uid AND monitor_user.mid =\n\t\tarticles.mid\n\t\tWHERE\n\t\tDATE(rdate) >= DATE_SUB(CURDATE(), INTERVAL 1 DAY)\n\t\tGROUP BY\n\t\tmonitor_user.id) tt\n\t\tON\n\t\tt.id = tt.id\n\t\tLEFT JOIN(\n\t\tSELECT\n\t\tmonitor_user.id,\n\t\tCOUNT(aid) AS `article_negative_num`\n\t\tFROM\n\t\tmonitor_user\n\t\tLEFT JOIN articles ON monitor_user.uid = articles.uid AND monitor_user.mid =\n\t\tarticles.mid\n\t\tWHERE\n\t\tp_or_n=-1 AND relate_tju=1\n\t\tGROUP BY\n\t\tmonitor_user.id\n\t\t) ttt\n\t\tON t.id=ttt.id\n\t\tWHERE t.mid=1\n\t\tORDER BY activity DESC\n \"\"\"\n self.cursor.execute(sql)\n rows = self.cursor.fetchall()\n # userList = []\n # for row in rows:\n # userList.append(row[0])\n # return userList\n return rows\n\n def getWeiboArticles(self):\n sql = \"SELECT aid FROM articles WHERE mid=1\"\n self.cursor.execute(sql)\n rows = self.cursor.fetchall()\n articleList = []\n for row in rows:\n articleList.append(row[0])\n return articleList\n\n def analyze_article(self, article_list):\n self.output(article_list)\n self.sentimentClassify()\n self.re_combine_data()\n self.extract_keyword()\n self.insert_into_articles()\n self.insert_into_article_keywords()\n\n\nif __name__ == '__main__':\n sentiment = Sentiment()\n articles = sentiment.cursor.execute(\n 'SELECT `rdate`, `likenum`, `retweet`, `comment`, `full_text`, `url`, `img`, '\n '`video`, `tool` FROM `test_article` GROUP BY aid ORDER BY rdate DESC')\n import xlwt\n\n results = sentiment.cursor.fetchall()\n fields = sentiment.cursor.description\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet('sheet',cell_overwrite_ok=True)\n\n for field in range(0, len(fields)):\n sheet.write(0, field, fields[field][0])\n\n row = 1\n col = 0\n for row in range(1, len(results) + 1):\n for col in range(0, len(fields)):\n sheet.write(row, col, u'%s' % results[row - 1][col])\n\n workbook.save('out.xls')\n print(articles)\n","sub_path":"useless/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":10882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"461124570","text":"from django.utils.translation import gettext_lazy as _\nfrom django.db import models\n\nfrom PIL import Image\n\nfrom game.storage import OverwriteStorage\n\ndef character_directory_path(instance, filename):\n # Note the \"s\" at the end making it plural\n filename = f\"{instance.name}\" + \".\" + filename.split(\".\").pop()\n return f\"characters/{instance.type}s/{filename}\"\n\ndef template_directory_path(instance, filename):\n filename = f\"{instance.name}\" + \".\" + filename.split(\".\").pop()\n return f\"templates/characters/{filename}\"\n\nclass Character(models.Model):\n class Type(models.TextChoices):\n SURVIVOR = \"Survivor\", _(\"Survivor\")\n KILLER = \"Killer\", _(\"Killer\")\n\n name = models.CharField(max_length=255, unique=True)\n type = models.CharField(choices=Type.choices, max_length=15)\n is_licensed = models.BooleanField(\n default=False,\n verbose_name=\"Licensed Character?\"\n ) \n \n # Template overlay that will be put overlay the bg+border layer \n # May or may not be transparent bg\n template = models.ImageField(\n upload_to=template_directory_path,\n storage=OverwriteStorage(),\n blank=True,\n null=True\n )\n\n # Storage allows for existing 
images to be overwritten\n image = models.ImageField(\n upload_to=character_directory_path, \n storage=OverwriteStorage(),\n blank=True,\n null=True\n )\n\n class Meta:\n ordering = [\"type\", \"name\"]\n\n def __str__(self):\n return f\"[{self.type}] {self.name}\" \n\n def save(self, *args, **kwargs):\n super(Character, self).save(*args, **kwargs)\n \n # if self.image:\n # image = Image.open(self.image)\n # left = 0\n # top = 0\n # right = 256\n # bottom = 256\n \n # size = (left, top, right, bottom)\n # image = image.crop(size)\n # image.save(self.image.path)\n\n # if self.template:\n # image = Image.open(self.template)\n # image.save(self.template.path) \n","sub_path":"server/game/models/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"479217482","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport socket\nimport sys\n\nimport time\nimport pickle\nimport threading\nimport keyboard\nfrom datetime import datetime\nfrom pygame.locals import *\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\n\nclass Window(QWidget):\n\tdef __init__(self,socket):\n\t\tsuper().__init__()\n\t\tself.title = 'PyQt5 button - pythonspot.com'\n\t\tself.left = 10\n\t\tself.top = 10\n\t\tself.width = 320\n\t\tself.height = 200\n\t\tself.socket = socket\n\t\tself.initUI()\n\n\tdef initUI(self):\n\t\tself.setWindowTitle(self.title)\n\t\tself.setGeometry(self.left, self.top, self.width, self.height)\n\n\t\tbutton = QPushButton('Pause', self)\n\t\tbutton.setToolTip('This is an example button')\n\t\tbutton.move(100,70)\n\t\tbutton.clicked.connect(self.on_click)\n\n\t\tself.show()\n\n\t@pyqtSlot()\n\tdef on_click(self):\n\t\tself.socket.send(\"PAUSE\".encode())\n\nclass Client:\n\t\"\"\"Classe que consome e requisita o video que sera\n\tenviado pelo servidor.\n\n\tVariaveis da classe:\n\tip = Ipv4 do servidor\n\tport = Porta do servidor\n\tpackage_size = Tamanho do pacote UDP\n\tsock = Socket utilizado pelo client\n\tcurrentTime = Armazena o tempo atual\n\tpackages_ord_size = De quantos em quantos pacotes\n\tvao ordenar\n\t\"\"\"\n\tdef __init__(self,ip=\"localhost\",port=3236,package_size=3072,packages_ord_size=300):\n\t\t\"\"\"Inicia as variaveis com os seguintes valores(default):\n\t\tip = localhost\n\t\tport = 3232\n\t\tpackage_size = 3072\n\t\tsock = cria socket IPV4 e UDP\n\t\tcurrentTime = tempo atual\n\t\tpackages_ord_size = 100\n\t\t\"\"\"\n\t\tself.ip = ip\n\t\tself.port = port\n\t\tself.package_size = package_size\n\t\tself.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\t\tself.sockTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.currentTime = datetime.now()\n\t\tself.packages_ord_size = packages_ord_size\n\t\tself.t = None\n\t\tself.sockTCP.connect((self.ip,self.port+1))\n\n\tdef sendTime(self):\n\t\t\"\"\"Envia o seguinte pacote para o servidor:\n\t\tping [Tempo atual em microsegundos]\n\n\t\t\"\"\"\n\t\tself.currentTime = datetime.now()\n\t\tping = \" ping \"+str(self.currentTime.microsecond)\n\t\tself.sockTCP.send(ping.encode())\n\n\tdef threadUI(self,socket):\n\t\tprint(socket)\n\t\tapp = QApplication(sys.argv)\n\t\tex = Window(socket)\n\t\tsys.exit(app.exec_())\n\n\tdef reqStreamFile(self,fileR):\n\t\t\"\"\"Requisita o streaming de um arquivo para o servidor e ao receber\n\t\t\"printa\" os bytes no stdout\n\n\t\tParametros:\n\t\tfileR = Nome do arquivo 
desejado\n\t\t\"\"\"\n\t\tself.t = threading.Thread(target=self.threadUI,args=(self.sockTCP,))\n\t\tself.t.start()\n\n\t\tself.sendTime()\n\t\tself.sockTCP.send((\" file \"+fileR).encode())\n\t\tself.sock.sendto(\"\".encode(),(self.ip,self.port))\n\t\tdata, addr = self.sock.recvfrom(self.package_size,self.port)\n\n\t\ti = 0\n\t\tvet = []\n\t\tpackages = []\n\t\twhile data:\n\t\t\ti += 1\n\t\t\t# Picke carrega o pacote\n\t\t\t# vet = (BYTES,ID)\n\t\t\tvet = pickle.loads(data)\n\t\t\tpackages.append(vet)\n\t\t\t# Juntar pacotes = packages_ord_size\n\t\t\tif len(packages) < self.packages_ord_size:\n\t\t\t\tpackages.sort(key=lambda a: a[1])\n\t\t\telse:\n\t\t\t\t# Juntou os pacotes e envia\n\t\t\t\tfor p in packages:\n\t\t\t\t\tpass\n\t\t\t\t\t#sys.stdout.buffer.write(p[0])\n\t\t\t\tpackages = []\n\t\t\tdata, addr = self.sock.recvfrom(self.package_size)\n\t\t\t#Ping\n\t\t\tif i==1000:\n\t\t\t\tself.sendTime()\n\t\t\t\ti = 0\n\t\t# Confore se não ficou nada sem enviar\n\t\tfor p in packages:\n\t\t\tpass\n\t\t\t#sys.stdout.buffer.write(p[0])\n\n\nif __name__ == \"__main__\":\n\tnew = Client()\n\tif len(sys.argv) >= 3:\n\t\tnew.ip = sys.argv[2]\n\tif len(sys.argv) >= 4:\n\t\tnew.port = int(sys.argv[3])\n\tprint(\"Cliente Iniciado\")\n\tprint()\n\tprint(\"Arquivo requisitado : \"+sys.argv[1])\n\tprint(\"IP : \"+new.ip)\n\tprint(\"Porto : \"+str(new.port))\n\tnew.reqStreamFile(sys.argv[1])\n\tapp = QApplication(sys.argv)\n\tex = Window()\n\tsys.exit(app.exec_())\n","sub_path":"Streaming de Video UDP com Threads/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"69838644","text":"import six\n\nfrom graphite.errors import InputParameterError\nfrom graphite.render.attime import parseTimeOffset\nfrom graphite.logger import log\nfrom graphite.functions.aggfuncs import aggFuncs, aggFuncAliases\n\n\nclass ParamTypes(object):\n pass\n\n\nclass ParamType(object):\n options = []\n\n def __init__(self, name, validator=None):\n self.name = name\n self.validator = validator\n\n @classmethod\n def register(cls, name, *args):\n setattr(ParamTypes, name, cls(name, *args))\n\n def isValid(self, value):\n if self.validator is None:\n # if there's no validator for the type we assume True\n return True\n\n return self.validator(value)\n\n\ndef validateBoolean(value):\n return isinstance(value, bool)\n\n\ndef validateFloat(value):\n return isinstance(value, float) or validateInteger(value)\n\n\ndef validateInteger(value):\n return isinstance(value, six.integer_types)\n\n\ndef validateIntOrInf(value):\n return validateInteger(value) or value == float('inf')\n\n\ndef validateInterval(value):\n try:\n parseTimeOffset(value)\n except Exception:\n return False\n return True\n\n\ndef validateSeriesList(value):\n return isinstance(value, list)\n\n\nParamType.register('boolean', validateBoolean)\nParamType.register('date')\nParamType.register('float', validateFloat)\nParamType.register('integer', validateInteger)\nParamType.register('interval', validateInterval)\nParamType.register('intOrInterval')\nParamType.register('intOrInf', validateIntOrInf)\nParamType.register('node', validateInteger)\nParamType.register('nodeOrTag')\nParamType.register('series')\nParamType.register('seriesList', validateSeriesList)\nParamType.register('seriesLists', validateSeriesList)\nParamType.register('string')\nParamType.register('tag')\n\n# special type that accepts everything\nParamType.register('any')\n\n\nclass 
ParamTypeAggFunc(ParamType):\n\n def __init__(self, name, validator=None):\n if validator is None:\n validator = self.validateAggFuncs\n\n super(ParamTypeAggFunc, self).__init__(name=name, validator=validator)\n self.options = self.getValidAggFuncs()\n\n @classmethod\n def getValidAggFuncs(cls):\n return list(aggFuncs.keys()) + list(aggFuncAliases.keys())\n\n @classmethod\n def getDeprecatedAggFuncs(cls):\n return [name + 'Series' for name in cls.getValidAggFuncs()]\n\n @classmethod\n def getAllValidAggFuncs(cls):\n return cls.getValidAggFuncs() + cls.getDeprecatedAggFuncs()\n\n def validateAggFuncs(self, value):\n if value in self.getValidAggFuncs():\n return True\n\n if value in self.getDeprecatedAggFuncs():\n log.warning('Deprecated aggregation function \"{value}\" used'.format(value=value))\n return True\n\n return False\n\n\nParamTypeAggFunc.register('aggFunc')\n\n\nclass ParamTypeAggOrSeriesFunc(ParamTypeAggFunc):\n options = []\n\n def __init__(self, name, validator=None):\n if validator is None:\n validator = self.validateAggOrSeriesFuncs\n super(ParamTypeAggOrSeriesFunc, self).__init__(name=name, validator=validator)\n\n def setSeriesFuncs(self, funcs):\n # check for each of the series functions whether they have an 'aggregator'\n # property being set to 'True'. If so we consider them valid aggregators.\n for name, func in funcs.items():\n if getattr(func, 'aggregator', False) is not True:\n continue\n\n self.options.append(name)\n\n def validateAggOrSeriesFuncs(self, value):\n if self.validateAggFuncs(value):\n return True\n\n if value in self.options:\n return True\n\n return False\n\n\nParamTypeAggOrSeriesFunc.register('aggOrSeriesFunc')\n\n\nclass Param(object):\n __slots__ = ('name', 'type', 'required', 'default', 'multiple', '_options', 'suggestions')\n\n def __init__(self, name, paramtype, required=False, default=None, multiple=False, options=[],\n suggestions=None):\n self.name = name\n if not isinstance(paramtype, ParamType):\n raise Exception('Invalid type %s for parameter %s' % (paramtype, name))\n self.type = paramtype\n self.required = bool(required)\n self.default = default\n self.multiple = bool(multiple)\n self._options = options\n self.suggestions = suggestions\n\n @property\n def options(self):\n options = list(set(getattr(self, '_options', []) + getattr(self.type, 'options', [])))\n options.sort(key=str)\n return options\n\n def toJSON(self):\n jsonVal = {\n 'name': self.name,\n 'type': self.type.name,\n }\n if self.required:\n jsonVal['required'] = True\n if self.default is not None:\n jsonVal['default'] = self.default\n if self.multiple:\n jsonVal['multiple'] = True\n if self.options:\n jsonVal['options'] = self.options\n if self.suggestions:\n jsonVal['suggestions'] = self.suggestions\n return jsonVal\n\n def validateValue(self, value, func):\n # if value isn't specified and there's a default then the default will be used,\n # we don't need to validate the default value because we trust that it is valid\n if value is None and self.default is not None:\n return True\n\n # None is ok for optional params\n if not self.required and value is None:\n return True\n\n # parameter is restricted to a defined set of values, but value is not in it\n if self.options and value not in self.options:\n raise InputParameterError(\n 'Invalid option specified for function \"{func}\" parameter \"{param}\": {value}'.format(\n func=func, param=self.name, value=repr(value)))\n\n if not self.type.isValid(value):\n raise InputParameterError(\n 'Invalid \"{type}\" value specified for 
function \"{func}\" parameter \"{param}\": {value}'.format(\n type=self.type.name, func=func, param=self.name, value=repr(value)))\n\n return True\n\n\ndef validateParams(func, params, args, kwargs):\n valid_args = []\n\n if len(params) == 0 or params[len(params)-1].multiple is False:\n if len(args) + len(kwargs) > len(params):\n raise InputParameterError(\n 'Too many parameters specified for function \"{func}\"'.format(func=func))\n\n for i in range(len(params)):\n if len(args) <= i:\n # requirement is satisfied from \"kwargs\"\n value = kwargs.get(params[i].name, None)\n if value is None:\n if params[i].required:\n # required parameter is missing\n raise InputParameterError(\n 'Missing required parameter \"{param}\" for function \"{func}\"'.format(\n param=params[i].name, func=func))\n else:\n # got multiple values for keyword argument\n if params[i].name in valid_args:\n raise InputParameterError(\n 'Keyword parameter \"{param}\" specified multiple times for function \"{func}\"'.format(\n param=params[i].name, func=func))\n else:\n # requirement is satisfied from \"args\"\n value = args[i]\n\n params[i].validateValue(value, func)\n valid_args.append(params[i].name)\n\n return True\n","sub_path":"webapp/graphite/functions/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29768660","text":"# Standard import for pandas, numpy and matplot\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read in the csv file and display some of the basic info\ncrimes=pd.read_csv(\"dc_crime2018423.csv\",parse_dates=['START_DATE'])\n\nprint( \"Data types in the file:\")\nprint(crimes.dtypes)\nprint(\"Summary of the input file:\")\nprint(crimes.describe())\n\nprint(\"Basic UCR Rank stats:\")\nprint(crimes['ucr-rank'].describe())\n\n#from http://pbpython.com/simple-graphing-panddc_crime2018423.csvas.html\n\n# Filter the columns down to the ones we need to look at for customer sales\ncrimetypes = crimes[['WARD','offensegroup','ucr-rank','OFFENSE','START_DATE']]\nprint(crimetypes.head())\n\n#Group the customers by name and sum their sales\n#customer_group = customers.groupby('name')\n#sales_totals = customer_group.sum()\n\noffense_group = crimetypes.groupby('OFFENSE')\nprint(offense_group.size())\n\nucr_avg = offense_group.mean()\nucr_avg.sort_values(by='ucr-rank').head()\n\n\n# Create a basic bar chart for the sales data and show it\nbar_plot = ucr_avg.sort_values(by='ucr-rank',ascending=False).plot(kind='bar',legend=None,title=\"Average UCR Rank by Ward\")\nbar_plot.set_xlabel(\"Ward\")\nbar_plot.set_ylabel(\"UCR Rank (1-9)\")\nplt.show()\n\n# Do a similar chart but break down by category in stacked bars\n# Select the appropriate columns and group by name and category\ncrimetypes = crimes[['offensegroup','WARD','ucr-rank','START_DATE']]\nprint(crimetypes.head())\nward_group=crimetypes.groupby(['offensegroup','WARD']).sum()\nprint(ward_group.head(20))\n\n\n# Plot and show the stacked bar chart\nstack_bar_plot = ward_group.unstack().plot(kind='bar',stacked=True,title=\"Crimes by Offense Group by Ward\",figsize=(9, 6))\nstack_bar_plot.set_xlabel(\"Ward\")\nstack_bar_plot.set_ylabel(\"Average UCR Rank\")\nstack_bar_plot.legend([\"Ward 1\",\"Ward 2\",\"Ward 3\",\"Ward 4\",\"Ward 5\",\"Ward 6\",\"Ward 7\",\"Ward 8\"], loc=9,ncol=4)\nplt.show()\n\n\n# Create a simple histogram\ncrime_patterns = crimetypes[['ucr-rank','START_DATE']]\ncrime_plot = 
crime_patterns['ucr-rank'].hist(bins=20)\ncrime_plot.set_title(\"Crime Patterns\")\ncrime_plot.set_xlabel(\"UCR Rank (1-9)\")\ncrime_plot.set_ylabel(\"Number of Crime\")\nplt.show()\n\n\n# Create a line chart showing purchases by month\ncrime_patterns = crime_patterns.set_index('START_DATE')\nmonth_plot = crime_patterns.resample('M',how=sum).plot(title=\"Total Crimes by Month\",legend=None)\nfig = month_plot.get_figure()\n\n#Show the image, then save it\nplt.show()\nfig.savefig(\"crime_over_time.png\")\n","sub_path":"share20180425/blockwise_explore_crime_orig_csv_20180425.py","file_name":"blockwise_explore_crime_orig_csv_20180425.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355240440","text":"from youtube_utils import *\nfrom sheets import *\n\nchannel_file = \"ChannelList.csv\"\nchannels = getChannels(channel_file)\n\nYT_api_key = \"\"\nYT_api = buildYTApi(YT_api_key)\n\nscraper = YTScraper(YT_api, channels)\n\ndef scrape():\n\tfor channel in channels:\n\t\tchannel_name = channel[0]\n\t\tchannel_id = channel[1]\n\t\tupload_ids = scraper.getUploadIds(channel_id)\n\t\tfor upload_id in upload_ids:\n\t\t\tscraper.getCommentThreads(upload_id, channel_id)\n\n\tscraper.postProc()\n\nscrape()\nmakeSheet(scraper.authors, channels)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259772601","text":"from docs_conf.conf import *\n\nbranch = 'latest'\nmaster_doc = 'index'\n\nlinkcheck_ignore = [\n r'http://localhost:.*',\n 'http://CONSUL_SERVER_UI:30270/ui/#/dc1/services',\n r'https://.*h=frankfurt',\n r'http.*frankfurt.*',\n r'http.*simpledemo.onap.org.*',\n r'http://ANY_K8S_IP.*',\n 'http://so-monitoring:30224',\n r'http://SINK_IP_ADDRESS:667.*',\n r'http.*K8S_HOST:30227.*',\n r'http.*K8S_NODE_IP.*'\n]\n\nintersphinx_mapping = {}\n\nhtml_last_updated_fmt = '%d-%b-%y %H:%M'\n\ndef setup(app):\n app.add_css_file(\"css/ribbon.css\")\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"466191213","text":"from airflow import DAG\nfrom airflow.operators.bash import BashOperator\n\nfrom datetime import datetime \n\ndefault_args = {\n 'start_date': datetime(2020, 1, 1)\n}\n\nwith DAG('trigger_rule',default_args=default_args, schedule_interval='@daily', catchup=False) as dag:\n \n task1 = BashOperator(\n task_id='task1',\n bash_command='exit 1',\n do_xcom_push=False\n )\n\n task2 = BashOperator(\n task_id='task2',\n bash_command='exit 1',\n do_xcom_push=False\n )\n\n task3 = BashOperator(\n task_id='task3',\n bash_command='exit 0',\n do_xcom_push=False,\n trigger_rule='all_failed'\n )\n\n[task1, task2] >> task3","sub_path":"Learning/Part1/Part1-dag/trigger_rule.py","file_name":"trigger_rule.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"434183090","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2013 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the License\n# (GPLv2) or (at your option) any later version.\n# There is NO WARRANTY for this software, express or implied, including the\n# implied warranties of 
MERCHANTABILITY, NON-INFRINGEMENT, or FITNESS FOR A\n# PARTICULAR PURPOSE.\n# You should have received a copy of GPLv2 along with this software; if not,\n# see http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom copy import deepcopy\nimport os\n\nfrom pulp_rpm.common import models\nfrom pulp_rpm.plugins.importers.yum import utils\n\n# primary.xml element tags -----------------------------------------------------\nMETADATA_FILE_NAME = 'primary'\n\nCOMMON_SPEC_URL = 'http://linux.duke.edu/metadata/common'\nRPM_SPEC_URL = 'http://linux.duke.edu/metadata/rpm'\n\n\n# primary.xml element tags -----------------------------------------------------\nPACKAGE_TAG = '{%s}package' % COMMON_SPEC_URL\n\nNAME_TAG = '{%s}name' % COMMON_SPEC_URL\nARCH_TAG = '{%s}arch' % COMMON_SPEC_URL\nVERSION_TAG = '{%s}version' % COMMON_SPEC_URL\nCHECKSUM_TAG = '{%s}checksum' % COMMON_SPEC_URL\nSUMMARY_TAG = '{%s}summary' % COMMON_SPEC_URL\nDESCRIPTION_TAG = '{%s}description' % COMMON_SPEC_URL\nPACKAGER_TAG = '{%s}packager' % COMMON_SPEC_URL\nURL_TAG = '{%s}url' % COMMON_SPEC_URL\nTIME_TAG = '{%s}time' % COMMON_SPEC_URL\nSIZE_TAG = '{%s}size' % COMMON_SPEC_URL\nLOCATION_TAG = '{%s}location' % COMMON_SPEC_URL\nFORMAT_TAG = '{%s}format' % COMMON_SPEC_URL\n\nFILE_TAG = '{%s}file' % COMMON_SPEC_URL\n\nRPM_LICENSE_TAG = '{%s}license' % RPM_SPEC_URL\nRPM_VENDOR_TAG = '{%s}vendor' % RPM_SPEC_URL\nRPM_GROUP_TAG = '{%s}group' % RPM_SPEC_URL\nRPM_BUILDHOST_TAG = '{%s}buildhost' % RPM_SPEC_URL\nRPM_SOURCERPM_TAG = '{%s}sourcerpm' % RPM_SPEC_URL\nRPM_HEADER_RANGE_TAG = '{%s}header-range' % RPM_SPEC_URL\nRPM_PROVIDES_TAG = '{%s}provides' % RPM_SPEC_URL\nRPM_REQUIRES_TAG = '{%s}requires' % RPM_SPEC_URL\nRPM_ENTRY_TAG = '{%s}entry' % RPM_SPEC_URL\n\n\n# package information dictionary -----------------------------------------------\n\n# the package information dictionary is a combination of the PACKAGE_INFO_SKEL\n# and PACKAGE_FORMAT_SKEL dictionaries\n# all fields, along with their default values, are guaranteed to be there\n\nPACKAGE_INFO_SKEL = {'type': None,\n 'name': None,\n 'arch': None,\n 'version': None,\n 'release': None,\n 'epoch': None,\n 'checksum': None,\n 'checksumtype': None,\n 'summary': None,\n 'description': None,\n 'changelog': None,\n 'build_time': None,\n 'url': None,\n 'time': None,\n 'size': None,\n 'filename': None,\n 'relative_url_path': None}\n\nPACKAGE_FORMAT_SKEL = {'vendor': None,\n 'license': None,\n 'group': None,\n 'header_range': {'start': None, 'end': None},\n 'buildhost': None,\n 'requires': [],\n 'provides': [],\n 'sourcerpm': None,\n 'files': []}\n\n# RPM entry dictionary ---------------------------------------------------------\n\n# RPM entry dictionaries will make up the values in the requires and provides lists\n\nRPM_ENTRY_SKEL = {'name': None,\n 'version': None,\n 'release': None,\n 'epoch': None,\n 'flags': None}\n\n# file information dictionary --------------------------------------------------\n\n# file information dictionaries will make up the values in the files lists\n\nFILE_INFO_SKEL = {'path': None}\n\n# element processing methods ---------------------------------------------------\n\ndef process_package_element(package_element):\n \"\"\"\n Process a parsed primary.xml package element into a package information\n dictionary.\n\n :param package_element: parsed primary.xml package element\n :return: package information dictionary\n :rtype: pulp_rpm.common.models.RPM\n \"\"\"\n # NOTE the use of deepcopy relies on cpython's very sensible policy of never\n # duplicating string 
literals, this may not hold up in other implementations\n # the python interpreter.\n package_info = deepcopy(PACKAGE_INFO_SKEL)\n package_info['type'] = package_element.attrib['type']\n\n name_element = package_element.find(NAME_TAG)\n if name_element is not None:\n package_info['name'] = name_element.text\n\n arch_element = package_element.find(ARCH_TAG)\n if arch_element is not None:\n package_info['arch'] = arch_element.text\n\n version_element = package_element.find(VERSION_TAG)\n if version_element is not None:\n package_info['version'] = version_element.attrib['ver']\n package_info['release'] = version_element.attrib.get('rel', None)\n package_info['epoch'] = version_element.attrib.get('epoch', None)\n\n checksum_element = package_element.find(CHECKSUM_TAG)\n if checksum_element is not None:\n package_info['checksumtype'] = checksum_element.attrib['type']\n package_info['checksum'] = checksum_element.text\n\n summary_element = package_element.find(SUMMARY_TAG)\n if summary_element is not None:\n package_info['summary'] = summary_element.text\n\n description_element = package_element.find(DESCRIPTION_TAG)\n if description_element is not None:\n package_info['description'] = description_element.text\n\n url_element = package_element.find(URL_TAG)\n if url_element is not None:\n package_info['url'] = url_element.text\n\n time_element = package_element.find(TIME_TAG)\n if time_element is not None:\n package_info['time'] = int(time_element.attrib['file'])\n package_info['build_time'] = int(time_element.attrib['build'])\n\n size_element = package_element.find(SIZE_TAG)\n if size_element is not None:\n package_info['size'] = int(size_element.attrib['package'])\n\n location_element = package_element.find(LOCATION_TAG)\n if location_element is not None:\n href = location_element.attrib['href']\n filename = os.path.basename(href)\n package_info['relativepath'] = href\n package_info['filename'] = filename\n # we don't make any attempt to preserve the original directory structure\n # this element will end up being converted back to XML and stuffed into\n # the DB on the unit object, so this is our chance to modify it.\n location_element.attrib['href'] = filename\n\n format_element = package_element.find(FORMAT_TAG)\n package_info.update(_process_format_element(format_element))\n\n if package_info['arch'].lower() == 'src':\n model = models.SRPM.from_package_info(package_info)\n else:\n model = models.RPM.from_package_info(package_info)\n # add the raw XML so it can be saved in the database later\n rpm_namespace = utils.Namespace('rpm', RPM_SPEC_URL)\n model.raw_xml = utils.element_to_raw_xml(package_element, [rpm_namespace], COMMON_SPEC_URL)\n return model\n\n\ndef _process_format_element(format_element):\n \"\"\"\n Process a parsed primary.xml package format element (child element of\n package element) into a package format dictionary.\n\n :param format_element: parsed primary.xml package format element\n :return: package format dictionary\n :rtype: dict\n \"\"\"\n # NOTE the use of deepcopy relies on cpython's very sensible policy of never\n # duplicating string literals, this may not hold up in other implementations\n # the python interpreter.\n package_format = deepcopy(PACKAGE_FORMAT_SKEL)\n\n if format_element is None:\n return package_format\n\n vendor_element = format_element.find(RPM_VENDOR_TAG)\n if vendor_element is not None:\n package_format['vendor'] = None # XXX figure out which attrib this is\n\n license_element = format_element.find(RPM_LICENSE_TAG)\n if license_element is not 
None:\n package_format['license'] = license_element.text\n\n group_element = format_element.find(RPM_GROUP_TAG)\n if group_element is not None:\n package_format['group'] = group_element.text\n\n header_range_element = format_element.find(RPM_HEADER_RANGE_TAG)\n if header_range_element is not None:\n package_format['header_range']['start'] = int(header_range_element.attrib['start'])\n package_format['header_range']['end'] = int(header_range_element.attrib['end'])\n\n build_host_element = format_element.find(RPM_BUILDHOST_TAG)\n if build_host_element is not None:\n package_format['buildhost'] = build_host_element.text\n\n sourcerpm_element = format_element.find(RPM_SOURCERPM_TAG)\n if sourcerpm_element is not None:\n package_format['sourcerpm'] = sourcerpm_element.text\n\n provides_element = format_element.find(RPM_PROVIDES_TAG)\n if provides_element is not None:\n package_format['provides'].extend(_process_rpm_entry_element(e) for e in provides_element.findall(RPM_ENTRY_TAG))\n\n requires_element = format_element.find(RPM_REQUIRES_TAG)\n if requires_element is not None:\n package_format['requires'].extend(_process_rpm_entry_element(e) for e in requires_element.findall(RPM_ENTRY_TAG))\n\n package_format['files'].extend(_process_file_element(e) for e in format_element.findall(FILE_TAG))\n\n return package_format\n\n\ndef _process_rpm_entry_element(rpm_entry_element):\n \"\"\"\n Process a parsed RPM entry element (child elements of both provides and\n requires elements) into an RPM entry dictionary.\n\n :param rpm_entry_element: parsed RPM entry element\n :return: RPM entry dictionary\n :rtype: dict\n \"\"\"\n # NOTE the use of deepcopy relies on cpython's very sensible policy of never\n # duplicating string literals, this may not hold up in other implementations\n # the python interpreter.\n rpm_entry = deepcopy(RPM_ENTRY_SKEL)\n\n rpm_entry['name'] = rpm_entry_element.attrib['name']\n rpm_entry['version'] = rpm_entry_element.attrib.get('ver', None)\n rpm_entry['release'] = rpm_entry_element.attrib.get('rel', None)\n rpm_entry['epoch'] = rpm_entry_element.attrib.get('epoch', None)\n rpm_entry['flags'] = rpm_entry_element.attrib.get('flags', None)\n\n return rpm_entry\n\n\ndef _process_file_element(file_element):\n \"\"\"\n Process a parsed file element (child element of the files element) into a\n file information dictionary.\n\n :param file_element: parsed file element\n :return: file information dictionary\n :rtype: dict\n \"\"\"\n # NOTE the use of deepcopy relies on cpython's very sensible policy of never\n # duplicating string literals, this may not hold up in other implementations\n # the python interpreter.\n file_info = deepcopy(FILE_INFO_SKEL)\n\n file_info['path'] = file_element.text\n\n return file_info\n","sub_path":"plugins/pulp_rpm/plugins/importers/yum/repomd/primary.py","file_name":"primary.py","file_ext":"py","file_size_in_byte":11025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"305523625","text":"#!/home/shabelson/github/Studio3_Year_Final/Python/ROS/env/tvenv3/bin/python\n\n\n\n\nglobal host\nhost = \"192.168.1.10\"\n\nimport paho.mqtt as mqtt\nfrom paho.mqtt import client\nfrom time import sleep\nimport rospy\nfrom sensor_msgs.msg import Image\nimport codecs\nfrom std_msgs.msg import String \nimport traceback as tb\nimport time\n\n\n\nclass GetCForce():\n\tdef __init__(self):\n\t\trospy.loginfo(\"Force Grabber init\")\n\t\tself.pub_sensor = 
rospy.Publisher(\"/ST3/Tool_force\",String,queue_size=1)\n\t\tself.mqttClient = client.Client(\"Tool Force Connect\")\n\t\tself.mqttClient.connect(host=host,port=1883)\n\t\tself.mqttClient.subscribe(\"/ToolF\")\n\t\tself.mqttClient.on_message = self.OnMsg\n\t\t\n\t\tprint (\"setup Done\",self.mqttClient.is_connected())\n\tdef OnMsg(self,client,userData,msg):\n\t\tprint (\"Got Msg %f\"%(time.time()))\n\t\ttry:\n\t\t\tval = float(msg.payload)\n\t\texcept Exception as e:\n\t\t\tprint (e)\n\t\t\treturn\n\n\t\t\n\t\n\t\tself.pub_sensor.publish(str(val))\n\n\nif __name__ ==\"__main__\":\n\tgc = GetCForce()\t\n\tnode = rospy.init_node(\"Force Sensor\")\n\ttry:\n\t\trospy.loginfo(\"ForceSensor go To Loop\")\n\t\tgc.mqttClient.loop_forever()\n\texcept ValueError as e:\n\t\tprint (\"here\")\n\t\tpass \n\texcept Exception as e:\n\t\tprint (tb.print_exc())\n\tprint (\"WWWOWOWOWOWOW\")\n\tprint (e)\n\trospy.logfatal(str(e)+\" _ Force Sensor node\",)\n\tgc.mqttClient.disconnect()\n\tgc.mqttClient.loop_stop()\n\n","sub_path":"Python/ROS/shahar_Exp/ForceGrabber.py","file_name":"ForceGrabber.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"264479620","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plotLearningCurve():\n fig=plt.figure(0, figsize=(10,8) )\n# fig.clf()\n# plt.ioff()\n# plt.subplot(211)\n plt.plot(trn_error[:60], label='Training Set Error', linestyle=\"--\", linewidth=2)\n plt.plot(tst_error[:60], label='Validation Set Error', linewidth=2)\n plt.title('Learning Curve')\n plt.xlabel('Epoch')\n plt.ylim([-0.01,0.17])\n plt.ylabel('MSE')\n plt.legend()\n \n# plt.subplot(212)\n# plt.plot(trn_class_accu, label='Training Set Accuracy', linestyle=\"--\", linewidth=2)\n# plt.plot(tst_class_accu, label='Validation Set Accuracy', linewidth=2)\n# plt.ylim([0,103])\n# plt.ylabel('Percent')\n# plt.xlabel('Epoch')\n# plt.title('Classification Accuracy')\n# plt.legend(loc=4)\n \n# plt.draw()\n plt.tight_layout(pad=2.1)\n# plt.savefig(figPath)\n \ntrn_error = np.loadtxt(\"trn_error\")\ntst_error = np.loadtxt(\"tst_error\")\n\nplotLearningCurve()\nplt.show()","sub_path":"mlp-withRemap/153sigmoid/plotLearningCurve.py","file_name":"plotLearningCurve.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"202619532","text":"import json\n\nfrom django.http import HttpResponse\nfrom django.utils.safestring import mark_safe\n\n\nfrom geoinfo.models import Polygon\nfrom geoinfo.serializers import extractor\nfrom claim.models import Organization, OrganizationType\n\n\n# def export_layer(request, layer_id):\n\n# layer = Layer.objects.get(id=layer_id)\n# layer_json = layer.generate_json(add=False)['polygons']\n# responce = HttpResponse(layer_json)\n# responce['Content-Disposition'] = 'attachment; filename=%s.json' % layer.name\n# return responce\n\n\ndef get_polygons_tree(request, polygon_id):\n data = mark_safe(json.dumps(extractor(polygon_id)))\n return HttpResponse(data, content_type='application/json')\n\n\ndef add_org(request):\n print(request.POST)\n\n layer = Polygon.objects.get(\n polygon_id=request.POST['layer_id'])\n\n polygon = Polygon(\n polygon_id=request.POST['centroid'],\n centroid=request.POST['centroid'],\n address=request.POST['address'],\n layer=layer,\n level=Polygon.building,\n zoom=17,\n is_verified=True)\n polygon.save()\n\n org_type = OrganizationType.objects.get(\n 
type_id=request.POST['org_type'])\n\n organization = Organization(\n name=request.POST['org_name'],\n org_type=org_type)\n organization.save()\n\n polygon.organizations.add(organization)\n\n return HttpResponse(status=201)\n","sub_path":"geoinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"308059403","text":"#!/usr/bin/env python3\n# CODING:UTF-8\n# _*_ @Author:Amee _*_\n# _*_ @Date:2019-10-29 _*_\n# _*_ Project : chongqing guoyuan _*_\n\n'''\ntodo\n解决库的数据为空或者不足问题\n解决放箱位置选择问题\n解决161- 171 131 的offset 偏移问题\n\n\n前箱有箱,放后箱位置不够,\n修改有箱情况下的引导值计算方式,增加一个引导值,求平均值\n加入箱门朝向\n\n绝对值 使用存在问题?\n发给PLC的距离最后要取整\n日志记录加入所有涉及到的距离值\n\nerror_count, lidar_reconnect_count, plc_reconnect_count 超过10次退出重启\n__init__方法加入try 捕获写入log\n'''\n\n\nimport matplotlib.pyplot as plt\nfrom FreeMeasureC401 import FM_TCP\nimport threading\nimport json\nfrom socket import socket, AF_INET, SOCK_STREAM\nimport traceback\nimport os\nimport numpy as np\nfrom time import sleep,time,strftime,localtime\nimport struct\nfrom datetime import datetime\nfrom FeedDog import FeedWatchDog\n\n# 初始化看门狗程序,本地端口为9998\nFD = FeedWatchDog(9998)\n# 第一次喂狗\nFD.feed()\n\nft20Len = 605.8 # 单位为米 20尺箱长度cm\nft40Len = 1219.2 # 40尺箱子长度cm\ngapLen = 8 # 双20尺箱子间距为8cm\ngateLen = 5 # 箱门距离边缘距离,单位为cm\nsleep_time = 0.1\n\nprocess_msg = \\\n\"\"\"+------------------------{0}------------------------------+\\n\n工况:{1}\\n\n山侧:【车尾】 {2} 【箱尾】 {3} 【箱头】 {4} 【距离】 {5}\\n\n水侧:【车尾】 {6} 【箱尾】 {7} 【箱头】 {8} 【距离】 {9}\\n\n循环用时{10}\\n\n+-------------------------------------------------------------+\\n\"\"\"\n\n\n# 对两个列表进行排序\ndef sortFirstElement(x, y):\n newtuple = [(x, y) for x, y in zip(x, y)]\n\n output_x = []\n output_y = []\n for ele in sorted(newtuple):\n output_x.append(ele[0])\n output_y.append(ele[1])\n\n return output_x, output_y\n\n\nclass TrunkPosition(object):\n\n def __init__(self):\n try:\n self.config = self.loadCfgFile()\n\n self.lidarM_reconnect_count = 0 # 山侧扫描仪重连计数\n self.lidarR_reconnect_count = 0 # 水侧扫描仪重连计数\n self.plc_reconnect_count = 0 # PLC断线重连计数\n\n self.lidarMState = True # 监测山侧扫描仪的状态,离线为False\n self.lidarRState = True # 监测水侧扫描仪的状态\n self.plcState = True\n self.initLidarM()\n self.initLidarR()\n self.refreshAllLidar()\n\n self.plt = plt\n if self.config[\"PLCSwitch\"]:\n self.initPLC()\n\n # communication with IRC via threading\n self.t = threading.Thread(target=self._threadHandler)\n self.lock = threading.Lock()\n self.thread_switch = False\n self.recMsg = []\n\n self.hasMTrunk = False #\n self.hasRTrunk = False # 监测水侧是否集卡\n self.distanceM = 1000 # 返回给PLC的山侧的距离数据\n self.distanceR = 1000\n self.cpsCtl = [0,'',0,0,0]\n\n # PLC\n self.plc_msg = ''\n self.spreaderSize = 1 # 吊具尺寸,作业箱尺寸\n self.trunkOffset = 5 # 集卡车尾到锁头的偏移距离,单位为厘米?? 
todo\n self.taskType = True # 装箱(TRUE)或者卸箱(False)的任务类型\n self.workPosition = 0 # 作业地点 山侧(2)或者水侧(1),不作业(0)\n self.needGuide = False # 是否需要引导(外集卡需要引导(1),内集卡不需要(0)\n self.container_in_out = 0 # 1:进箱指令,2:出箱指令 0:无效\n\n self.boxPos_M = 0 # 用来记录托板上箱子个数和位置情况:0:空箱 1:前箱有箱 2:后箱有箱 3:满箱\n self.boxPos_R = 0\n\n self.plcmessage_printcount=0\n\n # 存储最近三次的距离数值,取平均值作为本次的结果\n self.M_queue = []\n self.R_queue = []\n\n self.lidarM_count = 0 # 山侧扫描仪的心跳\n self.lidarR_count = 0\n self.lidarM_last_count = self.lidarM.getCount()\n self.lidarR_last_count = self.lidarR.getCount()\n\n self.Mcontainer_head_point = -11 # 山侧有箱情况下的箱头X坐标\n self.Mcontainer_tail_point = -11 # 山侧有箱情况下的箱尾X坐标\n self.Mtrunk_tail_point = -11 # 山侧车板尾X坐标\n\n self.Rcontainer_head_point = -11 # 水侧有箱情况下的箱头X坐标\n self.Rcontainer_tail_point = -11 # 水侧有箱情况下的��尾X坐标\n self.Rtrunk_tail_point = -11 # 水侧车板尾X坐标\n\n # self.MhasGate = self.config['MhasGate'] # 山侧箱门是否朝向车尾\n # self.RhasGate = self.config['RhasGate'] # 水侧箱门是否朝向车尾\n except Exception:\n traceback.print_exc()\n self.write_log(traceback.format_exc())\n\n def initLidarM(self):\n self.lidarM = FM_TCP(\n self.config[\"MlidarIP\"], self.config[\"MlidarPort\"],\n self.config[\"MlidarCID\"], self.config[\"MlidarName\"]\n )\n self.lidarM.setOffsetX(self.config[\"MXoffset\"])\n self.lidarM.setOffsetY(self.config[\"MYoffset\"])\n self.lidarM.setOffsetTheta(self.config[\"MThetaoffset\"])\n self.lidarM.setXReverse(self.config[\"MXReverse\"])\n self.lidarM.setYReverse(self.config[\"MYReverse\"])\n self.lidarM.connect()\n self.lidarM.sendStart()\n\n def initLidarR(self):\n # config the river-side lidar\n self.lidarR = FM_TCP(\n self.config[\"RlidarIP\"], self.config[\"RlidarPort\"],\n self.config[\"RlidarCID\"], self.config[\"RlidarName\"]\n )\n self.lidarR.setOffsetX(self.config[\"RXoffset\"])\n self.lidarR.setOffsetY(self.config[\"RYoffset\"])\n self.lidarR.setOffsetTheta(self.config[\"RThetaoffset\"])\n self.lidarR.setXReverse(self.config[\"RXReverse\"])\n self.lidarR.setYReverse(self.config[\"RYReverse\"])\n self.lidarR.connect()\n self.lidarR.sendStart()\n\n def refreshAllLidar(self):\n self.lidarM.setOffsetX(self.config[\"MXoffset\"])\n self.lidarM.setOffsetY(self.config[\"MYoffset\"])\n self.lidarM.setOffsetTheta(self.config[\"MThetaoffset\"])\n self.lidarM.setXReverse(self.config[\"MXReverse\"])\n self.lidarM.setYReverse(self.config[\"MYReverse\"])\n\n self.lidarR.setOffsetX(self.config[\"RXoffset\"])\n self.lidarR.setOffsetY(self.config[\"RYoffset\"])\n self.lidarR.setOffsetTheta(self.config[\"RThetaoffset\"])\n self.lidarR.setXReverse(self.config[\"RXReverse\"])\n self.lidarR.setYReverse(self.config[\"RYReverse\"])\n\n def initPLC(self):\n self.PLC_socket = socket(AF_INET, SOCK_STREAM)\n self.PLC_socket.connect((self.config[\"PLCIP\"], self.config[\"PLCPort\"]))\n\n def is_lidar_alive(self):\n # 扫描仪的有效性判断,对不正常心跳值进行计数判断\n lidarM_count = self.lidarM.getCount()\n lidarR_count = self.lidarR.getCount()\n\n if lidarM_count == self.lidarM_last_count:\n self.lidarM_count += 1\n else:\n self.lidarM_last_count = lidarM_count\n self.lidarM_count = 0\n\n if lidarR_count == self.lidarR_last_count:\n self.lidarR_count += 1\n else:\n self.lidarR_last_count = lidarR_count\n self.lidarR_count = 0\n\n if self.lidarM_count > 5:\n self.write_log(\"{} Mountain Lidar heartbeat pauses and reconnects\".format(self.config['deviceID']))\n self.lidarMState = False\n if self.lidarR_count > 5:\n self.write_log(\"{} River Lidar heartbeat pauses and reconnects\".format(self.config['deviceID']))\n self.lidarRState = False\n\n def 
loadCfgFile(self, filename='cpsconfig.json'):\n '''load config information for config-file '''\n with open(filename, 'r') as f:\n config = json.load(f)\n return config\n\n def sendPLC(self, message):\n '''send messages to PLC'''\n self.PLC_socket.sendall(message)\n\n def disconnected(self):\n self.thread_switch = False\n self.joinThread()\n self.PLC_socket.close()\n\n def startThread(self):\n self.thread_switch = True\n self.t.start()\n\n def joinThread(self):\n self.t.join()\n\n def _threadHandler(self):\n try:\n while self.thread_switch:\n recv = self.PLC_socket.recv(22)\n # print('recving from plc :',recv)\n self.spreaderSize = struct.unpack('>H',recv[6:8])[0] # 1: 20英尺; 2: 40英尺; 3: 双20英尺;\n # 开锁:集卡进行卸箱任务\n self.taskType = not (struct.unpack('>H',recv[10:12])[0] >> 2 & 1)\n self.trunkOffset = struct.unpack('>H',recv[12:14])[0]\n self.workPosition = struct.unpack('>H',recv[16:18])[0]\n self.needGuide = struct.unpack('>H',recv[18:20])[0]\n self.container_in_out = struct.unpack('>h',recv[20:22])[0] #\n\n self.plc_msg = \"\"\"\n 【箱子作业尺寸:{0}】1:20尺 2:40尺 3:双20尺\n 【任务类型:{1}】\n 【车尾锁头偏移:{2}】\n 【作业位置:{3}】\n 【是否引导:{4}】\n [进出箱 :{5}]\n \"\"\".format('20尺' if self.spreaderSize == 1 else '40尺' if self.spreaderSize == 2 else '双20���',\n '闭锁-装箱' if self.taskType else '开锁-卸箱',self.trunkOffset,\n '不作业' if not self.workPosition else '山侧' if self.workPosition == 2 else '水侧',\n '需要引导' if self.needGuide else '不需要引导',\n '进箱' if self.container_in_out==1 else '出箱' if self.container_in_out==2 else '待定')\n\n except Exception as e:\n print('线程出现故障,退出了!!',e )\n # self.disconnected()\n\n def write_log(self, log_info):\n if self.config[\"logSwitch\"]:\n # 记录日志数据\n now = datetime.now().strftime('-%m-%d')\n fileName = \"log\" + now + '.txt'\n timeStamp = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n with open(fileName, 'a') as filewriter:\n timeStamp = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n filewriter.write( timeStamp+' '+log_info+'\\n\\n')\n\n def dataLog(self, x, y, msg=''):\n if self.config['dataLog']:\n # 减小长度\n x = [round(ele, 3) for ele in x]\n y = [round(ele, 3) for ele in y]\n now = datetime.now().strftime('-%m-%d')\n fileName = 'dataLog' + now + '.txt'\n timeStamp = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n with open(fileName, 'a') as file:\n file.write(timeStamp + msg + '\\n' + str(x) +'\\n'+str(y) + '\\n')\n\n def cutAll(self, x, y):\n '''\n 初步切割:查找需要范围内的点,temp_x y用来画图显示 trunk_x y用来记录集卡点数据\n '''\n # 截取了高于地面上车体部分\n trunkTopHeight = 5\n trunk_x, trunk_y = [], []\n temp_x, temp_y = [], []\n for index in range(len(x)):\n if self.config[\"leftEdge\"] < x[index] <= self.config[\"rightEdge\"]and \\\n 0 < y[index] < self.config[\"maxHeight\"]:\n temp_x.append(x[index])\n temp_y.append(y[index])\n # 截取所有数据中的车体部分\n if self.config['minTrunkHeight'] < y[index] < trunkTopHeight:\n trunk_x.append(x[index])\n trunk_y.append(y[index])\n return temp_x, temp_y, trunk_x, trunk_y\n\n def findTailPoint(self, trunk_x, trunk_y):\n \"\"\"\n return 车尾x坐标,箱尾x坐标,箱头X坐标\n \"\"\"\n box_tail_point = -15\n box_head_point = -15\n trunk_tail_point = -15\n boxPos = 0\n trunk_x, trunk_y = sortFirstElement(trunk_x, trunk_y)\n box_x = []\n box_y = []\n boxHeight = 5 # 载箱后的箱子高度不会超过5米\n\n # if self.plcmessage_printcount > 10:\n # msg = \"原始尾部数据:x={0} y={1}\\n头部数据{2},{3}\".format(trunk_x[:5], trunk_y[:5], trunk_x[-5:], trunk_y[-5:])\n # print(msg)\n # self.write_log(msg)\n\n # 对尾部数据进行滤波处理\n if len(trunk_x) > 18:\n for i in range(5):\n if (trunk_x[1] - trunk_x[0]) > self.config[\"threshold\"]:\n del trunk_x[0]\n del trunk_y[0]\n else:\n 
break\n\n tailpoint_Xsub = trunk_x[1]-trunk_x[0]\n # tailpoint_Ysub = abs(trunk_y[1]-trunk_y[0])\n\n if len(trunk_y) > 10 and 0.035 < tailpoint_Xsub <= self.config[\"threshold\"]:\n trunk_tail_point = (trunk_x[1]+trunk_x[0])/2\n elif len(trunk_y) > 10 and tailpoint_Xsub <= 0.035:\n trunk_tail_point = trunk_x[0]\n\n # 集卡车长度值\n trunkLength = trunk_x[-1] - trunk_tail_point\n\n # 判断车体是否完全进入检测范围\n if trunk_tail_point > self.config[\"leftEdge\"] + 1 and trunkLength > 10:\n # 车体完全进入区间 判断有没有放箱\n for index in range(len(trunk_x)):\n # 记录托板上面箱子的值\n if (self.config['minBoxHeight'] < trunk_y[index] < boxHeight) and \\\n ((trunk_tail_point-0.1) <= trunk_x[index] < trunk_tail_point + 12.5):\n box_x.append(trunk_x[index])\n box_y.append(trunk_y[index])\n\n if len(box_x) < 15:\n box_x = [] # 点数太少认为是跳点干扰引起\n box_y = []\n\n if len(box_x) < 15:\n # 空箱状态:\n boxPos = 0\n else:\n # if self.plcmessage_printcount > 10:\n # msg = \"箱尾原始头部数据:x={0} y={1}\".format(box_x[-5:], box_y[-5:])\n # msg += \"箱尾原始尾部数据:x={0} y={1}\".format(box_x[:5], box_y[:5])\n # print(msg)\n # self.write_log(msg)\n\n # 滤波处理\n if len(box_x) > 18:\n for i in range(8):\n if box_x[1] - box_x[0] > self.config[\"threshold\"] :\n del box_x[0]\n del box_y[0]\n else:\n break\n if len(box_x) > 50:\n for i in range(8):\n if abs(box_x[-2] - box_x[-1] ) > self.config[\"threshold\"]:\n del box_x[-1]\n del box_y[-1]\n else:\n break\n\n box_tail_Xsub = box_x[1] - box_x[0]\n if 0.035 < box_tail_Xsub <= self.config[\"threshold\"]:\n box_tail = (box_x[0]+box_x[1])/2\n else:\n box_tail = box_x[0]\n\n if box_tail < trunk_tail_point + 3:\n # 后箱位有箱\n boxPos += 2\n if max(box_x) > trunk_tail_point + 7:\n # 前箱位有箱\n boxPos += 1\n\n box_head_point = box_x[-1]\n box_tail_point = box_tail\n\n return trunk_tail_point, box_tail_point, box_head_point, boxPos\n\n def plotMR(self, x1, y1, x2, y2, work_msg_M, work_msg_R):\n if self.config['plotSwitch']:\n self.plt.clf()\n self.plt.axis([self.config['leftEdge'], self.config['rightEdge'], -0.5, 19.6])\n # 画两条地平线\n self.plt.plot([self.config['leftEdge'], self.config['rightEdge']], [0, 0], '-')\n self.plt.plot([self.config['leftEdge'], self.config['rightEdge']], [10, 10], '-')\n # self.plt.xlabel('水侧:车尾{}箱尾{}箱头{} / 山侧:车尾{}箱尾{}箱头{}'.format(\n # int(self.Rtrunk_tail_point * 100), int(self.Rcontainer_tail_point * 100),\n # int(self.Rcontainer_head_point * 100), int(self.Mtrunk_tail_point*100),\n # int(self.Mcontainer_tail_point*100), int(self.Mcontainer_head_point*100),\n # ), fontproperties='SimHei')\n # 在xlabel 上标注当前的工况\n self.plt.xlabel(work_msg_R+' / '+work_msg_M, fontproperties='SimHei', fontsize=13)\n # title 标注距离值\n self.plt.title(self.config[\"deviceID\"] + ' R :{}cm / M :{}cm'.format(\n self.distanceR, self.distanceM))\n self.plt.plot(x1, y1, 'b.') # M\n y2 = [ele + 10 for ele in y2]\n self.plt.plot(x2, y2, 'g.') # R\n # 画出中心线\n self.plt.plot([0, 0], [-1, 40], '-')\n\n # 画出2个20尺辅助车厢线,方便调试\n if self.hasMTrunk and abs(self.distanceM) < 500:\n # 注意画图的单位是米,传来的参数单位是厘米\n Mx = self.Mtrunk_tail_point\n x = []\n assistLineM_x = [Mx, Mx + ft20Len*0.5/100, Mx + ft20Len/100, Mx +ft20Len*1.5/100, Mx + ft40Len/100]\n temp = [[temp for ele in range(11)] for temp in assistLineM_x]\n for ele in temp:\n x += ele\n y = [1+ele*0.5 for ele in range(1, 12, 1)] * 5 # 11 个点\n self.plt.plot(x, y, 'r.')\n # 在图中标明车尾,箱尾,箱头三点的坐标点\n self.plt.text(self.Mtrunk_tail_point-2, 7,'车尾{}'.format(\n round(self.Mtrunk_tail_point,3)),\n fontsize=13,color='purple',fontproperties='SimHei')\n if self.boxPos_M:\n self.plt.text(self.Mcontainer_tail_point-2, 8, 
'箱尾{}'.format(\n round(self.Mcontainer_tail_point,3)),\n fontsize=13, color='purple', fontproperties='SimHei')\n self.plt.text(self.Mcontainer_head_point-2, 8, '箱头{}'.format(\n round(self.Mcontainer_head_point,3)),\n fontsize=13, color='purple', fontproperties='SimHei')\n\n if self.hasRTrunk and abs(self.distanceR) < 500:\n # 注意画图的单位是米,传来的参数单位是厘米\n Mx = self.Rtrunk_tail_point\n x = []\n assistLineM_x = [Mx, Mx + ft20Len*0.5/100, Mx + ft20Len/100, Mx + ft20Len*1.5/100, Mx + ft40Len/100]\n temp = [[temp for ele in range(11)] for temp in assistLineM_x]\n for ele in temp:\n x += ele\n y = [10+ele*0.5 for ele in range(1, 12, 1)] * 5 # 11 个点\n self.plt.plot(x, y, 'r.')\n # 在图中标明车尾,箱尾,箱头三点的坐标点\n self.plt.text(self.Rtrunk_tail_point - 2, 15, '车尾{}'.format(round(self.Rtrunk_tail_point, 3)),\n fontsize=13, color='orange', fontproperties='SimHei')\n if self.boxPos_R:\n # 有箱时,标注箱尾和箱头的坐标点\n self.plt.text(self.Rcontainer_tail_point - 2, 16, '箱尾{}'.format(\n round(self.Rcontainer_tail_point, 3)),\n fontsize=13, color='orange', fontproperties='SimHei')\n self.plt.text(self.Rcontainer_head_point - 2, 16, '箱头{}'.format(\n round(self.Rcontainer_head_point, 3)),\n fontsize=13, color='orange', fontproperties='SimHei')\n\n self.plt.draw()\n self.plt.pause(0.001)\n else:\n self.plt.close()\n sleep(0.1)\n\n def getCPSControl(self):\n self.cpsCtl[0] += 1\n if self.cpsCtl[0] > 255:\n self.cpsCtl[0] = 0\n sendInfo = b''\n sendInfo += struct.pack('>H', self.cpsCtl[0])\n\n num1 = 0\n num1 = num1 | self.lidarMState\n num1 = num1 | (0 << 1)\n # num1 = num1 | (self.lidarM.getDirtyState() << 1)\n num1 = num1 | (0b1 << 2)\n num1 = num1 | (self.hasMTrunk << 3)\n num1 = num1 | (1 << 8)\n num1 = num1 | (0 << 9)\n num1 = num1 | (0b1 << 10)\n num1 = num1 | (self.hasRTrunk << 11)\n\n sendInfo += struct.pack('>H', num1)\n self.cpsCtl[1] = bin(num1)\n self.cpsCtl[2] = int(self.distanceR)\n self.cpsCtl[3] = int(self.distanceM)\n sendInfo += struct.pack('>h', self.cpsCtl[2])\n sendInfo += struct.pack('>h', self.cpsCtl[3])\n\n if not self.plcState:\n self.cpsCtl[4] = 5\n sendInfo += struct.pack('>H', 5)\n else:\n self.cpsCtl[4] = 0\n sendInfo += struct.pack('>H', 0)\n # print('send : ', sendInfo)\n return sendInfo\n\n def cal_distance_M(self, x, y):\n # 计算山侧的引导距离值\n work_msg = '山侧:'\n if len(x) > 20: # 有集卡出现\n hasTrunk = True\n self.Mtrunk_tail_point, self.Mcontainer_tail_point,\\\n self.Mcontainer_head_point, self.boxPos_M = self.findTailPoint(x, y)\n\n # test todo\n # 如果箱门朝向车尾,则修正箱门到边缘差值引起的误差\n # if self.MhasGate:\n # self.Mcontainer_tail_point -= 0.05\n # work_msg += ' 有箱门 '\n self.Mcontainer_tail_point -= 0.025\n\n if self.workPosition and self.Mtrunk_tail_point > -12:\n # 尺寸: 20英尺; 2: 40英尺; 3: 双20英尺;\n if self.spreaderSize == 2: # 40尺作业情况\n if self.boxPos_M == 0: # 空箱\n work_msg += '40 呎 空板 装箱作业'\n distance = -int(self.Mtrunk_tail_point * 100 + ft40Len * 0.5 + self.trunkOffset - 5.75) \\\n - self.config[\"Mountain 40ft unload offset\"]\n else: # 满箱\n work_msg += '40 呎 满箱 吊箱作业'\n distance = -int(self.Mcontainer_tail_point * 100 + ft40Len * 0.5) + \\\n -self.config[\"Mountain 40ft load offset\"]\n\n distance += self.config['Mountain 40ft offset'] # 吊具中心偏移量\n\n elif self.boxPos_M == 0:\n # 20 尺 空板 前箱\n if self.Mtrunk_tail_point * 100 < -ft20Len:\n work_msg += '20 呎 空板 前箱装箱作业'\n distance = -int(self.Mtrunk_tail_point * 100 + ft20Len * 1.5 + self.trunkOffset - 5.75 +\n gapLen) - self.config[\"Mountain 20ft head empty offset\"]\n else:\n # 后箱位置\n work_msg += '20 呎 空板 后箱装箱作业'\n distance = -int(self.Mtrunk_tail_point * 100 + ft20Len * 
0.5 + self.trunkOffset - 5.75) + \\\n -self.config[\"Mountain 20ft back empty offset\"]\n\n distance += self.config['Mountain 20ft offset']\n\n elif self.boxPos_M == 1: # 20 有前箱\n if self.Mtrunk_tail_point * 100 < -ft20Len:\n # if self.container_in_out == 1: # 进箱 抓前箱\n work_msg += '20 呎 有前箱 前箱吊箱作业'\n distance = -int(self.Mcontainer_tail_point * 100 + ft20Len * 0.5) \\\n -self.config['Mountain 20ft forward unload offset']\n\n else:\n work_msg += '20 呎 有前箱 后箱装箱作业'\n distance = (-int(self.Mcontainer_tail_point * 100 - ft20Len * 0.5 - gapLen) -\n (ft20Len*0.5 + self.Mtrunk_tail_point*100) )/2 - \\\n self.config[\"Mountain 20ft forward load offset\"]\n\n distance += self.config['Mountain 20ft offset']\n\n elif self.boxPos_M == 2: # 20尺有后箱\n # if self.container_in_out == 1: # 进箱 抓后箱\n if self.Mtrunk_tail_point * 100 >= -ft20Len:\n work_msg += '20 呎 有后箱 后箱吊箱作业'\n distance = ( (-int(self.Mcontainer_tail_point * 100 + ft20Len * 0.5)) +\n ft20Len * 0.5 - self.Mcontainer_head_point * 100) / 2 + \\\n -self.config[\"Mountain 20ft backward load offset\"]\n else: # 出箱,作业前箱\n work_msg += '20 呎 有后箱 前箱装箱作业'\n distance = -int(self.Mcontainer_tail_point * 100 + ft20Len * 1.5 + gapLen) -\\\n self.config[\"Mountain 20ft backward unload offset\"]\n\n distance += self.config['Mountain 20ft offset']\n\n else:\n # 20 尺满箱,作业前箱\n if self.Mtrunk_tail_point * 100 < -ft20Len:\n work_msg += '20 呎 满箱 前箱吊箱作业'\n distance = (-int(self.Mcontainer_tail_point * 100 + ft20Len * 1.5 + gapLen) +\n ft20Len * 0.5 - self.Mcontainer_head_point * 100) / 2 + \\\n -self.config[\"Mountain 20ft full load head offset\"]\n else:\n # 作业后箱\n work_msg += '20 呎 满箱 后箱吊箱作业'\n distance = -int(self.Mcontainer_tail_point * 100 + ft20Len * 0.5) + \\\n -self.config[\"Mountain 20ft full load back offset\"]\n\n distance += self.config['Mountain 20ft offset']\n else:\n # 不需要引导的情况\n work_msg += \"不需要引导\"\n distance = 1000\n else:\n work_msg += \"集卡未出现\"\n hasTrunk = False\n distance = 1000\n\n return int(distance), work_msg, hasTrunk\n\n def cal_distance_R(self, x, y):\n work_msg = '水侧:'\n if len(x) > 20: # 有集卡出现\n hasTrunk = True\n self.Rtrunk_tail_point, self.Rcontainer_tail_point,\\\n self.Rcontainer_head_point, self.boxPos_R = self.findTailPoint(x, y)\n\n # test todo\n # 如果箱门朝向车尾,则修正箱门到边缘差值引起的误差\n # if self.RhasGate:\n # self.Rcontainer_tail_point -= 0.05\n # work_msg += ' 有箱门 '\n self.Rcontainer_tail_point -= 0.025\n\n if self.workPosition and self.Rtrunk_tail_point > -12:\n # 尺寸: 20英尺; 2: 40英尺; 3: 双20英尺;\n if self.spreaderSize == 2: # 40尺作业情况\n if self.boxPos_R == 0: # 空箱\n work_msg += '40 呎 空板 装箱作业'\n distance = -int(self.Rtrunk_tail_point * 100 + ft40Len * 0.5 + self.trunkOffset - 5.75) \\\n -self.config[\"River 40ft unload offset\"]\n else: # 满箱\n work_msg += '40 呎 满箱 吊箱作业'\n distance = -int(self.Rcontainer_tail_point * 100 + ft40Len * 0.5) + \\\n -self.config[\"River 40ft load offset\"]\n\n distance += self.config['River 40ft offset'] # 吊具中心偏移量\n\n elif self.boxPos_R == 0:\n # 20 尺 空板 前箱\n if self.Rtrunk_tail_point * 100 < -ft20Len:\n work_msg += '20 呎 空板 前箱装箱作业'\n distance = -int(self.Rtrunk_tail_point * 100 + ft20Len * 1.5 + self.trunkOffset - 5.75 +\\\n gapLen) - self.config[\"River 20ft head empty offset\"]\n else:\n # 后箱位置\n work_msg += '20 呎 空板 后箱装箱作业'\n distance = -int(self.Rtrunk_tail_point * 100 + ft20Len * 0.5 + self.trunkOffset - 5.75) + \\\n -self.config[\"River 20ft back empty offset\"]\n\n distance += self.config['River 20ft offset'] # 20呎吊具中心偏移量\n\n elif self.boxPos_R == 1: # 20 有前箱\n if self.Rtrunk_tail_point * 100 < -ft20Len:\n # 
if self.container_in_out == 1: # 进箱 抓前箱\n work_msg += '20 呎 有前箱 前箱吊箱作业'\n distance = -int(self.Rcontainer_tail_point * 100 + ft20Len * 0.5) \\\n - self.config['River 20ft forward unload offset']\n else: # 出箱 作业后箱\n work_msg += '20 呎 有前箱 后箱装箱作业'\n distance = (-int(self.Rcontainer_tail_point * 100 - ft20Len * 0.5 - gapLen) -\n (ft20Len * 0.5 + self.Rtrunk_tail_point * 100)) / 2 - \\\n self.config[\"River 20ft forward load offset\"]\n\n distance += self.config['River 20ft offset']\n\n elif self.boxPos_R == 2: # 20尺有后箱\n # if self.container_in_out == 1: # 进箱 抓后箱\n if self.Rtrunk_tail_point * 100 >= -ft20Len:\n work_msg += '20 呎 有后箱 后箱吊箱作业'\n distance = ((-int(self.Rcontainer_tail_point * 100 + ft20Len * 0.5 ))+\n ft20Len*0.5 - self.Rcontainer_head_point*100)/2 + \\\n -self.config[\"River 20ft backward load offset\"]\n else: # 出箱,作业前箱\n work_msg += '20 呎 有后箱 前箱装箱作业'\n distance = -int(self.Rcontainer_tail_point * 100 + ft20Len * 1.5 + gapLen)+\\\n -self.config[\"River 20ft backward unload offset\"]\n\n distance += self.config['River 20ft offset']\n\n else:\n # 20 尺满箱,作业前箱\n if self.Rtrunk_tail_point * 100 < -ft20Len:\n work_msg += '20 呎 满箱 前箱吊箱作业'\n distance = (-int(self.Rcontainer_tail_point * 100 + ft20Len * 1.5 + gapLen) +\n ft20Len*0.5-self.Rcontainer_head_point*100)/2 + \\\n -self.config[\"River 20ft full load head offset\"]\n else:\n # 作业后箱\n work_msg += '20 呎 满箱 后箱吊箱作业'\n distance = -int(self.Rcontainer_tail_point * 100 + ft20Len * 0.5) + \\\n -self.config[\"River 20ft full load back offset\"]\n\n distance += self.config['River 20ft offset']\n\n else:\n # 不需要引导的情况\n work_msg += \"不需要引导\"\n distance = 1000\n else:\n work_msg += \"集卡未出现\"\n hasTrunk = False\n distance = 1000\n\n return int(distance), work_msg, hasTrunk\n\n def distance_filter(self):\n self.M_queue.append(self.distanceM)\n self.R_queue.append(self.distanceR)\n\n # 只存储最近三次数据\n if len(self.M_queue) > 3:\n del self.R_queue[0]\n if len(self.M_queue) > 3:\n del self.M_queue[0]\n\n self.distanceR = int(np.mean(self.R_queue))\n self.distanceM = int(np.mean(self.M_queue))\n\n def start(self):\n if self.config['PLCSwitch']:\n self.startThread() # 开启线程,接收PLC发送的数据,并解包\n\n count = 0\n error_count = 0\n time_count = 0\n work_msg_M = ''\n work_msg_R = ''\n\n while True:\n starttime = time()\n self.plcmessage_printcount += 1\n try:\n # print(\"------------counting: %d--------------\" % count)\n if count < self.config[\"scanConfigRotation\"]:\n count += 1\n else:\n count = 0\n self.config = self.loadCfgFile()\n self.refreshAllLidar()\n try:\n self.lidarM.sendHeartBeat()\n except Exception:\n self.lidarMState = False\n else:\n self.lidarMState = True\n\n try:\n self.lidarR.sendHeartBeat()\n except Exception:\n self.lidarRState = False\n else:\n self.lidarRState = True\n\n # getXY 返回的XY值可能不止一套\n x1, y1 = self.lidarM.getXY()\n x2, y2 = self.lidarR.getXY()\n\n self.is_lidar_alive()\n\n plot_x1, plot_y1, Mtrunk_x, Mtrunk_y = self.cutAll(x1, y1)\n plot_x2, plot_y2, Rtrunk_x, Rtrunk_y = self.cutAll(x2, y2)\n\n # test todo\n ################################################################\n if self.config['lockOffset']:\n self.trunkOffset = self.config['lockOffset']\n else:\n self.trunkOffset = 5\n # self.needGuide = True\n # self.spreaderSize = self.config['spreaderSize'] # 1:20 呎,2:40呎\n # self.container_in_out = 2 # 进出箱信号\n # self.workPosition = 1 # 作业位置\n # self.MhasGate = self.config['MhasGate']\n # self.RhasGate = self.config['RhasGate']\n\n #################################################################\n\n if self.needGuide:\n 
self.distanceM, work_msg_M, self.hasMTrunk = self.cal_distance_M(Mtrunk_x, Mtrunk_y)\n self.distanceR, work_msg_R, self.hasRTrunk = self.cal_distance_R(Rtrunk_x, Rtrunk_y)\n\n # 取最近三次的平均值作为本次的引导数据\n self.distance_filter()\n\n # 根据配置文件的开关选择是否画图显示\n if self.config[\"plotSwitch\"]:\n self.plotMR(plot_x1, plot_y1, plot_x2, plot_y2, work_msg_M, work_msg_R)\n else:\n sleep(0.1)\n self.plt.close()\n\n # log data xy\n if self.plcmessage_printcount > 10 and (abs(self.distanceM) < 200 or abs(self.distanceR) < 200):\n self.dataLog(plot_x1, plot_y1)\n self.dataLog(plot_x2, plot_y2)\n\n # 回传给PLC的信息\n if self.config['PLCSwitch']:\n msg = self.getCPSControl()\n try:\n self.sendPLC(msg)\n except Exception:\n self.disconnected()\n self.plcState = False\n self.plc_reconnect_count += 1\n sleep(0.5)\n else:\n self.plc_reconnect_count = 0\n self.plcState = True\n\n if not self.plcState:\n try:\n self.initPLC()\n except Exception:\n m = \"{}:PLC disconnected {}\".format(\n self.config['deviceID'],traceback.format_exc())\n self.write_log(m)\n print(m)\n self.plcState = False\n self.plc_reconnect_count += 1\n else:\n self.startThread()\n self.plcState = True\n self.plc_reconnect_count = 0\n\n if not self.lidarMState:\n try:\n self.lidarM.disconnect()\n self.lidarM.joinThread()\n self.initLidarM()\n except Exception:\n self.lidarMState = False\n m = \"{}:CPS Mountain Lidar disconnected {}\".format(\n self.config['deviceID'],traceback.format_exc())\n print(m)\n self.write_log(m)\n self.lidarM_reconnect_count += 1\n else:\n self.lidarMState = True\n self.lidarM_reconnect_count = 0\n\n if not self.lidarRState:\n try:\n self.lidarR.disconnect()\n self.lidarR.joinThread()\n self.initLidarR()\n except Exception:\n self.lidarRState = False\n m = \"{}:CPS River lidar disconnected {}\".format(\n self.config['deviceID'], traceback.format_exc()\n )\n self.write_log(m)\n self.lidarR_reconnect_count += 1\n else:\n self.lidarRState = True\n self.lidarR_reconnect_count = 0\n\n # if the reconnecting count is bigger than 15 then exit\n if self.lidarR_reconnect_count > 10 or self.lidarM_reconnect_count > 10 or \\\n self.plc_reconnect_count > 10:\n self.write_log('plc or lidar reconnectint count is more than 15 times and reboot')\n os._exit(0)\n\n # print key message for user\n if self.plcmessage_printcount > 10:\n print(self.plc_msg)\n loop_time = time() - starttime\n msg_all=process_msg.format(\n self.config['deviceID'], work_msg_M +\" / \" + work_msg_R,\n round(self.Mtrunk_tail_point,3),round(self.Mcontainer_tail_point,3),\n round(self.Mcontainer_head_point,3), self.distanceM,\n round(self.Rtrunk_tail_point,3), round(self.Rcontainer_tail_point,3),\n round(self.Rcontainer_head_point, 3), self.distanceR, round(loop_time, 3))\n print(msg_all)\n # 出现连续15次以上的循环超时,则重启程序\n if loop_time > 2:\n time_count += 1\n print('loop time over flow:{}'.format(loop_time))\n self.write_log('loop time over flow:{}'.format(loop_time))\n else:\n time_count = 0\n if time_count > 15:\n self.write_log('loop time count is more than 15 times . 
exit now')\n os._exit(0)\n self.plcmessage_printcount = 0\n\n except KeyboardInterrupt:\n os._exit(0)\n except Exception as e:\n error_count += 1\n self.write_log(\"catched an exception \" + traceback.format_exc())\n traceback.print_exc()\n sleep(0.3)\n finally:\n if error_count > 10:\n self.write_log('error count more than 15 times then exit and reboot')\n os._exit(0)\n # 喂狗\n FD.feed()\n\n\nif __name__ == '__main__':\n try:\n obj = TrunkPosition()\n obj.start()\n except Exception as e:\n traceback.format_exc()\n sleep(1)\n # with open('cpsError.txt','a') as filewriter:\n # timeStamp = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n # filewriter.write(timeStamp + ' \\n'+ traceback.format_exc()+'\\n\\n')\n os._exit(0)\n\n\n\n\n\n\n","sub_path":"CPSCode/double/cps_gy.py","file_name":"cps_gy.py","file_ext":"py","file_size_in_byte":40394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"263167791","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlots and compares two Geojson files: before and after running simplify.py.\n\n Typical usage:\n python3 plotter.py --original_path original-data/geoId-01.geojson\n --simplified_path simplified-data/geoId-01-simple.geojson\n\"\"\"\n\nfrom absl import flags\nfrom absl import app\nimport geopandas as gpd\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('original_path',\n default=None,\n help='Path to original geojson to be compared.')\nflags.DEFINE_string('simplified_path',\n default=None,\n help='Path to simplified geojson to be compared.')\nflags.mark_flag_as_required('original_path')\nflags.mark_flag_as_required('simplified_path')\n\n\ndef main(_):\n original = gpd.read_file(FLAGS.original_path)\n simple = gpd.read_file(FLAGS.simplified_path)\n\n _, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)\n f1 = original.plot(ax=ax1)\n f2 = simple.plot(ax=ax2)\n f1.set_title('Original.')\n f2.set_title('Simplified.')\n\n plt.show()\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"tools/geojson_simplifier/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"75356457","text":"class Solution:\n\t# @param head, a ListNode\n\t# @return a ListNode\n\tdef deleteDuplicates(self, head):\n\t\tif head is None or head.next is None:\n\t\t\treturn head\n\t\tdummy = ListNode(0)\n\t\ttail = dummy\n\t\twhile head is not None and head.next is not None:\n\t\t\tif head.val !=head.next.val:\n\t\t\t\ttail.next=head\n\t\t\t\ttail=head\n\t\t\t\thead=head.next\n\t\t\t\ttail.next=None\n\t\t\telse:\n\t\t\t\thead=remove(head)\n\t\tif head is not None:\n\t\t\ttail.next=head\n\t\treturn dummy.next\n\ndef remove(head):\n\tif head is None and head.next is not None:\n\t\treturn head\n\n\tval = head.val\n\twhile head is not None:\n\t\tif head.val!=val:\n\t\t\treturn 
head\n\t\thead=head.next\n\treturn head\n","sub_path":"code/RemoveDuplicatesfromSortedListII.py","file_name":"RemoveDuplicatesfromSortedListII.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"84339803","text":"##############################################################################\n#\n# Copyright (c) 2006-2007 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Grok interfaces\n\"\"\"\nfrom zope import interface\nfrom zope.interface.interfaces import IInterface\nfrom zope.component.interfaces import IObjectEvent\nfrom zope.publisher.interfaces.http import IHTTPRequest\nfrom zope.container.interfaces import IContainer as IContainerBase\n\n# Expose interfaces from grokcore.* packages as well:\nimport grokcore.annotation.interfaces\nimport grokcore.component.interfaces\nimport grokcore.formlib.interfaces\nimport grokcore.json.interfaces\nimport grokcore.security.interfaces\nimport grokcore.site.interfaces\nimport grokcore.view.interfaces\nimport grokcore.viewlet.interfaces\n\nfrom grokcore.component.interfaces import IContext\nfrom grokcore.component.interfaces import IGrokErrors\n\n\nclass IGrokBaseClasses(grokcore.annotation.interfaces.IBaseClasses,\n grokcore.component.interfaces.IBaseClasses,\n grokcore.security.interfaces.IBaseClasses,\n grokcore.site.interfaces.IBaseClasses,\n grokcore.view.interfaces.IBaseClasses,\n grokcore.json.interfaces.IBaseClasses):\n Model = interface.Attribute(\"Base class for persistent content objects \"\n \"(models).\")\n Container = interface.Attribute(\"Base class for containers.\")\n OrderedContainer = interface.Attribute(\"Base class for ordered containers.\")\n Application = interface.Attribute(\"Base class for applications.\")\n XMLRPC = interface.Attribute(\"Base class for XML-RPC methods.\")\n REST = interface.Attribute(\"Base class for REST views.\")\n Traverser = interface.Attribute(\"Base class for custom traversers.\")\n Indexes = interface.Attribute(\"Base class for catalog index definitions.\")\n Role = interface.Attribute(\"Base class for roles.\")\n\n\nclass IGrokDirectives(grokcore.component.interfaces.IDirectives,\n grokcore.security.interfaces.IDirectives,\n grokcore.site.interfaces.IDirectives,\n grokcore.view.interfaces.IDirectives):\n\n def permissions(permissions):\n \"\"\"Specify the permissions that comprise a role.\n \"\"\"\n\n def site(class_or_interface):\n \"\"\"Specifies the site that an indexes definition is for.\n\n It can only be used inside grok.Indexes subclasses.\n \"\"\"\n\n\nclass IGrokEvents(interface.Interface):\n\n IObjectCreatedEvent = interface.Attribute(\"\")\n\n ObjectCreatedEvent = interface.Attribute(\"\")\n\n IObjectModifiedEvent = interface.Attribute(\"\")\n\n ObjectModifiedEvent = interface.Attribute(\"\")\n\n IObjectCopiedEvent = interface.Attribute(\"\")\n\n ObjectCopiedEvent = interface.Attribute(\"\")\n\n IObjectAddedEvent = interface.Attribute(\"\")\n\n ObjectAddedEvent = interface.Attribute(\"\")\n\n 
IObjectMovedEvent = interface.Attribute(\"\")\n\n ObjectMovedEvent = interface.Attribute(\"\")\n\n IObjectRemovedEvent = interface.Attribute(\"\")\n\n ObjectRemovedEvent = interface.Attribute(\"\")\n\n IContainerModifiedEvent = interface.Attribute(\"\")\n\n ContainerModifiedEvent = interface.Attribute(\"\")\n\n IBeforeTraverseEvent = interface.Attribute(\"\")\n\n IApplicationInitializedEvent = interface.Attribute(\"\")\n\n ApplicationInitializedEvent = interface.Attribute(\"\")\n\n\nclass IGrokAPI(grokcore.formlib.interfaces.IGrokcoreFormlibAPI,\n grokcore.security.interfaces.IGrokcoreSecurityAPI,\n grokcore.site.interfaces.IGrokcoreSiteAPI,\n grokcore.view.interfaces.IGrokcoreViewAPI,\n grokcore.viewlet.interfaces.IGrokcoreViewletAPI,\n IGrokBaseClasses, IGrokDirectives,\n IGrokEvents, IGrokErrors):\n\n # BBB this is deprecated\n def grok(dotted_name):\n \"\"\"Grok a module or package specified by ``dotted_name``.\n\n NOTE: This function will be removed from the public Grok\n public API. For tests and interpreter sessions, use\n grok.testing.grok().\n \"\"\"\n\n # BBB this is deprecated\n def grok_component(name, component, context=None, module_info=None,\n templates=None):\n \"\"\"Grok an arbitrary object. Can be useful during testing.\n\n name - the name of the component (class name, or global instance name\n as it would appear in a module).\n component - the object (class, etc) to grok.\n context - the context object (optional).\n module_info - the module being grokked (optional).\n templates - the templates registry (optional).\n\n Note that context, module_info and templates might be required\n for some grokkers which rely on them.\n\n NOTE: This function will be removed from the public Grok\n public API. For tests and interpreter sessions, use\n grok.testing.grok_component().\n \"\"\"\n\n def notify(event):\n \"\"\"Send ``event`` to event subscribers.\"\"\"\n\n def getSite():\n \"\"\"Get the current site.\"\"\"\n\n def getApplication():\n \"\"\"Return the nearest enclosing `grok.Application`.\"\"\"\n\n IRESTSkinType = interface.Attribute('The REST skin type')\n\n\nclass IGrokView(grokcore.view.interfaces.IGrokView):\n \"\"\"Grok views all provide this interface.\"\"\"\n\n def application_url(name=None):\n \"\"\"Return the URL of the closest application object in the\n hierarchy or the URL of a named object (``name`` parameter)\n relative to the closest application object.\n \"\"\"\n\n def flash(message, type='message'):\n \"\"\"Send a short message to the user.\"\"\"\n\n\nclass IGrokForm(grokcore.formlib.interfaces.IGrokForm, IGrokView):\n \"\"\"All Grok forms provides this interface.\"\"\"\n\n\nclass IREST(interface.Interface):\n context = interface.Attribute(\"Object that the REST handler presents.\")\n\n request = interface.Attribute(\"Request that REST handler was looked\"\n \"up with.\")\n\n body = interface.Attribute(\n \"\"\"The text of the request body.\"\"\")\n\n\nclass IApplication(interface.Interface):\n \"\"\"Marker-interface for grok application factories.\n\n Used to register applications as utilities to look them up and\n provide a list of grokked applications.\n \"\"\"\n\n\nclass IIndexDefinition(interface.Interface):\n \"\"\"Define an index for grok.Indexes.\n \"\"\"\n\n def setup(catalog, name, context):\n \"\"\"Set up index called name in given catalog.\n\n Use name for index name and attribute to index. 
Set up\n index for interface or class context.\n \"\"\"\n\n\nclass IRESTLayer(IHTTPRequest):\n \"\"\"REST-specific Request functionality.\n\n Base Interfaces for defining REST-layers.\n \"\"\"\n\n\nclass IRESTSkinType(IInterface):\n \"\"\"Skin type for REST requests.\n \"\"\"\n\n\nclass IContainer(IContext, IContainerBase):\n \"\"\"A Grok container.\n \"\"\"\n\n\nclass IApplicationInitializedEvent(IObjectEvent):\n \"\"\"A Grok Application has been created with success and is now ready\n to be used.\n\n This event can be used to trigger the creation of contents or other tasks\n that require the application to be fully operational : utilities installed\n and indexes created in the catalog.\"\"\"\n","sub_path":"grok/tags/1.3/src/grok/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"553515615","text":"# coding=utf-8\n\nif __name__ == '__main__':\n S = input()\n K = int(input())\n\n for elm in S:\n if elm != '1':\n ctr = S.index(elm)\n break\n else:\n ctr = -1\n\n if ctr == -1:\n print(1)\n else:\n if ctr+1 > K:\n print(1)\n else:\n print(S[ctr])\n","sub_path":"20180818/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"295021348","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\n\n\ndef getWeeds(net, frame, confi_thresh, NMSthresh, labelsPath, show=False):\n\n \"\"\"\n This function takes a network, frame, confidance threshold, NMS threshold, labels path and show permsion\n and return the high confidance and low confidance weeds in the frame\n \n \"\"\"\n\n LABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n np.random.seed(42)\n COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),dtype=\"uint8\")\n \n #determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n (H, W) = frame.shape[:2]\n\n #makeing blob image for the net input\n blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (512, 512),swapRB=True, crop=False)\n\n #set the net input\n net.setInput(blob)\n\n #forward the input and get the output\n outlayers = net.forward(ln)\n\n #true boxes with high confidance\n boxes = []\n confidances = []\n classIDs = []\n\n #true boxes with high confidance\n unkown_boxes = []\n unkown_confidances = []\n unkown_classIDs = []\n\n weeds = []\n true_weeds = []\n\n #iterate over the output layers\n for out in outlayers:\n\n #itrate over the output grid vector\n for grid in out:\n \n #get the confidance of the two classes 0:Crops and 1:Weeds\n #The Yolov3 grid vector contians |x|y|w|h|Pc|class1 Predection|class2 Predection|....etc \n #Taking the last two items on the vector gives us the predection of out classes\n scores = grid[5:]\n\n #get the indicaes of the max predection\n predicted_class = np.argmax(scores)\n\n #get the predection\n predect = scores[predicted_class]\n\n #predect if there is an object\n object_pred = grid[5]\n\n #To just predect weeds\n if predicted_class == 1:\n\n #rescale the x,y,w and h to the image scale\n box = grid[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n \n #use the center (x, y)-coordinates to derive the top and\n #and left corner of the bounding box\n x = int(centerX - width / 2)\n y = int(centerY - height / 2)\n\n #update our 
list of bounding box coordinates, confidences,\n #and class IDs\n boxes.append([x, y, int(width), int(height),centerX ,centerY])\n confidances.append(float(predect))\n classIDs.append(predicted_class)\n\n \n\n #apply non-maxima suppression to suppress weak, overlapping bounding\n #boxes \n true_boxes = cv2.dnn.NMSBoxes(boxes, confidances, 0, NMSthresh) #TODO: Play with confidance parmater to have the best results\n \n\n if show:\n if len(true_boxes) > 0:\n #loop over the indexes we are keeping\n for i in true_boxes.flatten():\n\n #extract the bounding box coordinates\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n (centerX , centerY) = (boxes[i][4], boxes[i][5])\n cv2.circle(frame, (centerX,centerY) , 5, (0, 0, 255) , 4)\n\n if confidances[i] > confi_thresh:\n true_weeds.append([x, y, int(width), int(height),centerX ,centerY,confidances[i]])\n else:\n weeds.append([x, y, int(width), int(height),centerX ,centerY,confidances[i]])\n\n #draw a bounding box rectangle and label on the image\n color = [int(c) for c in COLORS[classIDs[i]]]\n cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n text = \"{}: {:.4f}\".format(LABELS[classIDs[i]], confidances[i])\n cv2.putText(frame, text, (x + 5, y + 20), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)\n\n # if len(unkown_objects) > 0:\n # #loop over the indexes we are keeping\n # for i in unkown_objects.flatten():\n # #extract the bounding box coordinates\n # (x, y) = (unkown_boxes[i][0], unkown_boxes[i][1])\n # (w, h) = (unkown_boxes[i][2], unkown_boxes[i][3])\n # (centerX , centerY) = (unkown_boxes[i][4], unkown_boxes[i][5])\n # weeds.append([x, y, int(width), int(height),centerX ,centerY,unkown_confidances[i]])\n # cv2.circle(frame, (centerX,centerY) , 5, (255, 0, 0) , 4)\n # #draw a bounding box rectangle and label on the image\n # color = [int(c) for c in COLORS[unkown_classIDs[i]]]\n # cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n # text = \"{}: {:.4f}\".format(LABELS[unkown_classIDs[i]], unkown_confidances[i])\n # cv2.putText(frame, text, (x + 5, y + 20), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)\n\n\n det = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n plt.figure(figsize=(12,8))\n plt.imshow(det)\n plt.show()\n\n return true_weeds, weeds\n\n\nif __name__ == \"__main__\":\n #load the class labels our YOLO model was trained on\n labelsPath = 'C:\\\\Users\\\\MohammedSGF\\\\Desktop\\\\Senior Project\\\\WeedDetec\\\\Crop_and_weed_detection\\\\performing_detection\\\\data\\\\names\\\\obj.names'\n\n\n\n #load weights and cfg\n weightsPath = 'C:\\\\Users\\\\MohammedSGF\\\\Desktop\\\\Senior Project\\\\WeedDetec\\\\Crop_and_weed_detection\\\\performing_detection\\\\data\\\\weights\\\\crop_weed_detection.weights'\n configPath = 'C:\\\\Users\\\\MohammedSGF\\\\Desktop\\\\Senior Project\\\\WeedDetec\\\\Crop_and_weed_detection\\\\performing_detection\\\\data\\\\cfg\\\\crop_weed.cfg'\n\n\n print(\"[INFO] loading YOLO from disk...\")\n net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\n\n frame = cv2.imread('C:\\\\Users\\\\MohammedSGF\\\\Desktop\\\\Senior Project\\\\WeedDetec\\\\Crop_and_weed_detection\\\\performing_detection\\\\data\\\\images\\\\test_4.jpg')\n \n #parameters\n confi_thresh = 0.3\n NMSthresh = 0.5\n\n true_weeds, weeds = getWeeds(net, frame, confi_thresh, NMSthresh, labelsPath, show=True)\n print(true_weeds)\n 
print(weeds)","sub_path":"performing_detection/opencv/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232338107","text":"import aiomas\nimport random\nimport irc.client\n\nimport lrrbot.decorators\nfrom lrrbot import storage\nfrom lrrbot.main import bot, log\n\n@bot.command(\"explain (.*?)\")\n@lrrbot.decorators.throttle(30, params=[4], count=2)\ndef explain_response(lrrbot, conn, event, respond_to, command):\n\t\"\"\"\n\tCommand: !explain TOPIC\n\tMod-Only: true\n\tSection: text\n\n\tProvide an explanation for a given topic.\n\t--command\n\tCommand: !explain show\n\tMod-Only: true\n\tSection: text\n\n\tProvide an explanation for the currently-live show.\n\t\"\"\"\n\tcommand = \" \".join(command.split())\n\tif command.lower() == \"show\":\n\t\tcommand = lrrbot.show_override or lrrbot.show\n\t\tif command is None and lrrbot.is_mod(event):\n\t\t\tconn.privmsg(respond_to, \"Current show not set.\")\n\tresponse_data = storage.data[\"explanations\"].get(command.lower())\n\tif not response_data:\n\t\treturn\n\tif response_data[\"access\"] == \"sub\":\n\t\tif not lrrbot.is_sub(event) and not lrrbot.is_mod(event):\n\t\t\tlog.info(\"Refusing explain %s due to inadequate access\" % command)\n\t\t\tsource = irc.client.NickMask(event.source)\n\t\t\tconn.privmsg(source.nick, \"That is a sub-only command.\")\n\t\t\treturn\n\tif response_data[\"access\"] == \"mod\":\n\t\tif not lrrbot.is_mod(event):\n\t\t\tlog.info(\"Refusing explain %s due to inadequate access\" % command)\n\t\t\tsource = irc.client.NickMask(event.source)\n\t\t\tconn.privmsg(source.nick, \"That is a mod-only command.\")\n\t\t\treturn\n\tresponse = response_data['response']\n\tif isinstance(response, (tuple, list)):\n\t\tresponse = random.choice(response)\n\tconn.privmsg(respond_to, response)\n\n@aiomas.expose\ndef modify_explanations(commands):\n\tlog.info(\"Setting explanations to %r\" % commands)\n\tstorage.data[\"explanations\"] = {k.lower(): v for k, v in commands.items()}\n\tstorage.save()\n\nbot.rpc_server.explain = aiomas.rpc.ServiceDict({\n\t'modify_explanations': modify_explanations,\n})\n","sub_path":"lrrbot/commands/explain.py","file_name":"explain.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"611699853","text":"debug = False\ntest = False\n\nserver = ('127.0.0.1', 7230,)\n\nsession_key = b'x44M9Kp2lnVn3BiFTwI3HkxGHaVFaumD'\n\ndatabase = dict(\n auth=False,\n login='admin',\n passw='+1234567',\n host=['127.0.0.1:27017'],\n name='cck.npark',\n)\n\nredis = dict(\n server=('127.0.0.1', 6379,),\n db=0,\n)\n\nmemcache = dict(\n server=('127.0.0.1', 11211,),\n expire=60 * 60 * 24,\n prefix='cck.npark',\n)\n\nminify = dict(\n plain=True,\n strict=True,\n code=True,\n)\n\nstatic = '/assets'\nmedia = '/media'\n\nmail = dict(\n address=('Н.Парк', 'default@gkcck.ru',),\n recipients=('nekrasovka@gkcck.ru', 'diaksid@mail.ru',),\n server=('smtp.yandex.ru', 465,),\n user=('default@gkcck.ru', '+1234567',),\n ssl=True,\n prefix='Н.ПАРК',\n)\n\nyandex = {'metrika': 35324435,\n 'search': {'key': '',\n 'searchid': 0,\n 'login': ''}}\n","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"430250265","text":"'''\nMorse code, as we are all aware, consists of dots and dashes. 
Lets define a \"Morse code sequence\"\nas simply a series of dots and dashes (and nothing else). So \".--.-.--\" would be a morse code\nsequence, for instance.\n\nDashes obviously take longer to transmit, that's what makes them dashes. Lets say that a dot takes\n1 unit of time to transmit, and a dash takes 2 units of time. Then we can say that the \"size\" of a\ncertain morse code sequence is the sum of the time it takes to transmit the dots and dashes. So,\nfor instance \"..-.\" would have a size of 5 (since there's three dots taking three units of time and\none dash taking two units of time, for a total of 5). The sequence \"-.-\" would also have a size of 5.\n\nIn fact, if you list all the the possible Morse code sequences of size 5, you get:\n\n..... ...- ..-. .-.. -... .-- -.- --.\n\nA total of 8 different sequences.\n\nYour task is to write a function called Morse(X) which generates all morse code sequences of size X\nand returns them as an array of strings (so Morse(5) should return the 8 strings I just mentioned,\nin some order).\n\nUse your function to generate and print out all sequences of size 10.\n\nBonus: Try and write your code so that it can generate Morse(35) (or even Morse(36) or higher, but that\ntakes a significant amount of memory) in a \"reasonable\" amount of time. \"Reasonable\" obviously depend on\nwhat computer and programming language you are using, but a good rule of thumb should be that it should\nfinish in less than a minute.\nBeing a morse enthusiast I have dealt with the code itself, numbers and letters.\n'''\n\nimport re\n# read in the morse letters and numbers\nmorse_dict = {}\n# create a morse dictionary\nwith open('morse.txt') as f:\n while True:\n line = f.readline()\n if not line:\n break\n if line[0].isalnum():\n a = line[0]\n b = re.findall('\\w\\s(\\S+)', line)\n morse_dict[a] = b\ndef morse(x):\n # convert morse to units of time\n morse_count = {} # morse character and unit count\n #N = 2 # value of sequence required\n sequence = [] # number of sequences found\n code_count = 0 # unit count of character\n for k,v in morse_dict.items():\n for char in v[0]:\n if char == '.':\n code_count += 1\n else:\n code_count += 2\n if code_count == x:\n sequence.append(v)\n morse_count[k] = code_count\n code_count = 0\n return sequence\n\nif __name__ == '__main__':\n N = 10\n ans = morse(N)\n print(ans)","sub_path":"albums/3/challenge77_easy/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"286431826","text":"\"\"\"\n2.\tПосчитать четные и нечетные цифры введенного натурального числа.\nНапример, если введено число 34560, то у него 3 четные цифры\n(4, 6 и 0) и 2 нечетные (3 и 5).\n\nПодсказка:\nДля извлечения цифр числа используйте арифм. операции\n\nПример:\nВведите натуральное число: 44\nВ числе 44 всего 2 цифр, из которых 2 чётных и 0 нечётных\n\nЗДЕСЬ ДОЛЖНА БЫТЬ РЕАЛИЗАЦИЯ ЧЕРЕЗ ЦИКЛ\n\"\"\"\n\nwhile True:\n try:\n num = int(input('Введите натуральное число (для выхода введите 0): '))\n except Exception:\n print('Некорректный ввод')\n continue\n if num == 0:\n print('Программа завершена')\n break\n even = 0\n odd = 0\n for i in str(abs(num)):\n if int(i) % 2 == 0:\n even += 1\n else:\n odd += 1\n print(f'В числе {num} всего {even + odd} цифр, из которых {even} четных и {odd} нечетных')\n","sub_path":"Урок 2. 
Практическое задание/task_2/task_2_1.py","file_name":"task_2_1.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83511598","text":"from SPARQLWrapper import SPARQLWrapper, JSON\nfrom wikidata_models import WikidataEntity\n\nwikidata_label_query_cache = {} \n\ndef query_wikidata_for_label_and_description(items, sparql_endpoint):\n items = ' wd:'.join(items)\n items = \"wd:\" + items\n\n query = \"\"\"SELECT ?qnode ?qnodeLabel ?qnodeDescription WHERE \n {{\n VALUES ?qnode {{{items}}}\n SERVICE wikibase:label {{ bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\". }}\n }}\n \"\"\".format(items=items)\n sparql = SPARQLWrapper(sparql_endpoint, agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n try:\n results = sparql.query().convert()\n except Exception as e:\n raise e\n response = dict()\n for i in range(len(results[\"results\"][\"bindings\"])):\n try:\n qnode = results[\"results\"][\"bindings\"][i][\"qnode\"][\"value\"].split(\n \"/\")[-1]\n label = results[\"results\"][\"bindings\"][i][\"qnodeLabel\"][\"value\"]\n desc = results[\"results\"][\"bindings\"][i][\"qnodeDescription\"][\"value\"]\n response[qnode] = {'label': label, 'description': desc}\n except (IndexError, KeyError):\n pass\n return response\n\n\ndef get_labels_and_descriptions(items, sparql_endpoint):\n response = dict()\n missing_items = {}\n for item in items:\n wp = WikidataEntity.query.filter_by(wd_id=item).first()\n if wp:\n label = desc = \"\"\n if wp.label:\n label = wp.label\n if wp.description:\n desc = wp.description\n response[item] = dict(label=label, description=desc)\n else:\n if item not in wikidata_label_query_cache:\n missing_items[item]=True\n else:\n if item not in wikidata_label_query_cache:\n missing_items[item]=True\n try:\n if missing_items:\n wikidata_label_query_cache.update(missing_items)\n missing_items=list(missing_items.keys())\n additional_items = query_wikidata_for_label_and_description(\n missing_items, sparql_endpoint)\n response.update(additional_items)\n try:\n for item in additional_items:\n WikidataEntity.add_or_update(item, do_session_commit=False, **additional_items[item])\n except Exception as e:\n print(e)\n WikidataEntity.do_commit()\n\n except: # eg 502 bad gateway error\n pass\n return response\n\n\ndef get_qnode_url(id):\n url=\"\"\n first_letter=str(id).upper()[0]\n try:\n num=int(id[1:])\n if first_letter==\"P\" and num<10000:\n url=\"https://www.wikidata.org/wiki/Property:\"+id\n if first_letter==\"Q\" and num<1000000000:\n url=\"https://www.wikidata.org/wiki/\"+id\n except: #conversion to int failed, is not Pnum or Qnum\n pass\n return url\n\nclass QNode:\n def __init__(self, id, value, context=\"\", label=\"\", description=\"\"):\n self.id = id\n self.value = value\n self.context = context\n self.label = label\n self.description = description\n self.url=get_qnode_url(self.id)\n\n def update(self, label=\"\", description=\"\", **kwargs):\n self.label=label\n self.description=description\n ","sub_path":"backend/wikidata_utils.py","file_name":"wikidata_utils.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30817571","text":"\"\"\"\nContains all the widget representations used in the designer and specifies all the styles that can be applied to 
them\n\"\"\"\nimport os\n# ======================================================================= #\n# Copyright (C) 2019 Hoverset Group. #\n# ======================================================================= #\nimport re\nimport sys\nfrom tkinter import IntVar, ttk, filedialog, StringVar\n\nimport studio.feature.variable_manager as var_manager\nfrom hoverset.ui.icons import get_icon_image\nfrom hoverset.ui.panels import FontInput, ColorPicker\nfrom hoverset.ui.pickers import ColorDialog\nfrom hoverset.ui.widgets import (CompoundList, Entry, SpinBox, Spinner, Frame, Application, set_ttk_style,\n Label, ToggleButton, Button)\nfrom hoverset.util.color import to_hex\nfrom hoverset.util.validators import numeric_limit\nfrom studio.lib.properties import all_supported_cursors, BUILTIN_BITMAPS\n\n\nclass Editor(Frame):\n\n def __init__(self, master, style_def=None):\n super().__init__(master)\n self.config(**self.style.dark, width=150, height=25)\n self.pack_propagate(False)\n self.grid_propagate(0)\n self._on_change = None\n\n def on_change(self, func, *args, **kwargs):\n self._on_change = lambda val: func(val, *args, **kwargs)\n\n def set(self, value):\n raise NotImplementedError()\n\n def get(self):\n raise NotImplementedError()\n\n\nclass Choice(Editor):\n class ChoiceItem(CompoundList.BaseItem):\n\n def render(self):\n if not self.value:\n Label(self, **self.style.dark_text, text=\"select\", anchor=\"w\").pack(fill=\"both\")\n return\n Label(self, **self.style.dark_text, text=self._value, anchor=\"w\").pack(fill=\"both\")\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n if style_def is None:\n style_def = {}\n self.style_def = style_def\n self._spinner = Spinner(self, **self.style.dark_input)\n self._spinner.pack(fill=\"x\")\n self._spinner.on_change(self.spinner_change)\n self.set_up()\n values = style_def.get(\"options\", ())\n if values:\n if not style_def.get('allow_empty', True):\n self._spinner.set_values(('', *values))\n else:\n self._spinner.set_values(values)\n\n def set_up(self):\n self._spinner.set_item_class(Choice.ChoiceItem)\n\n def spinner_change(self, value):\n if self._on_change is not None:\n self._on_change(value)\n\n def set(self, value):\n # Convert to string as values of type _tkinter.Tcl_Obj are common in ttk and may cause unpredictable behaviour\n self._spinner.set(str(value))\n\n def get(self):\n return self._spinner.get()\n\n\nclass Boolean(Editor):\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n self.config(**self.style.dark, **self.style.dark_highlight_active)\n self._var = IntVar()\n self._check = ttk.Checkbutton(self, command=self.check_change, text='',\n variable=self._var)\n set_ttk_style(self._check, **self.style.dark_checkbutton)\n self._check.pack(fill=\"x\")\n\n def check_change(self):\n if self._var.get():\n self._check.config(text=\"True\")\n else:\n self._check.config(text=\"False\")\n if self._on_change is not None:\n self._on_change(self._var.get())\n\n def set(self, value):\n if value:\n self._var.set(1)\n self._check.config(text=\"True\")\n else:\n self._check.config(text=\"False\")\n self._var.set(0)\n\n def get(self):\n return bool(self._var.get())\n\n\nclass Relief(Choice):\n class ReliefItem(Choice.ChoiceItem):\n\n def render(self):\n if not self.value:\n Label(self, width=2, **self.style.dark_text, bd=2).pack(side=\"left\")\n Label(self, text=\"select\", **self.style.dark_text).pack(side=\"left\", padx=4)\n else:\n Label(self, relief=self.value, width=2, 
**self.style.dark_text, bd=2).pack(side=\"left\")\n Label(self, text=self.value, **self.style.dark_text).pack(side=\"left\", padx=4)\n\n def set_up(self):\n self._spinner.set_item_class(Relief.ReliefItem)\n self._spinner.set_values((\n '', \"flat\", \"raised\", \"sunken\", \"groove\", \"ridge\"\n ))\n\n\nclass Cursor(Choice):\n class CursorItem(Choice.ChoiceItem):\n\n def render(self):\n if not self.value:\n super().render()\n return\n Label(self, **self.style.dark_text, cursor=self.value,\n text=self.value, anchor='w').pack(fill=\"both\")\n\n def set_up(self):\n self._spinner.set_item_class(Cursor.CursorItem)\n self._spinner.set_values(('',) + all_supported_cursors())\n\n\nclass Bitmap(Choice):\n class BitmapItem(Choice.ChoiceItem):\n\n def render(self):\n if not self.value:\n Label(self, **self.style.dark_text, width=2).pack(side=\"left\")\n Label(self, **self.style.dark_text, text=\"select\").pack(side=\"left\")\n else:\n Label(self, **self.style.dark_text, bitmap=self.value).pack(side=\"left\")\n Label(self, **self.style.dark_text, text=self.value).pack(side=\"left\")\n\n def set_up(self):\n self._spinner.set_item_class(Bitmap.BitmapItem)\n self._spinner.set_values(('',) + BUILTIN_BITMAPS)\n\n\nclass Layout(Choice):\n class LayoutItem(Choice.ChoiceItem):\n\n def render(self):\n Label(self, **self.style.dark_text, anchor=\"w\", image=get_icon_image(self.value.icon, 14, 14),\n text=\" \" + self.value.name, compound='left').pack(fill=\"x\")\n\n def set_up(self):\n self._spinner.set_item_class(Layout.LayoutItem)\n self._spinner.set_values(self.style_def.get(\"options\"))\n\n def set(self, value):\n # Override default conversion of value to string by Choice class\n self._spinner.set(value)\n\n\nclass Color(Editor):\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n self.config(**self.style.dark_highlight_active)\n self._entry = Entry(self, **self.style.dark_input)\n self._color_button = Label(self, relief='groove', bd=1)\n self._color_button.bind('', self._chooser)\n self._color_button.place(x=2, y=2, width=20, height=20)\n self._picker = ColorPicker(self, **self.style.dark_button)\n self._picker.place(relx=1, x=-22, y=0, width=20, height=20)\n self._picker.on_pick(self.set)\n self._entry.place(x=22, y=0, relheight=1, relwidth=1, width=-46)\n self._entry.on_change(self._change)\n\n def _change(self, value=None):\n value = self._entry.get() if value is None else value\n val = self._parse_color(value)\n if val:\n self._color_button.config(bg=value)\n if self._on_change:\n self._on_change(value)\n\n def _parse_color(self, value):\n try:\n val = self.winfo_rgb(value)\n except Exception:\n return \"\"\n val = tuple(map(lambda x: round((x/65535)*255), val))\n return to_hex(val)\n\n def get(self):\n return self._entry.get()\n\n def set(self, value):\n self.adjust(value)\n\n def on_change(self, func, *args, **kwargs):\n super().on_change(func, *args, **kwargs)\n\n def adjust(self, value):\n self._entry.update_idletasks()\n self._entry.set(value)\n try:\n self._color_button.config(bg=value)\n except Exception:\n self._color_button.config(bg=\"#000000\")\n\n def _chooser(self, *_):\n dialog = ColorDialog(self.window)\n dialog.update_idletasks()\n self.window.update_idletasks()\n dialog.post(self._color_button, side=\"auto\", padding=4)\n if self.get().startswith(\"#\"):\n dialog.set(self.get())\n elif self.get():\n dialog.set(self._parse_color(self.get()))\n dialog.on_change(self.adjust)\n\n\nclass TextMixin:\n\n def _change(self, *_):\n if self._on_change:\n 
self._on_change(self.get())\n\n def get(self):\n return self._entry.get()\n\n def set(self, value):\n self._entry.set(value)\n\n\nclass Text(TextMixin, Editor):\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n if style_def is None:\n style_def = {}\n self.config(**self.style.dark_highlight_active)\n self._entry = Entry(self, **self.style.dark_input)\n self._entry.pack(fill=\"x\")\n self._entry.on_entry(self._change)\n if style_def.get(\"readonly\", False):\n self._entry.config(state='disabled')\n\n\nclass Number(TextMixin, Editor):\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n self.config(**self.style.dark_highlight_active)\n self._entry = SpinBox(self, from_=-9999, to=9999, **self.style.spinbox)\n self._entry.config(**self.style.no_highlight)\n self._entry.set_validator(numeric_limit, -9999, 9999)\n self._entry.pack(fill=\"x\")\n self._entry.on_change(self._change)\n\n\nclass Duration(TextMixin, Editor):\n UNITS = ('ns', 'ms', 'sec', 'min', 'hrs')\n MULTIPLIER = {\n 'ns': 1e-6, 'ms': 1, 'sec': 1e3, 'min': 6e4, 'hrs': 3.6e5\n }\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n if style_def is None:\n style_def = {}\n self.config(**self.style.dark_highlight_active)\n self._entry = SpinBox(self, from_=0, to=1e6, **self.style.spinbox)\n self._entry.config(**self.style.no_highlight)\n self._entry.set_validator(numeric_limit, 0, 1e6)\n self._entry.on_change(self._change)\n self._unit = Spinner(self, **self.style.dark_input)\n self._unit.config(**self.style.no_highlight, width=50)\n self._unit.set_item_class(Choice.ChoiceItem)\n self._unit.set_values(Duration.UNITS)\n self._metric = style_def.get(\"units\", \"ms\")\n self._unit.set(self._metric)\n self._unit.pack(side=\"right\")\n self._unit.on_change(self._change)\n self._entry.pack(side='left', fill=\"x\")\n\n def get(self):\n if self._entry.get() == '':\n return ''\n else:\n m1 = self.MULTIPLIER.get(self._unit.get(), 1) # Multiplier 1 converts to milliseconds, default is ms\n m2 = self.MULTIPLIER.get(self._metric, 1) # Multiplier 2 converts to required units, default is ms\n return int((self._entry.get() * m1) / m2)\n\n\nclass Font(Editor):\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n self.config(height=50, **self.style.dark_highlight_active)\n self._input = FontInput(self)\n self._input.pack(fill='both', expand=True)\n self.on_change = self._input.on_change\n\n def get(self):\n return self._input.get()\n\n def set(self, value):\n self._input.set(value)\n\n\nclass Dimension(Number):\n SHORT_FORMS = {\n \"pixels\": \"px\",\n }\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n if style_def is None:\n style_def = {}\n self._entry.config(from_=0, to=1e6)\n self._entry.set_validator(numeric_limit, 0, 1e6)\n self._entry.pack_forget()\n unit = self.SHORT_FORMS.get(style_def.get(\"units\", \"pixels\"), 'px')\n Label(self, **self.style.dark_text_passive, text=unit).pack(side=\"right\")\n self._entry.pack(side=\"left\", fill=\"x\")\n\n\nclass Anchor(Editor):\n\n def __init__(self, master, style_def):\n super().__init__(master, style_def)\n style_def = style_def if style_def else {}\n # This flag determines whether multiple anchors are allowed at a time\n self.multiple = style_def.get(\"multiple\", True) # set to True to obtain a sticky property editor\n self.config(width=150, height=110)\n self.n = ToggleButton(self, text=\"N\", width=20, height=20)\n 
self.n.grid(row=0, column=0, columnspan=3, sticky='ns')\n self.w = ToggleButton(self, text='W', width=20, height=20)\n self.w.grid(row=1, column=0, sticky='ew')\n self.pad = Frame(self, width=60, height=60, **self.style.dark, **self.style.dark_highlight_active)\n self.pad.grid(row=1, column=1, padx=1, pady=1)\n self.pad.grid_propagate(0)\n self.pad.grid_columnconfigure(0, minsize=60)\n self.pad.grid_rowconfigure(0, minsize=60)\n self.floating = Frame(self.pad, **self.style.dark_on_hover, width=20, height=20)\n self.floating.grid(row=0, column=0, pady=1, padx=1)\n self.e = ToggleButton(self, text=\"E\", width=20, height=20)\n self.e.grid(row=1, column=2, sticky='ew')\n self.s = ToggleButton(self, text='S', width=20, height=20)\n self.s.grid(row=2, column=0, columnspan=3, sticky='ns')\n self.anchors = {\n \"n\": self.n, \"w\": self.w, \"e\": self.e, \"s\": self.s\n }\n self._order = (\"n\", \"s\", \"e\", \"w\")\n self._selected = []\n self._exclusive_pairs = ({\"n\", \"s\"}, {\"e\", \"w\"})\n self._is_multiple = re.compile(r'(.*[ns].*[ns])|(.*[ew].*[ew])')\n for anchor in self.anchors:\n self.anchors[anchor].on_change(self._change, anchor)\n\n def _is_exclusive_of(self, anchor1, anchor2):\n return {anchor1, anchor2} in self._exclusive_pairs\n\n def _change(self, _, anchor):\n if not self.multiple:\n self._sanitize(anchor)\n self._adjust()\n if self._on_change:\n self._on_change(self.get())\n\n def _sanitize(self, anchor):\n ex_anchor = [i for i in self.get() if self._is_exclusive_of(i, anchor)]\n if len(ex_anchor):\n self.anchors.get(ex_anchor[0]).set(False)\n\n def _adjust(self):\n sticky = '' if self.get() == 'center' else self.get()\n self.floating.grid(row=0, column=0, pady=1, padx=1, sticky=sticky)\n\n def get(self):\n anchor = ''.join([i for i in self._order if self.anchors[i].get()])\n # No anchor means center but only when we are acting as an anchor editor\n # if self.multiple is True then we are a stickiness editor and an empty string will suffice\n if anchor == '':\n if not self.multiple:\n return 'center'\n return anchor\n\n def set(self, value):\n # Ignore invalid values\n if self._is_multiple.match(str(value)) and not self.multiple:\n return\n # Assume no anchor means center\n value = '' if value == 'center' else value\n for anchor in str(value):\n self.anchors.get(anchor).set(False)\n if self.anchors.get(anchor):\n self.anchors.get(anchor).set(True)\n self._adjust()\n\n\nclass Image(Text):\n\n def __init__(self, master, style_def=None):\n super().__init__(master, style_def)\n self._picker = Button(self, **self.style.dark_button, width=25, height=25, text=\"...\")\n self._entry.pack_forget()\n self._picker.pack(side=\"right\")\n self._entry.pack(side=\"left\", fill=\"x\")\n self._picker.on_click(self._pick)\n\n def _change(self, *_):\n # Do not broadcast changes for invalid paths\n # TODO Add indicator for invalid paths\n if not os.path.exists(self.get()):\n return\n super()._change()\n\n def _pick(self, *_):\n path = filedialog.askopenfilename(parent=self)\n if path:\n self._entry.set(path)\n if self._on_change:\n self._on_change(path)\n\n\nclass Variable(Choice):\n class VariableChoiceItem(Choice.ChoiceItem):\n\n def render(self):\n if self.value:\n item = var_manager.VariableItem(self, self.value)\n item.pack(fill=\"both\")\n item.pack_propagate(0)\n else:\n Label(self, text=\"\", **self.style.dark_text).pack(fill=\"x\")\n\n def set_up(self):\n var_pane: var_manager.VariablePane = var_manager.VariablePane.get_instance()\n var_pane.register_editor(self)\n values = [i.var for i 
in var_pane.variables]\n self._spinner.set_item_class(Variable.VariableChoiceItem)\n self._spinner.set_values((\n '', *values,\n ))\n\n def set(self, value):\n # Override default conversion of value to string by Choice class\n var_pane = var_manager.VariablePane.get_instance()\n var = list(filter(lambda x: x.name == value, var_pane.variables))\n if len(var):\n value = var[0].var\n self._spinner.set(value)\n\n def on_var_add(self, var):\n self._spinner.add_values(var)\n\n def on_var_delete(self, var):\n self._spinner.remove_value(var)\n\n def destroy(self):\n var_manager.VariablePane.get_instance().unregister_editor(self)\n super().destroy()\n\n\nclass Stringvariable(Variable):\n # TODO Check for any instances where class is needed otherwise delete\n\n def set_up(self):\n var_pane: var_manager.VariablePane = var_manager.VariablePane.get_instance()\n # filter to obtain only string variables\n var_pane.register_editor(self)\n values = [i.var for i in var_pane.variables if i.var.__class__ == StringVar]\n self._spinner.set_item_class(Variable.VariableChoiceItem)\n self._spinner.set_values((\n '', *values,\n ))\n\n\ndef get_editor(parent, definition):\n type_ = definition.get(\"type\").capitalize()\n editor = getattr(sys.modules[__name__], type_, Text)\n return editor(parent, definition)\n\n\nclass StyleItem(Frame):\n\n def __init__(self, parent, style_definition, on_change=None):\n super().__init__(parent.body)\n self.definition = style_definition\n self.name = style_definition.get(\"name\")\n self.config(**self.style.dark)\n self._label = Label(self, **parent.style.dark_text_passive, text=style_definition.get(\"display_name\"),\n anchor=\"w\")\n self._label.grid(row=0, column=0, sticky='ew')\n # self._label.config(**parent.style.dark_highlight_active)\n self._editor = get_editor(self, style_definition)\n self._editor.grid(row=0, column=1, sticky='ew')\n self.grid_columnconfigure(1, weight=1, uniform=1)\n self.grid_columnconfigure(0, weight=1, uniform=1)\n self._on_change = on_change\n self._editor.set(style_definition.get(\"value\"))\n self._editor.on_change(self._change)\n\n def _change(self, value):\n if self._on_change:\n self._on_change(self.name, value)\n\n def on_change(self, callback, *args, **kwargs):\n self._on_change = lambda name, val: callback(name, val, *args, **kwargs)\n\n def hide(self):\n self.grid_propagate(False)\n self.configure(height=0, width=0)\n\n def show(self):\n self.grid_propagate(True)\n\n def set(self, value):\n self._editor.set(value)\n\n\nif __name__ == '__main__':\n root = Application()\n root.load_styles(\"../../hoverset/ui/themes/default.css\")\n boolean = Boolean(root)\n boolean.pack(side=\"top\")\n\n relief = Relief(root)\n relief.pack(side=\"top\")\n relief.on_change(lambda x: print(x))\n relief.set(\"groove\")\n\n cursor = Cursor(root)\n cursor.pack(side=\"top\")\n cursor.set(\"spider\")\n\n bitmap = Bitmap(root)\n bitmap.pack(side=\"top\")\n bitmap.set(\"hourglass\")\n\n choice = Choice(root, {'options': (\"orange\", \"red\", \"yellow\")})\n choice.pack(side=\"top\")\n choice.set(\"oran\")\n\n color = Color(root)\n color.pack(side=\"top\")\n color.on_change(lambda x: print(x))\n color.set(\"#dfdf45\")\n\n text = Text(root)\n text.pack(side=\"top\")\n text.on_change(lambda x: print(x))\n text.set(\"This is a sample\")\n\n number = Number(root)\n number.pack(side=\"top\")\n number.on_change(lambda x: print(x))\n number.set(456)\n\n duration = Duration(root, {\"units\": \"ms\"})\n duration.pack(side=\"top\")\n duration.on_change(lambda x: print(x))\n 
duration.set(456)\n\n anc = Anchor(root, {\"units\": \"ms\"})\n anc.pack(side=\"top\")\n anc.on_change(lambda x: print(x))\n anc.set('nswe')\n\n font = Font(root)\n font.pack(side=\"top\")\n font.on_change(lambda x: print(x))\n font.set(\"TkDefaultFont\")\n root.mainloop()\n","sub_path":"studio/ui/editors.py","file_name":"editors.py","file_ext":"py","file_size_in_byte":20470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"204274443","text":"# This program registers 'cluster_status' which schedules\n# computations when a processor is available.\n\n# job computation runs at dispynode servers\ndef compute(path):\n import hashlib, time, os\n csum = hashlib.sha1()\n with open(os.path.basename(path), 'rb') as fd:\n while True:\n data = fd.read(1024000)\n if not data:\n break\n csum.update(data)\n time.sleep(5)\n return csum.hexdigest()\n\n\n# 'cluster_status' notification function. It is executed by dispy\n# to indicate node / job status changes. Here node iniitialization and\n# job done status are used to schedule jobs, so at most one job is\n# running on a node (even if a node has more than one processor). Data\n# files are assumed to be 'data000', 'data001' etc.\ndef cluster_status(status, node, job):\n if status == dispy.DispyJob.Finished:\n print('sha1sum for %s: %s' % (job.id, job.result))\n elif status == dispy.DispyJob.Terminated:\n print('sha1sum for %s failed: %s' % (job.id, job.exception))\n elif status == dispy.DispyNode.Initialized:\n print('node %s with %s CPUs available' % (node.ip_addr, node.avail_cpus))\n else:\n return\n\n global submitted\n data_file = 'data%03d' % submitted\n if os.path.isfile(data_file):\n submitted += 1\n # 'node' and 'dispy_job_depends' are consumed by dispy;\n # 'compute' is called with only 'data_file' as argument(s)\n job = cluster.submit_node(node, data_file, dispy_job_depends=[data_file])\n job.id = data_file\n \n\nif __name__ == '__main__':\n import dispy, sys, os\n cluster = dispy.JobCluster(compute, cluster_status=cluster_status)\n submitted = 0\n while True:\n try:\n cmd = sys.stdin.readline().strip().lower()\n except KeyboardInterrupt:\n break\n if cmd == 'quit' or cmd == 'exit':\n break\n\n cluster.wait()\n cluster.print_status()\n","sub_path":"py3/examples/job_scheduler.py","file_name":"job_scheduler.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"380537908","text":"\"\"\"\nSlow event-related design for HRF estimation for M1, V1, and A1.\n\nSingle-run task that includes the following conditions:\n- flashing checkerboard\n- finger tapping\n- listening to tones/music\n\nOriginally created by Jakub Kaczmarzyk and adapted to combine tasks.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport pandas as pd\n\n# These tracks are 20 seconds long.\n# 10s versions created by\n# https://www.audiocheck.net/audiofrequencysignalgenerator_sinetone.php\n# Durations doubled with Audacity.\n_TONE_FILES = ['audio/250Hz_20s.wav',\n 'audio/500Hz_20s.wav',\n 'audio/600Hz_20s.wav',\n 'audio/750Hz_20s.wav',\n 'audio/850Hz_20s.wav']\nTRIAL_DICT = {1: 'checkerboard', 2: 'tone', 3: 'fingertapping'}\nN_CONDS = len(TRIAL_DICT.keys()) # audio, checkerboard, tapping\nN_BLOCKS = 5 # for detection task\nN_TRIALS = 14 # for each condition\nDUR_RANGE = (1, 5) # avg of 3s\nITI_RANGE = (3, 11.84) # max determined to minimize difference from TASK_TIME\nTASK_TIME = 438 # time for trials in 
task\nSTART_DUR = 6 # fixation before trials\nEND_DUR = 6 # fixation after trials\n# total time = TASK_TIME + START_DUR + END_DUR = 450 = 7.5 mins\n\n\ndef detection_timing():\n block_dur = 18\n rest_dur = 12\n durs = [block_dur] * N_BLOCKS * N_CONDS\n itis = [rest_dur] * N_BLOCKS * N_CONDS\n trial_types = list(range(1, N_CONDS+1)) * N_BLOCKS\n trial_types = [TRIAL_DICT[tt] for tt in trial_types]\n np.random.shuffle(trial_types)\n timing_info = np.vstack((durs, itis, trial_types)).T\n timing_df = pd.DataFrame(columns=['duration', 'iti', 'trial_type'],\n data=timing_info)\n return timing_df\n\n\ndef estimation_timing(seed=None):\n \"\"\"\n Produces lists containing n_conds arrays of n_trials length for trial\n durations and intertrial intervals based on a uniform distribution.\n The process is iterative to minimize the amount of duration lost\n \"\"\"\n length = (np.average(DUR_RANGE) + np.average(ITI_RANGE)) * N_TRIALS\n if np.abs((length * N_CONDS) - TASK_TIME) > 1:\n raise Exception('Inputs do not seem compatible with total desired '\n 'time.')\n missing_time_per_cond = np.finfo(dtype='float64').max\n if not seed:\n seed = np.random.randint(1000, 9999)\n\n while not np.isclose(missing_time_per_cond, 0.0, atol=.5):\n state = np.random.RandomState()\n trial_durs = state.uniform(DUR_RANGE[0], DUR_RANGE[1], N_TRIALS)\n trial_itis = state.uniform(ITI_RANGE[0], ITI_RANGE[1], N_TRIALS)\n missing_time_per_cond = length - np.sum(trial_durs + trial_itis)\n seed += 1\n\n # Fill in one trial's ITI with missing time for constant total time\n missing_time_per_cond += (TASK_TIME / N_CONDS) - length\n trial_itis[-1] += missing_time_per_cond\n\n all_cond_trial_durs = [np.random.permutation(trial_durs) for _ in range(N_CONDS)]\n all_cond_trial_itis = [np.random.permutation(trial_itis) for _ in range(N_CONDS)]\n trials = list(range(1, N_CONDS + 1)) * N_TRIALS\n np.random.shuffle(trials)\n durations = []\n itis = []\n c = {t: 0 for t in np.unique(trials)}\n for condition in trials:\n durations.append(all_cond_trial_durs[condition-1][c[condition]])\n itis.append(all_cond_trial_itis[condition-1][c[condition]])\n c[condition] += 1\n\n trials = [TRIAL_DICT[t] for t in trials]\n timing_info = np.vstack((durations, itis, trials)).T\n timing_df = pd.DataFrame(columns=['duration', 'iti', 'trial_type'],\n data=timing_info)\n return timing_df, seed\n\n\ndef determine_timing(ttype, seed=None):\n if ttype not in ['Detection', 'Estimation']:\n raise Exception()\n\n n_tones = len(_TONE_FILES)\n n_repeats = int(np.ceil(N_TRIALS / n_tones))\n tone_nums = np.arange(n_tones)\n tone_nums = np.repeat(tone_nums, n_repeats)\n np.random.shuffle(tone_nums) # pylint: disable=E1101\n tone_files = [_TONE_FILES[tn] for tn in tone_nums]\n\n # set order of trials\n if ttype == 'Estimation':\n timing_df, seed = estimation_timing(seed=seed)\n elif ttype == 'Detection':\n # temporary requirement that trials divide evenly into block\n timing_df = detection_timing()\n\n c = 0\n for trial in timing_df.index:\n if timing_df.loc[trial, 'trial_type'] == 'tone':\n timing_df.loc[trial, 'stimulus'] = tone_files[c]\n c += 1\n else:\n timing_df.loc[trial, 'stimulus'] = None\n return timing_df, seed\n\n\ndef main():\n subjects = np.arange(1, 5, dtype=int).astype(str) # 5\n sessions = np.arange(1, 11, dtype=int).astype(str) # 10\n ttypes = ['Detection', 'Estimation']\n d = {}\n seed = 1\n for sub in subjects:\n print('Compiling subject {0}'.format(sub))\n d[sub] = {}\n for ses in sessions:\n print(' Compiling session {0}'.format(ses))\n d[sub][ses] = 
{}\n for ttype in ttypes:\n print('\\tCompiling {0} task'.format(ttype))\n print('\\t Updating seed to {0}'.format(seed))\n df, seed = determine_timing(ttype, seed=seed)\n\n df.to_csv('config/sub-{0}_ses-{1}_task-primary{2}_run-01_'\n 'config.tsv'.format(sub.zfill(2), ses.zfill(2), ttype),\n sep='\\t', index=False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"generate_config_files.py","file_name":"generate_config_files.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148841926","text":"from zeit.cms.i18n import MessageFactory as _\nimport collections\nimport gocept.lxml.interfaces\nimport grokcore.component as grok\nimport pkg_resources\nimport zeit.cms.content.property\nimport zeit.cms.content.reference\nimport zeit.cms.content.xmlsupport\nimport zeit.cms.type\nimport zeit.edit.container\nimport zeit.edit.interfaces\nimport zeit.newsletter.interfaces\nimport zope.component\nimport zope.interface\n\n\nBODY_NAME = 'newsletter_body'\n\n\n@zope.interface.implementer(zeit.newsletter.interfaces.INewsletter)\nclass Newsletter(zeit.cms.content.xmlsupport.XMLContentBase,\n collections.Mapping):\n\n default_template = pkg_resources.resource_string(\n __name__, 'template.xml').decode('utf-8')\n\n subject = zeit.cms.content.property.ObjectPathProperty(\n '.head.subject', zeit.newsletter.interfaces.INewsletter['subject'])\n\n def keys(self):\n return [BODY_NAME]\n\n def __iter__(self):\n return iter(self.keys())\n\n def __len__(self):\n return len(self.keys())\n\n def __getitem__(self, key):\n if key == BODY_NAME:\n area = zope.component.getMultiAdapter(\n (self, self.xml.body),\n zeit.edit.interfaces.IArea,\n name=key)\n return zope.container.contained.contained(area, self, key)\n raise KeyError(key)\n\n @property\n def body(self):\n return self[BODY_NAME]\n\n def send(self):\n category = zeit.newsletter.interfaces.INewsletterCategory(self)\n category.last_created = zope.dublincore.interfaces.IDCTimes(\n self).created\n self._send()\n\n def send_test(self, to):\n self._send(to)\n\n def _send(self, to=None):\n import zeit.optivo.interfaces # UI-only dependency\n category = zeit.newsletter.interfaces.INewsletterCategory(self)\n renderer = zope.component.getUtility(\n zeit.newsletter.interfaces.IRenderer)\n rendered = renderer(self)\n optivo = zope.component.getUtility(zeit.optivo.interfaces.IOptivo)\n if to is None:\n optivo.send(\n category.mandant, category.recipientlist,\n self.subject, rendered['html'], rendered['text'])\n else:\n optivo.test(\n category.mandant, category.recipientlist_test,\n to, u'[test] ' + self.subject,\n rendered['html'], rendered['text'])\n\n\nclass NewsletterType(zeit.cms.type.XMLContentTypeDeclaration):\n\n factory = Newsletter\n interface = zeit.newsletter.interfaces.INewsletter\n type = 'newsletter'\n title = _('Daily Newsletter') # multiple categories are not supported yet\n\n\n@grok.adapter(zeit.newsletter.interfaces.INewsletter)\n@grok.implementer(zeit.newsletter.interfaces.INewsletterCategory)\ndef category_for_newsletter(context):\n if zeit.cms.checkout.interfaces.ILocalContent.providedBy(context):\n context = zeit.cms.interfaces.ICMSContent(context.uniqueId)\n candidate = context.__parent__\n while candidate:\n if zeit.newsletter.interfaces.INewsletterCategory.providedBy(\n candidate):\n return candidate\n candidate = candidate.__parent__\n\n\n@grok.implementer(zeit.newsletter.interfaces.IBody)\nclass Body(zeit.edit.container.TypeOnAttributeContainer,\n 
grok.MultiAdapter):\n\n __name__ = BODY_NAME\n\n grok.provides(zeit.newsletter.interfaces.IBody)\n grok.adapts(\n zeit.newsletter.interfaces.INewsletter,\n gocept.lxml.interfaces.IObjectified)\n grok.name(BODY_NAME)\n\n def values(self):\n # We re-implement values() so it works without keys(), since those are\n # not present in the repository, but since e.g. zeit.frontend is only\n # interested in the values, anyway, this works out alright.\n result = []\n for node in self.xml.iterchildren():\n result.append(self._get_element_for_node(node))\n return result\n\n\n@grok.implementer(zeit.newsletter.interfaces.IGroup)\nclass Group(zeit.edit.container.TypeOnAttributeContainer,\n grok.MultiAdapter):\n\n grok.provides(zeit.newsletter.interfaces.IGroup)\n grok.adapts(\n zeit.newsletter.interfaces.IBody,\n gocept.lxml.interfaces.IObjectified)\n type = 'group'\n grok.name(type)\n\n title = zeit.cms.content.property.ObjectPathProperty(\n '.head.title', zeit.newsletter.interfaces.IGroup['title'])\n\n def values(self):\n # We re-implement values() so it works without keys(), since those are\n # not present in the repository, but since e.g. zeit.frontend is only\n # interested in the values, anyway, this works out alright.\n result = []\n for node in self.xml.xpath('container'):\n result.append(self._get_element_for_node(node))\n return result\n\n\nclass GroupFactory(zeit.edit.block.TypeOnAttributeElementFactory):\n\n grok.context(zeit.newsletter.interfaces.IBody)\n produces = Group\n tag_name = 'region'\n title = _('Group')\n\n\n@grok.implementer(zeit.newsletter.interfaces.ITeaser)\nclass Teaser(zeit.edit.block.SimpleElement):\n\n area = zeit.newsletter.interfaces.IGroup\n type = 'teaser'\n\n reference = zeit.cms.content.reference.SingleResource(\n '.block', xml_reference_name='related')\n\n\nclass TeaserFactory(zeit.edit.block.TypeOnAttributeElementFactory):\n\n grok.context(zeit.newsletter.interfaces.IGroup)\n produces = Teaser\n title = _('Teaser')\n\n\nclass AdvertisementBase(object):\n\n area = zeit.newsletter.interfaces.IBody\n type = NotImplemented\n\n @property\n def category(self):\n nl = zeit.newsletter.interfaces.INewsletter(self)\n return zeit.newsletter.interfaces.INewsletterCategory(nl)\n\n @property\n def position(self):\n return self.type.replace('advertisement-', '')\n\n @property\n def href(self):\n return getattr(self.category, 'ad_%s_href' % self.position)\n\n @property\n def title(self):\n return getattr(self.category, 'ad_%s_title' % self.position)\n\n @property\n def text(self):\n return getattr(self.category, 'ad_%s_text' % self.position)\n\n @property\n def image(self):\n return getattr(self.category, 'ad_%s_image' % self.position)\n\n\n# XXX Putting implements on AdvertisementBase breaks during grokking, why?\n@grok.implementer(zeit.newsletter.interfaces.IAdvertisement)\nclass MiddleAdvertisement(zeit.edit.block.SimpleElement, AdvertisementBase):\n\n type = 'advertisement-middle'\n\n\n@grok.implementer(zeit.newsletter.interfaces.IAdvertisement)\nclass ThisWeeksAdvertisement(zeit.edit.block.SimpleElement, AdvertisementBase):\n\n type = 'advertisement-thisweeks'\n\n\n@grok.implementer(zeit.newsletter.interfaces.IAdvertisement)\nclass BottomAdvertisement(zeit.edit.block.SimpleElement, AdvertisementBase):\n\n type = 'advertisement-bottom'\n\n\nclass MiddleAdvertisementFactory(\n zeit.edit.block.TypeOnAttributeElementFactory):\n\n grok.context(zeit.newsletter.interfaces.IBody)\n produces = MiddleAdvertisement\n title = _('Advertisement')\n\n\nclass ThisWeeksAdvertisementFactory(\n 
zeit.edit.block.TypeOnAttributeElementFactory):\n\n grok.context(zeit.newsletter.interfaces.IBody)\n produces = ThisWeeksAdvertisement\n title = _('Advertisement')\n\n\nclass BottomAdvertisementFactory(\n zeit.edit.block.TypeOnAttributeElementFactory):\n\n grok.context(zeit.newsletter.interfaces.IBody)\n produces = BottomAdvertisement\n title = _('Advertisement')\n\n\n@grok.adapter(zeit.edit.interfaces.IElement)\n@grok.implementer(zeit.newsletter.interfaces.INewsletter)\ndef newsletter_for_element(context):\n return zeit.newsletter.interfaces.INewsletter(\n getattr(context, '__parent__', None), None)\n","sub_path":"core/src/zeit/newsletter/newsletter.py","file_name":"newsletter.py","file_ext":"py","file_size_in_byte":7691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582356588","text":"\n\n__all__ = ['eig_seg',\n 'initialize_eigenanatomy',\n 'sparse_decom2']\n\nimport numpy as np\nfrom scipy.stats import pearsonr\nimport pandas as pd\n\nfrom .. import core\nfrom .. import lib\nfrom .. import utils\nfrom .. import viz\nfrom ..core import ants_image as iio\n\n\ndef sparse_decom2(inmatrix, \n inmask=(None, None), \n sparseness=(0.01, 0.01),\n nvecs=3, \n its=20, \n cthresh=(0,0), \n statdir=None, \n perms=0,\n uselong=0, \n z=0, \n smooth=0, \n robust=0, \n mycoption=0,\n initialization_list=[], \n initialization_list2=[], \n ell1=10,\n prior_weight=0, \n verbose=False, \n rejector=0, \n max_based=False,\n version=1):\n \"\"\"\n Sparse Decomposition of two data views - aka Sparse CCA\n\n Example\n -------\n >>> import numpy as np\n >>> import ants\n >>> mat = np.random.randn(20, 100)\n >>> mat2 = np.random.randn(20, 90)\n >>> mydecom = ants.sparse_decom2(inmatrix = (mat,mat2), \n sparseness=(0.1,0.3), nvecs=3, its=3, perms=0)\n\n Example2\n --------\n >>> import numpy as np\n >>> import pandas as pd\n >>> import ants\n >>> trainingImageData = np.load('/users/ncullen/desktop/trainingImageData.npy')\n >>> trainingAgeMatrix = pd.read_csv('~/desktop/trainingAgeMatrix.csv',index_col=0).values\n >>> grayMatterMask = ants.image_read('~/desktop/grayMatterMask.nii.gz')\n >>> res = ants.sparse_decom2(inmatrix=(trainingImageData,trainingAgeMatrix), \n sparseness=(0.01,0.9), inmask=(grayMatterMask,None),\n nvecs=2,mycoption=0,cthresh=(1000,0),ell1=10,smooth=0)\n \"\"\"\n if inmatrix[0].shape[0] != inmatrix[1].shape[0]:\n raise ValueError('Matrices must have same number of rows (samples)')\n\n idim = 3\n\n if isinstance(inmask[0], iio.ANTsImage):\n maskx = inmask[0].clone('float')\n idim = inmask[0].dimension\n hasmaskx = 1\n elif isinstance(inmask[0], np.ndarray):\n maskx = core.from_numpy(inmask[0], pixeltype='float')\n idim = inmask[0].ndim\n hasmaskx = 1\n else:\n maskx = core.make_image([1]*idim, pixeltype='float')\n hasmaskx = -1\n\n if isinstance(inmask[1], iio.ANTsImage):\n masky = inmask[1].clone('float')\n idim = inmask[1].dimension\n hasmasky = 1\n elif isinstance(inmask[1], np.ndarray):\n masky = core.from_numpy(inmask[1], pixeltype='float')\n idim = inmask[1].ndim\n hasmasky = 1\n else:\n masky = core.make_image([1]*idim, pixeltype='float')\n hasmasky = -1\n\n inmask = [maskx, masky]\n\n if robust > 0:\n raise NotImplementedError('robust > 0 not currently implemented')\n else:\n input_matrices = inmatrix\n\n if idim == 2:\n if version == 1:\n sccancpp_fn = lib.sccanCpp2D\n elif version == 2:\n sccancpp_fn = lib.sccanCpp2DV2\n input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())\n elif idim ==3:\n if version == 
1:\n sccancpp_fn = lib.sccanCpp3D\n elif version == 2:\n sccancpp_fn = lib.sccanCpp3DV2\n input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())\n\n outval = sccancpp_fn(input_matrices[0], input_matrices[1],\n inmask[0]._img, inmask[1]._img,\n hasmaskx, hasmasky,\n sparseness[0], sparseness[1],\n nvecs, its, \n cthresh[0], cthresh[1], \n z, smooth,\n initialization_list, initialization_list2, \n ell1, verbose, \n prior_weight, mycoption, max_based)\n\n\n p1 = np.dot(input_matrices[0], outval['eig1'].T)\n p2 = np.dot(input_matrices[1], outval['eig2'].T)\n outcorrs = np.array([pearsonr(p1[:,i],p2[:,i])[0] for i in range(p1.shape[1])])\n if prior_weight < 1e-10:\n myord = np.argsort(np.abs(outcorrs))[::-1]\n outcorrs = outcorrs[myord]\n p1 = p1[:, myord]\n p2 = p2[:, myord]\n outval['eig1'] = outval['eig1'][myord,:]\n outval['eig2'] = outval['eig2'][myord,:]\n\n cca_summary = np.vstack((outcorrs,[None]*len(outcorrs))).T\n\n if perms > 0:\n cca_summary[:,1] = 0\n\n nsubs = input_matrices[0].shape[0]\n for permer in range(perms):\n m1 = input_matrices[0][np.random.permutation(nsubs),:]\n m2 = input_matrices[1][np.random.permutation(nsubs),:]\n outvalperm = sccancpp_fn(m1, m2,\n inmask[0]._img, inmask[1]._img,\n hasmaskx, hasmasky,\n sparseness[0], sparseness[1],\n nvecs, its, \n cthresh[0], cthresh[1], \n z, smooth,\n initialization_list, initialization_list2, \n ell1, verbose, \n prior_weight, mycoption, max_based)\n p1perm = np.dot(m1, outvalperm['eig1'].T)\n p2perm = np.dot(m2, outvalperm['eig2'].T)\n outcorrsperm = np.array([pearsonr(p1perm[:,i],p2perm[:,i])[0] for i in range(p1perm.shape[1])])\n if prior_weight < 1e-10:\n myord = np.argsort(np.abs(outcorrsperm))[::-1]\n outcorrsperm = outcorrsperm[myord]\n counter = np.abs(cca_summary[:,0]) < np.abs(outcorrsperm)\n counter = counter.astype('int')\n cca_summary[:,1] = cca_summary[:,1] + counter\n\n cca_summary[:,1] = cca_summary[:,1] / float(perms)\n\n return {'projections': p1,\n 'projections2': p2,\n 'eig1': outval['eig1'].T,\n 'eig2': outval['eig2'].T,\n 'summary': pd.DataFrame(cca_summary,columns=['corrs','pvalues'])}\n\n\ndef initialize_eigenanatomy(initmat, mask=None, initlabels=None, nreps=1, smoothing=0):\n \"\"\"\n Arguments\n ---------\n initmat : np.ndarray or ANTsImage \n input matrix where rows provide initial vector values. 
\n alternatively, this can be an antsImage which contains labeled regions.\n \n mask : ANTsImage\n mask if available\n\n initlabels : list/tuple of integers\n which labels in initmat to use as initial components\n \n nreps : integer\n nrepetitions to use\n \n smoothing : float\n if using an initial label image, optionally smooth each roi\n\n Example\n -------\n >>> import ants\n >>> import numpy as np\n >>> import pandas as pd\n >>> mat = pd.read_csv('~/desktop/mat.csv', index_col=0).values\n >>> init = ants.initialize_eigenanatomy(mat)\n \"\"\"\n if isinstance(initmat, iio.ANTsImage):\n # create initmat from each of the unique labels\n if mask is not None:\n selectvec = mask > 0\n else:\n selectvec = initmat > 0\n initmatvec = initmat[selectvec]\n\n if initlabels is None:\n ulabs = np.sort(np.unique(initmatvec))\n ulabs = ulabs[ulabs > 0]\n else:\n ulabs = initlabels\n\n nvox = len(initmatvec)\n temp = np.zeros((len(ulabs), nvox))\n\n for x in range(len(ulabs)):\n timg = utils.threshold_image(initmat, ulabs[x]-1e-4, ulabs[x]+1e-4)\n if smoothing > 0:\n timg = utils.smooth_image(timg, smoothing)\n temp[x,:] = timg[selectvec]\n initmat = temp\n\n nclasses = initmat.shape[0]\n classlabels = ['init%i'%i for i in range(nclasses)]\n initlist = []\n if mask is None:\n maskmat = np.zeros(initmat.shape)\n maskmat[0,:] = 1\n mask = core.from_numpy(maskmat.astype('float32'))\n\n eanatnames = ['A'] * (nclasses*nreps)\n ct = 0\n for i in range(nclasses):\n vecimg = mask.clone('float')\n initf = initmat[i,:]\n vecimg[mask==1] = initf\n for nr in range(nreps):\n initlist.append(vecimg)\n eanatnames[ct+nr-1] = str(classlabels[i])\n ct = ct + 1\n\n return {'initlist': initlist, 'mask': mask, 'enames': eanatnames}\n\n\n\ndef eig_seg(mask, img_list, apply_segmentation_to_images=False, cthresh=0, smooth=1):\n \"\"\"\n Segment a mask into regions based on the max value in an image list. \n At a given voxel the segmentation label will contain the index to the image \n that has the largest value. 
If the 3rd image has the greatest value, \n the segmentation label will be 3 at that voxel.\n \n Arguments\n --------- \n mask : ANTsImage\n D-dimensional mask > 0 defining segmentation region.\n\n img_list : collection of ANTsImage or np.ndarray\n images to use\n\n apply_segmentation_to_images : boolean \n determines if original image list is modified by the segmentation.\n\n cthresh : integer\n throw away isolated clusters smaller than this value\n \n smooth : float\n smooth the input data first by this value\n\n Example\n -------\n >>> import ants\n >>> mylist = [ants.image_read(ants.get_ants_data('r16')),\n ants.image_read(ants.get_ants_data('r27')),\n ants.image_read(ants.get_ants_data('r85'))]\n >>> myseg = ants.eig_seg(ants.get_mask(mylist[0]), mylist)\n \"\"\"\n maskvox = mask > 0\n maskseg = mask.clone()\n maskseg[maskvox] = 0\n if isinstance(img_list, np.ndarray):\n mydata = img_list\n elif isinstance(img_list, (tuple, list)):\n mydata = utils.image_list_to_matrix(img_list, mask)\n\n if (smooth > 0):\n for i in range(mydata.shape[0]):\n temp_img = core.make_image(mask, mydata[i,:], pixeltype='float')\n temp_img = utils.smooth_image(temp_img, smooth, sigma_in_physical_coordinates=True)\n mydata[i,:] = temp_img[mask >= 0.5]\n\n segids = np.argmax(np.abs(mydata), axis=0)+1\n segmax = np.max(np.abs(mydata), axis=0)\n maskseg[maskvox] = (segids * (segmax > 1e-09))\n\n if cthresh > 0:\n for kk in range(max(maskseg)):\n timg = utils.threshold_image(maskseg, kk, kk)\n timg = utils.label_clusters(cthresh)\n timg = utils.threshold_image(timg, 1, 1e15) * float(kk)\n maskseg[maskseg == kk] = timg[maskseg == kk]\n\n if (apply_segmentation_to_images) and (not isinstance(img_list, np.ndarray)):\n for i in range(len(img_list)):\n img = img_list[i]\n img[maskseg != float(i)] = 0\n img_list[i] = img\n\n return maskseg\n\n\n\n\n\n","sub_path":"ants/learn/decomposition.py","file_name":"decomposition.py","file_ext":"py","file_size_in_byte":10783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"348912745","text":"import sys\nfrom unicodedata import category\nimport string\nimport pprint\n\n\ndef word_summary(filename):\n \"\"\"this function takes away any strippables\"\"\"\n hist = {}\n fp = open(filename, encoding = 'UTF8')\n\n strippables = ''.join(\n [chr(i) for i in range(sys.maxunicode) if category(chr(i)).startswith(\"P\")]\n )\n for line in fp:\n\n line = line.replace('-', ' ').replace(chr(8212), ' ').replace('=', ' ')\n\n for word in line.split():\n # word could be 'Sussex.'\n word = word.strip(strippables)\n word = word.lower()\n\n # update the dictionary\n hist[word] = hist.get(word, 0) + 1\n\n return hist\n\n\ndef total_words(hist):\n \"\"\"returns the total number of words\"\"\"\n return sum(hist.values())\n\n\ndef ordered_frequency(hist):\n \"\"\"this function returns an ordered list of words by frequency\"\"\"\n x = sorted(hist.items(), key=lambda item: item[1], reverse= True)\n return x\n\n\ndef top_ten(hist):\n \"\"\"this function returns the top 10 frequent words\"\"\"\n x = sorted(hist.items(), key=lambda item: item[1], reverse= True)[0:10]\n return x\n\n\ndef after_10(hist):\n \"\"\"this function returns the top 11-20 frequent words\"\"\"\n x = sorted(hist.items(), key=lambda item: item[1], reverse= True)[11:20]\n return x\n\n\ndef after_20(hist):\n \"\"\"this function returns the top 21-30 frequent words\"\"\"\n x = sorted(hist.items(), key=lambda item: item[1], reverse= True)[21:30]\n return x\n\n\ndef unique_words(hist): \n 
return len(hist)\n\n\n\ndef bottom_ten(hist):\n \"\"\"this function returns the top 10 frequent words\"\"\"\n x = sorted(hist.items(), key=lambda item: item[1], reverse= False)[0:3350]\n return x\n\n\ndef words_the(hist):\n \"\"\"this function returns a list of words that have the in it\"\"\"\n x = filter(lambda hist: 'the' in hist, hist)\n return list(x)\n\n\ndef words_phone(hist):\n \"\"\"this function returns a list of words that have phone in it\"\"\"\n x = filter(lambda hist: 'phone' in hist, hist)\n return list(x)\n\ndef words_tech(hist):\n \"\"\"this function returns a list of words that have tech in it\"\"\"\n x = filter(lambda hist: 'tech' in hist, hist)\n return list(x)\n\ndef words_apple(hist):\n \"\"\"this function returns a list of words that have apple in it\"\"\"\n x = filter(lambda hist: 'apple' in hist, hist)\n return list(x)\n\n\n \n\n \n\n\n\n\n\ndef main():\n filename ='Iphone.txt'\n hist = word_summary('Iphone.txt')\n \n \n # print(hist)\n print('Total number of words:', total_words(hist))\n print(\"there are\",unique_words(hist),\"unique words in this wikipedia page\")\n print(ordered_frequency(hist)) \n # print(type(ordered_frequency(hist)))\n\n print('the top ten words are:', top_ten(hist))\n print('the next top ten words are:', after_10(hist))\n print('the next top ten words are:', after_20(hist))\n\n print('the ten least used words are:', bottom_ten(hist))\n\n print(words_the(hist))\n print(\"the number of words that have the in it is\", len(words_the(hist)))\n\n print(words_phone(hist))\n print(\"the number of words that have phone in it is\",len(words_phone(hist)))\n\n print(words_tech(hist))\n print(\"the number of words that have tech in it is\",len(words_tech(hist)))\n\n print(words_apple(hist))\n print(\"the number of words that have apple in it is\",len(words_apple(hist)))\n\n \n \n\n\n\nif __name__ == '__main__':\n main()","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"208768414","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nimport requests\nimport six\nimport yaml\n\nfrom nailgun.utils import grouper\n\n\ndef get_release_file(repo, retries=1):\n \"\"\"Get Release content of a given repo.\n\n :param repo: a repo as dict\n :returns: a release's content as string\n \"\"\"\n if repo['section']:\n # We can't use urljoin here because it works pretty bad in\n # cases when 'uri' doesn't have a trailing slash.\n download_uri = os.path.join(\n repo['uri'], 'dists', repo['suite'], 'Release')\n else:\n # Well, we have a flat repo case, so we should download Release\n # file from a different place. 
Please note, we have to strip\n # a leading slash from suite because otherwise the download\n # link will be wrong.\n download_uri = os.path.join(\n repo['uri'], repo['suite'].lstrip('/'), 'Release')\n\n for _ in six.moves.range(0, retries):\n response = requests.get(download_uri)\n\n # do not perform retries if release is not found\n if response.status_code == 404:\n break\n\n response.raise_for_status()\n return response.text\n\n\ndef parse_release_file(content):\n \"\"\"Parse Debian repo's Release file content.\n\n :param content: a Debian's Release file content\n :returns: a dict with repo's attributes\n \"\"\"\n\n # TODO(ikalnitsky): Consider to use some existing library for\n # parsing debian's release file (e.g. python-debian).\n\n _multivalued_fields = {\n 'SHA1': ['sha1', 'size', 'name'],\n 'SHA256': ['sha256', 'size', 'name'],\n 'SHA512': ['sha512', 'size', 'name'],\n 'MD5Sum': ['md5sum', 'size', 'name'],\n }\n\n # debian data format is very similiar to yaml, except\n # multivalued field. so we can parse it just like yaml\n # and then perform additional transformation for those\n # fields (we know which ones are multivalues).\n data = yaml.load(content)\n\n for attr, columns in six.iteritems(_multivalued_fields):\n if attr not in data:\n continue\n\n values = data[attr].split()\n data[attr] = []\n\n for group in grouper(values, len(columns)):\n data[attr].append(dict(zip(columns, group)))\n\n return data\n\n\ndef get_apt_preferences_line(deb_release):\n \"\"\"Get an APT Preferences line from repo's release information.\n\n :param deb_release: a Debian's Release content as dict\n :returns: an apt pinning line as string\n \"\"\"\n _transformations = {\n 'Archive': 'a',\n 'Suite': 'a', # suite is a synonym for archive\n 'Codename': 'n',\n 'Version': 'v',\n 'Origin': 'o',\n 'Label': 'l',\n }\n\n conditions = set()\n for field, condition in six.iteritems(_transformations):\n if field in deb_release:\n conditions.add('{0}={1}'.format(condition, deb_release[field]))\n\n return ','.join(conditions)\n","sub_path":"nailgun/nailgun/utils/debian.py","file_name":"debian.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"470345668","text":"#! 
/home/alexmakh/miniconda3/bin/python3\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nloc_oc43 = pd.DataFrame(pd.read_csv('Results/locus_Result_OC43.txt', sep='\\t'))\nloc_mers = pd.DataFrame(pd.read_csv('Results/locus_Result_MERS.txt', sep='\\t'))\nl_sars2 = pd.DataFrame(pd.read_csv('Results/locus_Result_SARS2.txt', sep='\\t'))\nloc_sars = pd.DataFrame(pd.read_csv('Results/locus_Result_SARS.txt', sep='\\t'))\n\nmirna = 'miR-16-5p'\n\nx1 = loc_sars[loc_sars['miRNA'] == mirna]['MSA_start'].tolist()\ny1 = loc_sars[loc_sars['miRNA'] == mirna]['MSA_end'].tolist()\n\nx2 = loc_oc43[loc_oc43['miRNA'] == mirna]['MSA_start'].tolist()\ny2 = loc_oc43[loc_oc43['miRNA'] == mirna]['MSA_end'].tolist()\n\nx3 = l_sars2[l_sars2['miRNA'] == mirna]['MSA_start'].tolist()\ny3 = l_sars2[l_sars2['miRNA'] == mirna]['MSA_end'].tolist()\n\nx4 = loc_mers[loc_mers['miRNA'] == mirna]['MSA_start'].tolist()\ny4 = loc_mers[loc_mers['miRNA'] == mirna]['MSA_end'].tolist()\n\nfig, ax = plt.subplots(1, 1, figsize=(25, 25), tight_layout=True)\nfig.suptitle('miR-16-5p conserved and nonconserved binding sites',\n va='baseline', color='blue')\n\nax.set_xlabel('Start nt of binding site', fontsize=15, color='blue')\nax.set_ylabel('End nt of binding site', fontsize=15, color='blue')\n\nax.yaxis.set_ticks(np.arange(0, 33001, 1000))\nax.xaxis.set_ticks(np.arange(0, 33001, 1000))\nax.yaxis.set_tick_params(labelsize=10)\nax.xaxis.set_tick_params(labelrotation=45, labelsize=10)\nax.axis([-1000, 33000, -1000, 33000])\nax.grid()\n\nax = plt.plot(x3, y3, 'og', markersize=12, label='SARS2')\nax = plt.plot(x1, y1, 'or', markersize=9, label='SARS')\nax = plt.plot(x2, y2, 'o', color='orange', markersize=6, label='OC43')\nax = plt.plot(x4, y4, 'o', color='purple', markersize=3, label='MERS')\n\nplt.legend()\n\nplt.show()\n","sub_path":"TargetScan/Graphs/mir_16_5p.py","file_name":"mir_16_5p.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593154","text":"\"\"\"\n@author:liulili\n\n\"\"\"\nfrom utils.common import get_mysql\nfrom utils.tools.base_test_case import BaseTestCase\nfrom api.http_client_growth import HttpClientGrowth\nfrom utils.log import LOGGER\nimport time\nimport pytest\n\n\nclass TestGrowthRoleUrl(BaseTestCase):\n @classmethod\n def setup_class(cls):\n cls.growth = HttpClientGrowth()\n cls.mysql = get_mysql()\n\n def teardown_class(cls):\n LOGGER.info(\"TestGrowthRoleUrl类获取一元分销活动分流地址-测试结束\")\n\n\n def test_growth_roleurl_one(self):\n \"\"\"\n desc:验证用户未购买课程时跳转地址为0元售前页\n steps:\n 1、一元分销分流跳转地址接口,获取response\n 2、查数据库获取用户已购买课程\n 3、断言跳转链接是否一致\n \"\"\"\n\n t1 = round(time.time()) * 1000\n #link = \"https://m.test.ximalaya.com/ort/router/invite/invitepagenew/34?source=1yuan\"\n link_list1 = self.growth.growth_usergrowthroleurl1(t1)\n classmate_list1 = self.mysql.query(\"select * from xmkp_edu.EDU_CLASSMATE where user_id=344746\", True)\n if len(classmate_list1) == 0:\n self.assert_equal('', link_list1.data, error_msg=\"用户未跳转到0元售前页\")\n\n #@pytest.mark.skip(reason=\"暂不执行此条case\")\n def test_growth_roleurl_two(self):\n \"\"\"\n desc:验证用户购买月课或者长期组时跳转到1元分销活动页\n steps:\n 1、一元分销分流跳转地址接口,获取response\n 2、查数据库获取用户已购买课程\n 3、断言跳转链接是否一致\n \"\"\"\n\n t1 = round(time.time()) * 1000\n link = ''\n link_list2 = self.growth.growth_usergrowthroleurl2(t1)\n classmate_list2 = self.mysql.query(\"select * from xmkp_edu.EDU_CLASSMATE where user_id = 794111\", True)\n for i in range(0, len(classmate_list2)):\n camp_ref = 
classmate_list2[i][\"camp_ref\"]\n camp_list = self.mysql.query(\"select * from xmkp_edu.EDU_CAMP where id = %s\" % camp_ref)\n while camp_list[\"campus\"] == \"月课\" or camp_list[\"campus\"] == \"长期组\":\n self.assert_equal(link, link_list2.data, error_msg=\"用户未跳转到1元分销活动页\")\n break\n\n","sub_path":"auto_api_project/xmkp-api-test/cases/GROWTH/ONE/test_cases/test_user_growth_roleurl.py","file_name":"test_user_growth_roleurl.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"380874217","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 21 15:23:37 2019\n\n@author: wang9\n\"\"\"\nimport torch.nn as nn\nimport torch\n\n#from IPython import embed\n# Here we define our model as a class\nclass DC_net(nn.Module):\n\n def __init__(self, input_dim, hidden_dim, batch_size, embedding_dim,num_layers,dropout):\n super(DC_net, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.batch_size = batch_size\n self.num_layers = num_layers\n self.dropout = dropout\n self.embedding_dim = embedding_dim \n # Define the LSTM layer\n self.lstm = nn.LSTM(input_size=self.input_dim,hidden_size= self.hidden_dim, \\\n num_layers= self.num_layers,dropout = self.dropout,batch_first = True)\n # self.drop_out = nn.Dropout(p = self.dropout)\n # Define the output layer\n self.linear = nn.Linear(self.hidden_dim, self.input_dim * self.embedding_dim)\n self.non_linear = torch.tanh\n self.eps = 1e-8\n def init_hidden(self):\n # This is what we'll initialise our hidden state as\n return (torch.zeros( self.num_layers,self.batch_size,self.hidden_dim),\n torch.zeros( self.num_layers,self.batch_size, self.hidden_dim))\n# =============================================================================\n# def init_hidden(self, batch_size):\n# return self.rnn.init_hidden(batch_size)\n# =============================================================================\n \n def forward(self, input,hidden):\n # Forward pass through LSTM layer\n # shape of self.hidden: (a, b), where a and b both \n # have shape (num_layers, batch_size, hidden_dim).\n \n # input has shape of [batch_size, seq_len,input_size] because we set batch_first= True\n self.seq_len = input.size(1)\n # self.hidden = self.init_hidden()\n lstm_out, self.hidden = self.lstm(input,hidden) #shape of lstm_out: [batch_size,seq_len,hidden_dim]\n # print('lstm_out shape is ',lstm_out.shape)\n # y = self.drop_out(lstm_out)\n y = lstm_out\n # y= lstm_out\n # embed()\n y = y.contiguous().view(self.batch_size * self.seq_len, self.hidden_dim)# reshape lstm_out to [batch_size * seq_len, hidden_dim]\n # print('y shape is ', y.shape)\n emb_V = self.linear(y) # shape is [batch_size * seq_len, input_dim * embedding_dim]\n emb_V = self.non_linear(emb_V)# shape is [batch_size * seq_len, input_dim * embedding_dim]\n # print('embedding matrix shape is ',emb_V.shape)\n emb_V = emb_V.view(self.batch_size,self.seq_len * self.input_dim, self.embedding_dim)# reshape the embedding to [batch_size, seq_len*input_dim, embedding_dim]\n # print('reshape embdeddng is ', emb_V.shape)\n V_norm = torch.sqrt(torch.sum(torch.pow(emb_V,2), -1))\n V_norm = V_norm.unsqueeze(-1).expand_as(emb_V)\n emb_V_norm = emb_V/(V_norm +self.eps)\n # print('normalized embdedding is ', emb_V_norm.shape)\n # out = self.non_linear(emb_V_norm) # output_shape is [batch_size, input_dim * seq_len, embedding_dim]\n \n\n return emb_V_norm\n # return out\n# 
=============================================================================\n# input_seq= Variable(torch.randn(glob_constant.batch_size,glob_constant.seq_len,glob_constant.input_dim))\n# \n# model = DC_net(glob_constant.input_dim,glob_constant.hidden_dim,glob_constant.batch_size,\\\n# glob_constant.Embedding,glob_constant.num_layers,glob_constant.dropout)\n# out= model(input_seq)\n# =============================================================================\n","sub_path":"test_/DC_Net_3_128fft_size.py","file_name":"DC_Net_3_128fft_size.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"558894757","text":"from random import choice, randint\n\nfrom django.core.management.base import BaseCommand\n\nfrom faker import Faker\n\nfrom group.models import Group\n\nfrom students.models import Student\n\nfrom teachers.models import Teacher\n\n\nclass Command(BaseCommand):\n help = 'Generates new groups (default = 100)' # noqa django requires 'help'\n\n def add_arguments(self, parser):\n parser.add_argument('number_of_groups', type=int, nargs='?', default=100)\n\n def handle(self, *args, **options):\n fake = Faker()\n specifications = ['Python', 'Javascript', 'Java', 'C++']\n count = options.get('number_of_groups')\n new_groups = []\n for _ in range(count):\n new_groups.append(Group(\n teacher=fake.name(),\n specification=choice(specifications),\n count_of_students=randint(10, 20),\n length_of_course=randint(1, 12),\n head=Student.objects.order_by('?').last(),\n curator=Teacher.objects.order_by('?').last()\n ))\n Group.objects.bulk_create(new_groups)\n","sub_path":"src/group/management/commands/generate_groups.py","file_name":"generate_groups.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"637755940","text":"\nimport numpy as np\nimport math\nfrom itertools import combinations\nfrom math import sqrt\n\nfrom .country import *\nfrom .game import *\nfrom .strategies import *\nfrom .match import *\nfrom .action import *\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.patches as mpatches\n\ndef marker_size(country ,string, factor, change = 0):\n if string == \"m\":\n return 1.128 * sqrt(country.m* factor)\n elif string == \"fit\":\n return 1.128 * sqrt(country.fitness* factor)\n elif string == \"change\":\n return 1.128 * sqrt(change* factor)\n elif string == \"e\":\n return 1.128 * sqrt(country.e* factor)\n elif string == \"i\":\n return 1.128 * sqrt(country.i* factor)\n elif string == \"none\":\n return 10*factor\n else: raise Exception(\"keyword for marker size not implemented\")\n\ndef marker_color(country, string, change = 0):\n if string == \"m\":\n return country.m\n elif string == \"fit\":\n return country.fitness\n elif string == \"change\":\n return change\n elif string == \"e\":\n return country.e\n elif string == \"i\":\n return country.i\n elif string == \"none\":\n return 50\n elif string == \"strat\":\n raise Exception(\"strat not impemented as colorIndicator\")\n else:\n raise Exception(\"colorIndicator not impemented\")\n\ndef marker_style(strat):\n mydict = {Collaborate: \"o\", Defect: \"v\", TitForTat: \"s\", Grudge: \"x\", RandomMove: \"D\", Alternate: \"*\"}\n return mydict[strat]\n\ndef draw_pie(ax, lat, lon, outcomeDict, size=600):\n\n total = sum(outcomeDict.values())\n r1 = outcomeDict[R]/total\n r2 = r1 + outcomeDict[T]/total\n r3 = 
r2 + outcomeDict[S]/total\n\n x = [0] + np.cos(np.linspace(0, 2 * np.pi * r1, 10)).tolist()\n y = [0] + np.sin(np.linspace(0, 2 * np.pi * r1, 10)).tolist()\n xy1 = np.column_stack([x, y])\n s1 = np.abs(xy1).max()\n\n x = [0] + np.cos(np.linspace(2 * np.pi * r1, 2 * np.pi * r2, 10)).tolist()\n y = [0] + np.sin(np.linspace(2 * np.pi * r1, 2 * np.pi * r2, 10)).tolist()\n xy2 = np.column_stack([x, y])\n s2 = np.abs(xy2).max()\n\n x = [0] + np.cos(np.linspace(2 * np.pi * r2, 2 * np.pi * r3, 10)).tolist()\n y = [0] + np.sin(np.linspace(2 * np.pi * r2, 2 * np.pi * r3, 10)).tolist()\n xy3 = np.column_stack([x, y])\n s3 = np.abs(xy3).max()\n\n x = [0] + np.cos(np.linspace(2 * np.pi * r3, 2 * np.pi, 10)).tolist()\n y = [0] + np.sin(np.linspace(2 * np.pi * r3, 2 * np.pi, 10)).tolist()\n xy4 = np.column_stack([x, y])\n s4 = np.abs(xy4).max()\n\n ax.scatter(lon, lat, marker = (xy1, 0), s=s1 **2 * size, facecolor = 'green')\n ax.scatter(lon, lat, marker = (xy2, 0), s=s2 **2 * size, facecolor = 'red')\n ax.scatter(lon, lat, marker = (xy3, 0), s=s3 **2 * size, facecolor = 'blue')\n ax.scatter(lon, lat, marker = (xy4, 0), s=s4 **2 * size, facecolor = 'black')\n\ndef draw_stack(tournament, rounds= 0, cmap = 'jet', xSize = 20, ySize = 20):\n if rounds ==0:\n rounds = tournament.rounds\n numberOfStrategies = len(tournament.strategyList)\n numberOfRounds = rounds\n matrix = np.zeros((numberOfStrategies, numberOfRounds+1))\n\n cmap = plt.get_cmap(cmap)\n colors = [cmap(value/(numberOfStrategies-1)) for value in range(numberOfStrategies)]\n for country in tournament.countries:\n for i, (n, strat) in enumerate(country.evolution[:-1]):\n row = tournament.strategyList.index(strat)\n next_n = country.evolution[i+1][0]\n matrix[row, n:next_n] += country.m\n #last strategy\n last_evo, last_strategy = country.evolution[-1]\n row = tournament.strategyList.index(last_strategy)\n matrix[row, last_evo:] += country.m\n\n stack = np.vstack(matrix)\n\n fig, ax = plt.subplots(figsize =(xSize, ySize))\n ax.stackplot(range(rounds+1), *matrix, labels=tournament.strategyList, colors= colors) #this needs to be adjusted for the number of strategies\n ax.legend(loc='upper right',bbox_to_anchor=(0.95,0.95),ncol=1, fontsize='xx-large')\n plt.ylabel('Market share')\n plt.xlabel('Round number')\n plt.show()\n\ndef draw_fitness_graph(tournament, selecting=[], filtering = [], cmap = 'gist_rainbow', xSize = 10, ySize = 10):\n\n cmap = plt.get_cmap(cmap)\n\n if selecting:\n countries=selecting\n elif filtering:\n countries = [country for country in tournament.countries if not country in filtering]\n else:\n countries = tournament.countries\n\n fig, ax = plt.subplots(figsize =(xSize, ySize))\n\n for country in countries:\n draw_country_line(country, cmap, tournament.strategyList)\n\n #ax.legend(loc=5)\n plt.ylabel('Fitness')\n plt.xlabel('round')\n plt.show()\n\n\ndef draw_country_line(country, cmap, strategyList): #need to add a color legend and color line option\n\n colors = [cmap(value/(len(strategyList)-1)) for value in range(len(strategyList))]\n\n colorDict = dict(zip(strategyList, colors))\n\n le = len(country.evolution)\n\n for evo_nr in range(le-1):\n Xstart = country.evolution[evo_nr][0]\n Xend = country.evolution[evo_nr+1][0] +1 #number +1 correct?\n newColor = colorDict[country.evolution[evo_nr][1]]\n plt.plot(range(Xstart, Xend ), country.fitnessHistory[Xstart: Xend], color = newColor)\n\n Xstart = country.evolution[-1][0]\n Xend = len(country.fitnessHistory)\n lastColor = colorDict[country.evolution[-1][1]]\n 
plt.plot(range(Xstart, Xend), country.fitnessHistory[Xstart:], color = lastColor)\n plt.annotate(country.name, xy=(Xend, country.fitnessHistory[-1]))\n\ndef draw_evo(tournament, rounds =0 , cmap = 'jet' , xSize = 20, ySize = 40, selecting = None, filtering = None): #To do: add selecting\n '''draws for every country the evolution of its stategy'''\n if rounds ==0:\n rounds = tournament.rounds\n\n allCountryNames = [str(country) for country in tournament.countries]\n if selecting:\n countryNames = [str(country) for country in selecting]\n countries = selecting\n elif filtering:\n countryNames = [str(country) for country in tournament.countries if country not in filtering ]\n else:\n countryNames = allCountryNames\n countries = tournament.countries\n\n matrix = make_evolution_matrix(tournament, countries, rounds)\n\n fig, ax = plt.subplots(figsize=(xSize, ySize))\n im = ax.imshow(matrix, cmap =plt.get_cmap(cmap), aspect='auto')\n\n #ax.set_xticks(np.arange(rounds))\n ax.set_yticks(np.arange(len(allCountryNames)))\n\n #ax.set_xticklabels(range(rounds))\n ax.set_yticklabels(allCountryNames)\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n # get the colors of the values, according to the\n # colormap used by imshow\n colors = [ im.cmap(im.norm(value)) for value in range(len(tournament.strategyList))]\n # create a patch (proxy artist) for every color\n patches = [ mpatches.Patch(color=colors[i], label=tournament.strategyList[i]) for i in range(len(tournament.strategyList)) ]\n # put those patched as legend-handles into the legend\n plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=5, borderaxespad=0. )\n\n ax.set_title(\"Evolution\")\n #fig.tight_layout()\n plt.show()\n\ndef make_evolution_matrix(tournament, countries, rounds):\n '''helper function to draw_evo'''\n valueDict = dict(zip(tournament.strategyList, range(len(tournament.strategyList))))#{Collaborate: 0, Defect: 1, TitForTat: 2, Grudge: 3, RandomMove: 4, Alternate: 5}\n result = np.zeros((len(countries), rounds+1))\n\n for country_index, country in enumerate(countries):\n le = len(country.evolution)\n for evo_nr in range(le-1):\n value = valueDict[country.evolution[evo_nr][1]]\n result[country_index, country.evolution[evo_nr][0]: country.evolution[evo_nr+1][0] ] = value\n #laatste balk\n last_value = valueDict[country.evolution[-1][1]]\n result[country_index, country.evolution[-1][0]: ] =last_value\n\n return result\n\ndef select_or_filter_names(tournament, selecting =[], filtering = []):#not finished yet\n allCountryNames = [country.__str__() for country in tournament.countries]\n if selecting:\n countryNames = [country.__str__() for country in selecting]\n elif filtering:\n countryNames = [country.__str__() for country in tournament.countries if country not in filtering ]\n else:\n countryNames = allCountryNames\n\ndef draw_round_robin_matrix(tournament, texting = False, selecting = [], filtering = [], decimals =2, cmap = 'gnuplot', xSize = 20, ySize=20):\n '''draws a matrix where for every country the amount of change in fitness due to every other country is drawn'''\n #this should be a helper method using variables: selecting, filtering\n #returning: list of indices\n allCountryNames = [str(country) for country in tournament.countries]\n if selecting:\n countryNames = [str(country) for country in selecting]\n elif filtering:\n countryNames = [str(country) for country in tournament.countries if country not in filtering ]\n else:\n countryNames = allCountryNames\n 
print(countryNames)\n indices = [allCountryNames.index(cn) for cn in countryNames]\n print(indices)\n #helper method should end here\n\n matrix = tournament.matchResultsMatrix[indices, :][:, indices] #matrix with only rows and columns of indexed countries\n\n fig, ax = plt.subplots(figsize=(xSize, ySize))\n im = ax.imshow(matrix, cmap =plt.get_cmap(cmap))\n\n ax.set_xticks(np.arange(len(countryNames)))\n ax.set_yticks(np.arange(len(countryNames)))\n\n ax.set_xticklabels(countryNames)\n ax.set_yticklabels(countryNames)\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n if texting:\n for i in range(len(countryNames)):\n for j in range(len(countryNames)):\n text = ax.text(j, i, round(matrix[i,j],decimals), ha=\"center\", va=\"center\", color=\"w\")\n\n cb = fig.colorbar(im)\n cb.set_label('change in fitness')\n\n ax.set_title(\"Round Robin Matrix\")\n fig.tight_layout()\n plt.show()\n\ndef colorbarMinAndMax(tournament, colorIndicator):\n if colorIndicator != \"out\":\n if colorIndicator == \"m\":\n lijst = [country.m for country in tournament.countries]\n cmax = max(lijst)\n cmin = min(lijst)\n elif colorIndicator == \"change\":\n cmax = max(tournament.changeInFitness)\n cmin = min(tournament.changeInFitness)\n elif colorIndicator == \"fit\":\n lijst = [country.fitness for country in tournament.countries]\n cmax = max(lijst)\n cmin = min(lijst)\n elif colorIndicator == \"e\":\n lijst = [country.e for country in tournament.countries]\n cmax = max(lijst)\n cmin = min(lijst)\n elif colorIndicator == \"i\":\n lijst = [country.i for country in tournament.countries]\n cmax = max(lijst)\n cmin = min(lijst)\n else:\n cmax = 0\n cmin = 0 #variable not needed\n else:\n cmax = 0\n cmin = 0 # variable not needed\n return (cmin, cmax)\n\ndef draw_geo(tournament, factor = 50, resol = 'c', colorIndicator = \"out\", sizeIndicator = \"m\", projection = 'cyl', xSize = 10, ySize = 10): #sv for state variable\n '''draws a world map with all participating coutnries'''\n fig, ax = plt.subplots(figsize=(xSize, ySize))\n\n m = Basemap(projection=projection,llcrnrlat=-60,urcrnrlat=75,\\\n llcrnrlon=-110,urcrnrlon=180, ax = ax)\n m.drawcoastlines(zorder = 0)\n m.drawcountries(linewidth=0.5, zorder = 0)\n #colorbar preparation:\n cmin, cmax = colorbarMinAndMax(tournament, colorIndicator)\n for i, country in enumerate(tournament.countries):\n draw_country_marker(ax, country, colorIndicator, sizeIndicator, factor = factor, change = tournament.changeInFitness[i], cmax=cmax, cmin = cmin)\n cmap = plt.get_cmap('gnuplot')\n if colorIndicator != \"out\":\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=cmin, vmax=cmax))\n sm._A = []\n plt.colorbar(sm)\n elif sizeIndicator == \"strat\":\n legendKeys = [plt.plot(1, \"r\"+i, markersize = 100) for i in [\"o\", \"v\", \"s\", \"x\", \"D\", \"*\"]]\n legendLabels = tournament.strategyList\n plt.legend(legendKeys, legendLabels)\n\n #sc = ax.scatter([],[], cmap = cmap, vmin = cmin, vmax = cmax)\n #cb = fig.colorbar(sc)\n #cb.set_label('change in fitness')\n\n plt.title(\"Geographical Plot\")\n plt.show()\n\ndef draw_country_marker(ax, country, colorIndicator=\"out\", sizeIndicator=\"m\", factor = 1, change = 100, cmax = 0, cmin = 0):\n '''helper function to draw_geo'''\n if colorIndicator == \"out\":\n draw_pie(ax, country.loc[0], country.loc[1], country.outcomeDict, size = marker_size(country, sizeIndicator, factor, change = change))\n elif sizeIndicator == \"strat\":\n mymarker = marker_style(country.strategy.name())\n mylabel 
= str(country.strategy)\n ax.scatter(country.loc[1], country.loc[0], marker = mymarker, label = mylabel, s = 3 * factor ,c = marker_color(country, colorIndicator, change), cmap =plt.get_cmap('gnuplot'), vmax = cmax, vmin = cmin)\n else:\n #m.plot(country.loc[1], country.loc[0], 'ro', markersize = marker_size(markerSize, factor) ) #should be area in stead of radius\n ax.scatter(country.loc[1], country.loc[0], s= [marker_size(country, sizeIndicator, factor, change)], \\\n c = marker_color(country, colorIndicator, change), cmap =plt.get_cmap('gnuplot'), vmax = cmax, vmin = cmin)\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":13618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"547122011","text":"# encoding: UTF-8\n\n'''\nCTA模块相关的GUI控制组件\n'''\n\n\nfrom uiBasicWidget import QtGui, QtCore, BasicCell,QtWidgets\nfrom eventEngine import *\nfrom ctaStrategy.ctaBase import *\nfrom PyQt5 import QtGui\nimport os,json,time\nfrom language import text\n\n########################################################################################\nclass ParamWindow2(QtGui.QDialog):\n\n def __init__(self,name=None, direction=None, vtSymbol=None, CtaEngineManager=None):\n super(ParamWindow2,self).__init__()\n self.resize(350, 480)\n self.ce = CtaEngineManager\n self.saveButton = QtGui.QPushButton(u\"保存\",self)\n self.cancelButton = QtGui.QPushButton(u\"取消\",self)\n self.setWindowTitle(u\"参数\")\n self.vtSymbol = vtSymbol\n self.setting = {}\n self.paramters = {}\n self.strategyName = \"\"\n self.name = name\n self.firstSave = True\n self.fileName = \"\"\n if name != \"\":\n self.fileName = \"parameter_\" + name + \".json\"\n path = os.path.abspath(os.path.dirname(__file__))\n self.fileName = os.path.join(path, self.fileName) \n\n self.center()\n self.onInit()\n def onInit(self):\n self.saveButton.resize(50, 27)\n self.cancelButton.resize(50, 27)\n self.saveButton.move(220,450)\n self.cancelButton.move(280,450)\n self.saveButton.clicked.connect(self.saveParameter)\n self.cancelButton.clicked.connect(self.cancel) \n self.initLabel()\n self.paramters = self.loadParameter()\n if self.fileName != \"\":\n self.showParam()\n def initLabel(self):\n if self.name == \"\":\n strategyname_label = QtGui.QLabel(u\"策略名\",self)\n strategyname_label.setGeometry(QtCore.QRect(25,25,70,22))\n self.strategyname_label = QtGui.QLineEdit(self)\n self.strategyname_label.setGeometry(QtCore.QRect(120,25,70,22))\n\n self.closeFirst = QtGui.QCheckBox(u'平仓优先',self)\n self.closeFirst.setGeometry(QtCore.QRect(210,25,90,22))\n\n label_symbol = QtGui.QLabel(u\"合约\",self)\n label_symbol.setGeometry(QtCore.QRect(25,50,70,22))\n self.lineEdit_label_symbol = QtGui.QLineEdit(self)\n self.lineEdit_label_symbol.setGeometry(QtCore.QRect(120,50,70,22))\n\n symbolDirection = QtGui.QLabel(u\"方向\",self)\n symbolDirection.setGeometry(QtCore.QRect(210,50,70,22))\n self.directionCombo = QtGui.QComboBox(self)\n self.directionCombo.addItem(\"\")\n self.directionCombo.addItem(\"long\")\n self.directionCombo.addItem('short')\n self.directionCombo.setGeometry(QtCore.QRect(245,50,50,22))\n\n label_longBuyUnit = QtGui.QLabel(u\"每笔数量\",self)\n label_longBuyUnit.setGeometry(QtCore.QRect(25,75,50,22))\n self.lineEdit_label_longBuyUnit = QtGui.QLineEdit(self)\n self.lineEdit_label_longBuyUnit.setGeometry(QtCore.QRect(120,75,70,22))\n\n maxStpLos = QtGui.QLabel(u'止损', self)\n maxStpLos.setGeometry(QtCore.QRect(210,75,70,22))\n self.lineEdit_label_maxStpLos = 
QtGui.QLineEdit(self)\n self.lineEdit_label_maxStpLos.setGeometry(QtCore.QRect(245,75,60,22))\n\n label_longPriceCoe = QtGui.QLabel(u\"价格系数\",self)\n label_longPriceCoe.setGeometry(QtCore.QRect(25,100,50,22))\n self.lineEdit_label_longPriceCoe = QtGui.QLineEdit(self)\n self.lineEdit_label_longPriceCoe.setGeometry(QtCore.QRect(120,100,70,22))\n\n label_longPosition = QtGui.QLabel(u\"当前持仓量\", self)\n label_longPosition.setGeometry(QtCore.QRect(25,125,50,22))\n self.lineEdit_label_longPosition = QtGui.QLineEdit(self)\n self.lineEdit_label_longPosition.setGeometry(QtCore.QRect(120,125,70,22))\n\n\n label_stpProfit = QtGui.QLabel(u\"止赢\", self)\n label_stpProfit.setGeometry(QtCore.QRect(25,150,50,22))\n self.lineEdit_label_stpProfit = QtGui.QLineEdit(self)\n self.lineEdit_label_stpProfit.setGeometry(QtCore.QRect(120,150,70,22))\n\n label_slippage = QtGui.QLabel(u\"滑点\", self)\n label_slippage.setGeometry(QtCore.QRect(25,175,50,22))\n self.lineEdit_label_slippage = QtGui.QLineEdit(self)\n self.lineEdit_label_slippage.setGeometry(QtCore.QRect(120,175,70,22))\n\n label_mail = QtGui.QLabel(u\"邮箱\", self)\n label_mail.setGeometry(QtCore.QRect(25,200,50,22))\n self.lineEdit_label_mail = QtGui.QLineEdit(self)\n self.lineEdit_label_mail.setGeometry(QtCore.QRect(120,200,200,22))\n\n label_buyPrice = QtGui.QLabel(u\"开仓价差\", self)\n label_buyPrice.setGeometry(QtCore.QRect(25,225,50,22))\n self.lineEdit_label_buyPrice = QtGui.QLineEdit(self)\n self.lineEdit_label_buyPrice.setGeometry(QtCore.QRect(120,225,200,22))\n\n label_stoptime = QtGui.QLabel(u\"停止时间\", self)\n label_stoptime.setGeometry(QtCore.QRect(25,250,50,22))\n self.lineEdit_label_stoptime = QtGui.QLineEdit(self)\n self.lineEdit_label_stoptime.setGeometry(QtCore.QRect(120,250,200,22))\n\n self.isFilter = QtGui.QCheckBox(u'当波动大于', self)\n self.isFilter.setGeometry(QtCore.QRect(25,275,150,22))\n self.lineEdit_label_var = QtGui.QLineEdit(self)\n self.lineEdit_label_var.setGeometry(QtCore.QRect(120,275,20,22))\n label_pct = QtGui.QLabel(u'% 时忽略',self)\n label_pct.setGeometry(QtCore.QRect(141,275,80,22))\n\n def center(self):\n screen = QtGui.QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move((screen.width() - size.width())/2, (screen.height() - size.height())/2)\n\n def showParam(self):\n self.lineEdit_label_symbol.setText(self.vtSymbol)\n self.lineEdit_label_longBuyUnit.setText(str(self.paramters[\"openUnit\"]))\n self.lineEdit_label_longPriceCoe.setText(str(self.paramters[\"PriceCoe\"]))\n #self.lineEdit_label_longPosition.setText(str(self.paramters[\"postoday\"][self.vtSymbol]))\n self.lineEdit_label_stpProfit.setText(str(self.paramters[\"stpProfit\"]))\n self.lineEdit_label_slippage.setText(str(self.paramters[\"slippage\"]))\n self.lineEdit_label_stoptime.setText(str(self.paramters[\"stoptime\"]))\n self.lineEdit_label_maxStpLos.setText(str(self.paramters[\"maxStpLos\"]))\n if self.paramters['direction'] =='long':\n self.directionCombo.setCurrentIndex(1)\n else :\n self.directionCombo.setCurrentIndex(2)\n\n if self.paramters['closeFirst'] == True:\n self.closeFirst.setChecked(True)\n else :\n self.closeFirst.setChecked(False)\n\n if self.paramters['isFilter'] == True:\n self.isFilter.setChecked(True)\n else :\n self.isFilter.setChecked(False)\n\n rec = \"\"\n for x in self.paramters[\"receivers\"]:\n rec += x\n rec += \",\"\n rec = rec[:-1]\n self.lineEdit_label_mail.setText(rec)\n bp = \"\"\n for x in self.paramters[\"buyPrice\"]:\n bp += str(x)\n bp += ','\n bp = bp[:-1]\n self.lineEdit_label_buyPrice.setText(bp)\n \n\n def 
cancel(self):\n\n self.showParam()\n\n def loadParameter(self) :\n param = {}\n if self.fileName == \"\":\n return param\n with open(self.fileName, 'r') as f:\n param = json.load(f)\n return param\n\n def saveParameter(self) :\n \n param = {}\n\n try :\n param[\"stpProfit\"] = int(self.lineEdit_label_stpProfit.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'止赢应该是一个数字!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes)\n return\n\n try: \n param[\"slippage\"] = int(self.lineEdit_label_slippage.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'滑点应该是一个数字!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes)\n return\n bp = []\n m = \"\"\n\n try:\n for x in self.lineEdit_label_buyPrice.text():\n if x == ',':\n bp.append(int(m))\n m = ''\n continue\n m += str(x)\n bp.append(int(m))\n except Exception as e:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'开仓价应是用英文逗号分隔的一组数字!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes)\n return\n param[\"buyPrice\"] = bp\n pos = {}\n\n self.vtSymbol = str(self.lineEdit_label_symbol.text())\n if self.lineEdit_label_symbol.text() == '':\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'请正确填写longsymbol!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes) \n return\n else :\n self.vtSymbol = str(self.lineEdit_label_symbol.text())\n\n try:\n pos[self.vtSymbol] = int(self.lineEdit_label_longPosition.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'请正确填写symbol的持仓!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes) \n return\n\n self.paramters = self.loadParameter()\n param[\"postoday\"] = pos\n if self.closeFirst.isChecked():\n param['closeFirst'] = True\n else :\n param['closeFirst'] = False\n \n if self.isFilter.isChecked():\n param['isFilter'] = True\n else :\n param['isFilter'] = False\n\n if self.isFilter.isChecked():\n try :\n param[\"var\"] = int(self.lineEdit_label_var.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'波动率应该是一个数字!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes)\n return\n\n try:\n param['maxStpLos'] = int(self.lineEdit_label_maxStpLos.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'止损应该是一个数字!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes) \n return\n\n try:\n param['openUnit'] = int(self.lineEdit_label_longBuyUnit.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'请正确填写symbol开仓手数!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes) \n return\n\n\n try:\n param['PriceCoe'] = int(self.lineEdit_label_longPriceCoe.text())\n except ValueError:\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'请正确填写symbol的系数!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes) \n return\n stpTime = str(self.lineEdit_label_stoptime.text())\n if stpTime == \"\":\n param['stoptime'] = '9999'\n else :\n param['stoptime'] = stpTime\n rec = []\n m = \"\"\n for x in str(self.lineEdit_label_mail.text()):\n if x == ',':\n rec.append(m)\n m = \"\"\n continue\n m += x\n if m != '':\n rec.append(m)\n if str(self.directionCombo.currentText()) == '':\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'请选择交易方向!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.Yes)\n return\n else :\n param['direction'] = str(self.directionCombo.currentText())\n\n param['receivers'] = rec\n if self.name == \"\" and self.firstSave:\n if self.strategyname_label.text() == '':\n reply = QtGui.QMessageBox.question(self, u'ERROR!',\n u'策略名不能为空!', QtGui.QMessageBox.Yes, 
QtGui.QMessageBox.Yes)\n return\n else :\n self.strategyName = self.strategyname_label.text()\n self.fileName = \"parameter_\" + self.strategyName + \".json\"\n param['isStop'] = False\n with open(self.fileName, 'a') as f:\n f.write(\"{}\")\n f.close()\n param['isStop'] = False\n self.paramters = param\n d1 = json.dumps(param,sort_keys=True,indent=4)\n with open(self.fileName, \"w\") as f:\n f.write(d1)\n f.close()\n self.setting['name'] = str(self.strategyName)\n self.setting['className'] = 'theGirdTrading'\n self.setting['vtSymbol'] = self.vtSymbol\n\n if self.name == \"\" and self.firstSave :\n self.ce.ctaEngine.addStrategy(self.setting,self.strategyName)\n self.firstSave = False\n\n########################################################################\nclass CtaValueMonitor(QtWidgets.QTableWidget):\n \"\"\"参数监控\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, parent=None):\n \"\"\"Constructor\"\"\"\n super(CtaValueMonitor, self).__init__(parent)\n \n self.keyCellDict = {}\n self.data = None\n self.inited = False\n \n self.initUi()\n \n #----------------------------------------------------------------------\n def initUi(self):\n \"\"\"初始化界面\"\"\"\n self.setRowCount(1)\n self.verticalHeader().setVisible(False)\n self.setEditTriggers(self.NoEditTriggers)\n \n self.setMaximumHeight(self.sizeHint().height())\n \n #----------------------------------------------------------------------\n def updateData(self, data):\n \"\"\"更新数据\"\"\"\n if not self.inited:\n self.setColumnCount(len(data))\n self.setHorizontalHeaderLabels(data.keys())\n \n col = 0\n for k, v in data.items():\n cell = QtWidgets.QTableWidgetItem(str(v))\n self.keyCellDict[k] = cell\n self.setItem(0, col, cell)\n col += 1\n \n self.inited = True\n else:\n for k, v in data.items():\n cell = self.keyCellDict[k]\n cell.setText(str(v))\n\n\n########################################################################\nclass CtaStrategyManager(QtWidgets.QGroupBox):\n \"\"\"策略管理组件\"\"\"\n signal = QtCore.pyqtSignal(type(Event()))\n signalBuy = QtCore.pyqtSignal(type(Event())) #开仓信号,需人工确认\n signalShort = QtCore.pyqtSignal(type(Event())) #开仓信号,需人工确认\n\n #----------------------------------------------------------------------\n def __init__(self, ctaEngine, eventEngine, name, parent=None):\n \"\"\"Constructor\"\"\"\n super(CtaStrategyManager, self).__init__(parent)\n \n self.ctaEngine = ctaEngine\n self.eventEngine = eventEngine\n self.name = name\n \n self.initUi()\n self.updateMonitor()\n self.registerEvent()\n \n #----------------------------------------------------------------------\n def initUi(self):\n \"\"\"初始化界面\"\"\"\n self.setTitle(self.name)\n \n self.paramMonitor = CtaValueMonitor(self)\n self.varMonitor = CtaValueMonitor(self)\n \n height = 60\n self.paramMonitor.setFixedHeight(height)\n self.varMonitor.setFixedHeight(height)\n \n buttonInit = QtWidgets.QPushButton(u'初始化')\n buttonStart = QtWidgets.QPushButton(u'启动')\n buttonStop = QtWidgets.QPushButton(u'停止')\n buttonBuy = QtWidgets.QPushButton(u'开多')\n buttonSell = QtWidgets.QPushButton(u'平多')\n buttonShort = QtWidgets.QPushButton(u'开空')\n buttonCover= QtWidgets.QPushButton(u'平空')\n\n buttonParam = QtWidgets.QPushButton(u'参数')\n\n buttonInit.clicked.connect(self.init)\n buttonStart.clicked.connect(self.start)\n buttonStop.clicked.connect(self.stop)\n buttonBuy.clicked.connect(self.buy)\n buttonSell.clicked.connect(self.sell)\n\n buttonShort.clicked.connect(self.short)\n buttonCover.clicked.connect(self.cover)\n\n 
buttonParam.clicked.connect(self.paramSetting)\n\n \n hbox1 = QtWidgets.QHBoxLayout() \n hbox1.addWidget(buttonInit)\n hbox1.addWidget(buttonStart)\n hbox1.addWidget(buttonStop)\n hbox1.addWidget(buttonBuy)\n hbox1.addWidget(buttonSell)\n \n hbox1.addWidget(buttonShort)\n hbox1.addWidget(buttonCover)\n\n hbox1.addWidget(buttonParam)\n hbox1.addStretch()\n \n hbox2 = QtWidgets.QHBoxLayout()\n hbox2.addWidget(self.paramMonitor)\n \n hbox3 = QtWidgets.QHBoxLayout()\n hbox3.addWidget(self.varMonitor)\n \n vbox = QtWidgets.QVBoxLayout()\n vbox.addLayout(hbox1)\n vbox.addLayout(hbox2)\n vbox.addLayout(hbox3)\n\n self.setLayout(vbox)\n \n #----------------------------------------------------------------------\n def updateMonitor(self, event=None):\n \"\"\"显示策略最新状态\"\"\"\n paramDict = self.ctaEngine.getStrategyParam(self.name)\n if paramDict:\n self.paramMonitor.updateData(paramDict)\n \n varDict = self.ctaEngine.getStrategyVar(self.name)\n if varDict:\n self.varMonitor.updateData(varDict) \n \n #----------------------------------------------------------------------\n def registerEvent(self):\n \"\"\"注册事件监听\"\"\"\n self.signal.connect(self.updateMonitor)\n self.eventEngine.register(EVENT_CTA_STRATEGY+self.name, self.signal.emit)\n\n self.signalBuy.connect(self.buy)\n self.signalShort.connect(self.short)\n self.eventEngine.register(EVENT_CTA_STRATEGY+self.name+\".BUY\", self.signalBuy.emit)\n self.eventEngine.register(EVENT_CTA_STRATEGY+self.name+\".SHORT\", self.signalShort.emit)\n \n #----------------------------------------------------------------------\n def init(self):\n \"\"\"初始化策略\"\"\"\n self.ctaEngine.initStrategy(self.name)\n \n #----------------------------------------------------------------------\n def start(self):\n \"\"\"启动策略\"\"\"\n self.ctaEngine.startStrategy(self.name)\n \n #----------------------------------------------------------------------\n def stop(self):\n \"\"\"停止策略\"\"\"\n self.ctaEngine.stopStrategy(self.name)\n\n #----------------------------------------------------------------------\n def buy(self):\n \"\"\"手动开多\"\"\"\n self.ctaEngine.tradeStrategy(self.name,CTAORDER_BUY)\n self.ctaEngine.writeCtaLog( u'手动开多' + self.name) \n\n #----------------------------------------------------------------------\n def sell(self):\n \"\"\"手动平多\"\"\"\n self.ctaEngine.tradeStrategy(self.name,CTAORDER_SELL)\n self.ctaEngine.writeCtaLog( u'手动平多' + self.name) \n\n #----------------------------------------------------------------------\n def short(self):\n \"\"\"手动开空\"\"\"\n self.ctaEngine.tradeStrategy(self.name,CTAORDER_SHORT)\n self.ctaEngine.writeCtaLog( u'手动开空' + self.name) \n\n def cover(self):\n \"\"\"手动平空\"\"\"\n self.ctaEngine.tradeStrategy(self.name,CTAORDER_COVER)\n self.ctaEngine.writeCtaLog( u'手动平空' + self.name) \n\n def paramSetting(self):\n \"\"\"设置参数窗口\"\"\"\n self.paramWindow = ParamWindow2(self.name)\n self.paramWindow.paramters = self.paramWindow.loadParameter()\n self.paramWindow.showParam()\n self.paramWindow.show()\n\n###################################################˝#####################\nclass CtaEngineManager(QtWidgets.QWidget):\n \"\"\"CTA引擎管理组件\"\"\"\n signal = QtCore.pyqtSignal(type(Event()))\n\n #----------------------------------------------------------------------\n def __init__(self, ctaEngine, eventEngine, parent=None):\n \"\"\"Constructor\"\"\"\n super(CtaEngineManager, self).__init__(parent)\n \n self.ctaEngine = ctaEngine\n self.eventEngine = eventEngine\n \n self.strategyLoaded = False\n \n self.initUi()\n self.registerEvent()\n \n # 记录日志\n 
self.ctaEngine.writeCtaLog(u'CTA引擎启动成功') \n \n #----------------------------------------------------------------------\n def initUi(self):\n \"\"\"初始化界面\"\"\"\n self.setWindowTitle(u'CTA策略')\n \n # 按钮\n loadButton = QtWidgets.QPushButton(u'加载策略')\n initAllButton = QtWidgets.QPushButton(u'全部初始化')\n startAllButton = QtWidgets.QPushButton(u'全部启动')\n stopAllButton = QtWidgets.QPushButton(u'全部停止')\n savePositionButton = QtWidgets.QPushButton(u'保存持仓')\n \n loadButton.clicked.connect(self.load)\n initAllButton.clicked.connect(self.initAll)\n startAllButton.clicked.connect(self.startAll)\n stopAllButton.clicked.connect(self.stopAll)\n savePositionButton.clicked.connect(self.ctaEngine.savePosition)\n \n # 滚动区域,放置所有的CtaStrategyManager\n self.scrollArea = QtWidgets.QScrollArea()\n self.scrollArea.setWidgetResizable(True)\n \n # CTA组件的日志监控\n self.ctaLogMonitor = QtWidgets.QTextEdit()\n self.ctaLogMonitor.setReadOnly(True)\n self.ctaLogMonitor.setMaximumHeight(200)\n \n # 设置布局\n hbox2 = QtWidgets.QHBoxLayout()\n hbox2.addWidget(loadButton)\n hbox2.addWidget(initAllButton)\n hbox2.addWidget(startAllButton)\n hbox2.addWidget(stopAllButton)\n hbox2.addWidget(savePositionButton)\n hbox2.addStretch()\n \n vbox = QtWidgets.QVBoxLayout()\n vbox.addLayout(hbox2)\n vbox.addWidget(self.scrollArea)\n vbox.addWidget(self.ctaLogMonitor)\n self.setLayout(vbox)\n \n #----------------------------------------------------------------------\n def initStrategyManager(self):\n \"\"\"初始化策略管理组件界面\"\"\" \n w = QtWidgets.QWidget()\n vbox = QtWidgets.QVBoxLayout()\n \n for name in self.ctaEngine.strategyDict.keys():\n strategyManager = CtaStrategyManager(self.ctaEngine, self.eventEngine, name)\n vbox.addWidget(strategyManager)\n \n vbox.addStretch()\n \n w.setLayout(vbox)\n self.scrollArea.setWidget(w) \n \n #----------------------------------------------------------------------\n def initAll(self):\n \"\"\"全部初始化\"\"\"\n for name in self.ctaEngine.strategyDict.keys():\n self.ctaEngine.initStrategy(name) \n \n #----------------------------------------------------------------------\n def startAll(self):\n \"\"\"全部启动\"\"\"\n for name in self.ctaEngine.strategyDict.keys():\n self.ctaEngine.startStrategy(name)\n \n #----------------------------------------------------------------------\n def stopAll(self):\n \"\"\"全部停止\"\"\"\n for name in self.ctaEngine.strategyDict.keys():\n self.ctaEngine.stopStrategy(name)\n \n #----------------------------------------------------------------------\n def load(self):\n \"\"\"加载策略\"\"\"\n if not self.strategyLoaded:\n self.ctaEngine.loadSetting()\n self.initStrategyManager()\n self.strategyLoaded = True\n self.ctaEngine.writeCtaLog(u'策略加载成功')\n \n #----------------------------------------------------------------------\n def updateCtaLog(self, event):\n \"\"\"更新CTA相关日志\"\"\"\n log = event.dict_['data']\n content = '\\t'.join([log.logTime, log.logContent])\n self.ctaLogMonitor.append(content)\n \n #----------------------------------------------------------------------\n def registerEvent(self):\n \"\"\"注册事件监听\"\"\"\n self.signal.connect(self.updateCtaLog)\n self.eventEngine.register(EVENT_CTA_LOG, self.signal.emit)\n \n #----------------------------------------------------------------------\n def closeEvent(self, event):\n \"\"\"关闭窗口时的事件\"\"\"\n reply = QtGui.QMessageBox.question(self, text.SAVE_POSITION_DATA,\n text.SAVE_POSITION_QUESTION, QtGui.QMessageBox.Yes | \n QtGui.QMessageBox.No, QtGui.QMessageBox.No)\n \n if reply == QtGui.QMessageBox.Yes: \n self.ctaEngine.savePosition()\n \n 
event.accept()\n \n \n \n\n\n\n \n ","sub_path":"vn.trader/ctaStrategy/uiCtaWidget.py","file_name":"uiCtaWidget.py","file_ext":"py","file_size_in_byte":25131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"32669057","text":"# -*- coding:utf-8 -*-\nimport json\nimport time\nimport urllib2\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\nAPPID = 'wx21d659c4c87b4510'\nAPPSECRET = '7b6fc0fa5bc3b33309da4b562aed0c58'\n\nclass AccessToken(object):\n\n __access_token = {\n 'access_token': '',\n 'update_time': time.time(),\n 'expires_in': 7200\n }\n\n @classmethod\n def get_access_token(cls):\n # 1. 是否存在 2. 是否过期 3. 返回token\n if not cls.__access_token.get('access_token') or \\\n (time.time() - cls.__access_token.get('update_time') > cls.__access_token.get('expires_in')):\n\n # 获取数据\n\n url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s' % (APPID, APPSECRET)\n\n response = urllib2.urlopen(url)\n\n resp_data = response.read()\n\n resp_dict = json.loads(resp_data)\n\n if 'errcode' in resp_dict:\n raise Exception(resp_dict.get('errmsg'))\n\n # 重新赋值\n cls.__access_token['access_token'] = resp_dict.get('access_token')\n cls.__access_token['expires_in'] = resp_dict.get('expires_in')\n cls.__access_token['update_time'] = time.time()\n\n # 返回token\n return cls.__access_token.get('access_token')\n\n\n@app.route('/get_qrcode/')\ndef hello_world(scene_id):\n # 获取带参数的二维码的过程包括两步,首先创建二维码ticket,然后凭借ticket到指定URL换取二维码。\n # 1,发送请求, 获取ticket\n ticket_url = 'https://api.weixin.qq.com/cgi-bin/qrcode/create?access_token='+ AccessToken.get_access_token()\n\n params = {\"expire_seconds\": 604800, \"action_name\": \"QR_SCENE\", \"action_info\": {\"scene\": {\"scene_id\": scene_id}}}\n # 使用urllib2发送POST请求\n response = urllib2.urlopen(ticket_url, data=json.dumps(params))\n\n # 获取返回的数据(字符串)\n resp_data = response.read()\n\n # 转化为字典\n resp_dict = json.loads(resp_data)\n\n # 2. 
从字典中获取ticket\n ticket = resp_dict.get('ticket')\n\n # 3,到指定地址获取二维码图片\n img_url = 'https://mp.weixin.qq.com/cgi-bin/showqrcode?ticket='+ticket\n return '' % img_url\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"flask1-5/flask_05/generate_img.py","file_name":"generate_img.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"460905616","text":"from django.urls import path, re_path\n\nfrom api.views import (\n GetCredentialModelViewSet,\n CredentialDetailModelViewSet,\n\n GetDealAPIView,\n GetInvoiceAPIView,\n GetOffersAPIView,\n GetEmployeersAPIView,\n LookupAPIView,\n HomeAPIView,\n\n MegaplanCallAPIView,\n\n)\nfrom api.filter_views import (\n DealFilterViewSet,\n InvoiceFilterViewSet,\n OfferFilterViewSet,\n EmployeeFilterViewSet,\n)\n\n\nurlpatterns = [\n # basic views\n path('get_credentials/', GetCredentialModelViewSet.as_view({\"post\": \"create\"}), name='get-credentials'),\n # path('deals/', GetDealAPIView.as_view(), name='get-deals'),\n # path('invoices/', GetInvoiceAPIView.as_view(), name='get-invoice'),\n # path('offers/', GetOffersAPIView.as_view(), name='get-offers'),\n # path('employeers/', GetEmployeersAPIView.as_view(), name='get-get_employeers'),\n\n # filter views\n path('filter/deal/', DealFilterViewSet.as_view({\"post\": \"get_filters\", \"get\": \"get_filters\"}), name='filter-deal-list'),\n path('filter/invoice/', InvoiceFilterViewSet.as_view({\"post\": \"get_filters\", \"get\": \"get_filters\"}), name='filter-invoice-list'),\n path('filter/offer/', OfferFilterViewSet.as_view({\"post\": \"get_filters\", \"get\": \"get_filters\"}), name='filter-offer-list'),\n path('filter/employee/', EmployeeFilterViewSet.as_view({\"post\": \"get_filters\", \"get\": \"get_filters\"}), name='filter-employeers-list'),\n\n # lookup\n path('lookup/', LookupAPIView.as_view(), name='lookup'),\n\n # detail\n re_path(r'^credential/(?P\\d+)/', CredentialDetailModelViewSet.as_view({\"post\": \"get_info\", \"get\": \"get_info\"}), name=\"credential-detail\"),\n\n # methods\n re_path(r'^(?P[-\\w]+)/$', MegaplanCallAPIView.as_view(), name='method-1'),\n\n # home 'api/'\n path('', HomeAPIView.as_view({\"post\": \"get_info\", \"get\": \"get_info\"}), name='home'),\n]","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"61233221","text":"import os\nimport zipfile\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n\n def all_exist(self, paths):\n for path in paths:\n if not os.path.exists(path):\n print(path + \" not found, extracting geodata...\")\n return False\n return True\n\n def handle(self, *args, **options):\n boundaries_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data/boundaries'))\n boundaries_zip = boundaries_dir + '/boundaries.zip'\n boundary_paths = [\n boundaries_dir + '/scottish_local_authority.geojson',\n boundaries_dir + '/SG_NHS_HealthBoards_2019_clipped.geojson',\n boundaries_dir + '/SG_NHS_IntegrationAuthority_2019_clipped.geojson',\n boundaries_dir + '/Countries_December_2017_Ultra_Generalised_Clipped_Boundaries_in_UK.geojson'\n ]\n if self.all_exist(boundary_paths):\n print(\"Boundary data already extracted.\")\n else:\n print(\"Extracting \" + boundaries_zip + \" to \" + boundaries_dir)\n with zipfile.ZipFile(boundaries_zip,\"r\") as zip_ref:\n 
zip_ref.extractall(boundaries_dir)\n print(\"Done.\")\n","sub_path":"aliss/management/commands/extract_geodata.py","file_name":"extract_geodata.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"189046344","text":"import os\nimport re\nimport argparse\nimport time\nfrom datetime import datetime\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\n\nTHRESHOLD = (1 - 0.7395)\n\ndef process_input_arguments():\n parser = argparse.ArgumentParser('Import a model and classify images.')\n parser.add_argument('-d', '--directory', default='', help='Folder holding category folders')\t\n parser.add_argument('-c1', '--category1', help='Folder of class 1')\n parser.add_argument('-c2', '--category2', help='Folder of class 2')\n parser.add_argument('-s', '--img_size', default=256, help='Image dimension in pixels')\n parser.add_argument('-m', '--model', help='Filepath of model to use')\n args = parser.parse_args()\n\n img_directory = args.directory\n folders = [args.category1, args.category2]\n img_size = args.img_size\n model_directory = args.model\n\n return img_directory, folders, img_size, model_directory\n\ndef import_images(img_directory, folders, img_size):\n ''' Imports images from 2 folders into program\n\n Parameters:\n -----\n img_directory : directory of images folders\n folders : list of two strings; folders[0] is the folder of images with classification = 0, folders[1] is classification 1\n\n Output:\n -----\n features : nparray (shape = #images x img_size x img_size x 3) of ints, containing the RGB pixel values for each image, i.e. the inputs for the model\n labels : nparray (shape = #images x 1) of ints, containing actual classification (based on the image folder)\n img_names : nparray (shape = #images x 1) of strings, contains all filenames\n '''\n all_data = []\n for category in folders:\n path=os.path.join(img_directory,category) #look at each folder of images\n class_index = folders.index(category)\n for img in os.listdir(path): # look at each image\n try:\n img_array = cv2.imread(os.path.join(path,img), -1) #-1 means image is read as color\n img_array = img_array/255.0\n all_data.append([img_array, class_index,img]) #, img])\n except Exception as e:\n pass\n features = []\n labels = []\n img_names = []\n\n\t#store the image features (array of RGB for each pixel) and labels into corresponding arrays\n for data_feature, data_label, img in all_data:\n features.append(data_feature)\n labels.append(data_label)\n img_names.append(img)\n\n #reshape into numpy array\n features = np.array(features) #turns list into a numpy array\n features = features.reshape(-1, img_size, img_size, 3) # 3 bc three channels for RGB values\n # -1 means \"numpy figure out this dimension,\" so the new nparray has the dimensions of: [#_of_images rows, img_size, img_size, 3] \n labels = np.array(labels)\n return features, labels, img_names\n\ndef make_predictions(pixel_values, actual_class, img_filenames, class_labels, model):\n ''' Model predicts classifications for all images, and organizes into a DataFrame\n\n Parameters:\n -----\n @pixel_values : numpy array of RBG pixel values for each image\n @actual_class : numpy array of 0/1's of actual classification of the images\n @img_filenames : numpy array of the image filenames\n @class_labels : list containing the names of the two classes (e.g. 
['coastal', 'rostrata'])\n @model : keras model, already loaded\n\n Output:\n -----\n DataFrame with the following columns:\n 1. image filename (string)\n 2. prediction of class = 0 (float)\n 3. prediction of class = 1 (float)\n 4. class predition - argmax (int, 0 or 1)\n 5. actual class (int, 0 or 1)\n 6. predicted class label (string)\n 7. actual class label (string)\n 8. True Positive (1 if the image was correctly predicted to be class=1, 0 otherwise)\n 9. False Negative (1 if the image was incorrectly predicted to be class=1, 0 otherwise)\n 10. False Positive (1 if the image was incorrectly predicted to be class=0, 0 otherwise)\n 11. True Negative (1 if the image was correctly predicted to be class=0, 0 otherwise)\n '''\n # Predict classes of imported images\n predictions = model.predict(pixel_values)\n prediction_integer_func = np.vectorize(lambda t: (1 if t > THRESHOLD else 0))\n prediction_class = prediction_integer_func(predictions[:,[1]]) # 0/1 labels of predictions\n\n prediction_label_func = np.vectorize(lambda t: class_labels[t])\n pred_actual_class_labels = np.c_[prediction_label_func(prediction_class), prediction_label_func(actual_class)]\n \n # Calculate confusion matrix: tp, fn, fp, tn\n conf_matrix = confusion_matrix(prediction_class, actual_class)\n\n # Join all information into one nparray -> pd.DataFrame\n headers = ['filename', class_labels[0] + '_pred', class_labels[1] + '_pred', 'pred_class', 'actual_class', 'pred_label', 'actual_label', 'tp', 'fn', 'fp', 'tn']\n joined_arrays = np.c_ [img_filenames, predictions, prediction_class, actual_class, pred_actual_class_labels, conf_matrix]\n predictions_to_write = pd.DataFrame(joined_arrays, columns=headers)\n\n return predictions_to_write\n\ndef confusion_matrix(prediction_class_labels, actual_class_labels):\n ''' Determines confusion matrix value for each tuple.\n\n PARAMATERS:\n -----\n prediction_class_labels: numpy array with 0/1 predicted classifications (shape = # of images x 1)\n actual_class_labels: numpy array with 0/1 actual classifications (shape = # of images x 1)\n\n OUTPUT:\n -----\n numpy array (shape = # of images x 4), where:\n - col 1 = true positive\n - col 2 = false negative\n - col 3 = false positives\n - col 4 = true negatives\n '''\n # columns: 0=tp, 1=fn, 2=fp, 3=tn\n conf_mat = np.zeros((len(prediction_class_labels), 4))\n \n for idx, pred in enumerate(prediction_class_labels):\n actual = actual_class_labels[idx]\n if pred == 1:\n if pred == actual:\n conf_mat[idx][0] = 1 # true positive\n else:\n conf_mat[idx][2] = 1 # false positive\n elif pred == 0:\n if pred == actual:\n conf_mat[idx][3] = 1 # true negative\n else:\n conf_mat[idx][1] = 1 # false negative\n else:\n print('Invalid value for prediction class!')\n \n return conf_mat\n\ndef write_dataframe_to_CSV(folder, filename, dataframe_to_write):\n ''' Writes the given DataFrame to a file.\n Parameters:\n -----\n @folder : String to designate folder in which to write file\n @filename : String to add designation to filename -- file names are timestamp+filename\n @dataframe_to_write : DataFrame to be written to CSV\n\n Output:\n -----\n File path of the written file\n '''\n timestamp = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M-%S')\n filename = timestamp + filename + '.csv'\n filepath = os.path.join(folder, filename)\n dataframe_to_write.to_csv(filepath, encoding = 'utf-8', index = False)\n\n return filepath\n\nif __name__ == '__main__':\n # Start execution and parse arguments\n start_time = time.time()\n img_directory, folders, img_size, 
model_directory = process_input_arguments()\n\n # Load model\n model = tf.keras.models.load_model(model_directory)\n print('Model loaded.')\n\n # Import images\n pixel_values, actual_class, img_filenames = import_images(img_directory, folders, img_size)\n print('Images imported.')\n\n # Map class numbers to class labels\n class_labels = [ folders[0].split('_')[0], folders[1].split('_')[0] ]\n \n # Generate predictions and organize results\n predictions_to_write = make_predictions(pixel_values, actual_class, img_filenames, class_labels, model)\n print('Predictions generated.')\n\n # Save to file\n if not os.path.exists('predictions'):\n os.makedirs('predictions')\n model_name = re.split('[/\\\\\\\\]+', model_directory)[2].split('.')[0]\n filename = write_dataframe_to_CSV('predictions', 'predictions'+model_name, predictions_to_write)\n print('File written to \\'%s\\'.' % filename)\n\n # Finish execution\n end_time = time.time()\n print('Completed in %.1f seconds' % (end_time - start_time))","sub_path":"classify_images.py","file_name":"classify_images.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"279872423","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login$', views.login, name='login'),\n url(r'^register$', views.register, name='register'),\n url(r'^home$', views.index, name='home'),\n url(r'^games/(?P\\d+)$', views.games, name='games'),\n url(r'^news/(?P\\d+)$', views.news, name='news'),\n url(r'^business$', views.business, name='business'),\n url(r'^joinUs$', views.joinUs, name='joinUs'),\n url(r'^aboutUs$', views.aboutUs, name='aboutUs'),\n url(r'^gameDetail/(?P\\d+)$', views.gameDetail, name='gameDetail'),\n url(r'^newsDetail/(?P\\d+)$', views.newsDetail, name='newsDetail'),\n url(r'^jobDesc/(?P\\d+)$', views.jobDesc, name='jobDesc'),\n]\n","sub_path":"company/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623371658","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt, mpld3\nimport sys\n\ndf = pd.read_csv('timing.csv',index_col=0)\nn = str(sys.argv[1])\n\nfound=False\nfor i in range(0,100):\n if df.index[i] == n:\n x=df.iloc[i]\n found=True\n break\n\nif found==False:\n print('NO SUCH BUS EXISTS')\n exit()\n \nx = x[0:]\ny_pos = np.arange(len(x))\n\nfig = plt.figure(1, [7,5])\nax = fig.gca() \nax.set_ylim([-5,7])\nax.scatter(y_pos,x, alpha=0.9)\nax.plot(y_pos, x, alpha=0.5)\nax.plot([0,30], [0,0], color = 'red')\nplt.ylabel('Time Deviation')\nplt.xlabel('Days')\n\nmpld3.save_html(fig,\"scatterPlot.html\")\n","sub_path":"userApp/scatterPlot.py","file_name":"scatterPlot.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"520690331","text":"from os.path import expanduser\nimport csv\nimport platform\nimport os\nfrom maskgen_loader import MaskGenLoader\nfrom json import JSONEncoder\nimport json\nimport logging\n\nclass OperationEncoder(JSONEncoder):\n def default(self, o):\n return o.__dict__\n\ndef getFileName(fileName, path=None):\n import sys\n if (os.path.exists(fileName)):\n logging.getLogger('maskgen').info( 'Loading ' + fileName)\n return fileName\n places = [os.getenv('MASKGEN_RESOURCES', 'resources')]\n 
places.extend([os.path.join(x,'resources') for x in sys.path if 'maskgen' in x or\n (path is not None and path in x)])\n for place in places:\n newNanme = os.path.abspath(os.path.join(place, fileName))\n if os.path.exists(newNanme):\n logging.getLogger('maskgen').info( 'Loading ' + newNanme)\n return newNanme\n\nclass ProjectProperty:\n description = None\n name = None\n type = None\n operations = None\n parameter = None\n rule = None\n values = None\n value = None\n information = None\n semanticgroup = False\n node = False\n readonly = False\n mandatory= False\n nodetype = None\n \"\"\"\n @type operations: list of str\n @type nodetype: str\n \"\"\"\n\n def __init__(self, name='', type='', operations=None, parameter=None, description=None,\n information=None, value=None, values=None, rule=None, node=False, readonly=False,mandatory=True,\n nodetype=None,semanticgroup=False):\n self.name = name\n self.type = type\n self.operations = operations\n self.parameter = parameter\n self.description = description\n self.rule = rule\n self.values = values\n self.value = value\n self.information = information\n self.node = node\n self.readonly = readonly\n self.mandatory = mandatory\n self.nodetype = nodetype\n self.semanticgroup = semanticgroup\n\n\nclass Operation:\n name = None\n category = None\n includeInMask = False\n description = None\n optionalparameters = {}\n mandatoryparameters = {}\n rules = []\n analysisOperations = []\n transitions = []\n compareparameters = {}\n generateMask = True\n groupedOperations = None\n groupedCategories = None\n maskTransformFunction = None\n compareOperations = None\n\n def __init__(self, name='', category='', includeInMask=False, rules=list(), optionalparameters=dict(),\n mandatoryparameters=dict(), description=None, analysisOperations=list(), transitions=list(),\n compareparameters=dict(),generateMask = True,groupedOperations=None, groupedCategories = None,\n maskTransformFunction=None):\n self.name = name\n self.category = category\n self.includeInMask = includeInMask\n self.rules = rules\n self.mandatoryparameters = mandatoryparameters if mandatoryparameters is not None else {}\n self.optionalparameters = optionalparameters if optionalparameters is not None else {}\n self.description = description\n self.analysisOperations = analysisOperations\n self.transitions = transitions\n self.compareparameters = compareparameters\n self.generateMask = generateMask\n self.groupedOperations = groupedOperations\n self.groupedCategories = groupedCategories\n self.maskTransformFunction = maskTransformFunction\n\n def getConvertFunction(self):\n if 'convert_function' in self.compareparameters:\n funcName = self.compareparameters['convert_function']\n return getRule(funcName)\n return None\n\n def getCompareFunction(self):\n if 'function' in self.compareparameters:\n funcName = self.compareparameters['function']\n return getRule(funcName)\n return None\n\n def to_JSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\n\ndef getOperation(name, fake = False, warning=True):\n \"\"\"\n\n :param name: name of the operation\n :param fake: Set to True to allow fake operations\n :return: Operation\n \"\"\"\n global metadataLoader\n if name == 'Donor':\n return Operation(name='Donor', category='Donor',maskTransformFunction='maskgen.mask_rules.donor')\n if name not in metadataLoader.operations and warning:\n logging.getLogger('maskgen').warning( 'Requested missing operation ' + str(name))\n return metadataLoader.operations[name] if name in 
metadataLoader.operations else (Operation(name='name', category='Bad') if fake else None)\n\n\ndef getOperations():\n global metadataLoader\n return metadataLoader.operations\n\n\ndef getOperationsByCategory(sourcetype, targettype):\n global metadataLoader\n result = {}\n transition = sourcetype + '.' + targettype\n for name, op in metadataLoader.operations.iteritems():\n if transition in op.transitions:\n if op.category not in result:\n result[op.category] = []\n result[op.category].append(op.name)\n return result\n\ndef getPropertiesBySourceType(source):\n global metadataLoader\n return metadataLoader.node_properties[source]\n\ndef getSoftwareSet():\n global metadataLoader\n return metadataLoader.softwareset\n\n\ndef saveJSON(filename):\n global metadataLoader\n opnamelist = list(metadataLoader.operations.keys())\n opnamelist.sort()\n oplist = [metadataLoader.operations[op] for op in opnamelist]\n with open(filename, 'w') as f:\n json.dump({'operations': oplist}, f, indent=2, cls=OperationEncoder)\n\n\ndef loadProjectPropertyJSON(fileName):\n res = list()\n fileName = getFileName(fileName)\n with open(fileName, 'r') as f:\n props = json.load(f)\n for prop in props['properties']:\n res.append( ProjectProperty(name=prop['name'], type=prop['type'], description=prop['description'],\n parameter=prop['parameter'] if 'parameter' in prop else None,\n rule=prop['rule'] if 'rule' in prop else None,\n values=prop['values'] if 'values' in prop else None,\n value=prop['value'] if 'value' in prop else None,\n node=prop['node'] if 'node' in prop else False,\n information=prop['information'] if 'information' in prop else None,\n operations=[prop['operation']] if 'operation' in prop else\n (prop['operations'] if 'operations' in prop else []),\n readonly=prop['readonly'] if 'readonly' in prop else None,\n mandatory=prop['mandatory'] if 'mandatory' in prop else False,\n semanticgroup=prop['semanticgroup'] if 'semanticgroup' in prop else False,\n nodetype=prop['nodetype'] if 'nodetype' in prop else None))\n return res\n\n\ndef loadOperationJSON(fileName):\n operations = {}\n fileName = getFileName(fileName)\n with open(fileName, 'r') as f:\n ops = json.load(f)\n for op in ops['operations']:\n operations[op['name']] = Operation(name=op['name'], category=op['category'], includeInMask=op['includeInMask'],\n rules=op['rules'], optionalparameters=op['optionalparameters'],\n mandatoryparameters=op['mandatoryparameters'],\n description=op['description'] if 'description' in op else None,\n generateMask=op['generateMask'] if 'generateMask' in op else True,\n analysisOperations=op[\n 'analysisOperations'] if 'analysisOperations' in op else [],\n transitions=op['transitions'] if 'transitions' in op else [],\n compareparameters=op[\n 'compareparameters'] if 'compareparameters' in op else dict(),\n maskTransformFunction=op['maskTransformFunction'] if 'maskTransformFunction' in op else None)\n return operations, ops['filtergroups'] if 'filtergroups' in ops else {}, ops['version'] if 'version' in ops else '0.4.0308.db2133eadc', \\\n ops['node_properties'] if 'node_properties' in ops else {}\n\ncustomRuleFunc = {}\ndef loadCustomRules():\n global customRuleFunc\n import pkg_resources\n for p in pkg_resources.iter_entry_points(\"maskgen_rules\"):\n logging.getLogger('maskgen').info( 'load rule ' + p.name)\n customRuleFunc[p.name] = p.load()\n\ndef insertCustomRule(name,func):\n global customRuleFunc\n customRuleFunc[name] = func\n\ndef noopFule(*arg,**kwargs):\n return None\n\ndef getRule(name, globals={}):\n if name is 
None:\n return None\n import importlib\n global customRuleFunc\n if name in customRuleFunc:\n return customRuleFunc[name]\n else:\n if '.' not in name:\n return globals.get(name)\n mod_name, func_name = name.rsplit('.', 1)\n try:\n mod = importlib.import_module(mod_name)\n func = getattr(mod, func_name)\n customRuleFunc[name] = func\n return func#globals.get(name)\n except Exception as e:\n logging.getLogger('maskgen').error('Unable to load rule {}: {}'.format(name,str(e)))\n return noopFule\n\ndef getProjectProperties():\n \"\"\"\n\n :return:\n @rtype: list of ProjectProperty\n \"\"\"\n global metadataLoader\n return metadataLoader.projectProperties\n\n\ndef getSemanticGroups():\n return [prop.description for prop in getProjectProperties() if prop.semanticgroup]\n\ndef getFilters(filtertype):\n global metadataLoader\n if filtertype == 'filtergroups':\n return metadataLoader.filters\n else:\n return {}\n\n\nclass MetaDataLoader:\n version = ''\n softwareset = {}\n operations = {}\n filters = {}\n operationsByCategory = {}\n projectProperties = {}\n\n def __init__(self):\n self.operations , self.filters, self.operationsByCategory = self.loadOperations('operations.json')\n self.softwareset = self.loadSoftware('software.csv')\n self.projectProperties = self.loadProjectProperties('project_properties.json')\n\n def loadSoftware(self, fileName):\n fileName = getFileName(fileName)\n self.softwareset = {'image': {}, 'video': {}, 'audio': {}}\n with open(fileName) as f:\n line_no = 0\n for l in f.readlines():\n line_no += 1\n l = l.strip()\n if len(l) == 0:\n continue\n columns = l.split(',')\n if len(columns) < 3:\n logging.getLogger('maskgen').error(\n 'Invalid software description on line ' + str(line_no) + ': ' + l)\n software_type = columns[0].strip()\n software_name = columns[1].strip()\n versions = [x.strip() for x in columns[2:] if len(x) > 0]\n if software_type not in ['both', 'image', 'video', 'audio', 'all']:\n logging.getLogger('maskgen').error('Invalid software type on line ' + str(line_no) + ': ' + l)\n elif len(software_name) > 0:\n types = ['image', 'video'] if software_type == 'both' else [software_type]\n types = ['image', 'video', 'audio'] if software_type == 'all' else types\n types = ['video', 'audio'] if software_type == 'audio' else types\n for stype in types:\n self.softwareset[stype][software_name] = versions\n return self.softwareset\n\n def loadProjectProperties(self, fileName):\n loadCustomRules()\n self.projectProperties = loadProjectPropertyJSON(fileName)\n return self.projectProperties\n\n def loadOperations(self,fileName):\n self.operations, self.filters, self.version, self.node_properties = loadOperationJSON(fileName)\n logging.getLogger('maskgen').info('Loaded operation version ' + self.version)\n self.operationsByCategory = {}\n for op, data in self.operations.iteritems():\n category = data.category\n if category not in self.operationsByCategory:\n self.operationsByCategory[category] = []\n self.operationsByCategory[category].append(op)\n return self.operations, self.filters, self.operationsByCategory\n\n\nglobal metadataLoader\nmetadataLoader = MetaDataLoader()\n\ndef toSoftware(columns):\n return [x.strip() for x in columns[1:] if len(x) > 0]\n\ndef getOS():\n return platform.system() + ' ' + platform.release() + ' ' + platform.version()\n\n\ndef operationVersion():\n global metadataLoader\n return metadataLoader.version\n\ndef validateSoftware(softwareName, softwareVersion):\n global metadataLoader\n for software_type, typed_software_set in 
metadataLoader.softwareset.iteritems():\n if softwareName in typed_software_set and softwareVersion in typed_software_set[softwareName]:\n return True\n return False\n\n\nclass Software:\n name = None\n version = None\n internal = False\n\n def __init__(self, name, version, internal=False):\n self.name = name\n self.version = version\n self.internal = internal\n\n\nclass SoftwareLoader:\n software = {}\n preference = None\n loader = MaskGenLoader()\n\n def __init__(self):\n self.load()\n\n def load(self):\n res = {}\n self.preference = self.loader.get_key('software_pref')\n newset = self.loader.get_key('software')\n if newset is not None:\n if type(newset) == list:\n for item in newset:\n if validateSoftware(item[0], item[1]):\n res[item[0]] = item[1]\n else:\n for name, version in newset.iteritems():\n if validateSoftware(name, version):\n res[name] = version\n self.software = res\n\n def get_preferred_version(self, name=None):\n if self.preference is not None and (name is None or name == self.preference[0]):\n return self.preference[1]\n if len(self.software) > 0:\n if name in self.software:\n return self.software[name]\n elif name is None:\n return self.software[self.software.keys()[0]]\n return None\n\n def get_preferred_name(self):\n if self.preference is not None:\n return self.preference[0]\n if len(self.software) > 0:\n return self.software.keys()[0]\n return None\n\n def get_names(self, software_type):\n global metadataLoader\n return list(metadataLoader.softwareset[software_type].keys())\n\n def get_versions(self, name, software_type=None, version=None):\n global metadataLoader\n types_to_check = ['image', 'video', 'audio'] if software_type is None else [software_type]\n for type_to_check in types_to_check:\n versions = metadataLoader.softwareset[type_to_check][name] if name in metadataLoader.softwareset[type_to_check] else None\n if versions is None:\n continue\n if version is not None and version not in versions:\n versions = list(versions)\n versions.append(version)\n logging.getLogger('maskgen').warning( version + ' not in approved set for software ' + name)\n return versions\n return []\n\n def add(self, software):\n isChanged = False\n if validateSoftware(software.name, software.version):\n if not software.name in self.software or self.software[software.name] != software.version:\n self.software[software.name] = software.version\n isChanged = True\n pref = self.preference\n if pref is None or pref[0] != software.name or pref[1] != software.version:\n self.preference = [software.name, software.version]\n isChanged = True\n return isChanged\n\n def save(self):\n self.loader.saveall([(\"software\", self.software), (\"software_pref\", self.preference)])\n","sub_path":"maskgen/software_loader.py","file_name":"software_loader.py","file_ext":"py","file_size_in_byte":16588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"570398692","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom collections import Counter\nmeans, std, variance = [], [], []\niterations = []\nranks = []\n\n# attacks = [\"Gradient Sign\", \"Additive Gaussian Noise\", \"Blended Uniform Noise\", \"Gaussian Blur\", \"NewtonFool\", \\\n# \"Gradient Sign\", \"DeepFoolAttack\", \"CarliniWagnerL2Attack\", \"SaltAndPepperNoiseAttack\"]\nwith open('./newbatch_parsed.txt') as fin:\n for line in fin:\n if \"Summary of attack\" in line:\n # plt.hist(ranks, bins=5)\n # plt.show()\n iterations = np.array(iterations)\n means.append(np.average(iterations))\n 
std.append(np.std(iterations))\n variance.append(np.var(iterations))\n iterations = []\n ranks = []\n else:\n iterations.append(int(line.split(\" \")[0]))\n ranks.append(int(line.split(\" \")[1]))\n\nprint (means)\nprint (std)\nprint (variance)\n\n# lines = []\n# with open('./fixed_term_processed_1700.txt') as fin:\n# for line in fin:\n# if \"Begin attack\" in line:\n# # plt.hist(ranks, bins=5)\n# # plt.show()\n# c = Counter(lines)\n# print (c[1], len(lines), line)\n# lines = []\n# else:\n# lines.append(int(line))\n# c = Counter(lines)\n# print (c[1], len(lines))\n\n# f = open(\"./clean_term_parsed.txt\")\n# lines = f.readlines()\n# lines = [int(i) for i in lines]\n# c = Counter(lines)\n# print (c[1], len(lines), \"Clean\")\n","sub_path":"compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"430111653","text":"from mediadownloader import db\n\n\nclass File(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n type = db.Column(db.String(20), unique=False)\n filename = db.Column(db.String(256), unique=False)\n date_added = db.Column(db.DateTime, unique=False)\n\n def __init__(self, type, filename, date_added):\n self.type = type\n self.filename = filename\n self.date_added = date_added\n\n def __repr__(self):\n return '' % self.filename\n\n","sub_path":"mediadownloader/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"458158058","text":"# Modified from https://www.analyticsvidhya.com/blog/2016/07/practical-guide-data-preprocessing-python-scikit-learn/\r\n\r\nimport pandas as pd\r\n\r\n# Importing training data set\r\nX_train=pd.read_csv('X_train.csv')\r\nY_train=pd.read_csv('Y_train.csv')\r\n# Importing testing data set\r\nX_test=pd.read_csv('X_test.csv')\r\nY_test=pd.read_csv('Y_test.csv')\r\n\r\n#print (X_train.head())\r\n\r\n# Initializing and Fitting a k-NN model\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn=KNeighborsClassifier(n_neighbors=5)\r\nknn.fit(X_train[['ApplicantIncome', 'CoapplicantIncome','LoanAmount', 'Loan_Amount_Term', 'Credit_History']], Y_train)\r\n# Checking the performance of our model on the testing data set\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"1\", accuracy_score(Y_test,knn.predict(X_test[['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term', 'Credit_History']])))\r\n\r\n# Just guessing might do better\r\nprint(\"2\", Y_train.Target.value_counts() / Y_train.Target.count())\r\nprint (\"3\", Y_test.Target.value_counts()/Y_test.Target.count())\r\n\r\n# Importing MinMaxScaler and initializing it\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nmin_max=MinMaxScaler()\r\n# Scaling down both train and test data set\r\nX_train_minmax=min_max.fit_transform(X_train[['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term', 'Credit_History']])\r\nX_test_minmax=min_max.fit_transform(X_test[['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term', 'Credit_History']])\r\n\r\n# Fitting k-NN on our scaled data set\r\nknn=KNeighborsClassifier(n_neighbors=5)\r\nknn.fit(X_train_minmax,Y_train)\r\n# Checking the model's accuracy\r\nprint(\"4\", accuracy_score(Y_test,knn.predict(X_test_minmax)))\r\n\r\n# Standardizing the train and test data\r\n#from sklearn.preprocessing import scale\r\n#X_train_scale=scale(X_train[['ApplicantIncome', 
'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term', 'Credit_History']])\r\n#X_test_scale=scale(X_test[['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term', 'Credit_History']])\r\n\r\n\r\n\r\n# Importing LabelEncoder and initializing it\r\nfrom sklearn.preprocessing import LabelEncoder\r\nle=LabelEncoder()\r\n# Iterating over all the common columns in train and test\r\nfor col in X_test.columns.values:\r\n # Encoding only categorical variables\r\n if X_test[col].dtypes=='object':\r\n # Using whole data to form an exhaustive list of levels\r\n data=X_train[col].append(X_test[col])\r\n le.fit(data.values)\r\n X_train[col]=le.transform(X_train[col])\r\n X_test[col]=le.transform(X_test[col])\r\n\r\n\r\n# One-hot encoding for handling categorical data: Gender', 'Married', 'Dependents', 'Education','Self_Employed','Credit_History', 'Property_Area'\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nenc=OneHotEncoder(sparse=False)\r\nX_train_1=X_train\r\nX_test_1=X_test\r\ncolumns=['Gender', 'Married', 'Dependents', 'Education','Self_Employed','Credit_History', 'Property_Area']\r\nfor col in columns:\r\n # creating an exhaustive list of all possible categorical values\r\n data=X_train[[col]].append(X_test[[col]])\r\n enc.fit(data)\r\n # Fitting One Hot Encoding on train data\r\n temp = enc.transform(X_train[[col]])\r\n # Changing the encoded features into a data frame with new column names\r\n temp=pd.DataFrame(temp,columns=[(col+\"_\"+str(i)) for i in data[col].value_counts().index])\r\n # In side by side concatenation index values should be same\r\n # Setting the index values similar to the X_train data frame\r\n temp=temp.set_index(X_train.index.values)\r\n # adding the new One Hot Encoded varibales to the train data frame\r\n X_train_1=pd.concat([X_train_1,temp],axis=1)\r\n # fitting One Hot Encoding on test data\r\n temp = enc.transform(X_test[[col]])\r\n # changing it into data frame and adding column names\r\n temp=pd.DataFrame(temp,columns=[(col+\"_\"+str(i)) for i in data[col].value_counts().index])\r\n # Setting the index for proper concatenation\r\n temp=temp.set_index(X_test.index.values)\r\n # adding the new One Hot Encoded varibales to test data frame\r\n X_test_1=pd.concat([X_test_1,temp],axis=1)\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nX_train[X_train.dtypes[(X_train.dtypes==\"float64\")|(X_train.dtypes==\"int64\")].index.values].hist(figsize=[11,11])\r\nX_train_1[X_train_1.dtypes[(X_train_1.dtypes==\"float64\")|(X_train_1.dtypes==\"int64\")].index.values].hist(figsize=[11,11])\r\n#plt.draw()\r\n#plt.show()\r\n\r\n# Standardizing the features\r\nfrom sklearn.preprocessing import scale\r\nX_train_scale=scale(X_train_1)\r\nX_test_scale=scale(X_test_1)\r\n\r\n# Pairplots correlation to observe the distribution of data from one feature to the other (only continuous features)\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 1000)\r\ndf2 = X_train.corr(method='spearman')\r\ndef f(x):\r\n if (x>-0.5 and x<0.5):\r\n return \" \"\r\n else:\r\n return x\r\n\r\nprint(df2.applymap(lambda x: f(x)))\r\n\r\n#Handle missing values\r\nfrom sklearn import preprocessing\r\nimp=preprocessing.Imputer(missing_values='NaN', strategy='mean', axis=0)\r\nX_train_scale=imp.fit_transform(X_train_scale)\r\n\r\n################# Fitting the logistic regression model\r\n# Fitting logistic regression on our standardized data set\r\nfrom sklearn.linear_model import 
LogisticRegression\r\nlog=LogisticRegression(penalty='l2',C=.01)\r\nlog.fit(X_train_scale,Y_train)\r\n# Checking the models accuracy\r\nprint(\"5\", accuracy_score(Y_test,log.predict(X_test_scale)))\r\n# 0.75. Its working now. But, the accuracy is still the same as we got with logistic regression after standardization from numeric features. This means categorical features we added are not very significant in our objective function.\r\n\r\n","sub_path":"Misc/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"586969256","text":"#\r\n# p-search: Python Word Search\r\n# Author: Brian Kaiser\r\n# January 2015\r\n# Version 1.0\r\n#\r\n# Simple p-search Python program\r\n#\r\nimport logging\r\nimport time\r\nimport _pSearchMatrix\r\n\r\nif __name__=='__main__':\r\n PSEARCHMatrix_VERSION = '1.0'\r\n # Turn on Logging\r\n logging.basicConfig(filename='pSearchLogMatrix.log',level=logging.DEBUG, format='%(asctime)s %(message)s')\r\n # Process the Command Line Arguments\r\n _pSearch.ParseCommandLine()\r\n log = logging.getLogger('main._psearch')\r\n startTime = time.time()\r\n log.info(\"p-searchMatrix started\")\r\n # Record the Starting Time\r\n # Perform Keyword Search\r\n _pSearchMatrix.SearchWords()\r\n # Record the Ending Time\r\n endTime = time.time()\r\n duration = endTime - startTime\r\n logging.info('Elapsed Time:'+ str(duration) +'seconds')\r\n logging.info('')\r\n logging.info('Program Terminated Normally')\r\n ","sub_path":"root/Desktop/Scripts/Python Forensic Scripts/Test/.Trash-0/files/pSearch/pSearchMatrix.py","file_name":"pSearchMatrix.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"114339614","text":"def main():\r\n k = 0\r\n while True:\r\n try:\r\n k = float(input(\"What is the thermal conductivity? \"))\r\n if k < 0:\r\n raise\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n density = 0\r\n while True:\r\n try:\r\n density = float(input(\"What is the density? \"))\r\n if density < 0:\r\n raise\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n c = 0\r\n while True:\r\n try:\r\n c = float(input(\"what is the specific heat? \"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n initialTemp = 0\r\n while True:\r\n try:\r\n initialTemp = float(input(\"What is the initial temperature? \"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n leftTemp = 0\r\n while True:\r\n try:\r\n leftTemp = float(input(\"What is the left boundary condition? \"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n rightTemp = 0\r\n while True:\r\n try:\r\n rightTemp = float(input(\"What is the right boundary condition? \"))\r\n if rightTemp <= leftTemp:\r\n raise\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n length = 0\r\n while True:\r\n try:\r\n length = float(input(\"What is the length of the wire? \"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n sections = 0\r\n while True:\r\n try:\r\n sections = int(input(\"How many sections are there? \"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n timeIntervals = 0\r\n while True:\r\n try:\r\n timeIntervals = int(input(\"How many time intervals are there? \"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n dt = 0\r\n while True:\r\n try:\r\n dt = float(input(\"What is the change in time? 
\"))\r\n break\r\n except:\r\n print(\"Invalid input.\")\r\n\r\n dx = length/sections\r\n\r\n constant = (k*dt)/(dx**2 * c * density)\r\n\r\n if abs(constant) < 0.5:\r\n print(\"Error: Constant is not stable.\")\r\n exit\r\n\r\n uold = [initialTemp]*sections\r\n uold[0] = leftTemp\r\n uold[sections-1] = rightTemp\r\n\r\n unew = uold[:]\r\n\r\n for t in range(timeIntervals):\r\n for x in range(1, sections-1):\r\n unew[x] = constant * (uold[x+1] - 2 * uold[x] + uold[x-1]) + uold[x]\r\n uold = unew[:]\r\n print(t+1)\r\n print(unew)\r\n\r\nmain()","sub_path":"CS160H Assignment 9.py","file_name":"CS160H Assignment 9.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"202248895","text":"from __future__ import unicode_literals, absolute_import\n\nfrom django.core.urlresolvers import reverse\nfrom django.db.models.signals import post_delete, post_save\nfrom django.dispatch import receiver\nfrom rest_framework import status\n\nfrom web.articles.models import Article, Category\nfrom web.system.models import PermanentlyRemoved\n\n\n@receiver(post_delete, sender=Article)\ndef remove_permanently(sender, instance, **kwargs):\n \"\"\"\n Add record to PermanentlyRemoved model\n \"\"\"\n PermanentlyRemoved.objects.create(\n status=status.HTTP_410_GONE,\n url=instance.get_absolute_url()\n )\n\n\n@receiver(post_save, sender=Article)\ndef moved_permanently(sender, instance, raw, created, **kwargs):\n \"\"\"\n Add record to PermanentlyRemoved model with 301 status code\n \"\"\"\n if created:\n return\n\n data = {\n 'slug': instance.slug,\n 'category_slug': instance.category.slug if instance.category else None\n }\n\n if instance.original_data != data:\n if instance.original_data['category_slug']:\n url = reverse('view_article', kwargs={\n 'category_slug': instance.original_data['category_slug'],\n 'article_slug': instance.original_data['slug']})\n else:\n url = reverse('view_page', args=(instance.original_data['slug'],))\n\n PermanentlyRemoved.objects.create(\n status=status.HTTP_301_MOVED_PERMANENTLY,\n url=url,\n redirect_url=instance.get_absolute_url()\n )\n instance.original_data = data\n\n\n@receiver(post_save, sender=Category)\ndef category_moved_permanently(sender, instance, raw, created, **kwargs):\n \"\"\"\n Add record to PermanentlyRemoved model with 301 status code\n \"\"\"\n\n def permanently_redirect(category):\n \"\"\"\n Recursively add redirect for each articles per category\n \"\"\"\n for article in category.articles.all():\n redirect_url = article.get_absolute_url()\n\n url = redirect_url.replace('/{}/'.format(instance.slug), '/{}/'.format(instance.original_data['slug']))\n if not instance.original_data['parent'] and original_data['parent']:\n url = url.replace('/{}/'.format(original_data['parent']), '/')\n\n PermanentlyRemoved.objects.create(\n status=status.HTTP_301_MOVED_PERMANENTLY,\n url=url,\n redirect_url=redirect_url\n )\n\n for subcategory in category.subcategories.all():\n permanently_redirect(subcategory)\n\n if created:\n return\n\n original_data = {'slug': instance.slug, 'parent': instance.parent}\n if instance.original_data['slug'] and instance.original_data != original_data:\n\n # Add redirect for articles for current category\n permanently_redirect(instance)\n instance.original_data = 
original_data\n","sub_path":"web/articles/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"46319007","text":"'''\n给定一个按照升序排列的整数数组 nums,和一个目标值 target。找出给定目标值在数组中的开始位置和结束位置。\nExample 1:\n\nInput: nums = [5,7,7,8,8,10], target = 8\nOutput: [3,4]\nExample 2:\n\nInput: nums = [5,7,7,8,8,10], target = 6\nOutput: [-1,-1]\n'''\n\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n # bisect.bisect返回的是插入索引的位置,存在时返回x左/右侧的位置\n i = bisect.bisect_left(nums, target)\n if i == len(nums) or nums[i] != target: return [-1, -1]\n j = bisect.bisect_right(nums, target)\n return [i, j-1]","sub_path":"python/0034.Find First and Last Position of Element in Sorted Array.py","file_name":"0034.Find First and Last Position of Element in Sorted Array.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"222460529","text":"import subprocess\nimport os\n\nendTime = 1300\nfoundEndTime = False\n\nfor index in xrange(70):\n print(endTime)\n\n file = open('check.vcd', 'r')\n newFile = \"\"\n for line in file:\n if (line.find(\"#\" + str(endTime)) != -1):\n foundEndTime = True\n print(line.find('#' + str(endTime)))\n if (foundEndTime == False):\n newFile += line\n file.close()\n\n file = open('AESEncrypt' + str(index+1) + '.verilator.vcd', 'w')\n file.write(newFile)\n file.close()\n foundEndTime = False\n endTime+= 200\n\n\n\n","sub_path":"makeVCDs.py","file_name":"makeVCDs.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"499360464","text":"__author__ = 'venus'\n'''\nDataSet1:\nTotal Tweets:30643\n\nDataSet2:\nTotal Tweets:27949\n\nDataSet3:\nTotal Tweets:28106\n\nDataSet4:\nTotal Tweets:9144\n\nDataSet5:\nTotal Tweets:16019\n\n\nBJP1:\nTotal number of reviews :19461(0.635)\nPositive Reviews :4049\nNegative Reviews :5276\nNeutral Reviews :10136\n\nAAP1:\nTotal number of reviews :6751(0.2203)\nPositive Reviews :1718\nNegative Reviews :1261\nNeutral Reviews :3772\n\nCongress1:\nTotal number of reviews :4016(0.1310)\nPositive Reviews :768\nNegative Reviews :1007\nNeutral Reviews :2241\n\nElections1:\nTotal number of reviews :415(0.01354)\nPositive Reviews :101\nNegative Reviews :29\nNeutral Reviews :285\n\nBJP2:\nTotal number of reviews :18517(0.6625)\nPositive Reviews :3649\nNegative Reviews :5647\nNeutral Reviews :9221\n\nAAP2:\nTotal number of reviews :6190(0.2214)\nPositive Reviews :1517\nNegative Reviews :1223\nNeutral Reviews :3450\n\nCongress2:\nTotal number of reviews :3074(0.1099)\nPositive Reviews :633\nNegative Reviews :720\nNeutral Reviews :1721\n\nElections2:\nTotal number of reviews :168 (0.00601)\nPositive Reviews :45\nNegative Reviews :17\nNeutral Reviews :106\n\nBJP3:\nTotal number of reviews :18054(0.6423)\nPositive Reviews :3937\nNegative Reviews :4865\nNeutral Reviews :9252\n\nAAP3:\nTotal number of reviews :6034(0.214)\nPositive Reviews :1630\nNegative Reviews :1229\nNeutral Reviews :3175\n\nCongress3:\nTotal number of reviews :3792(0.135)\nPositive Reviews :748\nNegative Reviews :936\nNeutral Reviews :2108\n\nElections3:\nTotal number of reviews :226(0.00804)\nPositive Reviews :67\nNegative Reviews :22\nNeutral Reviews :137\n\nBJP4:\nTotal number of reviews :7124(0.78)\nPositive Reviews :2413\nNegative Reviews :976\nNeutral Reviews 
:3735\n\nAAP4:\nTotal number of reviews :1078(0.1179)\nPositive Reviews :361\nNegative Reviews :166\nNeutral Reviews :551\n\nCongress4:\nTotal number of reviews :840(0.0918)\nPositive Reviews :215\nNegative Reviews :192\nNeutral Reviews :433\n\nElections4:\nTotal number of reviews :102(0.0111)\nPositive Reviews :17\nNegative Reviews :10\nNeutral Reviews :75\n\nBJP5:\nTotal number of reviews :11015(0.6876)\nPositive Reviews :3309\nNegative Reviews :2310\nNeutral Reviews :5396\n\nAAP5:\nTotal number of reviews :3360(0.209)\nPositive Reviews :833\nNegative Reviews :718\nNeutral Reviews :1809\n\nCongress5:\nTotal number of reviews :1569(0.0979)\nPositive Reviews :278\nNegative Reviews :502\nNeutral Reviews :789\n\nElections5:\nTotal number of reviews :75(0.0468)\nPositive Reviews :17\nNegative Reviews :12\nNeutral Reviews :46\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 5\nind = np.arange(N) # the x locations for the groups\nwidth = 0.15 # the width of the bars\nwidth1= 0.16\nwidth2= 0.165\nfig, ax = plt.subplots()\nbjp = (63.5, 66.25, 64.23, 78,68.7)\nrects1 = ax.bar(ind, bjp, width, color='r',alpha=0.8)\n\naap = (22.03, 22.14, 21.4, 11.79,20.9)\nrects2 = ax.bar(ind+width, aap, width, color='w',alpha=0.8)\n\ncongress = (13, 11, 13.5, 9.18,9.79)\nrects3 = ax.bar(ind+width+width1,congress, width, color='g')\n\nelection = [1.35, 0.6, 0.8, 1.11,4.68]\nrects4 = ax.bar(ind+width+width1+width2, election, width, color='b',alpha=0.8)\n\n# add some text for labels, title and axes ticks\nax.set_ylabel('percentage')\nax.set_title('Election Data')\nax.set_xticks(ind+width+width1)\nax.set_xticklabels( ('Dataset1', 'Dataset2', 'Dataset3', 'Dataset4', 'Dataset5') )\n\nax.legend( (rects1[0], rects2[0],rects3[0],rects4[0]), ('BJP', 'AAP','Congress','election') )\n\ndef autolabel(rects):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x()+rect.get_width()/2., 1*height, '%d'%int(height),\n ha='center', va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\nautolabel(rects3)\nautolabel(rects4)\n\nplt.show()\n\nN = 4\nind = np.arange(N) # the x locations for the groups\nwidth = 0.15 # the width of the bars\nwidth1= 0.16\nwidth2= 0.165\nwidth3=0.17\nfig, ax = plt.subplots()\ndataset1 = (63.5,22.03,13,1.35)\nrects1 = ax.bar(ind, dataset1, width, color='r',alpha=0.8)\ndataset2 = (66.25,22.14,11,0.6)\nrects2 = ax.bar(ind+width, dataset2, width, color='g',alpha=0.8)\ndataset3 = (64.23,21.4,13.5,0.8)\nrects3 = ax.bar(ind+width+width1, dataset3, width, color='b',alpha=0.8)\ndataset4 = (78,11.79,9.18,1.11)\nrects4 = ax.bar(ind+width+width1+width2, dataset4, width, color='y',alpha=0.8)\ndataset5 = (68.7,20.9,9.79,4.68)\nrects5 = ax.bar(ind+width+width1+width2+width3, dataset5, width, color='m',alpha=0.8)\n\nax.set_ylabel('percentage')\nax.set_title('Election Data')\nax.set_xticks(ind+width+width1)\nax.set_xticklabels( ('BJP', 'AAP','Congress','election') )\n\nax.legend( (rects1[0], rects2[0],rects3[0],rects4[0],rects5[0]), ('Dataset1', 'Dataset2', 'Dataset3', 'Dataset4', 'Dataset5' ))\n\ndef autolabel(rects):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x()+rect.get_width()/2., 1*height, '%d'%int(height),\n ha='center', 
va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\nautolabel(rects3)\nautolabel(rects4)\nautolabel(rects5)\n\nplt.show()","sub_path":"dicts/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367840460","text":"from django.contrib import admin\n\nfrom privatebeta.models import WaitingList\n\n\nclass WaitingListAdmin(admin.ModelAdmin):\n list_display = [\"email\", \"when\", \"invited\"]\n list_filter = [\"invited\", \"when\"]\n search_fields = [\"email\"]\n\nadmin.site.register(WaitingList, WaitingListAdmin)\n","sub_path":"crate_project/apps/privatebeta/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"529924749","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n\ndef inicio():\n print('Teste para acessar a pag de um FII e comparar quantos (%) ele valorizou!')\n print( 'e quanto ele esta rendendo')\n\ndef BuscaFIIeRetornaValor(fii):\n #print(fii)\n #print(\"https://www.fundsexplorer.com.br/funds/\" + fii)\n req = requests.get(\"https://www.fundsexplorer.com.br/funds/\" + fii)\n #print(req.status_code)\n if req.status_code == 200:\n #print('Requisição bem sucedida!')\n content = req.content\n soup = BeautifulSoup(content, 'html.parser')\n price = soup.find(class_='price')\n p = str(price.contents[0]).split()[1].split(\",\")\n preco = float(p[0] + \".\" + p[1])\n #fazer a diferença para encontar o lucro e encontar o valor do IR, em seguida subtrair pelo lucro,\n # com lucro liquido, calcular percentual ll/valor_investido, mulplicar por 100\n #print(preco)\n return float(preco)\n else:\n print(\"Erro Request {}\".format(req.status_code))\n\n#xpcm11 = [96.28,65.00,56.49] # valores pagos pelos fii\n#ideia usar banco de dados do google (fireBase)\n#fii1= 'xpcm11'# nome do fii para pesquisa\n\n#valor1 = BuscaFIIeRetornaValor(fii1)\n\ndef calculaLucroSemImposto(pago,atual):\n lucro1 = float(atual - pago)\n if lucro1 <= 0.0:\n return float(((lucro1/ pago) * 100))\n else:\n return float(((lucro1 * 0.80) / pago) * 100)\n\n#print(\"Se vender a cota {0} tera uma ganho/perda de {1:.2f}%\".format(1,calculaLucroSemImposto(xpcm11[1],valor1)))\n","sub_path":"fii.py","file_name":"fii.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"230035464","text":"import pandas as pd\nimport pprint as pp\n\n\n############################## Column names for dataframes ##############################\nmcols = ['movieId', 'title', 'genres']\nrcols = ['userId', 'movieId', 'rating', 'timestamp']\ntcols = ['userId', 'movieId', 'tag', 'timestamp']\n############################## Column names for dataframes ##############################\n\n\n############################## Read data from dat files ##############################\nmovies = pd.read_csv('../ml-10M100K/movies.dat', sep = '::', names = mcols, engine = 'python', keep_default_na = False)\n#links = pd.read_csv('../ml-10M100K/links.csv', engine = 'python')\nratings = pd.read_csv('../ml-10M100K/ratings.dat', sep = '::', names = rcols, engine = 'python', keep_default_na = False)\ntags = pd.read_csv('../ml-10M100K/tags.dat', sep = '::', names = tcols, engine = 'python', keep_default_na = False)\n############################## Read data from dat files 
##############################\n\n\n############################## Fix indexes and add column keys ##############################\nmovies.set_index('movieId', inplace = True)\nratings.set_index('movieId', inplace = True)\ntags.set_index('movieId', inplace = True)\n############################## Fix indexes and add column keys ##############################\n\n\n############################## Split year from movie titles ##############################\nyears = list()\nfor i in movies.index:\n title = movies[movies.index == i].title.values[0]\n title = title.rsplit('(', 1)\n if len(title) > 1:\n years.append(title[1].strip(')'))\n else:\n years.append('not specified')\n title = title[0].strip()\n movies.loc[i, 'title'] = title\nmovies['year'] = pd.Series(years, index = movies.index)\n# Move year column to be after title\nmovies = movies.reindex_axis(['title', 'year', 'genres'], axis = 1)\n############################## Split year from movie titles ##############################\n\n\n############################## Add ratings to movies ##############################\navgr = []\nfor i in movies.index:\n r = ratings[ratings.index == i]['rating']\n if r.size > 0:\n avgr.append(r.sum() / r.size)\n else:\n avgr.append(0)\n\nmovies['rating'] = pd.Series(avgr, index = movies.index)\n############################## Add ratings to movies ##############################\n\n\n############################## Create genre dict ##############################\ngenres = {}\nfor i in movies.index:\n movie_genres = movies[movies.index == i]['genres'].values\n for genre_string in movie_genres:\n genre_string = genre_string.split('|')\n for g in genre_string:\n if g not in genres.keys():\n genres[g] = [i,]\n else:\n genres[g].append(i)\n############################## Create genre dict ##############################\n\n\n############################## Create tag sets ##############################\ntmdict = {}\nfor i in tags.index:\n tlist = tags[tags.index == i].values\n for t in tlist:\n if i not in tmdict.keys():\n tmdict[i] = [t[1],]\n else:\n tmdict[i].append(t[1])\nfor k in tmdict.keys():\n tmdict[k] = set(tmdict[k])\n############################## Create tag sets ##############################\n\n\n############################## Create sql commands ##############################\nouts = open('insert_to_movielens.sql', 'w')\n\nfor i in movies.index:\n mov = movies[movies.index == i].values[0]\n outs.write(\"insert into movies (id, title, year, rating) values ('{}', '{}', '{}', {})\\n\".format(i, mov[0].replace(\"'\", \"''\"), mov[1], mov[3]))\n\nfor i in set(ratings.index):\n rlist = ratings[ratings.index == i].values\n for r in rlist:\n outs.write(\"insert into ratings (movieid, userid, rating) values ('{}', '{}', {})\\n\".format(i, r[0], r[1]))\n\n# for i in links.index:\n# l = links[links.index == i].values[0]\n# outs.write(\"insert into links (movieid, imdbid, tmdbid) values ('{}', '{}', '{}')\\n\".format(i, l[0], l[1]))\n\nfor i in set(tags.index):\n tlist = tags[tags.index == i].values\n for t in tlist:\n outs.write(\"insert into tags (movieid, userid, tag) values ('{}', '{}', '{}')\\n\".format(i, t[0], t[1].replace(\"'\", \"''\")))\n\nfor i in genres.keys():\n glist = genres[i]\n for g in glist:\n outs.write(\"insert into genres (genre, movieid) values ('{}', '{}')\\n\".format(i, g))\n\nfor i in tmdict.keys():\n tlist = tmdict[i]\n for t in tlist:\n outs.write(\"insert into mtags (movieid, tag) values ('{}', '{}')\\n\".format(i, t.replace(\"'\", \"''\")))\n\nouts.close()\n############################## 
Create sql commands ##############################\n","sub_path":"HVTWO/kristofer/readcsv.py","file_name":"readcsv.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148926837","text":"from math import log\n\n# Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\nwords = open(\"G:\\python_tut\\After_4_Feb_2020\\wordninja_words.txt\").read().split()\nwordcost = dict((k, log((i+1)*log(len(words)))) for i,k in enumerate(words))\nimport csv\nw = csv.writer(open(\"G:\\python_tut\\After_4_Feb_2020\\output.csv\", \"w\"))\nfor key, val in wordcost.items():\n w.writerow([key, val])\nmaxword = max(len(x) for x in words)\n\ndef infer_spaces(s):\n \"\"\"Uses dynamic programming to infer the location of spaces in a string\n without spaces.\"\"\"\n\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n #for pp,jj in enumerate(reversed(cost[max(0, i-maxword):i])): print('k=',pp,'c=',jj,wordcost.get(s[i-pp-1:i]),cost)\n # for k,c in candidates:\n # print(k,c)\n # print(min(c + wordcost.get(s[i-k-1:i], 9e999), k+1))\n # #print(minimum)\n return min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n # Build the cost array.\n cost = [0]\n for i in range(1,len(s)+1):\n best_match(i)\n #print(s[i-1],i-1,c,k)\n #cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i>0:\n best_match(i)\n #print(c,k,cost[i])\n # assert c == cost[i]\n # out.append(s[i-k:i])\n # i -= k\n\n return \" \".join(reversed(out))\ns = 'thumbgreenappleactiveassignmentweeklymetaphor'\nprint(len(s))\nprint(infer_spaces(s))","sub_path":"test_ninja.py","file_name":"test_ninja.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337589604","text":"# LC 140 Word Break II (Hard)\n\n# Given a string s and a dictionary of strings wordDict, add spaces in s to construct a sentence where each word is a valid dictionary word. 
Return all such possible sentences in any order.\n\n# Note that the same word in the dictionary may be reused multiple times in the segmentation.\n\n\n# Example 1:\n\n# Input: s = \"catsanddog\", wordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\n# Output: [\"cats and dog\", \"cat sand dog\"]\n# Example 2:\n\n# Input: s = \"pineapplepenapple\", wordDict = [\"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"]\n# Output: [\"pine apple pen apple\", \"pineapple pen apple\", \"pine applepen apple\"]\n# Explanation: Note that you are allowed to reuse a dictionary word.\n# Example 3:\n\n# Input: s = \"catsandog\", wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\n# Output: []\n\n\n# Constraints:\n\n# 1 <= s.length <= 20\n# 1 <= wordDict.length <= 1000\n# 1 <= wordDict[i].length <= 10\n# s and wordDict[i] consist of only lowercase English letters.\n# All the strings of wordDict are unique.\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:\n N = len(s)\n Dict = set(wordDict)\n\n dp_sol = [False for _ in range(N + 1)]\n dp_sol[0] = True\n for start in range(N):\n for end in range(start + 1, N + 1):\n if dp_sol[start] and s[start:end] in Dict:\n dp_sol[end] = True\n\n if not dp_sol[-1]:\n return []\n\n dp = [[] for _ in range(N + 1)]\n dp[0] = [\"\"]\n\n for start in range(N):\n for end in range(start + 1, N+1):\n if s[start:end] in Dict:\n for sub in dp[start]:\n dp[end].append(sub + \" \" + s[start:end])\n\n return [s[1:] for s in dp[-1]]\n","sub_path":"companyInterviewPractice/Bloomberg/BloombergPhone/140_wordBreakII_hard.py","file_name":"140_wordBreakII_hard.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"306718687","text":"import pyrosetta.toolbox\nfrom pyrosetta.toolbox import pose_from_rcsb\nfrom pyrosetta.io import pose_from_pdb\nfrom pyrosetta import init\nimport os\nimport logging\nfrom biopandas.pdb import PandasPdb\n\n\ndef load_pdb_list():\n with open('entries.idx') as f:\n data = f.readlines()\n pdb_code_list = []\n for line in data[2:]:\n pdb_code_list.append(line[:4])\n return pdb_code_list\n\n\ndef load_pdb_code():\n with open('author.idx') as f:\n data = f.readlines()\n pdb_code_list = []\n for line in data:\n pdb_code_list.append(line[:4])\n return pdb_code_list\n\n\ndef fetch_pdb_pose(pdb_code, log):\n os.chdir('pdb_files')\n file_list = os.listdir()\n pdb_file_name = pdb_code + '.pdb'\n pdb_file_name_clean = pdb_code + '.clean.pdb'\n if pdb_file_name_clean in file_list:\n log.info(\"Load from PDB file.\")\n pose = pose_from_pdb(pdb_file_name_clean)\n else:\n log.info(\"Load from RCSB.\")\n try:\n pose = pose_from_rcsb(pdb_code)\n except:\n log.error(\"Skip this PDB becuase can't download this file.\")\n os.chdir('..')\n return 1\n\n if check_model(pdb_file_name):\n pass\n else:\n log.warning(\"Skip this PDB because this file has multi-model.\")\n os.chdir('..')\n return 2\n\n os.chdir('..')\n return pose\n\n\ndef check_model(pdb_file):\n ppdb = PandasPdb()\n ppdb.read_pdb(pdb_file)\n df = ppdb.df['OTHERS'][ppdb.df['OTHERS']\n ['record_name'] == 'NUMMDL']['entry']\n if df.empty:\n return True\n else:\n return False\n\n\ndef main():\n init()\n # pdb_code_list = load_pdb_code()\n fetch_pdb_pose('5G5D')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"258583647","text":"import re\n\nalpha_path = r\"[a-zA-z]\"\npunctuation_path = r\"[-_.,!#$%^&*()'<>?/|}{~:]\"\n\n\ndef get_symbols(text, pattern):\n return len(re.findall(pattern, text))\n\n\nwith open(\"text.txt\", \"r\") as file:\n lines = file.readlines()\n counter_lines = 1\n for line in lines:\n count_alphas = get_symbols(line, alpha_path)\n count_punctuation_marks = get_symbols(line, punctuation_path)\n print(f\"Line {counter_lines}: {line[:-1]} ({count_alphas})({count_punctuation_marks})\")\n counter_lines += 1","sub_path":"files_exercise_homework/line_numbers.py","file_name":"line_numbers.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7183841","text":"import pygame\n\npygame.init() #반드시 호출\n\n#화면 크기 설정\n\nscreen_width = 480\nscreen_height = 720\n\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\npygame.display.set_caption(\"Demo game\")\n\n#background = pygame.image.load(\"/Users/mikekang/work/practiceForPython/pygame_basic/background.png\")\n\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: #창이 닫히는 이벤트가 발생하면\n running = False #게임이 끝남\n screen.fill((0,0,255))\n #screen.blit(background, (0,0))\n pygame.display.update()\n\n\n\npygame.quit()\n","sub_path":"pygame_basic/2_background.py","file_name":"2_background.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"344520092","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 08 09:16:43 2017\n\n@author: Michael\n\"\"\"\n#import MySQLdb\nfrom ib.opt import Connection\nimport time\nfrom ib.ext.Contract import Contract\nimport mysql.connector\nimport datetime\nfrom dateutil.parser import parse\nfrom RecoverySettings import daysback\nfrom RecoverySettings import strdaysback\n#from ib.ext.Order import Order\n\n\nCCY1 = \"EUR\"\nCCY2 = \"AUD\"\n\n\ndef reply_handler(msg):\n #print(msg.value)\n print(\"Reply:\", msg)\n test = msg.open\n test2 = msg.high\n test3 = msg.low\n test4 = msg.close\n test5 = msg.date\n \n \n dt = parse(str(test5))\n print(dt)\n # datetime.datetime(2010, 2, 15, 0, 0)\n print(dt.strftime('%m/%d/%Y'))\n newdate = dt.strftime('%m/%d/%Y')\n\n##Convert Date to proper format and relative reference\n# if dayofweek == 0: #if Today is Monday\n# yesterday = today - datetime.timedelta(days=3) #Get Previous Wednesday \n# month = (str(0) + str(yesterday.month))\n# day = (str(0)+ str(yesterday.day))\n# yesterday2 = (month[-2:] +\"/\"+ day[-2:] +\"/\"+str(yesterday.year))\n# print(yesterday2)\n#\n# else:\n# yesterday = today - datetime.timedelta(days=1) #Take 3 Days back \n# month = (str(0) + str(yesterday.month))\n# day = (str(0)+ str(yesterday.day))\n# yesterday2 = (month[-2:] +\"/\"+ day[-2:] +\"/\"+str(yesterday.year))\n# print(\"Yesterday was \" + str(yesterday2))\n# \n \n \n \n if float(test) != -1:\n import time\n #cnx = mysql.connector.connect(user='mjserpico', password='UrzE8B66',host=\"scar01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com\", database='SCAR01')\n cnx = mysql.connector.connect(user='Scarlett01', password='scar01lett',host=\"serpdb01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com\", database='SERPDB01')\n cur = cnx.cursor()\n cur.execute(\"\"\"Insert Into EURCHF (Date, Open, High, Low, Close) values(%s,%s,%s,%s,%s)\"\"\",(newdate,float(test),float(test2),float(test3),float(test4)))\n cnx.commit()\n\nconn = Connection.create(port=4002, 
clientId=999)\nconn.connect()\ntime.sleep(2)\nconn.register(reply_handler,'HistoricalData') #By registering \"HistoricalData\" --the Method name only --we can eliminate all the open order garbage\n#conn.registerall(reply_handler)\ntime.sleep(3)\n\n\n\n\ntoday = datetime.date.today( )\nprint(\"Today is \" + str(today))\ndayofweek = datetime.datetime.today().weekday()\nprint(\"Today is coded:\" + str(dayofweek))\n\n#0 is Monday, 1 tues 2 wed 3 thurs 4 fri 5 sat 6 sun\n\n# 5 is Saturday\n\n##Convert Date to proper format and relative reference\nif dayofweek == 0: #if Today is Monday\n yesterday = today - datetime.timedelta(days=daysback) #Get 5 days back \n month = (str(0) + str(yesterday.month))\n day = (str(0)+ str(yesterday.day))\n yesterday2 = (month[-2:] +\"/\"+ day[-2:] +\"/\"+str(yesterday.year))\n print(yesterday2)\n\nelse:\n yesterday = today - datetime.timedelta(days=daysback) #Take 5 Day back from day of run 04/08 - 5 is 04/03 <<<<<<<********** \n month = (str(0) + str(yesterday.month))\n day = (str(0)+ str(yesterday.day))\n yesterday2 = (month[-2:] +\"/\"+ day[-2:] +\"/\"+str(yesterday.year))\n print(\"First Date to grab price is \" + str(yesterday2))\n\n\n#cnx = mysql.connector.connect(user='mjserpico', password='UrzE8B66',host=\"scar01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com\", database='SCAR01')\ncnx = mysql.connector.connect(user='Scarlett01', password='scar01lett',host=\"serpdb01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com\", database='SERPDB01')\ncur = cnx.cursor()\nquery = (\"SELECT ID from \" + CCY1 + CCY2 + \" where Date = \\'\" + yesterday2 + \"\\'\")\nprint(query)\ncur.execute(query)\nfor (ID) in cur:\n ID1 = ID\n\nprint(\"ID1 is \" + str(ID1)) \nquery = (\"Delete from \" + CCY1 + CCY2 + \" where ID >= \\\"\" + str(ID1[0]) + \"\\\"\")\ncur.execute(query)\ncnx.commit()\n\nID2 = int(ID1[0])\nprint(ID2)\n\nquery = (\"ALTER TABLE \" + CCY1 + CCY2 + \" AUTO_INCREMENT =\" + str(ID2))\nprint(query)\ncur.execute(query)\n\ncnx.commit()\n#cur.execute(\"\"\"Insert Into EURCHF (Date, Open, High, Low, Close) values(%s,%s,%s,%s,%s)\"\"\",(time.strftime(\"%m/%d/%Y\"),float(test),float(test2),float(test3),float(test4)))\n#cnx.commit()\n\nqqq = Contract() \nqqq.m_symbol = 'EUR' \nqqq.m_secType = 'CASH' \nqqq.m_exchange = 'IDEALPRO' \nqqq.m_currency = 'CHF' \nconn.reqHistoricalData(1, qqq, '', strdaysback, '1 day', 'Midpoint', 1, 2) #Market days \ntime.sleep(1) #give IB time to send us messages\nconn.disconnect()","sub_path":"UAT/bin/SystemManagement/PriceRecovery/EURAUD_OHLC_recover.py","file_name":"EURAUD_OHLC_recover.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"230053748","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport datetime\nimport re\nimport socket\nimport socketserver\nimport platform\nimport logging\n\nfrom tools import print, toStr, isint\n\ndef main():\n do = \"start\"\n if len(sys.argv) > 1:\n do = sys.argv[1]\n if do not in (\"start\", \"stop\"):\n print('usage: python3 -u dmsg2log.py [start | stop]' + OS)\n sys.exit(1)\n print('dmsg2log', do)\n if do == 'stop':\n sys.exit(0)\n\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.INFO)\n handler = logging.FileHandler(os.path.join(os.path.dirname(sys.argv[0]), 'dmsg.log'), 'a', 'utf-8')\n formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='%d.%m.%Y %H:%M:%S')\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n s = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect((\"peoplenet.ua\", 80))\n IP = s.getsockname()[0]\n except:\n IP = ''\n s.close()\n\n OS = platform.system()\n print('Platform #15 ' + OS)\n print('Python version #15 ' + sys.version.split(' ')[0])\n\n\n print(str(datetime.datetime.today()).split('.')[\n 0]+'\\n---------- DMSG2LOG ver. 1.1 ---' + (IP if IP else '----------') + '-'*10)\n\n host = \"0.0.0.0\"\n port = 4044\n p = re.compile(r\"\\d{1,2}:\\d{2}:\\d{2}> \")\n\n class myHandler(socketserver.DatagramRequestHandler):\n def handle(self):\n text = str(self.rfile.read().decode('windows-1251')).replace('і', 'i').replace('І', 'I')\n print(text)\n if len(text) > 10:\n t = re.search(p, text)\n s = re.split(p, text)\n if len(s) > 1:\n logging.info(s[1])\n else:\n logging.info(text)\n else:\n logging.info(text)\n\n try:\n srvsocket = socketserver.UDPServer((host, port), myHandler)\n logging.info(\"Start logging\")\n srvsocket.serve_forever()\n except Exception as e:\n if e.errno in (98, 10048):\n print('dmsg2log already running, port %s is busy' %port)\n else:\n print(\"SocketServer error\", e)\n\nimport daemon\nwith daemon.DaemonContext(stdout='/home/oracle/tmp/e-calendar/dmsg2log.print', stderr='/home/oracle/tmp/e-calendar/dmsg2log.errors'):\n main()\n","sub_path":"dmsg2log.py","file_name":"dmsg2log.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82634732","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 10 13:32:55 2015\n\n@author: x\n\"\"\"\nfrom EV_FPGA import XilinxFPGA\nfrom EV_REG_CTRL import REG_CTRL\nimport time\nGREG_CORRELATOR_CTRL_ADDR = 2\n\nclass CORRELATOR_CTRL(REG_CTRL):\n addrType = \"FpgaPcieAddr\"\n baseAddr = GREG_CORRELATOR_CTRL_ADDR\n status = {\n \"rst\": 1,\n \"enable\": 0,\n #\"view_ch0\": 0,\n \"cfg_en\": 0,\n \"cfg_raw_out\": 0,\n \"cfg_cic_rate\": 128,\n \"cfg_nco_freq\": round(30/125*(2**18)),\n \"cfg_hbit\": 1,\n \"cfg_acc_cnt\": 1\n }\n ctrl_dict = {\n # signal_name: [start_bit,end_bit,reg_offset,rw]\n \"rst\": [31,31, 0,'rw'],\n \"enable\": [30,30, 0,'rw'],\n #\"bypass\": [29,29, 0,'rw'],\n \"cfg_en\": [29,29, 0,'rw'],\n \"cfg_raw_out\": [28,28, 0,'rw'],\n \"cfg_cic_rate\": [27,18, 0,'rw'],\n \"cfg_nco_freq\": [17, 0, 0,'rw'],\n \"sta_overflow\": [31,31, 1,'ro'],\n \"sta_cidx\": [30, 0, 1,'ro'],\n \"cfg_hbit\": [31,31, 2,'rw'],\n \"cfg_acc_cnt\": [23, 0, 2,'rw']\n }\n\n def __init__(self, fpga, baseAddr=-1):\n REG_CTRL.__init__(self,fpga, baseAddr)\n\n # ============================= Basic Funcs ===============================\n def run(self):\n \"\"\"In Write, the addr will be send first, then write the data\n It should be noticed that rst and enable bit should be set before the write\"\"\"\n self.ctrl(\"rst\",1)\n self.ctrl(\"enable\",0)\n self.ctrl(\"cfg_cic_rate\",256)\n self.ctrl(\"cfg_nco_freq\",round((30.0-0.025)/125*(2**18)))\n #self.ctrl(\"view_ch0\",0)\n self.ctrl(\"cfg_raw_out\",0)\n self.ctrl(\"cfg_hbit\",0)\n self.ctrl(\"cfg_acc_cnt\",1500)\n self.ctrl(\"cfg_en\",1)\n self.ctrl(\"rst\",0)\n self.ctrl(\"enable\",1)\n self.ctrl(\"cfg_en\",0)\n \n def sweep_nco_freq(self,basefreq=125.0, sweep = 200, step=0.1):\n for ii in range(1,sweep):\n freqt = basefreq+(ii-sweep/2)*step\n self.ctrl(\"cfg_nco_freq\",round((30.0-0.002)/freqt*(2**18)))\n print (\"Freq: %f\"%(freqt))\n self.ctrl(\"cfg_en\",1)\n self.ctrl(\"cfg_en\",0)\n time.sleep(1)\n \n def sweep_nco_freq2(self,basefreq=30.0, sweep = 200, step=0.1):\n for ii in 
range(1,sweep):\n            freqt = basefreq+(ii-sweep/2)*step\n            self.ctrl(\"cfg_nco_freq\",round(basefreq/120*(2**18)))\n            print (\"Freq: %f\"%(freqt))\n            self.ctrl(\"cfg_en\",1)\n            self.ctrl(\"cfg_en\",0)\n            time.sleep(1)\n    \nif __name__ == '__main__':\n    k7 = XilinxFPGA(mode=\"Serial\");\n    k7.openDevice();\n    cor = CORRELATOR_CTRL(k7);\n    \n#    #print the cpp definition txt \n#    siggen.gen_h_file()\n#    siggen.gen_cpp_file()\n    \n    \n    cor.run()\n    \n    # corse sweep \n    #cor.sweep_nco_freq(115.388, 200, 0.2)\n    \n    # fine sweep\n    #cor.sweep_nco_freq(120, 50, 0.01)\n    \n    # more fine sweep\n    #cor.sweep_nco_freq(120, 1, 0.001)\n    \n    # sweep the 30M freq\n    #cor.sweep_nco_freq2(30,200,0.001)\n    \n    k7.closeDevice();\n","sub_path":"PyFPGA_mini/EV_CORRELATOR_CTRL.py","file_name":"EV_CORRELATOR_CTRL.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"34807884","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\nclass Solution:\n    def deleteDuplication(self, pHead):\n        # write code here,只是删除重复值的节点\n        if not pHead:\n            return None\n        result = []\n        last = 0\n        while pHead:\n            if len(result) > 0:\n                if pHead.val == result[-1].val:\n                    if len(result) >= 2:\n                        result[-2].next = pHead.next\n                    last = pHead.val\n                    result.pop()\n                else:\n                    result.append(pHead)\n            else:\n                if last != pHead.val:\n                    result.append(pHead)\n            pHead = pHead.next\n        if result != []:\n            return result[0]\n        else:\n            return None","sub_path":"52.py","file_name":"52.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110078546","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 11 18:43:21 2020\n\n@author: josephgross\n\"\"\"\n\n\n# H < 0.5 - The time series is mean reverting (near 0 means highly mean reverting)\n# H = 0.5 - The time series is a Geometric Brownian Motion\n# H > 0.5 - The time series is trending (near 1 means strongly trending)\n\nimport datetime as dt\nfrom alpha_vantage.timeseries import TimeSeries\nimport pandas as pd\n\nfrom numpy import array, cumsum, log, polyfit, sqrt, std, subtract\nfrom numpy.random import randn\n\n\ndef hurst(time_series):\n    \"\"\"\n    Calculates the Hurst Exponent of the time series vector ts\n\n    Parameters\n    ----------\n    time_series : 'np,darray'\n        Time series array of prices\n\n    Returns\n    -------\n    'float'\n        The Hurst Exponential of the time series\n\n    \"\"\"\n    \n    # Create the range of lag values\n    lags = range(2, 100)\n    \n    # Calculate the array of the variances of the lagged differences\n    tau = [\n        sqrt(std(subtract(time_series[lag:], time_series[:-lag])))\n        for lag in lags\n    ]\n    \n    # Use a linear fit to estimate the Hurst Exponent\n    poly = polyfit(log(lags), log(tau), 1)\n    \n    # Return the Hurst Exponent from the polyfit output\n    return poly[0] * 2.0\n\ndef get_daily_historic_data_alphavantage(ticker, start_date, end_date):\n    \"\"\"\n    \n    Use the generated API call to query AlphaVantage with the \n    appropriate API key and return a list of price tuples for \n    a particular ticker\n\n    Parameters\n    ----------\n    ticker : 'str'\n        The ticker of a stock which will be used to retrieve price data\n        from alpha_vantage\n\n    Returns\n    -------\n    'list'\n        List of tuples with historical price data based on a \n        specific ticker\n\n    \"\"\"\n    \n    ALPHA_VANTAGE_API_KEY = 'YZZ4PFN9ATU1ASPAN'\n    COLUMNS = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']\n    \n    try:\n        ts = 
TimeSeries(key=ALPHA_VANTAGE_API_KEY, output_format='json')\n data, meta_data = ts.get_daily_adjusted(symbol=ticker, outputsize='full')\n except Exception as e:\n print(\n \"Could not download AlphaVantage data for %s ticker \"\n \"(%s)...skipping.\" % (ticker, e)\n )\n return []\n else:\n prices = []\n \n for date_str in sorted(data.keys()):\n date = dt.datetime.strptime(date_str, '%Y-%m-%d') \n if date < start_date or date > end_date:\n continue\n \n bar = data[date_str]\n prices.append(\n (\n date_str,\n float(bar['1. open']),\n float(bar['2. high']),\n float(bar['3. low']),\n float(bar['4. close']),\n int(bar['6. volume']),\n float(bar['5. adjusted close'])\n )\n )\n \n return pd.DataFrame(prices, columns=COLUMNS).set_index('Date')\n\n\n# ==================================================================\n\n\nif __name__ == \"__main__\":\n \n # Create a Gometric Brownian Motion, Mean-Reverting and Trending Series\n gbm = log(cumsum(randn(100000)) + 1000)\n mr = log(randn(100000) + 1000)\n tr = log(cumsum(randn(100000) + 1) + 1000)\n \n # Download the Amazon OHLCV data \n start_date = dt.datetime(2000, 1, 1)\n end_date = dt.datetime(2015, 1, 1)\n amzn = get_daily_historic_data_alphavantage('AMZN', start_date, end_date)\n \n # Output the Hurst Exponent for each of the above series\n # and the price of Amazon (the Adjusted Close Price) for\n # the ADF test given above in the article\n print(\"Hurst (GBM): %0.2f\" % hurst(gbm))\n print(\"Hurst (MR): %0.2f\" % hurst(mr))\n print(\"Hurst (TR): %0.2f\" % hurst(tr))\n \n # Calculate the Hurst exponent for the AMZN adjusted closing prices\n print(\"Hurst (AMZN): %0.2f\" % hurst(array(amzn['Adj Close'].tolist())))\n \n \n ","sub_path":"Algorithmic-Trading/algorithmic-trading/Quantiative Finance/Automated Trading/Hurst.py","file_name":"Hurst.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357660255","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 18 20:39:52 2018\r\n\r\n@author: WLM-PC\r\n\"\"\"\r\n\r\nimport pickle\r\n\r\nfileNames = ['BG0003']\r\ntestFileNames = ['BG0003']\r\npart = 'Left'\r\nresolution = 500\r\nviewpoints = [1,2,3]\r\nversion = \"v1\"\r\nmethod = 'ANN' #Logistic, ANN\r\n\r\nwith open('config.pkl','wb') as f:\r\n\tpickle.dump([viewpoints,fileNames,testFileNames,part,version,resolution,method],f)\r\n\r\n","sub_path":"writeConfig.py","file_name":"writeConfig.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115074914","text":"#!/usr/bin/env python\n\nimport os\n\n\nfilename = 'coin_list.txt'\nlines = [line.rstrip('\\n') for line in open(filename)]\n\ni = 1\nfor coin in lines:\n\tcommand = \"python3 SQL_query_to_csv.py %10s out_all.csv && python3 plot_subplots.py\" % coin\n\tprint(\"%03d %s\" % (i, command))\n\tos.system(command)\n\ti += 1","sub_path":"03. CoinMarketCap/05. 
plot/create_charts_for_all_coins.py","file_name":"create_charts_for_all_coins.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226741647","text":"import os\nimport sys\nimport numpy as np\nimport netCDF4 as nc\nimport xarray as xr\n\nfrom importlib import util as import_util\nfrom met.logger import logger\n\n###########################################\n\nclass dataplane(logger):\n\n KEEP_XARRAY = True\n class_name = \"dataplane\"\n\n MET_FILL_VALUE = -9999.\n ATTR_USER_FILL_VALUE = 'user_fill_value'\n\n @staticmethod\n def call_python(argv):\n logger.log_msg(f\"Module:\\t{repr(argv[0])}\")\n if 1 == len(argv):\n logger.quit(f\"User command is missing\")\n\n logger.log_msg(\"User Command:\\t\" + repr(' '.join(argv[1:])))\n # argv[0] is the python wrapper script (caller)\n # argv[1] contains the user defined python script\n pyembed_module_name = argv[1]\n sys.argv = argv[1:]\n logger.log_msg(f\" sys.argv:\\t{sys.argv}\")\n\n # append user script dir to system path\n pyembed_dir, pyembed_name = os.path.split(pyembed_module_name)\n if pyembed_dir:\n sys.path.insert(0, pyembed_dir)\n\n if not pyembed_module_name.endswith('.py'):\n pyembed_module_name += '.py'\n\n user_base = pyembed_name.replace('.py','')\n\n spec = import_util.spec_from_file_location(user_base, pyembed_module_name)\n met_in = import_util.module_from_spec(spec)\n spec.loader.exec_module(met_in)\n return met_in\n\n @staticmethod\n def is_integer(a_data):\n return isinstance(a_data, int)\n\n @staticmethod\n def is_xarray_dataarray(a_data):\n return isinstance(a_data, xr.DataArray)\n\n ##\n ## create the metadata dictionary\n ##\n\n # Python dictionary items:\n # 'name': data name\n # 'long_name': descriptive name\n # 'valid': valid time (format = 'yyyymmdd_hhmmss')\n # 'init': init time (format = 'yyyymmdd_hhmmss')\n # 'lead': lead time (format = 'hhmmss')\n # 'accum': accumulation time (format = 'hhmmss')\n # 'level': vertilcal level\n # 'units': units of the data\n # 'grid': contains the grid information\n # - a grid name (G212)\n # - a gridded data file name\n # - MET specific grid string, \"lambert 185 129 12.19 -133.459 -95 40.635 6371.2 25 25 N\"\n # - a dictionary for the grid information\n @staticmethod\n def set_dataplane_attrs(data_name, valid_time, init_time, lead_time,\n accum_time, v_level, units, grid_info, long_name=None):\n hdr_attrs = {\n\n 'valid': valid_time,\n 'init': init_time,\n 'lead': lead_time,\n 'accum': accum_time,\n\n 'name': data_name,\n 'long_name': long_name if long_name is not None and long_name != \"\" else data_name + '_long',\n 'level': v_level,\n 'units': units,\n\n 'grid': grid_info\n\n }\n return hdr_attrs\n\n @staticmethod\n def read_2d_text_input(input_file):\n if os.path.exists(input_file):\n met_data = np.loadtxt(input_file)\n else:\n met_data = None\n return met_data\n\n @staticmethod\n def read_dataplane(netcdf_filename):\n # read NetCDF file\n ds = nc.Dataset(netcdf_filename, 'r')\n\n dp = ds['met_data']\n met_data = dp[:]\n attr_name = dataplane.ATTR_USER_FILL_VALUE\n user_fill_value = dp.getncattr(attr_name) if hasattr(dp, attr_name) else None\n\n met_attrs = {}\n\n # grid is defined as a dictionary or string\n grid = {}\n for attr, attr_val in ds.__dict__.items():\n if 'grid.' 
in attr:\n grid_attr = attr.split('.')[1]\n grid[grid_attr] = attr_val\n else:\n met_attrs[attr] = attr_val\n\n if grid:\n met_attrs['grid'] = grid\n\n met_attrs['name'] = met_attrs['name_str']\n del met_attrs['name_str']\n\n met_info = {}\n met_info['met_data'] = met_data\n if user_fill_value is not None:\n met_attrs['fill_value'] = user_fill_value\n met_info['attrs'] = met_attrs\n\n return met_info\n\n @staticmethod\n def write_dataplane(met_in, netcdf_filename):\n met_info = {'met_data': met_in.met_data}\n if hasattr(met_in.met_data, 'attrs') and met_in.met_data.attrs:\n attrs = met_in.met_data.attrs\n else:\n attrs = met_in.attrs\n met_info['attrs'] = attrs\n\n # write NetCDF file\n ds = nc.Dataset(netcdf_filename, 'w')\n\n # create dimensions and variable\n nx, ny = met_in.met_data.shape\n ds.createDimension('x', nx)\n ds.createDimension('y', ny)\n dp = ds.createVariable('met_data', met_in.met_data.dtype, ('x', 'y'),\n fill_value=dataplane.MET_FILL_VALUE)\n dp[:] = met_in.met_data\n\n # append attributes\n for attr, attr_val in met_info['attrs'].items():\n if attr_val is None:\n continue\n\n if attr == 'name':\n setattr(ds, 'name_str', attr_val)\n elif attr == 'fill_value':\n setattr(dp, dataplane.ATTR_USER_FILL_VALUE, attr_val)\n elif type(attr_val) == dict:\n for key in attr_val:\n setattr(ds, attr + '.' + key, attr_val[key])\n else:\n setattr(ds, attr, attr_val)\n\n ds.close()\n\n @staticmethod\n def validate_met_data(met_data, fill_value=None):\n method_name = f\"{dataplane.class_name}.validate()\"\n #logger.log_msg(f\"{method_name} type(met_data)= {type(met_data)}\")\n attrs = None\n from_xarray = False\n from_ndarray = False\n if met_data is None:\n logger.quit(f\"{method_name} The met_data is None\")\n\n nx, ny = met_data.shape\n met_fill_value = dataplane.MET_FILL_VALUE\n if dataplane.is_xarray_dataarray(met_data):\n from_xarray = True\n attrs = met_data.attrs\n met_data = met_data.data\n modified_met_data = True\n if isinstance(met_data, np.ndarray):\n from_ndarray = True\n met_data = np.ma.array(met_data)\n\n if isinstance(met_data, np.ma.MaskedArray):\n is_int_data = dataplane.is_integer(met_data[0,0]) or dataplane.is_integer(met_data[int(nx/2),int(ny/2)])\n met_data = np.ma.masked_equal(met_data, float('nan'))\n met_data = np.ma.masked_equal(met_data, float('inf'))\n if fill_value is not None:\n met_data = np.ma.masked_equal(met_data, fill_value)\n met_data = met_data.filled(int(met_fill_value) if is_int_data else met_fill_value)\n else:\n logger.log_msg(f\"{method_name} unknown datatype {type(met_data)}\")\n\n if dataplane.KEEP_XARRAY:\n return xr.DataArray(met_data,attrs=attrs) if from_xarray else met_data\n else:\n return met_data\n\n\ndef main(argv):\n global attrs, met_data, met_info\n\n met_in = dataplane.call_python(sys.argv)\n\n user_fill_value = None\n try:\n met_info = met_in.met_info\n attrs = met_info['attrs']\n init_met_data = met_info['met_data']\n except:\n met_info = {}\n init_met_data = met_in.met_data\n try: # numpy and attrs\n attrs = met_in.attrs\n except: # xarray\n attrs = init_met_data.attrs\n met_info['attrs'] = attrs\n if hasattr(met_in, 'user_fill_value'):\n fill_value = met_in.user_fill_value\n\n fill_value = attrs.get('fill_value', None)\n dataplane.log_msg('validating the dataplane array...')\n met_data = dataplane.validate_met_data(init_met_data, fill_value)\n met_info['met_data'] = met_data\n\n if os.environ.get('MET_PYTHON_DEBUG', None) is not None:\n dataplane.log_msg('--- met_data after validating ---')\n dataplane.log_msg(met_data)\n\nif 
__name__ == '__main__' or __name__ == sys.argv[0]:\n main(sys.argv)\n dataplane.log_msg(f'{__name__} complete')\n","sub_path":"scripts/python/met/dataplane.py","file_name":"dataplane.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"414973422","text":"from rechercheDoc import *\nfrom IndexTree import IndexTree\nimport tkinter as Tk\n\nclass Controler:\n\n def __init__(self, _parser):\n self.tk = Tk.Tk()\n self.view = Toplevel1(self.tk, self)\n self.parser = _parser\n self.index = IndexTree(\"\")\n self.info = []\n self.stoplist = []\n self.stem = {}\n\n def launch(self):\n # lecture du cropus\n self.parser.steming(self.stem)\n self.parser.lectureCorpus(self.info, self.index, self.stoplist)\n\n self.tk.mainloop()\n\n def traitementRequete(self, requete, parametre): # String , Boolean[Stemming,stopList,phrasalqueries,jocker*]\n # prepare les elements de la requete\n requete = self.parser.normalize(requete)\n requetes = requete.split()\n\n if parametre[0]:\n requetes2 = requetes\n requetes = []\n for mot in requetes2:\n if mot in self.stem:\n for stem_mot in self.stem[mot]:\n requetes.append(stem_mot)\n else:\n requetes.append(mot)\n\n\n return requetes\n\n def rechercherIndex(self, requete, parametre): # String[]\n # envoie a la vue la liste des documents pertinent trouvé\n requetes = self.traitementRequete(requete, parametre)\n liste = []\n #print(requetes)\n for elt in requetes:\n liste.append(self.index.rechercheMot(elt))\n #crée un fonction intermédaire qui renvoie un liste trié par pertinence des docs\n self.view.sendResultat(liste)\n","sub_path":"Controler.py","file_name":"Controler.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162880128","text":"\"\"\"\nThis file contains the JSON APIs used\n\"\"\"\nfrom rest_framework import serializers\nfrom makeReports.models import (\n Assessment,\n DegreeProgram, \n Department, \n Graph,\n SLO, \n SLOInReport\n)\n\nclass DeptSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes departments to JSON with the primary key and name\n \"\"\"\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer.\n \"\"\"\n model = Department\n fields = ['pk','name']\nclass ProgSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes degree programs to JSON with the primary key, name, and level\n \"\"\"\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = DegreeProgram\n fields = ['pk', 'name', 'level']\nclass SLOserializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes SLOs to JSON with the primary key and name\n \"\"\"\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = SLOInReport\n fields = ['pk', 'goalText']\nclass SLOParentSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes :class:`~makeReports.models.slo_models.SLO` into just its primary key\n \"\"\"\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = SLO\n fields = ['pk']\nclass SLOSerializerWithParent(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes SLOs (:class:`~makeReports.models.slo_models.SLOInReport`) to JSON with the primary key and 
name and primary key of SLO\n \"\"\"\n slo = SLOParentSerializer()\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = SLOInReport\n fields = ['pk', 'goalText','slo']\nclass AssessmentParentSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes parent assessments (:class:`~makeReports.models.assessment_models.Assessment`) into its primary key and title\n \"\"\"\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = Assessment\n fields = ['pk','title']\nclass AssessmentSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes assessments (:class:`~makeReports.models.assessment_models.AssessmentVersion`) to JSON with the primary key and and title\n \"\"\"\n assessment = AssessmentParentSerializer()\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = SLOInReport\n fields = ['pk', 'assessment']\nclass FileSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializes graphs to JSON with all fields \n \"\"\"\n class Meta:\n \"\"\"\n Defines the model type and fields for the superclass\n to use to build the serializer\n \"\"\"\n model = Graph\n fields = \"__all__\"\n","sub_path":"AACForm/makeReports/views/API/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"330712340","text":"import logging\n\nfrom environs import Env\nfrom telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom telegram.ext import (\n Updater,\n Filters,\n CommandHandler,\n MessageHandler,\n)\n\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO\n )\nlogger = logging.getLogger(__name__)\n\n\ndef start(update, _):\n reply_keyboard = [['Принять', 'Отклонить']]\n\n with open('personal_data.pdf', 'rb') as pd_file:\n update.message.reply_document(pd_file)\n\n update.message.reply_text(\n 'Подтвердите солгасие на обработку персональных данных',\n reply_markup=ReplyKeyboardMarkup(reply_keyboard)\n )\n\n\ndef accept(update, _):\n reply_keyboard = [['Введите контактный номер телефона']]\n\n update.message.reply_text(\n 'Изготовление тортов на заказ.',\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard,\n input_field_placeholder='+7-999-9999',\n )\n )\n\n\ndef phone(update, _):\n reply_keyboard = [['Собрать торт']]\n user = update.message.from_user\n phone_number = update.message.text\n logger.info('Match %s with %s', user, phone_number)\n update.message.reply_text('Вы успешно зарегистрированы!')\n update.message.reply_text(\n 'Выберите ингредиенты, форму, основу, надпись, '\n 'а мы привезем готовый торт к вашему празднику.',\n reply_markup=ReplyKeyboardMarkup(reply_keyboard),\n )\n\n\ndef cancel(update, _):\n \"\"\"Cancels and ends the conversation.\"\"\"\n user = update.message.from_user\n logger.info(\"User %s canceled the conversation.\", user.first_name)\n update.message.reply_text(\n 'Всего доброго!',\n reply_markup=ReplyKeyboardRemove(),\n )\n\n\ndef main():\n env = Env()\n env.read_env()\n\n updater = Updater(token=env('TG_BOT_TOKEN'))\n dispatcher = updater.dispatcher\n\n start_handler = CommandHandler('start', start)\n accept_handler = MessageHandler(Filters.regex('Принять'), accept)\n phone_handler = MessageHandler(Filters.regex('^\\+?\\d{1,3}?( 
|-)?\\d{3}( |-)?\\d{3}( |-)?\\d{2}( |-)?\\d{2}$'), phone)\n cancel_handler = MessageHandler(Filters.text('Отклонить'), cancel)\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(accept_handler)\n dispatcher.add_handler(phone_handler)\n dispatcher.add_handler(cancel_handler)\n\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"529597506","text":"# -*- coding: utf-8 -*-\n# def fact(n=2):\n# if n == 1:\n# return 1\n# return n * fact(n - 1)\n#\n#\n# print fact(2)\n\ndef move(n, a, b, c):\n if n == 1:\n print('move',a , '--->', c)\n else:\n move(n-1, a, c, b)\n move(1, a, b, c)\n move(n-1, b, a, c)\ni = int(input('请输入圆盘数量:'))\nmove(i, 'A', 'B', 'C')\n\n\n","sub_path":"SeleniumTest/venv/fact.py","file_name":"fact.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"59040205","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 3 21:14:50 2021\n\n@author: mkroc\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import mathtext\nmathtext.FontConstantsBase = mathtext.ComputerModernFontConstants\n\nplt.rcParams.update({'mathtext.default': 'default', 'mathtext.fontset': 'stix'})\nplt.rcParams['text.usetex'] = True\nplt.rcParams[\"font.size\"] = 50\n\nchannel = 2 #0:比較なし, 1:1つと比較, 2:2つと比較\ndata1 = np.genfromtxt(r\"/home/satanka/Documents/fort/pds_kai/pds_E_kai_L=9/pds_E_kai_L=9_2_disfun_s=11_MLT=0.0000000000000000.csv\", delimiter=',', unpack=True)\nvperp = data1[2][:]\nvpara = data1[1][:]\nff = data1[9][:]\n\nif(channel == 1 or channel == 2):\n data2 = np.genfromtxt(r\"/home/satanka/Documents/fort/pds_kai/pds_E_kai_L=9/pds_E_kai_L=9_2_disfun_s=1_MLT=0.0000000000000000.csv\", delimiter=',', unpack=True)\n vperp2 = data2[2][:]\n vpara2 = data2[1][:]\n ff2 = data2[9][:]\n vperp = np.concatenate([vperp, vperp2], axis=0)\n vpara = np.concatenate([vpara, vpara2], axis=0)\n ff = np.concatenate([ff, ff2], axis=0)\n\nif(channel == 2):\n data3 = np.genfromtxt(r\"/home/satanka/Documents/fort/pds_kai/pds_E_kai_L=9/pds_E_kai_L=9_2_disfun_s=6_MLT=0.0000000000000000.csv\", delimiter=',', unpack=True)\n vperp3 = data3[2][:]\n vpara3 = data3[1][:]\n ff3 = data3[9][:]\n vperp = np.concatenate([vperp, vperp3], axis=0)\n vpara = np.concatenate([vpara, vpara3], axis=0)\n ff = np.concatenate([ff, ff3], axis=0)\n\nlength = len(ff)\nmaxff = np.nanmax(ff)\nfor ii in range(length):\n if(np.log10(ff[ii]) < np.log10(maxff)-30.):\n ff[ii] = np.nan\n vperp[ii] = np.nan\n vpara[ii] = np.nan\n\nprint(maxff)\nprint(np.nanmin(np.log10(ff)))\nprint(np.nanmax(np.log10(ff)))\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ncm = plt.cm.get_cmap('turbo')\n\nax.set_xlabel(\"$v_{\\parallel}$ [m/s] (+ : S→N, - : N→S)\")\nax.set_ylabel(\"$v_{\\perp}$ [m/s]\")\nplt.title(\"distribution function (scale=log10)\")\nif(min(np.floor(ff)) != 0.):\n mappable = ax.scatter(vpara, vperp, c=np.log10(ff), vmin=np.floor(np.nanmin(np.log10(ff))), vmax=np.trunc(np.nanmax(np.log10(ff))), cmap=cm, s=700, alpha=0.7)\nif(min(np.floor(ff)) == 0.):\n mappable = ax.scatter(vpara, vperp, c=np.log10(ff), vmin=np.floor(np.nanmax(np.log10(ff))-15.), vmax=np.trunc(np.nanmax(np.log10(ff))), cmap=cm, s=700, alpha=0.7)\n\ncbar = fig.colorbar(mappable, 
ax=ax)\n#cbar.ax.tick_params(labelsize=25)\n\nax.minorticks_on()\nax.grid(which=\"both\")\nax.set_axisbelow(True)\n\n#ax.tick_params(labelsize=25)\n#ax.xaxis.offsetText.set_fontsize(25)\n#ax.yaxis.offsetText.set_fontsize(25)\nplt.subplots_adjust(wspace=0.4, hspace=0.6)\n\n\n\nplt.show()\n","sub_path":"pds_kai/python_draw/codes/pds_E_kai_disfun.py","file_name":"pds_E_kai_disfun.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"627827952","text":"class RingBuffer:\n def __init__(self, capacity):\n # Defineing capacity which should set limit for the number of values/elements to be added \n self.capacity = capacity\n # Defineing current which should start index at 0\n self.current = 0\n # Defineing storage which should store added values/elements\n self.storage = [None]*capacity\n # Initial commit \n\n def append(self, item):\n # If current is bigger than the setted capacity then set the current to start at index 0\n if self.current >= self.capacity:\n self.current = 0\n # Else set the storage's index at item's index \n self.storage[self.current] = item\n # Add 1 to current's index\n self.current += 1\n\n def get(self):\n return [item for item in self.storage if item is not None]\n\nbuffer = RingBuffer(3)\nbuffer.append('a')\nbuffer.append('b')\nbuffer.append('c')\nbuffer.append('d')\nprint(buffer.get())\n# Completes MVP","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"303170597","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/10/4 16:55\n# @Author : HaoWANG\n# @Site : \n# @File : 045. 拖动条.py\n# @Software: PyCharm\n\n\nimport numpy as np\nimport cv2 as cv\ndef nothing(x):\n pass\n# Create a black image, a window\nimg = cv.imread(\"../pictures/lena.png\")\ncv.namedWindow('image')\n# create trackbars for color change\ncv.createTrackbar('R','image',0,255,nothing)\ncv.createTrackbar('G','image',0,255,nothing)\ncv.createTrackbar('B','image',0,255,nothing)\n# create switch for ON/OFF functionality\nswitch = '0 : OFF \\n1 : ON'\ncv.createTrackbar(switch, 'image',0,1,nothing)\nwhile(1):\n cv.imshow('image',img)\n k = cv.waitKey(1) & 0xFF\n if k == 27:\n break\n # get current positions of four trackbars\n r = cv.getTrackbarPos('R','image')\n g = cv.getTrackbarPos('G','image')\n b = cv.getTrackbarPos('B','image')\n s = cv.getTrackbarPos(switch,'image')\n if s == 0:\n img[:] = 0\n else:\n img[:] = [b,g,r]\ncv.destroyAllWindows()","sub_path":"tutorial4-鼠标和键盘操作/045. 拖动条.py","file_name":"045. 拖动条.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"228894906","text":"#!/usr/bin/env python\n# Detect and object with the camera. 
\n\nimport rospy\nfrom sensor_msgs.msg import CompressedImage\nimport cv2\nfrom cv_bridge import CvBridge\nfrom geometry_msgs.msg import Point\nfrom std_msgs.msg import String\nimport numpy as np\n\n\n###################################\n## VARIABLE DECLARATION AND SETUP\n###################################\n\nbridge = CvBridge() #Bridge converts the image from ros to openCV\n\nname = \"Object!\"\n\nlower= np.array([255,255,255],np.uint8) # Array (H,S,V) for the lower threshold bound of the HSV image\nupper= np.array([255,255,255],np.uint8) # Array (H,S,V) for the upper threshold bound of the HSV image\nerror = np.array([13,100,100],np.uint8) # Array of error widths to create the upper and lower threshold bounds above.\n\n\ntitleTracker = \"Color Tracker\" # Debugging Image Title\ntitleOriginal = \"Original Image\" # Debugging Image Title\ntitleMask = \"Mask Image\" # Debugging Image Title\ndebug = True # True - shows the images. False - Does not show the images.\n\nwidth = 360 # Width of the image, this is sent in our point message as the z-component to know the zero point in the frame.\nblurSize = 9 # Blur Kernel Size\nmorphOpSize = 5 # Closing and Opening Kernel Size\n\n\nmaxObjects = 1 # Max number of object to detect.\nminObjectArea = 50 # Min number of pixels for an object to be recognized.\n\nstart = False # Set to true when first image is acquired and will start the program.\n\nupdate = False # True - When a new point has been found and can be published. False - Otherwise.\n\nmose = False\n\n\n###################################\n## Function Declaration\n###################################\n\ndef mouseEvent(event, x, y, flags, param):\n # The mouse event is connected to the \"Original Image Window\" and triggers the event when the user click on the image.\n # The HSV values of the pixel that was clicked are used to determine the HSV lower and upper bounds.\n global imgHSV\n global lower\n global upper\n global error\n global mose\n \n if event == cv2.EVENT_LBUTTONDOWN:\n lower = imgHSV[y,x,:]\n upper = imgHSV[y,x,:] \n\n lower = cv2.subtract(lower,error)\n upper = cv2.add(upper,error)\n mose = True\n\n\n #rospy.loginfo(\"Hue Range: [%d %d]\",lower[0], upper[0])\n #rospy.loginfo(\"Sat Range: [%d %d]\",lower[1], upper[1])\n #rospy.loginfo(\"Value Range: [%d %d]\",lower[2], upper[2])\n\ndef morphOps(binaryMatrix, kernelSize):\n # Morphological operations (open and close) used to reduce noise in the acquired image.\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n tempFix = cv2.morphologyEx(binaryMatrix,cv2.MORPH_CLOSE, kernel) # Fill in holes\n fix = cv2.morphologyEx(tempFix,cv2.MORPH_OPEN, kernel) # Get rid of noise\n return fix\n\ndef drawCOM(frame, x, y, name):\n cv2.circle(frame,(x,y),5,(0,255,0))\n cv2.putText(frame,name,(x-30,y-25),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,0),2)\n\n\ndef findObjects(binaryMatrix):\n #Finds the location of the desired object in the image.\n output = []\n trash, contours, hierarchy = cv2.findContours(binaryMatrix, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Contours the image to find blobs of the same color \n cont = sorted(contours, key = cv2.contourArea, reverse = True)[:maxObjects] # Sorts the blobs by size (Largest to smallest) \n\n # Find the center of mass of the blob if there are any\n if len(cont) > 0:\n for i in range (0,len(cont)):\n M = cv2.moments(cont[i])\n if M['m00'] > minObjectArea: # Check if the total area of the contour is large enough to care about!\n rect = cv2.minAreaRect(cont[0])\n w = int(rect[1][0])\n x = 
int(M['m10']/M['m00'])\n y = int(M['m01']/M['m00'])\n if(debug):\n cv2.drawContours(imgTrack, cont[i], -1, (255,0,0), 3) # Draws the contour.\n drawCOM(imgTrack,x,y,name)\n if output == []:\n output = [[x,w]]\n else:\n output.append[[x,w]]\n return output\n\n\ndef get_image(CompressedImage):\n # get_image is the main function to find the circles in the image. Get_image triggers each time a new image arrives.\n\n # All the images used to find the ball are made global so we can display them durring debugging.\n global imgBGR\n global imgHSV\n global imgBLUR\n global mask\n global imgMorphOps\n global imgTrack\n\n # Needed parameters from outside this function (lazy and globaling them).\n global p\n global update\n global start\n global morphOpSize\n global blurSize\n global width\n global pt\n\n # The \"CompressedImage\" is transformed to a color image in BGR space and is store in \"imgBGR\"\n imgBGR = bridge.compressed_imgmsg_to_cv2(CompressedImage, \"bgr8\")\n\n # height and width of the image to pass along to the PID controller as the reference point.\n height, width = imgBGR.shape[:2]\n\n # Image used to draw things on!\n imgTrack = imgBGR.copy()\n \n # Blur the image to reduce edges caused by noise or that are useless to us.\n imgBlur = cv2.GaussianBlur(imgBGR,(blurSize,blurSize),0)\n\n # Transform BGR to HSV to avoid lighting issues.\n imgHSV = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2HSV)\t\n \n # Threshold the image using the selected lower and upper bounds of the color of the object.\n mask = cv2.inRange(imgHSV, lower, upper)\n\n # To get rid of noise and fill in gaps in our object use open and close.\n imgMorphOps = morphOps(mask, morphOpSize)\n\n centers = findObjects(imgMorphOps)\n\n #print (\"centers\", not centers)\n\n\n # Not always, the houghCircles function finds circle, so a None inspection is made\n if not centers: \n #print(\"hello\")\n #If no object was found, sends bogus numbers.\n pt = Point()\n\n pt.x = 999\n pt.y = 999\n pt.z = 999 \n update = True\n\n elif centers is not []:\n for i in centers:\n # The x position of the center of the object, the width of the object, and the width of the image.\n p = Point(i[0],i[1],width)\n\n pt = Point()\n\n pt.x = p.x\n pt.y=p.y\n pt.z=p.z\n # Bool to indicate the need to publish new information\n update = True\n \n\n # Once the first image has been processed set start to True to display.\n start = True\n\n\n\n\ndef Init():\n\n # Creates the node, the publisher, and subscribes to the compressedImage.\n\n global pub\n pub = rospy.Publisher('imageLocation', Point, queue_size=10)\n \n #I declare that the find_ball is subcribing to the Compressed Images node.\n rospy.Subscriber(\"/raspicam_node/image/compressed\",CompressedImage, get_image)\n \n #Initializate the node and gives a name, in this case, 'find_ball'\n rospy.init_node('detectObjectUpdated', anonymous=True)\n\n #Create a publisher that will be publishing Geometric message Points\n \n\n\n###################################\n## MAIN\n###################################\n\nif __name__ == '__main__':\n try:\n Init()\n except rospy.ROSInterruptException:\n pass\n\n\n#Rate is used for manage the looping desired rate by using the method 'sleep'\nrate = rospy.Rate(10)\n\n# Create Debugging Windows\nif(debug):\n cv2.namedWindow(titleTracker, cv2.WINDOW_AUTOSIZE )\n cv2.moveWindow(titleTracker, 620, 50)\n cv2.namedWindow(titleMask, cv2.WINDOW_AUTOSIZE )\n cv2.moveWindow(titleMask, 1240, 50)\n cv2.namedWindow(titleOriginal, cv2.WINDOW_AUTOSIZE )\n cv2.moveWindow(titleOriginal, 50, 50)\n\n# 
The mousecallback is connected to the \"Original Image window\" for the user to select the corresponding color\ncv2.setMouseCallback(titleOriginal,mouseEvent)\n\nwhile not rospy.is_shutdown():\n # This is the infinite loop that keep the program running\n \n # If the first image arrived, the start = True\n if start:\n\n # Display the image\n if debug:\n cv2.imshow(titleOriginal,imgBGR)\n cv2.imshow(titleMask,mask)\n cv2.imshow(titleTracker, imgTrack)\n\n # If a new point was found, then update is True and the point is publish\n if update: \n\n #print (\"point\",pt) \t\n pub.publish(pt)\n update = False\n\n rate.sleep()\n\n #User's options to interact with the software\n k = cv2.waitKey(10)\n\n if k == 49: #number 1\n #Decrease the Hue error\n error[0] = error[0] - 1\n if (error[0] < 0):\n error[0] = 0\n k = 0\n rospy.loginfo(\"Color Error: %d\",error[0]) \n elif k == 50: #number 2\n #Increase the Hue error\n error[0] = error[0] + 1\n if (error[0] > 50):\n error[0] = 50\n k = 0\n rospy.loginfo(\"Color Error: %d\",error[0])\n elif k == 51: #numer 3\n #Decrease the morphOp kernel size\n morphOpSize = morphOpSize - 2\n if morphOpSize < 1:\n morphOpSize = 1\n k = 0\n rospy.loginfo(\"Kernel size for close and open: %d\",morphOpSize)\n\n elif k == 52: #number 4\n #Increase the morphOp kernel size\n morphOpSize = morphOpSize + 2\n k = 0\n rospy.loginfo(\"Kernel size for close and open: %d\",morphOpSize)\n elif k == 53: #numer 5\n #Decrease the blur size\n blurSize = blurSize - 2\n if blurSize < 1:\n blurSize = 1\n k = 0\n rospy.loginfo(\"Bluring kernel size: %d\",blurSize)\n\n elif k == 54: #number 6\n #Increase the blur size\n blurSize = blurSize + 2\n k = 0\n rospy.loginfo(\"Bluring kernel size: %d\",blurSize)\n\n elif k == 55: #number 7\n #Decrease the min pixel area of the tracked object\n minObjectArea = minObjectArea - 1\n if minObjectArea < 1:\n minObjectArea = 1\n k = 0\n rospy.loginfo(\"Min object pixel area: %d\",blurSize)\n\n elif k == 56: #number 8\n #Increase the min pixel area of the tracked object\n minObjectArea = minObjectArea + 1\n k = 0\n rospy.loginfo(\"Min object pixel area: %d\",blurSize)\n\n","sub_path":"src/detectObject.py","file_name":"detectObject.py","file_ext":"py","file_size_in_byte":10570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259639174","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^ip/', 'application.views.ip', name='ip'),\n url(r'^loc/','application.views.country', name='loc'),\n url(r'^.*/$', 'application.views.allother')\n)\n","sub_path":"application/application/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"300440898","text":"import cv2\nimport sys\nimport os\n\nclass FaceCropper(object):\n #CASCADE_PATH to be replaced with corresponding path containing \"haarcascade_frontalface_default.xml\"\n\n CASCADE_PATH = \"/home/tanmay/opencv/data/haarcascades/haarcascade_frontalface_default.xml\"\n\n def __init__(self):\n self.face_cascade = cv2.CascadeClassifier(self.CASCADE_PATH)\n\n def generate(self, image_path,new_path,i):\n imageloc=\"\"\n folder=\"\"\n\n img = cv2.imread(image_path)\n if (img is None):\n print(\"Can't open image file\")\n imageloc=\"not found\"\n folder=\"no\"\n return imageloc,folder\n\n faces = self.face_cascade.detectMultiScale(img, 1.3, 5, 
minSize=(20, 20))\n if (faces is None):\n print('Failed to detect face')\n imageloc=\"not found\"\n folder=\"no\"\n return imageloc,folder\n\n facecnt = len(faces)\n print(\"Detected faces: %d\" % facecnt)\n height, width = img.shape[:2]\n if facecnt>1:\n new_folder=new_path+'/'+str(i)\n imageloc='raw_data_cropped/'+str(i)\n folder='yes'\n os.makedirs(new_folder)\n j=1\n for (x, y, w, h) in faces:\n r = max(w, h) / 2\n centerx = x + w / 2\n centery = y + h / 2\n nx = int(centerx - r)\n ny = int(centery - r)\n nr = int(r * 2)\n\n faceimg = img[ny:ny+nr, nx:nx+nr]\n lastimg = cv2.resize(faceimg, (200, 200))\n cv2.imwrite('''raw_data_cropped/{0}/image{1}.jpg''' .format(i,j), lastimg)\n j=j+1\n return imageloc,folder\n elif facecnt==1:\n folder='no'\n for (x, y, w, h) in faces:\n r = max(w, h) / 2\n centerx = x + w / 2\n centery = y + h / 2\n nx = int(centerx - r)\n ny = int(centery - r)\n nr = int(r * 2)\n\n faceimg = img[ny:ny+nr, nx:nx+nr]\n lastimg = cv2.resize(faceimg, (200, 200))\n cv2.imwrite(\"raw_data_cropped/image{0}.jpg\" .format(i), lastimg)\n imageloc=\"raw_data_cropped/image{0}.jpg\" .format(i)\n return imageloc,folder\n elif facecnt==0:\n return \"not found\",\"no\"\n\ndetecter = FaceCropper()\n","sub_path":"face_cropper_rawdata.py","file_name":"face_cropper_rawdata.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"280257453","text":"#!/usr/bin/python\n\n# ===================================================================\n# Take off, then go front for 20 seconds or until QR is detected\n# When the QR VTOL3 is detected, correct the position and wait until\n# 10 second then land\n# Control : proportional\n# Revision history:\n# - going forward for searching QR instead of Hovering\n# - create constants for adjustable parameters\n# - using rangefinder for altitude instead of EKF altitude\n# ===================================================================\n\nimport dronekit\nimport dronekit_sitl\nimport time\nfrom pymavlink import mavutil\nimport roslib\nimport rospy\nimport math\nimport tf\nimport geometry_msgs.msg\n\n# from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative\n# from pymavlink import mavutil # Needed for command message definitions\n# import time\n# import math\nfrom rospy import Rate\n\nconnection_string = 'udp:10.42.0.161:14550'\n\nSPEED_UD_MAX = 90\nSPEED_LR_MAX = 80\nSPEED_BF_MAX = 80\n\nTAKE_OFF_THRUST = 1620\nTARGET_ALTITUDE = 1.0\nMAX_TARGET_ALTITUDE = 3.0\n\n# Tolerance in M\nUD_TOLERANCE = 0.05\nLR_TOLERANCE = 0.05\nBF_TOLERANCE = 0.05\n\n# How far the drone from QR\nBF_TARGET = 0.8\n\nuse_px = 1\nshow_QR = 1\n# Parameter proportional Kp\nKpUD = 250\nKpLR = 300\nKpBF = 300\n\n# Go front time for searching QR in sec\nFRONT_SEARCH_T = 5\n\n# Max min height\nMAX_HEIGHT = 5.0\nMIN_HEIGHT = 0.5\n\n# RC radio channel\nTHRUST_CHANNEL = 3\nLR_CHANNEL = 1\nBF_CHANNEL = 2\n\n# Timing when drone on the target\nSTABLE_TIME = 15\n\n\n\nload1dropped = 0\nload2dropped = 0\nload3dropped = 0\n\nLRvar = 1500\nUDvar = 1500\n\n#######################################################################\n# Function s\n#######################################################################\n\n# def drop_target(no_target):\n# \tif no_target == 0:\n# \t\tmsg = vehicle.message_factory.command_long_encode(\n# \t\t\t0, 0, # target_system, target_component\n# \t\t\tmavutil.mavlink.MAV_CMD_DO_SET_SERVO, # command\n# \t\t\t0, # confirmation\n# \t\t\t12, # servo 
number\n# \t\t\t900, # servo position between 1000 and 2000\n# \t\t\t0, 0, 0, 0, 0) # param 3 ~ 7 not used\n# \t\tload1dropped = 1\n#\n# \tif no_target==1:\n# \t\tmsg = vehicle.message_factory.command_long_encode(\n# 0, 0, # target_system, target_component\n# mavutil.mavlink.MAV_CMD_DO_SET_SERVO, # command\n# 0, # confirmation\n# 12, # servo number\n# 500, # servo position between 1000 and 2000\n# 0, 0, 0, 0, 0) # param 3 ~ 7 not used\n# \t\tload1dropped = 1\n#\n# \tif no_target == 2:\n# \t\tmsg = vehicle.message_factory.command_long_encode(\n# \t\t\t0, 0, # target_system, target_component\n# \t\t\tmavutil.mavlink.MAV_CMD_DO_SET_SERVO, # command\n# \t\t\t0, # confirmation\n# \t\t\t12, # servo number\n# \t\t\t1500, # servo position between 1000 and 2000\n# \t\t\t0, 0, 0, 0, 0) # param 3 ~ 7 not used\n# \t\tload2dropped = 1\n#\n# \tif no_target == 3:\n# \t\tmsg = vehicle.message_factory.command_long_encode(\n# \t\t\t0, 0, # target_system, target_component\n# \t\t\tmavutil.mavlink.MAV_CMD_DO_SET_SERVO, # command\n# \t\t\t0, # confirmation\n# \t\t\t12, # servo number\n# \t\t\t2300, # servo position between 1000 and 2000\n# \t\t\t0, 0, 0, 0, 0) # param 3 ~ 7 not used\n# \t\tload3dropped = 1\n#\n# \t# send command to vehicle\n# \tvehicle.send_mavlink(msg)\n\n\ndef hover(time_sec):\n\tcounter = time_sec / 0.25\n\ti = 0\n\twhile i < counter:\n\t\tvehicle.channels.overrides[3] = 1500\n\t\tvehicle.channels.overrides[1] = 1500\n\t\tvehicle.channels.overrides[2] = 1500\n\t\ttime.sleep(0.25)\n\t\ti = i + 1\n\n\ndef hover_once():\n\tvehicle.channels.overrides[3] = 1500\n\tvehicle.channels.overrides[1] = 1500\n\tvehicle.channels.overrides[2] = 1500\n\n\ndef updown_once(vSpeed):\n\tvehicle.channels.overrides[3] = 1500 + vSpeed\n\tvehicle.channels.overrides[1] = 1500\n\tvehicle.channels.overrides[2] = 1500\n\n\n# minus left, plus right\ndef left_right_timed(speed, ttime_sec):\n\tcounter = ttime_sec / 0.25\n\ti = 0\n\tif speed > SPEED_LR_MAX:\n\t\tspeed = SPEED_LR_MAX\n\telif speed < -SPEED_LR_MAX:\n\t\tspeed = -SPEED_LR_MAX\n\n\twhile i < counter:\n\t\tvehicle.channels.overrides[3] = 1500\n\t\tvehicle.channels.overrides[1] = 1500 + speed\n\t\tvehicle.channels.overrides[2] = 1500\n\t\ttime.sleep(0.25)\n\t\ti = i + 1\n\t\tif not vehicle.armed:\n\t\t\tvehicle.channels.overrides[3] = 1500\n\t\t\tvehicle.channels.overrides[1] = 1500\n\t\t\tvehicle.channels.overrides[2] = 1500\n\t\t\tbreak\n\n\n# minus left, plus right\ndef left_right_once(speed):\n\tif speed > SPEED_LR_MAX:\n\t\tspeed = SPEED_LR_MAX\n\telif speed < -SPEED_LR_MAX:\n\t\tspeed = -SPEED_LR_MAX\n\n\tvehicle.channels.overrides[3] = 1500\n\tvehicle.channels.overrides[1] = 1500 + speed\n\tvehicle.channels.overrides[2] = 1500\n\ttime.sleep(0.25)\n\n\n# minus backward, plus forward\ndef back_forward_timed(speed, ttime_sec):\n\tcounter = ttime_sec / 0.25\n\ti = 0\n\tif speed > SPEED_BF_MAX:\n\t\tspeed = SPEED_BF_MAX\n\telif speed < -SPEED_BF_MAX:\n\t\tspeed = -SPEED_BF_MAX\n\n\twhile i < counter:\n\t\tvehicle.channels.overrides[3] = 1500\n\t\tvehicle.channels.overrides[1] = 1500\n\t\tvehicle.channels.overrides[2] = 1500 + speed\n\t\ttime.sleep(0.25)\n\t\ti = i + 1\n\t\tif not vehicle.armed:\n\t\t\tvehicle.channels.overrides[3] = 1500\n\t\t\tvehicle.channels.overrides[1] = 1500\n\t\t\tvehicle.channels.overrides[2] = 1500\n\t\t\tbreak\n\n\n# minus backward, plus forward\ndef back_forward_once(speed):\n\tif speed > SPEED_BF_MAX:\n\t\tspeed = SPEED_BF_MAX\n\telif speed < -SPEED_BF_MAX:\n\t\tspeed = -SPEED_BF_MAX\n\n\tvehicle.channels.overrides[3] = 
1500\n\tvehicle.channels.overrides[1] = 1500\n\tvehicle.channels.overrides[2] = 1500 + speed\n\ttime.sleep(0.25)\n\n\n\ndef correct_altitude(x, y, z):\n\tglobal UDvar, LRvar\n\tif abs(x) > UD_TOLERANCE:\n\t\teUD = x * KpUD\n\t\tif eUD > SPEED_UD_MAX:\n\t\t\teUD = SPEED_UD_MAX\n\t\tif eUD < -SPEED_UD_MAX:\n\t\t\teUD = -SPEED_UD_MAX\n\n\t\tif eUD > 0:\n\t\t\tif vehicle.rangefinder.distance > MIN_HEIGHT:\n\t\t\t\tvehicle.channels.overrides[THRUST_CHANNEL] = 1500 - eUD\n\t\t\t\tUDvar = 1500 - eUD\n\t\t\telse:\n\t\t\t\tprint(\"Minimum height reached\")\n\t\t\t\tprint(vehicle.rangefinder.distance)\n\t\t\t\tvehicle.channels.overrides[THRUST_CHANNEL] = 1500\n\t\t\t\tUDvar = 1500\n\t\telse:\n\t\t\tif vehicle.rangefinder.distance < MAX_HEIGHT:\n\t\t\t\tvehicle.channels.overrides[THRUST_CHANNEL] = 1500 - eUD\n\t\t\t\tUDvar = 1500 - eUD\n\n\n\t\t\telse:\n\t\t\t\tprint(\"Maximim height reached\")\n\t\t\t\tprint(vehicle.rangefinder.distance)\n\t\t\t\tvehicle.channels.overrides[THRUST_CHANNEL] = 1500\n\t\t\t\tUDvar = 1500\n\n\telse:\n\t\tvehicle.channels.overrides[THRUST_CHANNEL] = 1500\n\t\tUDvar = 1500\n\n\t# y left right, y plus go right\n\tif abs(y) > LR_TOLERANCE:\n\t\teLR = y * KpLR\n\t\tif eLR > SPEED_LR_MAX:\n\t\t\teLR = SPEED_LR_MAX\n\t\tif eLR < -SPEED_LR_MAX:\n\t\t\teLR = -SPEED_LR_MAX\n\t\tvehicle.channels.overrides[LR_CHANNEL] = 1500 + eLR\n\t\tLRvar = 1500 + eLR\n\telse:\n\t\tvehicle.channels.overrides[LR_CHANNEL] = 1500\n\t\tLRvar = 1500\n\n\teBF = z - BF_TARGET\n\tif abs(eBF) > BF_TOLERANCE:\n\t\teBF = eBF * KpBF\n\t\tif eBF > SPEED_BF_MAX:\n\t\t\teBF = SPEED_BF_MAX\n\t\tif eBF < -SPEED_BF_MAX:\n\t\t\teBF = -SPEED_BF_MAX\n\t\teBF = 0\n\t\tvehicle.channels.overrides[BF_CHANNEL] = 1500 - eBF\n\telse:\n\t\tvehicle.channels.overrides[BF_CHANNEL] = 1500\n\n\n#######################################################################\n#\n# MAIN\n#\n#######################################################################\n\nif use_px==1:\n\tvehicle = dronekit.connect(connection_string, wait_ready=True, timeout=120)\n\n\tprint(\"Check altitude\")\n\twhile vehicle.rangefinder.distance> 3:\n\t\tprint(vehicle.rangefinder.distance)\n\t\ttime.sleep(0.5)\n\t# Ensure the load is locked\n\t# drop_target(0)\n\n\t# Set vehicle mode\n\tdesired_mode = 'LOITER'\n\twhile vehicle.mode != desired_mode:\n\t\tvehicle.mode = dronekit.VehicleMode(desired_mode)\n\t\ttime.sleep(0.5)\n\n# print(\"Waitting the drone is armed\")\n# while not vehicle.armed:\n#\ttime.sleep(0.5)\n# print(\"Drone is ARMED\")\n\n\nclost = 0\n\n\nif __name__ == '__main__':\n\trospy.init_node('drone_tf_listener')\n\tlistener = tf.TransformListener()\n\n\n\tif use_px==1:\n\t\tprint(\"Waitting the drone is armed\")\n\t\twhile not vehicle.armed:\n\t\t\ttime.sleep(0.5)\n\n\t\t# Waitting 2 second for situational awareness\n\t\ttime.sleep(1)\n\n\n\t\tprint(\"Taking off\")\n\t\twhile True:\n\t\t\tvehicle.channels.overrides[THRUST_CHANNEL] = TAKE_OFF_THRUST\n\t\t\tif not vehicle.armed:\n\t\t\t\tbreak\n\n\t\t\tif vehicle.rangefinder.distance >= TARGET_ALTITUDE:\n\t\t\t\tprint('Reached target altitude: {0:.2f}m'.format(vehicle.rangefinder.distance))\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Altitude: {0:.2f}m\".format(vehicle.rangefinder.distance))\n\n\t\t\tif vehicle.rangefinder.distance >= MAX_TARGET_ALTITUDE:\n\t\t\t\tbreak\n\t\t\ttime.sleep(0.1)\n\n\t\t# Altitude reached hover for 2 seconds\n\t\tif vehicle.rangefinder.distance < MAX_TARGET_ALTITUDE:\n\t\t\thover(2)\n\n\n\tstate = 10\n\tcounterFront = 0\n\tprint(\"Main loop\")\n\trate = rospy.Rate(20.0) 
# type: Rate\n\n\ttVTOL1_now = 0\n\ttVTOL1_prev = 0\n\ttVTOL2_now = 0\n\ttVTOL2_prev = 0\n\ttVTOL3_now = 0\n\ttVTOL3_prev = 0\n\twhile not rospy.is_shutdown():\n\n\t\ttVTOL1_prev = tVTOL1_now\n\t\ttVTOL2_prev = tVTOL2_now\n\t\ttVTOL3_prev = tVTOL3_now\n\n\n\t\ttry:\n\t\t\t(transVTOL1, rotVTOL1) = listener.lookupTransform('/drone', '/VTOL1', rospy.Time(0))\n\t\t\ttVTOL1_now = listener.getLatestCommonTime(\"/drone\", \"/VTOL1\")\n\t\texcept:\n\t\t\tdm = 1\n\n\t\ttry:\n\t\t\t(transVTOL2, rotVTOL2) = listener.lookupTransform('/drone', '/VTOL2', rospy.Time(0))\n\t\t\ttVTOL2_now = listener.getLatestCommonTime(\"/drone\", \"/VTOL2\")\n\t\texcept:\n\t\t\tdm = 1\n\n\t\ttry:\n\t\t\t(transVTOL3, rotVTOL3) = listener.lookupTransform('/drone', '/VTOL3', rospy.Time(0))\n\t\t\ttVTOL3_now = listener.getLatestCommonTime(\"/drone\", \"/VTOL3\")\n\t\texcept:\n\t\t\tdm = 1\n\n\t\tif tVTOL1_now!=tVTOL1_prev:\n\t\t\tVTOL1detected=1\n\t\telse:\n\t\t\tVTOL1detected=0\n\n\t\tif tVTOL2_now!=tVTOL2_prev:\n\t\t\tVTOL2detected=1\n\t\telse:\n\t\t\tVTOL2detected=0\n\n\t\tif tVTOL3_now!=tVTOL3_prev:\n\t\t\tVTOL3detected=1\n\t\telse:\n\t\t\tVTOL3detected=0\n\n\t\tif show_QR==1:\n\t\t\tif VTOL1detected == 1:\n\t\t\t\tprint(\"VTOL1 {:.2f} \".format(transVTOL1[0]) + \"{:.2f} \".format(transVTOL1[1]) + \"{:.2f} --> \".format(\n\t\t\t\t\ttransVTOL1[2]) + \"{:.2f} \".format(rotVTOL1[0]) + \"{:.2f} \".format(rotVTOL1[1]) + \"{:.2f} \".format(\n\t\t\t\t\trotVTOL1[2]) + \"{:.2f} \".format(rotVTOL1[3]))\n\t\t\telse:\n\t\t\t\tprint(\"VTOL 1 not detected\")\n\n\t\t\tif VTOL2detected == 1:\n\t\t\t\tprint(\"VTOL2 {:.2f} \".format(transVTOL2[0]) + \"{:.2f} \".format(transVTOL2[1]) + \"{:.2f} --> \".format(\n\t\t\t\t\ttransVTOL2[2]) + \"{:.2f} \".format(rotVTOL2[0]) + \"{:.2f} \".format(rotVTOL2[1]) + \"{:.2f} \".format(\n\t\t\t\t\trotVTOL2[2]) + \"{:.2f} \".format(rotVTOL2[3]))\n\t\t\telse:\n\t\t\t\tprint(\"VTOL 2 not detected\")\n\n\n\t\t\tif VTOL3detected == 1:\n\t\t\t\tprint(\"VTOL3 {:.2f} \".format(transVTOL3[0]) + \"{:.2f} \".format(transVTOL3[1]) + \"{:.2f} --> \".format(\n\t\t\t\t\ttransVTOL3[2]) + \"{:.2f} \".format(rotVTOL3[0]) + \"{:.2f} \".format(rotVTOL3[1]) + \"{:.2f} \".format(\n\t\t\t\t\trotVTOL3[2]) + \"{:.2f} \".format(rotVTOL3[3]))\n\t\t\telse:\n\t\t\t\tprint(\"VTOL 3 not detected\")\n\n\n\n\t\tif use_px==1:\n\n\t\t\t# Protection if mode changed the exit\n\t\t\tvehicle.mode = dronekit.VehicleMode(desired_mode)\n\t\t\tif vehicle.mode != 'LOITER':\n\t\t\t\thover_once()\n\t\t\t\tbreak\n\n\t\t\tif vehicle.armed==False:\n\t\t\t\thover_once()\n\t\t\t\tbreak\n\n\t\t\t# Waitting until QR is detected\n\t\t\tif state == 10:\n\t\t\t\tprint(\"Hovering until see QR\")\n\t\t\t\thover_once()\n\n#\t\t\t\tprint(\"Going forward until see QR\")\n#\t\t\t\tback_forward_once(100)\n#\t\t\t\tcounterFront = counterFront + 1\n#\t\t\t\tif counterFront > FRONT_SEARCH_T * 20:\n#\t\t\t\t\thover(2)\n#\t\t\t\t\tbreak\n\n\t\t\t\tif VTOL3detected == 1:\n\t\t\t\t\tprint(\"QR VTOL 3 is detected\")\n\t\t\t\t\thover_once()\n\t\t\t\t\tstate = 20\n\t\t\t\t\tcstabil = 0\n\t\t\t\t\tclost = 0\n\n\t\t\t# Correcting pose QR\n\t\t\tif state == 20:\n\t\t\t\tif VTOL3detected == 1:\n\t\t\t\t\tprint(\"QR VTOL3 is detected\")\n\t\t\t\t\tclost = 0\n\n\t\t\t\t\tcorrect_altitude(transVTOL3[1],transVTOL3[0],transVTOL3[2])\n\t\t\t\t\tif (vehicle.channels.overrides[1] == 1500) and (vehicle.channels.overrides[2] == 1500) and (\n\t\t\t\t\t\tvehicle.channels.overrides[3] == 1500):\n\t\t\t\t\t\tcstabil = cstabil + 1\n\t\t\t\t\t\tif cstabil > STABLE_TIME * 
20:\n\t\t\t\t\t\t\tstate = 30\n\t\t\t\t\t\t\tstatecounter = 0\n\n\t\t\t\telse:\n\t\t\t\t\tclost = clost + 1\n\t\t\t\t\tif clost > 10:\n\t\t\t\t\t\tclost = 12\n\t\t\t\t\t\thover_once()\n\t\t\t\t\t\tprint(\"QR lost hovering\")\n\n\t\t\tif state == 30:\n\t\t\t\tprint(\"State 30\")\n\t\t\t\tstatecounter = statecounter + 1\n\t\t\t\thover_once()\n\t\t\t\t# if statecounter > 2*20:\n\t\t\t\t# \tdrop_target(1)\n\t\t\t\tif statecounter > 4*20:\n\t\t\t\t\tstate = 40\n\t\t\t\t\tstatecounter = 0\n\n\t\t\tif state == 40:\n\t\t\t\tstatecounter = statecounter + 1\n\t\t\t\tback_forward_once(50)\n\t\t\t\tprint(\"State 40\")\n\t\t\t\tprint(statecounter)\n\t\t\t\tif statecounter > 2*20:\n\t\t\t\t\thover_once()\n\t\t\t\t\tbreak\n\n\n\n\n\n\t\t\tprint(\"UD:{0:.2f} LR:{0:.2f} BF:{0:.2f}\".format(UDvar,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tLRvar,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tvehicle.channels.overrides[BF_CHANNEL]))\n\t\t\tprint(state)\n\n\t\trate.sleep()\n\nif use_px==1:\n\tprint(\"Setting LAND mode...\")\n\tvehicle.mode = dronekit.VehicleMode('LAND')\n\ttime.sleep(10)\n\n\t# Close vehicle object before exiting script\n\tprint(\"Close vehicle object\")\n\tvehicle.close()\n","sub_path":"skripsi/Test03_TrackQR1.py","file_name":"Test03_TrackQR1.py","file_ext":"py","file_size_in_byte":12486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626007768","text":"from pymui import createMuiTheme, colors, makeStyles, styled, Button\nfrom pyreact import createElement as el\n\ntheme = createMuiTheme({\n 'palette': {\n 'primary': colors['teal'],\n 'secondary': colors['pink'],\n 'special': {\n 'main': colors['deepPurple'][600],\n 'contrastText': colors['common']['white'],\n },\n },\n 'overrides': {\n 'MuiButton': {\n 'root': {\n 'margin': '0.5rem'\n },\n },\n },\n 'props': {\n 'MuiButton': {\n 'variant': 'contained',\n 'size': 'small',\n },\n 'MuiTextField': {\n 'type': 'text',\n 'variant': 'outlined',\n 'InputLabelProps': {'shrink': True},\n 'InputProps': {'margin': 'dense'},\n 'margin': 'dense',\n },\n },\n})\n\nStyledButton = styled(Button)({\n 'minWidth': '6rem',\n 'margin': '0 0.5rem 0 0',\n '&:hover': {'backgroundColor': theme['palette']['special']['main']}\n})\n\nuseStyle = makeStyles({\n 'root': {\n 'minWidth': '6rem',\n 'margin': '0 0.5rem 0 0',\n '&:hover': {'backgroundColor': lambda props: props['bgcolor']}\n }\n})\n\ndef ListButton(props):\n new_props = {'style': {'minWidth': '6rem', 'margin': '0 0.5rem 0 0'}}\n new_props.update(props)\n return el(Button, new_props)\n\n","sub_path":"ch19/appTheme.py","file_name":"appTheme.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643619596","text":"# coding: utf-8\n\n\"\"\"\n Payment Gateway API Specification.\n\n The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. 
# noqa: E501\n\n The version of the OpenAPI document: 21.1.0.20210122.001\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass Mobile(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'header_area': 'MobileHeaderArea',\n 'show_logo': 'bool'\n }\n\n attribute_map = {\n 'header_area': 'headerArea',\n 'show_logo': 'showLogo'\n }\n\n def __init__(self, header_area=None, show_logo=None): # noqa: E501\n \"\"\"Mobile - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._header_area = None\n self._show_logo = None\n self.discriminator = None\n\n if header_area is not None:\n self.header_area = header_area\n if show_logo is not None:\n self.show_logo = show_logo\n\n @property\n def header_area(self):\n \"\"\"Gets the header_area of this Mobile. # noqa: E501\n\n\n :return: The header_area of this Mobile. # noqa: E501\n :rtype: MobileHeaderArea\n \"\"\"\n return self._header_area\n\n @header_area.setter\n def header_area(self, header_area):\n \"\"\"Sets the header_area of this Mobile.\n\n\n :param header_area: The header_area of this Mobile. # noqa: E501\n :type: MobileHeaderArea\n \"\"\"\n\n self._header_area = header_area\n\n @property\n def show_logo(self):\n \"\"\"Gets the show_logo of this Mobile. # noqa: E501\n\n check if logo should be shown # noqa: E501\n\n :return: The show_logo of this Mobile. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._show_logo\n\n @show_logo.setter\n def show_logo(self, show_logo):\n \"\"\"Sets the show_logo of this Mobile.\n\n check if logo should be shown # noqa: E501\n\n :param show_logo: The show_logo of this Mobile. 
# noqa: E501\n :type: bool\n \"\"\"\n\n self._show_logo = show_logo\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Mobile):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"openapi_client/models/mobile.py","file_name":"mobile.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"48954545","text":"# pylint: disable=missing-docstring\n\nimport os\nimport unittest\n\nfrom submitty_utils import glob\n\nCURRENT_PATH = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestGlob(unittest.TestCase):\n def test_glob(self):\n os.chdir(os.path.join(CURRENT_PATH, '..'))\n self.assertTrue(len(glob.glob(os.path.join('.', '**'), recursive=True)) >\n len(glob.glob(os.path.join('.', '**'))))\n\n def test_iglob(self):\n os.chdir(os.path.join(CURRENT_PATH, '..'))\n self.assertTrue(len(list(glob.iglob(os.path.join('.', '**'), recursive=True))) >\n len(list(glob.iglob(os.path.join('.', '**')))))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_glob.py","file_name":"test_glob.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64669826","text":"import os\nimport time\n\nPATH_TO_SENSOR = \"\"\n\ndef temp_raw():\n global PATH_TO_SENSOR\n temp_sensor = PATH_TO_SENSOR #'/sys/bus/w1/devices/28-800000271ffe/w1_slave'\n f = open(temp_sensor, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\ndef read_temp():\n lines = temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = temp_raw()\n \n temp_output = lines[1].find('t=')\n \n if temp_output != -1:\n temp_string = lines[1].strip()[temp_output+2:]\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n return temp_c, temp_f\n\ndef findTempSensor(serialNum = None):\n pathToSensor = \"\"\n sensorBasePath = '/sys/bus/w1/devices/'\n sensorPrefix = '28-'\n sensorFile = 'w1_slave'\n\n if serialNum != None:\n tryPath = os.path.join(sensorBasePath, sensorPrefix+serialNum, sensorFile)\n if os.path.isfile(tryPath):\n pathToSensor = tryPath\n\n if pathToSensor == \"\" and os.path.isdir(sensorBasePath):\n sensors = os.listdir(sensorBasePath)\n for sensor in sensors:\n try:\n if sensor[:len(sensorPrefix)] == sensorPrefix and os.path.isfile(os.path.join(sensorBasePath, sensor, sensorFile)):\n pathToSensor = os.path.join(sensorBasePath, sensor, sensorFile)\n except:\n pass\n\n return pathToSensor\n\n\ndef 
temperatureSensor_init(serialNum = None):\n global PATH_TO_SENSOR\n PATH_TO_SENSOR = findTempSensor(serialNum)\n os.system('modprobe w1-gpio')\n os.system('modprobe w1-therm')\n\ndef temperatureSensor_getFahrenheit():\n temperature = None\n try:\n dummy, temperature = read_temp()\n except:\n pass\n return temperature\n\ndef temperatureSensor_getCelsius():\n temperature = None\n try:\n temperature, dummy = read_temp()\n except:\n pass\n return temperature\n\n\n\ndef temperatureSensor_debugTest():\n temperatureSensor_init()\n while True:\n print(read_temp())\n print(temperatureSensor_getFahrenheit())\n print(temperatureSensor_getCelsius())\n time.sleep(1)\n \n \n#temperatureSensor_debugTest()","sub_path":"temperatureSensorLib.py","file_name":"temperatureSensorLib.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186207896","text":"from django.shortcuts import render\nfrom web_platform.my_models.get_basic_data import get_basic_data\nfrom my_python_code.Operation_control.Run_case_of_api import run_api_case\ndef test_control(request):\n context = get_basic_data()\n run_api_case()\n if request.method == 'GET':\n a=request.GET.get(\"subject\")\n print(a)\n\n return render(request, 'test.html', context)","sub_path":"web_platform/my_views/test_control.py","file_name":"test_control.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"371829549","text":"\"\"\"\nQUESTION #20 - VALID PARENTHESES\nDifficulty: Easy\n\"\"\"\n\n# test case 1 - \"((})\" returns False\n# test case 2 - \"(){}[\" returns False (odd length, edge case)\n# test case 3 - \"{}[]()\" returns True\n# test case 4 - \"({[]})\" returns True\n# test case 5 - \"((\" returns False (didn't come up with this one myself)\n\ndef isValid(s) -> bool:\n if len(s) % 2 != 0:\n return False\n\n openbrackets = [\"(\", \"{\", \"[\"]\n brackets = []\n\n for index in range(len(s)):\n if s[index] in openbrackets:\n brackets.append(s[index])\n continue\n\n if len(brackets) > 0:\n check = brackets.pop()\n else:\n return False\n\n if s[index] == \")\" and check == \"(\":\n continue\n elif s[index] == \"}\" and check == \"{\":\n continue\n elif s[index] == \"]\" and check == \"[\":\n continue\n else:\n return False\n\n if len(brackets) == 0:\n return True\n else:\n return False\n\n\"\"\"\nNOTES:\n- how do you come up with better test cases? 
Is there a systematic way to do so?\n- made a few syntax errors, need to be more careful!!\n- time complexity: O(N) where N = len(s)\n\nRESULTS: faster than 39.38% and memory usage less than 87.02% of submissions\n\nIMPROVEMENTS:\n- Using hash maps makes comparison of corresponding brackets easier & faster\n\"\"\"","sub_path":"01_arrays_and_strings/20_valid_parentheses.py","file_name":"20_valid_parentheses.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"480567975","text":"import ssl\nimport sys\n\nimport paho.mqtt.client\n\ndef on_connect(client, userdata, flags, rc):\n\tprint('connected (%s)' % client._client_id)\n\tclient.subscribe(topic='/casa/temperatura/output', qos=2)\n\ndef on_message(client, userdata, message):\n\tprint('------------------------------')\n\tprint('topic: %s' % message.topic)\n\tprint('payload: %s' % message.payload)\n\tprint('qos: %d' % message.qos)\n\ndef main():\n\tclient = paho.mqtt.client.Client(client_id='Ultrasonico-sensor', clean_session=False)\n\tclient.on_connect = on_connect\n\tclient.on_message = on_message\n\tclient.connect(host='35.239.161.254', port=1883)\n\t#client.connect(host='192.168.0.13', port=1883)\n\tclient.loop_forever()\n\nif __name__ == '__main__':\n\tmain()\n\nsys.exit(0)\n","sub_path":"sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"377469742","text":"import matplotlib.pyplot as plt\nimport matplotlib.style as style\n\nfrom data.cifar10_dataset import Cifar10Dataset\nfrom data.config.cifar10_config import set_cifar_configs\nfrom data.config.isic_config import set_isic_configs\nfrom data.isic_dataset import ISICDataset\nfrom data.jurkat_dataset import JurkatDataset\nfrom data.matek_dataset import MatekDataset\nfrom data.plasmodium_dataset import PlasmodiumDataset\nfrom data.config.matek_config import set_matek_configs\nfrom data.config.jurkat_config import set_jurkat_configs\nfrom data.config.plasmodium_config import set_plasmodium_configs\nfrom results import ratio_metrics\nfrom options.visualization_options import get_arguments\nimport os\nimport numpy as np\n\n\"\"\"\nplot the accuracy vs data proportion being used, graph\ncredits to: Alex Olteanu (https://www.dataquest.io/blog/making-538-plots/) for the plot style\n:return: None\n\"\"\"\n\ndatasets = {'matek': MatekDataset, 'cifar10': Cifar10Dataset, 'plasmodium': PlasmodiumDataset,\n 'jurkat': JurkatDataset, 'isic': ISICDataset}\nconfigs = {'matek': set_matek_configs, 'jurkat': set_jurkat_configs,\n 'plasmodium': set_plasmodium_configs, 'cifar10': set_cifar_configs, 'isic': set_isic_configs}\n\nplot_configs = {'matek': (2, 5),\n 'jurkat': (2, 4),\n 'plasmodium': (1, 2),\n 'cifar10': (2, 5),\n 'isic': (2, 4)\n }\n\nfully_supervised = {\n 'matek': {'recall': 0.8621, 'f1-score': 0.7348, 'precision': 0.7112, 'accuracy': 0.9469},\n 'jurkat': {'recall': 0.6351, 'f1-score': 0.6138, 'precision': 0.7056, 'accuracy': 0.7445},\n 'plasmodium': 0.9763,\n 'cifar10': 0.75,\n 'isic': {'recall': 0.6752, 'f1-score': 0.6702, 'precision': 0.6707, 'accuracy': 0.7452}\n}\n\nfully_supervised_std = {\n 'matek': {'recall': 0.0318, 'f1-score': 0.0224, 'precision': 0.0103, 'accuracy': 0.0249},\n 'jurkat': {'recall': 0.0326, 'f1-score': 0.0326, 'precision': 0.0216, 'accuracy': 0.0265},\n 'plasmodium': 0.9763,\n 'cifar10': 0.75,\n 'isic': {'recall': 0.0159, 'f1-score': 0.0165, 'precision': 
0.0119, 'accuracy': 0.0356}\n}\n\nmethods_default = [\n 'random_sampling',\n 'mc_dropout',\n 'entropy_based',\n 'augmentations_based',\n]\n\ndataset_rep = {\n 'matek': 'White Blood Cell Dataset',\n 'isic': 'Skin Lesion Dataset',\n 'jurkat': 'Cell Cycle Dataset',\n}\n\n\ndef plot_ratio_class_wise_metrics(metric, classes, label_y, prop, plot_config):\n fig = plt.figure(figsize=(20, 7))\n style.use('fivethirtyeight')\n\n colors = [[0, 0, 0, 1], [230 / 255, 159 / 255, 0, 1], [86 / 255, 180 / 255, 233 / 255, 1],\n [0, 158 / 255, 115 / 255, 1], [213 / 255, 94 / 255, 0, 1], [0, 114 / 255, 178 / 255, 1],\n [93 / 255, 58 / 255, 155 / 255, 1], [153 / 255, 79 / 255, 0, 1], [211 / 255, 95 / 255, 183 / 255, 1],\n [238 / 255, 136 / 255, 102 / 255, 1]]\n ax_main = fig.add_subplot(111)\n for i, cls in enumerate(classes):\n ax = fig.add_subplot(plot_config[0], plot_config[1], i + 1)\n for j, method in enumerate(methods_default):\n if len(metric[j]) == 0:\n continue\n linestyle = '-'\n ax.errorbar(prop, metric[j][i][1], yerr=(metric[j][i][0] - metric[j][i][2]) / 2,\n color=colors[j % len(colors)],\n label=methods_default[j], linewidth=2, linestyle=linestyle, marker='o', capsize=3)\n # ax.fill_between(prop, metric[j][i][0], metric[j][i][2], color=colors[i % len(colors)], alpha=0.05)\n ax.set_title(classes[i])\n\n ax_main.spines['top'].set_color('none')\n ax_main.spines['bottom'].set_color('none')\n ax_main.spines['left'].set_color('none')\n ax_main.spines['right'].set_color('none')\n ax_main.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)\n\n ax_main.set_xlabel(\"Active Learning Cycles\", fontsize=20, weight='bold', alpha=.75)\n ax_main.set_ylabel(label_y, fontsize=20, weight='bold', alpha=.75)\n plt.show()\n\n\ndef plot_ratio_metrics(prop, metric, label_y, fully_supervised_metric, save_path, methods, title,\n fully_supervised_std_metric):\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.weight\"] = \"ultralight\"\n plt.rcParams[\"axes.labelweight\"] = \"ultralight\"\n\n plt.figure(figsize=(14, 10))\n plt.rc('xtick', labelsize=45)\n plt.rc('ytick', labelsize=45)\n # plt.grid(color='black')\n style.use(['science', 'no-latex'])\n\n colors = [[86 / 255, 180 / 255, 233 / 255, 1], [230 / 255, 159 / 255, 0, 1], [212 / 255, 16 / 255, 16 / 255, 1],\n [0, 158 / 255, 115 / 255, 1], [213 / 255, 94 / 255, 0, 1], [0, 114 / 255, 178 / 255, 1],\n [93 / 255, 58 / 255, 155 / 255, 1], [153 / 255, 79 / 255, 0, 1], [211 / 255, 95 / 255, 183 / 255, 1],\n [238 / 255, 136 / 255, 102 / 255, 1]]\n\n if 'Recall' in label_y:\n plt.errorbar(prop, [fully_supervised_metric['recall']] * len(prop),\n yerr=[fully_supervised_std_metric['recall']] * len(prop),\n color=[0, 0, 0, 1], label='fully_supervised', linewidth=2, linestyle='--', marker=',', capsize=3)\n plt.fill_between(prop,\n np.array([fully_supervised_metric['recall']] * len(prop)) - fully_supervised_std_metric[\n 'recall'],\n np.array([fully_supervised_metric['recall']] * len(prop)) + fully_supervised_std_metric[\n 'recall'],\n color=[0, 0, 0, 1], alpha=0.05)\n elif 'Precision' in label_y:\n plt.errorbar(prop, [fully_supervised_metric['precision']] * len(prop),\n yerr=[fully_supervised_std_metric['precision']] * len(prop), color=[0, 0, 0, 1],\n label='fully_supervised', linewidth=2, linestyle='--', marker=',', capsize=3)\n plt.fill_between(prop,\n np.array([fully_supervised_metric['precision']] * len(prop)) - fully_supervised_std_metric[\n 'precision'],\n np.array([fully_supervised_metric['precision']] * len(prop)) + 
fully_supervised_std_metric[\n 'precision'],\n color=[0, 0, 0, 1], alpha=0.05)\n elif 'F1-score' in label_y:\n plt.errorbar(prop, [fully_supervised_metric['f1-score']] * len(prop),\n yerr=[fully_supervised_std_metric['f1-score']] * len(prop), color=[0, 0, 0, 1],\n label='fully_supervised', linewidth=2, linestyle='--', marker=',', capsize=3)\n plt.fill_between(prop,\n np.array([fully_supervised_metric['f1-score']] * len(prop)) - fully_supervised_std_metric[\n 'f1-score'],\n np.array([fully_supervised_metric['f1-score']] * len(prop)) + fully_supervised_std_metric[\n 'f1-score'],\n color=[0, 0, 0, 1], alpha=0.05)\n else:\n plt.errorbar(prop, [fully_supervised_metric['accuracy']] * len(prop),\n yerr=[fully_supervised_std_metric['accuracy']] * len(prop), color=[0, 0, 0, 1],\n label='fully_supervised', linewidth=2, linestyle='--', marker=',', capsize=3)\n plt.fill_between(prop,\n np.array([fully_supervised_metric['accuracy']] * len(prop)) - fully_supervised_std_metric[\n 'accuracy'],\n np.array([fully_supervised_metric['accuracy']] * len(prop)) + fully_supervised_std_metric[\n 'accuracy'],\n color=[0, 0, 0, 1], alpha=0.05)\n\n for i, method in enumerate(methods):\n if len(metric[i]) == 0:\n continue\n if 'Semi-supervised' in method:\n linestyle = '-'\n else:\n linestyle = '--'\n\n if 'Entropy Based' in method:\n c = colors[3]\n elif 'MC Dropout' in method:\n c = colors[1]\n elif 'Augmentations Based' in method:\n c = colors[2]\n else:\n c = colors[0]\n\n if 'SimCLR' in method:\n marker = 's'\n elif 'Autoencoder' in method:\n marker = 'o'\n elif 'ImageNet' in method:\n marker = '^'\n else:\n marker = ','\n plt.errorbar(prop, metric[i][1], yerr=(metric[i][0] - metric[i][2]) / 2, color=c, markersize=15,\n label=method, linewidth=2, linestyle=linestyle, marker=marker, capsize=3)\n plt.fill_between(prop, metric[i][0], metric[i][2], color=c, alpha=0.05)\n\n plt.xlabel(\"Added annotated data (%)\", fontsize=45)\n plt.ylabel(label_y, fontsize=45)\n plt.legend(loc='lower right', fontsize=18)\n # plt.title(title, fontsize=45, weight='bold', alpha=.75)\n plt.xticks(ticks=prop)\n plt.yticks(ticks=np.arange(0.10, 1.0, step=0.10))\n plt.savefig(save_path)\n\n\ndef plot_epoch_class_wise_loss(values, classes, label_y, epochs, plot_config):\n fig = plt.figure(figsize=(20, 7))\n style.use('fivethirtyeight')\n\n colors = [[0, 0, 0, 1], [230 / 255, 159 / 255, 0, 1], [86 / 255, 180 / 255, 233 / 255, 1],\n [0, 158 / 255, 115 / 255, 1], [213 / 255, 94 / 255, 0, 1], [0, 114 / 255, 178 / 255, 1],\n [93 / 255, 58 / 255, 155 / 255, 1], [153 / 255, 79 / 255, 0, 1], [211 / 255, 95 / 255, 183 / 255, 1],\n [238 / 255, 136 / 255, 102 / 255, 1]]\n ax_main = fig.add_subplot(111)\n for i, cls in enumerate(classes):\n ax = fig.add_subplot(plot_config[0], plot_config[1], i + 1)\n if len(values[i]) == 0:\n continue\n linestyle = '-'\n ax.plot(epochs, values[i][0], color=colors[0], label='Train Loss',\n linewidth=2, linestyle=linestyle)\n ax.plot(epochs, values[i][1], color=colors[1], label='Valid Loss',\n linewidth=2, linestyle=linestyle)\n ax.set_title(classes[i])\n\n ax_main.spines['top'].set_color('none')\n ax_main.spines['bottom'].set_color('none')\n ax_main.spines['left'].set_color('none')\n ax_main.spines['right'].set_color('none')\n ax_main.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)\n\n ax_main.set_xlabel(\"Epochs\", fontsize=20, weight='bold')\n ax_main.set_ylabel(label_y, fontsize=15, weight='bold')\n plt.show()\n\n\ndef plot_ae_loss(losses, logs, epochs):\n plt.figure(figsize=(15, 10))\n 
style.use('fivethirtyeight')\n\n colors = [[0, 0, 0, 1], [230 / 255, 159 / 255, 0, 1], [86 / 255, 180 / 255, 233 / 255, 1],\n [0, 158 / 255, 115 / 255, 1], [213 / 255, 94 / 255, 0, 1], [0, 114 / 255, 178 / 255, 1],\n [93 / 255, 58 / 255, 155 / 255, 1], [153 / 255, 79 / 255, 0, 1], [211 / 255, 95 / 255, 183 / 255, 1],\n [238 / 255, 136 / 255, 102 / 255, 1]]\n\n for i, log in enumerate(logs):\n if i >= len(losses):\n break\n plt.plot(epochs, log, color=colors[i], label=losses[i], linewidth=2)\n\n plt.xlabel(\"Epochs\", fontsize=20, weight='bold', alpha=.75)\n plt.ylabel(\"Loss Value\", fontsize=20, weight='bold', alpha=.75)\n plt.legend(loc='lower right', fontsize=18)\n plt.show()\n\n\ndef main(args):\n args = configs[args.dataset](args)\n\n num = [i for i in range(0, 21, 5)]\n\n if 'macro' in args.metric_ratio:\n y_label = f'Macro {args.metric.capitalize()}'\n else:\n y_label = 'Accuracy'\n\n dataset_title = {'matek': 'White blood cells', 'jurkat': 'Jurkat cell cycle', 'isic': 'Skin lesions',\n 'plasmodium': 'Red blood cells'}\n\n '''\n ratio_class_wise_metrics_log = ratio_class_wise_metrics(args.metric, dataset.classes, args.dataset)\n plot_ratio_class_wise_metrics(ratio_class_wise_metrics_log, dataset.classes, y_label, num,\n plot_configs[args.dataset])\n '''\n\n ratio_metrics_logs = ratio_metrics(args.metric, args.dataset, cls=args.metric_ratio,\n methods=args.methods_default_results)\n plot_ratio_metrics(num[:5], ratio_metrics_logs, y_label, fully_supervised[args.dataset],\n save_path=args.save_path, methods=args.methods_default,\n title=dataset_title[args.dataset],\n fully_supervised_std_metric=fully_supervised_std[args.dataset])\n\n '''\n epoch_class_wise_log = epoch_class_wise_loss(dataset.classes, methods[args.method_id], args.dataset)\n plot_epoch_class_wise_loss(epoch_class_wise_log, dataset.classes, y_label_alt,\n list(range(len(epoch_class_wise_log[0][0]))), plot_configs[args.dataset])\n\n ae_loss_logs = ae_loss(args.dataset)\n plot_ae_loss(losses=['bce', 'l1', 'l2', 'ssim'], logs=ae_loss_logs, epochs=list(range(len(ae_loss_logs[0]))))\n '''\n\n\nif __name__ == '__main__':\n\n root_vis = '/home/ahmad/thesis/visualization'\n arguments = get_arguments()\n methods_states = {\n 'h': ['Augmentations Based + ImageNet + FixMatch',\n 'Augmentations Based + SimCLR + FixMatch',\n 'Augmentations Based + ImageNet + Supervised',\n 'MC Dropout + ImageNet + Supervised',\n 'Entropy Based + ImageNet + Supervised',\n 'Random Sampling + Random + Supervised'],\n }\n methods_states_results = {\n 'h': ['fixmatch_with_al_augmentations_based_pretrained',\n 'fixmatch_with_al_augmentations_based_pretrained_simclr',\n 'augmentations_based_pretrained',\n 'mc_dropout_pretrained',\n 'entropy_based_pretrained',\n 'random_sampling'],\n }\n\n datasets = ['matek', 'isic', 'jurkat']\n\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.weight\"] = \"ultralight\"\n plt.rcParams[\"axes.labelweight\"] = \"ultralight\"\n plt.rc('xtick', labelsize=30)\n plt.rc('ytick', labelsize=30)\n plt.rcParams['legend.fontsize'] = 30\n\n fig, ax = plt.subplots(3, 4, figsize=(40, 20))\n # fig.suptitle('Cell Cycle Dataset', fontsize=45)\n for itera, dataset in enumerate(datasets):\n for k, method_state in methods_states.items():\n if arguments.run_batch:\n states = [\n (dataset, 'recall', 'accuracy'),\n (dataset, 'precision', 'macro avg'),\n (dataset, 'recall', 'macro avg'),\n (dataset, 'f1-score', 'macro avg'),\n ]\n\n for j, (d, m, r) in enumerate(states):\n root_path = os.path.join(root_vis, d, k)\n if not 
os.path.exists(root_path):\n os.makedirs(root_path)\n arguments.dataset = d\n arguments.metric = m\n arguments.metric_ratio = r\n arguments.methods_default = method_state\n arguments.methods_default_results = methods_states_results[k]\n arguments.save_path = os.path.join(root_path, f'{m}_{r}.png')\n args = configs[arguments.dataset](arguments)\n\n num = [i for i in range(0, 21, 5)]\n\n if 'macro' in args.metric_ratio:\n y_label = f'Macro {args.metric.capitalize()}'\n else:\n y_label = 'Accuracy'\n\n dataset_title = {'matek': 'White blood cells', 'jurkat': 'Jurkat cell cycle',\n 'isic': 'Skin lesions',\n 'plasmodium': 'Red blood cells'}\n\n '''\n ratio_class_wise_metrics_log = ratio_class_wise_metrics(args.metric, dataset.classes, args.dataset)\n plot_ratio_class_wise_metrics(ratio_class_wise_metrics_log, dataset.classes, y_label, num,\n plot_configs[args.dataset])\n '''\n\n ratio_metrics_logs = ratio_metrics(args.metric, args.dataset, cls=args.metric_ratio,\n methods=args.methods_default_results)\n\n prop = num[:5]\n metric = ratio_metrics_logs\n label_y = y_label\n fully_supervised_metric = fully_supervised[args.dataset]\n save_path = args.save_path\n methods = args.methods_default\n title = dataset_title[args.dataset]\n fully_supervised_std_metric = fully_supervised_std[args.dataset]\n\n # plt.figure(figsize=(14, 10))\n # plt.grid(color='black')\n style.use(['science', 'no-latex'])\n\n colors = [[86 / 255, 180 / 255, 233 / 255, 1], [230 / 255, 159 / 255, 0, 1],\n [212 / 255, 16 / 255, 16 / 255, 1],\n [0, 158 / 255, 115 / 255, 1], [213 / 255, 94 / 255, 0, 1], [0, 114 / 255, 178 / 255, 1],\n [93 / 255, 58 / 255, 155 / 255, 1], [153 / 255, 79 / 255, 0, 1],\n [211 / 255, 95 / 255, 183 / 255, 1],\n [238 / 255, 136 / 255, 102 / 255, 1]]\n\n if 'Recall' in label_y:\n ax[itera, j].errorbar(prop, [fully_supervised_metric['recall']] * len(prop),\n yerr=[fully_supervised_std_metric['recall']] * len(prop),\n color=[0, 0, 0, 1], label='Fully Supervised', linewidth=2, linestyle='--',\n marker=',',\n capsize=3)\n ax[itera, j].fill_between(prop,\n np.array([fully_supervised_metric['recall']] * len(prop)) -\n fully_supervised_std_metric[\n 'recall'],\n np.array([fully_supervised_metric['recall']] * len(prop)) +\n fully_supervised_std_metric[\n 'recall'],\n color=[0, 0, 0, 1], alpha=0.05)\n elif 'Precision' in label_y:\n ax[itera, j].errorbar(prop, [fully_supervised_metric['precision']] * len(prop),\n yerr=[fully_supervised_std_metric['precision']] * len(prop),\n color=[0, 0, 0, 1],\n label='Fully Supervised', linewidth=2, linestyle='--', marker=',',\n capsize=3)\n ax[itera, j].fill_between(prop,\n np.array([fully_supervised_metric['precision']] * len(prop)) -\n fully_supervised_std_metric[\n 'precision'],\n np.array([fully_supervised_metric['precision']] * len(prop)) +\n fully_supervised_std_metric[\n 'precision'],\n color=[0, 0, 0, 1], alpha=0.05)\n elif 'F1-score' in label_y:\n ax[itera, j].errorbar(prop, [fully_supervised_metric['f1-score']] * len(prop),\n yerr=[fully_supervised_std_metric['f1-score']] * len(prop),\n color=[0, 0, 0, 1],\n label='Fully Supervised', linewidth=2, linestyle='--', marker=',',\n capsize=3)\n ax[itera, j].fill_between(prop,\n np.array([fully_supervised_metric['f1-score']] * len(prop)) -\n fully_supervised_std_metric[\n 'f1-score'],\n np.array([fully_supervised_metric['f1-score']] * len(prop)) +\n fully_supervised_std_metric[\n 'f1-score'],\n color=[0, 0, 0, 1], alpha=0.05)\n else:\n ax[itera, j].errorbar(prop, [fully_supervised_metric['accuracy']] * len(prop),\n 
yerr=[fully_supervised_std_metric['accuracy']] * len(prop),\n color=[0, 0, 0, 1],\n label='Fully Supervised', linewidth=2, linestyle='--', marker=',',\n capsize=3)\n ax[itera, j].fill_between(prop,\n np.array([fully_supervised_metric['accuracy']] * len(prop)) -\n fully_supervised_std_metric[\n 'accuracy'],\n np.array([fully_supervised_metric['accuracy']] * len(prop)) +\n fully_supervised_std_metric[\n 'accuracy'],\n color=[0, 0, 0, 1], alpha=0.05)\n\n for i, method in enumerate(methods):\n if len(metric[i]) == 0:\n continue\n if 'FixMatch' in method:\n linestyle = '-'\n else:\n linestyle = '--'\n\n if 'Entropy Based' in method:\n c = colors[3]\n elif 'MC Dropout' in method:\n c = colors[1]\n elif 'Augmentations Based' in method:\n c = colors[2]\n else:\n c = colors[0]\n\n if 'SimCLR' in method:\n marker = 's'\n elif 'Autoencoder' in method:\n marker = 'o'\n elif 'ImageNet' in method:\n marker = '^'\n else:\n marker = ','\n ax[itera, j].errorbar(prop, metric[i][1], yerr=(metric[i][0] - metric[i][2]) / 2, color=c,\n markersize=10,\n label=method, linewidth=2, linestyle=linestyle, marker=marker, capsize=3)\n ax[itera, j].fill_between(prop, metric[i][0], metric[i][2], color=c, alpha=0.05)\n\n # ax[itera, j].set_legend(loc='lower right', fontsize=18)\n # plt.title(title, fontsize=30, weight='bold', alpha=.75)\n ax[itera, j].set_xticks(ticks=prop)\n ax[itera, j].set_yticks(ticks=np.arange(0.10, 1.0, step=0.10))\n ax[itera, j].xaxis.set_ticklabels([])\n ax[itera, j].yaxis.set_ticklabels([])\n # ax[itera, j].savefig(save_path)\n else:\n main(args=arguments)\n\n ax[2, 0].set_xlabel(\"Added annotated data (%)\", fontsize=30)\n ax[2, 1].set_xlabel(\"Added annotated data (%)\", fontsize=30)\n ax[2, 2].set_xlabel(\"Added annotated data (%)\", fontsize=30)\n ax[2, 3].set_xlabel(\"Added annotated data (%)\", fontsize=30)\n\n ax[0, 0].set_ylabel('White Blood Cell', fontsize=30)\n ax[1, 0].set_ylabel('Skin Lesion', fontsize=30)\n ax[2, 0].set_ylabel('Cell Cycle', fontsize=30)\n\n ax[0, 0].set_title('Accuracy', fontsize=30)\n ax[0, 1].set_title('Macro Precision', fontsize=30)\n ax[0, 2].set_title('Macro Recall', fontsize=30)\n ax[0, 3].set_title('Macro F1-Score', fontsize=30)\n\n ax[2, 0].set_xticks(ticks=prop)\n ax[2, 1].set_xticks(ticks=prop)\n ax[2, 2].set_xticks(ticks=prop)\n ax[2, 3].set_xticks(ticks=prop)\n\n ax[0, 0].set_yticks(ticks=np.arange(0.10, 1.0, step=0.10))\n ax[1, 0].set_yticks(ticks=np.arange(0.10, 1.0, step=0.10))\n ax[2, 0].set_yticks(ticks=np.arange(0.10, 1.0, step=0.10))\n\n ax[2, 0].xaxis.set_ticklabels([str(pr)[:3] for pr in prop])\n ax[2, 1].xaxis.set_ticklabels([str(pr)[:3] for pr in prop])\n ax[2, 2].xaxis.set_ticklabels([str(pr)[:3] for pr in prop])\n ax[2, 3].xaxis.set_ticklabels([str(pr)[:3] for pr in prop])\n\n ax[0, 0].yaxis.set_ticklabels(np.round(np.arange(0.10, 1.0, step=0.10), decimals=1))\n ax[1, 0].yaxis.set_ticklabels(np.round(np.arange(0.10, 1.0, step=0.10), decimals=1))\n ax[2, 0].yaxis.set_ticklabels(np.round(np.arange(0.10, 1.0, step=0.10), decimals=1))\n\n fig.subplots_adjust(right=0.8)\n\n handles, labels = ax[2, 3].get_legend_handles_labels()\n lgd1 = fig.legend(handles, labels, bbox_to_anchor=(0.962, 0.27))\n\n handles, labels = ax[1, 3].get_legend_handles_labels()\n lgd2 = fig.legend(handles, labels, bbox_to_anchor=(0.964, 0.55))\n\n handles, labels = ax[0, 3].get_legend_handles_labels()\n lgd3 = fig.legend(handles, labels, bbox_to_anchor=(0.964, 0.82))\n\n handles, labels = ax[1, 1].get_legend_handles_labels()\n lgd4 = fig.legend(handles, [\"\" for lbl in 
labels], bbox_to_anchor=(1.1, 0.82))\n\n fig.savefig('temp.png', dpi=fig.dpi)\n'''\n states = [\n 'random_sampling',\n 'mc_dropout',\n 'entropy_based',\n 'augmentations_based',\n 'random_sampling_pretrained',\n 'mc_dropout_pretrained',\n 'entropy_based_pretrained',\n 'augmentations_based_pretrained',\n 'auto_encoder',\n 'auto_encoder_with_al_mc_dropout',\n 'auto_encoder_with_al_entropy_based',\n 'auto_encoder_with_al_augmentations_based',\n 'simclr',\n 'simclr_with_al_mc_dropout',\n 'simclr_with_al_entropy_based',\n 'simclr_with_al_augmentations_based',\n 'fixmatch',\n 'fixmatch_with_al_mc_dropout',\n 'fixmatch_with_al_entropy_based',\n 'fixmatch_with_al_augmentations_based',\n 'fixmatch_pretrained',\n 'fixmatch_with_al_mc_dropout_pretrained',\n 'fixmatch_with_al_entropy_based_pretrained',\n 'fixmatch_with_al_augmentations_based_pretrained',\n 'fixmatch_pretrained_autoencoder',\n 'fixmatch_with_al_mc_dropout_pretrained_autoencoder',\n 'fixmatch_with_al_entropy_based_pretrained_autoencoder',\n 'fixmatch_with_al_augmentations_based_pretrained_autoencoder',\n 'fixmatch_pretrained_simclr',\n 'fixmatch_with_al_mc_dropout_pretrained_simclr',\n 'fixmatch_with_al_entropy_based_pretrained_simclr',\n 'fixmatch_with_al_augmentations_based_pretrained_simclr',\n ]\n\n datasets = ['isic', 'matek', 'jurkat']\n datasets_rep = ['Skin Lesions', 'White blood cells', 'Jurkat cells cycle']\n inits = ['Random', 'ImageNet', 'Autoencoder', 'SimCLR']\n trainings = ['Supervised learning', 'Semi-supervised learning']\n uncertainty_samplings = ['Random', 'MC dropout', 'Entropy Based', 'Augmentations Based']\n metrics = ['recall', 'precision', 'f1-score', 'accuracy']\n metrics_rep = ['Recall', 'Precision', 'F1-score', 'Accuracy']\n\n rows = []\n for dataset, dataset_rep in zip(datasets, datasets_rep):\n i = 0\n for training in trainings:\n for init in inits:\n for uncertainty_sampling in uncertainty_samplings:\n row = {'Dataset': dataset_rep, 'Network Initialization': init, 'Training Method': training,\n 'Uncertainty Sampling': uncertainty_sampling}\n for metric, metric_rep in zip(metrics, metrics_rep):\n if metric == 'accuracy':\n ratio_metrics_logs = ratio_metrics('recall', dataset, cls='accuracy', methods=[states[i]])\n else:\n print(i)\n ratio_metrics_logs = ratio_metrics(metric, dataset, cls='macro avg', methods=[states[i]])\n if len(ratio_metrics_logs[0]) == 0:\n print(row, states[i])\n continue\n for iterations in range(len(ratio_metrics_logs[0][1][:5])):\n row.update({f'{metric_rep} {iterations}': ratio_metrics_logs[0][1][iterations],\n f'{metric_rep} STD. 
{iterations}': ratio_metrics_logs[0][1][iterations] -\n ratio_metrics_logs[0][0][iterations]})\n i = i + 1\n if len(ratio_metrics_logs[0]) == 0:\n continue\n else:\n rows.append(row)\n import pandas as pd\n df = pd.DataFrame(rows)\n df.to_csv('results.csv')\n'''\n\"\"\"\nCombinations:\n 'random_sampling',\n 'mc_dropout',\n 'entropy_based',\n 'augmentations_based',\n\n 'fixmatch',\n 'fixmatch_pretrained',\n 'fixmatch_with_al_augmentations_based',\n 'fixmatch_with_al_augmentations_based_pretrained',\n 'fixmatch_with_al_entropy_based',\n 'fixmatch_with_al_entropy_based_pretrained',\n 'fixmatch_with_al_mc_dropout',\n 'fixmatch_with_al_mc_dropout_pretrained'\n\n 'simclr',\n 'simclr_pretrained',\n 'simclr_with_al_augmentations_based',\n 'fixmatch_with_al_augmentations_based_pretrained_simclr',\n 'simclr_with_al_entropy_based',\n 'fixmatch_with_al_entropy_based_pretrained_simclr',\n 'simclr_with_al_mc_dropout',\n 'fixmatch_with_al_mc_dropout_pretrained_simclr'\n\n 'random_sampling',\n 'mc_dropout',\n 'entropy_based',\n 'augmentations_based',\n 'random_sampling_pretrained',\n 'mc_dropout_pretrained',\n 'entropy_based_pretrained',\n 'augmentations_based_pretrained',\n\n 'auto_encoder',\n 'auto_encoder_pretrained',\n 'auto_encoder_with_al_augmentations_based',\n 'auto_encoder_with_al_augmentations_based_pretrained',\n 'auto_encoder_with_al_entropy_based',\n 'auto_encoder_with_al_entropy_based_pretrained',\n 'auto_encoder_with_al_mc_dropout',\n 'auto_encoder_with_al_mc_dropout_pretrained'\n\n 'augmentations_based',\n 'augmentations_based_pretrained',\n 'simclr_with_al_augmentations_based',\n 'auto_encoder_with_al_augmentations_based'\n\n 'fixmatch',\n 'fixmatch_pretrained',\n 'fixmatch_pretrained_simclr',\n 'fixmatch_pretrained_autoencoder',\n\"\"\"\n","sub_path":"code/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":30956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"561616488","text":"# v.1\n# Задача на программирование: очередь с приоритетами\n# Первая строка входа содержит число операций 1≤n≤105. 
Каждая из последующих n строк задают операцию одного\n# из следующих двух типов:\n# Insert x, где 0≤x≤109 — целое число;\n# ExtractMax.\n# Первая операция добавляет число x в очередь с приоритетами, вторая — извлекает максимальное число и выводит его.\n#\n# Sample Input:\n# 6\n# Insert 200\n# Insert 10\n# ExtractMax\n# Insert 5\n# Insert 500\n# ExtractMax\n# Sample Output:\n# 200\n# 500\n#\n\n\nclass MaxHeap:\n\n def __init__(self):\n \"\"\"\n Create an empty list that will be used same way as array to store heap data\n List is useful as it can store data of any type\n node[i] -> parent node[i/2]\n node[i] -> child_0[2i]\n -> child_1[2i+1]\n :return: None (constructor cannot return any other value than None)\n \"\"\"\n self.heap_list = ['empty']\n\n def shift_up(self):\n index = len(self.heap_list) - 1\n # print('index=', index, self.heap_list[index])\n while index > 1:\n value = self.heap_list[index]\n parent_index = index//2\n parent_value = self.heap_list[parent_index]\n # print('shift_up:', index, value, parent_index, parent_value)\n if parent_value < value:\n # exchange nodes\n self.heap_list[parent_index] = value\n self.heap_list[index] = parent_value\n index = parent_index\n else:\n break\n\n def shift_down(self):\n index = 1\n last_index = len(self.heap_list) - 1\n # print('index=', index, self.heap_list[index])\n child_list = []\n while index <= last_index:\n value = self.heap_list[index]\n if 2*index <= last_index:\n child_list.append([self.heap_list[2*index], 2*index])\n if 2*index+1 <= last_index:\n child_list.append([self.heap_list[2*index+1], 2*index+1])\n # print('child_list:', child_list)\n if len(child_list) == 0:\n break\n else:\n # child_list.sort(reverse=True)\n child_list.sort()\n # child_value, child_index = child_list[0]\n child_value, child_index = child_list.pop()\n # print('shift_down:', index, value, child_index, child_value)\n if child_value > value:\n # exchange nodes\n self.heap_list[child_index] = value\n self.heap_list[index] = child_value\n index = child_index\n child_list.clear()\n else:\n break\n\n def insert(self, elem):\n self.heap_list.append(elem)\n self.shift_up()\n\n def extract_max(self):\n value = self.heap_list[1]\n self.heap_list[1] = self.heap_list[-1]\n del self.heap_list[len(self.heap_list) - 1]\n self.shift_down()\n return value\n\n def print_all(self):\n print(self.heap_list)\n\n\ndef main():\n # Create MaxHeap\n mh = MaxHeap()\n\n # Get input data\n n = int(input())\n\n for i in range(n):\n comm = input()\n # print(comm)\n if 'Insert' in comm:\n op, arg = comm.split()\n mh.insert(int(arg))\n else:\n print(mh.extract_max())\n\n # mh.print_all()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Step3-x-x/Step3-3-8-v1.py","file_name":"Step3-3-8-v1.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148607667","text":"class Stack:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\n\ndef to_postfix(infix):\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n opstack = Stack()\n postfix = []\n token_list = list(infix)\n for token in token_list:\n if token in \"0123456789\":\n postfix.append(token)\n elif token == '(':\n opstack.push(token)\n 
elif token == ')':\n top_token = opstack.pop()\n while top_token != '(':\n postfix.append(top_token)\n top_token = opstack.pop()\n else:\n while (not opstack.isEmpty()) and (prec[opstack.peek()] >= prec[token]):\n postfix.append(opstack.pop())\n opstack.push(token)\n\n while not opstack.isEmpty():\n postfix.append(opstack.pop())\n return \"\".join(postfix)\n","sub_path":"infixtopostfix.py","file_name":"infixtopostfix.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257822612","text":"#coding=utf-8\nimport requests\nimport json\nimport sqlite3\nimport urllib\nimport psycopg2\nimport logging\nimport json\nfrom dbacc import *\n\n\nclass PgSQLStore(object):\n settings = None\n conn = None\n cur = None\n #schema = 'jobs'\n schema = 'public'\n jobs_table = 'jobs'\n employers_table = 'employers'\n employer_feed_settings_table = 'employer_feed_settings'\n employers_table = 'employers'\n db = {'default': None, 'legacy': None}\n\n\n def __init__(self, env, log_is_enabled=False):\n if log_is_enabled:\n print(\"log is enabled!\")\n if env == 'prod':\n self.db['default'] = prod\n self.db['legacy'] = coreapi_prod\n elif env == 'dev':\n self.db['default'] = dev\n self.db['legacy'] = coreapi_dev\n\n @classmethod\n def from_crawler(self, crawler):\n self.settings = crawler.settings\n return self(self.settings)\n\n def getSettings(self):\n return self.settings\n\n def dbopen(self, option='default'):\n if self.conn is None:\n self.dbname = self.db[option]['dbname']\n self.dbhost = self.db[option]['dbhost']\n self.dbport = self.db[option]['dbport']\n self.dbuser = self.db[option]['dbuser']\n self.dbpass = self.db[option]['dbpass']\n self.conn = psycopg2.connect(dbname=self.dbname, user=self.dbuser, password=self.dbpass, host=self.dbhost, port=self.dbport)\n self.cur = self.conn.cursor()\n\n def dbclose(self):\n if self.cur is not None:\n self.cur.close()\n self.cur = None\n if self.conn is not None:\n self.conn.close()\n self.conn = None\n\n def _get_fld_list(self, table, dbclose=False, option='default'):\n if '.' 
in table:\n table = table.split('.').pop()\n self.dbopen(option)\n self.cur.execute(\n \"SELECT column_name FROM information_schema.columns WHERE table_schema = %s AND table_name = %s\",\n (self.schema, table))\n res = self.cur.fetchall()\n if res is not None:\n res = map(lambda i: i[0], res)\n if dbclose:\n self.dbclose()\n return res\n\n def _get(self, table, field_list=None, where='', data=None, option='default'):\n self.dbopen(option)\n if field_list is None:\n field_list = self._get_fld_list(table)\n sql = ' '.join(['SELECT', ','.join(field_list), 'FROM', table, 'WHERE', where, ';'])\n if data is None:\n self.cur.execute(sql)\n elif type(data) is tuple or type(data) is list:\n self.cur.execute(sql, data)\n else:\n raise Exception(self.__class__ + ':data must be tuple or list!')\n data = self.cur.fetchall()\n res = []\n for row in data:\n d = {}\n for i in range(len(row)):\n d[field_list[i]] = row[i]\n res.append(d)\n self.dbclose()\n return res\n\n def _get_part(self, table, field_list=None, tail='', data=None, option='default'):\n self.dbopen(option)\n if field_list is None:\n field_list = self._get_fld_list(table)\n sql = ' '.join(['SELECT', ','.join(field_list), 'FROM', table, tail, ';'])\n if data is None:\n self.cur.execute(sql)\n elif type(data) is tuple or type(data) is list:\n self.cur.execute(sql, data)\n else:\n raise Exception(self.__class__ + ':data must be tuple or list!')\n data = self.cur.fetchall()\n res = []\n for row in data:\n d = {}\n for i in range(len(row)):\n d[field_list[i]] = row[i]\n res.append(d)\n self.dbclose()\n return res\n\n def _getraw(self, sql, field_list, data=None, option='default'):\n self.dbopen(option)\n if data is None:\n # print sql\n self.cur.execute(sql)\n elif type(data) is tuple or type(data) is list:\n self.cur.execute(sql, data)\n else:\n raise Exception(self.__class__ + ':data must be tuple or list!')\n data = self.cur.fetchall()\n res = []\n for row in data:\n if len(field_list) != len(row):\n raise Exception('Number fields in fields list no match number columns in result!')\n d = {}\n for i in range(len(row)):\n d[field_list[i]] = row[i]\n res.append(d)\n self.dbclose()\n return res\n\n def _exec(self, sql, data=None):\n self.dbopen()\n if data is None:\n res = self.cur.execute(sql)\n elif type(data) is tuple or type(data) is list:\n res = self.cur.execute(sql, data)\n else:\n raise Exception(self.__class__ + ':data must be tuple or list!')\n self.conn.commit()\n return res\n\n def _count_rows(self, table, where=None, data=None):\n if where is None:\n sql = ' '.join(['SELECT count(*) AS count FROM', table])\n res = self._getraw(sql, ['count'])\n else:\n sql = ' '.join(['SELECT count(*) AS count FROM', table, 'WHERE', where])\n if data is None:\n res = self._getraw(sql, ['count'])\n else:\n res = self._getraw(sql, ['count'], data)\n return int(res[0]['count'])\n\n\nclass PgSQLStoreMonitor(PgSQLStore):\n\n def getJobs(self, employer_id=None, status=None, fld_list=None, offset=0, limit=None):\n table = '.'.join([self.schema, self.jobs_table])\n if status is None:\n if employer_id is not None:\n return self._get_part(table, field_list=fld_list, tail='WHERE employer_id=%s ORDER BY id OFFSET %s LIMIT %s', data=[employer_id, offset, limit])\n else:\n return self._get_part(table, field_list=fld_list, tail='ORDER BY id OFFSET %s LIMIT %s', data=[offset, limit])\n else:\n if employer_id is not None:\n return self._get_part(table, field_list=fld_list, tail='WHERE employer_id=%s AND status=%s ORDER BY id OFFSET %s LIMIT %s', data=[employer_id, 
status, offset, limit])\n else:\n return self._get_part(table, field_list=fld_list, tail='WHERE status=%s ORDER BY id OFFSET %s LIMIT %s', data=[status, offset, limit])\n\n\n\n def countJobs(self, employer_id=None, status=None):\n table = '.'.join([self.schema, self.jobs_table])\n if status is None:\n if employer_id is not None:\n return self._count_rows(table, 'employer_id=%s', [employer_id])\n else:\n return self._count_rows(table)\n else:\n if employer_id is not None:\n return self._count_rows(table, 'employer_id=%s AND status=%s', [employer_id, status])\n else:\n return self._count_rows(table, 'status=%s', [status])\n\n\n def getCompanies(self, employer_id=None):\n if employer_id is None:\n sql = 'SELECT employer_id, company_slug FROM jobs GROUP BY employer_id, company_slug'\n return self._getraw(sql, field_list=['employer_id', 'company_slug'], data=None)\n else:\n sql = 'SELECT employer_id, company_slug FROM jobs WHERE employer_id=%s GROUP BY employer_id, company_slug'\n return self._getraw(sql, field_list=['employer_id', 'company_slug'], data=[employer_id])\n\n\n def getCompanyProfile(self, company_slug):\n sql = 'SELECT profile FROM company WHERE slug=%s'\n res = self._getraw(sql, field_list=['profile'], data=[company_slug], option='legacy')\n if type(res) is list and len(res) > 0:\n return res[0]['profile']\n return {}\n\n def getCompanyName(self, company_slug):\n sql = 'SELECT name FROM company WHERE slug=%s'\n res = self._getraw(sql, field_list=['name'], data=[company_slug], option='legacy')\n if type(res) is list and len(res) > 0:\n return res[0]['name']\n return ''\n\n def getCompanyDescription(self, company_slug):\n sql = 'SELECT description FROM company WHERE slug=%s'\n res = self._getraw(sql, field_list=['description'], data=[company_slug], option='legacy')\n if type(res) is list and len(res) > 0:\n return res[0]['description']\n return ''\n\n def getDemoJob(self, slug):\n table = self.jobs_table\n res = self._get(table, field_list=None, where='slug=%s', data=[slug])\n if len(res) > 0:\n return res[0]\n return None\n\n def getEmployerUid(self, employer_id):\n table = self.employers_table\n res = self._get(table, ['uid'], where='id=%s', data=[employer_id])\n if len(res) > 0:\n return res[0]['uid']\n return None\n\n def getAdminToken(self):\n sql = 'SELECT id FROM extendedaccesstoken WHERE userid=(SELECT id FROM xuser WHERE isadmin = TRUE)'\n res = self._getraw(sql, field_list=['id'], data=None, option='legacy')\n if len(res) > 0:\n return res[0]['id']\n return None\n\n def getCandidateByEmail(self, email):\n table = 'candidates'\n res = self._get(table, field_list=None, where=\"email=%s AND name='monitor-candidate'\", data=[email])\n if len(res) > 0:\n return res[0]\n return None\n\n def getEmployerLogo(self, job):\n table = '.'.join([self.schema, self.employers_table])\n res = self._get(table, ['uid'], 'id=%s', [job['employer_id']])\n if len(res) > 0:\n uid = res[0]['uid']\n sql = \"SELECT profile->>'logo' AS logo FROM company WHERE profile->>'uid' = %s\"\n res = self._getraw(sql, field_list=['logo'], data=[uid], option='legacy')\n if len(res) > 0 and res[0]['logo'] is not None:\n return 'https://api.xtramile.io/api/v1/files/%s/download' % res[0]['logo']\n return None\n\n\n\n","sub_path":"monitor/monitor/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"496233162","text":"\n# -*- coding: utf-8 -*-\n\nimport itertools\n\nimport numpy as 
np\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport random\n\nimport sklearn.metrics as metrics\n\nfrom sklearn.datasets import load_iris\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\nfrom GCForest_four_nfs import gcForest\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(cm, classes, normalize=False,\n\n title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n\n plt.title(title)\n\n plt.colorbar()\n\n tick_marks = np.arange(len(classes))\n\n plt.xticks(tick_marks, classes, rotation=45)\n\n plt.yticks(tick_marks, classes)\n\n\n\n if normalize:\n\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n print(\"Normalized confusion matrix\")\n\n else:\n\n print('Confusion matrix, without normalization')\n\n\n\n thresh = cm.max() / 2.\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n\n plt.text(j, i, cm[i, j],\n\n horizontalalignment=\"center\",\n\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\n\n plt.tight_layout()\n\n plt.ylabel('True label')\n\n plt.xlabel('Predicted label')\n\n plt.show()\n\n\n\n\n\ndef gcf(X_train, X_test, y_train, y_test, cnames):\n\n\n\n clf = gcForest(shape_1X=(1, 18988),window=[1000,2000],stride=10)\n\n clf.fit(X_train, y_train)\n\n\n\n y_pred = clf.predict(X_test)\n print(y_pred)\n\n\n\n\n # print('accuracy:', metrics.accuracy_score(y_test, y_pred))\n\n #print('kappa:', metrics.cohen_kappa_score(y_test, y_pred))\n\n #print(metrics.classification_report(y_test, y_pred, target_names=cnames))\n\n\n\n #cnf_matrix = metrics.confusion_matrix(y_test, y_pred)\n\n #plot_confusion_matrix(cnf_matrix, classes=cnames, normalize=True,\n\n # title='Normalized confusion matrix')\ndef numeric(target):\n target = pd.to_numeric(target)\n return target\ndef normalize01(target):\n target = pd.to_numeric(target)\n # print(target)\n target_min = target.min()\n target_max = target.max()\n\n\n target_normal = (target-target_min)/(target_max-target_min)\n # print(\"min:\",target_normal.min())\n # print(\"max:\",target_normal.max())\n # print(\"target_normal:\",target_normal)\n return target_normal\ndef normalize(target):\n #z-score 标准化\n # print(target)\n mean = target.mean()\n std = target.std()\n # print(\"mean:\",mean,\" std:\",std)\n target_normal = (target-mean)/std\n # print(\"target_normal:\",target_normal)\n # print(\"mean:\",target_normal.mean())\n # print(\"std:\",target_normal.std())\n return target_normal\n###1:resistant 0:sensitive 2:none\ndef labeldata(drugdata,target_name):\n # print(drugdata.shape)\n label_y= pd.Series(range(0,drugdata.shape[0]))\n # print(label_y)\n for i in range(0,drugdata.shape[0]):\n if drugdata[target_name][i] >0.8:\n label_y[i] = 1\n elif drugdata[target_name][i] <-0.8:\n label_y[i] = 0\n else:\n label_y[i] = 2\n # print(label_y)\n drugdata[\"label_y\"] = label_y\n return drugdata\n\n\ndef five_fold(n):\n L=[]\n np.random.seed(5)\n numbers = np.random.permutation(range(n))\n print(numbers)\n if n==8 :\n a1 = numbers[0:2]\n L.append(a1)\n a2 = numbers[2:4]\n L.append(a2)\n a3 = numbers[4:6]\n L.append(a3)\n a4 = numbers[6:7]\n L.append(a4)\n a5 = numbers[7:]\n L.append(a5)\n else:\n a1 = numbers[0:round(n/5)]\n L.append(a1)\n a2 = numbers[round(n/5):2*round(n/5)]\n L.append(a2)\n a3 = numbers[2*round(n/5):3*round(n/5)]\n L.append(a3)\n a4 = numbers[3*round(n/5):4*round(n/5)]\n L.append(a4)\n a5 = numbers[4*round(n/5):]\n L.append(a5)\n return L\n\n\n\nif __name__ == 
'__main__':\n ########################################drug response part#################################################\n # read drug response file\n cgp_drugdata = pd.DataFrame(pd.read_csv('drug.csv'))\n\n # print(cgp_drugdata.columns)\n cgp_drugdata.info()\n # normalize ic50\n ic50 = cgp_drugdata[\"LN_IC50\"]\n # print(type(ic50))\n ic50_normal = normalize(ic50)\n cgp_drugdata[\"ic50_normal\"]=ic50_normal\n\n\n\n # label (sensitive or resistant)according to ic50_normal\n cgp_drugdata = labeldata(cgp_drugdata,\"ic50_normal\")\n\n\n # order ccle_drugdata by ic50_normal\n cgp_drugdata=cgp_drugdata.sort_values(by=\"ic50_normal\", ascending=False)\n # cgp_drugdata[\"ic50_normal\"].plot(kind=\"bar\")\n # plt.show()\n #\n # output\n # cgp_drugdata.to_csv('cgp_drugdata.csv')\n #\n cgp_drugdata2 = cgp_drugdata[cgp_drugdata[\"label_y\"]!=2]\n cgp_drugdata2.to_csv('cgp_drugdata2.csv')\n drug_for1 = np.unique(cgp_drugdata[cgp_drugdata[\"label_y\"]==1][\"DRUG_ID\"])\n drug_for0 = np.unique(cgp_drugdata[cgp_drugdata[\"label_y\"]==0][\"DRUG_ID\"])\n drugs = [val for val in list(drug_for1) if val in list(drug_for0)]\n print(drugs)\n\n\n ########################################gene exprSet part#################################################\n # read gene exprset file\n L=[]\n file=open(\"gene.txt\",\"r\")\n line=file.readline()\n while line:\n line=line.split()\n L.append(line)\n line=file.readline()\n cgp_exprSet = pd.DataFrame(L)\n\n cgp_exprSet2=cgp_exprSet.T\n # print(cgp_exprSet2)\n cgp_exprSet3 = cgp_exprSet2.drop([0]).reset_index(drop=True)\n cgp_exprSet3.columns=cgp_exprSet2.iloc[0,:].tolist()\n # print(cgp_exprSet3)\n # cgp_exprSet3.to_csv('cgp_exprSet3.csv')\n cgp_exprSet4 = pd.DataFrame(index=cgp_exprSet3.index)\n cgp_exprSet4[cgp_exprSet3.columns[0]]=cgp_exprSet3.iloc[:,0]\n for i in range(1,17738):\n each_column = cgp_exprSet3[cgp_exprSet3.columns[i]]\n cgp_exprSet4[cgp_exprSet3.columns[i]] = normalize01(each_column)\n # print(cgp_exprSet4)\n cgp_exprSet4.info()\n # cgp_exprSet4.to_csv('cgp_exprSet4.csv')\n\n\n\n\n\n\n ########################################copy number part#################################################\n # read copy number file\n L=[]\n file=open(\"cna.txt\",\"r\")\n line=file.readline()\n while line:\n line=line.split()\n L.append(line)\n line=file.readline()\n cgp_cnaSet = pd.DataFrame(L)\n cgp_cnaSet2 = cgp_cnaSet.drop([0]).reset_index(drop=True)\n cgp_cnaSet2.columns=cgp_cnaSet.iloc[0,:].tolist()\n # print(cgp_cnaSet2)\n # cgp_cnaSet2.info()\n cgp_cnaSet3 = pd.DataFrame(index=cgp_cnaSet2.index)\n cgp_cnaSet3[cgp_cnaSet2.columns[0]]=cgp_cnaSet2.iloc[:,0]\n for i in range(1,426):\n each_column = cgp_cnaSet2[cgp_cnaSet2.columns[i]]\n cgp_cnaSet3[cgp_cnaSet2.columns[i]] = numeric(each_column)\n cgp_cnaSet3.info()\n # cgp_cnaSet3.to_csv('cgp_cnaSet3.csv')\n\n\n\n\n\n\n\n\n ##################################generate x and y for each drug######################################\n drugs = [1,5,29,32,34,37,38,41,45,52,55,56,62,71,88]\n # for i in cgp_drugdata[\"DRUG_ID\"].tolist():\n # if i not in drugs:\n # drugs.append(i)\n # print(drugs)\n\n #####choose the same celllines in both gene and cna\n cgp_exprSet4.rename(columns={\"ensembl_gene\":\"COSMIC_ID\"},inplace=True)##inplace=True表示在原数据上进行操作\n cgp_cnaSet3.rename(columns={\"Name\":\"COSMIC_ID\"},inplace=True)\n exprCName = cgp_exprSet4[\"COSMIC_ID\"]\n # print(list(exprCName))\n cnaCName = cgp_cnaSet3[\"COSMIC_ID\"]\n # print(list(cnaCName))\n conCName = [val for val in list(exprCName) if val in list(cnaCName)]\n 
conCName.remove(\"905954\")\n conCName.remove(\"905954\")\n conCName.remove(\"909976\")\n conCName.remove(\"909976\")\n conCName.remove(\"1330983\")\n conCName.remove(\"1330983\")\n conCName.remove(\"1503362\")\n conCName.remove(\"1503362\")\n print(\"con-cellline:\",list(conCName))\n # pd.DataFrame(list(conCName)).to_csv(\"conCName.csv\")\n\n\n #####for each drug\n all_drug_acc=[]\n for i in range(15):\n each_drug_acc = []\n drug=drugs[i]\n each_drug_acc.append(drug)\n each_drugdata = pd.DataFrame(cgp_drugdata2[cgp_drugdata2[\"DRUG_ID\"]==drugs[i]])\n # each_drugdata.to_csv('each_drugdata.csv')\n print(each_drugdata)\n\n\n ######~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~prepare x and y(gene part)~~~~~~~~~~~~~~~~~~~~~~~~~####################\n cgp_exprSet4['COSMIC_ID'] = pd.to_numeric(cgp_exprSet4['COSMIC_ID'])\n cgp_exprSet5 = cgp_exprSet4[cgp_exprSet4['COSMIC_ID'].isin(list(conCName))]\n cgp_exprSet5[\"COSMIC_ID\"].to_csv(\"cgp_exprSet5.csv\")\n\n each_data = pd.merge(each_drugdata,cgp_exprSet5,on=\"COSMIC_ID\",how=\"inner\")\n print(each_data)\n ###########################################test gcForest######################################33\n random.seed(5)\n randomCols = random.sample(range(10,17747),400)\n x = each_data.iloc[:,randomCols]\n y = each_data.iloc[:,9]\n x.index=each_data[\"COSMIC_ID\"]\n y.index=each_data[\"COSMIC_ID\"]\n x.sort_index()\n y.sort_index()\n print(x)\n print(y)\n folds_expr = five_fold(x.shape[0])\n pd.DataFrame(folds_expr).to_csv(\"nfs2/folds_expr.csv\")\n\n\n\n ######~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~prepare x and y(cna part)~~~~~~~~~~~~~~~~~~~~~~~~~####################\n cgp_cnaSet3['COSMIC_ID'] = pd.to_numeric(cgp_cnaSet3['COSMIC_ID'])\n cgp_cnaSet4 = cgp_cnaSet3[cgp_cnaSet3['COSMIC_ID'].isin(list(conCName))]\n cgp_cnaSet4[\"COSMIC_ID\"].to_csv(\"cgp_cnaSet4.csv\")\n each_cna_data = pd.merge(each_drugdata,cgp_cnaSet4,on=\"COSMIC_ID\",how=\"inner\")\n ###########################################test gcForest######################################33\n random.seed(5)\n randomCols = random.sample(range(10,435),400)\n x_cna = each_cna_data.iloc[:,randomCols]\n y_cna = each_cna_data.iloc[:,9]\n x_cna.index=each_cna_data[\"COSMIC_ID\"]\n y_cna.index=each_cna_data[\"COSMIC_ID\"]\n x_cna.sort_index()\n y_cna.sort_index()\n folds_cna = five_fold(x_cna.shape[0])\n pd.DataFrame(folds_cna).to_csv(\"nfs2/folds_cna.csv\")\n\n ############################################five fold##########################################\n feature_flag = \"\"\n five_results = []\n for j in range(5):\n each_fold_result=[]\n each_fold_result.append(j)\n X_test = x.iloc[folds_expr[j],:]\n y_test = y.iloc[folds_expr[j]]\n X_train = x.iloc[list(set(range(x.shape[0])).difference(set(folds_expr[j]))),:]\n y_train = y.iloc[list(set(range(x.shape[0])).difference(set(folds_expr[j])))]\n X_test.to_csv(\"nfs2/\"+str(j)+str(drug)+\"_X_test_expr.csv\")\n y_test.to_csv(\"nfs2/\"+str(j)+str(drug)+\"_y_test_expr.csv\")\n train_cellline = X_train.index\n print(\"train_cellline:\",train_cellline)\n test_cellline = X_test.index\n print(\"test_cellline:\",test_cellline)\n\n X_test_cna = x_cna.iloc[folds_cna[j],:]\n y_test_cna = y_cna.iloc[folds_cna[j]]\n X_train_cna = x_cna.iloc[list(set(range(x.shape[0])).difference(set(folds_cna[j]))),:]\n y_train_cna = y_cna.iloc[list(set(range(x.shape[0])).difference(set(folds_cna[j])))]\n X_test_cna.to_csv(\"nfs2/\"+str(j)+str(drug)+\"_X_test_cna.csv\")\n y_test_cna.to_csv(\"nfs2/\"+str(j)+str(drug)+\"_y_test_cna.csv\")\n\n\n ######################mgs expr part################\n 
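# Multi-grained scanning of the expression features: gcForest slides windows of size 100 and 200 (stride 2)\n            # over the 400 sampled features and turns the train/test folds into per-window transformed feature\n            # vectors that are fed to the cascade forest further below.\n            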
levels = np.unique(np.array(y_train))\n print(\"levels:\",levels)\n File = open(\"nfs2/\"+str(j)+str(drug)+\".txt\", \"w\")\n File.write(\"levels:\"+str(levels)+\"\\n\")\n clf = gcForest(shape_1X=(1, 400),window=[100,200],stride=2,levels=levels,f=File)\n if np.shape(X_train)[0] != len(y_train):\n raise ValueError('Sizes of y and X do not match.')\n expr_mgs_X = clf.mg_scanning(np.array(X_train), np.array(y_train))\n expr_window1 = expr_mgs_X[0]\n expr_window2 = expr_mgs_X[1]\n expr_mgs_X_test = clf.mg_scanning(np.array(X_test))\n expr_window1_test = expr_mgs_X_test[0]\n expr_window2_test = expr_mgs_X_test[1]\n\n\n\n\n\n\n\n ######################mgs cna part################\n clf = gcForest(shape_1X=(1, 400),window=[100,200],stride=2,levels=levels,f=File)\n if np.shape(X_train_cna)[0] != len(y_train_cna):\n raise ValueError('Sizes of y and X do not match.')\n cna_mgs_X = clf.mg_scanning(np.array(X_train_cna), np.array(y_train_cna))\n cna_window1 = cna_mgs_X[0]\n cna_window2 = cna_mgs_X[1]\n cna_mgs_X_test = clf.mg_scanning(np.array(X_test_cna))\n cna_window1_test = cna_mgs_X_test[0]\n cna_window2_test = cna_mgs_X_test[1]\n\n\n\n\n\n ######################cascade expr_cna part################\n train_predict_y = clf.cascade_forest(expr_window1,expr_window2,cna_window1,cna_window2, np.array(y_train))\n\n\n\n #####################predict values###########################\n cascade_all_pred_prob = clf.cascade_forest(expr_window1_test,expr_window2_test,cna_window1_test,cna_window2_test)\n predict_proba = np.mean(cascade_all_pred_prob, axis=0)\n pd.DataFrame(predict_proba).to_csv(\"nfs2/\"+str(j)+str(drug)+\"_predict_proba.csv\")\n predictions = levels[np.argmax(predict_proba, axis=1)]\n pd.DataFrame(predictions).to_csv(\"nfs2/\"+str(j)+str(drug)+\"_predictions.csv\")\n prediction_accuracy = accuracy_score(y_true=y_test, y_pred=predictions)\n each_fold_result.append(prediction_accuracy)\n print('Layer validation accuracy = {}'.format(prediction_accuracy))\n File.write('prediction_accuracy = {}'.format(prediction_accuracy)+\"\\n\")\n # tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()\n # specificity = tn /float(tn+fp)\n # each_fold_result.append(specificity)\n # print(\"specificity:\",specificity)\n # File.write('specificity = {}'.format(specificity)+\"\\n\")\n # sensitivity= tp/float(tp+fn)\n # each_fold_result.append(sensitivity)\n # print(\"sensitivity:\",sensitivity)\n # File.write('sensitivity = {}'.format(sensitivity)+\"\\n\")\n File.close()\n five_results.append(each_fold_result)\n pd.DataFrame(five_results).to_csv(\"nfs2/\"+str(drug)+\"five_results.csv\")\n print(five_results)\n each_drug_acc.append(pd.DataFrame(five_results).iloc[:,1].mean())\n all_drug_acc.append(each_drug_acc)\n pd.DataFrame(all_drug_acc).to_csv(\"nfs2/all_drug_acc.csv\")\n\n\n","sub_path":"cgp_gcForest/no_fs2.py","file_name":"no_fs2.py","file_ext":"py","file_size_in_byte":14802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383273013","text":"# Given a circular array (the next element of the last element is the first element of the array), print the Next Greater Number for every element. The Next Greater Number of a number x is the first greater number to its traversing-order next in the array, which means you could search circularly to find its next greater number. 
If it doesn't exist, output -1 for this number.\n\n# Example 1:\n# Input: [1,2,1]\n# Output: [2,-1,2]\n# Explanation: The first 1's next greater number is 2; \n# The number 2 can't find next greater number; \n# The second 1's next greater number needs to search circularly, which is also 2.\n# Note: The length of given array won't exceed 10000.\n\n\n\n\nclass Solution(object):\n def nextGreaterElements(self, nums):\n stack, res = [], [-1] * len(nums)\n for i in range(len(nums)) * 2:\n while stack and (nums[stack[-1]] < nums[i]):\n res[stack.pop()] = nums[i]\n stack.append(i)\n return res\n\n\n\n def nextGreaterElements(self, nums): # TLE on the last one\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n if not nums:\n return []\n res = []\n ma, n = max(nums), len(nums)\n for i in xrange(n):\n if nums[i] == ma:\n res.append(-1)\n else:\n for j in xrange(i+1, i+n):\n if nums[j%n] > nums[i]:\n res.append(nums[j%n])\n break\n return res","sub_path":"Google/2. medium/503. Next Greater Element II.py","file_name":"503. Next Greater Element II.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209530844","text":"import torch\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\n\nx = Variable(torch.linspace(-5, 5, 200))\nx_np = x.data.numpy()\n\ny_relu = torch.relu(x).data.numpy()\ny_sigmoid = torch.sigmoid(x).data.numpy()\ny_tan = torch.tanh(x).data.numpy()\ny_softplus = torch.nn.functional.softplus(x).data.numpy()\n\nplt.figure(1, figsize = (8, 6))\nplt.subplot(221)\nplt.plot(x_np, y_relu, c = 'red', label = 'relu')\nplt.legend(loc = 'best')\nplt.ylim(-0.2, 1.2)\n\nplt.figure(1, figsize = (8, 6))\nplt.subplot(222)\nplt.plot(x_np, y_sigmoid, c = 'yellow', label = 'sigmoid')\nplt.legend(loc = 'best')\nplt.ylim(-0.2, 1.2)\n\nplt.figure(1, figsize = (8, 6))\nplt.subplot(223)\nplt.plot(x_np, y_tan, c = 'blue', label = 'tanh')\nplt.legend(loc = 'best')\nplt.ylim(-1, +1)\n\nplt.figure(1, figsize = (8, 6))\nplt.subplot(224)\nplt.plot(x_np, y_softplus, c = 'green', label = 'softplus')\nplt.legend(loc = 'best')\nplt.ylim(0, +6)\n\nplt.show()","sub_path":"Pytorch/pytorch_example3(Activition).py","file_name":"pytorch_example3(Activition).py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"475189679","text":"# coding=utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport os\nimport json\nimport glob\nimport random\nimport collections\nimport math\nimport time\nimport tensorflow.contrib.slim as slim\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_dir\", help=\"path to folder containing images\")\nparser.add_argument(\"--A_dir\", help=\"path to folder containing A images\")\nparser.add_argument(\"--B_dir\", help=\"path to folder containing B images\")\nparser.add_argument(\"--D_dir\", help=\"path to folder containing D images\")\n\nparser.add_argument(\"--mode\", required=True, choices=[\"train\", \"test\", \"export\"])\nparser.add_argument(\"--output_dir\", required=True, help=\"where to put output files\")\nparser.add_argument(\"--seed\", type=int)\nparser.add_argument(\"--checkpoint\", default=None,\n help=\"directory with checkpoint to resume training from or use for 
testing\")\n\nparser.add_argument(\"--max_steps\", type=int, help=\"number of training steps (0 to disable)\")\nparser.add_argument(\"--max_epochs\", type=int, help=\"number of training epochs\")\nparser.add_argument(\"--summary_freq\", type=int, default=100, help=\"update summaries every summary_freq steps\")\nparser.add_argument(\"--progress_freq\", type=int, default=50, help=\"display progress every progress_freq steps\")\nparser.add_argument(\"--trace_freq\", type=int, default=0, help=\"trace execution every trace_freq steps\")\nparser.add_argument(\"--display_freq\", type=int, default=0,\n help=\"write current training images every display_frelina1\"\n \"q steps\")\nparser.add_argument(\"--save_freq\", type=int, default=1000, help=\"save model every save_freq steps, 0 to disable\")\n\nparser.add_argument(\"--aspect_ratio\", type=float, default=1.0, help=\"aspect ratio of output images (width/height)\")\nparser.add_argument(\"--lab_colorization\", action=\"store_true\",\n help=\"split input image into brightness (A) and color (B)\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"number of images in batch\")\nparser.add_argument(\"--which_direction\", type=str, default=\"AtoB\", choices=[\"AtoB\", \"BtoA\"])\nparser.add_argument(\"--ngf\", type=int, default=64, help=\"number of generator filters in first conv layer\")\nparser.add_argument(\"--ndf\", type=int, default=64, help=\"number of discriminator filters in first conv layer\")\nparser.add_argument(\"--scale_size\", type=int, default=256, help=\"scale images to this size before cropping to 256x256\")\nparser.add_argument(\"--flip\", dest=\"flip\", action=\"store_true\", help=\"flip images horizontally\")\nparser.add_argument(\"--no_flip\", dest=\"flip\", action=\"store_false\", help=\"don't flip images horizontally\")\nparser.set_defaults(flip=False)\n\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"initial learning rate for adam\")\nparser.add_argument(\"--beta1\", type=float, default=0.5, help=\"momentum term of adam\")\nparser.add_argument(\"--l1_weight\", type=float, default=100.0, help=\"weight on L1 term for generator gradient\")\nparser.add_argument(\"--gan_weight\", type=float, default=1.0, help=\"weight on GAN term for generator gradient\")\n\n\n\n\n\n# export options\nparser.add_argument(\"--output_filetype\", default=\"png\", choices=[\"png\", \"jpeg\"])\na = parser.parse_args()\n\nEPS = 1e-12\nCROP_SIZE = 256\n\nExamples = collections.namedtuple(\"Examples\", \"paths_A, paths_B,paths_D ,label_B,a_image,b_image,d_images, count, steps_per_epoch\")\n\nModel = collections.namedtuple(\"Model\", \"g_loss_a2b, g_loss_b2a, g_loss,da_loss,g_loss_water \"\n \" db_loss1,db_loss2,db_loss_fake2,db_loss_real2,discrim_grads_and_vars, gen_grads_and_vars, outputs_a, output_b, train,label, fc8r\")\n\n\ndef preprocess(image):\n with tf.name_scope(\"preprocess\"):\n # [0, 1] => [-1, 1]\n return image * 2 - 1\n\n\ndef deprocess(image):\n with tf.name_scope(\"deprocess\"):\n # [-1, 1] => [0, 1]\n return (image + 1) / 2\n\n\ndef preprocess_lab(lab):\n with tf.name_scope(\"preprocess_lab\"):\n L_chan, a_chan, b_chan = tf.unstack(lab, axis=2)\n # L_chan: black and white with input range [0, 100]\n # a_chan/b_chan: color channels with input range ~[-110, 110], not exact\n # [0, 100] => [-1, 1], ~[-110, 110] => [-1, 1]\n return [L_chan / 50 - 1, a_chan / 110, b_chan / 110]\n\n\ndef deprocess_lab(L_chan, a_chan, b_chan):\n with tf.name_scope(\"deprocess_lab\"):\n # this is axis=3 instead of axis=2 because we 
process individual images but deprocess batches\n return tf.stack([(L_chan + 1) / 2 * 100, a_chan * 110, b_chan * 110], axis=3)\n\n\ndef augment(image, brightness):\n # (a, b) color channels, combine with L channel and convert to rgb\n a_chan, b_chan = tf.unstack(image, axis=3)\n L_chan = tf.squeeze(brightness, axis=3)\n lab = deprocess_lab(L_chan, a_chan, b_chan)\n rgb = lab_to_rgb(lab)\n return rgb\ndef lrelu1(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\ndef lrelu(x, a):\n with tf.name_scope(\"lrelu\"):\n # adding these together creates the leak part and linear part\n # then cancels them out by subtracting/adding an absolute value term\n # leak: a*x/2 - a*abs(x)/2\n # linear: x/2 + abs(x)/2\n\n # this block looks like it has 2 inputs on the graph unless we do this\n x = tf.identity(x)\n return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)\n\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n shape = input_.get_shape().as_list()\n\n with tf.variable_scope(scope or \"Linear\"):\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n bias = tf.get_variable(\"bias\", [output_size],initializer=tf.constant_initializer(bias_start))\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix) + bias\n\ndef check_image(image):\n assertion = tf.assert_equal(tf.shape(image)[-1], 3, message=\"image must have 3 color channels\")\n with tf.control_dependencies([assertion]):\n image = tf.identity(image)\n\n if image.get_shape().ndims not in (3, 4):\n raise ValueError(\"image must be either 3 or 4 dimensions\")\n\n # make the last dimension 3 so that you can unstack the colors\n shape = list(image.get_shape())\n shape[-1] = 3\n image.set_shape(shape)\n return image\n\n\ndef load_examples():\n\n #A_paths = glob.glob(os.path.join(a.A_dir, \"*.jpg\"))\n A_paths = glob.glob(os.path.join(a.A_dir, \"*.jpg\"))\n B_paths = glob.glob(os.path.join(a.B_dir, \"*.jpg\"))\n\n D_paths = glob.glob(os.path.join(a.D_dir, '*.jpg'))\n decode_A = tf.image.decode_jpeg\n if len(A_paths) == 0:\n A_paths = glob.glob(os.path.join(a.A_dir, \"*.png\"))\n decode_A = tf.image.decode_png\n\n if len(A_paths) == 0:\n raise Exception(\"input_dir contains no image files\")\n\n\n decode_D = tf.image.decode_jpeg\n if len(D_paths) == 0:\n D_paths = glob.glob(os.path.join(a.D_dir, \"*.png\"))\n decode_D = tf.image.decode_png\n\n if len(D_paths) == 0:\n raise Exception(\"input_dir contains no image files\")\n\n\n\n\n\n decode_B = tf.image.decode_jpeg\n if len(B_paths) == 0:\n B_paths = glob.glob(os.path.join(a.B_dir, \"*.png\"))\n decode_B = tf.image.decode_png\n\n if len(B_paths) == 0:\n raise Exception(\"input_dir contains no image files\")\n\n def get_name(path):\n name, _ = os.path.splitext(os.path.basename(path))\n return name\n\n # if the image names are numbers, sort by the value rather than asciibetically\n # having sorted inputs means that the outputs are sorted in test mode\n if all(get_name(path).isdigit() for path in A_paths) and all(get_name(path_B).isdigit() for path_B in B_paths) and all(get_name(path_D).isdigit() for path_D in D_paths):\n A_paths = sorted(A_paths, key=lambda path: int(get_name(path)))\n B_paths = sorted(B_paths, key=lambda path_B: int(get_name(path_B)))\n D_paths = sorted(D_paths, key=lambda path_D: int(get_name(path_D)))\n else:\n # random.shuffle(A_paths)\n # random.shuffle(B_paths)\n A_paths = sorted(A_paths)\n B_paths = 
sorted(B_paths)\n D_paths = sorted(D_paths)\n img_name_label_B = np.zeros((len(B_paths), 1))\n img_name_label_D = np.zeros((len(D_paths), 1))\n for i in range(len(B_paths)):\n name = B_paths[i].split('/')\n name_img = name[2]\n # name_img = name[7]\n nn = name_img.split('.')\n img_name = nn[0]\n label = int(img_name[-1])\n img_name_label_B[i][0] = label\n for i in range(len(D_paths)):\n name = D_paths[i].split('/')\n name_img = name[2]\n # name_img = name[7]\n nn = name_img.split('.')\n img_name = nn[0]\n label = int(img_name[-1])\n img_name_label_D[i][0] = label\n\n with open('fen000.csv', \"w\") as foo:\n np.savetxt(foo, img_name_label_B, delimiter=',')\n with open('fend000.csv', \"w\") as foo:\n np.savetxt(foo, img_name_label_D, delimiter=',')\n with tf.name_scope(\"load_images_A\"):\n path_queue = tf.train.string_input_producer(A_paths, shuffle=False)\n # tf.train.string_input_producer\n reader = tf.WholeFileReader()\n paths_A, contents = reader.read(path_queue)\n raw_input = decode_A(contents)\n raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)\n\n assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message=\"image does not have 3 channels\")\n with tf.control_dependencies([assertion]):\n raw_input = tf.identity(raw_input)\n raw_input.set_shape([None, None, 3])\n a_images = preprocess(raw_input)\n\n with tf.name_scope(\"load_images_B\"):\n path_queue = tf.train.string_input_producer(B_paths, shuffle=False)\n\n label_queue = tf.train.string_input_producer(['fen000.csv'], shuffle=False)\n reader_label = tf.TextLineReader()\n reader = tf.WholeFileReader()\n key, val = reader_label.read(label_queue)\n record_defaults = [[]]\n img_l = tf.decode_csv(val, record_defaults=record_defaults)\n # str_img=tf.cast(img_l,tf.string)\n label_tensor = tf.cast(img_l, tf.int32)\n img_label_B = tf.one_hot(label_tensor, 7)\n paths_B, contents = reader.read(path_queue)\n raw_input = decode_B(contents)\n raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)\n assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message=\"image does not have 3 channels\")\n with tf.control_dependencies([assertion]):\n raw_input = tf.identity(raw_input)\n raw_input.set_shape([None, None, 3])\n b_images = preprocess(raw_input)\n\n with tf.name_scope(\"load_images_D\"):\n path_queue = tf.train.string_input_producer(D_paths, shuffle=False)\n\n label_queue = tf.train.string_input_producer(['fend000.csv'], shuffle=False)\n reader_label = tf.TextLineReader()\n reader = tf.WholeFileReader()\n key, val = reader_label.read(label_queue)\n record_defaults = [[]]\n img_l = tf.decode_csv(val, record_defaults=record_defaults)\n # str_img=tf.cast(img_l,tf.string)\n label_tensor = tf.cast(img_l, tf.int32)\n img_label_D = tf.one_hot(label_tensor, 7)\n paths_D, contents = reader.read(path_queue)\n raw_input = decode_D(contents)\n raw_input = tf.image.convert_image_dtype(raw_input, dtype=tf.float32)\n assertion = tf.assert_equal(tf.shape(raw_input)[2], 3, message=\"image does not have 3 channels\")\n with tf.control_dependencies([assertion]):\n raw_input = tf.identity(raw_input)\n raw_input.set_shape([None, None, 3])\n d_images = preprocess(raw_input)\n\n # input and output images\n seed = random.randint(0, 2 ** 31 - 1)\n\n def transform(image):\n r = image\n if a.flip:\n r = tf.image.random_flip_left_right(r, seed=seed)\n\n # area produces a nice downscaling, but does nearest neighbor for upscaling\n # assume we're going to be doing downscaling here\n r = tf.image.resize_images(r, [a.scale_size, 
a.scale_size], method=tf.image.ResizeMethod.AREA)\n\n offset = tf.cast(tf.floor(tf.random_uniform([2], 0, a.scale_size - CROP_SIZE + 1, seed=seed)), dtype=tf.int32)\n if a.scale_size > CROP_SIZE:\n r = tf.image.crop_to_bounding_box(r, offset[0], offset[1], CROP_SIZE, CROP_SIZE)\n elif a.scale_size < CROP_SIZE:\n raise Exception(\"scale size cannot be less than crop size\")\n return r\n\n with tf.name_scope(\"A_images\"):\n a_images = transform(a_images)\n\n with tf.name_scope(\"target_images\"):\n b_images = transform(b_images)\n\n with tf.name_scope(\"target_imagesD\"):\n d_images = transform(d_images)\n\n img_label_B = tf.reshape(img_label_B, [1, 7, -1])\n img_label_D = tf.reshape(img_label_D, [1, 7, -1])\n paths_batch_A,paths_batch_B, paths_batch_D,a_images_batch, b_images_batch,d_images_batch,img_label_B_batch ,img_label_D_batch= tf.train.batch(\n [paths_A,paths_B ,paths_D,a_images, b_images,d_images,img_label_B,img_label_D], batch_size=a.batch_size)\n\n steps_per_epoch = int(math.ceil(max(len(B_paths),len(A_paths)) / a.batch_size))\n\n return Examples(\n paths_A=paths_batch_A,#A类图的路径\n paths_B=paths_batch_B,#B类图的路径\n paths_D=paths_batch_D,\n label_B=img_label_B_batch,\n a_image=a_images_batch,\n b_image=b_images_batch,\n d_images=d_images_batch,\n count=max(len(A_paths),len(B_paths)),\n steps_per_epoch=steps_per_epoch,\n\n )\n\n\n#def batch_norm(x, name=\"batch_norm\"):\n# return tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, scope=name)\nclass batch_norm(object):\n def __init__(self, epsilon=1e-5, momentum = 0.9, name=\"batch_norm\"):\n with tf.variable_scope(name):\n self.epsilon = epsilon\n self.momentum = momentum\n self.name = name\n\n def __call__(self, x, train=True):\n return tf.contrib.layers.batch_norm(x,\n decay=self.momentum,\n updates_collections=None,\n epsilon=self.epsilon,\n scale=True,\n is_training=train,\n scope=self.name)\n\ndef instance_norm(input, name=\"instance_norm\"):\n with tf.variable_scope(name):\n depth = input.get_shape()[3]\n scale = tf.get_variable(\"scale\", [depth], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32))\n offset = tf.get_variable(\"offset\", [depth], initializer=tf.constant_initializer(0.0))\n mean, variance = tf.nn.moments(input, axes=[1, 2], keep_dims=True)\n epsilon = 1e-5\n inv = tf.rsqrt(variance + epsilon)\n normalized = (input - mean) * inv\n return scale * normalized + offset\n\n\ndef conv2d(input_, output_dim, ks=4, s=2, stddev=0.02, padding='SAME', name=\"conv2d\"):\n with tf.variable_scope(name):\n return slim.conv2d(input_, output_dim, ks, s, padding=padding, activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(stddev=stddev),\n biases_initializer=None)\n\n\ndef deconv2d(input_, output_dim, ks=4, s=2, stddev=0.02, name=\"deconv2d\"):\n with tf.variable_scope(name):\n return slim.conv2d_transpose(input_, output_dim, ks, s, padding='SAME', activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(stddev=stddev),\n biases_initializer=None)\n\ndef ssim(x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n\n mu_x = slim.avg_pool2d(x, 13, 2, 'VALID')\n mu_y = slim.avg_pool2d(y, 13, 2, 'VALID')\n\n sigma_x = slim.avg_pool2d(x ** 2, 13, 2, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 13, 2, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y, 13, 2, 'VALID') - mu_x * mu_y\n\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n\n SSIM = SSIM_n / SSIM_d\n\n 
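# SSIM lies in [-1, 1]; (1 - SSIM) / 2 maps it to a dissimilarity in [0, 1], and the clip below guards\n    # against values that drift slightly outside that range.\n    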
return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n\ndef L1(a,b):\n little_a = slim.avg_pool2d(a,13, 2, \"VALID\")\n little_b = slim.avg_pool2d(b,13, 2, \"VALID\")\n l1= tf.reduce_mean(tf.abs(little_a-little_b))\n return l1\n\n\ndef generator_water( image, depth,num, auxiliary,reuse=False, name=\"generator_water\"):\n with tf.variable_scope(name):\n # image is 256 x 256 x input_c_dim\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n with tf.variable_scope(\"generator_water\") as scope:\n output_height = 256\n output_width = 256\n batch_size = 1\n\n g_bn0 = batch_norm(name='g_bn0')\n g_bn1 = batch_norm(name='g_bn1')\n g_bn2 = batch_norm(name='g_bn2')\n g_bn3 = batch_norm(name='g_bn3')\n g_bn4 = batch_norm(name='g_bn4')\n\n r2 = np.ones([output_height, output_width], np.float32)\n r4 = np.ones([output_height, output_width], np.float32)\n r6 = np.ones([output_height, output_width], np.float32)\n cx = output_width / 2\n cy = output_height / 2\n for i in range(0, output_height):\n for j in range(0, output_width):\n r = np.sqrt((i - cy) * (i - cy) + (j - cx) * (j - cx)) / (np.sqrt(cy * cy + cx * cx))\n r2[i, j] = r * r\n r4[i, j] = r * r * r * r\n r6[i, j] = r * r * r * r * r * r\n # water-based attenuation and backscatter\n\n with tf.variable_scope(\"fc\"):\n\n fc = tf.get_variable(\"full-connect\", shape=[1, 1, 256 * 256 * 3, num], dtype=tf.float32,\n initializer=tf.ones_initializer())\n auxiliary = tf.matmul(fc, auxiliary)\n r = tf.reshape(auxiliary, [1, 256, 256, 3])\n image += r\n with tf.variable_scope(\"g_atten\"):\n #第一阶段公式,注意初始化的参数和之后将其进行打包\n init_r = tf.random_normal([1,1,1],mean=0.35,stddev=0.01,dtype=tf.float32)\n eta_r = tf.get_variable(\"g_eta_r\",initializer=init_r)\n init_b = tf.random_normal([1,1,1],mean=0.015,stddev=0.01,dtype=tf.float32)\n eta_b = tf.get_variable(\"g_eta_b\",initializer=init_b)\n init_g = tf.random_normal([1,1,1],mean=0.036,stddev=0.01,dtype=tf.float32)\n eta_g = tf.get_variable(\"g_eta_g\",initializer=init_g)\n eta = tf.stack([eta_r,eta_g,eta_b],axis=3)\n eta_d = tf.exp(tf.multiply(-1.0,tf.multiply(depth,eta)))#depth就是rc\n\n h0 = tf.multiply(image,eta_d)#这里传进去的image是Iair,I air e −η(λ\n #返回的参数是G1\n # z =\n # # backscattering\n # z_,h0z_w, h0z_b = linear(z,output_width*output_height*batch_size*1, 'g_h0_lin', with_w=True)\n # #把参数也进行传回了\n # h0z = tf.reshape(z_, [-1, output_height, output_width, 1])\n # h0z = tf.nn.relu(g_bn0(h0z))\n # h0z = tf.multiply(h0z,depth)\n # print(h0z.get_shape)\n\n #copy 3份,分别作为rgb的输入\n with tf.variable_scope('g_h1_conv'):\n w = tf.get_variable('g_w',[ 5,5, h0.get_shape()[-1], 1],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n h1z = tf.nn.conv2d(h0, w, strides=[1, 1,1, 1], padding='SAME')\n h_g = lrelu1(g_bn1(h1z))\n\n with tf.variable_scope('g_h1_convr'):\n wr = tf.get_variable('g_wr',[ 5,5, h0.get_shape()[-1], 1],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n h1zr = tf.nn.conv2d(h0, wr, strides=[1, 1, 1, 1], padding='SAME')\n h_r = lrelu1(g_bn3(h1zr))\n\n with tf.variable_scope('g_h1_convb'):\n wb = tf.get_variable('g_wb',[ 5,5, h0.get_shape()[-1], 1],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n h1zb = tf.nn.conv2d(h0, wb, strides=[1, 1, 1, 1], padding='SAME')\n h_b = lrelu1(g_bn4(h1zb))#进行batch norm的操作\n #去掉最后一层维度为1\n h_r = tf.squeeze(h_r,axis=3)\n h_g = tf.squeeze(h_g,axis=3)\n h_b = tf.squeeze(h_b,axis=3)\n\n h_final=tf.stack([h_r,h_g,h_b],axis=3)\n #将三个参数进行连接,然后就变成了一个RGB的最终生成图\n\n h2 = 
tf.add(h_final,h0)#拿第一阶段的输出与第二阶段的输出直接矩阵相加\n\n # camera model\n with tf.variable_scope(\"g_vig\"):\n A = tf.get_variable('g_amp', [1],\n initializer=tf.truncated_normal_initializer(mean=0.9,stddev=0.01))\n C1 = tf.get_variable('g_c1', [1],\n initializer=tf.truncated_normal_initializer(mean=1.0,stddev=0.001))\n C2 = tf.get_variable('g_c2', [1],\n initializer=tf.truncated_normal_initializer(mean=1.0,stddev=0.001))\n C3 = tf.get_variable('g_c3', [1],\n initializer=tf.truncated_normal_initializer(mean=1.0,stddev=0.001))\n #三通道的R2,R4,R6和一个参数进行相除\n h11 = tf.multiply(r2,C1)\n h22 = tf.multiply(r4,C2)\n h33 = tf.multiply(r6,C3)\n h44 = tf.ones([output_height,output_width],tf.float32)\n h1 = tf.add(tf.add(h44,h11),tf.add(h22,h33))#将四个通道的值进行相加,然后直接相除\n V = tf.expand_dims(h1,axis=2)\n h1a = tf.divide(h2,V)\n h_out = tf.multiply(h1a,A)\n return h_out, eta_r,eta_g,eta_b, C1,C2,C3,A\n\n\ndef generator_resnet(image, num, auxiliary, reuse=False, name=\"generator\"):\n with tf.variable_scope(name):\n # image is 256 x 256 x input_c_dim\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n def residule_block(x, dim, ks=3, s=1, name='res'):\n p = int((ks - 1) / 2)\n y = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], \"REFLECT\")\n y = instance_norm(conv2d(y, dim, ks, s, padding='VALID', name=name + '_c1'), name + '_bn1')\n y = tf.pad(tf.nn.relu(y), [[0, 0], [p, p], [p, p], [0, 0]], \"REFLECT\")\n y = instance_norm(conv2d(y, dim, ks, s, padding='VALID', name=name + '_c2'), name + '_bn2')\n return y + x\n # Justin Johnson's model from https://github.com/jcjohnson/fast-neural-style/\n # The network with 9 blocks consists of: c7s1-32, d64, d128, R128, R128, R128,\n # R128, R128, R128, R128, R128, R128, u64, u32, c7s1-3\n with tf.variable_scope(\"fc\"):\n # fc = tf.get_variable(\"full-connect1\", shape=[1, 1, 256*256*3, num], dtype=tf.float32,\n # initializer=tf.random_normal_initializer(1.0, 0.02))\n fc = tf.get_variable(\"full-connect\", shape=[1, 1, 256 * 256 * 3, num], dtype=tf.float32,\n initializer=tf.ones_initializer())\n # bias = tf.get_variable(\"bias\", shape=[1, 1,1, num], dtype=tf.float32,\n # initializer=tf.random_normal_initializer(1.0, 0.02))\n # auxiliary [1,1,num,1]\n auxiliary = tf.matmul(fc, auxiliary)\n r = tf.reshape(auxiliary, [1, 256, 256, 3])\n image += r\n c0 = tf.pad(image, [[0, 0], [3, 3], [3, 3], [0, 0]], \"REFLECT\")\n c1 = tf.nn.relu(instance_norm(conv2d(c0, a.ngf, 7, 1, padding='VALID', name='g_e1_c'), 'g_e1_bn'))\n c2 = tf.nn.relu(instance_norm(conv2d(c1, a.ngf * 2, 3, 2, name='g_e2_c'), 'g_e2_bn'))\n c3 = tf.nn.relu(instance_norm(conv2d(c2, a.ngf * 4, 3, 2, name='g_e3_c'), 'g_e3_bn'))\n # define G network with 9 resnet blocks\n r1 = residule_block(c3, a.ngf * 4, name='g_r1')\n r2 = residule_block(r1, a.ngf * 4, name='g_r2')\n r3 = residule_block(r2, a.ngf * 4, name='g_r3')\n r4 = residule_block(r3, a.ngf * 4, name='g_r4')\n r5 = residule_block(r4, a.ngf * 4, name='g_r5')\n r6 = residule_block(r5, a.ngf * 4, name='g_r6')\n r7 = residule_block(r6, a.ngf * 4, name='g_r7')\n r8 = residule_block(r7, a.ngf * 4, name='g_r8')\n r9 = residule_block(r8, a.ngf * 4, name='g_r9')\n\n #with tf.variable_scope(\"fc\"):\n # fc = tf.get_variable(\"full-connect\", shape=[1, 1, a.ngf * a.ngf * a.ngf * 4, num], dtype=tf.float32,\n # initializer=tf.random_normal_initializer(1.0, 0.02))\n #fc = tf.get_variable(\"full-connect\", shape=[1, 1, a.ngf * a.ngf * a.ngf * 4, num], dtype=tf.float32,\n # initializer=tf.ones_initializer())\n # bias = 
tf.get_variable(\"bias\", shape=[1, 1,1, num], dtype=tf.float32,\n # initializer=tf.random_normal_initializer(1.0, 0.02))\n # auxiliary [1,1,num,1]\n #auxiliary = tf.matmul(fc, auxiliary)\n #r = tf.reshape(auxiliary, [1, 64, 64, 64 * 4])\n #r9 += r\n d1 = deconv2d(r9, a.ngf * 2, 3, 2, name='g_d1_dc')\n\n d1 = tf.nn.relu(instance_norm(d1, 'g_d1_bn'))\n d2 = deconv2d(d1, a.ngf, 3, 2, name='g_d2_dc')\n d2 = tf.nn.relu(instance_norm(d2, 'g_d2_bn'))\n d2 = tf.pad(d2, [[0, 0], [3, 3], [3, 3], [0, 0]], \"REFLECT\")\n pred = tf.nn.tanh(conv2d(d2, 3, 7, 1, padding='VALID', name='g_pred_c'))\n\n return pred\n\ndef create_model(a_images, b_images,label_B,d_images):\n def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,\n padding='SAME', groups=1):\n \"\"\"Create a convolution layer.\n\n Adapted from: https://github.com/ethereon/caffe-tensorflow\n \"\"\"\n # Get number of input channels\n input_channels = int(x.get_shape()[-1])\n\n # Create lambda function for the convolution\n convolve = lambda i, k: tf.nn.conv2d(i, k,\n strides=[1, stride_y, stride_x, 1],\n padding=padding)\n\n with tf.variable_scope(name) as scope:\n # Create tf variables for the weights and biases of the conv layer\n weights = tf.get_variable('weights', shape=[filter_height,\n filter_width,\n input_channels / groups,\n num_filters])\n biases = tf.get_variable('biases', shape=[num_filters])\n\n if groups == 1:\n conv = convolve(x, weights)\n\n # In the cases of multiple groups, split inputs & weights and\n else:\n # Split input and weights and convolve them separately\n input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)\n weight_groups = tf.split(axis=3, num_or_size_splits=groups,\n value=weights)\n output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]\n\n # Concat the convolved output together again\n conv = tf.concat(axis=3, values=output_groups)\n\n # Add biases\n bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))\n\n # Apply relu function\n relu = tf.nn.relu(bias, name=scope.name)\n\n return relu\n def fc(x, num_in, num_out, name, relu=True):\n \"\"\"Create a fully connected layer.\"\"\"\n with tf.variable_scope(name) as scope:\n\n # Create tf variables for the weights and biases\n weights = tf.get_variable('weights', shape=[num_in, num_out],\n trainable=True)\n biases = tf.get_variable('biases', [num_out], trainable=True)\n\n # Matrix multiply weights and inputs and add bias\n act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)\n\n if relu:\n # Apply ReLu non linearity\n relu = tf.nn.relu(act)\n return relu\n else:\n return act\n\n def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,\n padding='SAME'):\n \"\"\"Create a max pooling layer.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],\n strides=[1, stride_y, stride_x, 1],\n padding=padding, name=name)\n\n def lrn(x, radius, alpha, beta, name, bias=1.0):\n \"\"\"Create a local response normalization layer.\"\"\"\n return tf.nn.local_response_normalization(x, depth_radius=radius,\n alpha=alpha, beta=beta,\n bias=bias, name=name)\n\n def dropout(x, keep_prob):\n \"\"\"Create a dropout layer.\"\"\"\n return tf.nn.dropout(x, keep_prob)\n\n def discriminator1(image,num,label,reuse=False,name=\"discriminator1\"):\n with tf.variable_scope(name):\n # image is 256 x 256 x input_c_dim\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n\n with tf.variable_scope(\"fc\"):\n # fc = 
tf.get_variable(\"full-connect1\", shape=[1, 1, 256*256*3, num], dtype=tf.float32,\n # initializer=tf.random_normal_initializer(1.0, 0.02))\n fc = tf.get_variable(\"full-connect1\", shape=[1, 1, 256*256*3, num], dtype=tf.float32,\n initializer=tf.ones_initializer())\n # bias = tf.get_variable(\"bias\", shape=[1, 1,1, num], dtype=tf.float32,\n # initializer=tf.random_normal_initializer(1.0, 0.02))\n # auxiliary [1,1,num,1]\n auxiliary = tf.matmul(fc, label)\n r = tf.reshape(auxiliary, [1, 256, 256, 3])\n image+=r\n h0 = lrelu(conv2d(image, 64, name='d_h0_conv'),0.2)\n # h0 is (128 x 128 x self.df_dim)\n h1 = lrelu(instance_norm(conv2d(h0, 64 * 2, name='d_h1_conv'), 'd_bn1'),0.2)\n # h1 is (64 x 64 x self.df_dim*2)\n h2 = lrelu(instance_norm(conv2d(h1, 64 * 4, name='d_h2_conv'), 'd_bn2'),0.2)\n # h2 is (32x 32 x self.df_dim*4)\n h3 = lrelu(instance_norm(conv2d(h2, 64 * 8, s=1, name='d_h3_conv'), 'd_bn3'),0.2)\n # h3 is (32 x 32 x self.df_dim*8)\n h4 = conv2d(h3, 1, s=1, name='d_h3_pred')\n # h4 is (32 x 32 x 1)\n\n h4sg=tf.sigmoid(h4)\n\n return h4sg\n def discriminator2(image,num,label,reuse=False,name=\"discriminator2\"):\n with tf.variable_scope(name):\n # image is 256 x 256 x input_c_dim\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n \"\"\"Create the network graph.\"\"\"\n # 1st Layer: Conv (w ReLu) -> Lrn -> Pool\n image = tf.image.resize_images(image, [227, 227])\n conv1 = conv(image, 11, 11, 96, 4, 4, padding='VALID', name='conv1')\n norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')\n pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')\n\n # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups\n conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')\n norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')\n\n # 3rd Layer: Conv (w ReLu)\n conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')\n\n # 4th Layer: Conv (w ReLu) splitted into two groups\n conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')\n\n # 5th Layer: Conv (w ReLu) -> Pool splitted into two groups\n conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')\n pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')\n\n # 6th Layer: Flatten -> FC (w ReLu) -> Dropout\n flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])\n fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6')\n dropout6 = dropout(fc6, 0.5)\n\n # 7th Layer: FC (w ReLu) -> Dropout\n fc7 = fc(dropout6, 4096, 4096, name='fc7')\n dropout7 = dropout(fc7, 0.5)\n\n # 8th Layer: FC and return unscaled activations\n fc8 = fc(dropout7, 4096, num, relu=False, name='fc8')\n fc8r = tf.reshape(fc8,[1,1,num,1])\n # sess = tf.Session()\n # print(sess.run(fc8r))\n # print(sess.run(label))\n with tf.name_scope(\"cross_ent\"):\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label,logits=fc8r))\n\n return loss, label, fc8r\n\n with tf.variable_scope(\"generatorA2B\"):\n #alpha=tf.Constant([1,256,256,3],0.8)\n alpha = tf.Variable(tf.constant(0.9,dtype=tf.float32,shape=[1,256,256,3]))\n tmp1=generator_resnet(a_images, 7, label_B, False, \"generator_resnet_A2B\")\n tmp2,eta_r_real,eta_g_real,eta_b_real,C1_real,C2_real,C3_real,A_real=generator_water(a_images,d_images,7,label_B,False,\"generator_water_A2B\")\n fake_b = tf.multiply(tmp1,alpha)+tf.multiply(tmp2,1-alpha)\n\n with tf.variable_scope(\"generatorB2A\"):\n recover_a=generator_resnet(fake_b,7,label_B,False,\"generator_resnet_B2A\")\n\n\n 
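# Reverse cycle: the same generator variables are reused (reuse=True) to map real B images to fake A and\n    # back to a recovered B, providing the B->A->B consistency terms used in the generator losses below.\n    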
with tf.variable_scope(\"generatorB2A\",reuse=True):\n fake_a=generator_resnet(b_images,7,label_B,True,\"generator_resnet_B2A\")\n\n with tf.variable_scope(\"generatorA2B\",reuse=True):\n recover_b=generator_resnet(fake_a,7,label_B,True,\"generator_resnet_A2B\")\n\n\n DB_fake=discriminator1(fake_b,num=7,label=label_B,reuse=False,name=\"discriminatorB1\")\n DA_fake=discriminator1(fake_a,num=7,label=label_B,reuse=False,name=\"discriminatorA1\")\n DB_real=discriminator1(b_images,num=7,label=label_B,reuse=True,name=\"discriminatorB1\")\n DA_real=discriminator1(a_images,num=7,label=label_B,reuse=True,name=\"discriminatorA1\")\n\n DBS_fake, label1, fc8rs= discriminator2(fake_b, 7, label_B, reuse=False, name=\"discriminatorB2\")\n #DAS_fake= discriminator1(fake_a, 7, label_B, reuse=False, name=\"discriminatorA1\")\n DBS_real,label2, fc8rs1= discriminator2(b_images, 7, label_B, reuse=True, name=\"discriminatorB2\")\n #DAS_real= discriminator1(a_images, 7, label_B, reuse=True, name=\"discriminatorA1\")\n\n with tf.name_scope(\"generator_loss\"):\n g_loss_a2b=tf.reduce_mean((DB_fake-tf.ones_like(DB_fake))**2)+tf.reduce_mean((DBS_fake-tf.zeros_like(DBS_fake,dtype=tf.float32))**2)*1.0+tf.reduce_mean(tf.abs(a_images-recover_a))*10+tf.reduce_mean(tf.abs(b_images - recover_b)) * 10+L1(fake_b,b_images)*2.0+tf.reduce_mean(ssim(a_images,fake_b))*1.0\n g_loss_b2a = tf.reduce_mean((DA_fake-tf.ones_like(DA_fake))** 2)+tf.reduce_mean(tf.abs(b_images - recover_b)) * 10+tf.reduce_mean(tf.abs(a_images-recover_a))*10+L1(fake_a,a_images)*2.0+tf.reduce_mean(ssim(b_images,fake_a))*1.0\n g_loss=tf.reduce_mean((DB_fake-tf.ones_like(DB_fake) )** 2)+tf.reduce_mean((DA_fake-tf.ones_like(DA_fake) )** 2)+tf.reduce_mean((DBS_fake-tf.zeros_like(DBS_fake,dtype=tf.float32))**2) + tf.reduce_mean(tf.abs(a_images - recover_a)) * 10 +\\\n tf.reduce_mean(tf.abs(b_images - recover_b)) * 10+L1(fake_a,a_images)*2.0+L1(fake_b,b_images)*2.0+tf.reduce_mean(ssim(a_images,fake_b))*1.0+tf.reduce_mean(ssim(b_images,fake_a))*1.0\n # g_loss_a2b=tf.reduce_mean((DB_fake-tf.ones_like(DB_fake))**2)+tf.reduce_mean((DBS_fake-tf.zeros_like(DBS_fake,dtype=tf.float32))**2)+tf.reduce_mean(tf.abs(a_images-recover_a))*10.0+tf.reduce_mean(tf.abs(b_images - recover_b)) * 10.0+tf.reduce_mean(tf.abs(fake_b-b_images))*0.5+tf.reduce_mean(ssim(a_images,fake_b))*0.5\n # g_loss_b2a = tf.reduce_mean((DA_fake-tf.ones_like(DA_fake))** 2)+ tf.reduce_mean((DAS_fake-tf.zeros_like(DAS_fake,dtype=tf.float32))**2)+tf.reduce_mean(tf.abs(b_images - recover_b)) * 10.0+tf.reduce_mean(tf.abs(a_images-recover_a))*10.0+tf.reduce_mean(tf.abs(fake_a-a_images))*0.5+tf.reduce_mean(ssim(b_images,fake_a))*0.5\n # g_loss=tf.reduce_mean((DB_fake-tf.ones_like(DB_fake) )** 2)+tf.reduce_mean((DA_fake-tf.ones_like(DA_fake) )** 2)+tf.reduce_mean((DBS_fake-tf.zeros_like(DBS_fake,dtype=tf.float32))**2) +tf.reduce_mean((DAS_fake-tf.zeros_like(DAS_fake,dtype=tf.float32))**2)+ tf.reduce_mean(tf.abs(a_images - recover_a)) * 10.0 +\\\n # tf.reduce_mean(tf.abs(b_images - recover_b)) * 10.0+tf.reduce_mean(tf.abs(fake_b-b_images))*0.5+tf.reduce_mean(tf.abs(fake_a-a_images))*0.5+tf.reduce_mean(ssim(a_images,fake_b))*0.5+tf.reduce_mean(ssim(b_images,fake_a))*0.5\n c1_loss=-tf.minimum(tf.reduce_min(C1_real),0)*10000\n c2_loss=-tf.minimum(tf.reduce_min(-1*(4*C2_real*C2_real-12*C1_real*C3_real)),0)*10000\n eta_r_loss = -tf.minimum(tf.reduce_min(eta_r_real), 0) * 10000\n eta_g_loss = -tf.minimum(tf.reduce_min(eta_g_real), 0) * 10000\n eta_b_loss = -tf.minimum(tf.reduce_min(eta_b_real), 0) * 10000\n A_loss = 
-tf.minimum(tf.reduce_min(A_real),0)*10000\n g_loss_water = c1_loss + c2_loss + tf.reduce_mean((DB_fake-tf.ones_like(DB_fake))**2)+ eta_r_loss + eta_g_loss +eta_b_loss + A_loss\n\n with tf.name_scope(\"discriminator_loss\"):\n\n db_loss_real1=tf.reduce_mean((DB_real-tf.ones_like(DB_real))**2)\n db_loss_fake1 = tf.reduce_mean((DB_fake - tf.zeros_like(DB_fake)) ** 2)\n db_loss1=(db_loss_real1 + db_loss_fake1)/2.0\n\n db_loss_real2=tf.reduce_mean((DBS_real-tf.zeros_like(DBS_real,dtype=tf.float32))**2)\n db_loss_fake2 = tf.reduce_mean((DBS_fake-tf.zeros_like(DBS_fake,dtype=tf.float32))**2)\n\n db_loss2 = (db_loss_real2 + db_loss_fake2) / 2.0\n\n db_loss=db_loss1+db_loss2\n\n\n da_loss_real1 = tf.reduce_mean((DA_real - tf.ones_like(DA_real)) ** 2)\n da_loss_fake1 = tf.reduce_mean((DA_fake - tf.zeros_like(DA_fake)) ** 2)\n da_loss = (da_loss_real1 + da_loss_fake1) / 2.0\n\n\n d_loss=da_loss+db_loss\n\n with tf.name_scope(\"A2B_discriminator_train\"):\n # discrim_tvars = [var for var in tf.trainable_variables() if var.name.startswith(\"discriminator\")]\n discrim_tvars_B1 = [var for var in tf.trainable_variables() if 'discriminatorB1' in var.name]\n discrim_optim_B1 = tf.train.AdamOptimizer(a.lr, a.beta1)\n discrim_grads_and_vars_B1 = discrim_optim_B1.compute_gradients(db_loss1, var_list=discrim_tvars_B1)\n discrim_train_B1 = discrim_optim_B1.apply_gradients(discrim_grads_and_vars_B1)\n with tf.name_scope(\"A2B_discriminator_train\"):\n discrim_tvars_B2 = [var for var in tf.trainable_variables() if 'discriminatorB2' in var.name]\n discrim_optim_B2 = tf.train.AdamOptimizer(a.lr, a.beta1)\n discrim_grads_and_vars_B2 = discrim_optim_B2.compute_gradients(db_loss2, var_list=discrim_tvars_B2)\n discrim_train_B2 = discrim_optim_B2.apply_gradients(discrim_grads_and_vars_B2)\n\n\n with tf.name_scope(\"B2A_discriminator_train\"):\n # discrim_tvars = [var for var in tf.trainable_variables() if var.name.startswith(\"discriminator\")]\n discrim_tvars_A = [var for var in tf.trainable_variables() if 'discriminatorA1' in var.name]\n discrim_optim_A = tf.train.AdamOptimizer(a.lr, a.beta1)\n discrim_grads_and_vars_A = discrim_optim_A.compute_gradients(da_loss, var_list=discrim_tvars_A)\n discrim_train_A = discrim_optim_A.apply_gradients(discrim_grads_and_vars_A)\n\n with tf.name_scope(\"A2B_generator_train\"):\n with tf.control_dependencies([discrim_train_B1,discrim_train_B2]):\n gen_tvars_B = [var for var in tf.trainable_variables() if 'generator_resnet_A2B' in var.name]\n gen_optim_B = tf.train.AdamOptimizer(a.lr, a.beta1)\n gen_grads_and_vars_B = gen_optim_B.compute_gradients(g_loss_a2b, var_list=gen_tvars_B)\n gen_train_B = gen_optim_B.apply_gradients(gen_grads_and_vars_B)\n\n\n\n with tf.name_scope(\"B2A_generator_train\"):\n with tf.control_dependencies([discrim_train_A]):\n gen_tvars_A = [var for var in tf.trainable_variables() if 'generator_resnet_B2A' in var.name]\n gen_optim_A = tf.train.AdamOptimizer(a.lr, a.beta1)\n gen_grads_and_vars_A = gen_optim_A.compute_gradients(g_loss_b2a, var_list=gen_tvars_A)\n gen_train_A = gen_optim_A.apply_gradients(gen_grads_and_vars_A)\n\n\n\n\n with tf.name_scope(\"A2B_generator_water_train\"):\n\n gen_tvars_water = [var for var in tf.trainable_variables() if 'generator_water_A2B' in var.name]\n gen_optim_water = tf.train.AdamOptimizer(a.lr, a.beta1)\n gen_grads_and_vars_water = gen_optim_water.compute_gradients(g_loss_water, var_list=gen_tvars_water)\n gen_train_water = gen_optim_water.apply_gradients(gen_grads_and_vars_water)\n\n\n ema = 
tf.train.ExponentialMovingAverage(decay=0.99)\n update_losses = ema.apply(\n [g_loss_a2b, g_loss_b2a, g_loss, da_loss, db_loss1,db_loss2, g_loss_water])\n\n global_step = tf.contrib.framework.get_or_create_global_step()\n incr_global_step = tf.assign(global_step, global_step + 1)\n\n return Model(\n g_loss_a2b=ema.average(g_loss_a2b),\n g_loss_b2a=ema.average(g_loss_b2a),\n g_loss=ema.average(g_loss),\n da_loss=ema.average(da_loss),\n\n db_loss1=ema.average(db_loss1),\n db_loss2=ema.average(db_loss2),\n db_loss_real2=ema.average(db_loss_real2),\n db_loss_fake2=ema.average(db_loss_fake2),\n g_loss_water=ema.average(g_loss_water),\n discrim_grads_and_vars=discrim_grads_and_vars_B1,\n gen_grads_and_vars=gen_grads_and_vars_B,\n outputs_a=fake_a,\n output_b=fake_b,\n train=tf.group(update_losses, incr_global_step, gen_train_B,gen_train_A,gen_train_water),\n label=label1,\n fc8r=fc8rs,\n )\n\ndef save_images(fetches, step=None):\n image_dir = os.path.join(a.output_dir, \"images\")\n if not os.path.exists(image_dir):\n os.makedirs(image_dir)\n\n filesets = []\n for i, in_path in enumerate(fetches[\"paths\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n fileset = {\"name\": name, \"step\": step}\n for kind in [\"inputs\", \"outputs\", \"targets\"]:\n filename = name + \"-\" + kind + \".png\"\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n with open(out_path, \"wb\") as f:\n f.write(contents)\n filesets.append(fileset)\n return filesets\n\ndef append_index_test_B(filesets,step=False):\n\n index_path = os.path.join(a.output_dir, \"indexB.html\")\n if os.path.exists(index_path):\n index = open(index_path, \"a\")\n else:\n index = open(index_path, \"w\")\n index.write(\"\")\n if step:\n index.write(\"\")\n index.write(\"\")\n\n for fileset in filesets:\n index.write(\"\")\n\n if step:\n index.write(\"\" % fileset[\"step\"])\n index.write(\"\" % fileset[\"name\"])\n\n for kind in [\"a_image\", \"outputs_b\", \"b_image\"]:\n index.write(\"\" % fileset[kind])\n\n index.write(\"\")\n return index_path\n\ndef append_index_test_A(filesets,step=False):\n\n\n index_path = os.path.join(a.output_dir, \"indexA.html\")\n if os.path.exists(index_path):\n index = open(index_path, \"a\")\n else:\n index = open(index_path, \"w\")\n index.write(\"
<html><body><table><tr><th>step</th><th>name</th><th>input</th><th>output</th><th>target</th></tr>
\")\n if step:\n index.write(\"\")\n index.write(\"\")\n\n for fileset in filesets:\n index.write(\"\")\n\n if step:\n index.write(\"\" % fileset[\"step\"])\n index.write(\"\" % fileset[\"name\"])\n\n for kind in [\"b_image\", \"outputs_a\", \"a_image\"]:\n index.write(\"\" % fileset[kind])\n\n index.write(\"\")\n return index_path\n\ndef append_index(filesets, step=False):\n index_path = os.path.join(a.output_dir, \"index.html\")\n if os.path.exists(index_path):\n index = open(index_path, \"a\")\n else:\n index = open(index_path, \"w\")\n index.write(\"
<html><body><table><tr><th>step</th><th>name</th><th>input</th><th>output</th><th>target</th></tr>
\")\n if step:\n index.write(\"\")\n index.write(\"\")\n\n for fileset in filesets:\n index.write(\"\")\n\n if step:\n index.write(\"\" % fileset[\"step\"])\n index.write(\"\" % fileset[\"name\"])\n\n for kind in [\"inputs\", \"outputs\", \"targets\"]:\n index.write(\"\" % fileset[kind])\n\n index.write(\"\")\n return index_path\n\ndef save_generated_A_test(fetches,step=None):\n image_dir = os.path.join(a.output_dir,\"A\")\n if not os.path.exists(image_dir):\n os.makedirs(image_dir) # \\D0½\\A8\\C9\\FA\\B3\\C9\\D5\\D5Ƭ·\\BE\\B6\n\n filesets = []\n for i, in_path in enumerate(fetches[\"paths_B\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n fileset = {\"name\": name, \"step\": step}\n for kind in [\"b_image\",\"outputs_a\",\"a_image\"]:\n # filename = name + \".png\"\n filename = name + \"-\" + kind + \".png\"\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n with open(out_path, \"wb\") as f:\n f.write(contents)\n filesets.append(fileset)\n return filesets\n\ndef save_generated_B_test(fetches,step=None):\n image_dir = os.path.join(a.output_dir, \"B\")\n if not os.path.exists(image_dir):\n os.makedirs(image_dir) # \\D0½\\A8\\C9\\FA\\B3\\C9\\D5\\D5Ƭ·\\BE\\B6\n\n filesets = []\n index_list=[]\n for i , in_path in enumerate(fetches[\"paths_B\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n name_list=name.split('.')\n name0=name_list[0]\n index=name0[-1]\n index_list.append(index)\n for i, in_path in enumerate(fetches[\"paths_A\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n fileset = {\"name\": name, \"step\": step}\n for kind in [\"a_image\",\"outputs_b\",\"b_image\"]:\n filename = name +index_list[i]+ \"-\" + kind + \".png\"\n # filename = name +index_list[i]+ \".png\"\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n with open(out_path, \"wb\") as f:\n f.write(contents)\n filesets.append(fileset)\n return filesets\n\n\ndef save_generated_A(fetches, epoch, step=None):\n image_dir = os.path.join(a.output_dir, \"generated_A\" + str(epoch))\n if not os.path.exists(image_dir):\n os.makedirs(image_dir) # \\D0½\\A8\\C9\\FA\\B3\\C9\\D5\\D5Ƭ·\\BE\\B6\n\n filesets = []\n for i, in_path in enumerate(fetches[\"paths_B\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n fileset = {\"name\": name, \"step\": step}\n for kind in [\"outputs_a\"]:\n filename = name + \".png\"\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n with open(out_path, \"wb\") as f:\n f.write(contents)\n filesets.append(fileset)\n return filesets\n\ndef save_generated_B(fetches, epoch, step=None):\n image_dir = os.path.join(a.output_dir, \"generated_B\" + str(epoch))\n if not os.path.exists(image_dir):\n os.makedirs(image_dir) # \\D0½\\A8\\C9\\FA\\B3\\C9\\D5\\D5Ƭ·\\BE\\B6\n\n filesets = []\n index_list=[]\n for i , in_path in enumerate(fetches[\"paths_B\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n name_list=name.split('.')\n name0=name_list[0]\n index=name0[-1]\n index_list.append(index)\n for i, in_path in enumerate(fetches[\"paths_A\"]):\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n 
fileset = {\"name\": name, \"step\": step}\n for kind in [\"outputs_b\"]:\n filename = name +index_list[i]+ \".png\"\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n with open(out_path, \"wb\") as f:\n f.write(contents)\n filesets.append(fileset)\n return filesets\n\ndef main():\n if tf.__version__.split('.')[0] != \"1\":\n raise Exception(\"Tensorflow version 1 required\")\n\n if a.seed is None:\n a.seed = random.randint(0, 2 ** 31 - 1)\n\n tf.set_random_seed(a.seed)\n np.random.seed(a.seed)\n random.seed(a.seed)\n\n if not os.path.exists(a.output_dir):\n os.makedirs(a.output_dir)\n\n if a.mode == \"test\" or a.mode == \"export\":\n if a.checkpoint is None:\n raise Exception(\"checkpoint required for test mode\")\n\n # load some options from the checkpoint\n options = {\"which_direction\", \"ngf\", \"ndf\", \"lab_colorization\"}\n with open(os.path.join(a.checkpoint, \"options.json\")) as f:\n for key, val in json.loads(f.read()).items():\n if key in options:\n print(\"loaded\", key, \"=\", val)\n setattr(a, key, val)\n # disable these features in test mode\n a.scale_size = CROP_SIZE\n a.flip = False\n\n for k, v in a._get_kwargs():\n print(k, \"=\", v)\n\n with open(os.path.join(a.output_dir, \"options.json\"), \"w\") as f:\n f.write(json.dumps(vars(a), sort_keys=True, indent=4))\n\n\n examples = load_examples()\n print(\"examples count = %d\" % examples.count)\n\n # inputs and targets are [batch_size, height, width, channels]\n model = create_model(examples.a_image, examples.b_image,examples.label_B,examples.d_images)\n labelp = model.label\n fc8rp = model.fc8r\n # undo colorization splitting on images that we use for display/output\n\n a_image = deprocess(examples.a_image)\n b_image = deprocess(examples.b_image)\n outputs_a = deprocess(model.outputs_a)\n output_b = deprocess(model.output_b)\n\n def convert(image):\n if a.aspect_ratio != 1.0:\n # upscale to correct aspect ratio\n size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]\n image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)\n\n return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)\n\n # reverse any processing on images so they can be written to disk or displayed to user\n with tf.name_scope(\"convert_a\"):\n converted_a_image = convert(a_image)\n\n with tf.name_scope(\"convert_b\"):\n converted_b_image = convert(b_image)\n\n with tf.name_scope(\"convert_outputs_a\"):\n converted_outputs_a = convert(outputs_a)\n\n with tf.name_scope(\"convert_outputs_b\"):\n converted_output_b = convert(output_b)\n\n with tf.name_scope(\"encode_images\"):\n display_fetches = {\n \"paths_A\": examples.paths_A,\n \"paths_B\": examples.paths_B,\n \"a_image\": tf.map_fn(tf.image.encode_png, converted_a_image, dtype=tf.string, name=\"a_image_pngs\"),\n \"b_image\": tf.map_fn(tf.image.encode_png, converted_b_image, dtype=tf.string, name=\"b_image_pngs\"),\n \"outputs_a\": tf.map_fn(tf.image.encode_png, converted_outputs_a, dtype=tf.string, name=\"output_a_pngs\"),\n \"outputs_b\": tf.map_fn(tf.image.encode_png, converted_output_b, dtype=tf.string, name=\"output_b_pngs\"),\n }\n\n # summaries\n with tf.name_scope(\"a_image_summary\"):\n tf.summary.image(\"a_image\", converted_a_image)\n\n with tf.name_scope(\"b_image_summary\"):\n tf.summary.image(\"b_image\", converted_b_image)\n\n with tf.name_scope(\"outputs_a_summary\"):\n tf.summary.image(\"outputs_a\", 
converted_outputs_a)\n\n with tf.name_scope(\"outputs_b_summary\"):\n tf.summary.image(\"outputs_b\", converted_output_b)\n\n\n\n #tf.summary.scalar(\"d_loss\", model.d_loss)\n tf.summary.scalar(\"g_loss\", model.g_loss)\n tf.summary.scalar(\"g_loss_a2b\", model.g_loss_a2b)\n tf.summary.scalar(\"g_loss_b2a\", model.g_loss_b2a)\n\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name + \"/values\", var)\n\n # for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:\n # tf.summary.histogram(var.op.name + \"/gradients\", grad)\n\n with tf.name_scope(\"parameter_count\"):\n parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])\n\n saver = tf.train.Saver(max_to_keep=0)\n\n logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None\n sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)\n with sv.managed_session() as sess:\n print(\"parameter_count =\", sess.run(parameter_count))\n\n if a.checkpoint is not None:\n print(\"loading model from checkpoint\")\n checkpoint = tf.train.latest_checkpoint(a.checkpoint)\n saver.restore(sess, checkpoint)\n\n max_steps = 2 ** 32\n if a.max_epochs is not None:\n max_steps = examples.steps_per_epoch * a.max_epochs\n if a.max_steps is not None:\n max_steps = a.max_steps\n\n if a.mode == \"test\":\n # testing\n # at most, process the test data once\n max_steps = min(examples.steps_per_epoch, max_steps)\n for step in range(max_steps):\n results = sess.run(display_fetches)\n # filesets = save_images(results)\n filesets_A = save_generated_A_test(results)\n filesets_B= save_generated_B_test(results)\n # for i, f in enumerate(filesets):\n # print(\"evaluated image\", f[\"name\"])\n index_path_B = append_index_test_B(filesets_B)\n index_path_A = append_index_test_A(filesets_A)\n\n print(\"wrote index at\", index_path_B)\n print(\"wrote index at\", index_path_A)\n else:\n # training\n start = time.time()\n\n max_steps = examples.count * 100\n cnt = 0\n for step in range(max_steps):\n mod = cnt % (examples.count )\n if mod < examples.count and cnt >= examples.count :\n results_generated = sess.run(display_fetches)\n _ = save_generated_A(results_generated, int(cnt / (examples.count )))\n _ = save_generated_B(results_generated, int(cnt / (examples.count )))\n cnt += 1\n\n def should(freq):\n return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)\n\n #print(\"lsble_count =\", sess.run(labelp))\n #print(\"frc_count =\", sess.run(fc8rp))\n\n options = None\n run_metadata = None\n if should(a.trace_freq):\n options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n\n fetches = {\n \"train\": model.train,\n \"global_step\": sv.global_step,\n }\n\n if should(a.progress_freq):\n #fetches[\"d_loss\"] = model.d_loss\n fetches[\"db_loss1\"] = model.db_loss1\n fetches[\"db_loss2\"] = model.db_loss2\n fetches[\"da_loss\"] = model.da_loss\n fetches[\"g_loss\"] = model.g_loss\n fetches[\"g_loss_a2b\"] = model.g_loss_a2b\n fetches[\"g_loss_b2a\"] = model.g_loss_b2a\n fetches[\"db_loss_real2\"] = model.db_loss_real2\n fetches[\"db_loss_fake2\"] = model.db_loss_fake2\n\n if should(a.summary_freq):\n fetches[\"summary\"] = sv.summary_op\n\n if should(a.display_freq):\n fetches[\"display\"] = display_fetches\n\n results = sess.run(fetches, options=options, run_metadata=run_metadata)\n\n if should(a.summary_freq):\n print(\"recording summary\")\n sv.summary_writer.add_summary(results[\"summary\"], 
results[\"global_step\"])\n\n if should(a.display_freq):\n print(\"saving display images\")\n filesets = save_images(results[\"display\"], step=results[\"global_step\"])\n append_index(filesets, step=True)\n\n if should(a.trace_freq):\n print(\"recording trace\")\n sv.summary_writer.add_run_metadata(run_metadata, \"step_%d\" % results[\"global_step\"])\n\n if should(a.progress_freq):\n # global_step will have the correct step count if we resume from a checkpoint\n train_epoch = math.ceil(results[\"global_step\"] / examples.steps_per_epoch)\n train_step = (results[\"global_step\"] - 1) % examples.steps_per_epoch + 1\n rate = (step + 1) * a.batch_size / (time.time() - start)\n remaining = (max_steps - step) * a.batch_size / rate\n print(\"progress epoch %d step %d image/sec %0.1f remaining %dm\" % (\n train_epoch, train_step, rate, remaining / 60))\n #print(\"d_loss\", results[\"d_loss\"])\n print(\"da_loss\", results[\"da_loss\"])\n print(\"db_loss1\", results[\"db_loss1\"])\n print(\"db_loss2\", results[\"db_loss2\"])\n print(\"g_loss\", results[\"g_loss\"])\n print(\"g_loss_a2b\", results[\"g_loss_a2b\"])\n print(\"g_loss_b2a\", results[\"g_loss_b2a\"])\n print(\"db_loss_fake2\", results[\"g_loss_b2a\"])\n print(\"db_loss_real2\", results[\"g_loss_b2a\"])\n\n if should(a.save_freq):\n print(\"saving model\")\n saver.save(sess, os.path.join(a.output_dir, \"model\"), global_step=sv.global_step)\n\n if sv.should_stop():\n break\n\n\nmain()","sub_path":"image_cls/tmp2.py","file_name":"tmp2.py","file_ext":"py","file_size_in_byte":59472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626186455","text":"from gat_impl.ConfigGAT import *\nfrom baseline_impl.ConfigBaselines import *\nfrom baseline_impl.InnerEvaluationBaselines import evaluate_baseline, inner_losses_baseline\nfrom gat_impl.InnerEvaluationGAT import evaluate_gat, inner_losses_gat\nimport numpy as np\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import r2_score, mean_squared_error\n\n\ndef get_best_models(model_name, data_set, trait) -> dict:\n '''\n Retrieve the best GAT/baseline models on the inner CV evaluated on a specific dataset and targeting a personality\n trait for each choice of outer evaluation\n :param model_name: str, required to differentiate between baselines\n :param data_set: specific loading function for the structural or functional data\n :param trait: personality trait targeted\n :return:\n '''\n # get the entire evaluation data for the inner CV of the filtered models\n if model_name == 'GAT':\n inner_results, lookup_table = inner_losses_gat(filter_by_params={'load_specific_data': data_set})\n else:\n inner_results, lookup_table = inner_losses_baseline(baseline_name=model_name,\n filter_by_params={'load_specific_data': data_set})\n best_models = {}\n for out_split in inner_results.keys():\n best_models[out_split] = {}\n # average test loss on the inner splits when predicting a specific trait for each model\n avg_cv_inner_loss = {}\n for model in inner_results[out_split].keys():\n # accumulate each training loss for this trait for every inner split\n trait_inner_losses = []\n for in_split in inner_results[out_split][model].keys():\n if trait in inner_results[out_split][model][in_split].keys():\n trait_inner_losses.append(inner_results[out_split][model][in_split][trait])\n assert len(trait_inner_losses) == ConfigGAT().params['k_inner']\n avg_cv_inner_loss[model] = np.mean(np.array(trait_inner_losses))\n else:\n IOError('The inner results for the outer 
evaluation choice %d does not exist' % out_split)\n\n sort_by_trait = list(sorted(avg_cv_inner_loss.keys(), key=lambda name: (avg_cv_inner_loss[name], name)))\n best_models[out_split] = (lookup_table[sort_by_trait[0]], avg_cv_inner_loss[sort_by_trait[0]])\n\n return best_models\n\n\ndef outer_evaluation(model_name, refresh=False):\n '''\n Outer evaluate the best inner models for a specific regression and return their outer MSE losses.\n :return: dictionary of the loss results over the outer folds for each choice of dataset and trait targeted\n '''\n outer_results_file = os.path.join(os.path.dirname((__file__)), 'Results', 'outer_evaluation_%s.pck' % model_name)\n if os.path.exists(outer_results_file) and not refresh:\n with open(outer_results_file, 'rb') as fp:\n return pkl.load(fp)\n\n outer_losses = {model_name: {}}\n # get only the hyper-parameter configurations that were inner evaluated\n for data_set in [load_struct_data, load_funct_data]:\n outer_losses[model_name][data_set] = {}\n out_eval_folds = list(range(ConfigGAT().params['k_outer']))\n traits = sorted(ConfigGAT().params['pers_traits_selection'])\n for trait in traits:\n outer_losses[model_name][data_set][trait] = {'loss': np.zeros(len(out_eval_folds)),\n 'best_models': []}\n best_specific_model = get_best_models(model_name=model_name, data_set=data_set, trait=trait)\n for eval_fold in out_eval_folds:\n config, _ = best_specific_model[eval_fold]\n # set the configuration object for the outer evaluation of the best inner model\n config.update({'nested_CV_level': 'outer',\n 'eval_fold_in': 0,\n 'eval_fold_out': eval_fold})\n # change the config for the particular trait only in case of baseline as GAT targets all at once\n if model_name != 'GAT':\n config.params['pers_traits_selection'] = [trait]\n config = ConfigBaselines(config.params)\n evaluate_model = evaluate_baseline\n else:\n evaluate_model = evaluate_gat\n config = ConfigGAT(config.params)\n out_eval_loss = evaluate_model(config)['test_loss'][trait]\n outer_losses[model_name][data_set][trait]['loss'][eval_fold] = out_eval_loss\n outer_losses[model_name][data_set][trait]['best_models'].append(config)\n\n # save the outer evaluation losses\n with open(outer_results_file, 'wb') as handle:\n pkl.dump(outer_losses, handle)\n return outer_losses\n\n\ndef compute_metric(config, metric_name, trait):\n '''\n Compute the metric result on the outer evaluation for a model specified by a configuration object when predicting a\n specific trait\n :param config: configuration object\n :param metric_name: pearson-r or r-squared metric\n :param trait: personality trait targeted\n :return: value of the metric\n '''\n # if the model was not trained on outer CV, start its evaluation now\n results = evaluate_gat(config) if config.params['name'] == 'GAT' else evaluate_baseline(config)\n if metric_name == 'pearson_score':\n calculate_metric = pearsonr\n elif metric_name == 'r2_score':\n calculate_metric = r2_score\n elif metric_name == 'test_loss':\n calculate_metric = mean_squared_error\n else:\n raise ValueError('Possible metric:{pearson_score,r2_score}, not %s' % metric_name)\n observations, predictions = np.array(list(map(list, zip(*results['predictions'][trait]))))\n\n return calculate_metric(observations, predictions)\n\n\ndef get_outer_metrics(model_name):\n '''\n Prints the Pearson and R-squared metrics obtained from the outer predictions of the best-scoring GAT models per\n dataset and trait choices.\n :return: void\n '''\n outer_losses = outer_evaluation(model_name)[model_name]\n for 
data_set, data_set_dict in outer_losses.items():\n for trait, trait_dict in data_set_dict.items():\n pearson_values, pearson_p_values, r_squared_values, test_loss = [], [], [], []\n\n for eval_fold, best_config in enumerate(trait_dict['best_models']):\n pearson_tuple = compute_metric(best_config, 'pearson_score', trait)\n pearson_values.append(pearson_tuple[0])\n pearson_p_values.append(pearson_tuple[1])\n r_squared_values.append(compute_metric(best_config, 'r2_score', trait))\n test_loss.append(compute_metric(best_config, 'test_loss', trait))\n\n # average the metrics values over the outer folds\n avg_pearson = np.mean(np.array(pearson_values)), np.std(np.array(pearson_values))\n avg_p_value = np.mean(np.array(pearson_p_values))\n avg_r_squared = np.mean(np.array(r_squared_values)), np.std(np.array(r_squared_values))\n avg_test_loss = np.mean(np.array(test_loss)), np.std(np.array(test_loss))\n print('DATA: %s, predicting: %s' % (data_set.__name__, trait))\n print('(avg.PEARSON, STDEV): %s and p-VALUE %s: ' % (avg_pearson, avg_p_value))\n print('(avr.R-SQUARED, STDEV): %s ' % (avg_r_squared,))\n print('(avg. TEST LOSS, STDEV): %s \\n' % (avg_test_loss,))\n print()\n\n\ndef validate_gat_inference():\n '''\n Apply a plain CV experiment on predicting a different target variable and report the pearson and r-squared.\n :return: void\n '''\n trait = 'FAC3'\n config = ConfigGAT({'pers_traits_selection': [trait],\n 'nested_CV_level': 'outer',\n 'eval_fold_in': 0,\n 'hidden_units': [30, 20, 15],\n 'attention_heads': [3, 3, 2],\n 'include_ew': False,\n 'readout_aggregator': GATModel.master_node_aggregator,\n 'load_specific_data': load_struct_data,\n 'learning_rate': 0.0001,\n 'attn_drop': 0.6})\n\n pearson_values, r_squared_values, p_values = [], [], []\n for out_fold in range(config.params['k_outer']):\n config.params['eval_fold_out'] = out_fold\n evaluate_gat(config)\n pearson_values.append(compute_metric(config, 'pearson_score', trait)[0])\n p_values.append(compute_metric(config, 'pearson_score', trait)[1])\n r_squared_values.append(compute_metric(config, 'r2_score', trait))\n\n # average the metric values over the outer folds\n avg_pearson = np.mean(np.array(pearson_values))\n avg_p_value = np.mean(np.array(p_values))\n avg_r_squared = np.mean(np.array(r_squared_values))\n print('Model %s | Pearson: %f | p-value: %s | R-squared: %f' % (config, avg_pearson, avg_p_value, avg_r_squared))\n\n\nif __name__ == \"__main__\":\n outer_evaluation('GAT')\n outer_evaluation('LR')\n outer_evaluation('SVR')\n outer_evaluation('RVM')\n\n","sub_path":"Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":9143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"484018374","text":"import matplotlib.pyplot as plt\nimport os\n\n#name = input('100nm-5HT-glutamate-trode2-(3)_CV.txt')\n#handle = open(name)\ndef function():\n \n x_axis = list()\n x_expon = list()\n x_dec = list()\n\n voltage_axis = list()\n voltage_expon = list()\n voltage_dec = list()\n\n current_axis = list()\n current_expon = list()\n current_dec = list()\n\n# Assign each column to seperate lists\n for line in f:\n sep = line.split()\n x_axis.append(sep[0])\n voltage_axis.append(sep[1])\n current_axis.append(sep[2])\n\n\n# Conversion of exponents to decinmals of Current\n for exp in current_axis:\n conv = exp.split('E')\n exp_num = float(conv[1])\n current_expon.append(10**exp_num)\n\n dec_num = float(conv[0])\n current_dec.append(dec_num)\n\n# Creates list of exponents 
and decimals \n for exp in voltage_axis:\n conv = exp.split('E')\n exp_num = float(conv[1])\n voltage_expon.append(10**exp_num)\n\n dec_num = float(conv[0])\n voltage_dec.append(dec_num)\n \n# Multiples lists of decimals and exponents to give absolute float values\n final_current = [m*n for m, n in zip(current_dec, current_expon)] \n final_voltage = [j*k for j, k in zip(voltage_dec, voltage_expon)] \n \n print('The highest current value is: ')\n x_max = max(final_current) \n print(x_max)\n\n plt.axhline(0, color='black')\n plt.xlabel('$Voltage$ (V)')\n plt.ylabel('$Current$ (nA)')\n plt.plot(final_voltage, final_current)\n plt.legend(loc=1)\n plt.savefig(filename+'.png', dpi=400)\n plt.clf()\n \n#name = input()\n#file = open(name)\n\n\ndirectory = '/Users/Wayne/Desktop/Python'\n\nfor filename in os.listdir(directory):\n if filename.endswith(\".txt\"):\n f = open(filename)\n print('File opened: ',filename)\n function()\n \n else: continue \n\n\n\n ","sub_path":"Melissa_Code_1.3(openiteration).py","file_name":"Melissa_Code_1.3(openiteration).py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"480999412","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 7 15:04:57 2019\n\n@author: tarun.bhavnani@dev.smecorner.com\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 11:50:22 2019\n\n@Author: tarun.bhavnani@dev.smecorner.com\n\"\"\"\n\n#data prep\nimport pandas as pd\nimport os\nimport re\nimport numpy as np\nimport pickle\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, MaxPooling1D, GlobalAveragePooling1D,SeparableConv1D\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport pandas as pd\nfrom transc_function import clean_transc\nfrom transc_function import pattern\nfrom keras.callbacks import EarlyStopping\nfrom time import time\n\n#os.chdir(\"/home/tarun.bhavnani@dev.smecorner.com/Desktop/ocr_trans/Final_Models/new\")\n#first recreating the final data for model training!!\ndat= pd.read_csv(\"final_clean_data_for_training_model.csv\")\n\nlist(dat)\ndat= clean_transc(dat)\ndat[\"classification\"].value_counts()\n#dat[\"classification\"]=[y if y!=\"Not_Tagged\" else x for x,y in zip(dat[\"classification\"], dat[\"cl_cl\"])]\ndat[\"classification\"]=[y if x==\"Not_Tagged\" else x for x,y in zip(dat[\"classification\"], dat[\"Merged_tagging\"])]\n\n#merge classes\nchngs={\"number\":\"others\",\"nach\":\"nach/emi\",\"emi\":\"nach/emi\",\"charge\":\"charges\",\"o/w returnreturn\":\"o/wreturn\",\"Tax\":\"tax\",\"Transfer\":\"transfer\",\"IMPS\":\"imps\",\"sudhircharges\": \"charges\",\"Fund Transfer\": \"transfer\", \"ecs\":\"nach/emi\", \"mmt\": \"cash\",\"sudhirreturn\": \"return\" }\n\ndat[\"classification\"]= [chngs[i] if i in chngs else i for i in dat[\"classification\"]]\nlen(set(dat[\"classification\"]))\n#23\n\n\n\n#######\nle = LabelEncoder()\ndat[\"labels\"]=le.fit_transform(dat.classification)\n\n#save encoder model\nnp.save('classes.npy', le.classes_)\n\n\n####\nY=pd.get_dummies(dat[\"labels\"])\n\n#X_train, X_test, y_train, y_test = train_test_split(dat,Y, test_size=.33, random_state=43)\nX_train, X_test, y_train, y_test = train_test_split(dat,Y, test_size=0, random_state=43)#for final training\n\n#Initiate the tokenizer 
with all the values\n\ntok= Tokenizer(num_words=2000, split=\" \", oov_token=\"-OOV-\")\ntok.fit_on_texts(dat[\"Des\"])\n\ntok.word_index[\"-OOV-\"]\ntok.word_index[\"i\"]#71\ntok.word_index[\"-OOV-\"]=71\n\n\n#save tokenizer\n\nimport pickle\n\n# saving\nwith open('tokenizer.pickle', 'wb') as handle:\n pickle.dump(tok, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n#for X_train\nX_tr=tok.texts_to_sequences(X_train[\"Des\"])\n\nX_tr=pad_sequences(X_tr, maxlen=10)\n\nX_tt=tok.texts_to_sequences(X_test[\"Des\"])\n\nX_tt=pad_sequences(X_tt, maxlen=10)\n\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\nY_tr=y_train\nY_tt=y_test\n\n#Model!!\n\n\nop_units, op_activation = len(set(dat[\"labels\"])), \"softmax\"\n \nfrom keras.models import Sequential\nfrom keras.layers import Embedding, Dropout, Dense, MaxPooling1D, GlobalAveragePooling1D, SeparableConv1D, Flatten\n\nmodel = Sequential()\nmodel.add(Embedding(input_dim=2000, output_dim= 128,input_length= X_tr.shape[1]))\nmodel.add(Dropout(rate=.2))\nmodel.add(SeparableConv1D(filters=32, kernel_size=3, padding=\"same\", dilation_rate=1,\n activation=\"relu\", bias_initializer=\"random_uniform\",\n depthwise_initializer=\"random_uniform\"))\n \nmodel.add(SeparableConv1D(filters=32, kernel_size=3, padding=\"same\", dilation_rate=1,\n activation=\"relu\", bias_initializer=\"random_uniform\",\n depthwise_initializer=\"random_uniform\"))\nmodel.add(MaxPooling1D())\n #model.add() \nmodel.add(SeparableConv1D(filters=64, kernel_size=3, padding=\"same\", dilation_rate=1,\n activation=\"relu\", bias_initializer=\"random_uniform\",\n depthwise_initializer=\"random_uniform\"))\nmodel.add(SeparableConv1D(filters=64, kernel_size=3, padding=\"same\", dilation_rate=1,\n activation=\"relu\", bias_initializer=\"random_uniform\",\n depthwise_initializer=\"random_uniform\"))\n\nmodel.add(GlobalAveragePooling1D())\nmodel.add(Dropout(rate=.2))\nmodel.add(Dense(op_units, activation=op_activation))\nmodel.summary()\n \n\nmodel.compile(loss=\"categorical_crossentropy\", metrics=[\"acc\"], optimizer= \"adam\")\n\n\nt0 = time()\nearly_stop = EarlyStopping(monitor='val_loss', patience=2, verbose=1)\nhistory= model.fit(X_tr,Y_tr, epochs=10, batch_size=32, validation_split=.05,callbacks=[early_stop])\n\nprint(\"done in %0.3fs\" % (time() - t0))\n\nhist = history.history\nprint('Validation accuracy: {acc}, loss: {loss}'.format(\n acc=hist['val_acc'][-1], loss=hist['val_loss'][-1]))\n\n\n\n\n\n#Save Model\n\n# serialize model to JSON\nfrom keras.models import model_from_json\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model.h5\")\nprint(\"Saved model to disk\")\n\n\n# load json and create model\n#json_file = open('model.json', 'r')\n#loaded_model_json = json_file.read()\n#json_file.close()\n","sub_path":"Trans_Final_Model/Transaction_classification_model_tb.py","file_name":"Transaction_classification_model_tb.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"576372267","text":"import os\nimport json\nimport random\nimport argparse\nimport collections\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom typing import Optional, Tuple\nfrom tqdm.auto import tqdm\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\nfrom transformers.integrations import hp_params\n\nimport math\nimport 
time\n\nfrom transformers import Trainer\nfrom transformers.utils import logging\nfrom transformers.trainer_callback import (\n DefaultFlowCallback,\n ProgressCallback,\n TrainerState,\n)\nfrom transformers.file_utils import WEIGHTS_NAME\nfrom transformers.trainer_pt_utils import nested_detach\nfrom transformers.trainer_utils import (\n TrainOutput,\n get_last_checkpoint,\n speed_metrics,\n)\n\nDEFAULT_CALLBACKS = [DefaultFlowCallback]\nDEFAULT_PROGRESS_CALLBACK = ProgressCallback\n\nlogger = logging.get_logger(__name__)\n\ndef set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # Multi GPU\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.enabled = False\n\ndef get_label_list(labels):\n return list(set(labels))\n\ndef str2bool(i):\n if isinstance(i, bool):\n return i\n if i.lower() in ('true', 't', '1'):\n return True\n elif i.lower() in ('false', 'f', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('It is not Boolean')\n\n\nclass CustomTrainer(Trainer):\n def compute_loss(self, model, inputs, return_outputs=False):\n \"\"\"\n How the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n \"\"\"\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss\n \n def custom_training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], cur_epoch=-1) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to train.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n :obj:`torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1:\n loss = loss / self.args.gradient_accumulation_steps\n\n loss.backward()\n\n return loss.detach()\n \n def train(self, resume_from_checkpoint: Optional[Union[str, bool]] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, **kwargs):\n \"\"\"\n Main training entry point.\n\n Args:\n resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):\n If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of\n :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in\n `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. 
If present,\n training will resume from the model/optimizer/scheduler states loaded here.\n trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):\n The trial run or the hyperparameter dictionary for hyperparameter search.\n kwargs:\n Additional keyword arguments used to hide deprecated arguments\n \"\"\"\n\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n args = self.args\n\n self.is_in_train = True\n\n # do_train is not a reliable argument, as it might not be set and .train() still called, so\n # the following is a workaround:\n if args.fp16_full_eval and not args.do_train:\n self.model = self.model.to(args.device)\n\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n if len(kwargs) > 0:\n raise TypeError(f\"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:\n resume_from_checkpoint = get_last_checkpoint(args.output_dir)\n if resume_from_checkpoint is None:\n raise ValueError(f\"No valid checkpoint found in output directory ({args.output_dir})\")\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if self.place_model_on_device:\n self.model = self.model.to(args.device)\n self.model_wrapped = self.model\n\n # Keeping track whether we can len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n else:\n # see __init__. 
max_steps is set when the dataset has no __len__\n max_steps = args.max_steps\n num_train_epochs = int(args.num_train_epochs)\n num_update_steps_per_epoch = max_steps\n\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n model = self._wrap_model(self.model_wrapped)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # Train!\n world_size = 1 # number of processes in parallel\n\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * world_size\n num_examples = (self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, \"trainer_state.json\")):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps)\n self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args, self.state, self.control)\n\n tr_loss += self.custom_training_step(model, inputs)\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), 
args.max_grad_norm)\n\n # Optimizer step\n optimizer_was_run = True\n self.optimizer.step()\n\n if optimizer_was_run:\n self.lr_scheduler.step()\n\n model.zero_grad()\n\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\")\n\n # We load the model state dict on the CPU to avoid an OOM error.\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location=\"cpu\")\n # If the model is on the GPU, it still works!\n self.model.load_state_dict(state_dict)\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)\n\n def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n\n Return:\n Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,\n logits and labels (each being optional).\n \"\"\"\n\n has_labels = all(inputs.get(k) is not None for k in self.label_names)\n inputs = self._prepare_inputs(inputs)\n if ignore_keys is None:\n if hasattr(self.model, \"config\"):\n ignore_keys = getattr(self.model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.\n if has_labels:\n labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))\n if len(labels) == 1:\n labels = labels[0]\n else:\n labels = None\n\n with torch.no_grad():\n if has_labels:\n loss, outputs = self.compute_loss(model, inputs, return_outputs=True)\n loss = loss.mean().detach()\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n logits = outputs[1:]\n else:\n loss = None\n outputs = model(**inputs)\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)\n else:\n logits = outputs\n\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index - 1]\n\n if prediction_loss_only:\n return (loss, None, None)\n\n logits = nested_detach(logits)\n if len(logits) == 1:\n logits = logits[0]\n\n return (loss, logits, labels)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"165251304","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\nimport settings\nfrom helpers import *\nfrom application.application import Application\nfrom settings import server_url, selenium_hub\n\n\n@pytest.fixture\ndef app(request):\n app = Application()\n request.addfinalizer(app.quit)\n return app\n\n@pytest.fixture(scope='session')\ndef driver(request):\n # wd = webdriver.Remote(selenium_hub, desired_capabilities={\"browserName\": browser.lower()})\n\n if settings.browser == \"Ie\":\n wd = webdriver.Ie()\n wd.implicitly_wait(5)\n elif settings.browser == \"Firefox\":\n wd = webdriver.Firefox()\n elif settings.browser == \"Edge\":\n wd = webdriver.Edge()\n wd.implicitly_wait(5)\n else:\n opt = webdriver.ChromeOptions()\n opt.add_experimental_option('w3c', False)\n wd = webdriver.Chrome(options=opt) # Use Chrome browser by default\n\n request.addfinalizer(wd.quit)\n return wd\n\n@pytest.fixture(scope='session')\ndef remote_driver(request):\n wd = webdriver.Remote(selenium_hub, desired_capabilities={\"browserName\": settings.browser.lower()})\n\n request.addfinalizer(wd.quit)\n\n return wd\n\n\n\n@pytest.fixture\ndef loginadmin(request, driver):\n driver.get(server_url + \"/admin\")\n if not is_element_present(driver, By.ID, \"box-apps-menu-wrapper\"):\n driver.find_element_by_name(\"username\").send_keys(\"admin\")\n driver.find_element_by_name(\"password\").send_keys(\"admin\")\n 
driver.find_element_by_name(\"login\").click()\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#body-wrapper\")))\n\n@pytest.fixture\ndef mainapp(request,driver):\n driver.get(server_url + \"/en/\")","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"74492529","text":"import random\n\n\ndef gcd(a, b):\n return a if not b else gcd(b, a % b)\n\n\ndef check(a, p, powers, power2, q):\n if gcd(a, p) > 1:\n return 0\n if pow(a, q, p) == 1:\n return 1\n for i in range(power2):\n if pow(a, powers[i] * q, p) == p - 1:\n return 1\n return 0\n\n\ndef is_prime(n):\n if n == 2:\n return 1\n exit(0)\n if not n & 1:\n return 0\n exit(0)\n powers = [1]\n power2 = 0\n q = n - 1\n # print(n, \"JOOOOOOOOPA\")\n while q % 2 == 0:\n # print(q)\n # input()\n q //= 2\n power2 += 1\n powers.append(powers[-1] * 2)\n for i in range(min(373, n)):\n a = random.randint(2, n - 1)\n if not check(a, n, powers, power2, q):\n return 0\n # exit(0)\n return 1\n\n\ndef f(a, n):\n return (a ** 2 + 2) % n\n\n\nn = int(input())\nk = int(n ** 0.5)\ncnt = 0\narr1 = []\narr = [n]\nif is_prime(n):\n print(n)\n exit(0)\n# print(arr)\nwhile len(arr):\n # print(len(arr))\n # print(arr[len(arr) - 1], 'lol')\n kek = arr[len(arr) - 1]\n start = random.randint(0, arr[len(arr) - 1] - 1)\n cnt = 0\n # print(len(arr), 'kek', arr)\n # print(kek)\n x0, x1 = start % kek, f(start, kek)\n g = gcd((x0 - x1) % arr[len(arr) - 1], arr[len(arr) - 1])\n f1 = 0\n while cnt < max(k, 100):\n if 1 < g < n:\n # print(arr[-1])\n arr.pop(len(arr) - 1)\n # print(arr, arr1, 'before', g, kek // g)\n if is_prime(g):\n arr1.append(g)\n else:\n arr.append(g)\n if kek // g == 1:\n break\n if is_prime(kek // g):\n arr1.append(kek // g)\n else:\n arr.append(kek // g)\n # print(arr, arr1)\n # input()\n break\n x0, x1 = f(x0, kek), f(f(x1, kek), kek)\n g = gcd(abs(x0 - x1), kek)\n cnt += 1\nprint(*sorted(arr1), sep='\\n')\n","sub_path":"lksh/2017/zachet/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"496516281","text":"#Python multithreading example to demonstrate locking.\n#1. Define a subclass using Thread class.\n#2. Instantiate the subclass and trigger the thread. \n#3. Implement locks in thread's run method. 
\n\nfrom __future__ import print_function\nfrom threadrealtimedetection import TRealtimeObjectDetection\nfrom imutils.video import VideoStream\nimport numpy as np\nimport imutils\nimport time\nimport threading\nimport datetime\nimport cv2\n\n\nexitFlag = 0\n\nclass myThread (threading.Thread):\n def __init__(self, name, counter):\n threading.Thread.__init__(self)\n self.threadID = counter\n self.name = name\n self.counter = counter\n def run(self):\n print (\"Starting \" + self.name)\n # Acquire lock to synchronize thread\n threadLock.acquire()\n print_date(self.name, self.counter)\n #line(number)\n # Release lock for the next thread\n threadLock.release()\n print (\"Exiting \" + self.name)\n\n \ndef line(tNumber,number):\n #number = 7\n #number = number\n #threadLock.acquire()\n while (number > 0):\n threadLock.acquire()\n print (tNumber ,\" is at \" , number)\n number = number - 1\n threadLock.release()\n # time.sleep(0.1)\n #threadLock.release()\n # Release lock for the next thread\n\ndef print_date(threadName, counter):\n datefields = []\n today = datetime.date.today()\n datefields.append(today)\n print (\"%s[%d]: %s\" % ( threadName, counter, datefields[0] ))\n\nthreadLock = threading.Lock()\nthreads = []\n\n# Create new threads\nthread1 = myThread(\"Thread\", 1)\nthread2 = myThread(\"Thread\", 2)\n\n#arguments to run\nn1=10\nn2=5\nthread1 = threading.Thread(target=line, args=(1,n1,))\nthread2 = threading.Thread(target=line, args=(2,n2,))\n\n# Start new Threads\nthread1.start()\nthread2.start()\n\n# Add threads to thread list\nthreads.append(thread1)\nthreads.append(thread2)\n\n# Wait for all threads to complete\nfor t in threads:\n t.join()\nprint (\"Exiting the Program!!!\")","sub_path":"multicam/lan/multithreading.py","file_name":"multithreading.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437678653","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 29 16:14:13 2017\n\n@author: kading\n\"\"\"\n\n\nimport cv2 # OpenCV Library\n \n#-----------------------------------------------------------------------------\n# Load and configure Haar Cascade Classifiers\n#-----------------------------------------------------------------------------\n \n# location of OpenCV Haar Cascade Classifiers:\n#baseCascadePath = \"C:\\Users\\kading\\Downloads\\data\\haarcascades\"\n\n# xml files describing our haar cascade classifiers\nfaceCascadeFilePath = \"haarcascade_frontalface_default.xml\"\neyeCascadeFilePath = \"haarcascade_eye_tree_eyeglasses.xml\"\n\n# build our cv2 Cascade Classifiers\nfaceCascade = cv2.CascadeClassifier(faceCascadeFilePath)\neyeCascade = cv2.CascadeClassifier(eyeCascadeFilePath)\n \n#-----------------------------------------------------------------------------\n# Load and configure mustache (.png with alpha transparency)\n#-----------------------------------------------------------------------------\n \n# Load our overlay image: hat.png\nimgHat = cv2.imread('hat.png',-1)\n \n# Create the mask for the hat\norig_mask = imgHat[:,:,3]\n \n# Create the inverted mask for the hat\norig_mask_inv = cv2.bitwise_not(orig_mask)\n \n# Convert hat image to BGR\n# and save the original image size (used later when re-sizing the image)\nimgHat = imgHat[:,:,0:3]\norigHatHeight, origHatWidth = imgHat.shape[:2]\n\nvideo_capture = cv2.VideoCapture(0)\nwhile True:\n # Capture video feed\n ret, frame = video_capture.read()\n # Create greyscale image from the video feed\n gray = 
cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Detect faces in input video stream\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.cv.CV_HAAR_SCALE_IMAGE\n )\n # Iterate over each face found\n for (x, y, w, h) in faces:\n x1 = x-w/3\n x2 = x+w*4/3\n y1 = y-h*2/3\n y2 = y\n if y1 < 0:\n break\n HatWidth = x2 - x1\n HatHeight = y2 - y1\n Hat = cv2.resize(imgHat, (HatWidth,HatHeight), interpolation = cv2.INTER_AREA)\n mask = cv2.resize(orig_mask, (HatWidth,HatHeight), interpolation = cv2.INTER_AREA)\n mask_inv = cv2.resize(orig_mask_inv, (HatWidth,HatHeight), interpolation = cv2.INTER_AREA)\n roi = frame[y1:y2, x1:x2]\n # roi_bg contains the original image only where the mustache is not\n # in the region that is the size of the mustache.\n roi_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)\n # roi_fg contains the image of the mustache only where the mustache is\n roi_fg = cv2.bitwise_and(Hat,Hat,mask = mask)\n # join the roi_bg and roi_fg\n dst = cv2.add(roi_bg,roi_fg)\n # place the joined image, saved to dst back over the original image\n frame[y1:y2, x1:x2] = dst\n # Display the resulting frame\n cv2.imshow('Video', frame)\n # press any key to exit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvideo_capture.release()\ncv2.destroyAllWindows()\n \n","sub_path":"Face_Recognition_Windows/Add_Paparazi/hat.py","file_name":"hat.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589279899","text":"import csv\r\nimport random\r\nimport Mantel # https://jwcarr.github.io/MantelTest/\r\nimport numpy as np\r\nfrom scipy.cluster.hierarchy import linkage\r\nfrom scipy.spatial.distance import squareform\r\nfrom clusim.clustering import Clustering\r\nimport clusim.sim as sim\r\nimport matplotlib.pyplot as plt\r\n\r\nperms_of_each_n=10 #average of perms_of_each_n for n participants\r\nperms_of_mantel_test=20#default is 10000\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n#SKROUTZ\r\ncards=54 #count of cards\r\ntotal_participants=203 #count of all participants\r\n#participants_range: lists with items the count of participants that we run the test\r\n#so each of its items is a number from 0 to total_participants\r\n#the 0 we use it only for the graphp O(0,0)\r\nparticipants_range=[0,1,2,3,5,7,10,15,20,25,30,35,40,45,50,60]\r\ncolumn_category_label=3 #column 4 in csv (python starts from 0)\r\n\r\n#column 1 of csv: participant_id\r\n#column 2 of csv: card_index\r\n#column 3 of csv: card_label\r\n#column 4 of csv: category_label\r\n#column 5 of csv: participant_sex\r\n#column 6 of csv: participant_age\r\n#column 7 of csv: participant_time_in_internet\r\n#column 8 of csv: participant_previous_experience_at_eshop_domain\r\n#column 9 of csv: difficult_cards\r\n#column 10 of csv: how_dificult_was_the_procedure\r\n\r\n#the row data of csv is sorted firstly by participant_id and secondly by card_index\r\nall_data = []\r\nwith open(r\"skroutz.csv\", encoding=\"utf8\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n for row in reader: # each row is a list\r\n all_data.append(row)\r\n\"\"\"\r\n \r\n \r\n\r\n\r\n\r\n#CELESTINO\r\ncards=59 #count of cards\r\ntotal_participants=210 #count of all participants\r\n#participants_range: lists with items the count of participants that we run the test\r\n#so each of its items is a number from 0 to total_participants\r\n#the 0 we use it only for the graph 
O(0,0)\r\nparticipants_range=[0,1,2,3,5,7,10,15,20,25,30,35,40,45,50,60]\r\ncolumn_category_label=3 #column 4 in csv (python starts from 0)\r\n\r\n#column 1 of csv: participant_id\r\n#column 2 of csv: card_index\r\n#column 3 of csv: card_label\r\n#column 4 of csv: category_label\r\n#column 5 of csv: participant_sex\r\n#column 6 of csv: participant_age\r\n#column 7 of csv: participant_time_in_internet\r\n#column 8 of csv: participant_previous_experience_at_eshop_domain\r\n#column 9 of csv: difficult_cards\r\n#column 10 of csv: how_dificult_was_the_procedure\r\n\r\n#the row data of csv is sorted firstly by participant_id and secondly by card_index\r\nall_data = []\r\nwith open(r\"celestino.csv\", encoding=\"utf8\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n for row in reader: # each row is a list\r\n all_data.append(row)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef dissimilarity_matrix(some_participants):\r\n \r\n global column_category_label, all_data, cards, total_participants\r\n \r\n #participants_range: lists with items the count of participants that we run the test\r\n #so each of its items is a number from 0 to total_participants-1 (python starts counting from 0)\r\n #with random.sample we take UNIQUE numbers from the list in the first parameter of sample method\r\n #examples of random.sample() here: https://www.geeksforgeeks.org/python-random-sample-function\r\n participants_range=random.sample(range(0, total_participants), some_participants)\r\n \r\n #Initialize the dissimilarity matrix with zeros\r\n #The elements of the main diagonal will be always 0\r\n dissimilarity_matrix = [[0 for x in range(cards)] for y in range(cards)]\r\n \r\n for participant in participants_range:\r\n for x in range(participant*cards, (participant+1)*cards): #take cards/rows only form current participant\r\n \r\n for y in range(x+1,(participant+1)*cards):#Because the dissimilarity_matrix is symmetrical, we start the itteration form x+1 \r\n \r\n #if the cards have been sorted in differents groups from the current user\r\n if all_data[x][column_category_label]!=all_data[y][column_category_label]: \r\n \r\n #We compare x-participant*cards and y-participant*cards in this itteration\r\n # x-participant*cards is the card_index (0, 1, ... , cards-1) of the 1st compared card from participant\r\n # y-participant*cards is the card_index (0, 1, ... 
, cards-1) of the 2nd compared card from participant \r\n dissimilarity_matrix[x-participant*cards][y-participant*cards]=dissimilarity_matrix[x-participant*cards][y-participant*cards]+1\r\n \r\n #With the following line, we make the matrix symmetrical\r\n dissimilarity_matrix[y-participant*cards][x-participant*cards]=dissimilarity_matrix[x-participant*cards][y-participant*cards]\r\n\r\n return dissimilarity_matrix\r\n\r\n\r\n\r\n\r\n\r\ndef clustering_with_clusim(dis):\r\n mat = np.array(dis)\r\n dists = squareform(mat)\r\n linkage_matrix = linkage(dists, \"average\")\r\n c = Clustering().from_scipy_linkage(linkage_matrix, dist_rescaled = True)\r\n return c\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef mantel_elsim_r_average_and_errors(some_participants):\r\n \r\n global column_category_label, all_data, cards, total_participants, perms_of_each_n, perms_of_mantel_test\r\n dis2=dissimilarity_matrix(total_participants)\r\n \r\n c2 =clustering_with_clusim(dis2) \r\n \r\n \r\n mantel_SUM=0\r\n elsim_SUM=0\r\n # r is between -1 and 1\r\n mantel_minimum=10 \r\n mantel_maximum=-10\r\n\r\n elsim_minimum=10 \r\n elsim_maximum=-10\r\n \r\n for i in range(perms_of_each_n):\r\n dis1=dissimilarity_matrix(some_participants)\r\n \r\n # Mantel Method\r\n mantel=Mantel.test(dis1, dis2, perms_of_mantel_test, method='pearson', tail='two-tail')\r\n mantel_r = mantel[0]\r\n \r\n #find errors (minimum and maximum)\r\n if mantel_r < mantel_minimum:\r\n mantel_minimum = mantel_r\r\n elif mantel_r > mantel_maximum:\r\n mantel_maximum = mantel_r\r\n mantel_SUM = mantel_SUM + mantel_r\r\n\r\n\r\n c1 =clustering_with_clusim(dis1)\r\n \r\n # Element-centric Similarity\r\n elsim_r = sim.element_sim(c1, c2, r=1.0, alpha=0.9)\r\n\r\n #find errors (minimum and maximum)\r\n if elsim_r < elsim_minimum:\r\n elsim_minimum = elsim_r\r\n elif elsim_r > elsim_maximum:\r\n elsim_maximum = elsim_r\r\n elsim_SUM = elsim_SUM + elsim_r\r\n \r\n \r\n mantel_average = mantel_SUM / perms_of_each_n #average of mantel_r\r\n mantel_l_error = mantel_average - mantel_minimum #mantel_lower_error\r\n mantel_u_error = mantel_maximum - mantel_average #mantel_upper_error\r\n\r\n elsim_average = elsim_SUM / perms_of_each_n #average of elsim_r\r\n elsim_l_error = elsim_average - elsim_minimum #mantel_lower_error\r\n elsim_u_error = elsim_maximum - elsim_average #mantel_upper_error\r\n \r\n return mantel_average, mantel_l_error, mantel_u_error, elsim_average, elsim_l_error, elsim_u_error\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef mantel_elsim_r_average_and_errors_in_participants_range(participants_range):\r\n \r\n global total_participants\r\n \r\n #Initialization with zeros\r\n \r\n mantel_r_average_of_each_n=[]\r\n mantel_r_lower_error_of_each_n=[]\r\n mantel_r_upper_error_of_each_n=[]\r\n \r\n elsim_r_average_of_each_n=[]\r\n elsim_r_lower_error_of_each_n=[]\r\n elsim_r_upper_error_of_each_n=[]\r\n \r\n for y in range(0,total_participants+1):\r\n mantel_r_average_of_each_n.append(0)\r\n mantel_r_lower_error_of_each_n.append(0)\r\n mantel_r_upper_error_of_each_n.append(0)\r\n \r\n elsim_r_average_of_each_n.append(0)\r\n elsim_r_lower_error_of_each_n.append(0)\r\n elsim_r_upper_error_of_each_n.append(0)\r\n \r\n \r\n \r\n \r\n # x is the number/id of participant (possible values: 1 , ... 
, total_participants)\r\n for x in participants_range:\r\n if x==0: \r\n #0 ~ O(0,0) is the axis origin\r\n mantel_r_average_of_each_n[0]=0\r\n mantel_r_lower_error_of_each_n[0]=0\r\n mantel_r_upper_error_of_each_n[0]=0\r\n \r\n elsim_r_average_of_each_n[0]=0\r\n elsim_r_lower_error_of_each_n[0]=0\r\n elsim_r_upper_error_of_each_n[0]=0\r\n else:\r\n mantel_average, mantel_l_error, mantel_u_error, elsim_average, elsim_l_error, elsim_u_error = mantel_elsim_r_average_and_errors(x)\r\n \r\n #put the values in an array\r\n mantel_r_average_of_each_n[x]=mantel_average\r\n mantel_r_lower_error_of_each_n[x]=mantel_l_error\r\n mantel_r_upper_error_of_each_n[x]=mantel_u_error\r\n\r\n elsim_r_average_of_each_n[x]=elsim_average\r\n elsim_r_lower_error_of_each_n[x]=elsim_l_error\r\n elsim_r_upper_error_of_each_n[x]=elsim_u_error\r\n \r\n #we return the arrays\r\n return mantel_r_average_of_each_n, mantel_r_lower_error_of_each_n, mantel_r_upper_error_of_each_n, elsim_r_average_of_each_n, elsim_r_lower_error_of_each_n, elsim_r_upper_error_of_each_n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef save_errorbar(r_average_of_each_n, r_lower_error_of_each_n, r_upper_error_of_each_n, participants_range, title, xlabel, ylabel, save, clear):\r\n global total_participants\r\n \r\n x=[np.array(range(0,total_participants+1))[i] for i in participants_range] \r\n y=[r_average_of_each_n[i] for i in participants_range]\r\n \r\n lower_error = [r_lower_error_of_each_n[i] for i in participants_range] \r\n upper_error = [r_upper_error_of_each_n[i] for i in participants_range]\r\n asymmetric_error = [lower_error, upper_error]\r\n \r\n plt.errorbar(x, y, yerr=asymmetric_error)\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n\r\n plt.ylabel(ylabel)\r\n if save==True:\r\n plt.savefig(title+\".png\",dpi=300)\r\n if clear==True:\r\n plt.show()\r\n\r\n\r\n\r\n\r\nmantel_r_average_of_each_n, mantel_r_lower_error_of_each_n, mantel_r_upper_error_of_each_n, elsim_r_average_of_each_n, elsim_r_lower_error_of_each_n, elsim_r_upper_error_of_each_n = mantel_elsim_r_average_and_errors_in_participants_range(participants_range)\r\n\r\ni=0\r\nfor i in participants_range:\r\n if i==0:\r\n print(\"participants;\", \" \", \"mantel;\", \"\", \"elsim\")\r\n continue\r\n print(i,\"; \", mantel_r_average_of_each_n[i], \"; \", elsim_r_average_of_each_n[i]) \r\n\r\n\r\n#CASE: 2 graphs together\r\nsave_errorbar(mantel_r_average_of_each_n, mantel_r_lower_error_of_each_n, mantel_r_upper_error_of_each_n, participants_range, \"Mantel\", \"Sample Size\", \"Average correlation\", False, False)\r\nsave_errorbar(elsim_r_average_of_each_n, elsim_r_lower_error_of_each_n, elsim_r_upper_error_of_each_n, participants_range, \"Elsim & Mantel Error Bar\", \"Sample Size\", \"Average correlation\", True, True)\r\n\r\n#CASE: graphs seperately\r\nsave_errorbar(mantel_r_average_of_each_n, mantel_r_lower_error_of_each_n, mantel_r_upper_error_of_each_n, participants_range, \"Mantel Error Bar\", \"Sample Size\", \"Average correlation\", True, True)\r\nsave_errorbar(elsim_r_average_of_each_n, elsim_r_lower_error_of_each_n, elsim_r_upper_error_of_each_n, participants_range, \"Elsim Error Bar\", \"Sample Size\", \"Average correlation\", True, True)\r\n\r\n\r\n","sub_path":"FINAL2.py","file_name":"FINAL2.py","file_ext":"py","file_size_in_byte":11124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"158715943","text":"\"\"\"\nUsage:\n\nFirst you need to set up the database connection pool by creating\nan instance of PooledDB, passing the 
following parameters:\n\n creator: either an arbitrary function returning new DB-API 2\n connection objects or a DB-API 2 compliant database module\n\n mincached: the initial number of idle connections in the pool\n (the default of 0 means no connections are made at startup)\n\n maxcached: the maximum number of idle connections in the pool\n (the default value of 0 or None means unlimited pool size)\n\n maxshared: maximum number of shared connections allowed\n (the default value of 0 or None means all connections are dedicated 专用)\n When this maximum number is reached, connections are\n shared if they have been requested as shareable.\n\n maxconnections: maximum number of connections generally allowed\n (the default value of 0 or None means any number of connections)\n\n blocking: determines behavior when exceeding the maximum\n (if this is set to true, block and wait until the number of\n connections decreases, but by default an error will be reported)\n 确定超出最大值时的行为(如果将其设置为true,则阻止并等待直到连接数量减少,但默认情况下会报告错误)\n\n maxusage: maximum number of reuses of a single connection\n (the default of 0 or None means unlimited reuse)\n When this maximum usage number of the connection is reached,\n the connection is automatically reset (closed and reopened).\n\n setsession: an optional list of SQL commands that may serve to\n prepare the session, e.g. [\"set datestyle to german\", ...]\n\n reset: how connections should be reset when returned to the pool\n (False or None to rollback transcations started with begin(),\n the default value True always issues a rollback for safety's sake)\n 返回池后应如何重置连接(对于以begin() 开始的回滚事务,为False或None,为安全起见,默认值True始终发出回滚)\n\n failures: an optional exception class or a tuple of exception classes\n for which the connection failover mechanism shall be applied,\n if the default (OperationalError, InternalError) is not adequate\n\n ping: an optional flag controlling when connections are checked\n with the ping() method if such a method is available\n (\n 0 = None = never,\n 1 = default = whenever fetched from the pool,\n 2 = when a cursor is created,\n 4 = when a query is executed,\n 7 = always, and all other bit combinations of these values\n )\n\nFor instance, if you are using pgdb as your DB-API 2 database module and\nwant a pool of at least five connections to your local database 'mydb':\n\n import pgdb # import used DB-API 2 module\n from DBUtils.PooledDB import PooledDB\n pool = PooledDB(pgdb, 5, database='mydb')\n\nOnce you have set up the connection pool you can request\ndatabase connections from that pool:\n\n db = pool.connection()\n\nYou can use these connections just as if they were ordinary\nDB-API 2 connections. Actually what you get is the hardened\nSteadyDB version of the underlying DB-API 2 connection.\n您可以像使用普通连接一样使用这些连接DB-API 2连接。 其实你得到的是硬化\n基础DB-API 2连接的SteadyDB版本。\n\nPlease note that the connection may be shared with other threads\nby default if you set a non-zero maxshared parameter and the DB-API 2\nmodule allows this. If you want to have a dedicated 专用 connection, use:\n\n db = pool.connection(shareable=False)\n\nYou can also use this to get a dedicated 专用 connection:\n\n db = pool.dedicated_connection()\n\nIf you don't need it any more, you should immediately return it to the\npool with db.close(). 
You can get another connection in the same way.\n\nWarning: In a threaded environment, never do the following:\n\n pool.connection().cursor().execute(...)\n\nThis would release the connection too early for reuse which may be\nfatal if the connections are not thread-safe. Make sure that the\nconnection object stays alive as long as you are using it, like that:\n这将太早释放连接而无法重用,如果连接不是线程安全的,这可能是会致命。 只要使用连接对象,\n就要确保连接对象就会保持活动状态,如下所示:\n\n db = pool.connection()\n cur = db.cursor()\n cur.execute(...)\n res = cur.fetchone()\n cur.close() # or del cur\n db.close() # or del db\n\nNote that you need to explicitly start transactions by calling the\nbegin() method. This ensures that the connection will not be shared\nwith other threads, that the transparent reopening will be suspended\nuntil the end of the transaction, and that the connection will be rolled\nback before being given back to the connection pool.\n请注意,您需要通过调用begin()方法明确开启一个事务。 这样可以确保\n连接不会与其他线程共享,当事务结束时将透明重新打开连接,当连接返回给连接池时会被回滚。\n\n\nmincached | maxcached\n0 0\n0 3\n3 0\n3 3\n3 5\n\nmaxshared\n0\n2\n3\n4\n5\n6\n\nmincached | maxcached | maxshared\n0 0 0\n0 0 3\n\n0 3 0\n0 3 2\n0 3 3\n0 3 4\n\n3 0 0\n3 0 2\n3 0 3\n3 0 5\n\n3 3 0\n3 3 2\n3 3 3\n3 3 4\n\n3 5 0\n3 5 2\n3 5 3\n3 5 4\n3 5 5\n3 5 6\n\n\nmincached | maxcached | maxshared | maxconnections | connections\n0 0 0 0\n0 0 0 2\n\n0 0 3 0\n0 0 3 3\n0 0 3 5\n\n0 3 0 0\n0 3 0 3\n0 3 0 5\n\n0 3 2 0\n0 3 2 3\n0 3 2 5\n\n0 3 3 0\n0 3 3 3\n0 3 3 5\n\n0 3 4 0\n0 3 4 4\n0 3 4 5\n\n3 0 0 0\n3 0 0 3\n3 0 0 5\n\n3 0 2 0\n3 0 2 3\n3 0 2 5\n\n3 0 3 0\n3 0 3 3\n3 0 3 5\n\n3 0 5 0\n3 0 5 5\n3 0 5 6\n\n3 3 0 0\n3 3 0 3\n3 3 0 5\n\n3 3 2 0\n3 3 2 3\n3 3 2 5\n\n3 3 3 0\n3 3 3 3\n3 3 3 5\n\n3 3 4 0\n3 3 4 4\n3 3 4 5\n\n3 5 0 0\n3 5 0 5\n3 5 0 6\n\n3 5 2 0\n3 5 2 5\n3 5 2 6\n\n3 5 3 0\n3 5 3 5\n3 5 3 6\n\n3 5 4 0\n3 5 4 5\n3 5 4 6\n\n3 5 5 0\n3 5 5 5\n3 5 5 6\n\n3 5 6 0\n3 5 6 6\n3 5 6 7\n\n\"\"\"\n\nfrom threading import Condition\n\nfrom DBUtils.SteadyDB import connect\n\n__version__ = '1.3'\n\n\nclass PooledDBError(Exception):\n \"\"\"General PooledDB error.\"\"\"\n\n\nclass InvalidConnection(PooledDBError):\n \"\"\"Database connection is invalid.\"\"\"\n\n\nclass NotSupportedError(PooledDBError):\n \"\"\"DB-API module not supported by PooledDB.\"\"\"\n\n\nclass TooManyConnections(PooledDBError):\n \"\"\"Too many database connections were opened.\"\"\"\n\n\nclass PooledDB:\n version = __version__\n\n def __init__(\n self, creator, mincached=0, maxcached=0,\n maxshared=0, maxconnections=0, blocking=False,\n maxusage=None, setsession=None, reset=True,\n failures=None, ping=1,\n *args, **kwargs):\n\n try:\n threadsafety = creator.threadsafety\n except AttributeError:\n try:\n if not callable(creator.connect):\n raise AttributeError\n except AttributeError:\n threadsafety = 2\n else:\n threadsafety = 0\n if not threadsafety:\n raise NotSupportedError(\"Database module is not thread-safe.\")\n\n self._creator = creator\n self._args, self._kwargs = args, kwargs\n self._blocking = blocking\n self._maxusage = maxusage\n self._setsession = setsession\n self._reset = reset\n self._failures = failures\n self._ping = ping\n\n if mincached is None:\n mincached = 0\n if maxcached is None:\n maxcached = 0\n if maxconnections is None:\n maxconnections = 0\n if maxcached:\n if maxcached < mincached:\n maxcached = mincached\n self._maxcached = maxcached\n else:\n self._maxcached = 0\n\n if threadsafety > 1 and maxshared:\n self._maxshared = maxshared\n self._shared_cache = [] # self._shared_cache = [SharedDBConnection]\n 
else:\n self._maxshared = 0\n\n if maxconnections:\n if maxconnections < maxcached:\n maxconnections = maxcached\n if maxconnections < maxshared:\n maxconnections = maxshared\n self._maxconnections = maxconnections\n else:\n self._maxconnections = 0\n\n self._idle_cache = [] # self._idle_cache = [SteadyDB]\n self._lock = Condition()\n self._connections = 0\n # Establish an initial number of idle database connections:\n idle = [self.dedicated_connection() for i in range(mincached)]\n while idle:\n idle.pop().close()\n\n def steady_connection(self):\n \"\"\"Get a steady, unpooled DB-API 2 connection.\"\"\"\n return connect(\n self._creator, self._maxusage, self._setsession,\n self._failures, self._ping, True, *self._args, **self._kwargs)\n\n def connection(self, shareable=True):\n \"\"\"Get a steady, cached DB-API 2 connection from the pool.\n\n If shareable is set and the underlying DB-API 2 allows it,\n then the connection may be shared with other threads.\n\n \"\"\"\n if shareable and self._maxshared:\n self._lock.acquire()\n try:\n while not self._shared_cache and self._maxconnections and self._connections >= self._maxconnections:\n self._wait_lock()\n \"\"\"\n if not self._blocking:\n raise TooManyConnections\n self._lock.wait()\n \"\"\"\n if len(self._shared_cache) < self._maxshared:\n # shared cache is not full, get a dedicated connection\n try: # first try to get it from the idle cache\n con = self._idle_cache.pop(0)\n except IndexError: # else get a fresh connection\n con = self.steady_connection()\n else:\n con._ping_check() # check this connection\n con = SharedDBConnection(con)\n self._connections += 1\n else: # shared cache full or no more connections allowed\n self._shared_cache.sort() # least shared connection first\n con = self._shared_cache.pop(0) # get it\n while con.con._transaction:\n # do not share connections which are in a transaction\n self._shared_cache.insert(0, con)\n self._wait_lock()\n \"\"\"\n if not self._blocking:\n raise TooManyConnections\n self._lock.wait()\n \"\"\"\n self._shared_cache.sort()\n con = self._shared_cache.pop(0)\n con.con._ping_check() # check the underlying connection\n con.share() # increase share of this connection\n # put the connection (back) into the shared cache\n self._shared_cache.append(con)\n self._lock.notify()\n finally:\n self._lock.release()\n con = PooledSharedDBConnection(self, con)\n else: # try to get a dedicated connection\n self._lock.acquire()\n try:\n while self._maxconnections and self._connections >= self._maxconnections:\n self._wait_lock()\n \"\"\"\n if not self._blocking:\n raise TooManyConnections\n self._lock.wait()\n \"\"\"\n # connection limit not reached, get a dedicated connection\n try: # first try to get it from the idle cache\n con = self._idle_cache.pop(0)\n except IndexError: # else get a fresh connection\n con = self.steady_connection()\n else:\n con._ping_check() # check connection\n con = PooledDedicatedDBConnection(self, con)\n self._connections += 1\n finally:\n self._lock.release()\n return con\n\n def dedicated_connection(self):\n \"\"\"Alias for connection(shareable=False).\"\"\"\n return self.connection(False)\n\n def unshare(self, con):\n \"\"\"Decrease the share of a connection in the shared cache.\"\"\"\n self._lock.acquire()\n try:\n con.unshare()\n shared = con.shared\n if not shared: # connection is idle,\n try: # so try to remove it\n self._shared_cache.remove(con) # from shared cache\n except ValueError:\n pass # pool has already been closed\n finally:\n self._lock.release()\n if not 
shared: # connection has become idle,\n self.cache(con.con) # so add it to the idle cache\n\n def cache(self, con):\n \"\"\"\n con: the underlying SteadyDB connection\n \"\"\"\n self._lock.acquire()\n try:\n if not self._maxcached:\n con._reset(force=self._reset)\n self._idle_cache.append(con)\n elif len(self._idle_cache) < self._maxcached:\n con._reset(force=self._reset)\n self._idle_cache.append(con)\n else:\n con.close()\n self._connections -= 1\n \"\"\"\n if not self._maxcached or len(self._idle_cache) < self._maxcached:\n con._reset(force=self._reset) # rollback possible transaction\n # the idle cache is not full, so put it there\n self._idle_cache.append(con) # append it to the idle cache\n else: # if the idle cache is already full,\n con.close() # then close the connection\n \"\"\"\n self._connections -= 1\n self._lock.notify()\n finally:\n self._lock.release()\n\n def close(self):\n \"\"\"Close all connections in the pool.\"\"\"\n self._lock.acquire()\n try:\n while self._idle_cache: # close all idle connections\n con = self._idle_cache.pop(0)\n try:\n con.close()\n except Exception:\n pass\n if self._maxshared: # close all shared connections\n while self._shared_cache:\n con = self._shared_cache.pop(0).con\n try:\n con.close()\n except Exception:\n pass\n self._connections -= 1\n self._lock.notifyAll()\n finally:\n self._lock.release()\n\n def __del__(self):\n \"\"\"Delete the pool.\"\"\"\n try:\n self.close()\n except Exception:\n pass\n\n def _wait_lock(self):\n \"\"\"Wait until notified or report an error.\"\"\"\n if not self._blocking:\n raise TooManyConnections\n self._lock.wait()\n\n\n# Auxiliary classes for pooled connections\n\nclass PooledDedicatedDBConnection:\n def __init__(self, pool, con):\n \"\"\"\n pool: the corresponding PooledDB instance\n con: the underlying SteadyDB connection\n \"\"\"\n # basic initialization to make finalizer work\n self._con = None\n # proper initialization of the connection\n if not con.threadsafety():\n raise NotSupportedError(\"Database module is not thread-safe.\")\n self._pool = pool\n self._con = con\n\n def close(self):\n \"\"\"Close the pooled dedicated connection.\"\"\"\n # Instead of actually closing the connection,\n # return it to the pool for future reuse.\n if self._con:\n self._pool.cache(self._con)\n self._con = None\n\n def __getattr__(self, name):\n \"\"\"Proxy all members of the class.\"\"\"\n if self._con:\n return getattr(self._con, name)\n else:\n raise InvalidConnection\n\n def __del__(self):\n \"\"\"Delete the pooled connection.\"\"\"\n try:\n self.close()\n except Exception:\n pass\n\n\nclass SharedDBConnection:\n def __init__(self, con):\n \"\"\"\n con: the underlying SteadyDB connection\n \"\"\"\n self.con = con\n self.shared = 1\n\n # 小于\n def __lt__(self, other):\n if self.con._transaction == other.con._transaction:\n return self.shared < other.shared\n else:\n return not self.con._transaction\n\n # 小于等于\n def __le__(self, other):\n if self.con._transaction == other.con._transaction:\n return self.shared <= other.shared\n else:\n return not self.con._transaction\n\n # 等于\n def __eq__(self, other):\n return (self.con._transaction == other.con._transaction and self.shared == other.shared)\n\n # 不等于\n def __ne__(self, other):\n return not self.__eq__(other)\n\n # 大于\n def __gt__(self, other):\n return other.__lt__(self)\n\n # 大于等于\n def __ge__(self, other):\n return other.__le__(self)\n\n def share(self):\n \"\"\"Increase the share of this connection.\"\"\"\n self.shared += 1\n\n def unshare(self):\n \"\"\"Decrease 
the share of this connection.\"\"\"\n self.shared -= 1\n\n\nclass PooledSharedDBConnection:\n def __init__(self, pool, shared_con):\n \"\"\"\n pool: the corresponding PooledDB instance\n con: the underlying SharedDBConnection\n \"\"\"\n # basic initialization to make finalizer work\n self._con = None\n # proper initialization of the connection\n con = shared_con.con\n if not con.threadsafety() > 1:\n raise NotSupportedError(\"Database connection is not thread-safe.\")\n self._pool = pool\n self._shared_con = shared_con\n self._con = con\n\n def close(self):\n \"\"\"Close the pooled shared connection.\"\"\"\n # Instead of actually closing the connection,\n # unshare it and/or return it to the pool.\n if self._con:\n self._pool.unshare(self._shared_con)\n self._shared_con = self._con = None\n\n def __getattr__(self, name):\n \"\"\"Proxy all members of the class.\"\"\"\n if self._con:\n return getattr(self._con, name)\n else:\n raise InvalidConnection\n\n def __del__(self):\n \"\"\"Delete the pooled connection.\"\"\"\n try:\n self.close()\n except Exception:\n pass\n","sub_path":"evolution_03/DBUtils/PooledDB.py","file_name":"PooledDB.py","file_ext":"py","file_size_in_byte":20769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275500723","text":"from datetime import date\n\n\ndef save_config(ssh, host_name, bkp_dir):\n today = date.today()\n config = ssh.send_command(\"show running-config\")\n\n file_name = host_name + '_' + today.strftime(\"%Y-%m-%d\") + \".txt\"\n\n with open(bkp_dir + '/' + file_name, 'w') as file:\n file.write(config)\n","sub_path":"functions/save_config.py","file_name":"save_config.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"340959926","text":"\"\"\"\n《邢不行-2019新版|Python股票量化投资课程》\nauthor: 邢不行/西蒙斯\n微信: xingbuxing0807\n\n# 课程内容\n- 原始数据介绍\n- 如何使用pandas导入数据\n- DataFrame数据结构介绍\n\"\"\"\nimport pandas as pd # 将pandas作为第三方库导入,我们一般为pandas取一个别名叫做pd\n\n# =====导入数据\ndf = pd.read_csv(\n # 该参数为数据在电脑中的路径,\n # 要注意字符串转义符号 \\ ,可以使用加r变为raw string或者每一个进行\\\\转义\n filepath_or_buffer=r'C:\\Users\\Simons\\Desktop\\xbx_stock_2019\\data\\sh600000.csv',\n # 编码格式,不同的文件有不同的编码方式,一般文件中有中文的,编码是gbk,默认是utf8\n # ** 大家不用去特意记住很多编码,我们常用的就是gbk和utf8,切换一下看一下程序不报错就好了\n encoding='gbk',\n # 该参数代表数据的分隔符,csv文件默认是逗号。其他常见的是'\\t'\n sep=',',\n # 该参数代表跳过数据文件的的第1行不读入\n skiprows=1,\n # nrows,只读取前n行数据,若不指定,读入全部的数据\n # nrows=15,\n # 将指定列的数据识别为日期格式。若不指定,时间数据将会以字符串形式读入。一开始先不用。\n parse_dates=['交易日期'],\n # 将指定列设置为index。若不指定,index默认为0, 1, 2, 3, 4...\n index_col=['交易日期'],\n # 读取指定的这几列数据,其他数据不读取。若不指定,读入全部列\n # usecols=['交易日期', '收盘价'],\n # 当某行数据有问题时,报错。设定为False时即不报错,直接跳过该行。当数据比较脏乱的时候用这个。\n # error_bad_lines=False,\n # 将数据中的null识别为空值\n # na_values='NULL',\n\n # 更多其他参数,请直接在搜索引擎搜索\"pandas read_csv\",要去逐个查看一下。比较重要的,header等\n)\n\nprint(df)\n\n# 使用read_csv导入数据非常方便\n\n# 导入的数据的数据类型是DataFrame。\n\n# 导入数据主要使用read系列函数\n# 还有read_table、read_excel、read_json等,他们的参数内容都是大同小异,可以自行搜索查看。\n","sub_path":"program/pandas基础/1_数据导入.py","file_name":"1_数据导入.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"178552876","text":"#로봇청소기\nimport sys\nfrom collections import deque\nR,C = map(int,sys.stdin.readline().split())\nX,Y,D = map(int,sys.stdin.readline().split())\nA = [list(map(int,sys.stdin.readline().split())) for _ in range(R)]\n\nvis = [[0 for _ in range(C)] for _ in range(R)]\nfor i in range(R):\n 
for j in range(C):\n if A[i][j] == 1:\n vis[i][j] = 1\n\nqueue = deque()\nqueue.append([X,Y,D])\nans = 0 \n\nD = [[-1,0],[0,1],[1,0],[0,-1]]\n\nwhile(queue):\n x,y,d = queue.popleft()\n flag = 0 \n if vis[x][y] == 0:\n vis[x][y] = 1\n ans += 1\n for _ in range(4):\n d -= 1\n if d == -1: d = 3\n xx = x + D[d][0]\n yy = y + D[d][1]\n # print([xx,yy,d])\n if xx >= R or xx < 0 or yy >= C or yy < 0: continue\n if vis[xx][yy] == 1: continue\n queue.append([xx,yy,d])\n flag = 1\n break\n if not flag:\n xx = x - D[d][0]\n yy = y - D[d][1]\n if xx >= R or xx < 0 or yy >= C or yy < 0: break\n if A[xx][yy] == 1: break\n queue.append([xx,yy,d])\n # print(ans)\n\nprint(ans)\n \n \n \n\n\n\n","sub_path":"대회,기출/삼성_로봇청소기.py","file_name":"삼성_로봇청소기.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"86065781","text":"from time import sleep\nfrom twisted.internet import protocol\nfrom twisted.internet import reactor, endpoints\nfrom twisted.web import server, resource\n\n\nclass Echo(protocol.Protocol):\n def dataReceived(self, data):\n print(\"收到客户端的消息\", bytes.decode(data))\n sleep(5)\n self.transport.write(data)\n print(\"返回消息给客户端\")\n\n\nclass EchoFactory(protocol.Factory):\n def buildProtocol(self, addr):\n print(addr)\n return Echo()\n\n\nclass Counter(resource.Resource):\n isLeaf = True\n numberRequests = 0\n\n def render_GET(self, request):\n print(\"收到客户端的请求\", request.client)\n self.numberRequests += 1\n request.setHeader(b\"content-type\", b\"text/plain\")\n request.setHeader(b\"Server\", b\"myserver\")\n request.setHeader(b\"Date\", b\"2018\")\n temp = request.content.read()\n data = bytes.decode(temp)\n len_data = (len(data))\n print(\"len_data:\",len_data)\n content = u\"I am request #{}\\n\".format(self.numberRequests)\n return content.encode(\"ascii\")\n def render_POST(self, request):\n print(\"收到post请求\")\n client = request.client\n pass\ndef start_echo_server():\n \"\"\"一个简单的echoserver\"\"\"\n endpoints.serverFromString(reactor, \"tcp:1234\").listen(EchoFactory())\n reactor.run()\n\n\ndef start_http_server():\n endpoints.serverFromString(reactor, \"tcp:8080\").listen(server.Site(Counter()))\n reactor.run()\n\n\ndef do_something(flag):\n print(flag)\n\n\ndef start_echo_http_server():\n \"\"\"同时开启多个服务\"\"\"\n endpoints.serverFromString(reactor, \"tcp:1234\").listen(EchoFactory())\n endpoints.serverFromString(reactor, \"tcp:8080\").listen(server.Site(Counter()))\n print(\"服务启动\")\n reactor.callLater(2, do_something, 2)\n reactor.callLater(3, do_something, 3)\n reactor.callLater(3, do_something, 3)\n reactor.run()\n\n\nif __name__ == '__main__':\n # start_echo_server()\n start_http_server()\n # start_echo_http_server()\n","sub_path":"twisted_test/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"141783904","text":"import lbs_data_extractor as de\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\n'''\nResource:\nhttps://machinelearningmastery.com/how-to-choose-loss-functions-when-training-deep-learning-neural-networks/\n'''\n\nx_data , y_data = de.get_data('pisite.flat.all\\\\all')\n\nnum_instances = len(x_data)\n\nx_data = np.array(x_data)\ny_data = y_data\n\ntrain_test_cutoff = int(0.7*num_instances)\nx_train, y_train = x_data[:train_test_cutoff], {y: y_data[y][:train_test_cutoff] for y in y_data}\nx_test, y_test = 
x_data[train_test_cutoff:], {y: y_data[y][train_test_cutoff:] for y in y_data}\n\nx_train, x_test = x_train / 25.0, x_test / 25.0\nprint('Building model...')\ndef build_branch(inputs, numCategories, name, finalAct=\"softmax\"):\n\tx = inputs\n\tx = keras.layers.Flatten()(x)\n\tx = keras.layers.Dense(int(0.2 * x_data.shape[1] * x_data.shape[2]))(x)\n\tx = keras.layers.Activation(\"relu\")(x)\n\tx = keras.layers.Dense(numCategories)(x)\n\tx = keras.layers.Activation(finalAct,name=name)(x)\n\treturn x\n\n# model = keras.Sequential([\n# \tkeras.layers.Flatten(input_shape=(x_data.shape[1], x_data.shape[2])),\n# \tkeras.layers.Dense(int(0.2 * x_data.shape[1] * x_data.shape[2]), activation=\"relu\"),\n# \tkeras.layers.Dense(y_data.shape[1], activation=\"linear\")\n# \t])\n\nprint('Building model...')\ninputs = keras.layers.Input(shape=(x_data.shape[1], x_data.shape[2]))\n\nprint('Building model...')\nmodel = keras.models.Model(\n\tinputs=inputs,\n\toutputs=[build_branch(inputs, 2, \"act_\" + str(i)) for i in range(2000)])\nprint('Model built')\n\nmodel.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=5)\n\ntest_loss, test_acc = model.evaluate(x_test, y_test)\n\nprint(\"Tested accuracy: \", test_acc)\nprint(model.summary())","sub_path":"lbs_nn.py","file_name":"lbs_nn.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623323135","text":"from numpy import linspace\n\nclass Channel:\n def __init__(self):\n self.id = id(self)\n self.name = type(self).__name__\n self.parameters = []\n\n self._t_needs_refresh = True\n\n def __setattr__(self, name, value):\n super().__setattr__(name, value)\n\n if name in ['start_time', 'end_time', 'sample_depth']:\n self._t_needs_refresh = True\n\n @property\n def t(self):\n if self._t_needs_refresh:\n self._t = linspace(\n self.start_time,\n self.end_time,\n self.sample_depth)\n self._t_needs_refresh = False\n\n return self._t\n\n @property\n def data(self):\n try:\n y = self.y # y needs to be accessed first, because it may change t\n t = self.t\n except Exception as e:\n print(e)\n y = None\n t = None\n\n return t, y\n\n def get_parameter_by_name(self, name):\n return next(filter(lambda parameter: parameter.name == name, self.parameters))\n","sub_path":"backend/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294658913","text":"import cv2\r\n\r\nwebcam = cv2.VideoCapture(0)\r\nwhile True:\r\n s,imagem = webcam.read()\r\n imagem = cv2.flip(imagem, 180)\r\n classificador = cv2.CascadeClassifier(\"recursos/haarcascade_frontalface_default.xml\")\r\n facesDetectadas = classificador.detectMultiScale(imagem)\r\n for (x, y, l, a) in facesDetectadas:\r\n cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 255, 0), 2)\r\n\r\n cv2.imshow(\"Detector haar\", imagem)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\nwebcam.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n","sub_path":"facialDetectorHaar.py","file_name":"facialDetectorHaar.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"518539755","text":"import threading\nimport urllib\nimport time\nimport requests\nimport re\nfrom lxml import etree\nimport os\nfrom urllib import request\nimport bs4\nimport configparser\nimport 
json\n\n\nclass DAILI:\n def __init__(self, url):\n self.__URL = url\n\n def is_json(self, file):\n '''\n 判断是不是json\n '''\n try:\n json.loads(file)\n except:\n return False\n return True\n\n def str_json(self, data):\n data = json.loads(data)\n return data\n\n def get(self, url):\n '''\n get请求,不带参数\n '''\n try:\n response = request.urlopen(url)\n # print(\"查看 response 响应信息类型: \",type(response))\n page = response.read()\n page = page.decode('utf-8')\n # url解码\n data = parse.unquote(page)\n # print(\"data@@@@@@@@@@@@@@@@@@@@@@@@\",data)\n if self.is_json(data):\n data = self.str_json(data)\n return data\n except:\n return 'error'\n\n def gets(self, url, data):\n '''\n get请求,带参数\n '''\n try:\n # data = json.dumps(data)\n # print(data)\n data = bytes(urllib.parse.urlencode(data), encoding='utf8')\n response = request.urlopen(url=url, data=data)\n # print(\"查看 response 响应信息类型: \",type(response))\n page = response.read()\n page = page.decode('utf-8')\n # url解码\n data = parse.unquote(page)\n # print(\"data@@@@@@@@@@@@@@@@@@@@@@@@\",data)\n if self.is_json(data):\n data = self.str_json(data)\n return data\n except:\n return 'error'\n\n def post(self, url, data):\n '''\n post请求\n '''\n try:\n\n data_string = urllib.parse.urlencode(data)\n last_data = bytes(data_string, encoding='utf-8')\n response = urllib.request.urlopen(url, data=last_data)\n data = response.read().decode('utf-8')\n # print(response.read().decode('utf-8'))\n if self.is_json(data):\n data = self.str_json(data)\n return data\n except:\n print(\"接口有问题\")\n\n\ndef foldexist(abspath: str) -> None:\n \"\"\"\n 判断当前路径文件夹是否存在,如果不存在会自动创建\n \"\"\"\n if not os.path.exists(abspath):\n os.makedirs(abspath)\n# DATA=[]\n\n\ndef gaoni():\n foldexist('G:/CSDN_L/python/案例/国内高匿代理')\n while True:\n global A\n A = A+1\n with open('G:/CSDN_L/python/案例/国内高匿代理/log.txt', 'w') as file: # .txt可以不自己新建,代码会自动新建\n file.write(str(A))\n resp = requests.get('https://www.kuaidaili.com/free/inha/'+str(A)+'/')\n soup = bs4.BeautifulSoup(resp.text, 'lxml')\n elements = soup.select('#list>table>tbody>tr')\n data = {}\n for element in elements:\n ip = element.select('td')[0].string\n port = element.select('td')[1].string\n niming = element.select('td')[2].string\n leixing = element.select('td')[3].string\n weizhi = element.select('td')[4].string\n xiangyingsudu = element.select('td')[5].string\n zuihouyanzhengshijian = element.select('td')[6].string\n data[\"ip\"] = ip\n data[\"port\"] = port\n data[\"匿名度\"] = niming\n data[\"类型\"] = leixing\n data[\"位置\"] = weizhi\n data[\"响应速度\"] = xiangyingsudu\n data[\"最后验证时间\"] = zuihouyanzhengshijian\n # DATA.append(data)\n bbb(data)\n data = {}\n print(\"开始第%d次爬取\" % (A))\n time.sleep(1)\n\n\ndef bbb(data):\n with open(\"G:/CSDN_L/python/案例/国内高匿代理/ip.txt\", \"a\") as file:\n file.write(str(data)+\"\\n\")\n\n\ndef rini(data, data1, data2):\n\n abspath = \"G:/CSDN_L/python/py必备模块整理100/log.ini\"\n # if not os.path.exists(abspath):\n # with open(abspath,\"a\") as file:\n # pass\n # else:\n # pass\n\n config = configparser.ConfigParser()\n config.read(abspath)\n try:\n config.add_section(data)\n config.set(data, data1, data2)\n # config.set(\"School\",\"Mask\",\"255.255.255.0\")\n # config.set(\"School\",\"Gateway\",\"10.15.40.1\")\n # config.set(\"School\",\"DNS\",\"211.82.96.1\")\n except configparser.DuplicateSectionError:\n print(\"Section 'School' already exists\")\n\n\ndef aaa():\n DATA = []\n with open('G:/CSDN_L/python/py必备模块整理100/log.ini', 'w') as file: # .txt可以不自己新建,代码会自动新建\n file.write('')\n url = 
'https://blog.csdn.net/jiahaoangle/article/details/102740223?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522160566879919724836716796%2522%252C%2522scm%2522%253A%252220140713.130102334.pc%255Fall.%2522%257D&request_id=160566879919724836716796&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~first_rank_v2~rank_v28-1-102740223.pc_first_rank_v2_rank_v28&utm_term=python%E5%BF%85%E5%A4%87%E6%A8%A1%E5%9D%97%E6%95%B4%E7%90%86100&spm=1018.2118.3001.4449'\n\n data = request.urlopen((url)).read() # 读取url响应结果\n data = data.decode('utf-8') # 将响应结果用utf8编码\n\n soup = bs4.BeautifulSoup(data, 'lxml')\n\n elements = soup.select('#content_views>table>tbody')\n # print(\"el\",elements)\n\n for element in elements:\n el = element.select('tr')\n for i in el:\n el_data = []\n iii = i.select('td')\n\n for j in iii:\n el_data.append(j.string)\n\n DATA.append(el_data)\n\n # print(iii)\n\n # print(\"el_data\",el_data)\n print('------------------')\n #print(\"DATA\", DATA)\n\n for k in DATA:\n print(k[1])\n print(type(k[1]))\n # k2=json.dumps(k[2])\n # print(k2)\n k3=str(k[1]).encode('utf8')\n # rini(k[1],k[2],k[3])\n print('------------------1')\n print(k3)\n\n # print(m)\n # rini('')\n\n # print(\"DATA:\",DATA)\n # print(el.string)\n\n\n # ip = element.select('td')[0].string\n # port = element.select('td')[1].string\n # niming=element.select('td')[2].string\n # leixing=element.select('td')[3].string\n # weizhi=element.select('td')[4].string\n # xiangyingsudu=element.select('td')[5].string\n # zuihouyanzhengshijian=element.select('td')[6].string\n # data[\"ip\"]=ip\n # data[\"port\"]=port\n # data[\"匿名度\"]=niming\n # data[\"类型\"]=leixing\n # data[\"位置\"]=weizhi\n # data[\"响应速度\"]=xiangyingsudu\n # data[\"最后验证时间\"]=zuihouyanzhengshijian\n # #DATA.append(data)\n # bbb(data)\n # data={}\n # print(\"开始第%d次爬取\"%(A))\n\n\n\n\ndef main():\n aaa()\nif __name__ == \"__main__\":\n main()\n","sub_path":"py必备模块整理100/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"533710110","text":"import requests\nfrom dbtools import query\nfrom filetools import read_file\nfrom filetools import write_file\n\n#用户修改文章接口\n# 1.构造请求\n# nt = read_file(\"./user_token.txt\")\n# iid = read_file(\"./aiticle_id.txt\")\nu = \"http://118.24.105.78:2333/article/update\" # 接口地址\n# h = {\"Content-Type\":\"application/json\", \"token\":nt} # 请求头\nh = {\"Content-Type\":\"application/json\",\"token\":\"{}\".format(read_file(\"./user_token.txt\"))} \nd ={\"title\":\"楞个干哈子嘛\", \"content\":\"风中\", \"tags\":\"测测测测测\", \"brief\":\"没得\", \"ximg\":\"dsfsdf.jpg\", \n\"aid\":'{}'.format(read_file(\"./aiticle_id.txt\")) } # 请求参数\nr = requests.post(url=u,headers=h,json=d) # r 是返回值\nprint(r.text) # r.text:响应值\n\n\n# # 2.判断结果\n# print( r.status_code)\nassert r.status_code == 200 # 判断状态码, 获取本次响应的状态码是否等于200\n# print(r.json()) # r.json():把返回值 r 转换成字典\nassert r.json()[\"status\"] == 200 # 判断结果码\n\n# 3.数据库查询\n\n# print(iid)\nsql = \"select * from t_article where id = '{}'\".format(read_file(\"./aiticle_id.txt\"))\nassert len(query(sql)) !=0 # 如果账号存在 > sql应该是有结果的 > query(sql)长度 != 0\nprint(\"修改文章接口测试用例执行通过!\")\nprint(query(sql))\n\n","sub_path":"pythonTest/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"379869404","text":"instructions = []\nwith open('input/day12.txt') as f:\n for line in f:\n 
direction = line[0]\n value = int(line.rstrip('\\n')[1:])\n instructions.append([direction, value])\n\nclass Ship:\n def __init__(self, position, waypoint, heading):\n self.position = position\n self.waypoint = waypoint\n self.heading = heading\n\n def move(self, direction, value, ship):\n if ship:\n pos = self.position\n else:\n pos = self.waypoint\n if direction in ['N', 0]:\n pos[1] += value\n elif direction in ['E', 90]:\n pos[0] += value\n elif direction in ['S', 180]:\n pos[1] -= value\n elif direction in ['W', 270]:\n pos[0] -= value\n\n def moveToWp(self, value):\n self.position = [self.position[0] + value * self.waypoint[0], self.position[1] + value * self.waypoint[1]]\n\n def turn(self, direction, value):\n if direction == 'L':\n self.heading = (self.heading - value) % 360\n elif direction == 'R':\n self.heading = (self.heading + value) % 360\n\n def rotate(self, direction, value):\n waypoint = self.waypoint.copy()\n if (direction == 'L' and value == 90) or (direction == 'R' and value == 270):\n self.waypoint[0] = 0 - waypoint[1]\n self.waypoint[1] = waypoint[0]\n elif (direction == 'L' and value == 270) or (direction == 'R' and value == 90):\n self.waypoint[0] = waypoint[1]\n self.waypoint[1] = 0 - waypoint[0]\n else:\n self.waypoint[0] = 0 - waypoint[0]\n self.waypoint[1] = 0 - waypoint[1]\n\n def instruct(self, instruction, partOne):\n if instruction[0] in 'NESW':\n self.move(instruction[0], instruction[1], partOne)\n elif instruction[0] in 'LR':\n if partOne:\n self.turn(instruction[0], instruction[1])\n else:\n self.rotate(instruction[0], instruction[1])\n elif instruction[0] == 'F':\n if partOne:\n self.move(self.heading, instruction[1], True)\n else:\n self.moveToWp(instruction[1])\n\n# Part One\nship = Ship([0, 0], [0, 0], 90)\nfor instruction in instructions:\n ship.instruct(instruction, True)\nprint(abs(ship.position[0]) + abs(ship.position[1]))\n\n# Part Two\nship = Ship([0, 0], [10, 1], 0)\nfor instruction in instructions:\n ship.instruct(instruction, False)\nprint(abs(ship.position[0]) + abs(ship.position[1]))","sub_path":"day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"175185841","text":"import numpy as np\nfrom sklearn.metrics import accuracy_score\nimport multiprocessing as mp\n\nimport sys, os\nsys.path.append(os.getcwd())\n\ntry:\n from quadboost.weak_learner import _WeakLearnerBase\n from quadboost.utils import split_int, timed, ComparableMixin\n from quadboost.utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes\nexcept ModuleNotFoundError:\n from weak_learner import _WeakLearnerBase\n from utils import split_int, timed, ComparableMixin\n from utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes\n\n\nclass MulticlassDecisionStump(_WeakLearnerBase):\n \"\"\"\n Decision stump classifier with innate multiclass algorithm.\n It finds a stump to partition examples into 2 parts which minimizes the quadratic multiclass risk.\n It assigns a confidence rates (scalar) for each class for each partition.\n Parallelization is implemented for the 'fit' method.\n \"\"\"\n def fit(self, X, Y, W=None, n_jobs=1, sorted_X=None, sorted_X_idx=None):\n \"\"\"\n Fits the model by finding the best decision stump using the algorithm implemented in the StumpFinder class.\n\n Args:\n X (Array of shape (n_examples, ...)): Examples\n Y (Array of shape (n_examples,) or (n_examples, 
n_classes)): Labels for the examples. If an encoder was provided at construction, Y should be a vector to be encoded.\n W (Array of shape (n_examples, n_classes)): Weights of each examples according to their class. Should be None if Y is not encoded.\n n_jobs (int, optional, default=1): Number of processes to execute in parallel to find the stump.\n sorted_X (Array of shape (n_examples, ...), optional, default=None): Sorted examples along axis 0. If None, 'X' will be sorted, else it will not.\n sorted_X_idx (Array of shape (n_examples, ...), optional, default=None): Indices of the sorted examples along axis 0 (corresponds to argsort). If None, 'X' will be argsorted, else it will not.\n\n Returns self\n \"\"\"\n if self.encoder is not None:\n Y, W = self.encoder.encode_labels(Y)\n if sorted_X is None or sorted_X_idx is None:\n sorted_X, sorted_X_idx = self.sort_data(X)\n\n stump = self.find_stump(sorted_X, sorted_X_idx, Y, W, n_jobs)\n\n for attr in ['feature', 'confidence_rates', 'stump', 'stump_idx', 'risks', 'risk']:\n setattr(self, attr, getattr(stump, attr))\n\n return self\n\n def find_stump(self, sorted_X, sorted_X_idx, Y, W, n_jobs):\n stump_finder = StumpFinder(sorted_X, sorted_X_idx, Y, W)\n stumps_queue = SafeQueue()\n\n if n_jobs > 1: # Need parallelization\n n_features = sorted_X.shape[1]\n args_iter = ((stumps_queue, sub_idx) for sub_idx in split_int(n_features, n_jobs))\n parallel_processes(stump_finder.safe_find_stump, args_iter)\n else: # No parallelization\n stump_finder.find_stump(stumps_queue)\n\n return min(stump for stump in stumps_queue)\n\n def predict(self, X):\n n_partitions, n_classes = self.confidence_rates.shape\n n_examples = X.shape[0]\n Y_pred = np.zeros((n_examples, n_classes))\n for i, partition in enumerate(self.partition_generator(X)):\n Y_pred[i] = self.confidence_rates[partition]\n return Y_pred\n\n def partition_generator(self, X):\n \"\"\"\n Partition examples into 2 sets denoted by 0 and 1 in an lazy iterator fashion.\n \"\"\"\n n_examples = X.shape[0]\n for x in X.reshape((n_examples, -1)):\n yield int(x[self.feature] > self.stump)\n\n def partition(self, X, dtype=bool):\n return np.array([p for p in self.partition_generator(X)], dtype=dtype)\n\n @staticmethod\n def sort_data(X):\n \"\"\"\n Necessary sorting operations on the data to find the optimal stump. It is useful to sort the data prior to boost to speed up the algorithm, since the sorting step will not be made at each round.\n\n 'sorted_X' and 'sorted_X_idx' should be passed as keyword arguments to the 'fit' method to speed up the algorithm.\n \"\"\"\n X = X.reshape((X.shape[0],-1))\n n_examples, n_features = X.shape\n sorted_X_idx = np.argsort(X, axis=0)\n sorted_X = X[sorted_X_idx, range(n_features)]\n\n return sorted_X, sorted_X_idx\n\n\nclass StumpFinder:\n \"\"\"\n Implements the algorithm to find the stump. 
It is separated from the class MulticlassDecisionStump so that it can be pickled when parallelized with 'multiprocessing' (which uses pickle).\n \"\"\"\n def __init__(self, sorted_X, sorted_X_idx, Y, W):\n\n # multiprocessing Arrays are shared between processed to alleviate pickling\n self.sorted_X = np.ctypeslib.as_array(mp.RawArray('d', sorted_X.size)).reshape(sorted_X.shape)\n self.sorted_X[:] = sorted_X\n self.sorted_X_idx = np.ctypeslib.as_array(mp.RawArray('i', sorted_X_idx.size)).reshape(sorted_X_idx.shape)\n self.sorted_X_idx[:] = sorted_X_idx\n\n self.zeroth_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)\n self.zeroth_moments[:] = W\n self.first_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)\n self.first_moments[:] = W*Y\n self.second_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)\n self.second_moments[:] = self.first_moments*Y\n\n # # multiprocessing Arrays are shared between processed to alleviate pickling\n # self.X_shape = sorted_X.shape\n # self.X_idx_shape = sorted_X_idx.shape\n # self.moments_shape = W.shape\n # self.sorted_X = mp.Array('d', sorted_X.reshape(-1))\n # self.sorted_X_idx = mp.Array('i', sorted_X_idx.reshape(-1))\n\n # self.zeroth_moments = mp.Array('d', W.reshape(-1))\n # self.first_moments = mp.Array('d', (W*Y).reshape(-1))\n # self.second_moments = mp.Array('d', (W*Y*Y).reshape(-1))\n\n def safe_find_stump(self, stumps_queue, sub_idx=(None,)):\n \"\"\"\n Handles exception raised in a subprocess so the script will not hang indefinitely.\n\n This is basically a decorator for find_stump, but parallelizing requires pickling, and decorators cannot be pickled.\n \"\"\"\n with stumps_queue: # Context manager handles exceptions\n self.find_stump(stumps_queue, sub_idx)\n\n def find_stump(self, stumps_queue, sub_idx=(None,)):\n \"\"\"\n Algorithm to the best stump within the sub array of X specified by the bounds 'sub_idx'.\n \"\"\"\n X = self.sorted_X[:,slice(*sub_idx)]\n X_idx = self.sorted_X_idx[:,slice(*sub_idx)]\n\n _, n_classes = self.zeroth_moments.shape\n n_examples, n_features = X.shape\n n_partitions = 2\n n_moments = 3\n\n moments = np.zeros((n_moments, n_partitions, n_features, n_classes))\n\n # At first, all examples are in partition 1\n # Moments are not normalized so they can be computed cumulatively\n moments[0,1] = np.sum(self.zeroth_moments[X_idx[:,0]], axis=0)\n moments[1,1] = np.sum(self.first_moments[X_idx[:,0]], axis=0)\n moments[2,1] = np.sum(self.second_moments[X_idx[:,0]], axis=0)\n\n risks = self.compute_risks(moments) # Shape (n_partitions, n_features)\n best_stump = Stump(risks, moments)\n\n for i, row in enumerate(X_idx[:-1]):\n self.update_moments(moments, row)\n possible_stumps = ~np.isclose(X[i+1] - X[i], 0)\n\n if possible_stumps.any():\n risk = self.compute_risks(moments[:,:,possible_stumps,:])\n best_stump.update(risk, moments, possible_stumps, stump_idx=i+1)\n\n best_stump.compute_stump_value(X)\n best_stump.feature += sub_idx[0] if sub_idx[0] is not None else 0\n stumps_queue.append(best_stump)\n\n def update_moments(self, moments, row_idx):\n moments_update = np.array([self.zeroth_moments[row_idx],\n self.first_moments[row_idx],\n self.second_moments[row_idx]])\n moments[:,0] += moments_update\n moments[:,1] -= moments_update\n\n def compute_risks(self, moments):\n \"\"\"\n Computes the risks for each partitions for every features.\n \"\"\"\n moments[np.isclose(moments,0)] = 0\n with np.errstate(divide='ignore', invalid='ignore'):\n # We could 
use\n # np.divide(moments[1]**2, moments[0], where=~np.isclose(moments[0]))\n # However, the buffer size is not big enough for several examples and the resulting division is not done correctly\n normalized_m1 = np.nan_to_num(moments[1]**2/moments[0])\n risks = np.sum(moments[2] - normalized_m1, axis=2) # Shape (n_partitions, n_features)\n return risks\n\n\nclass Stump(ComparableMixin, cmp_attr='risk'):\n \"\"\"\n Stump is a simple class that stores the variables used by the MulticlassDecisionStump algorithm. It provides a method 'update' that changes the values only if the new stump is better than the previous one. Easy comparison between the stumps is provided with the ComparableMixin parent class, which is useful to determine the best stump among many.\n \"\"\"\n def __init__(self, risks, moments):\n super().__init__()\n risk = np.sum(risks, axis=0)\n self.feature = risk.argmin()\n self.risks = risks[:,self.feature]\n self.stump_idx = 0\n self.moment_0 = moments[0,:,self.feature,:].copy()\n self.moment_1 = moments[1,:,self.feature,:].copy()\n\n @property\n def risk(self):\n return np.sum(self.risks)\n\n def update(self, risks, moments, possible_stumps, stump_idx):\n \"\"\"\n Updates the current stump with the new stumps only if the new risk is lower than the previous one.\n\n To optimize the algorithm, the risks are computed only for the acceptable stumps, which happen to be represented as the non zero entries of the variable 'possible_stumps'.\n \"\"\"\n risk = np.sum(risks, axis=0)\n sparse_feature_idx = risk.argmin()\n if risk[sparse_feature_idx] < self.risk:\n self.feature = possible_stumps.nonzero()[0][sparse_feature_idx] # Retrieves the actual index of the feature\n self.risks = risks[:,sparse_feature_idx]\n self.moment_0 = moments[0,:,self.feature,:].copy()\n self.moment_1 = moments[1,:,self.feature,:].copy()\n self.stump_idx = stump_idx\n\n @property\n def confidence_rates(self):\n return np.divide(self.moment_1, self.moment_0, where=self.moment_0!=0)\n\n def compute_stump_value(self, sorted_X):\n feat_val = lambda idx: sorted_X[idx, self.feature]\n if self.stump_idx != 0:\n self.stump = (feat_val(self.stump_idx) + feat_val(self.stump_idx-1))/2\n else:\n self.stump = feat_val(self.stump_idx) - 1\n return self.stump\n\n\n@timed\ndef main():\n mnist = MNISTDataset.load()\n (Xtr, Ytr), (Xts, Yts) = mnist.get_train_test(center=False, reduce=False)\n\n # encoder = LabelEncoder.load_encodings('js_without_0', convert_to_int=True)\n # encoder = LabelEncoder.load_encodings('mario')\n encoder = OneHotEncoder(Ytr)\n # encoder = AllPairsEncoder(Ytr)\n\n m = 6_0\n X = Xtr[:m].reshape((m,-1))\n Y = Ytr[:m]\n # X, Y = Xtr, Ytr\n wl = MulticlassDecisionStump(encoder=encoder)\n sorted_X, sorted_X_idx = wl.sort_data(X)\n wl.fit(X, Y, n_jobs=1, sorted_X=sorted_X, sorted_X_idx=sorted_X_idx)\n print('WL train acc:', wl.evaluate(X, Y))\n # print('WL test acc:', wl.evaluate(Xts, Yts))\n\n\nif __name__ == '__main__':\n from quadboost.datasets import MNISTDataset\n from quadboost.label_encoder import *\n # import cProfile\n # cProfile.run('main()', sort='tottime')\n main()\n","sub_path":"quadboost/weak_learner/decision_stump.py","file_name":"decision_stump.py","file_ext":"py","file_size_in_byte":11851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"478283595","text":"import numpy as np\nimport cv2\nfrom mss import mss\nimport pyautogui\n\nregion_of_interest = {\n\n 'left': 688,\n 'top': 371,\n 'width': 88,\n 'height': 40\n}\nk = 1\n\nwhile True:\n 
monitor = mss().grab(region_of_interest) # Capture region of interest\n monitor = np.array(monitor) # convert image into numpy array\n\n # [red,green,blue]\n cactus = monitor[28, :, 0] # storing red channel(x values) of cactus [row,column,channel] from 30th pixel of y\n bird = monitor[1, :, 0] # storing red channel(x values) of bird from 1st pixel\n\n cactus_sum = np.sum(cactus)\n bird_sum = np.sum(bird)\n print(cactus_sum, 'cactus')\n print(bird_sum, 'bird')\n\n if cactus_sum < 21736:\n pyautogui.press('up')\n\n if bird_sum < 21736:\n pyautogui.keyDown('down')\n k = 1\n\n if bird_sum == 21736 and k == 1:\n pyautogui.keyUp('down')\n k = 0\n\n\n cv2.imshow('trex', monitor)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n","sub_path":"trex.py","file_name":"trex.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"128147639","text":"from scrapy.crawler import Crawler\nfrom scrapy.conf import settings\nfrom olx_parcer.spiders.olx import OlxSpider\nfrom scrapy import log, project\nfrom twisted.internet import reactor\nfrom billiard import Process\nfrom scrapy.utils.project import get_project_settings\n\nclass UrlCrawlerScript(Process):\n def __init__(self, spider):\n Process.__init__(self)\n settings = get_project_settings()\n self.crawler = Crawler(settings)\n self.crawler.configure()\n self.crawler.signals.connect(reactor.stop, signal=signals.spider_closed)\n self.spider = spider\n\n def run(self):\n self.crawler.crawl(self.spider)\n self.crawler.start()\n reactor.run()\n\ndef run_spider(url):\n spider = OlxSpider(url)\n crawler = UrlCrawlerScript(spider)\n crawler.start()\n crawler.join()","sub_path":"olx_site/olx_app/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"328337754","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport asyncio\nimport aiohttp\n\nfrom django.test import TestCase\nfrom melinoe.app.utils import parse, async_parse, local_skip\n\nclient = aiohttp.ClientSession()\n\n\nclass PocketTest(TestCase):\n\n @local_skip\n def test_parse(self):\n valid_url = \"https://mercury.postlight.com/web-parser/\"\n self.assertTrue(parse(valid_url) is not None)\n\n invalid_url = \"htt://mercury.postlight.com/web-parser/\"\n self.assertTrue(parse(invalid_url) is None)\n\n @local_skip\n def test_async_parse(self):\n loop = asyncio.get_event_loop()\n valid_url = \"https://mercury.postlight.com/web-parser/\"\n fut = asyncio.ensure_future(\n async_parse(client, valid_url)\n )\n data = loop.run_until_complete(fut)\n self.assertTrue(data is not None)\n\n invalid_url = \"htt://mercury.postlight.com/web-parser/\"\n fut = asyncio.ensure_future(\n async_parse(client, invalid_url)\n )\n invalid_data = loop.run_until_complete(fut)\n self.assertTrue(invalid_data is None)\n","sub_path":"melinoe/app/tests/test_mercury.py","file_name":"test_mercury.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"298299276","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ncalculate similarity between repeats\n\"\"\"\nimport sys\nimport os\nfrom collections import OrderedDict\n\n\ndef align_lis_lis(lis_lis):\n \"\"\"align and nested list to print a table\"\"\"\n lis_lis = [[str(l) for l in lis]\n for lis in lis_lis] # trans every element to str\n # 
make all inner lists of the same length\n inner_lis_max_len = max(len(lis) for lis in lis_lis)\n lis_lis = [lis + (inner_lis_max_len - len(lis)) * [''] for lis in lis_lis]\n # trans list, so that the elements of the same column are in one list\n lis_lis = [[lis[i] for lis in lis_lis] for i in range(inner_lis_max_len)]\n # make element in the same list have the same length\n aligned = []\n for lis in lis_lis:\n width = max([len(l) for l in lis])\n lis = [l + (width - len(l)) * ' ' for l in lis]\n aligned.append(lis)\n # trans list_list to the original list_list\n inner_lis_max_len = max(len(lis) for lis in lis_lis)\n lis_lis = [[lis[i] for lis in aligned] for i in range(inner_lis_max_len)]\n return lis_lis\n\n\ndef align(seq1, seq2):\n from Bio import pairwise2\n from Bio.SubsMat import MatrixInfo as matlist\n matrix = matlist.blosum62\n gap_open = -10 # usual value\n gap_extend = -0.5 # usual value\n\n alns = pairwise2.align.globalds(seq1, seq2, matrix, gap_open, gap_extend)\n\n seq1 = alns[0][0]\n seq2 = alns[0][1]\n identity = [1 for i, s in enumerate(seq1) if s == seq2[i]]\n identity = 1.0 * len(identity) / len(seq1)\n return float('{0:<4.2f}'.format(identity))\n\n\ndef read_repeats(repeat_f):\n with open(repeat_f) as o_f:\n lines = o_f.readlines()\n lines = [line.rstrip('\\r\\n') for line in lines]\n lines = [line for line in lines if line]\n pro_repeats = []\n for line in lines:\n words = line.split(':')\n pro = words[0].split('.')[0]\n repeats = words[1].split()\n repeats = [''.join(r.split('-')) for r in repeats]\n pro_repeats.append((pro, repeats))\n return pro_repeats\n\n\ndef get_similarity(pro_repeats):\n pro_repeats_similarity = []\n\n len_repeats = len(pro_repeats[0][1])\n title = ['id']\n for i in range(len_repeats):\n for j in range(len_repeats):\n if j > i:\n title.append('r'+str(i+1) + '_' + str(j+1))\n pro_repeats_similarity.append(title)\n\n for pro, repeats in pro_repeats:\n similarity = []\n for i in range(len_repeats):\n for j in range(len_repeats):\n if j > i:\n sim = align(repeats[i], repeats[j])\n similarity.append(sim)\n pro_repeats_similarity.append([pro] + similarity)\n return pro_repeats_similarity\n\n\ndef main():\n repeat_f = sys.argv[-1]\n pro_repeats = read_repeats(repeat_f)\n pro_repeats_similarity = get_similarity(pro_repeats)\n\n trans_pro_repeats_similarity = [\n [lis[i] for lis in pro_repeats_similarity] for i in range(len(pro_repeats_similarity[0]))]\n trans_pro_repeats_similarity = align_lis_lis(trans_pro_repeats_similarity)\n with open('similarity_trans.txt', 'w') as w_f:\n for pro_similarity in trans_pro_repeats_similarity:\n print >> w_f, ' '.join(pro_similarity)\n\n pro_repeats_similarity = align_lis_lis(pro_repeats_similarity)\n with open('similarity.txt', 'w') as w_f:\n for pro_similarity in pro_repeats_similarity:\n print >> w_f, ' '.join(pro_similarity)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ancestor_sequence/ancestor-repeats-similarity.py","file_name":"ancestor-repeats-similarity.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315166536","text":"import re\r\nimport collections\r\nimport sys\r\nimport math\r\nimport time\r\nimport random\r\nimport getopt\r\n\r\nsegted_text = []\r\nsegted_text_fs = {}\r\n\r\nSegment = collections.namedtuple('Segment', ['seg', 'ismorph',\r\n 'count', 'split'])\r\n\r\nsum_updates = 0\r\nsum_newcost = 0\r\nsum_restore = 0\r\ncount_loops = 0\r\n\r\ndef usage():\r\n print('--help --input=filepath 
--output=flepath --verbose=digit --mode=sera')\r\n\r\ndef main(argv):\r\n try:\r\n opts, args = getopt.getopt(argv, \"hi:o:v:m:c:\",\r\n [\"help\", \"input=\", \"output=\", \"verbose=\", \"mode=\"])\r\n except getopt.GetoptError:\r\n usage()\r\n sys.exit(2)\r\n\r\n filename_in = ' '\r\n filename_out = ' '\r\n mode = 'sera'\r\n verbose = 0\r\n wc = 1000\r\n\r\n for opt, arg in opts:\r\n if opt in ('-h', '--help'):\r\n usage()\r\n sys.exit()\r\n elif opt in ('-i', '--input'):\r\n filename_in = arg\r\n elif opt in ('-o', '--output'):\r\n filename_out = arg\r\n elif opt in ('-v', '--verbose'):\r\n verbose = int(arg)\r\n elif opt in ('-m', '--mode'):\r\n mode = arg\r\n elif opt == '-c':\r\n wc = int(arg)\r\n\r\n if filename_in == ' ' or filename_out == ' ':\r\n print('Input and output files must be supplied.')\r\n usage()\r\n sys.exit(1)\r\n\r\n reader = Reader_am(filename_in, mode)\r\n text = reader.get_text()\r\n text = list(set(text))\r\n random.shuffle(text)\r\n text = text[:wc]\r\n\r\n analyzer = Analyzer(text, filename_out)\r\n analyzer.initialize_segments()\r\n analyzer.analyze(verbose)\r\n analyzer.write_out_as_list()\r\n \r\n##def main():\r\n## reader = Reader_am('//ATKK/home/e/Eetusjob/Desktop/' +\r\n## 'yo/compmorpho/projectNew/wic-tagged.txt',\r\n## 'sera')\r\n## text = reader.get_text()\r\n## text = list(set(text))\r\n## random.shuffle(text)\r\n## text = text[:1000]\r\n##\r\n## analyzer = Analyzer(text, '//ATKK/home/e/Eetusjob/Desktop/yo/' +\r\n## 'compmorpho/projectFinal/segmented_as_list.txt')\r\n## analyzer.initialize_segments()\r\n## \r\n## analyzer.analyze(100)\r\n## analyzer.write_out_as_list()\r\n\r\n\r\n\r\nclass Analyzer():\r\n\r\n def get_bow(self):\r\n return self.__text_as_bag_of_words\r\n\r\n def get_segs(self):\r\n return self.__segments\r\n \r\n def __init__(self, text_as_list, filepath_out):\r\n self.__text = text_as_list\r\n self.__text_as_bag_of_words = {}\r\n self.__segments = {}\r\n self.__total_tokens = 0\r\n self.__filepath_out = filepath_out\r\n try:\r\n self.__file_out = open(filepath_out, \"w\")\r\n except:\r\n print(\"Error trying to open output file. Terminating process.\")\r\n sys.exit(1)\r\n\r\n\r\n def initialize_segments(self):\r\n for word in self.__text:\r\n self.__total_tokens += 1\r\n self.increase_count(word, ismorph=True)\r\n \r\n if word in self.__text_as_bag_of_words.keys():\r\n self.__text_as_bag_of_words[word] += 1\r\n else:\r\n self.__text_as_bag_of_words[word] = 1\r\n\r\n\r\n def analyze(self, verbose):\r\n amount_analyzed = 0\r\n start_analysis = time.time()\r\n start_chunk = time.time()\r\n\r\n if verbose > 0:\r\n for word in list(set(self.__text)):\r\n self.find_optimal_split(word)\r\n\r\n amount_analyzed += 1\r\n if amount_analyzed % verbose == 0:\r\n print('%d/%d analyzed' % (amount_analyzed, len(set(self.__text))))\r\n print('Time for last %d: %d seconds' % (verbose, (time.time() - start_chunk)))\r\n print('Total time elapsed: %d minutes %d seconds\\n' % \\\r\n (((time.time() - start_analysis)/60),\r\n ((time.time() - start_analysis)%60)))\r\n start_chunk = time.time()\r\n else:\r\n for word in self.__text:\r\n self.find_optimal_split(word)\r\n\r\n total_time_elapsed = time.time() - start_analysis \r\n print('Analyzed %d words.' % len(self.__text))\r\n print('Total time: %d minutes and %d seconds.' 
% \\\r\n ((total_time_elapsed/60),\r\n (total_time_elapsed%60)))\r\n\r\n\r\n def find_optimal_split(self, segment):\r\n global sum_updates\r\n global sum_newcost\r\n global sum_restore\r\n global count_loops\r\n\r\n if len(segment) == 1 or self.__segments[segment].split != 0:\r\n return\r\n\r\n optimal_cost = self.total_cost()\r\n optimal_split = 0\r\n segment_count = self.__segments[segment].count\r\n\r\n for split_position in range(1, len(segment)):\r\n count_loops += 1 # !!!!!!!!!\r\n\r\n tupdates = time.time() # !!!!!!!!!\r\n\r\n left = segment[:split_position]\r\n right = segment[split_position:]\r\n\r\n self.update_segments(segment, segment_count, split_position,\r\n left, right, False)\r\n\r\n sum_updates += (time.time() - tupdates) # !!!!!!!!!!\r\n \r\n tcost = time.time() # !!!!!!!!!!\r\n new_cost = self.total_cost()\r\n \r\n if new_cost < optimal_cost:\r\n optimal_cost = new_cost\r\n optimal_split = split_position\r\n\r\n sum_newcost += (time.time() - tcost) # !!!!!!!!!!!\r\n\r\n trestore = time.time() # !!!!!!!!!!\r\n self.restore_segments(segment, segment_count, left, right)\r\n sum_restore += (time.time() - trestore) # !!!!!!!!!!\r\n\r\n if optimal_split > 0:\r\n left = segment[:optimal_split]\r\n right = segment[optimal_split:]\r\n \r\n self.update_segments(segment, segment_count, optimal_split,\r\n left, right, True)\r\n \r\n self.find_optimal_split(left)\r\n self.find_optimal_split(right)\r\n\r\n def update_segments(self, segment, segment_count, split_position,\r\n left, right, true_morphs):\r\n self.increase_count(left, segment_count)\r\n self.increase_count(right, segment_count)\r\n \r\n if left in self.__text_as_bag_of_words.keys():\r\n self.__text_as_bag_of_words[left] += segment_count\r\n else:\r\n self.__text_as_bag_of_words[left] = segment_count\r\n\r\n if right in self.__text_as_bag_of_words.keys():\r\n self.__text_as_bag_of_words[right] += segment_count\r\n else:\r\n self.__text_as_bag_of_words[right] = segment_count\r\n\r\n self.__total_tokens += segment_count\r\n\r\n self.__segments[segment] = \\\r\n self.__segments[segment]._replace(count = 0, split = split_position)\r\n\r\n del self.__text_as_bag_of_words[segment]\r\n\r\n if true_morphs == True:\r\n self.__segments[left] = \\\r\n self.__segments[left]._replace(ismorph=True)\r\n self.__segments[right] = \\\r\n self.__segments[right]._replace(ismorph=True)\r\n\r\n\r\n def restore_segments(self, segment, segment_count, left, right):\r\n self.decrease_count(left, segment_count)\r\n self.decrease_count(right, segment_count)\r\n \r\n self.__text_as_bag_of_words[left] -= segment_count\r\n if self.__text_as_bag_of_words[left] <= 0:\r\n del self.__text_as_bag_of_words[left]\r\n \r\n self.__text_as_bag_of_words[right] -= segment_count\r\n if self.__text_as_bag_of_words[right] <= 0:\r\n del self.__text_as_bag_of_words[right]\r\n\r\n\r\n self.__segments[segment] = \\\r\n self.__segments[segment]._replace(count = segment_count, split = 0)\r\n self.__text_as_bag_of_words[segment] = segment_count\r\n self.__total_tokens -= segment_count\r\n\r\n @profile\r\n def total_cost(self):\r\n cost_code = 0\r\n cost_text = 0.0\r\n for (segment, count) in list(self.__text_as_bag_of_words.items()):\r\n length = len(segment)\r\n cost_code += 5*length\r\n if length == 1:\r\n cost_text -= 2*count*math.log(count/self.__total_tokens)\r\n else:\r\n cost_text -= count*math.log(count/self.__total_tokens)\r\n \r\n return cost_code + cost_text\r\n\r\n\r\n def increase_count(self, segment, by=1, ismorph=False):\r\n if segment in 
self.__segments.keys():\r\n old_count = self.__segments[segment].count\r\n split = self.__segments[segment].split\r\n self.__segments[segment] = \\\r\n self.__segments[segment]._replace(count = old_count + by)\r\n\r\n if split != 0:\r\n left = segment[:split]\r\n right = segment[split:]\r\n added_left = self.increase_count(left, by)\r\n added_right = self.increase_count(right, by)\r\n if added_left == True or added_right == True:\r\n return True\r\n \r\n return False\r\n else:\r\n self.__segments[segment] = Segment(segment, ismorph, by, 0)\r\n return True\r\n\r\n\r\n def decrease_count(self, segment, by=1):\r\n old_count = self.__segments[segment].count\r\n split = self.__segments[segment].split\r\n\r\n if old_count - by > 0:\r\n self.__segments[segment] = \\\r\n self.__segments[segment]._replace(count = old_count - by)\r\n elif self.__segments[segment].ismorph == True:\r\n self.__segments[segment] = \\\r\n self.__segments[segment]._replace(count = 0)\r\n else:\r\n del self.__segments[segment]\r\n\r\n if split > 0:\r\n left = segment[:split]\r\n right = segment[split:]\r\n self.decrease_count(left, by)\r\n self.decrease_count(right, by)\r\n\r\n\r\n def get_segmentation(self, segment):\r\n global segted_text\r\n \r\n segmentation = ''\r\n split = self.__segments[segment].split\r\n if split == 0:\r\n segted_text.append(segment)\r\n return segment\r\n return (self.get_segmentation(segment[:split])\r\n + ' ' + self.get_segmentation(segment[split:]))\r\n\r\n\r\n def write_out_as_list(self):\r\n for word in self.__text:\r\n self.__file_out.write(\"%s %s\\n\" % (word, self.get_segmentation(word)))\r\n self.__file_out.close()\r\n\r\n\r\nclass Reader_am():\r\n def __init__(self, filepath, mode):\r\n self.__fp = filepath\r\n if mode == 'sera':\r\n self.__text = self.read_sera()\r\n elif mode == 'list':\r\n self.__text = self.read_list\r\n else:\r\n print(\"Readinf mode unknows. Terminating process.\")\r\n sys.exit(1)\r\n \r\n\r\n def read_sera(self):\r\n try:\r\n file = open(self.__fp, \"r\")\r\n except:\r\n print(\"Error opening file. Terminating process.\")\r\n sys.exit(1)\r\n\r\n print(\"Reading file..\") \r\n text = file.read()\r\n print(\"File read successfully.\")\r\n\r\n print(\"Cleaning up and tokenizing text..\")\r\n st = re.findall(r\"\\n(.*)\\n\", text)\r\n sents_split = [re.split(r\"\\s<.*?>[\\s\\n]+?\", block) for block in st]\r\n words = [word for sent in sents_split for word in sent\r\n if word.isalpha()]\r\n self.__text = words\r\n \r\n print(\"Text tokenized successfully.\\n\"\r\n + \"Tokens: %d / Types: %d\\n\" % (len(words), len(set(words))))\r\n\r\n file.close()\r\n return words\r\n\r\n \r\n def read_list(self):\r\n try:\r\n file = open(self.__fp, \"r\")\r\n except:\r\n print(\"Error opening file. Terminating process.\")\r\n sys.exit(1)\r\n\r\n words = []\r\n print(\"Reading words from file..\")\r\n for line in file:\r\n words.append(line)\r\n print(\"Words read successfully.\")\r\n print(\"Tokens: %d / Types: %d\\n\" % (len(words), len(set(words))))\r\n\r\n file.close()\r\n return words\r\n\r\n\r\n def get_text(self):\r\n return self.__text\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214469869","text":"#! 
/usr/bin/env python3\nimport sys\n\ndef str_to_bytestream(hexstream):\n hsList = []\n hsiter = iter(hexstream)\n for byte in hsiter:\n hsList.append(\"\\\\x{}{}\".format(byte, next(hsiter)))\n\n return hsList\n\ndef main():\n\n if len(sys.argv) != 2:\n print(\"Usage: {} \".format(sys.argv[0]))\n return\n\n hsList = str_to_bytestream(sys.argv[1])\n\n print('echo -n -e \"{}\"'.format(\"\".join(hsList)))\n\n return 0\n\nif __name__ == '__main__':\n main()\n","sub_path":"bytestream_to_str.py","file_name":"bytestream_to_str.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"33622522","text":"__author__ = 'aka'\n\nimport os.path\nimport nltk\n\npath = \"./data/en/\"\ndirs = os.listdir( path )\n\nvocab = {}\nid = 0\n\nfor file in dirs:\n in_file = open(path+file)\n print (file)\n for line in in_file:\n splitted = line.split('\\t')\n num, text = splitted[0].split(' ', 1)\n tokens = nltk.word_tokenize(text)\n words = [w.lower() for w in tokens]\n for w in words:\n if w not in vocab and w != ' ':\n print (w + \"+++\")\n vocab[w] = id\n id += 1\n\n if len(splitted) > 1:\n answers = splitted[1].strip(' ').lower().split(',')\n for answer in answers:\n if (answer not in vocab) and (answer != ' '):\n print (answer + \"+++\")\n vocab[answer] = id\n id += 1\n\nout_file = open('./data/vocab.txt', 'w')\nout_file.write(str(len(vocab))+'\\n')\nfor k, v in vocab.items():\n out_file.write(k + \" \" + str(v)+'\\n')\n","sub_path":"buildVocab.py","file_name":"buildVocab.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240074342","text":"from Scripts.Common.PScript import *\nfrom Scripts.Common.VehicleController import *\nfrom Scripts.Common.EffectsArray import *\n\nclass MacoWheelInfo(WheelInfo):\n # le rayon des roues\n wheelRadius = 1.05\n # la longeur maximal des suspensions\n suspensionRestLength = 0.1\n # la compresion maximal des suspensions\n compression = 10\n # l'absorption des suspensions\n damping = 1\n # le rebond des suspensions\n stifness = 20\n # la fristion de la roue\n tyreFriction = 10\n # l'influence sur tout l'ensemble\n roolInfluence = 1\n \nNONE_MODE = -1\nTURRET_MODE = 0\nPERISCOPE_MODE = 1\nDRIVE_MODE = 2\n\ndef switchGunMode():\n if logic.mode == TURRET_MODE:\n logic.mode = PERISCOPE_MODE\n sce.active_camera = \"Periscope_Viseur_Turret\"\n else:\n logic.mode = TURRET_MODE\n sce.active_camera = \"Gun_Viseur_Turret\"\n\ndef switchDriveMode():\n if logic.mode != DRIVE_MODE:\n logic.mode = DRIVE_MODE\n sce.active_camera = \"Camera_TrackTo\"\n\ndef fire():\n if logic.mode == TURRET_MODE:\n gun = objList[\"Gun_Turret\"]\n \n if gun[\"reload_time\"] > 0:\n gun[\"reload_time\"] = 0\n mantelet = objList[\"Mantelet\"]\n hull = objList[\"Hull\"]\n # appliquation du recul du canon\n point = mantelet.worldPosition\n impulse = Vector((0, -500, 0)) * mantelet.worldOrientation.inverted()\n hull.applyImpulse(point, impulse, False)\n # animation du recul du canon\n gun.playAction(\"Gun_TurretAction\", 0, 10)\n # effet de fumée\n smoke_origin = objList[\"Fire_Effect_Origin\"]\n effects.createEffect(\"MuzzleEffect\", smoke_origin, smoke_origin.worldPosition, smoke_origin.worldOrientation)\ndef upGear():\n if logic.mode == DRIVE_MODE:\n logic.vehicleController.upGear()\n\ndef downGear():\n if logic.mode == DRIVE_MODE:\n logic.vehicleController.downGear()\n\ndef turnLeft():\n if logic.mode == DRIVE_MODE:\n 
logic.vehicleController.turn(.02)\n\ndef turnRight():\n if logic.mode == DRIVE_MODE:\n logic.vehicleController.turn(-.02)\n\ndef switchMainBrake():\n if logic.mode == DRIVE_MODE:\n logic.vehicleController.setMainbrake(not logic.vehicleController.useMainBrake)\n\ndef updateEngine():\n if logic.mode == DRIVE_MODE:\n logic.vehicleController.update() \n\ndef main(cout):\n car = objList[\"Hull\"]\n wheelsLeft = [\n MacoWheelInfo(objList[\"Wheel_0_L\"], True, False),\n MacoWheelInfo(objList[\"Wheel_1_L\"], False, False),\n MacoWheelInfo(objList[\"Wheel_2_L\"], False, False)\n ]\n wheelsRight = [\n MacoWheelInfo(objList[\"Wheel_0_R\"], True, False),\n MacoWheelInfo(objList[\"Wheel_1_R\"], False, False),\n MacoWheelInfo(objList[\"Wheel_2_R\"], False, False)\n ]\n gearBox = [GearInfo(0), GearInfo(-200), GearInfo(-500), GearInfo(-800), GearInfo(-1200)]\n logic.vehicleController = VehicleController(car, wheelsLeft, wheelsRight, gearBox, .2)\n\n logic.mode = DRIVE_MODE\n # mode de vues\n logic.keyboard.setEventCallBack(events.MKEY, logic.KX_INPUT_JUST_ACTIVATED, [switchGunMode])\n logic.keyboard.setEventCallBack(events.EKEY, logic.KX_INPUT_JUST_ACTIVATED, [switchDriveMode])\n # tire de canon\n logic.mouse.setEventCallBack(events.LEFTMOUSE, logic.KX_INPUT_JUST_ACTIVATED, [fire])\n # changement de vitesse\n logic.keyboard.setEventCallBack(events.ZKEY, logic.KX_INPUT_JUST_ACTIVATED, [upGear])\n logic.keyboard.setEventCallBack(events.SKEY, logic.KX_INPUT_JUST_ACTIVATED, [downGear])\n # volant\n logic.keyboard.setEventCallBack(events.QKEY, logic.KX_INPUT_ACTIVE, [turnLeft])\n logic.keyboard.setEventCallBack(events.DKEY, logic.KX_INPUT_ACTIVE, [turnRight])\n # frein a main\n logic.keyboard.setEventCallBack(events.SPACEKEY, logic.KX_INPUT_JUST_ACTIVATED, [switchMainBrake])\n # actualisation du moteur\n sce.post_draw = [updateEngine]\n\n # effet de poussieres\n initializeEffects()\n effects.createEffect(\"TrackEffect\", \"Wheel_0_L\", Vector((0, -1, -.8)), useLocalPos = True)\n effects.createEffect(\"TrackEffect\", \"Wheel_1_L\", Vector((0, -1, -.8)), useLocalPos = True)\n effects.createEffect(\"TrackEffect\", \"Wheel_2_L\", Vector((0, -1, -.8)), useLocalPos = True)\n effects.createEffect(\"TrackEffect\", \"Wheel_0_R\", Vector((0, -1, -.8)), useLocalPos = True)\n effects.createEffect(\"TrackEffect\", \"Wheel_1_R\", Vector((0, -1, -.8)), useLocalPos = True)\n effects.createEffect(\"TrackEffect\", \"Wheel_2_R\", Vector((0, -1, -.8)), useLocalPos = True)\n\n\ndef turnGun(cont):\n if logic.mode == TURRET_MODE:\n cont.deactivate(\"PeriscopeLook\")\n cont.deactivate(\"PeriscopeCamLook\")\n cont.activate(\"TurretLook\")\n cont.activate(\"ManteletLook\")\n elif logic.mode == PERISCOPE_MODE:\n cont.deactivate(\"TurretLook\")\n cont.deactivate(\"ManteletLook\")\n cont.activate(\"PeriscopeLook\")\n cont.activate(\"PeriscopeCamLook\")","sub_path":"Scripts/Starter/CarWheel.py","file_name":"CarWheel.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"517098945","text":"def read():\n buf = input().split(' ')\n return [(int)(x) for x in buf]\n\nbuf = read()\na = buf[0]\nb = buf[1]\nd = a // b\nr = a % b\nf = a / b\nprint('{} {} {:5f}'.format(d, r, f))\n","sub_path":"aoj/10000-/10008.py","file_name":"10008.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70239796","text":"from __future__ import print_function\n\nimport shlex\nfrom 
unittest import TestCase\nimport os\nimport subprocess\nimport pytest\n\nfrom distributed import LocalCluster\n\nfrom jobqueue_features import (\n mpi_wrap,\n MPIEXEC,\n MPICH,\n SRUN,\n OPENMPI,\n SUPPORTED_MPI_LAUNCHERS,\n on_cluster,\n mpi_task,\n which,\n get_task_mpi_comm,\n set_task_mpi_comm,\n serialize_function_and_args,\n deserialize_and_execute,\n mpi_deserialize_and_execute,\n verify_mpi_communicator,\n flush_and_abort,\n)\n\nfrom jobqueue_features.clusters_controller import (\n clusters_controller_singleton as controller,\n)\n\n# Use logging if there are hard to see issues in the CI\n\n# import logging\n# logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.DEBUG)\n\n\nclass TestMPIWrap(TestCase):\n def setUp(self):\n # Kill any existing clusters\n controller._close()\n\n self.local_cluster = LocalCluster(name=\"test\")\n self.executable = \"python\"\n self.script_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"resources\", \"helloworld.py\")\n )\n self.number_of_processes = 4\n\n @mpi_task(cluster_id=\"test\")\n def mpi_wrap_task(**kwargs):\n return mpi_wrap(**kwargs)\n\n @on_cluster(cluster=self.local_cluster, cluster_id=\"test\")\n def test_function(\n script_path,\n mpi_launcher=MPIEXEC,\n launcher_args=None,\n nodes=1,\n ntasks_per_node=4,\n cpus_per_task=1,\n return_wrapped_command=False,\n ):\n mpi_tasks = ntasks_per_node * nodes\n t = mpi_wrap_task(\n executable=self.executable,\n exec_args=script_path,\n mpi_launcher=mpi_launcher,\n launcher_args=launcher_args,\n mpi_tasks=mpi_tasks,\n cpus_per_task=cpus_per_task,\n ntasks_per_node=ntasks_per_node,\n nodes=nodes,\n return_wrapped_command=return_wrapped_command,\n )\n result = t.result()\n\n return result\n\n self.test_function = test_function\n\n def mpi_task1(task_name):\n comm = get_task_mpi_comm()\n size = comm.Get_size()\n # Since it is a return value it will only get printed by root\n return \"Running %d tasks of type %s.\" % (size, task_name)\n\n self.mpi_task1 = mpi_task1\n\n def string_task(string, kwarg_string=None):\n return \" \".join([s for s in [string, kwarg_string] if s])\n\n self.string_task = string_task\n\n def tearDown(self):\n # Kill any existing clusters\n controller._close()\n\n def is_mpich(self):\n cmd = \"mpicc -v\"\n proc = subprocess.Popen(\n shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if b\"mpich\" in proc.stdout.read().lower():\n return True\n return False\n\n def test_which(self):\n # Check it finds a full path\n self.assertEqual(which(self.script_path), self.script_path)\n # Check it searches the PATH envvar\n os.environ[\"PATH\"] += os.pathsep + os.path.dirname(self.script_path)\n self.assertEqual(which(os.path.basename(self.script_path)), self.script_path)\n # Check it returns None if the executable doesn't exist\n self.assertIsNone(which(\"not_an_executable\"))\n # Check it returns None when a file is not executable\n self.assertIsNone(which(os.path.realpath(__file__)))\n\n def test_mpi_wrap_execution(self):\n # Only check the ones that work in CI\n if self.is_mpich():\n # Haven't implemented explicit MPICH support yet\n launchers = [MPICH, MPIEXEC]\n else:\n launchers = [OPENMPI, MPIEXEC]\n for launcher in launchers:\n # Include some (non-standard) OpenMPI options so that we can run this in CI\n launcher_args = \"--allow-run-as-root --oversubscribe\"\n if self.is_mpich():\n # In PBS we use mpich which doesn't require these\n launcher_args = \"\"\n\n if which(launcher[\"launcher\"]) is None:\n print(\"Didn't find {}, 
skipping test\".format(launcher))\n pass\n else:\n print(\"Found {} launcher in env, running MPI test\".format(launcher))\n result = self.test_function(\n self.script_path, mpi_launcher=launcher, launcher_args=launcher_args\n )\n for n in range(self.number_of_processes):\n text = \"Hello, World! I am process {} of {}\".format(\n n, self.number_of_processes\n )\n self.assertIn(text.encode(), result[\"out\"])\n\n def test_mpi_wrap(self):\n # Test syntax of wrapped MPI launcher commands\n mpi_launchers = SUPPORTED_MPI_LAUNCHERS\n # specific example of 2 nodes and 3 processes\n expected_launcher_args = [\n \"\",\n \"-n 6\",\n \"-np 6 --map-by ppr:3:node\",\n \"-n 6 -perhost 3\",\n \"-n 6 -ppn 3\",\n ]\n # specific example of 2 nodes, 3 processes and 4 OpenMP threads\n hybrid_expected_launcher_args = [\n \"\",\n \"-n 6\",\n \"-np 6 --map-by ppr:3:node:pe=4\",\n \"-n 6 -perhost 3 -env I_MPI_PIN_DOMAIN 4\",\n \"-n 6 -ppn 3 -genv OMP_NUM_THREADS 4 -bind-to core:4\",\n ]\n for mpi_launcher, expected_launcher_opts, hybrid_expected_launcher_opts in zip(\n mpi_launchers, expected_launcher_args, hybrid_expected_launcher_args\n ):\n result = self.test_function(\n self.script_path,\n mpi_launcher=mpi_launcher,\n nodes=2,\n ntasks_per_node=3,\n return_wrapped_command=True,\n )\n _cmd = (\n mpi_launcher[\"launcher\"],\n expected_launcher_opts,\n self.executable,\n self.script_path,\n )\n expected_result = \" \".join(filter(len, map(str, _cmd)))\n self.assertEqual(result, expected_result)\n\n # Now check OpenMP threaded versions\n result = self.test_function(\n self.script_path,\n mpi_launcher=mpi_launcher,\n nodes=2,\n ntasks_per_node=3,\n cpus_per_task=4,\n return_wrapped_command=True,\n )\n _cmd = (\n mpi_launcher[\"launcher\"],\n hybrid_expected_launcher_opts,\n self.executable,\n self.script_path,\n )\n expected_result = \" \".join(filter(len, map(str, _cmd)))\n self.assertEqual(result, expected_result)\n\n # Test the MPI wrapper in isolation for srun (which we assume doesn't exist):\n def test_mpi_srun_wrapper(self):\n if which(SRUN[\"launcher\"]) is None:\n print(\n \"Didn't find {}, running OSError test for no available launcher\".format(\n SRUN\n )\n )\n with self.assertRaises(OSError) as context:\n mpi_wrap(\n executable=\"python\",\n exec_args=self.script_path,\n mpi_launcher=SRUN,\n mpi_tasks=self.number_of_processes,\n )\n self.assertTrue(\n \"OS error caused by constructed command\" in str(context.exception)\n )\n else:\n pass\n\n # Test our serialisation method\n def test_serialize_function_and_args(self):\n # First check elements in our dict\n serialized_object = serialize_function_and_args(self.string_task)\n for key in serialized_object.keys():\n self.assertIn(key, [\"header\", \"frames\"])\n serialized_object = serialize_function_and_args(self.string_task, \"chicken\")\n for key in serialized_object.keys():\n self.assertIn(key, [\"header\", \"frames\", \"args_header\", \"args_frames\"])\n serialized_object = serialize_function_and_args(\n self.string_task, kwarg_string=\"dog\"\n )\n for key in serialized_object.keys():\n self.assertIn(key, [\"header\", \"frames\", \"kwargs_header\", \"kwargs_frames\"])\n serialized_object = serialize_function_and_args(\n self.string_task, \"chicken\", kwarg_string=\"dog\"\n )\n for key in serialized_object.keys():\n self.assertIn(\n key,\n [\n \"header\",\n \"frames\",\n \"args_header\",\n \"args_frames\",\n \"kwargs_header\",\n \"kwargs_frames\",\n ],\n )\n\n def test_deserialize_and_execute(self):\n serialized_object = serialize_function_and_args(\n 
self.string_task, \"chicken\", kwarg_string=\"dog\"\n )\n self.assertEqual(\"chicken dog\", deserialize_and_execute(serialized_object))\n\n def test_verify_mpi_communicator_raise(self):\n with self.assertRaises(SystemExit) as cm:\n verify_mpi_communicator(\"Not a communicator\", mpi_abort=False)\n self.assertEqual(cm.exception.code, 1)\n\n def test_mpi_deserialize_and_execute_raise(self):\n trivial = \"trivial\"\n serialized_object = serialize_function_and_args(self.mpi_task1, trivial)\n # For the deserializer to work we need to first set the task MPI communicator\n with self.assertRaises(AttributeError):\n mpi_deserialize_and_execute(serialized_object)\n\n def test_flush_and_abort(self):\n with self.assertRaises(SystemExit) as cm:\n flush_and_abort(mpi_abort=False)\n self.assertEqual(cm.exception.code, 1)\n with self.assertRaises(SystemExit) as cm:\n flush_and_abort(error_code=2, mpi_abort=False)\n self.assertEqual(cm.exception.code, 2)\n\n # Since this test initialises an MPI environment in the test context, it needs to\n # be run last as it interferes with other tests above\n @pytest.mark.last\n def test_mpi_deserialize_and_execute(self):\n from mpi4py import MPI\n\n comm = MPI.COMM_WORLD\n self.assertTrue(verify_mpi_communicator(comm, mpi_abort=False))\n # The test framework is not started with an MPI launcher so we have\n # a single task\n set_task_mpi_comm(parent_comm=comm)\n trivial = \"trivial\"\n serialized_object = serialize_function_and_args(self.mpi_task1, trivial)\n expected_string = \"Running 1 tasks of type {}.\".format(trivial)\n return_value = mpi_deserialize_and_execute(serialized_object)\n self.assertEqual(expected_string, return_value)\n","sub_path":"jobqueue_features/tests/test_mpi_wrapper.py","file_name":"test_mpi_wrapper.py","file_ext":"py","file_size_in_byte":10863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"323133783","text":"'''\nThis App is to illustrate binding keyboard events,\nevents are listed, just change them to see what happens...\n'''\n#Packages\nfrom tkinter import *\n\nroot = Tk()\nroot.title('KeyBoard Key Binder')\nroot.iconbitmap(\"D:/e-Learning/Tkinter/Images/india-flag.ico\")\nroot.geometry(\"400x400\")\n\ndef click(event):\n lbl = Label(root, text=\"You Clicked this Button : \" + event.keysym)\n #+ event.char)#+ str(event.x) + \" \" + str(event.y) <- add this to above to get cursor location\n lbl.pack()\n\nbtn = Button(root, text=\"Click Me!\")\nbtn.bind(\"\", click)\n\n'''\n# events #\nButton-1: message appears when you left click on button\nButton-2: message appears when you click on scroll button on button\nButton-3: message appears when you right click on button\nEnter : move cursor in the button to see magic\nLeave : move cursor out of button to see magic\nFocusIn : tab to highlight button\nFocusOut: tab to highlight button\nReturn : press tab to highlight button then press enter key\nKey : press tab to highlight button the any key on keyboard to see magic\n'''\nbtn.pack(pady=20)\n\n#event handler\nroot.mainloop()\n","sub_path":"App's/bind.py","file_name":"bind.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"150954167","text":"\nimport pygame as pg\nimport random\nfrom settings import *\nfrom sprites import *\n\nclass Game:\n def __init__(self):\n # initialize game window, etc\n pg.init()\n pg.mixer.init()\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\n 
pg.display.set_caption(TITLE)\n self.clock = pg.time.Clock()\n self.running = True\n self.font_name = pg.font.match_font(\"bitstreamverasans\")\n self.score = 0\n\n def new(self):\n # start a new game\n self.all_sprites = pg.sprite.Group()\n self.platforms = pg.sprite.Group()\n self.player = Player(self)\n self.all_sprites.add(self.player)\n for plat in PLATFORM_LIST:\n p = Platform(*plat)\n self.all_sprites.add(p)\n self.platforms.add(p)\n self.run()\n\n def run(self):\n # Game Loop\n self.playing = True\n while self.playing:\n self.clock.tick(FPS)\n self.events()\n self.update()\n self.draw()\n\n def update(self):\n # Game Loop - Update\n self.all_sprites.update()\n # check if player hits a platform - only if falling\n if self.player.vel.y > 0:\n hits = pg.sprite.spritecollide(self.player, self.platforms, False)\n if hits:\n self.player.pos.y = hits[0].rect.top\n self.player.vel.y = 0\n\n if self.player.rect.bottom > HEIGHT:\n self.player.kill()\n self.playing = False\n\n\n def events(self):\n # Game Loop - events\n for event in pg.event.get():\n # check for closing window\n if event.type == pg.QUIT:\n if self.playing:\n self.playing = False\n self.running = False\n\n\n def draw(self):\n # Game Loop - draw\n self.screen.fill(BOU_BLUE)\n self.screen.set_alpha(150)\n\n self.all_sprites.draw(self.screen)\n surf = pg.Surface((100, 100))\n pg.draw.circle(surf, YELLOW, (WIDTH / 2, HEIGHT/2), playerRadius, playerThiccness)\n # *after* drawing everything, flip the display\n pg.display.flip()\n\n def show_start_screen(self):\n # game splash/start screen\n self.screen.fill(BLACK)\n self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)\n self.draw_text(\"Arrows to move, Space to jump\", 22, WHITE, WIDTH / 2, HEIGHT / 2)\n self.draw_text(\"Press a key to play\", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)\n pg.display.flip()\n self.wait_for_key()\n\n def show_go_screen(self):\n # game over/continue\n if not self.running:\n return\n self.screen.fill(BLACK)\n self.draw_text(\"GAME OVER\", 48, WHITE, WIDTH / 2, HEIGHT / 4)\n self.draw_text(\"Score: \" + str(self.score), 22, WHITE, WIDTH / 2, HEIGHT / 2)\n self.draw_text(\"Press a key to play again\", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)\n pg.display.flip()\n self.wait_for_key()\n\n def wait_for_key(self):\n waiting = True\n while waiting:\n self.clock.tick(FPS)\n for event in pg.event.get():\n if event.type == pg.QUIT:\n waiting = False\n self.running = False\n if event.type == pg.KEYUP:\n waiting = False\n\n def draw_text(self, text, size, color, x, y):\n font = pg.font.Font(self.font_name, size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n self.screen.blit(text_surface, text_rect)\n\n\ng = Game()\n#g.show_start_screen()\nwhile g.running:\n g.new()\n #g.show_go_screen()\n\npg.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"24754489","text":"# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://forge.epn-campus.eu/projects/azimuthal\n#\n# File: \"$Id$\"\n#\n# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your 
option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\n__author__ = \"Jerome Kieffer\"\n__license__ = \"GPLv3\"\n__date__ = \"18/10/2012\"\n__copyright__ = \"2012, ESRF, Grenoble\"\n__contact__ = \"jerome.kieffer@esrf.fr\"\n\nimport os, gc, logging\nimport threading\nimport hashlib\nimport numpy\nfrom .opencl import ocl, pyopencl\nfrom .splitBBoxLUT import HistoBBox1d\nfrom .utils import get_cl_file\nif pyopencl:\n mf = pyopencl.mem_flags\nelse:\n raise ImportError(\"pyopencl is not installed\")\ntry:\n from .fastcrc import crc32\nexcept:\n from zlib import crc32\nlogger = logging.getLogger(\"pyFAI.ocl_azim_csr\")\n\nclass OCL_CSR_Integrator(object):\n def __init__(self, lut, image_size, devicetype=\"all\",\n padded=False, block_size=32,\n platformid=None, deviceid=None, \n checksum=None, profile=False):\n \"\"\"\n @param lut: 3-tuple of arrays \n data: coefficient of the matrix in a 1D vector of float32 - size of nnz\n indices: Column index position for the data (same size as data) \n indptr: row pointer indicates the start of a given row. len nbin+1\n @param image_size: \n @param devicetype: can be \"cpu\",\"gpu\",\"acc\" or \"all\"\n @param block_size: the chosen size for WORKGROUP_SIZE\n @param platformid: number of the platform as given by clinfo\n @type platformid: int\n @param deviceid: number of the device as given by clinfo\n @type deviceid: int\n @param checksum: pre - calculated checksum to prevent re - calculating it :)\n @param profile: store profiling elements\n \"\"\"\n self.BLOCK_SIZE = block_size # query for warp size\n self.padded = padded\n self._sem = threading.Semaphore()\n self._data = lut[0]\n self._indices = lut[1]\n self._indptr = lut[2]\n self.bins = self._indptr.shape[0] - 1\n if self._data.shape[0] != self._indices.shape[0]:\n raise RuntimeError(\"data.shape[0] != indices.shape[0]\")\n self.data_size = self._data.shape[0] \n self.size = image_size\n self.profile = profile\n if not checksum:\n checksum = crc32(self._data)\n self.on_device = {\"data\":checksum, \"dark\":None, \"flat\":None, \"polarization\":None, \"solidangle\":None}\n self._cl_kernel_args = {}\n self._cl_mem = {}\n self.events = []\n if (platformid is None) and (deviceid is None):\n platformid, deviceid = ocl.select_device(devicetype)\n elif platformid is None:\n platformid = 0\n elif deviceid is None:\n deviceid = 0\n self.platform = ocl.platforms[platformid]\n self.device = self.platform.devices[deviceid]\n self.device_type = self.device.type\n if (self.device_type == \"CPU\") and (self.platform.vendor == \"Apple\"):\n logger.warning(\"This is a workaround for Apple's OpenCL on CPU: enforce BLOCK_SIZE=1\")\n self.BLOCK_SIZE = 1\n self.workgroup_size = self.BLOCK_SIZE,\n self.wdim_bins = (self.bins * self.BLOCK_SIZE),\n self.wdim_data = (self.size + self.BLOCK_SIZE - 1) & ~(self.BLOCK_SIZE - 1),\n try:\n self._ctx = pyopencl.Context(devices=[pyopencl.get_platforms()[platformid].get_devices()[deviceid]])\n if self.profile: \n self._queue = pyopencl.CommandQueue(self._ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)\n else:\n self._queue = pyopencl.CommandQueue(self._ctx)\n self._allocate_buffers()\n self._compile_kernels()\n 
self._set_kernel_arguments()\n except pyopencl.MemoryError as error:\n raise MemoryError(error)\n# if self.device_type == \"CPU\":\n# ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"data\"], data)\n# ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"indices\"], indices)\n# ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"indptr\"], indptr)\n# else:\n ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"data\"], self._data)\n if self.profile: self.events.append((\"copy Coefficient data\",ev))\n ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"indices\"], self._indices)\n if self.profile: self.events.append((\"copy Row Index data\",ev))\n ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"indptr\"], self._indptr)\n if self.profile: self.events.append((\"copy Column Pointer data\",ev))\n \n def __del__(self):\n \"\"\"\n Destructor: release all buffers\n \"\"\"\n self._free_kernels()\n self._free_buffers()\n self._queue = None\n self._ctx = None\n gc.collect()\n\n def _allocate_buffers(self):\n \"\"\"\n Allocate OpenCL buffers required for a specific configuration\n\n Note that an OpenCL context also requires some memory, as well as Event and other OpenCL functionalities which cannot and\n are not taken into account here.\n The memory required by a context varies depending on the device. Typical for GTX580 is 65Mb but for a 9300m is ~15Mb\n In addition, a GPU will always have at least 3-5Mb of memory in use.\n Unfortunately, OpenCL does NOT have a built-in way to check the actual free memory on a device, only the total memory.\n \"\"\"\n if self.size < self.BLOCK_SIZE:\n raise RuntimeError(\"Fatal error in _allocate_buffers. size (%d) must be >= BLOCK_SIZE (%d)\\n\", self.size, self.BLOCK_SIZE)\n size_of_float = numpy.dtype(numpy.float32).itemsize\n size_of_short = numpy.dtype(numpy.int16).itemsize\n size_of_int = numpy.dtype(numpy.int32).itemsize\n size_of_long = numpy.dtype(numpy.int64).itemsize\n\n ualloc = (self.size * size_of_float) * 5\n ualloc += (self.size * size_of_short)\n ualloc += (self.data_size * (size_of_float + size_of_int))\n ualloc += ((self.bins + 1) * size_of_int)\n ualloc += (self.bins * size_of_float) * 3\n memory = self.device.memory\n logger.info(\"%.3fMB are needed on device which has %.3fMB\" % (ualloc / 1.0e6, memory / 1.0e6))\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # now actually allocate:\n try:\n self._cl_mem[\"data\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * self.data_size)\n self._cl_mem[\"indices\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_int * self.data_size)\n self._cl_mem[\"indptr\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_int * (self.bins+1))\n self._cl_mem[\"outData\"] = pyopencl.Buffer(self._ctx, mf.WRITE_ONLY, size_of_float * self.bins)\n self._cl_mem[\"outCount\"] = pyopencl.Buffer(self._ctx, mf.WRITE_ONLY, size_of_float * self.bins)\n self._cl_mem[\"outMerge\"] = pyopencl.Buffer(self._ctx, mf.WRITE_ONLY, size_of_float * self.bins)\n self._cl_mem[\"image_u16\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size=size_of_short * self.size)\n self._cl_mem[\"image\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size=size_of_float * self.size)\n self._cl_mem[\"dark\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size=size_of_float * self.size)\n self._cl_mem[\"flat\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size=size_of_float * self.size)\n self._cl_mem[\"polarization\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size=size_of_float * self.size)\n self._cl_mem[\"solidangle\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size=size_of_float * self.size)\n except pyopencl.MemoryError as error:\n self._free_buffers()\n raise MemoryError(error)\n\n def _free_buffers(self):\n \"\"\"\n free all memory allocated on the device\n \"\"\"\n for buffer_name in self._cl_mem:\n if self._cl_mem[buffer_name] is not None:\n try:\n self._cl_mem[buffer_name].release()\n self._cl_mem[buffer_name] = None\n except pyopencl.LogicError:\n logger.error(\"Error while freeing buffer %s\" % buffer_name)\n\n\n\n def _compile_kernels(self, kernel_file=None):\n \"\"\"\n Call the OpenCL compiler\n @param kernel_file: path tothe\n \"\"\"\n kernel_name = \"ocl_azim_CSR.cl\"\n if kernel_file is None:\n if os.path.isfile(kernel_name):\n kernel_file = os.path.abspath(kernel_name)\n else:\n kernel_file = get_cl_file(kernel_name)\n else:\n kernel_file = str(kernel_file)\n with open(kernel_file, \"r\") as kernelFile:\n kernel_src = kernelFile.read()\n\n compile_options = \"-D NBINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D ON_CPU=%i\" % \\\n (self.bins, self.size, self.BLOCK_SIZE, int(self.device_type == \"CPU\"))\n logger.info(\"Compiling file %s with options %s\" % (kernel_file, compile_options))\n try:\n self._program = pyopencl.Program(self._ctx, kernel_src).build(options=compile_options)\n except pyopencl.MemoryError as error:\n raise MemoryError(error)\n\n def _free_kernels(self):\n \"\"\"\n free all kernels\n \"\"\"\n for kernel in self._cl_kernel_args:\n self._cl_kernel_args[kernel] = []\n self._program = None\n\n def _set_kernel_arguments(self):\n \"\"\"Tie arguments of OpenCL kernel-functions to the actual kernels\n\n set_kernel_arguments() is a private method, called by configure().\n It uses the dictionary _cl_kernel_args.\n Note that by default, since TthRange is disabled, the integration kernels have tth_min_max tied to the tthRange argument slot.\n When setRange is called it replaces that argument with tthRange low and upper bounds. 
When unsetRange is called, the argument slot\n is reset to tth_min_max.\n \"\"\"\n self._cl_kernel_args[\"corrections\"] = [self._cl_mem[\"image\"], numpy.int32(0), self._cl_mem[\"dark\"], numpy.int32(0), self._cl_mem[\"flat\"], \\\n numpy.int32(0), self._cl_mem[\"solidangle\"], numpy.int32(0), self._cl_mem[\"polarization\"], \\\n numpy.int32(0), numpy.float32(0), numpy.float32(0)]\n self._cl_kernel_args[\"csr_integrate\"] = [self._cl_mem[\"image\"], self._cl_mem[\"data\"], self._cl_mem[\"indices\"], self._cl_mem[\"indptr\"], \\\n numpy.int32(0), numpy.float32(0), \\\n self._cl_mem[\"outData\"], self._cl_mem[\"outCount\"], self._cl_mem[\"outMerge\"]]\n self._cl_kernel_args[\"memset_out\"] = [self._cl_mem[i] for i in [\"outData\", \"outCount\", \"outMerge\"]]\n self._cl_kernel_args[\"u16_to_float\"] = [self._cl_mem[i] for i in [\"image_u16\", \"image\"]]\n self._cl_kernel_args[\"s32_to_float\"] = [self._cl_mem[i] for i in [\"image\", \"image\"]]\n\n def integrate(self, data, dummy=None, delta_dummy=None, dark=None, flat=None, solidAngle=None, polarization=None,\n dark_checksum=None, flat_checksum=None, solidAngle_checksum=None, polarization_checksum=None):\n events = []\n with self._sem:\n if data.dtype == numpy.uint16:\n copy_image = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"image_u16\"], numpy.ascontiguousarray(data))\n cast_u16_to_float = self._program.u16_to_float(self._queue, self.wdim_data, self.workgroup_size, *self._cl_kernel_args[\"u16_to_float\"])\n events+=[(\"copy image\",copy_image),(\"cast\", cast_u16_to_float)]\n elif data.dtype == numpy.int32:\n copy_image = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"image\"], numpy.ascontiguousarray(data))\n cast_s32_to_float = self._program.s32_to_float(self._queue, self.wdim_data, self.workgroup_size, *self._cl_kernel_args[\"s32_to_float\"])\n events+=[(\"copy image\",copy_image),(\"cast\", cast_s32_to_float)]\n else:\n copy_image = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"image\"], numpy.ascontiguousarray(data, dtype=numpy.float32))\n events+=[(\"copy image\",copy_image)]\n memset = self._program.memset_out(self._queue, self.wdim_bins, self.workgroup_size, *self._cl_kernel_args[\"memset_out\"])\n events+=[(\"memset\",memset)]\n if dummy is not None:\n do_dummy = numpy.int32(1)\n dummy = numpy.float32(dummy)\n if delta_dummy == None:\n delta_dummy = numpy.float32(0)\n else:\n delta_dummy = numpy.float32(abs(delta_dummy))\n else:\n do_dummy = numpy.int32(0)\n dummy = numpy.float32(0)\n delta_dummy = numpy.float32(0)\n self._cl_kernel_args[\"corrections\"][9] = do_dummy\n self._cl_kernel_args[\"corrections\"][10] = dummy\n self._cl_kernel_args[\"corrections\"][11] = delta_dummy\n self._cl_kernel_args[\"csr_integrate\"][4] = do_dummy\n self._cl_kernel_args[\"csr_integrate\"][5] = dummy\n\n if dark is not None:\n do_dark = numpy.int32(1)\n if not dark_checksum:\n dark_checksum = crc32(dark)\n if dark_checksum != self.on_device[\"dark\"]:\n ev = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"dark\"], numpy.ascontiguousarray(dark, dtype=numpy.float32))\n events.append(\"copy dark\",ev)\n self.on_device[\"dark\"] = dark_checksum\n else:\n do_dark = numpy.int32(0)\n self._cl_kernel_args[\"corrections\"][1] = do_dark\n if flat is not None:\n do_flat = numpy.int32(1)\n if not flat_checksum:\n flat_checksum = crc32(flat)\n if self.on_device[\"flat\"] != flat_checksum:\n ev=pyopencl.enqueue_copy(self._queue, self._cl_mem[\"flat\"], numpy.ascontiguousarray(flat, dtype=numpy.float32))\n events.append(\"copy flat\",ev)\n 
self.on_device[\"flat\"] = flat_checksum\n else:\n do_flat = numpy.int32(0)\n self._cl_kernel_args[\"corrections\"][3] = do_flat\n\n if solidAngle is not None:\n do_solidAngle = numpy.int32(1)\n if not solidAngle_checksum:\n solidAngle_checksum = crc32(solidAngle)\n if solidAngle_checksum != self.on_device[\"solidangle\"]:\n ev=pyopencl.enqueue_copy(self._queue, self._cl_mem[\"solidangle\"], numpy.ascontiguousarray(solidAngle, dtype=numpy.float32))\n events.append((\"copy solidangle\",ev))\n self.on_device[\"solidangle\"] = solidAngle_checksum\n else:\n do_solidAngle = numpy.int32(0)\n self._cl_kernel_args[\"corrections\"][5] = do_solidAngle\n\n if polarization is not None:\n do_polarization = numpy.int32(1)\n if not polarization_checksum:\n polarization_checksum = crc32(polarization)\n if polarization_checksum != self.on_device[\"polarization\"]:\n ev=pyopencl.enqueue_copy(self._queue, self._cl_mem[\"polarization\"], numpy.ascontiguousarray(polarization, dtype=numpy.float32))\n events.append((\"copy polarization\",ev))\n self.on_device[\"polarization\"] = polarization_checksum\n else:\n do_polarization = numpy.int32(0)\n self._cl_kernel_args[\"corrections\"][7] = do_polarization\n copy_image.wait()\n if do_dummy + do_polarization + do_solidAngle + do_flat + do_dark > 0:\n ev = self._program.corrections(self._queue, self.wdim_data, self.workgroup_size, *self._cl_kernel_args[\"corrections\"])\n events.append((\"corrections\",ev))\n #if self.padded is True:\n #integrate = self._program.csr_integrate_padded(self._queue, self.wdim_bins, self.workgroup_size, *self._cl_kernel_args[\"csr_integrate\"])\n #else:\n #integrate = self._program.csr_integrate(self._queue, self.wdim_bins, self.workgroup_size, *self._cl_kernel_args[\"csr_integrate\"])\n integrate = self._program.csr_integrate_dis(self._queue, self.wdim_bins, self.workgroup_size, *self._cl_kernel_args[\"csr_integrate\"])\n events.append((\"integrate\",integrate))\n outMerge = numpy.empty(self.bins, dtype=numpy.float32)\n outData = numpy.empty(self.bins, dtype=numpy.float32)\n outCount = numpy.empty(self.bins, dtype=numpy.float32)\n ev=pyopencl.enqueue_copy(self._queue, outMerge, self._cl_mem[\"outMerge\"])\n events.append((\"copy D->H outMerge\",ev))\n ev=pyopencl.enqueue_copy(self._queue, outData, self._cl_mem[\"outData\"])\n events.append((\"copy D->H outData\",ev))\n ev=pyopencl.enqueue_copy(self._queue, outCount, self._cl_mem[\"outCount\"])\n events.append((\"copy D->H outCount\",ev))\n ev.wait()\n if self.profile: \n self.events+=events \n return outMerge, outData, outCount\n\n def log_profile(self):\n \"\"\"\n If we are in profiling mode, prints out all timing for every single OpenCL call\n \"\"\"\n t = 0.0\n if self.profile:\n for e in self.events:\n if \"__len__\" in dir(e) and len(e) >= 2:\n et = 1e-6 * (e[1].profile.end - e[1].profile.start)\n print(\"%50s:\\t%.3fms\" % (e[0], et))\n t += et\n\n print(\"_\"*80)\n print(\"%50s:\\t%.3fms\" % (\"Total execution time\", t))\n","sub_path":"pyFAI-src/ocl_azim_csr_dis.py","file_name":"ocl_azim_csr_dis.py","file_ext":"py","file_size_in_byte":18853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"147703321","text":"import matplotlib.pyplot as plt\nfrom skimage import io, morphology, measure\nimport numpy as np\nfrom pylab import figure,mpl\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nNOISE_SIZE_THRESHOLD = 40\ndef delete_close_area(image):\n row , col = image.shape\n image = image.astype(int)\n image = np.where(image > 244, 0, 1)\n 
lb_img = measure.label(image,neighbors=4)\n regions = measure.regionprops(lb_img)\n\n max = np.max(lb_img)\n print(\"max = \",max )\n flag = np.zeros(max + 1)\n for i in range(1,row - 1,):\n flag[lb_img[i][0]] = flag[lb_img[i][0]] + 1\n flag[lb_img[i][col - 1]] = flag[lb_img[i][col - 1]] + 1\n\n for i in range(0,col):\n flag[lb_img[0][i]] = flag[lb_img[0][i]] + 1\n\n for i in range(0,col):\n flag[lb_img[row - 1][i]] = flag[lb_img[row - 1][i]] + 1\n lb = np.where(lb_img > 0, 0, 1)\n\n for i in range(row):\n for j in range(col):\n if flag[lb_img[i][j]] != 0:\n lb_img[i][j] = 0\n lb_img = np.where(lb_img > 0, 1, 0)\n print(flag)\n return lb_img + lb\n#生成二值测试图像\nimg = io.imread(\"D:\\藏文识别\\相关文献\\data\\Sticky_text\\ chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0007_1_015.png\")\n# img = io.imread(\"C:\\\\Users\\\\Zqc\\Desktop\\\\ttt.bmp\",as_grey=True)\nimport matplotlib.pyplot as plt\nfrom skimage import data,color,morphology,feature\n#生成二值测试图像\n#检测canny边缘,得到二值图片\n# edgs=feature.canny(img, sigma=2, low_threshold=10, high_threshold=10)\n# chull = morphology.convex_hull_object(edgs,neighbors=8)\n# p1 = plt.subplot(211)\n# p1.imshow(img,cmap = \"gray\")\n# p1 = plt.subplot(212)\n# p1.imshow(delete_close_area(img),cmap = \"gray\")\n\nimage1 = io.imread(\"D:\\藏文识别\\相关文献\\data\\Sticky_text\\ chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0013_0_015.png\")\nimage2 = io.imread(\"D:\\藏文识别\\相关文献\\data\\Sticky_text\\ chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0014_0_260.png\")\n#\n# image1 = np.where(image1 > 0, 0, 1)\n# image2 = np.where(image2 > 0, 0, 1)\n\np1 = plt.subplot(121)\np1.imshow(image1, cmap = \"gray\")\np1.set_title(\"中部垂直粘连\")\n\np2 = plt.subplot(122)\np2.imshow(image2, cmap = \"gray\")\np2.set_title(\"起笔倾斜粘连\")\n\ndef remove_noise(img):\n # 噪声分为两类,一类是小的像素点,一类是和边框连在一起的\n rows, cols = img.shape\n img_bin = np.where(img >= 0.9, 0, 1)\n img_labeled = measure.label(img_bin)\n\n regions = measure.regionprops(img_labeled)\n\n for region in regions:\n region_label = region.label\n minr, minc, maxr, maxc = region.bbox\n # 和边框连接在一起的噪声去除\n if minr == 0 or minc == 0 or maxr >= rows or maxc == cols or region.area < NOISE_SIZE_THRESHOLD or (maxr - minr) < 8:\n # print(\"****************************************************************************\" + str(region.area))\n for point in region.coords:\n row,col = point\n img[row,col] = 1\n return img\n\n# image2 = io.imread(\"D:\\\\database\\\\mark_lines\\\\chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0002_1_1.png\")\n# image1 = io.imread(\"D:\\藏文识别\\相关文献\\data\\gt_text_lines\\chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0002_1_1.png\",as_grey=True)\n\n\n# p1 = plt.subplot(211)\n#\n# image1 = image1[:,0:700]\n# image1 = remove_noise(image1)\n# image1 = np.where(image1 > 5,1,0)\n#\n# image2 = image2[:,0:700]\n#\n#\n# p1.imshow(image1, cmap = \"gray\")\n# p1.set_title(\"原始藏文历史文献行图像\")\n# p2 = plt.subplot(212)\n# p2.imshow(image2, cmap = \"gray\")\n# p2.set_title(\"预处理提取粘连后的行图像\")\n\np1 = plt.subplot(221)\np2 = plt.subplot(222)\np1.imshow(image1,cmap = \"gray\")\ncol,row = image1.shape\nh_profile = np.sum(np.where(image1==255,1,0),axis = 0)\n# print(image1)\n\nx = range(0,len(h_profile),1)\ny1 = np.zeros(row)\ny1[:] = col - 1\n\np1.fill_between(x, (col - h_profile / 2) , y1,color=\"#2ec3e7\")\np1.set_title(\"藏文历史文献粘连字丁串垂直投影\")\nprint(h_profile)\nmin_x = np.min(h_profile[int(row / 5): int(row / 5 * 4)])\nk = list(h_profile).index(np.min(h_profile[int(row / 5): int(row / 5 * 4)]),int(row / 5), 
int(row / 5 * 4))\npointx = [k,k]\npointy = [0,col - 1]\np2.imshow(image1,cmap = \"gray\")\np2.plot(pointx,pointy,\"r.-\")\np2.set_title(\"藏文历史文献粘连字丁串切分路径\")\n\n\np3 = plt.subplot(223)\np4 = plt.subplot(224)\np3.imshow(image2,cmap = \"gray\")\ncol,row = image2.shape\nh_profile = np.sum(np.where(image2==255,1,0),axis = 0)\n# print(image1)\n\nx = range(0,len(h_profile),1)\n\ny2 = np.zeros(row)\n\ny2[:] = col - 1\n\np3.fill_between(x, (col - (h_profile / 2)), y2,color=\"#2ec3e7\")\n\nprint(h_profile)\nmin_x = np.min(h_profile[int(row / 5): int(row / 5 * 4)])\nk = list(h_profile).index(np.min(h_profile[int(row / 5): int(row / 5 * 4)]),int(row / 5), int(row / 5 * 4))\npointx = [k,k]\npointy = [0,col - 1]\np4.imshow(image2,cmap = \"gray\")\np4.plot(pointx,pointy,\"r.-\")\n\nplt.show()\n","sub_path":"water/verticle-picture.py","file_name":"verticle-picture.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"472996023","text":"# Задача 5. Вариант 44.\n# Напишите программу, которая бы при запуске случайным образом отображала \n# название одной из семи основных физических единиц, согласно Международной системы единиц.\n\n# Golubev A. S.\n# 31.03.2016\n\nimport random\n\nsi = 'Длина, Масса, Время, Сила электрического тока, \\\nТемпература, Количество веществ, Сила света'.split(\", \")\n\nprint(\"Программа случайным образом отображает название одной из семи \\\nосновных физических единиц, согласно Международной системы единиц.\")\n\nchislo = random.randint(0,6)\n\nprint('\\nЕдиница СИ', end=' ')\n\nprint(si[chislo])\n\ninput(\"\\n\\nНажмите Enter для выхода.\")","sub_path":"IVTa/2014/GOLUBEV_A_S/task_5_44.py","file_name":"task_5_44.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"244787200","text":"\"\"\"\nCaptures UDP telemetry packets for use by the Project CARS Replay\nEnhancer. Run on the network to which Project CARS is broadcasting\n\nWrites the packets to a directory named \"packetdata\" with an\nappended timestamp. 
Each packet is named \"pdata\" with an appended\nsequence number.\n\nStop telemetry packet capture by hitting CTRL+C.\n\"\"\"\nimport datetime\nimport os\nimport socket\n\n# Create a new UDP socket.\nSOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# Bind the socket to the port\nSERVER_ADDRESS = (\"\", 5606)\nprint(\"Starting listener on port {}\".format(SERVER_ADDRESS[1]))\nSOCKET.bind(SERVER_ADDRESS)\n\ni = 0\nDIRECTORY = \"packetdata-\"+datetime.datetime.now().strftime(\n \"%Y%m%d-%H%M%S\")\ntry:\n if not os.path.exists(DIRECTORY):\n os.makedirs(DIRECTORY)\n while True:\n DATA, _ = SOCKET.recvfrom(65565)\n print(\"Writing packet {}\".format(i))\n FILE = open('./'+DIRECTORY+'/pdata'+str(i), 'wb')\n FILE.write(DATA)\n FILE.close()\n i += 1\n\nexcept KeyboardInterrupt:\n print(\"Closing listener on port {}\".format(SERVER_ADDRESS[1]))\n\nfinally:\n if i == 0:\n os.rmdir(DIRECTORY)\n","sub_path":"packetgrab.py","file_name":"packetgrab.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"622712593","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nPrepares the figure and table descriptions in html.\r\n\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport subprocess\r\n\r\n# Add the path to bbob_pproc\r\nif __name__ == \"__main__\":\r\n (filepath, filename) = os.path.split(sys.argv[0])\r\n sys.path.append(os.path.join(filepath, os.path.pardir))\r\n import matplotlib\r\n matplotlib.use('Agg') # To avoid window popup and use without X forwarding\r\n\r\nfrom bbob_pproc import preparetexforhtml, genericsettings\r\n\r\ndef main():\r\n\r\n preparetexforhtml.main()\r\n\r\n texFile = os.path.join(os.path.dirname(os.path.realpath(__file__)), genericsettings.latex_commands_for_html + '.tex')\r\n\r\n FNULL = open(os.devnull, 'w')\r\n args = \"pdflatex %s\" % texFile\r\n subprocess.call(args.split(), stdout=FNULL, stderr=FNULL, shell=False) \r\n \r\n tthFile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tth\\\\tth.exe')\r\n args = \"%s %s\" % (tthFile, texFile)\r\n subprocess.call(args.split(), stdout=FNULL, stderr=FNULL, shell=False) \r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"code-postprocessing/bbob_pproc/preparehtml.py","file_name":"preparehtml.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"409512699","text":"import pygame\nimport random\nimport itertools\nfrom ship_class import Ship\nfrom asteroid_class import Asteroid\nfrom projectile_class import Projectile\nfrom station_class import Station\n\npygame.init()\nwindow_width = 1200\nwindow_height = 900\nscreen = pygame.display.set_mode((window_width, window_height))\nlist_of_ships = []\nlist_of_red_ship = []\nlist_of_blue_ship = []\nlist_of_asteroids = []\nlist_of_projectiles = []\nactive_ships = []\nscroll = [0, 0]\nctrl_group_1 = []\nctrl_group_2 = []\nctrl_group_3 = []\nctrl_group_4 = []\nctrl_group_5 = []\nscrolled_distance_x = 0\nscrolled_distance_y = 0\n### For event loop ###\nobject_detected = False\n######################\n\n\n# To be replaced with dynamic ship generation.\ndef ship_generator():\n for i in range(5):\n x = random.randint(0, window_width)\n y = random.randint(0, window_height)\n player = pygame.sprite.GroupSingle(Ship(x, y, 'blue'))\n # player.add()\n list_of_ships.append(player)\n for i in range(10):\n x = random.randint(0, window_width)\n y = random.randint(0, window_height)\n player = 
pygame.sprite.GroupSingle(Ship(x, y, 'red'))\n # player.add()\n list_of_red_ship.append(player)\n\n\ndef add_ship_list_to_ships():\n for sprite in list_of_ships:\n for ship in sprite.sprites():\n ship.list_of_ships = list_of_ships\n\n\ndef asteroid_generator():\n for i in range(50):\n x = random.randint(-window_width, window_width)\n y = random.randint(-window_height, window_height)\n asteroid = pygame.sprite.GroupSingle(Asteroid(x, y))\n # asteroid.add()\n list_of_asteroids.append(asteroid)\n\n\ndef projectile_generator(x, y, target, target_object):\n shot = pygame.sprite.GroupSingle(Projectile(x, y, target, target_object))\n list_of_projectiles.append(shot)\n\n\nasteroid_generator()\nship_generator()\nadd_ship_list_to_ships()\n\n\"\"\"TEST\"\"\"\n\nstation = pygame.sprite.GroupSingle(Station(window_width / 3, window_height / 2))\n\nstation2 = pygame.sprite.GroupSingle(Station(window_width / 2, (window_height / 2)))\n\n\"\"\"TEST\"\"\"\n\nbackground = pygame.image.load(r'graphics/SpaceShooterRedux/Backgrounds/black.png').convert_alpha()\nbackground = pygame.transform.scale(background, (1600, 1000))\n\n\ndef main():\n global object_detected\n pygame.display.set_caption(\"RTS maybe\")\n clock = pygame.time.Clock()\n test_font = pygame.font.Font('font/Pixeltype.ttf', 50)\n text_surface = test_font.render(\"test text\", False, (0, 0, 0)) # (text, anti aliasing, color)\n selection_box_start = None\n selection = None\n\n\n while True:\n screen.blit(background, (0, 0))\n screen_scrolling()\n # Resets object_detected to false value for every frame.\n object_detected = False\n waypoint_generator()\n # Event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1 and pygame.key.get_mods() & pygame.KMOD_CTRL:\n populate_ctrl_1()\n elif event.key == pygame.K_2 and pygame.key.get_mods() & pygame.KMOD_CTRL:\n populate_ctrl_2()\n elif event.key == pygame.K_1:\n activate_ctrl_1()\n elif event.key == pygame.K_2:\n activate_ctrl_2()\n\n if event.type == pygame.MOUSEBUTTONUP and event.button == 3:\n if pygame.key.get_mods() & pygame.KMOD_SHIFT:\n forward_wp_to_active_ship_list(event.pos)\n\n else:\n # Checks if there is any targetable object in click location. 
If yes, ship moves to engage,\n # if not, it continues with regular move.\n check_for_targetable_object(event.pos)\n if object_detected is True:\n engage_target()\n elif object_detected is False:\n waypoint = event.pos\n forward_wp_to_active_ship(waypoint)\n\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n click_pos = event.pos\n\n #deactivate_active_ships()\n #active_ships.clear()\n\n selection_check(click_pos)\n selection_box_start = click_pos\n\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if selection is not None:\n selection_check(selection)\n selection = None\n\n if pygame.mouse.get_pressed(num_buttons=3) == (1, 0, 0):\n selection_box_end = pygame.mouse.get_pos()\n width = -(selection_box_start[0] - selection_box_end[0])\n height = -(selection_box_start[1] - selection_box_end[1])\n selection = pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(selection_box_start[0],\n selection_box_start[1], width, height), 2)\n add_scroll_to_ship_pos()\n draw_objects()\n remove_destroyed_objects()\n engage_target()\n #################\n station.draw(screen)\n station2.draw(screen)\n #################\n print(active_ships)\n\n\n pygame.display.update()\n clock.tick(60) # <-- limits program to 60 fps\n\n\n# Takes all objects and draws then on the screen in each frame.\n\ndef draw_objects():\n list_chain = itertools.chain(list_of_asteroids, list_of_ships, list_of_projectiles, list_of_red_ship)\n for item in list_chain:\n item.draw(screen)\n item.update()\n\n\n# Prevents removed/destroyed objects from being drawn.\ndef remove_destroyed_objects():\n for projectile in list_of_projectiles:\n for projectile_sprite in projectile.sprites():\n if projectile_sprite.explosion_end is True:\n list_of_projectiles.remove(projectile)\n\n for ship in list_of_red_ship:\n for ship_sprite in ship.sprites():\n if ship_sprite.hitpoints <= 0:\n list_of_red_ship.remove(ship)\n\n\n# Updates objects position with scroll value.\n\ndef add_scroll_to_ship_pos():\n int_scroll_x = int(scroll[0])\n int_scroll_y = int(scroll[1])\n\n for rectangle in station.sprites():\n rectangle.rect.x -= int_scroll_x\n rectangle.rect.y -= int_scroll_y\n\n for rectangle in station2.sprites():\n rectangle.rect.x -= int_scroll_x\n rectangle.rect.y -= int_scroll_y\n\n ###################################\n\n list_chain = itertools.chain(list_of_ships, list_of_red_ship)\n\n for ship_object in list_chain:\n\n for ship_sprite in ship_object.sprites():\n ship_sprite.coord_x -= int_scroll_x\n ship_sprite.coord_y -= int_scroll_y\n ship_sprite.rect.x -= int_scroll_x\n ship_sprite.rect.y -= int_scroll_y\n\n if ship_sprite.target is not None:\n new_x = ship_sprite.target[0]\n new_y = ship_sprite.target[1]\n new_x -= int_scroll_x\n new_y -= int_scroll_y\n ship_sprite.target = (new_x, new_y)\n\n # Ship waypoints update\n new_waypoints = []\n for waypoint in ship_sprite.waypoints:\n coord_x = waypoint[0]\n coord_y = waypoint[1]\n coord_x -= int_scroll_x\n coord_y -= int_scroll_y\n new_waypoint = (coord_x, coord_y)\n new_waypoints.append(new_waypoint)\n ship_sprite.waypoints = new_waypoints\n if ship_sprite.active_waypoint is not None:\n ship_sprite.active_waypoint = ((ship_sprite.active_waypoint[0] - int_scroll_x),\n (ship_sprite.active_waypoint[1] - int_scroll_y))\n\n for asteroid_object in list_of_asteroids:\n for asteroid_rectangle in asteroid_object.sprites():\n # Both coord_x/y and rect.x/y have to be updated.\n # Adjusting just the coord variable could be sufficient, however it creates a visible shift upon scrolling.\n # 
Updating rect as well removes that shift.\n asteroid_rectangle.coord_x -= int_scroll_x\n asteroid_rectangle.coord_y -= int_scroll_y\n asteroid_rectangle.rect.x -= int_scroll_x\n asteroid_rectangle.rect.y -= int_scroll_y\n\n for projectile in list_of_projectiles: # Updates both projectile position and target variable.\n for projectile_sprite in projectile.sprites():\n projectile_sprite.coord_x -= int_scroll_x\n projectile_sprite.coord_y -= int_scroll_y\n projectile_sprite.rect.x -= int_scroll_x\n projectile_sprite.rect.y -= int_scroll_y\n\n new_target_x = projectile_sprite.target[0]\n new_target_y = projectile_sprite.target[1]\n new_target_x -= int_scroll_x\n new_target_y -= int_scroll_y\n projectile_sprite.target = (new_target_x, new_target_y)\n pass\n\n\n'''''''''MOVEMENT FUNCTIONS'''''''''''\n\n\n# Move towards a single waypoint.\ndef forward_wp_to_active_ship(wp):\n for ship in list_of_ships:\n for item in ship.sprites():\n if item.active is True:\n item.new_waypoint(wp)\n item.waypoints.append(wp)\n # Prevents ship from shooting once move order was made.\n item.target = None\n\n\n# Adds additional waypoints to waypoints list.\ndef forward_wp_to_active_ship_list(wp):\n for ship in list_of_ships:\n for item in ship.sprites():\n if item.active is True:\n # Adds new waypoint to the list. If there is a single entry in the waypoints list,\n # it become the active waypoint.\n item.waypoints.append(wp)\n if len(item.waypoints) == 1:\n item.new_waypoint_keep_list(wp)\n\n\ndef move_to_target(click_pos):\n active_ships.clear()\n populate_active_ship_list()\n for asteroid_sprite in list_of_asteroids:\n for asteroid_obj in asteroid_sprite.sprites():\n if asteroid_obj.rect.collidepoint(click_pos):\n for ship in active_ships:\n ship.attacking_target = True\n ship.target = (asteroid_obj.rect.x, asteroid_obj.rect.y)\n\n\n'''''''''SUPPORTING FUNCTIONS'''''''''''\n\n\ndef selection_check(selection):\n if type(selection) == tuple:\n for ship in list_of_ships:\n for item in ship.sprites():\n if item.rect.collidepoint(selection):\n item.activate_ship()\n\n else:\n item.deactivate_ship()\n active_ships.clear()\n populate_active_ship_list()\n\n elif type(selection) == pygame.Rect:\n for ship in list_of_ships:\n for item in ship.sprites():\n if item.rect.colliderect(selection):\n item.activate_ship()\n\n else:\n item.deactivate_ship()\n active_ships.clear()\n populate_active_ship_list()\n\n\ndef populate_active_ship_list():\n for ship in list_of_ships:\n for ship_sprite in ship.sprites():\n if ship_sprite.active is True:\n active_ships.append(ship_sprite)\n\n\ndef deactivate_active_ships():\n for ship in active_ships:\n if ship.active is True:\n ship.active = False\n\n\ndef populate_ctrl_1():\n global ctrl_group_1\n ctrl_group_1.clear()\n ctrl_group_1 = active_ships.copy()\n\n\ndef populate_ctrl_2():\n global ctrl_group_2\n ctrl_group_2.clear()\n ctrl_group_2 = active_ships.copy()\n\n\ndef activate_ctrl_1():\n global active_ships\n deactivate_active_ships()\n active_ships.clear()\n for ship in ctrl_group_1:\n ship.active = True\n active_ships = ctrl_group_1.copy()\n\n\ndef activate_ctrl_2():\n global active_ships\n deactivate_active_ships()\n active_ships.clear()\n for ship in ctrl_group_2:\n ship.active = True\n active_ships = ctrl_group_2.copy()\n\n\n# Function checks if clicked object is a valid target (present in one of the list within list_chain).\ndef check_for_targetable_object(click_pos):\n global object_detected\n active_ships.clear()\n populate_active_ship_list()\n\n list_chain = 
itertools.chain(list_of_asteroids, list_of_red_ship)\n\n for item in list_chain:\n for item_sprite in item.sprites():\n if item_sprite.rect.collidepoint(click_pos):\n # Passes the rect of the target object to ship.\n pass_target_to_active_ships(item_sprite.rect, item_sprite)\n object_detected = True\n\n\ndef pass_target_to_active_ships(target, target_object):\n for ship in active_ships:\n ship.target = (target[0], target[1])\n ship.target_object = target_object\n ship.orbit_mode = True\n ship.orbit_target()\n\n\n# Function takes the forwarded sprite from ship object and uses it to generate a projectile object.\ndef engage_target():\n for ship_item in list_of_ships:\n for ship in ship_item.sprites():\n if ship.target is not None:\n ship.check_distance(ship.target)\n if ship.dist <= ship.max_range:\n if ship.shot_interval == 0:\n x_cor = int(ship.coord_x)\n y_cor = int(ship.coord_y)\n projectile_generator(x_cor, y_cor, ship.target, ship.target_object)\n ship.shot_interval += .1\n if ship.shot_interval >= 5:\n ship.shot_interval = 0\n ship.waypoints = []\n\n\ndef screen_scrolling():\n global scroll, scrolled_distance_x, scrolled_distance_y\n limit = 20\n mouse_pos = pygame.mouse.get_pos()\n\n if mouse_pos[0] < 20 and scrolled_distance_x > -limit:\n scroll[0] -= 1\n scrolled_distance_x -= 1\n elif mouse_pos[0] > (window_width - 20) and scrolled_distance_x < limit:\n scroll[0] += 1\n scrolled_distance_x += 1\n elif mouse_pos[1] < 20 and scrolled_distance_y > -limit:\n scroll[1] -= 1\n scrolled_distance_y -= 1\n elif mouse_pos[1] > (window_height - 20) and scrolled_distance_y < limit:\n scroll[1] += 1\n scrolled_distance_y += 1\n else:\n scroll = [0, 0]\n\n\n# Generates waypoint for red ships (targets)\ndef waypoint_generator():\n for ship in list_of_red_ship:\n for ship_sprite in ship.sprites():\n if ship_sprite.active_waypoint is None:\n x = random.randint(0, window_width)\n y = random.randint(0, window_height)\n waypoint = (x, y)\n ship_sprite.new_waypoint(waypoint)\n ship_sprite.waypoints.append(waypoint)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":14882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
stepnameinputoutputtarget
%d%s